Diffstat (limited to 'include/linux')
mode  file  lines changed
-rw-r--r--  include/linux/8250_pci.h  1
-rw-r--r--  include/linux/a.out.h  17
-rw-r--r--  include/linux/acct.h  5
-rw-r--r--  include/linux/acpi.h  851
-rw-r--r--  include/linux/acpi_amd_wbrf.h  91
-rw-r--r--  include/linux/acpi_dma.h  14
-rw-r--r--  include/linux/acpi_iort.h  71
-rw-r--r--  include/linux/acpi_mdio.h  33
-rw-r--r--  include/linux/acpi_pmtmr.h  14
-rw-r--r--  include/linux/acpi_rimt.h  28
-rw-r--r--  include/linux/acpi_viot.h  21
-rw-r--r--  include/linux/adb.h  1
-rw-r--r--  include/linux/adfs_fs.h  1
-rw-r--r--  include/linux/adi-axi-common.h  77
-rw-r--r--  include/linux/adreno-smmu-priv.h  79
-rw-r--r--  include/linux/adxl.h  13
-rw-r--r--  include/linux/aer.h  54
-rw-r--r--  include/linux/agpgart.h  2
-rw-r--r--  include/linux/ahci-remap.h  1
-rw-r--r--  include/linux/ahci_platform.h  22
-rw-r--r--  include/linux/aio.h  7
-rw-r--r--  include/linux/alarmtimer.h  18
-rw-r--r--  include/linux/alcor_pci.h  281
-rw-r--r--  include/linux/align.h  7
-rw-r--r--  include/linux/alloc_tag.h  268
-rw-r--r--  include/linux/altera_jtaguart.h  1
-rw-r--r--  include/linux/altera_uart.h  1
-rw-r--r--  include/linux/amba/bus.h  136
-rw-r--r--  include/linux/amba/clcd-regs.h  86
-rw-r--r--  include/linux/amba/clcd.h  321
-rw-r--r--  include/linux/amba/kmi.h  16
-rw-r--r--  include/linux/amba/mmci.h  18
-rw-r--r--  include/linux/amba/pl022.h  25
-rw-r--r--  include/linux/amba/pl080.h  5
-rw-r--r--  include/linux/amba/pl08x.h  5
-rw-r--r--  include/linux/amba/pl093.h  80
-rw-r--r--  include/linux/amba/serial.h  276
-rw-r--r--  include/linux/amd-iommu.h  206
-rw-r--r--  include/linux/amd-pmf-io.h  65
-rw-r--r--  include/linux/amifd.h  62
-rw-r--r--  include/linux/amifdreg.h  81
-rw-r--r--  include/linux/annotate.h  127
-rw-r--r--  include/linux/anon_inodes.h  15
-rw-r--r--  include/linux/aperture.h  62
-rw-r--r--  include/linux/apm-emulation.h  3
-rw-r--r--  include/linux/apm_bios.h  11
-rw-r--r--  include/linux/apple-gmux.h  158
-rw-r--r--  include/linux/apple_bl.h  26
-rw-r--r--  include/linux/arch_topology.h  101
-rw-r--r--  include/linux/args.h  28
-rw-r--r--  include/linux/arm-cci.h  17
-rw-r--r--  include/linux/arm-smccc.h  646
-rw-r--r--  include/linux/arm_ffa.h  515
-rw-r--r--  include/linux/arm_mpam.h  66
-rw-r--r--  include/linux/arm_sdei.h  86
-rw-r--r--  include/linux/armada-37xx-rwtm-mailbox.h  23
-rw-r--r--  include/linux/array_size.h  13
-rw-r--r--  include/linux/ascii85.h  39
-rw-r--r--  include/linux/asn1.h  6
-rw-r--r--  include/linux/asn1_ber_bytecode.h  6
-rw-r--r--  include/linux/asn1_decoder.h  7
-rw-r--r--  include/linux/asn1_encoder.h  31
-rw-r--r--  include/linux/assoc_array.h  8
-rw-r--r--  include/linux/assoc_array_priv.h  8
-rw-r--r--  include/linux/async.h  92
-rw-r--r--  include/linux/async_tx.h  41
-rw-r--r--  include/linux/ata.h  177
-rw-r--r--  include/linux/ata_platform.h  4
-rw-r--r--  include/linux/atalk.h  32
-rw-r--r--  include/linux/ath9k_platform.h  51
-rw-r--r--  include/linux/atm.h  1
-rw-r--r--  include/linux/atm_suni.h  12
-rw-r--r--  include/linux/atm_tcp.h  3
-rw-r--r--  include/linux/atmdev.h  37
-rw-r--r--  include/linux/atmel-isc-media.h  58
-rw-r--r--  include/linux/atmel-mci.h  45
-rw-r--r--  include/linux/atmel-ssc.h  1
-rw-r--r--  include/linux/atmel_pdc.h  6
-rw-r--r--  include/linux/atmel_tc.h  270
-rw-r--r--  include/linux/atomic.h  1057
-rw-r--r--  include/linux/atomic/atomic-arch-fallback.h  4693
-rw-r--r--  include/linux/atomic/atomic-instrumented.h  5053
-rw-r--r--  include/linux/atomic/atomic-long.h  1812
-rw-r--r--  include/linux/attribute_container.h  16
-rw-r--r--  include/linux/audit.h  380
-rw-r--r--  include/linux/audit_arch.h  26
-rw-r--r--  include/linux/auto_dev-ioctl.h  5
-rw-r--r--  include/linux/auto_fs.h  5
-rw-r--r--  include/linux/auxiliary_bus.h  289
-rw-r--r--  include/linux/auxvec.h  3
-rw-r--r--  include/linux/average.h  11
-rw-r--r--  include/linux/avf/virtchnl.h  1527
-rw-r--r--  include/linux/b1pcmcia.h  21
-rw-r--r--  include/linux/backing-dev-defs.h  148
-rw-r--r--  include/linux/backing-dev.h  293
-rw-r--r--  include/linux/backing-file.h  44
-rw-r--r--  include/linux/backlight.h  430
-rw-r--r--  include/linux/badblocks.h  41
-rw-r--r--  include/linux/balloon_compaction.h  175
-rw-r--r--  include/linux/base64.h  22
-rw-r--r--  include/linux/bcd.h  5
-rw-r--r--  include/linux/bch.h  25
-rw-r--r--  include/linux/bcm47xx_nvram.h  12
-rw-r--r--  include/linux/bcm47xx_sprom.h  17
-rw-r--r--  include/linux/bcm47xx_wdt.h  1
-rw-r--r--  include/linux/bcm963xx_nvram.h  17
-rw-r--r--  include/linux/bcm963xx_tag.h  3
-rw-r--r--  include/linux/bcma/bcma.h  12
-rw-r--r--  include/linux/bcma/bcma_driver_arm_c9.h  1
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h  9
-rw-r--r--  include/linux/bcma/bcma_driver_gmac_cmn.h  1
-rw-r--r--  include/linux/bcma/bcma_driver_mips.h  1
-rw-r--r--  include/linux/bcma/bcma_driver_pci.h  3
-rw-r--r--  include/linux/bcma/bcma_driver_pcie2.h  1
-rw-r--r--  include/linux/bcma/bcma_regs.h  1
-rw-r--r--  include/linux/bcma/bcma_soc.h  2
-rw-r--r--  include/linux/bfin_mac.h  30
-rw-r--r--  include/linux/binfmts.h  104
-rw-r--r--  include/linux/bio-integrity.h  148
-rw-r--r--  include/linux/bio.h  705
-rw-r--r--  include/linux/bit_spinlock.h  9
-rw-r--r--  include/linux/bitfield.h  242
-rw-r--r--  include/linux/bitmap-str.h  18
-rw-r--r--  include/linux/bitmap.h  838
-rw-r--r--  include/linux/bitops.h  291
-rw-r--r--  include/linux/bitrev.h  47
-rw-r--r--  include/linux/bits.h  89
-rw-r--r--  include/linux/blk-cgroup.h  764
-rw-r--r--  include/linux/blk-crypto-profile.h  228
-rw-r--r--  include/linux/blk-crypto.h  194
-rw-r--r--  include/linux/blk-integrity.h  183
-rw-r--r--  include/linux/blk-mq-dma.h  76
-rw-r--r--  include/linux/blk-mq-pci.h  9
-rw-r--r--  include/linux/blk-mq-rdma.h  10
-rw-r--r--  include/linux/blk-mq-virtio.h  10
-rw-r--r--  include/linux/blk-mq.h  1186
-rw-r--r--  include/linux/blk-pm.h  23
-rw-r--r--  include/linux/blk_types.h  494
-rw-r--r--  include/linux/blkdev.h  2626
-rw-r--r--  include/linux/blkpg.h  1
-rw-r--r--  include/linux/blktrace_api.h  65
-rw-r--r--  include/linux/blockgroup_lock.h  1
-rw-r--r--  include/linux/bma150.h  19
-rw-r--r--  include/linux/bnxt/hsi.h  11166
-rw-r--r--  include/linux/bootconfig.h  308
-rw-r--r--  include/linux/bootmem.h  374
-rw-r--r--  include/linux/bootmem_info.h  94
-rw-r--r--  include/linux/bottom_half.h  10
-rw-r--r--  include/linux/bpf-cgroup-defs.h  85
-rw-r--r--  include/linux/bpf-cgroup.h  480
-rw-r--r--  include/linux/bpf-netns.h  62
-rw-r--r--  include/linux/bpf.h  3697
-rw-r--r--  include/linux/bpf_crypto.h  24
-rw-r--r--  include/linux/bpf_lirc.h  30
-rw-r--r--  include/linux/bpf_local_storage.h  203
-rw-r--r--  include/linux/bpf_lsm.h  109
-rw-r--r--  include/linux/bpf_mem_alloc.h  51
-rw-r--r--  include/linux/bpf_mprog.h  343
-rw-r--r--  include/linux/bpf_trace.h  2
-rw-r--r--  include/linux/bpf_types.h  148
-rw-r--r--  include/linux/bpf_verifier.h  1065
-rw-r--r--  include/linux/bpfptr.h  89
-rw-r--r--  include/linux/brcmphy.h  304
-rw-r--r--  include/linux/bsearch.h  27
-rw-r--r--  include/linux/bsg-lib.h  38
-rw-r--r--  include/linux/bsg.h  40
-rw-r--r--  include/linux/btf.h  686
-rw-r--r--  include/linux/btf_ids.h  288
-rw-r--r--  include/linux/btree-128.h  1
-rw-r--r--  include/linux/btree-type.h  1
-rw-r--r--  include/linux/btree.h  3
-rw-r--r--  include/linux/btrfs.h  1
-rw-r--r--  include/linux/buffer_head.h  362
-rw-r--r--  include/linux/bug.h  39
-rw-r--r--  include/linux/build-salt.h  20
-rw-r--r--  include/linux/build_bug.h  73
-rw-r--r--  include/linux/buildid.h  46
-rw-r--r--  include/linux/bus/stm32_firewall_device.h  145
-rw-r--r--  include/linux/bvec.h  286
-rw-r--r--  include/linux/byteorder/big_endian.h  5
-rw-r--r--  include/linux/byteorder/generic.h  66
-rw-r--r--  include/linux/byteorder/little_endian.h  5
-rw-r--r--  include/linux/c2port.h  9
-rw-r--r--  include/linux/cache.h  121
-rw-r--r--  include/linux/cache_coherency.h  61
-rw-r--r--  include/linux/cacheflush.h  29
-rw-r--r--  include/linux/cacheinfo.h  112
-rw-r--r--  include/linux/call_once.h  66
-rw-r--r--  include/linux/can/bittiming.h  280
-rw-r--r--  include/linux/can/can-ml.h  80
-rw-r--r--  include/linux/can/core.h  20
-rw-r--r--  include/linux/can/dev.h  205
-rw-r--r--  include/linux/can/dev/peak_canfd.h  20
-rw-r--r--  include/linux/can/led.h  54
-rw-r--r--  include/linux/can/length.h  306
-rw-r--r--  include/linux/can/platform/cc770.h  1
-rw-r--r--  include/linux/can/platform/flexcan.h  23
-rw-r--r--  include/linux/can/platform/mcp251x.h  21
-rw-r--r--  include/linux/can/platform/rcar_can.h  17
-rw-r--r--  include/linux/can/platform/sja1000.h  3
-rw-r--r--  include/linux/can/rx-offload.h  50
-rw-r--r--  include/linux/can/skb.h  111
-rw-r--r--  include/linux/capability.h  175
-rw-r--r--  include/linux/cb710.h  11
-rw-r--r--  include/linux/cc_platform.h  135
-rw-r--r--  include/linux/cciss_ioctl.h  1
-rw-r--r--  include/linux/ccp.h  21
-rw-r--r--  include/linux/cdev.h  1
-rw-r--r--  include/linux/cdrom.h  36
-rw-r--r--  include/linux/cdx/bitfield.h  90
-rw-r--r--  include/linux/cdx/cdx_bus.h  291
-rw-r--r--  include/linux/cdx/edac_cdx_pcol.h  28
-rw-r--r--  include/linux/cdx/mcdi.h  199
-rw-r--r--  include/linux/ceph/auth.h  81
-rw-r--r--  include/linux/ceph/buffer.h  4
-rw-r--r--  include/linux/ceph/ceph_debug.h  43
-rw-r--r--  include/linux/ceph/ceph_features.h  41
-rw-r--r--  include/linux/ceph/ceph_frag.h  1
-rw-r--r--  include/linux/ceph/ceph_fs.h  146
-rw-r--r--  include/linux/ceph/ceph_hash.h  1
-rw-r--r--  include/linux/ceph/cls_lock_client.h  4
-rw-r--r--  include/linux/ceph/debugfs.h  19
-rw-r--r--  include/linux/ceph/decode.h  42
-rw-r--r--  include/linux/ceph/libceph.h  99
-rw-r--r--  include/linux/ceph/mdsmap.h  69
-rw-r--r--  include/linux/ceph/messenger.h  471
-rw-r--r--  include/linux/ceph/mon_client.h  11
-rw-r--r--  include/linux/ceph/msgpool.h  12
-rw-r--r--  include/linux/ceph/msgr.h  69
-rw-r--r--  include/linux/ceph/osd_client.h  221
-rw-r--r--  include/linux/ceph/osdmap.h  69
-rw-r--r--  include/linux/ceph/pagelist.h  26
-rw-r--r--  include/linux/ceph/rados.h  58
-rw-r--r--  include/linux/ceph/string_table.h  1
-rw-r--r--  include/linux/ceph/striper.h  71
-rw-r--r--  include/linux/ceph/types.h  2
-rw-r--r--  include/linux/cfag12864b.h  35
-rw-r--r--  include/linux/cfi.h  86
-rw-r--r--  include/linux/cfi_types.h  68
-rw-r--r--  include/linux/cgroup-defs.h  540
-rw-r--r--  include/linux/cgroup.h  412
-rw-r--r--  include/linux/cgroup_api.h  1
-rw-r--r--  include/linux/cgroup_dmem.h  66
-rw-r--r--  include/linux/cgroup_namespace.h  58
-rw-r--r--  include/linux/cgroup_rdma.h  7
-rw-r--r--  include/linux/cgroup_refcnt.h  96
-rw-r--r--  include/linux/cgroup_subsys.h  9
-rw-r--r--  include/linux/circ_buf.h  3
-rw-r--r--  include/linux/cleancache.h  123
-rw-r--r--  include/linux/cleanup.h  534
-rw-r--r--  include/linux/clk-provider.h  1164
-rw-r--r--  include/linux/clk.h  579
-rw-r--r--  include/linux/clk/analogbits-wrpll-cln28hpc.h  79
-rw-r--r--  include/linux/clk/at91_pmc.h  116
-rw-r--r--  include/linux/clk/bcm2835.h  24
-rw-r--r--  include/linux/clk/clk-conf.h  10
-rw-r--r--  include/linux/clk/davinci.h  17
-rw-r--r--  include/linux/clk/imx.h  15
-rw-r--r--  include/linux/clk/mmp.h  17
-rw-r--r--  include/linux/clk/mxs.h  5
-rw-r--r--  include/linux/clk/pxa.h  16
-rw-r--r--  include/linux/clk/renesas.h  153
-rw-r--r--  include/linux/clk/samsung.h  24
-rw-r--r--  include/linux/clk/spear.h  37
-rw-r--r--  include/linux/clk/sunxi-ng.h  14
-rw-r--r--  include/linux/clk/tegra.h  173
-rw-r--r--  include/linux/clk/ti.h  59
-rw-r--r--  include/linux/clk/zynq.h  15
-rw-r--r--  include/linux/clkdev.h  19
-rw-r--r--  include/linux/clock_cooling.h  65
-rw-r--r--  include/linux/clockchips.h  7
-rw-r--r--  include/linux/clocksource.h  161
-rw-r--r--  include/linux/clocksource_ids.h  17
-rw-r--r--  include/linux/closure.h  492
-rw-r--r--  include/linux/cm4000_cs.h  10
-rw-r--r--  include/linux/cma.h  67
-rw-r--r--  include/linux/cmdline-parser.h  45
-rw-r--r--  include/linux/cmpxchg-emu.h  15
-rw-r--r--  include/linux/cnt32_to_63.h  5
-rw-r--r--  include/linux/coda.h  3
-rw-r--r--  include/linux/coda_psdev.h  71
-rw-r--r--  include/linux/codetag.h  115
-rw-r--r--  include/linux/comedi/comedi_8254.h  161
-rw-r--r--  include/linux/comedi/comedi_8255.h  54
-rw-r--r--  include/linux/comedi/comedi_isadma.h  114
-rw-r--r--  include/linux/comedi/comedi_pci.h  56
-rw-r--r--  include/linux/comedi/comedi_pcmcia.h  48
-rw-r--r--  include/linux/comedi/comedi_usb.h  41
-rw-r--r--  include/linux/comedi/comedidev.h  1054
-rw-r--r--  include/linux/comedi/comedilib.h  56
-rw-r--r--  include/linux/compaction.h  139
-rw-r--r--  include/linux/compat.h  1095
-rw-r--r--  include/linux/compiler-clang.h  158
-rw-r--r--  include/linux/compiler-gcc.h  325
-rw-r--r--  include/linux/compiler-intel.h  45
-rw-r--r--  include/linux/compiler-version.h  44
-rw-r--r--  include/linux/compiler.h  709
-rw-r--r--  include/linux/compiler_attributes.h  412
-rw-r--r--  include/linux/compiler_types.h  659
-rw-r--r--  include/linux/completion.h  60
-rw-r--r--  include/linux/component.h  97
-rw-r--r--  include/linux/concap.h  112
-rw-r--r--  include/linux/configfs.h  48
-rw-r--r--  include/linux/connector.h  98
-rw-r--r--  include/linux/console.h  744
-rw-r--r--  include/linux/console_struct.h  120
-rw-r--r--  include/linux/consolemap.h  84
-rw-r--r--  include/linux/const.h  6
-rw-r--r--  include/linux/container.h  12
-rw-r--r--  include/linux/container_of.h  41
-rw-r--r--  include/linux/context_tracking.h  196
-rw-r--r--  include/linux/context_tracking_irq.h  21
-rw-r--r--  include/linux/context_tracking_state.h  165
-rw-r--r--  include/linux/cookie.h  51
-rw-r--r--  include/linux/cordic.h  9
-rw-r--r--  include/linux/coredump.h  67
-rw-r--r--  include/linux/coresight-pmu.h  78
-rw-r--r--  include/linux/coresight-stm.h  1
-rw-r--r--  include/linux/coresight.h  680
-rw-r--r--  include/linux/count_zeros.h  6
-rw-r--r--  include/linux/counter.h  638
-rw-r--r--  include/linux/cper.h  563
-rw-r--r--  include/linux/cpu.h  199
-rw-r--r--  include/linux/cpu_cooling.h  93
-rw-r--r--  include/linux/cpu_pm.h  11
-rw-r--r--  include/linux/cpu_rmap.h  14
-rw-r--r--  include/linux/cpu_smt.h  33
-rw-r--r--  include/linux/cpufeature.h  7
-rw-r--r--  include/linux/cpufreq.h  773
-rw-r--r--  include/linux/cpuhotplug.h  270
-rw-r--r--  include/linux/cpuhplock.h  49
-rw-r--r--  include/linux/cpuidle.h  188
-rw-r--r--  include/linux/cpuidle_haltpoll.h  16
-rw-r--r--  include/linux/cpumask.h  1140
-rw-r--r--  include/linux/cpumask_api.h  1
-rw-r--r--  include/linux/cpumask_types.h  66
-rw-r--r--  include/linux/cpuset.h  109
-rw-r--r--  include/linux/crash_core.h  148
-rw-r--r--  include/linux/crash_dump.h  131
-rw-r--r--  include/linux/crash_reserve.h  66
-rw-r--r--  include/linux/crc-ccitt.h  1
-rw-r--r--  include/linux/crc-itu-t.h  6
-rw-r--r--  include/linux/crc-t10dif.h  12
-rw-r--r--  include/linux/crc16.h  13
-rw-r--r--  include/linux/crc32.h  123
-rw-r--r--  include/linux/crc32c.h  8
-rw-r--r--  include/linux/crc32poly.h  14
-rw-r--r--  include/linux/crc4.h  1
-rw-r--r--  include/linux/crc64.h  28
-rw-r--r--  include/linux/crc7.h  8
-rw-r--r--  include/linux/crc8.h  2
-rw-r--r--  include/linux/cred.h  201
-rw-r--r--  include/linux/crush/crush.h  20
-rw-r--r--  include/linux/crush/hash.h  1
-rw-r--r--  include/linux/crush/mapper.h  1
-rw-r--r--  include/linux/crypto.h  1441
-rw-r--r--  include/linux/cryptohash.h  13
-rw-r--r--  include/linux/cs5535.h  5
-rw-r--r--  include/linux/ctype.h  16
-rw-r--r--  include/linux/cuda.h  7
-rw-r--r--  include/linux/cyclades.h  360
-rw-r--r--  include/linux/damon.h  975
-rw-r--r--  include/linux/dasd_mod.h  11
-rw-r--r--  include/linux/davinci_emac.h  1
-rw-r--r--  include/linux/dax.h  295
-rw-r--r--  include/linux/dca.h  18
-rw-r--r--  include/linux/dcache.h  344
-rw-r--r--  include/linux/dccp.h  292
-rw-r--r--  include/linux/dcookies.h  68
-rw-r--r--  include/linux/debug_locks.h  14
-rw-r--r--  include/linux/debugfs.h  440
-rw-r--r--  include/linux/debugobjects.h  47
-rw-r--r--  include/linux/decompress/bunzip2.h  1
-rw-r--r--  include/linux/decompress/generic.h  1
-rw-r--r--  include/linux/decompress/inflate.h  1
-rw-r--r--  include/linux/decompress/mm.h  15
-rw-r--r--  include/linux/decompress/unlz4.h  1
-rw-r--r--  include/linux/decompress/unlzma.h  1
-rw-r--r--  include/linux/decompress/unlzo.h  1
-rw-r--r--  include/linux/decompress/unxz.h  5
-rw-r--r--  include/linux/decompress/unzstd.h  11
-rw-r--r--  include/linux/delay.h  99
-rw-r--r--  include/linux/delayacct.h  228
-rw-r--r--  include/linux/delayed_call.h  1
-rw-r--r--  include/linux/dell-led.h  6
-rw-r--r--  include/linux/dev_printk.h  289
-rw-r--r--  include/linux/devcoredump.h  75
-rw-r--r--  include/linux/devfreq-event.h  25
-rw-r--r--  include/linux/devfreq-governor.h  102
-rw-r--r--  include/linux/devfreq.h  250
-rw-r--r--  include/linux/devfreq_cooling.h  38
-rw-r--r--  include/linux/device-mapper.h  382
-rw-r--r--  include/linux/device.h  1671
-rw-r--r--  include/linux/device/bus.h  289
-rw-r--r--  include/linux/device/class.h  229
-rw-r--r--  include/linux/device/devres.h  189
-rw-r--r--  include/linux/device/driver.h  291
-rw-r--r--  include/linux/device/faux.h  69
-rw-r--r--  include/linux/device_cgroup.h  59
-rw-r--r--  include/linux/devm-helpers.h  79
-rw-r--r--  include/linux/devpts_fs.h  5
-rw-r--r--  include/linux/dfl.h  95
-rw-r--r--  include/linux/dibs.h  464
-rw-r--r--  include/linux/digsig.h  10
-rw-r--r--  include/linux/dim.h  451
-rw-r--r--  include/linux/dio.h  10
-rw-r--r--  include/linux/dirent.h  3
-rw-r--r--  include/linux/dlm.h  62
-rw-r--r--  include/linux/dlm_plock.h  7
-rw-r--r--  include/linux/dm-bufio.h  171
-rw-r--r--  include/linux/dm-dirty-log.h  9
-rw-r--r--  include/linux/dm-io.h  14
-rw-r--r--  include/linux/dm-kcopyd.h  28
-rw-r--r--  include/linux/dm-region-hash.h  9
-rw-r--r--  include/linux/dm-verity-loadpin.h  27
-rw-r--r--  include/linux/dm9000.h  6
-rw-r--r--  include/linux/dma-buf-mapping.h  17
-rw-r--r--  include/linux/dma-buf.h  398
-rw-r--r--  include/linux/dma-buf/heaps/cma.h  16
-rw-r--r--  include/linux/dma-contiguous.h  164
-rw-r--r--  include/linux/dma-debug.h  213
-rw-r--r--  include/linux/dma-direct.h  153
-rw-r--r--  include/linux/dma-direction.h  15
-rw-r--r--  include/linux/dma-fence-array.h  55
-rw-r--r--  include/linux/dma-fence-chain.h  131
-rw-r--r--  include/linux/dma-fence-unwrap.h  77
-rw-r--r--  include/linux/dma-fence.h  452
-rw-r--r--  include/linux/dma-heap.h  49
-rw-r--r--  include/linux/dma-iommu.h  112
-rw-r--r--  include/linux/dma-map-ops.h  434
-rw-r--r--  include/linux/dma-mapping.h  1082
-rw-r--r--  include/linux/dma-resv.h  487
-rw-r--r--  include/linux/dma/amd_xdma.h  16
-rw-r--r--  include/linux/dma/dw.h  9
-rw-r--r--  include/linux/dma/edma.h  120
-rw-r--r--  include/linux/dma/hsu.h  11
-rw-r--r--  include/linux/dma/idma64.h  14
-rw-r--r--  include/linux/dma/imx-dma.h (renamed from include/linux/platform_data/dma-imx.h)  46
-rw-r--r--  include/linux/dma/ipu-dma.h  177
-rw-r--r--  include/linux/dma/k3-event-router.h  16
-rw-r--r--  include/linux/dma/k3-psil.h  86
-rw-r--r--  include/linux/dma/k3-udma-glue.h  153
-rw-r--r--  include/linux/dma/mmp-pdma.h  15
-rw-r--r--  include/linux/dma/mxs-dma.h  24
-rw-r--r--  include/linux/dma/pxa-dma.h  21
-rw-r--r--  include/linux/dma/qcom-gpi-dma.h  83
-rw-r--r--  include/linux/dma/qcom_adm.h  12
-rw-r--r--  include/linux/dma/qcom_bam_dma.h  71
-rw-r--r--  include/linux/dma/sprd-dma.h  190
-rw-r--r--  include/linux/dma/ti-cppi5.h  1060
-rw-r--r--  include/linux/dma/xilinx_dma.h  22
-rw-r--r--  include/linux/dma/xilinx_dpdma.h  11
-rw-r--r--  include/linux/dma_remapping.h  57
-rw-r--r--  include/linux/dmaengine.h  485
-rw-r--r--  include/linux/dmapool.h  53
-rw-r--r--  include/linux/dmar.h  210
-rw-r--r--  include/linux/dmi.h  17
-rw-r--r--  include/linux/dnotify.h  8
-rw-r--r--  include/linux/dns_resolver.h  10
-rw-r--r--  include/linux/dpll.h  229
-rw-r--r--  include/linux/dqblk_qtree.h  1
-rw-r--r--  include/linux/dqblk_v1.h  1
-rw-r--r--  include/linux/dqblk_v2.h  1
-rw-r--r--  include/linux/drbd.h  21
-rw-r--r--  include/linux/drbd_config.h  16
-rw-r--r--  include/linux/drbd_genl.h  4
-rw-r--r--  include/linux/drbd_genl_api.h  3
-rw-r--r--  include/linux/drbd_limits.h  207
-rw-r--r--  include/linux/ds2782_battery.h  1
-rw-r--r--  include/linux/dsa/8021q.h  37
-rw-r--r--  include/linux/dsa/brcm.h  16
-rw-r--r--  include/linux/dsa/ksz_common.h  53
-rw-r--r--  include/linux/dsa/lan9303.h  39
-rw-r--r--  include/linux/dsa/loop.h  42
-rw-r--r--  include/linux/dsa/mv88e6xxx.h  13
-rw-r--r--  include/linux/dsa/ocelot.h  324
-rw-r--r--  include/linux/dsa/sja1105.h  75
-rw-r--r--  include/linux/dsa/tag_qca.h  87
-rw-r--r--  include/linux/dtlk.h  1
-rw-r--r--  include/linux/dtpm.h  73
-rw-r--r--  include/linux/dw_apb_timer.h  9
-rw-r--r--  include/linux/dynamic_debug.h  312
-rw-r--r--  include/linux/dynamic_queue_limits.h  66
-rw-r--r--  include/linux/earlycpio.h  1
-rw-r--r--  include/linux/ecryptfs.h  1
-rw-r--r--  include/linux/edac.h  436
-rw-r--r--  include/linux/edd.h  11
-rw-r--r--  include/linux/edma.h  29
-rw-r--r--  include/linux/eeprom_93cx6.h  26
-rw-r--r--  include/linux/eeprom_93xx46.h  26
-rw-r--r--  include/linux/efi-bgrt.h  1
-rw-r--r--  include/linux/efi.h  1540
-rw-r--r--  include/linux/efi_embedded_fw.h  41
-rw-r--r--  include/linux/efs_vh.h  1
-rw-r--r--  include/linux/ehl_pse_io_aux.h  24
-rw-r--r--  include/linux/eisa.h  10
-rw-r--r--  include/linux/elevator.h  270
-rw-r--r--  include/linux/elf-fdpic.h  20
-rw-r--r--  include/linux/elf-randomize.h  1
-rw-r--r--  include/linux/elf.h  56
-rw-r--r--  include/linux/elfcore-compat.h  33
-rw-r--r--  include/linux/elfcore.h  125
-rw-r--r--  include/linux/elfnote-lto.h  14
-rw-r--r--  include/linux/elfnote.h  18
-rw-r--r--  include/linux/enclosure.h  15
-rw-r--r--  include/linux/energy_model.h  425
-rw-r--r--  include/linux/entry-common.h  199
-rw-r--r--  include/linux/entry-virt.h  95
-rw-r--r--  include/linux/err.h  72
-rw-r--r--  include/linux/errname.h  16
-rw-r--r--  include/linux/errno.h  3
-rw-r--r--  include/linux/error-injection.h  28
-rw-r--r--  include/linux/errqueue.h  1
-rw-r--r--  include/linux/errseq.h  15
-rw-r--r--  include/linux/etherdevice.h  193
-rw-r--r--  include/linux/ethtool.h  1210
-rw-r--r--  include/linux/ethtool_netlink.h  146
-rw-r--r--  include/linux/eventfd.h  49
-rw-r--r--  include/linux/eventpoll.h  51
-rw-r--r--  include/linux/evm.h  75
-rw-r--r--  include/linux/execmem.h  207
-rw-r--r--  include/linux/export-internal.h  72
-rw-r--r--  include/linux/export.h  171
-rw-r--r--  include/linux/exportfs.h  169
-rw-r--r--  include/linux/ext2_fs.h  1
-rw-r--r--  include/linux/extable.h  13
-rw-r--r--  include/linux/extcon-provider.h  134
-rw-r--r--  include/linux/extcon.h  195
-rw-r--r--  include/linux/extcon/extcon-adc-jack.h  6
-rw-r--r--  include/linux/extcon/extcon-gpio.h  47
-rw-r--r--  include/linux/f2fs_fs.h  276
-rw-r--r--  include/linux/falloc.h  44
-rw-r--r--  include/linux/fanotify.h  146
-rw-r--r--  include/linux/fault-inject-usercopy.h  22
-rw-r--r--  include/linux/fault-inject.h  75
-rw-r--r--  include/linux/fb.h  433
-rw-r--r--  include/linux/fbcon.h  55
-rw-r--r--  include/linux/fcdevice.h  7
-rw-r--r--  include/linux/fcntl.h  17
-rw-r--r--  include/linux/fd.h  1
-rw-r--r--  include/linux/fddidevice.h  6
-rw-r--r--  include/linux/fdtable.h  61
-rw-r--r--  include/linux/fec.h  5
-rw-r--r--  include/linux/fiemap.h  27
-rw-r--r--  include/linux/file.h  235
-rw-r--r--  include/linux/file_ref.h  218
-rw-r--r--  include/linux/fileattr.h  83
-rw-r--r--  include/linux/filelock.h  584
-rw-r--r--  include/linux/filter.h  1253
-rw-r--r--  include/linux/find.h  697
-rw-r--r--  include/linux/fips.h  8
-rw-r--r--  include/linux/firewire.h  211
-rw-r--r--  include/linux/firmware-map.h  11
-rw-r--r--  include/linux/firmware.h  167
-rw-r--r--  include/linux/firmware/broadcom/tee_bnxt_fw.h  14
-rw-r--r--  include/linux/firmware/cirrus/cs_dsp.h  357
-rw-r--r--  include/linux/firmware/cirrus/cs_dsp_test_utils.h  159
-rw-r--r--  include/linux/firmware/cirrus/wmfw.h  203
-rw-r--r--  include/linux/firmware/imx/dsp.h  71
-rw-r--r--  include/linux/firmware/imx/ipc.h  71
-rw-r--r--  include/linux/firmware/imx/s4.h  20
-rw-r--r--  include/linux/firmware/imx/sci.h  57
-rw-r--r--  include/linux/firmware/imx/sm.h  97
-rw-r--r--  include/linux/firmware/imx/svc/misc.h  77
-rw-r--r--  include/linux/firmware/imx/svc/pm.h  85
-rw-r--r--  include/linux/firmware/imx/svc/rm.h  74
-rw-r--r--  include/linux/firmware/intel/stratix10-smc.h  734
-rw-r--r--  include/linux/firmware/intel/stratix10-svc-client.h  392
-rw-r--r--  include/linux/firmware/mediatek/mtk-adsp-ipc.h  59
-rw-r--r--  include/linux/firmware/meson/meson_sm.h  26
-rw-r--r--  include/linux/firmware/qcom/qcom_qseecom.h  54
-rw-r--r--  include/linux/firmware/qcom/qcom_scm.h  184
-rw-r--r--  include/linux/firmware/qcom/qcom_tzmem.h  80
-rw-r--r--  include/linux/firmware/samsung/exynos-acpm-protocol.h  70
-rw-r--r--  include/linux/firmware/thead/thead,th1520-aon.h  200
-rw-r--r--  include/linux/firmware/trusted_foundations.h  92
-rw-r--r--  include/linux/firmware/xlnx-event-manager.h  46
-rw-r--r--  include/linux/firmware/xlnx-zynqmp-ufs.h  38
-rw-r--r--  include/linux/firmware/xlnx-zynqmp.h  970
-rw-r--r--  include/linux/fixp-arith.h  34
-rw-r--r--  include/linux/flat.h  59
-rw-r--r--  include/linux/flex_array.h  148
-rw-r--r--  include/linux/flex_proportions.h  42
-rw-r--r--  include/linux/fmc-sdb.h  38
-rw-r--r--  include/linux/fmc.h  270
-rw-r--r--  include/linux/folio_queue.h  282
-rw-r--r--  include/linux/font.h  27
-rw-r--r--  include/linux/fortify-string.h  819
-rw-r--r--  include/linux/fpga/altera-pr-ip-core.h  14
-rw-r--r--  include/linux/fpga/fpga-bridge.h  46
-rw-r--r--  include/linux/fpga/fpga-mgr.h  172
-rw-r--r--  include/linux/fpga/fpga-region.h  76
-rw-r--r--  include/linux/fprobe.h  156
-rw-r--r--  include/linux/fpu.h  12
-rw-r--r--  include/linux/frame.h  23
-rw-r--r--  include/linux/framer/framer-provider.h  193
-rw-r--r--  include/linux/framer/framer.h  205
-rw-r--r--  include/linux/framer/pef2256.h  31
-rw-r--r--  include/linux/freezer.h  247
-rw-r--r--  include/linux/frontswap.h  114
-rw-r--r--  include/linux/fs.h  3656
-rw-r--r--  include/linux/fs/super.h  238
-rw-r--r--  include/linux/fs/super_types.h  336
-rw-r--r--  include/linux/fs_api.h  1
-rw-r--r--  include/linux/fs_context.h  256
-rw-r--r--  include/linux/fs_dirent.h  78
-rw-r--r--  include/linux/fs_enet_pd.h  165
-rw-r--r--  include/linux/fs_parser.h  143
-rw-r--r--  include/linux/fs_pin.h  2
-rw-r--r--  include/linux/fs_stack.h  11
-rw-r--r--  include/linux/fs_struct.h  18
-rw-r--r--  include/linux/fs_uart_pd.h  71
-rw-r--r--  include/linux/fscache-cache.h  616
-rw-r--r--  include/linux/fscache.h  1016
-rw-r--r--  include/linux/fscrypt.h  1154
-rw-r--r--  include/linux/fscrypt_common.h  141
-rw-r--r--  include/linux/fscrypt_notsupp.h  177
-rw-r--r--  include/linux/fscrypt_supp.h  145
-rw-r--r--  include/linux/fsi-occ.h  27
-rw-r--r--  include/linux/fsi-sbefifo.h  25
-rw-r--r--  include/linux/fsi.h  24
-rw-r--r--  include/linux/fsl-diu-fb.h  7
-rw-r--r--  include/linux/fsl/bestcomm/bestcomm.h  2
-rw-r--r--  include/linux/fsl/bestcomm/gen_bd.h  8
-rw-r--r--  include/linux/fsl/edac.h  1
-rw-r--r--  include/linux/fsl/enetc_mdio.h  68
-rw-r--r--  include/linux/fsl/ftm.h  88
-rw-r--r--  include/linux/fsl/guts.h  13
-rw-r--r--  include/linux/fsl/mc.h  681
-rw-r--r--  include/linux/fsl/netc_global.h  19
-rw-r--r--  include/linux/fsl/ntmp.h  121
-rw-r--r--  include/linux/fsl/ptp_qoriq.h  198
-rw-r--r--  include/linux/fsl_devices.h  15
-rw-r--r--  include/linux/fsl_ifc.h  23
-rw-r--r--  include/linux/fsldma.h  5
-rw-r--r--  include/linux/fsnotify.h  460
-rw-r--r--  include/linux/fsnotify_backend.h  799
-rw-r--r--  include/linux/fsverity.h  359
-rw-r--r--  include/linux/ftrace.h  875
-rw-r--r--  include/linux/ftrace_irq.h  25
-rw-r--r--  include/linux/ftrace_regs.h  43
-rw-r--r--  include/linux/futex.h  98
-rw-r--r--  include/linux/fw_table.h  61
-rw-r--r--  include/linux/fwctl.h  135
-rw-r--r--  include/linux/fwnode.h  146
-rw-r--r--  include/linux/fwnode_mdio.h  35
-rw-r--r--  include/linux/gameport.h  22
-rw-r--r--  include/linux/gcd.h  4
-rw-r--r--  include/linux/genalloc.h  88
-rw-r--r--  include/linux/generic-radix-tree.h  402
-rw-r--r--  include/linux/generic_pt/common.h  191
-rw-r--r--  include/linux/generic_pt/iommu.h  293
-rw-r--r--  include/linux/genetlink.h  42
-rw-r--r--  include/linux/genhd.h  735
-rw-r--r--  include/linux/genl_magic_func.h  42
-rw-r--r--  include/linux/genl_magic_struct.h  19
-rw-r--r--  include/linux/getcpu.h  1
-rw-r--r--  include/linux/gfp.h  601
-rw-r--r--  include/linux/gfp_api.h  1
-rw-r--r--  include/linux/gfp_types.h  386
-rw-r--r--  include/linux/glob.h  1
-rw-r--r--  include/linux/gnss.h  76
-rw-r--r--  include/linux/goldfish.h  22
-rw-r--r--  include/linux/gpio-fan.h  36
-rw-r--r--  include/linux/gpio-pxa.h  1
-rw-r--r--  include/linux/gpio.h  233
-rw-r--r--  include/linux/gpio/aspeed.h  19
-rw-r--r--  include/linux/gpio/consumer.h  426
-rw-r--r--  include/linux/gpio/driver.h  956
-rw-r--r--  include/linux/gpio/forwarder.h  41
-rw-r--r--  include/linux/gpio/generic.h  190
-rw-r--r--  include/linux/gpio/gpio-nomadik.h  292
-rw-r--r--  include/linux/gpio/gpio-reg.h  7
-rw-r--r--  include/linux/gpio/machine.h  91
-rw-r--r--  include/linux/gpio/property.h  14
-rw-r--r--  include/linux/gpio/regmap.h  117
-rw-r--r--  include/linux/gpio_keys.h  7
-rw-r--r--  include/linux/gpio_mouse.h  61
-rw-r--r--  include/linux/greybus.h  122
-rw-r--r--  include/linux/greybus/bundle.h  92
-rw-r--r--  include/linux/greybus/connection.h  131
-rw-r--r--  include/linux/greybus/control.h  60
-rw-r--r--  include/linux/greybus/greybus_id.h  27
-rw-r--r--  include/linux/greybus/greybus_manifest.h  181
-rw-r--r--  include/linux/greybus/greybus_protocols.h  2174
-rw-r--r--  include/linux/greybus/hd.h  85
-rw-r--r--  include/linux/greybus/interface.h  85
-rw-r--r--  include/linux/greybus/manifest.h  17
-rw-r--r--  include/linux/greybus/module.h  36
-rw-r--r--  include/linux/greybus/operation.h  229
-rw-r--r--  include/linux/greybus/svc.h  103
-rw-r--r--  include/linux/group_cpus.h  14
-rw-r--r--  include/linux/habanalabs/cpucp_if.h  1437
-rw-r--r--  include/linux/habanalabs/hl_boot_if.h  807
-rw-r--r--  include/linux/hardirq.h  119
-rw-r--r--  include/linux/hash.h  5
-rw-r--r--  include/linux/hashtable.h  9
-rw-r--r--  include/linux/hashtable_api.h  1
-rw-r--r--  include/linux/hdlc.h  9
-rw-r--r--  include/linux/hdlcdrv.h  3
-rw-r--r--  include/linux/hdmi.h  132
-rw-r--r--  include/linux/hex.h  35
-rw-r--r--  include/linux/hfs_common.h  653
-rw-r--r--  include/linux/hid-debug.h  24
-rw-r--r--  include/linux/hid-over-i2c.h  117
-rw-r--r--  include/linux/hid-over-spi.h  155
-rw-r--r--  include/linux/hid-roccat.h  7
-rw-r--r--  include/linux/hid-sensor-hub.h  57
-rw-r--r--  include/linux/hid-sensor-ids.h  37
-rw-r--r--  include/linux/hid.h  615
-rw-r--r--  include/linux/hid_bpf.h  236
-rw-r--r--  include/linux/hidden.h  19
-rw-r--r--  include/linux/hiddev.h  14
-rw-r--r--  include/linux/hidraw.h  11
-rw-r--r--  include/linux/highmem-internal.h  298
-rw-r--r--  include/linux/highmem.h  727
-rw-r--r--  include/linux/highuid.h  1
-rw-r--r--  include/linux/hil_mlc.h  8
-rw-r--r--  include/linux/hippidevice.h  10
-rw-r--r--  include/linux/hisi_acc_qm.h  604
-rw-r--r--  include/linux/hmm-dma.h  33
-rw-r--r--  include/linux/hmm.h  136
-rw-r--r--  include/linux/host1x.h  295
-rw-r--r--  include/linux/host1x_context_bus.h  15
-rw-r--r--  include/linux/hp_sdc.h  4
-rw-r--r--  include/linux/hpet.h  3
-rw-r--r--  include/linux/hrtimer.h  328
-rw-r--r--  include/linux/hrtimer_api.h  1
-rw-r--r--  include/linux/hrtimer_defs.h  130
-rw-r--r--  include/linux/hrtimer_types.h  50
-rw-r--r--  include/linux/hsi/hsi.h  15
-rw-r--r--  include/linux/hsi/ssi_protocol.h  16
-rw-r--r--  include/linux/htcpld.h  24
-rw-r--r--  include/linux/hte.h  271
-rw-r--r--  include/linux/htirq.h  38
-rw-r--r--  include/linux/huge_mm.h  783
-rw-r--r--  include/linux/hugetlb.h  1211
-rw-r--r--  include/linux/hugetlb_cgroup.h  228
-rw-r--r--  include/linux/hugetlb_inline.h  16
-rw-r--r--  include/linux/hung_task.h  101
-rw-r--r--  include/linux/hw_bitfield.h  62
-rw-r--r--  include/linux/hw_breakpoint.h  24
-rw-r--r--  include/linux/hw_random.h  14
-rw-r--r--  include/linux/hwmon-sysfs.h  55
-rw-r--r--  include/linux/hwmon-vid.h  14
-rw-r--r--  include/linux/hwmon.h  148
-rw-r--r--  include/linux/hwspinlock.h  168
-rw-r--r--  include/linux/hyperv.h  947
-rw-r--r--  include/linux/hypervisor.h  37
-rw-r--r--  include/linux/i2c-algo-bit.h  34
-rw-r--r--  include/linux/i2c-algo-pca.h  18
-rw-r--r--  include/linux/i2c-algo-pcf.h  16
-rw-r--r--  include/linux/i2c-atr.h  149
-rw-r--r--  include/linux/i2c-dev.h  15
-rw-r--r--  include/linux/i2c-mux-pinctrl.h  41
-rw-r--r--  include/linux/i2c-mux.h  21
-rw-r--r--  include/linux/i2c-of-prober.h  140
-rw-r--r--  include/linux/i2c-pnx.h  38
-rw-r--r--  include/linux/i2c-pxa.h  17
-rw-r--r--  include/linux/i2c-smbus.h  49
-rw-r--r--  include/linux/i2c.h  740
-rw-r--r--  include/linux/i2c/bfin_twi.h  145
-rw-r--r--  include/linux/i2c/dm355evm_msp.h  79
-rw-r--r--  include/linux/i2c/mlxcpld.h  52
-rw-r--r--  include/linux/i2c/pca954x.h  48
-rw-r--r--  include/linux/i2c/pxa-i2c.h  85
-rw-r--r--  include/linux/i2c/tc35876x.h  11
-rw-r--r--  include/linux/i3c/ccc.h  385
-rw-r--r--  include/linux/i3c/device.h  363
-rw-r--r--  include/linux/i3c/master.h  737
-rw-r--r--  include/linux/i7300_idle.h  83
-rw-r--r--  include/linux/i8042.h  35
-rw-r--r--  include/linux/i8253.h  1
-rw-r--r--  include/linux/i8254.h  21
-rw-r--r--  include/linux/icmp.h  58
-rw-r--r--  include/linux/icmpv6.h  84
-rw-r--r--  include/linux/ide.h  1617
-rw-r--r--  include/linux/idle_inject.h  36
-rw-r--r--  include/linux/idr.h  261
-rw-r--r--  include/linux/ieee80211-eht.h  1182
-rw-r--r--  include/linux/ieee80211-he.h  825
-rw-r--r--  include/linux/ieee80211-ht.h  292
-rw-r--r--  include/linux/ieee80211-mesh.h  230
-rw-r--r--  include/linux/ieee80211-nan.h  35
-rw-r--r--  include/linux/ieee80211-p2p.h  71
-rw-r--r--  include/linux/ieee80211-s1g.h  575
-rw-r--r--  include/linux/ieee80211-vht.h  236
-rw-r--r--  include/linux/ieee80211.h  1553
-rw-r--r--  include/linux/ieee802154.h  122
-rw-r--r--  include/linux/if_arp.h  15
-rw-r--r--  include/linux/if_bridge.h  136
-rw-r--r--  include/linux/if_eql.h  2
-rw-r--r--  include/linux/if_ether.h  17
-rw-r--r--  include/linux/if_fddi.h  6
-rw-r--r--  include/linux/if_frad.h  95
-rw-r--r--  include/linux/if_hsr.h  73
-rw-r--r--  include/linux/if_link.h  3
-rw-r--r--  include/linux/if_ltalk.h  7
-rw-r--r--  include/linux/if_macvlan.h  64
-rw-r--r--  include/linux/if_phonet.h  3
-rw-r--r--  include/linux/if_pppol2tp.h  8
-rw-r--r--  include/linux/if_pppox.h  13
-rw-r--r--  include/linux/if_rmnet.h  74
-rw-r--r--  include/linux/if_tap.h  27
-rw-r--r--  include/linux/if_team.h  50
-rw-r--r--  include/linux/if_tun.h  64
-rw-r--r--  include/linux/if_tunnel.h  1
-rw-r--r--  include/linux/if_vlan.h  352
-rw-r--r--  include/linux/igmp.h  45
-rw-r--r--  include/linux/ihex.h  32
-rw-r--r--  include/linux/iio/accel/kxcjk_1013.h  13
-rw-r--r--  include/linux/iio/adc-helpers.h  27
-rw-r--r--  include/linux/iio/adc/ad_sigma_delta.h  154
-rw-r--r--  include/linux/iio/adc/qcom-vadc-common.h  168
-rw-r--r--  include/linux/iio/adc/stm32-dfsdm-adc.h  20
-rw-r--r--  include/linux/iio/afe/rescale.h  36
-rw-r--r--  include/linux/iio/backend.h  270
-rw-r--r--  include/linux/iio/buffer-dma.h  55
-rw-r--r--  include/linux/iio/buffer-dmaengine.h  33
-rw-r--r--  include/linux/iio/buffer.h  57
-rw-r--r--  include/linux/iio/buffer_impl.h  94
-rw-r--r--  include/linux/iio/common/cros_ec_sensors_core.h  131
-rw-r--r--  include/linux/iio/common/inv_sensors_timestamp.h  94
-rw-r--r--  include/linux/iio/common/ssp_sensors.h  12
-rw-r--r--  include/linux/iio/common/st_sensors.h  166
-rw-r--r--  include/linux/iio/common/st_sensors_i2c.h  17
-rw-r--r--  include/linux/iio/common/st_sensors_spi.h  7
-rw-r--r--  include/linux/iio/configfs.h  5
-rw-r--r--  include/linux/iio/consumer.h  183
-rw-r--r--  include/linux/iio/dac/ad5421.h  1
-rw-r--r--  include/linux/iio/dac/ad5504.h  3
-rw-r--r--  include/linux/iio/dac/ad5791.h  3
-rw-r--r--  include/linux/iio/dac/max517.h  3
-rw-r--r--  include/linux/iio/dac/mcp4725.h  5
-rw-r--r--  include/linux/iio/driver.h  23
-rw-r--r--  include/linux/iio/events.h  36
-rw-r--r--  include/linux/iio/frequency/ad9523.h  11
-rw-r--r--  include/linux/iio/frequency/adf4350.h  9
-rw-r--r--  include/linux/iio/gyro/itg3200.h  8
-rw-r--r--  include/linux/iio/hw-consumer.h  20
-rw-r--r--  include/linux/iio/iio-gts-helper.h  213
-rw-r--r--  include/linux/iio/iio-opaque.h  82
-rw-r--r--  include/linux/iio/iio.h  558
-rw-r--r--  include/linux/iio/imu/adis.h  405
-rw-r--r--  include/linux/iio/kfifo_buf.h  13
-rw-r--r--  include/linux/iio/machine.h  12
-rw-r--r--  include/linux/iio/magnetometer/ak8975.h  16
-rw-r--r--  include/linux/iio/sw_device.h  10
-rw-r--r--  include/linux/iio/sw_trigger.h  10
-rw-r--r--  include/linux/iio/sysfs.h  19
-rw-r--r--  include/linux/iio/timer/stm32-lptim-trigger.h  38
-rw-r--r--  include/linux/iio/timer/stm32-timer-trigger.h  20
-rw-r--r--  include/linux/iio/trigger.h  48
-rw-r--r--  include/linux/iio/trigger_consumer.h  14
-rw-r--r--  include/linux/iio/triggered_buffer.h  33
-rw-r--r--  include/linux/iio/triggered_event.h  1
-rw-r--r--  include/linux/iio/types.h  47
-rw-r--r--  include/linux/ima.h  109
-rw-r--r--  include/linux/imx-media.h  6
-rw-r--r--  include/linux/in.h  13
-rw-r--r--  include/linux/in6.h  13
-rw-r--r--  include/linux/indirect_call_wrapper.h  71
-rw-r--r--  include/linux/inet.h  7
-rw-r--r--  include/linux/inet_diag.h  72
-rw-r--r--  include/linux/inetdevice.h  89
-rw-r--r--  include/linux/init.h  212
-rw-r--r--  include/linux/init_ohci1394_dma.h  1
-rw-r--r--  include/linux/init_syscalls.h  19
-rw-r--r--  include/linux/init_task.h  277
-rw-r--r--  include/linux/initrd.h  28
-rw-r--r--  include/linux/inotify.h  6
-rw-r--r--  include/linux/input-polldev.h  61
-rw-r--r--  include/linux/input.h  82
-rw-r--r--  include/linux/input/ad714x.h  3
-rw-r--r--  include/linux/input/adp5589.h  10
-rw-r--r--  include/linux/input/adxl34x.h  3
-rw-r--r--  include/linux/input/as5011.h  6
-rw-r--r--  include/linux/input/auo-pixcir-ts.h  54
-rw-r--r--  include/linux/input/bu21013.h  34
-rw-r--r--  include/linux/input/cma3000.h  13
-rw-r--r--  include/linux/input/cy8ctmg110_pdata.h  10
-rw-r--r--  include/linux/input/cyttsp.h  43
-rw-r--r--  include/linux/input/elan-i2c-ids.h  80
-rw-r--r--  include/linux/input/gp2ap002a00f.h  22
-rw-r--r--  include/linux/input/gpio_tilt.h  73
-rw-r--r--  include/linux/input/ili210x.h  10
-rw-r--r--  include/linux/input/kxtj9.h  15
-rw-r--r--  include/linux/input/lm8333.h  2
-rw-r--r--  include/linux/input/matrix_keypad.h  54
-rw-r--r--  include/linux/input/mt.h  15
-rw-r--r--  include/linux/input/navpoint.h  12
-rw-r--r--  include/linux/input/samsung-keypad.h  6
-rw-r--r--  include/linux/input/sh_keysc.h  1
-rw-r--r--  include/linux/input/sparse-keymap.h  6
-rw-r--r--  include/linux/input/touch-overlay.h  25
-rw-r--r--  include/linux/input/touchscreen.h  5
-rw-r--r--  include/linux/input/vivaldi-fmap.h  27
-rw-r--r--  include/linux/instruction_pointer.h  13
-rw-r--r--  include/linux/instrumentation.h  60
-rw-r--r--  include/linux/instrumented.h  216
-rw-r--r--  include/linux/int_log.h  56
-rw-r--r--  include/linux/integrity.h  47
-rw-r--r--  include/linux/intel-iommu.h  494
-rw-r--r--  include/linux/intel-ish-client-if.h  126
-rw-r--r--  include/linux/intel-svm.h  141
-rw-r--r--  include/linux/intel_dg_nvm_aux.h  32
-rw-r--r--  include/linux/intel_pmt_features.h  157
-rw-r--r--  include/linux/intel_rapl.h  223
-rw-r--r--  include/linux/intel_tcc.h  19
-rw-r--r--  include/linux/intel_th.h  79
-rw-r--r--  include/linux/intel_tpmi.h  37
-rw-r--r--  include/linux/intel_vsec.h  239
-rw-r--r--  include/linux/interconnect-clk.h  26
-rw-r--r--  include/linux/interconnect-provider.h  201
-rw-r--r--  include/linux/interconnect.h  141
-rw-r--r--  include/linux/interrupt.h  456
-rw-r--r--  include/linux/interval_tree.h  71
-rw-r--r--  include/linux/interval_tree_generic.h  94
-rw-r--r--  include/linux/io-64-nonatomic-hi-lo.h  81
-rw-r--r--  include/linux/io-64-nonatomic-lo-hi.h  81
-rw-r--r--  include/linux/io-mapping.h  86
-rw-r--r--  include/linux/io-pgtable.h  327
-rw-r--r--  include/linux/io.h  92
-rw-r--r--  include/linux/io_uring.h  51
-rw-r--r--  include/linux/io_uring/cmd.h  184
-rw-r--r--  include/linux/io_uring/net.h  18
-rw-r--r--  include/linux/io_uring_types.h  744
-rw-r--r--  include/linux/ioam6.h  13
-rw-r--r--  include/linux/ioam6_genl.h  13
-rw-r--r--  include/linux/ioam6_iptunnel.h  13
-rw-r--r--  include/linux/ioc3.h  93
-rw-r--r--  include/linux/ioc4.h  184
-rw-r--r--  include/linux/iocontext.h  57
-rw-r--r--  include/linux/iomap.h  559
-rw-r--r--  include/linux/iommu-common.h  52
-rw-r--r--  include/linux/iommu-dma.h  64
-rw-r--r--  include/linux/iommu-helper.h  18
-rw-r--r--  include/linux/iommu.h  1468
-rw-r--r--  include/linux/iommufd.h  400
-rw-r--r--  include/linux/iopoll.h  221
-rw-r--r--  include/linux/ioport.h  188
-rw-r--r--  include/linux/ioprio.h  92
-rw-r--r--  include/linux/ioremap.h  31
-rw-r--r--  include/linux/iosys-map.h  511
-rw-r--r--  include/linux/iov_iter.h  380
-rw-r--r--  include/linux/iova.h  77
-rw-r--r--  include/linux/iova_bitmap.h  52
-rw-r--r--  include/linux/ip.h  32
-rw-r--r--  include/linux/ipack.h  28
-rw-r--r--  include/linux/ipc.h  11
-rw-r--r--  include/linux/ipc_namespace.h  79
-rw-r--r--  include/linux/ipmi-fru.h  135
-rw-r--r--  include/linux/ipmi.h  205
-rw-r--r--  include/linux/ipmi_smi.h  282
-rw-r--r--  include/linux/ipv6.h  173
-rw-r--r--  include/linux/ipv6_route.h  6
-rw-r--r--  include/linux/irq-entry-common.h  458
-rw-r--r--  include/linux/irq.h  445
-rw-r--r--  include/linux/irq_cpustat.h  31
-rw-r--r--  include/linux/irq_poll.h  1
-rw-r--r--  include/linux/irq_sim.h  59
-rw-r--r--  include/linux/irq_work.h  51
-rw-r--r--  include/linux/irq_work_types.h  14
-rw-r--r--  include/linux/irqbypass.h  51
-rw-r--r--  include/linux/irqchip.h  55
-rw-r--r--  include/linux/irqchip/arm-gic-common.h  31
-rw-r--r--  include/linux/irqchip/arm-gic-v3-prio.h  52
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h  242
-rw-r--r--  include/linux/irqchip/arm-gic-v4.h  93
-rw-r--r--  include/linux/irqchip/arm-gic-v5.h  394
-rw-r--r--  include/linux/irqchip/arm-gic.h  38
-rw-r--r--  include/linux/irqchip/arm-vgic-info.h  49
-rw-r--r--  include/linux/irqchip/arm-vic.h  26
-rw-r--r--  include/linux/irqchip/chained_irq.h  13
-rw-r--r--  include/linux/irqchip/ingenic.h  23
-rw-r--r--  include/linux/irqchip/irq-bcm2836.h  61
-rw-r--r--  include/linux/irqchip/irq-madera.h  132
-rw-r--r--  include/linux/irqchip/irq-msi-lib.h  28
-rw-r--r--  include/linux/irqchip/irq-omap-intc.h  14
-rw-r--r--  include/linux/irqchip/irq-partition-percpu.h  59
-rw-r--r--  include/linux/irqchip/irq-renesas-rzv2h.h  23
-rw-r--r--  include/linux/irqchip/irq-sa11x0.h  5
-rw-r--r--  include/linux/irqchip/metag-ext.h  33
-rw-r--r--  include/linux/irqchip/metag.h  24
-rw-r--r--  include/linux/irqchip/mips-gic.h  297
-rw-r--r--  include/linux/irqchip/mmp.h  6
-rw-r--r--  include/linux/irqchip/mxs.h  14
-rw-r--r--  include/linux/irqchip/riscv-aplic.h  145
-rw-r--r--  include/linux/irqchip/riscv-imsic.h  95
-rw-r--r--  include/linux/irqchip/versatile-fpga.h  13
-rw-r--r--  include/linux/irqdesc.h  108
-rw-r--r--  include/linux/irqdomain.h  839
-rw-r--r--  include/linux/irqdomain_defs.h  32
-rw-r--r--  include/linux/irqflags.h  196
-rw-r--r--  include/linux/irqflags_types.h  22
-rw-r--r--  include/linux/irqhandler.h  4
-rw-r--r--  include/linux/irqnr.h  37
-rw-r--r--  include/linux/irqreturn.h  9
-rw-r--r--  include/linux/isa-dma.h  14
-rw-r--r--  include/linux/isa.h  55
-rw-r--r--  include/linux/isapnp.h  23
-rw-r--r--  include/linux/iscsi_boot_sysfs.h  10
-rw-r--r--  include/linux/iscsi_ibft.h  38
-rw-r--r--  include/linux/isdn.h  473
-rw-r--r--  include/linux/isdn/capilli.h  20
-rw-r--r--  include/linux/isdn/capiutil.h  456
-rw-r--r--  include/linux/isdn/hdlc.h  82
-rw-r--r--  include/linux/isdn_divertif.h  35
-rw-r--r--  include/linux/isdn_ppp.h  194
-rw-r--r--  include/linux/isdnif.h  505
-rw-r--r--  include/linux/isicom.h  84
-rw-r--r--  include/linux/ism.h  67
-rw-r--r--  include/linux/iversion.h  300
-rw-r--r--  include/linux/jbd2.h  989
-rw-r--r--  include/linux/jhash.h  46
-rw-r--r--  include/linux/jiffies.h  274
-rw-r--r--  include/linux/journal-head.h  22
-rw-r--r--  include/linux/joystick.h  18
-rw-r--r--  include/linux/jump_label.h  207
-rw-r--r--  include/linux/jump_label_ratelimit.h  80
-rw-r--r--  include/linux/jz4740-adc.h  1
-rw-r--r--  include/linux/jz4780-nemc.h  6
-rw-r--r--  include/linux/kallsyms.h  118
-rw-r--r--  include/linux/kasan-checks.h  50
-rw-r--r--  include/linux/kasan-enabled.h  49
-rw-r--r--  include/linux/kasan-tags.h  15
-rw-r--r--  include/linux/kasan.h  695
-rw-r--r--  include/linux/kbd_diacr.h  1
-rw-r--r--  include/linux/kbd_kern.h  14
-rw-r--r--  include/linux/kbuild.h  1
-rw-r--r--  include/linux/kconfig.h  13
-rw-r--r--  include/linux/kcore.h  14
-rw-r--r--  include/linux/kcov.h  91
-rw-r--r--  include/linux/kcsan-checks.h  533
-rw-r--r--  include/linux/kcsan.h  75
-rw-r--r--  include/linux/kd.h  7
-rw-r--r--  include/linux/kdb.h  55
-rw-r--r--  include/linux/kdebug.h  1
-rw-r--r--  include/linux/kdev_t.h  23
-rw-r--r--  include/linux/kern_levels.h  3
-rw-r--r--  include/linux/kernel-page-flags.h  6
-rw-r--r--  include/linux/kernel.h  733
-rw-r--r--  include/linux/kernel_read_file.h  56
-rw-r--r--  include/linux/kernel_stat.h  51
-rw-r--r--  include/linux/kernelcapi.h  76
-rw-r--r--  include/linux/kernfs.h  282
-rw-r--r--  include/linux/kexec.h  331
-rw-r--r--  include/linux/kexec_handover.h  143
-rw-r--r--  include/linux/key-type.h  55
-rw-r--r--  include/linux/key.h  230
-rw-r--r--  include/linux/keyboard.h  1
-rw-r--r--  include/linux/keyctl.h  42
-rw-r--r--  include/linux/kfence.h  253
-rw-r--r--  include/linux/kfifo.h  293
-rw-r--r--  include/linux/kgdb.h  92
-rw-r--r--  include/linux/kho/abi/luo.h  166
-rw-r--r--  include/linux/kho/abi/memfd.h  77
-rw-r--r--  include/linux/khugepaged.h  74
-rw-r--r--  include/linux/klist.h  4
-rw-r--r--  include/linux/kmemcheck.h  171
-rw-r--r--  include/linux/kmemleak.h  38
-rw-r--r--  include/linux/kmod.h  78
-rw-r--r--  include/linux/kmsan-checks.h  98
-rw-r--r--  include/linux/kmsan.h  411
-rw-r--r--  include/linux/kmsan_string.h  21
-rw-r--r--  include/linux/kmsan_types.h  37
-rw-r--r--  include/linux/kmsg_dump.h  81
-rw-r--r--  include/linux/kobj_map.h  1
-rw-r--r--  include/linux/kobject.h  97
-rw-r--r--  include/linux/kobject_api.h  1
-rw-r--r--  include/linux/kobject_ns.h  11
-rw-r--r--  include/linux/kprobes.h  377
-rw-r--r--  include/linux/kref.h  55
-rw-r--r--  include/linux/kref_api.h  1
-rw-r--r--  include/linux/ks0108.h  18
-rw-r--r--  include/linux/ks8842.h  14
-rw-r--r--  include/linux/ks8851_mll.h  14
-rw-r--r--  include/linux/ksm.h  122
-rw-r--r--  include/linux/kstack_erase.h  89
-rw-r--r--  include/linux/kstrtox.h  151
-rw-r--r--  include/linux/kthread.h  144
-rw-r--r--  include/linux/ktime.h  65
-rw-r--r--  include/linux/ktime_api.h  1
-rw-r--r--  include/linux/kvm_dirty_ring.h  94
-rw-r--r--  include/linux/kvm_host.h  1983
-rw-r--r--  include/linux/kvm_irqfd.h  18
-rw-r--r--  include/linux/kvm_para.h  6
-rw-r--r--  include/linux/kvm_types.h  113
-rw-r--r--  include/linux/l2tp.h  1
-rw-r--r--  include/linux/lantiq.h  23
-rw-r--r--  include/linux/lapb.h  6
-rw-r--r--  include/linux/latencytop.h  8
-rw-r--r--  include/linux/lcd.h  67
-rw-r--r--  include/linux/lcm.h  1
-rw-r--r--  include/linux/leafops.h  619
-rw-r--r--  include/linux/led-class-flash.h  74
-rw-r--r--  include/linux/led-class-multicolor.h  80
-rw-r--r--  include/linux/led-lm3530.h  3
-rw-r--r--  include/linux/leds-bd2802.h  7
-rw-r--r--  include/linux/leds-expresswire.h  38
-rw-r--r--  include/linux/leds-lp3944.h  6
-rw-r--r--  include/linux/leds-lp3952.h  6
-rw-r--r--  include/linux/leds-pca9532.h  6
-rw-r--r--  include/linux/leds-regulator.h  6
-rw-r--r--  include/linux/leds-tca6507.h  34
-rw-r--r--  include/linux/leds-ti-lmu-common.h  47
-rw-r--r--  include/linux/leds.h  526
-rw-r--r--  include/linux/leds_pwm.h  21
-rw-r--r--  include/linux/libata.h  912
-rw-r--r--  include/linux/libfdt.h  2
-rw-r--r--  include/linux/libfdt_env.h  11
-rw-r--r--  include/linux/libgcc.h  41
-rw-r--r--  include/linux/libnvdimm.h  251
-rw-r--r--  include/linux/libps2.h  85
-rw-r--r--  include/linux/license.h  1
-rw-r--r--  include/linux/lightnvm.h  510
-rw-r--r--  include/linux/limits.h  29
-rw-r--r--  include/linux/linear_range.h  61
-rw-r--r--  include/linux/linkage.h  288
-rw-r--r--  include/linux/linkmode.h  85
-rw-r--r--  include/linux/linux_logo.h  7
-rw-r--r--  include/linux/lis3lv02d.h  1
-rw-r--r--  include/linux/list.h  532
-rw-r--r--  include/linux/list_bl.h  27
-rw-r--r--  include/linux/list_lru.h  187
-rw-r--r--  include/linux/list_nulls.h  32
-rw-r--r--  include/linux/list_sort.h  9
-rw-r--r--  include/linux/litex.h  83
-rw-r--r--  include/linux/livepatch.h  123
-rw-r--r--  include/linux/livepatch_external.h  76
-rw-r--r--  include/linux/livepatch_helpers.h  77
-rw-r--r--  include/linux/livepatch_sched.h  25
-rw-r--r--  include/linux/liveupdate.h  138
-rw-r--r--  include/linux/llist.h  114
-rw-r--r--  include/linux/llist_api.h  1
-rw-r--r--  include/linux/local_lock.h  107
-rw-r--r--  include/linux/local_lock_internal.h  295
-rw-r--r--  include/linux/lockd/bind.h  9
-rw-r--r--  include/linux/lockd/debug.h  5
-rw-r--r--  include/linux/lockd/lockd.h  95
-rw-r--r--  include/linux/lockd/nlm.h  1
-rw-r--r--  include/linux/lockd/share.h  1
-rw-r--r--  include/linux/lockd/xdr.h  41
-rw-r--r--  include/linux/lockd/xdr4.h  34
-rw-r--r--  include/linux/lockdep.h  734
-rw-r--r--  include/linux/lockdep_api.h  1
-rw-r--r--  include/linux/lockdep_types.h  275
-rw-r--r--  include/linux/lockref.h  30
-rw-r--r--  include/linux/log2.h  136
-rw-r--r--  include/linux/logic_iomem.h  62
-rw-r--r--  include/linux/logic_pio.h  121
-rw-r--r--  include/linux/lp.h  1
-rw-r--r--  include/linux/lru_cache.h  29
-rw-r--r--  include/linux/lsm/apparmor.h  17
-rw-r--r--  include/linux/lsm/bpf.h  16
-rw-r--r--  include/linux/lsm/selinux.h  16
-rw-r--r--  include/linux/lsm/smack.h  17
-rw-r--r--  include/linux/lsm_audit.h  42
-rw-r--r--  include/linux/lsm_count.h  135
-rw-r--r--  include/linux/lsm_hook_defs.h  468
-rw-r--r--  include/linux/lsm_hooks.h  2086
-rw-r--r--  include/linux/lwq.h  124
-rw-r--r--  include/linux/lz4.h  24
-rw-r--r--  include/linux/lzo.h  15
-rw-r--r--  include/linux/mISDNdsp.h  1
-rw-r--r--  include/linux/mISDNhw.h  11
-rw-r--r--  include/linux/mISDNif.h  3
-rw-r--r--  include/linux/mailbox/arm_mhuv2_message.h  20
-rw-r--r--  include/linux/mailbox/brcm-message.h  5
-rw-r--r--  include/linux/mailbox/exynos-message.h  19
-rw-r--r--  include/linux/mailbox/mchp-ipc.h  33
-rw-r--r--  include/linux/mailbox/mtk-cmdq-mailbox.h  92
-rw-r--r--  include/linux/mailbox/riscv-rpmi-message.h  243
-rw-r--r--  include/linux/mailbox/zynqmp-ipi-message.h  20
-rw-r--r--  include/linux/mailbox_client.h  9
-rw-r--r--  include/linux/mailbox_controller.h  22
-rw-r--r--  include/linux/maple.h  4
-rw-r--r--  include/linux/maple_tree.h  903
-rw-r--r--  include/linux/marvell_phy.h  23
-rw-r--r--  include/linux/math.h  227
-rw-r--r--  include/linux/math64.h  228
-rw-r--r--  include/linux/max17040_battery.h  19
-rw-r--r--  include/linux/mbcache.h  43
-rw-r--r--  include/linux/mbus.h  4
-rw-r--r--  include/linux/mc146818rtc.h  9
-rw-r--r--  include/linux/mc33xs2410.h  16
-rw-r--r--  include/linux/mc6821.h  1
-rw-r--r--  include/linux/mcb.h  18
-rw-r--r--  include/linux/mdev.h  163
-rw-r--r--  include/linux/mdio-bitbang.h  11
-rw-r--r--  include/linux/mdio-gpio.h  9
-rw-r--r--  include/linux/mdio-mux.h  9
-rw-r--r--  include/linux/mdio.h  490
-rw-r--r--  include/linux/mdio/mdio-i2c.h  24
-rw-r--r--  include/linux/mdio/mdio-mscc-miim.h  19
-rw-r--r--  include/linux/mdio/mdio-regmap.h  26
-rw-r--r--  include/linux/mdio/mdio-xgene.h  134
-rw-r--r--  include/linux/mei_aux.h  31
-rw-r--r--  include/linux/mei_cl_bus.h  42
-rw-r--r--  include/linux/mem_encrypt.h  51
-rw-r--r--  include/linux/memblock.h  465
-rw-r--r--  include/linux/memcontrol.h  1863
-rw-r--r--  include/linux/memfd.h  36
-rw-r--r--  include/linux/memory-failure.h  17
-rw-r--r--  include/linux/memory-tiers.h  155
-rw-r--r--  include/linux/memory.h  199
-rw-r--r--  include/linux/memory/ti-aemif.h  32
-rw-r--r--  include/linux/memory_hotplug.h  367
-rw-r--r--  include/linux/mempolicy.h  119
-rw-r--r--  include/linux/mempool.h  95
-rw-r--r--  include/linux/memregion.h  71
-rw-r--r--  include/linux/memremap.h  301
-rw-r--r--  include/linux/memstick.h  11
-rw-r--r--  include/linux/mfd/88pm80x.h  7
-rw-r--r--  include/linux/mfd/88pm860x.h  11
-rw-r--r--  include/linux/mfd/88pm886.h  136
-rw-r--r--  include/linux/mfd/aat2870.h  19
-rw-r--r--  include/linux/mfd/ab3100.h  129
-rw-r--r--  include/linux/mfd/abx500.h  279
-rw-r--r--  include/linux/mfd/abx500/ab8500-bm.h  478
-rw-r--r--  include/linux/mfd/abx500/ab8500-codec.h  5
-rw-r--r--  include/linux/mfd/abx500/ab8500-gpadc.h  75
-rw-r--r--  include/linux/mfd/abx500/ab8500-sysctrl.h  2
-rw-r--r--  include/linux/mfd/abx500/ab8500.h  15
-rw-r--r--  include/linux/mfd/abx500/ux500_chargalg.h  55
-rw-r--r--  include/linux/mfd/ac100.h  5
-rw-r--r--  include/linux/mfd/adp5520.h  3
-rw-r--r--  include/linux/mfd/adp5585.h  226
-rw-r--r--  include/linux/mfd/altera-a10sr.h  13
-rw-r--r--  include/linux/mfd/altera-sysmgr.h  29
-rw-r--r--  include/linux/mfd/arizona/core.h  5
-rw-r--r--  include/linux/mfd/arizona/pdata.h  17
-rw-r--r--  include/linux/mfd/arizona/registers.h  12
-rw-r--r--  include/linux/mfd/as3711.h  9
-rw-r--r--  include/linux/mfd/as3722.h  19
-rw-r--r--  include/linux/mfd/asic3.h  316
-rw-r--r--  include/linux/mfd/atc260x/atc2603c.h  281
-rw-r--r--  include/linux/mfd/atc260x/atc2609a.h  308
-rw-r--r--  include/linux/mfd/atc260x/core.h  58
-rw-r--r--  include/linux/mfd/atmel-hlcdc.h  23
-rw-r--r--  include/linux/mfd/axp20x.h  397
-rw-r--r--  include/linux/mfd/bcm2835-pm.h  15
-rw-r--r--  include/linux/mfd/bcm590xx.h  35
-rw-r--r--  include/linux/mfd/bd9571mwv.h  109
-rw-r--r--  include/linux/mfd/bq257xx.h  104
-rw-r--r--  include/linux/mfd/cgbc.h  44
-rw-r--r--  include/linux/mfd/core.h  103
-rw-r--r--  include/linux/mfd/cros_ec.h  343
-rw-r--r--  include/linux/mfd/cros_ec_commands.h  3021
-rw-r--r--  include/linux/mfd/cros_ec_lpc_mec.h  90
-rw-r--r--  include/linux/mfd/cros_ec_lpc_reg.h  61
-rw-r--r--  include/linux/mfd/cs40l50.h  137
-rw-r--r--  include/linux/mfd/cs42l43-regs.h  1184
-rw-r--r--  include/linux/mfd/cs42l43.h  103
-rw-r--r--  include/linux/mfd/da8xx-cfgchip.h  11
-rw-r--r--  include/linux/mfd/da903x.h  1
-rw-r--r--  include/linux/mfd/da9052/da9052.h  18
-rw-r--r--  include/linux/mfd/da9052/pdata.h  16
-rw-r--r--  include/linux/mfd/da9052/reg.h  16
-rw-r--r--  include/linux/mfd/da9055/core.h  16
-rw-r--r--  include/linux/mfd/da9055/pdata.h  21
-rw-r--r--  include/linux/mfd/da9055/reg.h  16
-rw-r--r--  include/linux/mfd/da9062/core.h  11
-rw-r--r--  include/linux/mfd/da9062/registers.h  14
-rw-r--r--  include/linux/mfd/da9063/core.h  25
-rw-r--r--  include/linux/mfd/da9063/pdata.h  112
-rw-r--r--  include/linux/mfd/da9063/registers.h  54
-rw-r--r--  include/linux/mfd/da9150/core.h  6
-rw-r--r--  include/linux/mfd/da9150/registers.h  6
-rw-r--r--  include/linux/mfd/davinci_voicecodec.h  23
-rw-r--r--  include/linux/mfd/db8500-prcmu.h  26
-rw-r--r--  include/linux/mfd/dbx500-prcmu.h  90
-rw-r--r--  include/linux/mfd/dln2.h  1
-rw-r--r--  include/linux/mfd/ds1wm.h  28
-rw-r--r--  include/linux/mfd/ezx-pcap.h  2
-rw-r--r--  include/linux/mfd/gsc.h  76
-rw-r--r--  include/linux/mfd/hi6421-pmic.h  12
-rw-r--r--  include/linux/mfd/hi655x-pmic.h  12
-rw-r--r--  include/linux/mfd/htc-pasic3.h  54
-rw-r--r--  include/linux/mfd/idt82p33_reg.h  115
-rw-r--r--  include/linux/mfd/idt8a340_reg.h  768
-rw-r--r--  include/linux/mfd/idtRC38xxx_reg.h  273
-rw-r--r--  include/linux/mfd/imx25-tsadc.h  1
-rw-r--r--  include/linux/mfd/ingenic-tcu.h  56
-rw-r--r--  include/linux/mfd/intel-m10-bmc.h  309
-rw-r--r--  include/linux/mfd/intel_msic.h  456
-rw-r--r--  include/linux/mfd/intel_pmc_bxt.h  53
-rw-r--r--  include/linux/mfd/intel_soc_pmic.h  40
-rw-r--r--  include/linux/mfd/intel_soc_pmic_bxtwc.h  10
-rw-r--r--  include/linux/mfd/intel_soc_pmic_mrfld.h  81
-rw-r--r--  include/linux/mfd/ipaq-micro.h  5
-rw-r--r--  include/linux/mfd/iqs62x.h  143
-rw-r--r--  include/linux/mfd/janz.h  6
-rw-r--r--  include/linux/mfd/kempld.h  5
-rw-r--r--  include/linux/mfd/khadas-mcu.h  91
-rw-r--r--  include/linux/mfd/lm3533.h  11
-rw-r--r--  include/linux/mfd/lochnagar.h  55
-rw-r--r--  include/linux/mfd/lochnagar1_regs.h  157
-rw-r--r--  include/linux/mfd/lochnagar2_regs.h  291
-rw-r--r--  include/linux/mfd/loongson-se.h  53
-rw-r--r--  include/linux/mfd/lp3943.h  7
-rw-r--r--  include/linux/mfd/lp873x.h  12
-rw-r--r--  include/linux/mfd/lp87565.h  51
-rw-r--r--  include/linux/mfd/lp8788-isink.h  6
-rw-r--r--  include/linux/mfd/lp8788.h  67
-rw-r--r--  include/linux/mfd/lpc_ich.h  23
-rw-r--r--  include/linux/mfd/macsmc.h  280
-rw-r--r--  include/linux/mfd/madera/core.h  210
-rw-r--r--  include/linux/mfd/madera/pdata.h  59
-rw-r--r--  include/linux/mfd/madera/registers.h  3449
-rw-r--r--  include/linux/mfd/max14577-private.h  13
-rw-r--r--  include/linux/mfd/max14577.h  13
-rw-r--r--  include/linux/mfd/max5970.h  84
-rw-r--r--  include/linux/mfd/max7360.h  109
-rw-r--r--  include/linux/mfd/max77541.h  91
-rw-r--r--  include/linux/mfd/max77620.h  11
-rw-r--r--  include/linux/mfd/max77650.h  59
-rw-r--r--  include/linux/mfd/max77686-private.h  49
-rw-r--r--  include/linux/mfd/max77686.h  17
-rw-r--r--  include/linux/mfd/max77693-common.h  10
-rw-r--r--  include/linux/mfd/max77693-private.h  37
-rw-r--r--  include/linux/mfd/max77693.h  17
-rw-r--r--  include/linux/mfd/max77705-private.h  195
-rw-r--r--  include/linux/mfd/max77714.h  60
-rw-r--r--  include/linux/mfd/max77759.h  165
-rw-r--r--  include/linux/mfd/max77843-private.h  13
-rw-r--r--  include/linux/mfd/max8907.h  5
-rw-r--r--  include/linux/mfd/max8925.h  5
-rw-r--r--  include/linux/mfd/max8997-private.h  18
-rw-r--r--  include/linux/mfd/max8997.h  30
-rw-r--r--  include/linux/mfd/max8998-private.h  17
-rw-r--r--  include/linux/mfd/max8998.h  24
-rw-r--r--  include/linux/mfd/mc13783.h  5
-rw-r--r--  include/linux/mfd/mc13892.h  5
-rw-r--r--  include/linux/mfd/mc13xxx.h  14
-rw-r--r--  include/linux/mfd/mcp.h  5
-rw-r--r--  include/linux/mfd/menelaus.h  1
-rw-r--r--  include/linux/mfd/motorola-cpcap.h  5
-rw-r--r--  include/linux/mfd/mp2629.h  26
-rw-r--r--  include/linux/mfd/mt6323/core.h  5
-rw-r--r--  include/linux/mfd/mt6323/registers.h  5
-rw-r--r--  include/linux/mfd/mt6328/core.h  53
-rw-r--r--  include/linux/mfd/mt6328/registers.h  822
-rw-r--r--  include/linux/mfd/mt6331/core.h  40
-rw-r--r--  include/linux/mfd/mt6331/registers.h  584
-rw-r--r--  include/linux/mfd/mt6332/core.h  65
-rw-r--r--  include/linux/mfd/mt6332/registers.h  642
-rw-r--r--  include/linux/mfd/mt6357/core.h  119
-rw-r--r--  include/linux/mfd/mt6357/registers.h  1574
-rw-r--r--  include/linux/mfd/mt6358/core.h  156
-rw-r--r--  include/linux/mfd/mt6358/registers.h  314
-rw-r--r--  include/linux/mfd/mt6359/core.h  133
-rw-r--r--  include/linux/mfd/mt6359/registers.h  531
-rw-r--r--  include/linux/mfd/mt6359p/registers.h  249
-rw-r--r--  include/linux/mfd/mt6397/core.h  42
-rw-r--r--  include/linux/mfd/mt6397/registers.h  10
-rw-r--r--  include/linux/mfd/mt6397/rtc.h  81
-rw-r--r--  include/linux/mfd/mxs-lradc.h  11
-rw-r--r--  include/linux/mfd/nct6694.h  102
-rw-r--r--  include/linux/mfd/ntxec.h  38
-rw-r--r--  include/linux/mfd/ocelot.h  62
-rw-r--r--  include/linux/mfd/palmas.h  26
-rw-r--r--  include/linux/mfd/pcf50633/adc.h  73
-rw-r--r--  include/linux/mfd/pcf50633/backlight.h  51
-rw-r--r--  include/linux/mfd/pcf50633/core.h  238
-rw-r--r--  include/linux/mfd/pcf50633/gpio.h  52
-rw-r--r--  include/linux/mfd/pcf50633/mbc.h  134
-rw-r--r--  include/linux/mfd/pcf50633/pmic.h  67
-rw-r--r--  include/linux/mfd/pf1550.h  273
-rw-r--r--  include/linux/mfd/qcom_rpm.h  1
-rw-r--r--  include/linux/mfd/qnap-mcu.h  28
-rw-r--r--  include/linux/mfd/rave-sp.h  62
-rw-r--r--  include/linux/mfd/rc5t583.h  14
-rw-r--r--  include/linux/mfd/rdc321x.h  1
-rw-r--r--  include/linux/mfd/rk808.h  1044
-rw-r--r--  include/linux/mfd/rn5t618.h  44
-rw-r--r--  include/linux/mfd/rohm-bd71815.h  562
-rw-r--r--  include/linux/mfd/rohm-bd71828.h  490
-rw-r--r--  include/linux/mfd/rohm-bd718x7.h  313
-rw-r--r--  include/linux/mfd/rohm-bd957x.h  140
-rw-r--r--  include/linux/mfd/rohm-bd96801.h  217
-rw-r--r--  include/linux/mfd/rohm-bd96802.h  74
-rw-r--r--  include/linux/mfd/rohm-generic.h  90
-rw-r--r--  include/linux/mfd/rohm-shared.h  21
-rw-r--r--  include/linux/mfd/rsmu.h  39
-rw-r--r--  include/linux/mfd/rt5033-private.h  114
-rw-r--r--  include/linux/mfd/rt5033.h  30
-rw-r--r--  include/linux/mfd/rz-mtu3.h  191
-rw-r--r--  include/linux/mfd/samsung/core.h  60
-rw-r--r--  include/linux/mfd/samsung/irq.h  213
-rw-r--r--  include/linux/mfd/samsung/rtc.h  61
-rw-r--r--  include/linux/mfd/samsung/s2mpa01.h  7
-rw-r--r--  include/linux/mfd/samsung/s2mpg10.h  454
-rw-r--r--  include/linux/mfd/samsung/s2mps11.h  18
-rw-r--r--  include/linux/mfd/samsung/s2mps13.h  14
-rw-r--r--  include/linux/mfd/samsung/s2mps14.h  14
-rw-r--r--  include/linux/mfd/samsung/s2mps15.h  11
-rw-r--r--  include/linux/mfd/samsung/s2mpu02.h  14
-rw-r--r--  include/linux/mfd/samsung/s2mpu05.h  183
-rw-r--r--  include/linux/mfd/samsung/s5m8763.h  96
-rw-r--r--  include/linux/mfd/samsung/s5m8767.h  10
-rw-r--r--  include/linux/mfd/sc27xx-pmic.h  7
-rw-r--r--  include/linux/mfd/si476x-core.h  13
-rw-r--r--  include/linux/mfd/si476x-platform.h  13
-rw-r--r--  include/linux/mfd/si476x-reports.h  11
-rw-r--r--  include/linux/mfd/sky81452.h  15
-rw-r--r--  include/linux/mfd/smsc.h  109
-rw-r--r--  include/linux/mfd/sta2x11-mfd.h  518
-rw-r--r--  include/linux/mfd/stm32-lptimer.h  99
-rw-r--r--  include/linux/mfd/stm32-timers.h  221
-rw-r--r--  include/linux/mfd/stmfx.h  122
-rw-r--r--  include/linux/mfd/stmpe.h  23
-rw-r--r--  include/linux/mfd/stpmic1.h  212
-rw-r--r--  include/linux/mfd/stw481x.h  3
-rw-r--r--  include/linux/mfd/sun4i-gpadc.h  9
-rw-r--r--  include/linux/mfd/sy7636a.h  34
-rw-r--r--  include/linux/mfd/syscon.h  54
-rw-r--r--  include/linux/mfd/syscon/atmel-matrix.h  7
-rw-r--r--  include/linux/mfd/syscon/atmel-mc.h  6
-rw-r--r--  include/linux/mfd/syscon/atmel-smc.h  43
-rw-r--r--  include/linux/mfd/syscon/atmel-st.h  6
-rw-r--r--  include/linux/mfd/syscon/clps711x.h  6
-rw-r--r--  include/linux/mfd/syscon/exynos4-pmu.h  21
-rw-r--r--  include/linux/mfd/syscon/exynos5-pmu.h  19
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h  26
-rw-r--r--  include/linux/mfd/syscon/imx7-iomuxc-gpr.h  5
-rw-r--r--  include/linux/mfd/syscon/xlnx-vcu.h  39
-rw-r--r--  include/linux/mfd/t7l66xb.h  34
-rw-r--r--  include/linux/mfd/tc3589x.h  9
-rw-r--r--  include/linux/mfd/tc6387xb.h  20
-rw-r--r--  include/linux/mfd/tc6393xb.h  59
-rw-r--r--  include/linux/mfd/ti-lmu-register.h  106
-rw-r--r--  include/linux/mfd/ti-lmu.h  14
-rw-r--r--  include/linux/mfd/ti_am335x_tscadc.h  116
-rw-r--r--  include/linux/mfd/tmio.h  160
-rw-r--r--  include/linux/mfd/tps6105x.h  3
-rw-r--r--  include/linux/mfd/tps65010.h (renamed from include/linux/i2c/tps65010.h)  13
-rw-r--r--  include/linux/mfd/tps65086.h  35
-rw-r--r--  include/linux/mfd/tps65090.h  24
-rw-r--r--  include/linux/mfd/tps65217.h  18
-rw-r--r--  include/linux/mfd/tps65218.h  44
-rw-r--r--  include/linux/mfd/tps65219.h  449
-rw-r--r--  include/linux/mfd/tps6586x.h  2
-rw-r--r--  include/linux/mfd/tps65910.h  49
-rw-r--r--  include/linux/mfd/tps65912.h  13
-rw-r--r--  include/linux/mfd/tps6594.h  1346
-rw-r--r--  include/linux/mfd/tps68470.h  97
-rw-r--r--  include/linux/mfd/tps80031.h  637
-rw-r--r--  include/linux/mfd/twl.h (renamed from include/linux/i2c/twl.h)  114
-rw-r--r--  include/linux/mfd/twl4030-audio.h  16
-rw-r--r--  include/linux/mfd/twl6040.h  48
-rw-r--r--  include/linux/mfd/ucb1x00.h  6
-rw-r--r--  include/linux/mfd/upboard-fpga.h  55
-rw-r--r--  include/linux/mfd/viperboard.h  7
-rw-r--r--  include/linux/mfd/wcd934x/registers.h  588
-rw-r--r--  include/linux/mfd/wcd934x/wcd934x.h  31
-rw-r--r--  include/linux/mfd/wl1273-core.h  290
-rw-r--r--  include/linux/mfd/wm831x/auxadc.h  7
-rw-r--r--  include/linux/mfd/wm831x/core.h  8
-rw-r--r--  include/linux/mfd/wm831x/gpio.h  7
-rw-r--r--  include/linux/mfd/wm831x/irq.h  7
-rw-r--r--  include/linux/mfd/wm831x/otp.h  7
-rw-r--r--  include/linux/mfd/wm831x/pdata.h  9
-rw-r--r--  include/linux/mfd/wm831x/pmu.h  7
-rw-r--r--  include/linux/mfd/wm831x/regulator.h  9
-rw-r--r--  include/linux/mfd/wm831x/status.h  7
-rw-r--r--  include/linux/mfd/wm831x/watchdog.h  7
-rw-r--r--  include/linux/mfd/wm8350/audio.h  10
-rw-r--r--  include/linux/mfd/wm8350/comparator.h  6
-rw-r--r--  include/linux/mfd/wm8350/core.h  18
-rw-r--r--  include/linux/mfd/wm8350/gpio.h  7
-rw-r--r--  include/linux/mfd/wm8350/pmic.h  7
-rw-r--r--  include/linux/mfd/wm8350/rtc.h  6
-rw-r--r--  include/linux/mfd/wm8350/supply.h  7
-rw-r--r--  include/linux/mfd/wm8350/wdt.h  6
-rw-r--r--  include/linux/mfd/wm8400-audio.h  15
-rw-r--r--  include/linux/mfd/wm8400-private.h  23
-rw-r--r--  include/linux/mfd/wm8400.h  15
-rw-r--r--  include/linux/mfd/wm8994/core.h  7
-rw-r--r--  include/linux/mfd/wm8994/gpio.h  7
-rw-r--r--  include/linux/mfd/wm8994/pdata.h  18
-rw-r--r--  include/linux/mfd/wm8994/registers.h  7
-rw-r--r--  include/linux/mfd/wm97xx.h  21
-rw-r--r--  include/linux/mhi.h  812
-rw-r--r--include/linux/mhi_ep.h305
-rw-r--r--include/linux/mic_bus.h111
-rw-r--r--include/linux/micrel_phy.h40
-rw-r--r--include/linux/microchipphy.h25
-rw-r--r--include/linux/migrate.h261
-rw-r--r--include/linux/migrate_mode.h15
-rw-r--r--include/linux/mii.h259
-rw-r--r--include/linux/mii_timestamper.h128
-rw-r--r--include/linux/min_heap.h477
-rw-r--r--include/linux/minmax.h319
-rw-r--r--include/linux/misc/keba.h72
-rw-r--r--include/linux/misc_cgroup.h138
-rw-r--r--include/linux/miscdevice.h44
-rw-r--r--include/linux/mlx4/cq.h8
-rw-r--r--include/linux/mlx4/device.h92
-rw-r--r--include/linux/mlx4/driver.h64
-rw-r--r--include/linux/mlx4/qp.h4
-rw-r--r--include/linux/mlx5/cmd.h51
-rw-r--r--include/linux/mlx5/cq.h60
-rw-r--r--include/linux/mlx5/device.h607
-rw-r--r--include/linux/mlx5/doorbell.h39
-rw-r--r--include/linux/mlx5/driver.h1235
-rw-r--r--include/linux/mlx5/eq.h63
-rw-r--r--include/linux/mlx5/eswitch.h223
-rw-r--r--include/linux/mlx5/fs.h253
-rw-r--r--include/linux/mlx5/fs_helpers.h94
-rw-r--r--include/linux/mlx5/macsec.h32
-rw-r--r--include/linux/mlx5/mlx5_ifc.h5914
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h71
-rw-r--r--include/linux/mlx5/mlx5_ifc_vdpa.h226
-rw-r--r--include/linux/mlx5/mpfs.h18
-rw-r--r--include/linux/mlx5/port.h106
-rw-r--r--include/linux/mlx5/qp.h188
-rw-r--r--include/linux/mlx5/rsc_dump.h51
-rw-r--r--include/linux/mlx5/srq.h71
-rw-r--r--include/linux/mlx5/transobj.h55
-rw-r--r--include/linux/mlx5/vport.h52
-rw-r--r--include/linux/mm-arch-hooks.h25
-rw-r--r--include/linux/mm.h4311
-rw-r--r--include/linux/mm_api.h1
-rw-r--r--include/linux/mm_inline.h671
-rw-r--r--include/linux/mm_types.h2030
-rw-r--r--include/linux/mm_types_task.h57
-rw-r--r--include/linux/mman.h153
-rw-r--r--include/linux/mmap_lock.h435
-rw-r--r--include/linux/mmc/card.h105
-rw-r--r--include/linux/mmc/core.h73
-rw-r--r--include/linux/mmc/host.h368
-rw-r--r--include/linux/mmc/mmc.h31
-rw-r--r--include/linux/mmc/pm.h5
-rw-r--r--include/linux/mmc/sd.h20
-rw-r--r--include/linux/mmc/sd_uhs2.h240
-rw-r--r--include/linux/mmc/sdhci-pci-data.h20
-rw-r--r--include/linux/mmc/sdio.h13
-rw-r--r--include/linux/mmc/sdio_func.h33
-rw-r--r--include/linux/mmc/sdio_ids.h117
-rw-r--r--include/linux/mmc/slot-gpio.h25
-rw-r--r--include/linux/mmdebug.h99
-rw-r--r--include/linux/mmiotrace.h1
-rw-r--r--include/linux/mmu_context.h40
-rw-r--r--include/linux/mmu_notifier.h525
-rw-r--r--include/linux/mmzone.h1777
-rw-r--r--include/linux/mnt_idmapping.h253
-rw-r--r--include/linux/mnt_namespace.h11
-rw-r--r--include/linux/mod_devicetable.h322
-rw-r--r--include/linux/module.h649
-rw-r--r--include/linux/module_signature.h46
-rw-r--r--include/linux/module_symbol.h15
-rw-r--r--include/linux/moduleloader.h54
-rw-r--r--include/linux/moduleparam.h184
-rw-r--r--include/linux/most.h337
-rw-r--r--include/linux/mount.h126
-rw-r--r--include/linux/moxtet.h102
-rw-r--r--include/linux/mpage.h9
-rw-r--r--include/linux/mpi.h97
-rw-r--r--include/linux/mpls.h1
-rw-r--r--include/linux/mpls_iptunnel.h1
-rw-r--r--include/linux/mroute.h110
-rw-r--r--include/linux/mroute6.h121
-rw-r--r--include/linux/mroute_base.h482
-rw-r--r--include/linux/msdos_fs.h1
-rw-r--r--include/linux/msdos_partition.h50
-rw-r--r--include/linux/msg.h26
-rw-r--r--include/linux/msi.h755
-rw-r--r--include/linux/msi_api.h72
-rw-r--r--include/linux/mtd/bbm.h34
-rw-r--r--include/linux/mtd/blktrans.h34
-rw-r--r--include/linux/mtd/cfi.h63
-rw-r--r--include/linux/mtd/cfi_endian.h16
-rw-r--r--include/linux/mtd/concat.h16
-rw-r--r--include/linux/mtd/doc2000.h16
-rw-r--r--include/linux/mtd/flashchip.h20
-rw-r--r--include/linux/mtd/gen_probe.h16
-rw-r--r--include/linux/mtd/hyperbus.h95
-rw-r--r--include/linux/mtd/inftl.h1
-rw-r--r--include/linux/mtd/jedec.h94
-rw-r--r--include/linux/mtd/latch-addr-flash.h29
-rw-r--r--include/linux/mtd/lpc32xx_mlc.h7
-rw-r--r--include/linux/mtd/lpc32xx_slc.h7
-rw-r--r--include/linux/mtd/map.h160
-rw-r--r--include/linux/mtd/mtd.h256
-rw-r--r--include/linux/mtd/mtdram.h1
-rw-r--r--include/linux/mtd/nand-ecc-mtk.h47
-rw-r--r--include/linux/mtd/nand-ecc-mxic.h49
-rw-r--r--include/linux/mtd/nand-ecc-sw-bch.h71
-rw-r--r--include/linux/mtd/nand-ecc-sw-hamming.h89
-rw-r--r--include/linux/mtd/nand-gpio.h8
-rw-r--r--include/linux/mtd/nand-qpic-common.h483
-rw-r--r--include/linux/mtd/nand.h2109
-rw-r--r--include/linux/mtd/nand_bch.h68
-rw-r--r--include/linux/mtd/nand_ecc.h42
-rw-r--r--include/linux/mtd/ndfc.h8
-rw-r--r--include/linux/mtd/nftl.h16
-rw-r--r--include/linux/mtd/onenand.h8
-rw-r--r--include/linux/mtd/onenand_regs.h6
-rw-r--r--include/linux/mtd/onfi.h190
-rw-r--r--include/linux/mtd/partitions.h4
-rw-r--r--include/linux/mtd/pfow.h36
-rw-r--r--include/linux/mtd/physmap.h7
-rw-r--r--include/linux/mtd/pismo.h5
-rw-r--r--include/linux/mtd/plat-ram.h6
-rw-r--r--include/linux/mtd/platnand.h74
-rw-r--r--include/linux/mtd/qinfo.h3
-rw-r--r--include/linux/mtd/rawnand.h1640
-rw-r--r--include/linux/mtd/sh_flctl.h18
-rw-r--r--include/linux/mtd/sharpsl.h14
-rw-r--r--include/linux/mtd/spear_smi.h19
-rw-r--r--include/linux/mtd/spi-nor.h374
-rw-r--r--include/linux/mtd/spinand.h771
-rw-r--r--include/linux/mtd/super.h12
-rw-r--r--include/linux/mtd/ubi.h19
-rw-r--r--include/linux/mtd/xip.h15
-rw-r--r--include/linux/mtio.h60
-rw-r--r--include/linux/mutex.h247
-rw-r--r--include/linux/mutex_api.h1
-rw-r--r--include/linux/mutex_types.h71
-rw-r--r--include/linux/mux/consumer.h46
-rw-r--r--include/linux/mux/driver.h13
-rw-r--r--include/linux/mv643xx.h979
-rw-r--r--include/linux/mv643xx_eth.h3
-rw-r--r--include/linux/mv643xx_i2c.h5
-rw-r--r--include/linux/mxm-wmi.h15
-rw-r--r--include/linux/n_r3964.h175
-rw-r--r--include/linux/namei.h229
-rw-r--r--include/linux/nd.h107
-rw-r--r--include/linux/ndctl.h22
-rw-r--r--include/linux/net.h163
-rw-r--r--include/linux/net/intel/i40e_client.h191
-rw-r--r--include/linux/net/intel/iidc_rdma.h68
-rw-r--r--include/linux/net/intel/iidc_rdma_ice.h70
-rw-r--r--include/linux/net/intel/iidc_rdma_idpf.h55
-rw-r--r--include/linux/net/intel/libie/adminq.h399
-rw-r--r--include/linux/net/intel/libie/fwlog.h97
-rw-r--r--include/linux/net/intel/libie/pctype.h41
-rw-r--r--include/linux/net/intel/libie/rx.h50
-rw-r--r--include/linux/net_tstamp.h96
-rw-r--r--include/linux/netdev_features.h124
-rw-r--r--include/linux/netdevice.h3880
-rw-r--r--include/linux/netdevice_xmit.h26
-rw-r--r--include/linux/netfilter.h344
-rw-r--r--include/linux/netfilter/ipset/ip_set.h174
-rw-r--r--include/linux/netfilter/ipset/ip_set_bitmap.h15
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h76
-rw-r--r--include/linux/netfilter/ipset/ip_set_counter.h75
-rw-r--r--include/linux/netfilter/ipset/ip_set_getport.h10
-rw-r--r--include/linux/netfilter/ipset/ip_set_hash.h1
-rw-r--r--include/linux/netfilter/ipset/ip_set_list.h1
-rw-r--r--include/linux/netfilter/ipset/ip_set_skbinfo.h46
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h73
-rw-r--r--include/linux/netfilter/ipset/pfxlen.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_amanda.h5
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h27
-rw-r--r--include/linux/netfilter/nf_conntrack_dccp.h40
-rw-r--r--include/linux/netfilter/nf_conntrack_ftp.h9
-rw-r--r--include/linux/netfilter/nf_conntrack_h323.h125
-rw-r--r--include/linux/netfilter/nf_conntrack_h323_asn1.h9
-rw-r--r--include/linux/netfilter/nf_conntrack_h323_types.h8
-rw-r--r--include/linux/netfilter/nf_conntrack_irc.h6
-rw-r--r--include/linux/netfilter/nf_conntrack_pptp.h55
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h8
-rw-r--r--include/linux/netfilter/nf_conntrack_sane.h5
-rw-r--r--include/linux/netfilter/nf_conntrack_sctp.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h9
-rw-r--r--include/linux/netfilter/nf_conntrack_snmp.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_tcp.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_tftp.h6
-rw-r--r--include/linux/netfilter/nf_conntrack_zones_common.h1
-rw-r--r--include/linux/netfilter/nfnetlink.h90
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h4
-rw-r--r--include/linux/netfilter/nfnetlink_osf.h38
-rw-r--r--include/linux/netfilter/x_tables.h67
-rw-r--r--include/linux/netfilter/xt_hashlimit.h9
-rw-r--r--include/linux/netfilter/xt_physdev.h7
-rw-r--r--include/linux/netfilter_arp/arp_tables.h16
-rw-r--r--include/linux/netfilter_bridge.h52
-rw-r--r--include/linux/netfilter_bridge/ebt_802_3.h11
-rw-r--r--include/linux/netfilter_bridge/ebtables.h30
-rw-r--r--include/linux/netfilter_defs.h7
-rw-r--r--include/linux/netfilter_ingress.h57
-rw-r--r--include/linux/netfilter_ipv4.h31
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h23
-rw-r--r--include/linux/netfilter_ipv6.h181
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h34
-rw-r--r--include/linux/netfilter_netdev.h151
-rw-r--r--include/linux/netfs.h555
-rw-r--r--include/linux/netlink.h214
-rw-r--r--include/linux/netpoll.h51
-rw-r--r--include/linux/nfs.h31
-rw-r--r--include/linux/nfs3.h1
-rw-r--r--include/linux/nfs4.h407
-rw-r--r--include/linux/nfs_common.h18
-rw-r--r--include/linux/nfs_fs.h371
-rw-r--r--include/linux/nfs_fs_i.h1
-rw-r--r--include/linux/nfs_fs_sb.h128
-rw-r--r--include/linux/nfs_iostat.h13
-rw-r--r--include/linux/nfs_page.h127
-rw-r--r--include/linux/nfs_ssc.h81
-rw-r--r--include/linux/nfs_xdr.h477
-rw-r--r--include/linux/nfsacl.h7
-rw-r--r--include/linux/nfslocalio.h123
-rw-r--r--include/linux/nitro_enclaves.h11
-rw-r--r--include/linux/nl802154.h13
-rw-r--r--include/linux/nls.h3
-rw-r--r--include/linux/nmi.h205
-rw-r--r--include/linux/node.h212
-rw-r--r--include/linux/nodemask.h186
-rw-r--r--include/linux/nodemask_types.h19
-rw-r--r--include/linux/nospec.h74
-rw-r--r--include/linux/notifier.h60
-rw-r--r--include/linux/ns/ns_common_types.h196
-rw-r--r--include/linux/ns/nstree_types.h55
-rw-r--r--include/linux/ns_common.h150
-rw-r--r--include/linux/nsc_gpio.h1
-rw-r--r--include/linux/nsfs.h43
-rw-r--r--include/linux/nsproxy.h49
-rw-r--r--include/linux/nstree.h96
-rw-r--r--include/linux/ntb.h274
-rw-r--r--include/linux/nubus.h189
-rw-r--r--include/linux/numa.h70
-rw-r--r--include/linux/numa_memblks.h62
-rw-r--r--include/linux/nvme-auth.h51
-rw-r--r--include/linux/nvme-fc-driver.h472
-rw-r--r--include/linux/nvme-fc.h197
-rw-r--r--include/linux/nvme-keyring.h42
-rw-r--r--include/linux/nvme-rdma.h24
-rw-r--r--include/linux/nvme-tcp.h199
-rw-r--r--include/linux/nvme.h1377
-rw-r--r--include/linux/nvmem-consumer.h158
-rw-r--r--include/linux/nvmem-provider.h195
-rw-r--r--include/linux/nvram.h134
-rw-r--r--include/linux/oa_tc6.h24
-rw-r--r--include/linux/objagg.h62
-rw-r--r--include/linux/objpool.h277
-rw-r--r--include/linux/objtool.h130
-rw-r--r--include/linux/objtool_types.h72
-rw-r--r--include/linux/of.h1000
-rw-r--r--include/linux/of_address.h132
-rw-r--r--include/linux/of_clk.h33
-rw-r--r--include/linux/of_device.h81
-rw-r--r--include/linux/of_dma.h5
-rw-r--r--include/linux/of_fdt.h57
-rw-r--r--include/linux/of_gpio.h141
-rw-r--r--include/linux/of_graph.h64
-rw-r--r--include/linux/of_iommu.h36
-rw-r--r--include/linux/of_irq.h45
-rw-r--r--include/linux/of_mdio.h83
-rw-r--r--include/linux/of_net.h30
-rw-r--r--include/linux/of_pci.h65
-rw-r--r--include/linux/of_pdt.h8
-rw-r--r--include/linux/of_platform.h58
-rw-r--r--include/linux/of_reserved_mem.h64
-rw-r--r--include/linux/oid_registry.h80
-rw-r--r--include/linux/olpc-ec.h40
-rw-r--r--include/linux/omap-dma.h56
-rw-r--r--include/linux/omap-dmaengine.h21
-rw-r--r--include/linux/omap-gpmc.h57
-rw-r--r--include/linux/omap-iommu.h25
-rw-r--r--include/linux/omap-mailbox.h20
-rw-r--r--include/linux/omapfb.h15
-rw-r--r--include/linux/once.h45
-rw-r--r--include/linux/once_lite.h36
-rw-r--r--include/linux/oom.h26
-rw-r--r--include/linux/openvswitch.h20
-rw-r--r--include/linux/oprofile.h209
-rw-r--r--include/linux/osq_lock.h6
-rw-r--r--include/linux/overflow.h555
-rw-r--r--include/linux/oxu210hp.h7
-rw-r--r--include/linux/packing.h458
-rw-r--r--include/linux/padata.h162
-rw-r--r--include/linux/page-flags-layout.h68
-rw-r--r--include/linux/page-flags.h1129
-rw-r--r--include/linux/page-isolation.h76
-rw-r--r--include/linux/page_counter.h86
-rw-r--r--include/linux/page_ext.h173
-rw-r--r--include/linux/page_frag_cache.h61
-rw-r--r--include/linux/page_idle.h116
-rw-r--r--include/linux/page_owner.h47
-rw-r--r--include/linux/page_ref.h171
-rw-r--r--include/linux/page_reporting.h29
-rw-r--r--include/linux/page_table_check.h155
-rw-r--r--include/linux/pageblock-flags.h111
-rw-r--r--include/linux/pagemap.h1558
-rw-r--r--include/linux/pagevec.h113
-rw-r--r--include/linux/pagewalk.h207
-rw-r--r--include/linux/panic.h105
-rw-r--r--include/linux/panic_notifier.h12
-rw-r--r--include/linux/papr_scm.h49
-rw-r--r--include/linux/parport.h68
-rw-r--r--include/linux/parport_pc.h4
-rw-r--r--include/linux/parser.h7
-rw-r--r--include/linux/part_stat.h84
-rw-r--r--include/linux/patchkey.h1
-rw-r--r--include/linux/path.h10
-rw-r--r--include/linux/pch_dma.h14
-rw-r--r--include/linux/pci-acpi.h37
-rw-r--r--include/linux/pci-aspm.h65
-rw-r--r--include/linux/pci-ats.h87
-rw-r--r--include/linux/pci-bwctrl.h28
-rw-r--r--include/linux/pci-dma-compat.h147
-rw-r--r--include/linux/pci-dma.h11
-rw-r--r--include/linux/pci-doe.h29
-rw-r--r--include/linux/pci-ecam.h73
-rw-r--r--include/linux/pci-ep-cfs.h11
-rw-r--r--include/linux/pci-ep-msi.h28
-rw-r--r--include/linux/pci-epc.h276
-rw-r--r--include/linux/pci-epf.h153
-rw-r--r--include/linux/pci-ide.h119
-rw-r--r--include/linux/pci-p2pdma.h213
-rw-r--r--include/linux/pci-pwrctrl.h54
-rw-r--r--include/linux/pci-tph.h46
-rw-r--r--include/linux/pci-tsm.h243
-rw-r--r--include/linux/pci.h1806
-rw-r--r--include/linux/pci_hotplug.h143
-rw-r--r--include/linux/pci_ids.h392
-rw-r--r--include/linux/pcie-dwc.h38
-rw-r--r--include/linux/pcieport_if.h70
-rw-r--r--include/linux/pcs-lynx.h17
-rw-r--r--include/linux/pcs-rzn1-miic.h18
-rw-r--r--include/linux/pcs/pcs-mtk-lynxi.h13
-rw-r--r--include/linux/pcs/pcs-xpcs.h63
-rw-r--r--include/linux/pda_power.h42
-rw-r--r--include/linux/pds/pds_adminq.h1545
-rw-r--r--include/linux/pds/pds_auxbus.h20
-rw-r--r--include/linux/pds/pds_common.h56
-rw-r--r--include/linux/pds/pds_core_if.h572
-rw-r--r--include/linux/pds/pds_intr.h163
-rw-r--r--include/linux/pe.h314
-rw-r--r--include/linux/peci-cpu.h64
-rw-r--r--include/linux/peci.h109
-rw-r--r--include/linux/percpu-defs.h124
-rw-r--r--include/linux/percpu-refcount.h161
-rw-r--r--include/linux/percpu-rwsem.h144
-rw-r--r--include/linux/percpu.h85
-rw-r--r--include/linux/percpu_counter.h125
-rw-r--r--include/linux/percpu_ida.h82
-rw-r--r--include/linux/perf/arm_pmu.h118
-rw-r--r--include/linux/perf/arm_pmuv3.h318
-rw-r--r--include/linux/perf/riscv_pmu.h97
-rw-r--r--include/linux/perf_event.h1288
-rw-r--r--include/linux/perf_event_api.h1
-rw-r--r--include/linux/perf_regs.h15
-rw-r--r--include/linux/personality.h1
-rw-r--r--include/linux/pfn.h10
-rw-r--r--include/linux/pfn_t.h122
-rw-r--r--include/linux/pgalloc.h29
-rw-r--r--include/linux/pgalloc_tag.h214
-rw-r--r--include/linux/pgtable.h2197
-rw-r--r--include/linux/pgtable_api.h1
-rw-r--r--include/linux/phonet.h15
-rw-r--r--include/linux/phy.h2275
-rw-r--r--include/linux/phy/omap_control_phy.h12
-rw-r--r--include/linux/phy/omap_usb.h81
-rw-r--r--include/linux/phy/pcie.h12
-rw-r--r--include/linux/phy/phy-dp.h98
-rw-r--r--include/linux/phy/phy-hdmi.h21
-rw-r--r--include/linux/phy/phy-lvds.h32
-rw-r--r--include/linux/phy/phy-mipi-dphy.h287
-rw-r--r--include/linux/phy/phy-qcom-ufs.h41
-rw-r--r--include/linux/phy/phy-sun4i-usb.h12
-rw-r--r--include/linux/phy/phy.h272
-rw-r--r--include/linux/phy/tegra/xusb.h27
-rw-r--r--include/linux/phy/ulpi_phy.h1
-rw-r--r--include/linux/phy_fixed.h52
-rw-r--r--include/linux/phy_led_triggers.h15
-rw-r--r--include/linux/phy_link_topology.h82
-rw-r--r--include/linux/phylib_stubs.h110
-rw-r--r--include/linux/phylink.h839
-rw-r--r--include/linux/pid.h198
-rw-r--r--include/linux/pid_namespace.h101
-rw-r--r--include/linux/pid_types.h16
-rw-r--r--include/linux/pidfs.h19
-rw-r--r--include/linux/pim.h1
-rw-r--r--include/linux/pinctrl/consumer.h107
-rw-r--r--include/linux/pinctrl/devinfo.h22
-rw-r--r--include/linux/pinctrl/machine.h33
-rw-r--r--include/linux/pinctrl/pinconf-generic.h115
-rw-r--r--include/linux/pinctrl/pinconf.h32
-rw-r--r--include/linux/pinctrl/pinctrl-state.h6
-rw-r--r--include/linux/pinctrl/pinctrl.h129
-rw-r--r--include/linux/pinctrl/pinmux.h46
-rw-r--r--include/linux/pipe_fs_i.h237
-rw-r--r--include/linux/pkeys.h14
-rw-r--r--include/linux/pktcdvd.h205
-rw-r--r--include/linux/pl320-ipc.h12
-rw-r--r--include/linux/platform_data/ad5449.h40
-rw-r--r--include/linux/platform_data/ad5755.h103
-rw-r--r--include/linux/platform_data/ad5761.h5
-rw-r--r--include/linux/platform_data/ad7266.h6
-rw-r--r--include/linux/platform_data/ad7291.h12
-rw-r--r--include/linux/platform_data/ad7298.h20
-rw-r--r--include/linux/platform_data/ad7303.h21
-rw-r--r--include/linux/platform_data/ad7791.h1
-rw-r--r--include/linux/platform_data/ad7793.h5
-rw-r--r--include/linux/platform_data/ad7879.h41
-rw-r--r--include/linux/platform_data/ad7887.h7
-rw-r--r--include/linux/platform_data/adau17x1.h3
-rw-r--r--include/linux/platform_data/adau1977.h45
-rw-r--r--include/linux/platform_data/adp5588.h172
-rw-r--r--include/linux/platform_data/adp8860.h3
-rw-r--r--include/linux/platform_data/adp8870.h3
-rw-r--r--include/linux/platform_data/ads1015.h36
-rw-r--r--include/linux/platform_data/ads7828.h7
-rw-r--r--include/linux/platform_data/amd_qdma.h38
-rw-r--r--include/linux/platform_data/amd_xdma.h34
-rw-r--r--include/linux/platform_data/ams-delta-fiq.h58
-rw-r--r--include/linux/platform_data/apds990x.h16
-rw-r--r--include/linux/platform_data/arm-ux500-pm.h3
-rw-r--r--include/linux/platform_data/asoc-imx-ssi.h1
-rw-r--r--include/linux/platform_data/asoc-kirkwood.h1
-rw-r--r--include/linux/platform_data/asoc-mx27vis.h11
-rw-r--r--include/linux/platform_data/asoc-palm27x.h8
-rw-r--r--include/linux/platform_data/asoc-pxa.h32
-rw-r--r--include/linux/platform_data/asoc-s3c.h7
-rw-r--r--include/linux/platform_data/asoc-s3c24xx_simtec.h33
-rw-r--r--include/linux/platform_data/asoc-ti-mcbsp.h28
-rw-r--r--include/linux/platform_data/asoc-ux500-msp.h20
-rw-r--r--include/linux/platform_data/at24.h58
-rw-r--r--include/linux/platform_data/at91_adc.h50
-rw-r--r--include/linux/platform_data/ata-pxa.h15
-rw-r--r--include/linux/platform_data/ata-samsung_cf.h34
-rw-r--r--include/linux/platform_data/atmel.h15
-rw-r--r--include/linux/platform_data/atmel_mxt_ts.h31
-rw-r--r--include/linux/platform_data/b53.h6
-rw-r--r--include/linux/platform_data/bcm7038_wdt.h8
-rw-r--r--include/linux/platform_data/bcmgenet.h18
-rw-r--r--include/linux/platform_data/bd6107.h8
-rw-r--r--include/linux/platform_data/bfin_rotary.h117
-rw-r--r--include/linux/platform_data/bh1770glc.h16
-rw-r--r--include/linux/platform_data/brcmfmac.h6
-rw-r--r--include/linux/platform_data/brcmnand.h12
-rw-r--r--include/linux/platform_data/bt-nokia-h4p.h38
-rw-r--r--include/linux/platform_data/clk-da8xx-cfgchip.h21
-rw-r--r--include/linux/platform_data/clk-fch.h18
-rw-r--r--include/linux/platform_data/clk-integrator.h2
-rw-r--r--include/linux/platform_data/clk-u300.h1
-rw-r--r--include/linux/platform_data/cpuidle-exynos.h5
-rw-r--r--include/linux/platform_data/cros_ec_chardev.h38
-rw-r--r--include/linux/platform_data/cros_ec_commands.h6638
-rw-r--r--include/linux/platform_data/cros_ec_proto.h298
-rw-r--r--include/linux/platform_data/cros_ec_sensorhub.h194
-rw-r--r--include/linux/platform_data/cros_usbpd_notify.h17
-rw-r--r--include/linux/platform_data/crypto-atmel.h22
-rw-r--r--include/linux/platform_data/crypto-ux500.h2
-rw-r--r--include/linux/platform_data/cyttsp4.h76
-rw-r--r--include/linux/platform_data/davinci-cpufreq.h25
-rw-r--r--include/linux/platform_data/davinci_asp.h29
-rw-r--r--include/linux/platform_data/db8500_thermal.h38
-rw-r--r--include/linux/platform_data/dma-atmel.h65
-rw-r--r--include/linux/platform_data/dma-coh901318.h72
-rw-r--r--include/linux/platform_data/dma-dw.h49
-rw-r--r--include/linux/platform_data/dma-ep93xx.h93
-rw-r--r--include/linux/platform_data/dma-hsu.h7
-rw-r--r--include/linux/platform_data/dma-imx-sdma.h67
-rw-r--r--include/linux/platform_data/dma-iop32x.h110
-rw-r--r--include/linux/platform_data/dma-mcf-edma.h38
-rw-r--r--include/linux/platform_data/dma-mmp_tdma.h40
-rw-r--r--include/linux/platform_data/dma-mv_xor.h1
-rw-r--r--include/linux/platform_data/dma-s3c24xx.h52
-rw-r--r--include/linux/platform_data/dma-ste-dma40.h209
-rw-r--r--include/linux/platform_data/dmtimer-omap.h55
-rw-r--r--include/linux/platform_data/ds620.h3
-rw-r--r--include/linux/platform_data/dsa.h68
-rw-r--r--include/linux/platform_data/dwc3-omap.h43
-rw-r--r--include/linux/platform_data/edma.h6
-rw-r--r--include/linux/platform_data/efm32-spi.h14
-rw-r--r--include/linux/platform_data/efm32-uart.h18
-rw-r--r--include/linux/platform_data/ehci-sh.h28
-rw-r--r--include/linux/platform_data/elm.h16
-rw-r--r--include/linux/platform_data/emc2305.h28
-rw-r--r--include/linux/platform_data/emif_plat.h5
-rw-r--r--include/linux/platform_data/eth-netx.h25
-rw-r--r--include/linux/platform_data/fsa9480.h27
-rw-r--r--include/linux/platform_data/g762.h15
-rw-r--r--include/linux/platform_data/gpio-ath79.h19
-rw-r--r--include/linux/platform_data/gpio-davinci.h60
-rw-r--r--include/linux/platform_data/gpio-dwapb.h31
-rw-r--r--include/linux/platform_data/gpio-htc-egpio.h6
-rw-r--r--include/linux/platform_data/gpio-omap.h45
-rw-r--r--include/linux/platform_data/gpio-ts5500.h27
-rw-r--r--include/linux/platform_data/gpio/gpio-amd-fch.h46
-rw-r--r--include/linux/platform_data/gpio_backlight.h11
-rw-r--r--include/linux/platform_data/gpmc-omap.h15
-rw-r--r--include/linux/platform_data/gsc_hwmon.h45
-rw-r--r--include/linux/platform_data/hirschmann-hellcreek.h24
-rw-r--r--include/linux/platform_data/hsmmc-omap.h12
-rw-r--r--include/linux/platform_data/huawei-gaokun-ec.h79
-rw-r--r--include/linux/platform_data/hwmon-s3c.h15
-rw-r--r--include/linux/platform_data/i2c-cbus-gpio.h27
-rw-r--r--include/linux/platform_data/i2c-davinci.h27
-rw-r--r--include/linux/platform_data/i2c-designware.h21
-rw-r--r--include/linux/platform_data/i2c-gpio.h (renamed from include/linux/i2c-gpio.h)18
-rw-r--r--include/linux/platform_data/i2c-hid.h42
-rw-r--r--include/linux/platform_data/i2c-imx.h3
-rw-r--r--include/linux/platform_data/i2c-mux-gpio.h (renamed from include/linux/i2c-mux-gpio.h)16
-rw-r--r--include/linux/platform_data/i2c-mux-reg.h8
-rw-r--r--include/linux/platform_data/i2c-nuc900.h9
-rw-r--r--include/linux/platform_data/i2c-ocores.h (renamed from include/linux/i2c-ocores.h)8
-rw-r--r--include/linux/platform_data/i2c-omap.h (renamed from include/linux/i2c-omap.h)1
-rw-r--r--include/linux/platform_data/i2c-pca-platform.h (renamed from include/linux/i2c-pca-platform.h)4
-rw-r--r--include/linux/platform_data/i2c-pxa.h18
-rw-r--r--include/linux/platform_data/i2c-s3c2410.h5
-rw-r--r--include/linux/platform_data/i2c-xiic.h (renamed from include/linux/i2c-xiic.h)14
-rw-r--r--include/linux/platform_data/ina2xx.h9
-rw-r--r--include/linux/platform_data/invensense_mpu6050.h12
-rw-r--r--include/linux/platform_data/iommu-omap.h9
-rw-r--r--include/linux/platform_data/irda-pxaficp.h25
-rw-r--r--include/linux/platform_data/irda-sa11x0.h20
-rw-r--r--include/linux/platform_data/isl9305.h6
-rw-r--r--include/linux/platform_data/itco_wdt.h12
-rw-r--r--include/linux/platform_data/keyboard-pxa930_rotary.h20
-rw-r--r--include/linux/platform_data/keyboard-spear.h164
-rw-r--r--include/linux/platform_data/keypad-ep93xx.h31
-rw-r--r--include/linux/platform_data/keypad-nomadik-ske.h50
-rw-r--r--include/linux/platform_data/keypad-omap.h8
-rw-r--r--include/linux/platform_data/keypad-pxa27x.h72
-rw-r--r--include/linux/platform_data/keypad-w90p910.h15
-rw-r--r--include/linux/platform_data/keyscan-davinci.h42
-rw-r--r--include/linux/platform_data/lcd-mipid.h3
-rw-r--r--include/linux/platform_data/leds-kirkwood-netxbig.h54
-rw-r--r--include/linux/platform_data/leds-kirkwood-ns2.h38
-rw-r--r--include/linux/platform_data/leds-lm355x.h3
-rw-r--r--include/linux/platform_data/leds-lm3642.h3
-rw-r--r--include/linux/platform_data/leds-lp55xx.h21
-rw-r--r--include/linux/platform_data/leds-omap.h22
-rw-r--r--include/linux/platform_data/leds-pca963x.h48
-rw-r--r--include/linux/platform_data/leds-s3c24xx.h27
-rw-r--r--include/linux/platform_data/lenovo-yoga-c630.h44
-rw-r--r--include/linux/platform_data/lm3630a_bl.h10
-rw-r--r--include/linux/platform_data/lm3639_bl.h6
-rw-r--r--include/linux/platform_data/lm8323.h14
-rw-r--r--include/linux/platform_data/lp855x.h10
-rw-r--r--include/linux/platform_data/lp8727.h5
-rw-r--r--include/linux/platform_data/lp8755.h6
-rw-r--r--include/linux/platform_data/ltc4245.h6
-rw-r--r--include/linux/platform_data/lv5207lp.h7
-rw-r--r--include/linux/platform_data/macb.h32
-rw-r--r--include/linux/platform_data/max197.h7
-rw-r--r--include/linux/platform_data/max3421-hcd.h1
-rw-r--r--include/linux/platform_data/max6639.h14
-rw-r--r--include/linux/platform_data/max6697.h36
-rw-r--r--include/linux/platform_data/max732x.h13
-rw-r--r--include/linux/platform_data/mcs.h35
-rw-r--r--include/linux/platform_data/mdio-bcm-unimac.h16
-rw-r--r--include/linux/platform_data/mdio-gpio.h29
-rw-r--r--include/linux/platform_data/media/camera-mx2.h44
-rw-r--r--include/linux/platform_data/media/camera-mx3.h52
-rw-r--r--include/linux/platform_data/media/camera-pxa.h14
-rw-r--r--include/linux/platform_data/media/coda.h18
-rw-r--r--include/linux/platform_data/media/gpio-ir-recv.h23
-rw-r--r--include/linux/platform_data/media/ir-rx51.h8
-rw-r--r--include/linux/platform_data/media/mmp-camera.h22
-rw-r--r--include/linux/platform_data/media/omap1_camera.h35
-rw-r--r--include/linux/platform_data/media/omap4iss.h65
-rw-r--r--include/linux/platform_data/media/s5p_hdmi.h36
-rw-r--r--include/linux/platform_data/media/si4713.h4
-rw-r--r--include/linux/platform_data/media/sii9234.h24
-rw-r--r--include/linux/platform_data/media/soc_camera_platform.h83
-rw-r--r--include/linux/platform_data/media/timb_radio.h14
-rw-r--r--include/linux/platform_data/media/timb_video.h14
-rw-r--r--include/linux/platform_data/mfd-mcp-sa11x0.h5
-rw-r--r--include/linux/platform_data/microchip-ksz.h30
-rw-r--r--include/linux/platform_data/mlxcpld-hotplug.h99
-rw-r--r--include/linux/platform_data/mlxcpld.h31
-rw-r--r--include/linux/platform_data/mlxreg.h239
-rw-r--r--include/linux/platform_data/mmc-davinci.h1
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h51
-rw-r--r--include/linux/platform_data/mmc-esdhc-mcf.h17
-rw-r--r--include/linux/platform_data/mmc-mxcmmc.h1
-rw-r--r--include/linux/platform_data/mmc-omap.h13
-rw-r--r--include/linux/platform_data/mmc-pxamci.h9
-rw-r--r--include/linux/platform_data/mmc-s3cmci.h52
-rw-r--r--include/linux/platform_data/mmc-sdhci-s3c.h1
-rw-r--r--include/linux/platform_data/mmp_audio.h22
-rw-r--r--include/linux/platform_data/mmp_dma.h10
-rw-r--r--include/linux/platform_data/mms114.h24
-rw-r--r--include/linux/platform_data/mouse-pxa930_trkball.h10
-rw-r--r--include/linux/platform_data/mtd-davinci-aemif.h37
-rw-r--r--include/linux/platform_data/mtd-davinci.h90
-rw-r--r--include/linux/platform_data/mtd-mxc_nand.h32
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h30
-rw-r--r--include/linux/platform_data/mtd-nand-pxa3xx.h44
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h73
-rw-r--r--include/linux/platform_data/mtd-onenand-omap2.h34
-rw-r--r--include/linux/platform_data/mtd-orion_nand.h1
-rw-r--r--include/linux/platform_data/mv88e6xxx.h19
-rw-r--r--include/linux/platform_data/mv_usb.h15
-rw-r--r--include/linux/platform_data/net-cw1200.h6
-rw-r--r--include/linux/platform_data/nfcmrvl.h48
-rw-r--r--include/linux/platform_data/ntc_thermistor.h62
-rw-r--r--include/linux/platform_data/nxp-nci.h27
-rw-r--r--include/linux/platform_data/omap-twl4030.h20
-rw-r--r--include/linux/platform_data/omap-wd-timer.h6
-rw-r--r--include/linux/platform_data/omap1_bl.h2
-rw-r--r--include/linux/platform_data/omapdss.h6
-rw-r--r--include/linux/platform_data/pca953x.h14
-rw-r--r--include/linux/platform_data/pcf857x.h44
-rw-r--r--include/linux/platform_data/pcmcia-pxa2xx_viper.h11
-rw-r--r--include/linux/platform_data/phy-da8xx-usb.h21
-rw-r--r--include/linux/platform_data/pinctrl-adi2.h40
-rw-r--r--include/linux/platform_data/pinctrl-single.h7
-rw-r--r--include/linux/platform_data/pixcir_i2c_ts.h63
-rw-r--r--include/linux/platform_data/pm33xx.h75
-rw-r--r--include/linux/platform_data/pwm_omap_dmtimer.h90
-rw-r--r--include/linux/platform_data/pxa2xx_udc.h7
-rw-r--r--include/linux/platform_data/pxa_sdhci.h9
-rw-r--r--include/linux/platform_data/regulator-haptic.h5
-rw-r--r--include/linux/platform_data/remoteproc-omap.h59
-rw-r--r--include/linux/platform_data/rtc-ds2404.h20
-rw-r--r--include/linux/platform_data/rtc-v3020.h41
-rw-r--r--include/linux/platform_data/s3c-hsotg.h5
-rw-r--r--include/linux/platform_data/s3c-hsudc.h34
-rw-r--r--include/linux/platform_data/sa11x0-serial.h2
-rw-r--r--include/linux/platform_data/sc18is602.h7
-rw-r--r--include/linux/platform_data/sdhci-pic32.h10
-rw-r--r--include/linux/platform_data/serial-imx.h28
-rw-r--r--include/linux/platform_data/serial-omap.h6
-rw-r--r--include/linux/platform_data/serial-sccnxp.h6
-rw-r--r--include/linux/platform_data/sgi-w1.h13
-rw-r--r--include/linux/platform_data/sh_ipmmu.h18
-rw-r--r--include/linux/platform_data/sh_mmcif.h (renamed from include/linux/mmc/sh_mmcif.h)8
-rw-r--r--include/linux/platform_data/shmob_drm.h69
-rw-r--r--include/linux/platform_data/sht15.h38
-rw-r--r--include/linux/platform_data/sht3x.h25
-rw-r--r--include/linux/platform_data/shtc1.h11
-rw-r--r--include/linux/platform_data/si5351.h5
-rw-r--r--include/linux/platform_data/simplefb.h10
-rw-r--r--include/linux/platform_data/sky81452-backlight.h46
-rw-r--r--include/linux/platform_data/spi-clps711x.h21
-rw-r--r--include/linux/platform_data/spi-davinci.h90
-rw-r--r--include/linux/platform_data/spi-ep93xx.h18
-rw-r--r--include/linux/platform_data/spi-imx.h27
-rw-r--r--include/linux/platform_data/spi-mt65xx.h9
-rw-r--r--include/linux/platform_data/spi-nuc900.h33
-rw-r--r--include/linux/platform_data/spi-omap2-mcspi.h13
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h22
-rw-r--r--include/linux/platform_data/ssm2518.h22
-rw-r--r--include/linux/platform_data/st1232_pdata.h13
-rw-r--r--include/linux/platform_data/st33zp24.h28
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h10
-rw-r--r--include/linux/platform_data/syscon.h8
-rw-r--r--include/linux/platform_data/tda9950.h16
-rw-r--r--include/linux/platform_data/ti-aemif.h23
-rw-r--r--include/linux/platform_data/ti-prm.h21
-rw-r--r--include/linux/platform_data/ti-sysc.h171
-rw-r--r--include/linux/platform_data/tmio.h65
-rw-r--r--include/linux/platform_data/touchscreen-s3c2410.h25
-rw-r--r--include/linux/platform_data/tps68470.h40
-rw-r--r--include/linux/platform_data/tsc2007.h1
-rw-r--r--include/linux/platform_data/tsl2563.h8
-rw-r--r--include/linux/platform_data/tsl2772.h101
-rw-r--r--include/linux/platform_data/txx9/ndfmc.h28
-rw-r--r--include/linux/platform_data/uio_dmem_genirq.h10
-rw-r--r--include/linux/platform_data/uio_pruss.h26
-rw-r--r--include/linux/platform_data/usb-davinci.h36
-rw-r--r--include/linux/platform_data/usb-ehci-mxc.h13
-rw-r--r--include/linux/platform_data/usb-musb-ux500.h2
-rw-r--r--include/linux/platform_data/usb-mx2.h38
-rw-r--r--include/linux/platform_data/usb-ohci-pxa27x.h1
-rw-r--r--include/linux/platform_data/usb-ohci-s3c2410.h5
-rw-r--r--include/linux/platform_data/usb-omap.h18
-rw-r--r--include/linux/platform_data/usb-omap1.h4
-rw-r--r--include/linux/platform_data/usb-pxa3xx-ulpi.h35
-rw-r--r--include/linux/platform_data/usb-s3c2410_udc.h44
-rw-r--r--include/linux/platform_data/usb3503.h5
-rw-r--r--include/linux/platform_data/ux500_wdt.h19
-rw-r--r--include/linux/platform_data/video-clcd-versatile.h27
-rw-r--r--include/linux/platform_data/video-ep93xx.h1
-rw-r--r--include/linux/platform_data/video-imxfb.h69
-rw-r--r--include/linux/platform_data/video-mx3fb.h53
-rw-r--r--include/linux/platform_data/video-nuc900fb.h83
-rw-r--r--include/linux/platform_data/video-pxafb.h28
-rw-r--r--include/linux/platform_data/video_s3c.h1
-rw-r--r--include/linux/platform_data/voltage-omap.h6
-rw-r--r--include/linux/platform_data/wilco-ec.h225
-rw-r--r--include/linux/platform_data/wiznet.h3
-rw-r--r--include/linux/platform_data/wkup_m3.h10
-rw-r--r--include/linux/platform_data/x86/amd-fch.h13
-rw-r--r--include/linux/platform_data/x86/asus-wmi-leds-ids.h50
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h203
-rw-r--r--include/linux/platform_data/x86/clk-lpss.h (renamed from include/linux/platform_data/clk-lpss.h)7
-rw-r--r--include/linux/platform_data/x86/clk-pmc-atom.h13
-rw-r--r--include/linux/platform_data/x86/int3472.h166
-rw-r--r--include/linux/platform_data/x86/intel-mid_wdt.h (renamed from include/linux/platform_data/intel-mid_wdt.h)11
-rw-r--r--include/linux/platform_data/x86/intel_pmc_ipc.h98
-rw-r--r--include/linux/platform_data/x86/intel_scu_ipc.h72
-rw-r--r--include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h76
-rw-r--r--include/linux/platform_data/x86/p2sb.h28
-rw-r--r--include/linux/platform_data/x86/pmc_atom.h49
-rw-r--r--include/linux/platform_data/x86/pwm-lpss.h60
-rw-r--r--include/linux/platform_data/x86/simatic-ipc-base.h31
-rw-r--r--include/linux/platform_data/x86/simatic-ipc.h79
-rw-r--r--include/linux/platform_data/x86/soc.h70
-rw-r--r--include/linux/platform_data/x86/spi-intel.h (renamed from include/linux/platform_data/intel-spi.h)18
-rw-r--r--include/linux/platform_data/xilinx-ll-temac.h33
-rw-r--r--include/linux/platform_data/xtalk-bridge.h22
-rw-r--r--include/linux/platform_data/zforce_ts.h23
-rw-r--r--include/linux/platform_device.h156
-rw-r--r--include/linux/platform_profile.h61
-rw-r--r--include/linux/pldmfw.h173
-rw-r--r--include/linux/plist.h21
-rw-r--r--include/linux/plist_types.h17
-rw-r--r--include/linux/pm-trace.h1
-rw-r--r--include/linux/pm.h279
-rw-r--r--include/linux/pm2301_charger.h61
-rw-r--r--include/linux/pm_clock.h17
-rw-r--r--include/linux/pm_domain.h456
-rw-r--r--include/linux/pm_opp.h586
-rw-r--r--include/linux/pm_qos.h244
-rw-r--r--include/linux/pm_runtime.h596
-rw-r--r--include/linux/pm_wakeirq.h29
-rw-r--r--include/linux/pm_wakeup.h161
-rw-r--r--include/linux/pmbus.h79
-rw-r--r--include/linux/pmu.h7
-rw-r--r--include/linux/pnfs_osd_xdr.h317
-rw-r--r--include/linux/pnp.h50
-rw-r--r--include/linux/poison.h53
-rw-r--r--include/linux/poll.h80
-rw-r--r--include/linux/polynomial.h35
-rw-r--r--include/linux/posix-clock.h75
-rw-r--r--include/linux/posix-timers.h281
-rw-r--r--include/linux/posix-timers_types.h80
-rw-r--r--include/linux/posix_acl.h82
-rw-r--r--include/linux/posix_acl_xattr.h41
-rw-r--r--include/linux/power/ab8500.h16
-rw-r--r--include/linux/power/bq2415x_charger.h21
-rw-r--r--include/linux/power/bq24190_charger.h15
-rw-r--r--include/linux/power/bq24735-charger.h15
-rw-r--r--include/linux/power/bq25890_charger.h15
-rw-r--r--include/linux/power/bq27xxx_battery.h54
-rw-r--r--include/linux/power/charger-manager.h48
-rw-r--r--include/linux/power/generic-adc-battery.h29
-rw-r--r--include/linux/power/gpio-charger.h17
-rw-r--r--include/linux/power/isp1704_charger.h30
-rw-r--r--include/linux/power/jz4740-battery.h11
-rw-r--r--include/linux/power/max17042_battery.h73
-rw-r--r--include/linux/power/max77705_charger.h193
-rw-r--r--include/linux/power/max8903_charger.h57
-rw-r--r--include/linux/power/power_on_reason.h19
-rw-r--r--include/linux/power/sbs-battery.h15
-rw-r--r--include/linux/power/smartreflex.h28
-rw-r--r--include/linux/power/smb347-charger.h117
-rw-r--r--include/linux/power/twl4030_madc_battery.h11
-rw-r--r--include/linux/power_supply.h708
-rw-r--r--include/linux/powercap.h25
-rw-r--r--include/linux/ppp-comp.h7
-rw-r--r--include/linux/ppp_channel.h14
-rw-r--r--include/linux/ppp_defs.h19
-rw-r--r--include/linux/pps-gpio.h32
-rw-r--r--include/linux/pps_gen_kernel.h78
-rw-r--r--include/linux/pps_kernel.h35
-rw-r--r--include/linux/pr.h26
-rw-r--r--include/linux/prandom.h50
-rw-r--r--include/linux/preempt.h249
-rw-r--r--include/linux/prefetch.h16
-rw-r--r--include/linux/prime_numbers.h1
-rw-r--r--include/linux/printk.h492
-rw-r--r--include/linux/prmt.h16
-rw-r--r--include/linux/proc_fs.h188
-rw-r--r--include/linux/proc_ns.h45
-rw-r--r--include/linux/processor.h10
-rw-r--r--include/linux/profile.h56
-rw-r--r--include/linux/projid.h1
-rw-r--r--include/linux/property.h559
-rw-r--r--include/linux/pruss_driver.h177
-rw-r--r--include/linux/psci.h29
-rw-r--r--include/linux/pse-pd/pse.h421
-rw-r--r--include/linux/pseudo_fs.h18
-rw-r--r--include/linux/psi.h70
-rw-r--r--include/linux/psi_types.h216
-rw-r--r--include/linux/psp-platform-access.h72
-rw-r--r--include/linux/psp-sev.h1093
-rw-r--r--include/linux/psp-tee.h91
-rw-r--r--include/linux/psp.h29
-rw-r--r--include/linux/pstore.h64
-rw-r--r--include/linux/pstore_blk.h55
-rw-r--r--include/linux/pstore_ram.h70
-rw-r--r--include/linux/pstore_zone.h60
-rw-r--r--include/linux/ptdump.h40
-rw-r--r--include/linux/pti.h55
-rw-r--r--include/linux/ptp_classify.h194
-rw-r--r--include/linux/ptp_clock_kernel.h328
-rw-r--r--include/linux/ptp_kvm.h22
-rw-r--r--include/linux/ptp_mock.h38
-rw-r--r--include/linux/ptp_pch.h26
-rw-r--r--include/linux/ptr_ring.h175
-rw-r--r--include/linux/ptrace.h162
-rw-r--r--include/linux/ptrace_api.h1
-rw-r--r--include/linux/purgatory.h3
-rw-r--r--include/linux/pvclock_gtod.h1
-rw-r--r--include/linux/pwm.h472
-rw-r--r--include/linux/pwm_backlight.h7
-rw-r--r--include/linux/pwrseq/consumer.h56
-rw-r--r--include/linux/pwrseq/provider.h78
-rw-r--r--include/linux/pxa168_eth.h1
-rw-r--r--include/linux/pxa2xx_ssp.h235
-rw-r--r--include/linux/qat/qat_mig_dev.h31
-rw-r--r--include/linux/qcom_scm.h77
-rw-r--r--include/linux/qed/common_hsi.h1423
-rw-r--r--include/linux/qed/eth_common.h479
-rw-r--r--include/linux/qed/fcoe_common.h928
-rw-r--r--include/linux/qed/iscsi_common.h1635
-rw-r--r--include/linux/qed/iwarp_common.h47
-rw-r--r--include/linux/qed/nvmetcp_common.h531
-rw-r--r--include/linux/qed/qed_chain.h518
-rw-r--r--include/linux/qed/qed_eth_if.h101
-rw-r--r--include/linux/qed/qed_fcoe_if.h10
-rw-r--r--include/linux/qed/qed_if.h960
-rw-r--r--include/linux/qed/qed_iov_if.h30
-rw-r--r--include/linux/qed/qed_iscsi_if.h38
-rw-r--r--include/linux/qed/qed_ll2_if.h102
-rw-r--r--include/linux/qed/qed_nvmetcp_if.h257
-rw-r--r--include/linux/qed/qed_rdma_if.h90
-rw-r--r--include/linux/qed/qede_rdma.h45
-rw-r--r--include/linux/qed/rdma_common.h58
-rw-r--r--include/linux/qed/roce_common.h49
-rw-r--r--include/linux/qed/storage_common.h124
-rw-r--r--include/linux/qed/tcp_common.h195
-rw-r--r--include/linux/qnx6_fs.h1
-rw-r--r--include/linux/quicklist.h93
-rw-r--r--include/linux/quota.h65
-rw-r--r--include/linux/quotaops.h52
-rw-r--r--include/linux/radix-tree.h237
-rw-r--r--include/linux/raid/detect.h11
-rw-r--r--include/linux/raid/md_u.h20
-rw-r--r--include/linux/raid/pq.h60
-rw-r--r--include/linux/raid/xor.h22
-rw-r--r--include/linux/raid_class.h10
-rw-r--r--include/linux/ramfs.h11
-rw-r--r--include/linux/random.h246
-rw-r--r--include/linux/randomize_kstack.h98
-rw-r--r--include/linux/range.h33
-rw-r--r--include/linux/ras.h38
-rw-r--r--include/linux/raspberrypi/vchiq.h112
-rw-r--r--include/linux/raspberrypi/vchiq_arm.h164
-rw-r--r--include/linux/raspberrypi/vchiq_bus.h60
-rw-r--r--include/linux/raspberrypi/vchiq_cfg.h41
-rw-r--r--include/linux/raspberrypi/vchiq_core.h646
-rw-r--r--include/linux/raspberrypi/vchiq_debugfs.h22
-rw-r--r--include/linux/ratelimit.h74
-rw-r--r--include/linux/ratelimit_types.h48
-rw-r--r--include/linux/rational.h1
-rw-r--r--include/linux/rbtree.h398
-rw-r--r--include/linux/rbtree_augmented.h165
-rw-r--r--include/linux/rbtree_latch.h26
-rw-r--r--include/linux/rbtree_types.h34
-rw-r--r--include/linux/rcu_node_tree.h19
-rw-r--r--include/linux/rcu_notifier.h32
-rw-r--r--include/linux/rcu_segcblist.h152
-rw-r--r--include/linux/rcu_sync.h54
-rw-r--r--include/linux/rculist.h310
-rw-r--r--include/linux/rculist_bl.h29
-rw-r--r--include/linux/rculist_nulls.h108
-rw-r--r--include/linux/rcupdate.h835
-rw-r--r--include/linux/rcupdate_trace.h106
-rw-r--r--include/linux/rcupdate_wait.h66
-rw-r--r--include/linux/rcuref.h178
-rw-r--r--include/linux/rcutiny.h125
-rw-r--r--include/linux/rcutree.h120
-rw-r--r--include/linux/rcuwait.h85
-rw-r--r--include/linux/rcuwait_api.h1
-rw-r--r--include/linux/reboot-mode.h1
-rw-r--r--include/linux/reboot.h146
-rw-r--r--include/linux/reciprocal_div.h69
-rw-r--r--include/linux/ref_tracker.h142
-rw-r--r--include/linux/refcount.h469
-rw-r--r--include/linux/refcount_api.h1
-rw-r--r--include/linux/refcount_types.h19
-rw-r--r--include/linux/regmap.h1157
-rw-r--r--include/linux/regset.h239
-rw-r--r--include/linux/regulator/ab8500.h325
-rw-r--r--include/linux/regulator/act8865.h10
-rw-r--r--include/linux/regulator/arizona-ldo1.h8
-rw-r--r--include/linux/regulator/arizona-micsupp.h5
-rw-r--r--include/linux/regulator/consumer.h256
-rw-r--r--include/linux/regulator/coupler.h101
-rw-r--r--include/linux/regulator/da9121.h36
-rw-r--r--include/linux/regulator/da9211.h20
-rw-r--r--include/linux/regulator/db8500-prcmu.h9
-rw-r--r--include/linux/regulator/driver.h402
-rw-r--r--include/linux/regulator/fan53555.h6
-rw-r--r--include/linux/regulator/fixed.h20
-rw-r--r--include/linux/regulator/gpio-regulator.h26
-rw-r--r--include/linux/regulator/lp3971.h15
-rw-r--r--include/linux/regulator/lp3972.h15
-rw-r--r--include/linux/regulator/lp872x.h23
-rw-r--r--include/linux/regulator/machine.h101
-rw-r--r--include/linux/regulator/max1586.h15
-rw-r--r--include/linux/regulator/max8649.h5
-rw-r--r--include/linux/regulator/max8660.h14
-rw-r--r--include/linux/regulator/max8952.h21
-rw-r--r--include/linux/regulator/max8973-regulator.h22
-rw-r--r--include/linux/regulator/mt6311.h10
-rw-r--r--include/linux/regulator/mt6315-regulator.h44
-rw-r--r--include/linux/regulator/mt6323-regulator.h10
-rw-r--r--include/linux/regulator/mt6331-regulator.h46
-rw-r--r--include/linux/regulator/mt6332-regulator.h27
-rw-r--r--include/linux/regulator/mt6357-regulator.h51
-rw-r--r--include/linux/regulator/mt6358-regulator.h98
-rw-r--r--include/linux/regulator/mt6359-regulator.h59
-rw-r--r--include/linux/regulator/mt6363-regulator.h330
-rw-r--r--include/linux/regulator/mt6380-regulator.h10
-rw-r--r--include/linux/regulator/mt6397-regulator.h10
-rw-r--r--include/linux/regulator/of_regulator.h1
-rw-r--r--include/linux/regulator/pca9450.h275
-rw-r--r--include/linux/regulator/pfuze100.h33
-rw-r--r--include/linux/regulator/s2dos05.h73
-rw-r--r--include/linux/regulator/tps51632-regulator.h16
-rw-r--r--include/linux/regulator/tps62360.h22
-rw-r--r--include/linux/regulator/tps6507x.h14
-rw-r--r--include/linux/regulator/userspace-consumer.h2
-rw-r--r--include/linux/relay.h61
-rw-r--r--include/linux/remoteproc.h302
-rw-r--r--include/linux/remoteproc/mtk_scp.h69
-rw-r--r--include/linux/remoteproc/pruss.h83
-rw-r--r--include/linux/remoteproc/qcom_rproc.h48
-rw-r--r--include/linux/remoteproc/st_slim_rproc.h6
-rw-r--r--include/linux/resctrl.h700
-rw-r--r--include/linux/resctrl_types.h60
-rw-r--r--include/linux/reservation.h267
-rw-r--r--include/linux/reset-controller.h29
-rw-r--r--include/linux/reset.h804
-rw-r--r--include/linux/reset/bcm63xx_pmb.h10
-rw-r--r--include/linux/reset/reset-simple.h48
-rw-r--r--include/linux/reset/socfpga.h7
-rw-r--r--include/linux/reset/sunxi.h7
-rw-r--r--include/linux/resource.h3
-rw-r--r--include/linux/resource_ext.h22
-rw-r--r--include/linux/restart_block.h17
-rw-r--r--include/linux/resume_user_mode.h65
-rw-r--r--include/linux/rethook.h98
-rw-r--r--include/linux/rfkill.h57
-rw-r--r--include/linux/rhashtable-types.h142
-rw-r--r--include/linux/rhashtable.h752
-rw-r--r--include/linux/ring_buffer.h182
-rw-r--r--include/linux/rio.h14
-rw-r--r--include/linux/rio_drv.h14
-rw-r--r--include/linux/rio_ids.h19
-rw-r--r--include/linux/rio_regs.h6
-rw-r--r--include/linux/rmap.h897
-rw-r--r--include/linux/rmi.h20
-rw-r--r--include/linux/rndis.h1
-rw-r--r--include/linux/rodata_test.h6
-rw-r--r--include/linux/rolling_buffer.h61
-rw-r--r--include/linux/root_dev.h11
-rw-r--r--include/linux/rpmb.h167
-rw-r--r--include/linux/rpmsg.h144
-rw-r--r--include/linux/rpmsg/byteorder.h67
-rw-r--r--include/linux/rpmsg/mtk_rpmsg.h38
-rw-r--r--include/linux/rpmsg/ns.h45
-rw-r--r--include/linux/rpmsg/qcom_glink.h34
-rw-r--r--include/linux/rpmsg/qcom_smd.h6
-rw-r--r--include/linux/rseq.h166
-rw-r--r--include/linux/rseq_entry.h616
-rw-r--r--include/linux/rseq_types.h164
-rw-r--r--include/linux/rslib.h75
-rw-r--r--include/linux/rtc.h181
-rw-r--r--include/linux/rtc/ds1685.h22
-rw-r--r--include/linux/rtc/m48t59.h8
-rw-r--r--include/linux/rtc/rtc-omap.h7
-rw-r--r--include/linux/rtc/sirfsoc_rtciobrg.h22
-rw-r--r--include/linux/rtmutex.h132
-rw-r--r--include/linux/rtnetlink.h166
-rw-r--r--include/linux/rtsx_common.h (renamed from include/linux/mfd/rtsx_common.h)15
-rw-r--r--include/linux/rtsx_pci.h (renamed from include/linux/mfd/rtsx_pci.h)408
-rw-r--r--include/linux/rtsx_usb.h (renamed from include/linux/mfd/rtsx_usb.h)30
-rw-r--r--include/linux/rv.h131
-rw-r--r--include/linux/rw_hint.h25
-rw-r--r--include/linux/rwbase_rt.h44
-rw-r--r--include/linux/rwlock.h26
-rw-r--r--include/linux/rwlock_api_smp.h34
-rw-r--r--include/linux/rwlock_rt.h150
-rw-r--r--include/linux/rwlock_types.h52
-rw-r--r--include/linux/rwsem-spinlock.h46
-rw-r--r--include/linux/rwsem.h198
-rw-r--r--include/linux/rxrpc.h79
-rw-r--r--include/linux/s3c_adc_battery.h41
-rw-r--r--include/linux/sa11x0-dma.h24
-rw-r--r--include/linux/sbitmap.h389
-rw-r--r--include/linux/scatterlist.h425
-rw-r--r--include/linux/scc.h1
-rw-r--r--include/linux/sched.h1717
-rw-r--r--include/linux/sched/affinity.h1
-rw-r--r--include/linux/sched/autogroup.h1
-rw-r--r--include/linux/sched/clock.h26
-rw-r--r--include/linux/sched/cond_resched.h1
-rw-r--r--include/linux/sched/coredump.h65
-rw-r--r--include/linux/sched/cpufreq.h21
-rw-r--r--include/linux/sched/cputime.h30
-rw-r--r--include/linux/sched/deadline.h25
-rw-r--r--include/linux/sched/debug.h10
-rw-r--r--include/linux/sched/ext.h257
-rw-r--r--include/linux/sched/hotplug.h7
-rw-r--r--include/linux/sched/idle.h66
-rw-r--r--include/linux/sched/init.h1
-rw-r--r--include/linux/sched/isolation.h80
-rw-r--r--include/linux/sched/jobctl.h11
-rw-r--r--include/linux/sched/loadavg.h27
-rw-r--r--include/linux/sched/mm.h448
-rw-r--r--include/linux/sched/nohz.h21
-rw-r--r--include/linux/sched/numa_balancing.h21
-rw-r--r--include/linux/sched/posix-timers.h1
-rw-r--r--include/linux/sched/prio.h20
-rw-r--r--include/linux/sched/rseq_api.h1
-rw-r--r--include/linux/sched/rt.h55
-rw-r--r--include/linux/sched/sd_flags.h162
-rw-r--r--include/linux/sched/signal.h381
-rw-r--r--include/linux/sched/smt.h20
-rw-r--r--include/linux/sched/stat.h20
-rw-r--r--include/linux/sched/sysctl.h71
-rw-r--r--include/linux/sched/task.h162
-rw-r--r--include/linux/sched/task_flags.h1
-rw-r--r--include/linux/sched/task_stack.h42
-rw-r--r--include/linux/sched/thread_info_api.h1
-rw-r--r--include/linux/sched/topology.h181
-rw-r--r--include/linux/sched/types.h23
-rw-r--r--include/linux/sched/user.h37
-rw-r--r--include/linux/sched/vhost_task.h14
-rw-r--r--include/linux/sched/wake_q.h57
-rw-r--r--include/linux/sched/xacct.h1
-rw-r--r--include/linux/sched_clock.h40
-rw-r--r--include/linux/scif.h1339
-rw-r--r--include/linux/scmi_imx_protocol.h102
-rw-r--r--include/linux/scmi_protocol.h1114
-rw-r--r--include/linux/scpi_protocol.h27
-rw-r--r--include/linux/screen_info.h148
-rw-r--r--include/linux/scs.h86
-rw-r--r--include/linux/sctp.h324
-rw-r--r--include/linux/scx200.h1
-rw-r--r--include/linux/scx200_gpio.h1
-rw-r--r--include/linux/sdb.h159
-rw-r--r--include/linux/sdla.h244
-rw-r--r--include/linux/seccomp.h67
-rw-r--r--include/linux/seccomp_types.h35
-rw-r--r--include/linux/secretmem.h36
-rw-r--r--include/linux/securebits.h1
-rw-r--r--include/linux/security.h1149
-rw-r--r--include/linux/sed-opal-key.h26
-rw-r--r--include/linux/sed-opal.h26
-rw-r--r--include/linux/seg6.h1
-rw-r--r--include/linux/seg6_genl.h1
-rw-r--r--include/linux/seg6_hmac.h1
-rw-r--r--include/linux/seg6_iptunnel.h1
-rw-r--r--include/linux/seg6_local.h6
-rw-r--r--include/linux/selection.h42
-rw-r--r--include/linux/selinux.h35
-rw-r--r--include/linux/sem.h52
-rw-r--r--include/linux/sem_types.h13
-rw-r--r--include/linux/semaphore.h30
-rw-r--r--include/linux/seq_buf.h84
-rw-r--r--include/linux/seq_file.h127
-rw-r--r--include/linux/seq_file_net.h24
-rw-r--r--include/linux/seqlock.h1311
-rw-r--r--include/linux/seqlock_api.h1
-rw-r--r--include/linux/seqlock_types.h93
-rw-r--r--include/linux/seqno-fence.h117
-rw-r--r--include/linux/serdev.h102
-rw-r--r--include/linux/serial.h27
-rw-r--r--include/linux/serial_8250.h125
-rw-r--r--include/linux/serial_bcm63xx.h1
-rw-r--r--include/linux/serial_core.h1045
-rw-r--r--include/linux/serial_max3100.h52
-rw-r--r--include/linux/serial_pnx8xxx.h80
-rw-r--r--include/linux/serial_s3c.h44
-rw-r--r--include/linux/serial_sci.h3
-rw-r--r--include/linux/serio.h12
-rw-r--r--include/linux/set_memory.h83
-rw-r--r--include/linux/sfi.h210
-rw-r--r--include/linux/sfi_acpi.h93
-rw-r--r--include/linux/sfp.h669
-rw-r--r--include/linux/sh_clk.h1
-rw-r--r--include/linux/sh_dma.h5
-rw-r--r--include/linux/sh_eth.h5
-rw-r--r--include/linux/sh_intc.h12
-rw-r--r--include/linux/sh_timer.h1
-rw-r--r--include/linux/shdma-base.h9
-rw-r--r--include/linux/shm.h32
-rw-r--r--include/linux/shmem_fs.h206
-rw-r--r--include/linux/shrinker.h109
-rw-r--r--include/linux/signal.h125
-rw-r--r--include/linux/signal_types.h30
-rw-r--r--include/linux/signalfd.h1
-rw-r--r--include/linux/siox.h84
-rw-r--r--include/linux/siphash.h54
-rw-r--r--include/linux/sirfsoc_dma.h6
-rw-r--r--include/linux/sizes.h33
-rw-r--r--include/linux/skb_array.h38
-rw-r--r--include/linux/skbuff.h2962
-rw-r--r--include/linux/skbuff_ref.h74
-rw-r--r--include/linux/skmsg.h575
-rw-r--r--include/linux/slab.h1230
-rw-r--r--include/linux/slab_def.h102
-rw-r--r--include/linux/slimbus.h212
-rw-r--r--include/linux/slub_def.h176
-rw-r--r--include/linux/sm501-regs.h5
-rw-r--r--include/linux/sm501.h17
-rw-r--r--include/linux/smc911x.h13
-rw-r--r--include/linux/smc91x.h1
-rw-r--r--include/linux/smp.h173
-rw-r--r--include/linux/smp_types.h69
-rw-r--r--include/linux/smpboot.h18
-rw-r--r--include/linux/smsc911x.h15
-rw-r--r--include/linux/smscphy.h45
-rw-r--r--include/linux/soc/airoha/airoha_offload.h317
-rw-r--r--include/linux/soc/amd/isp4_misc.h12
-rw-r--r--include/linux/soc/amlogic/meson-canvas.h66
-rw-r--r--include/linux/soc/andes/irq.h18
-rw-r--r--include/linux/soc/apple/rtkit.h175
-rw-r--r--include/linux/soc/apple/sart.h53
-rw-r--r--include/linux/soc/brcmstb/brcmstb.h34
-rw-r--r--include/linux/soc/cirrus/ep93xx.h38
-rw-r--r--include/linux/soc/dove/pmu.h1
-rw-r--r--include/linux/soc/ixp4xx/cpu.h120
-rw-r--r--include/linux/soc/ixp4xx/npe.h40
-rw-r--r--include/linux/soc/ixp4xx/qmgr.h88
-rw-r--r--include/linux/soc/marvell/octeontx2/asm.h57
-rw-r--r--include/linux/soc/marvell/silicons.h25
-rw-r--r--include/linux/soc/mediatek/dvfsrc.h36
-rw-r--r--include/linux/soc/mediatek/infracfg.h435
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h516
-rw-r--r--include/linux/soc/mediatek/mtk-mmsys.h115
-rw-r--r--include/linux/soc/mediatek/mtk-mutex.h90
-rw-r--r--include/linux/soc/mediatek/mtk_sip_svc.h31
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h333
-rw-r--r--include/linux/soc/mmp/cputype.h65
-rw-r--r--include/linux/soc/nxp/lpc32xx-misc.h33
-rw-r--r--include/linux/soc/pxa/cpu.h252
-rw-r--r--include/linux/soc/pxa/mfp.h470
-rw-r--r--include/linux/soc/pxa/smemc.h29
-rw-r--r--include/linux/soc/qcom/apr.h197
-rw-r--r--include/linux/soc/qcom/geni-se.h539
-rw-r--r--include/linux/soc/qcom/irq.h34
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h242
-rw-r--r--include/linux/soc/qcom/mdt_loader.h57
-rw-r--r--include/linux/soc/qcom/pdr.h29
-rw-r--r--include/linux/soc/qcom/pmic_glink.h33
-rw-r--r--include/linux/soc/qcom/qcom-pbs.h30
-rw-r--r--include/linux/soc/qcom/qcom_aoss.h38
-rw-r--r--include/linux/soc/qcom/qmi.h272
-rw-r--r--include/linux/soc/qcom/smd-rpm.h37
-rw-r--r--include/linux/soc/qcom/smem.h9
-rw-r--r--include/linux/soc/qcom/smem_state.h9
-rw-r--r--include/linux/soc/qcom/socinfo.h115
-rw-r--r--include/linux/soc/qcom/ubwc.h76
-rw-r--r--include/linux/soc/qcom/wcnss_ctrl.h1
-rw-r--r--include/linux/soc/renesas/r9a06g032-sysctrl.h11
-rw-r--r--include/linux/soc/renesas/rcar-rst.h3
-rw-r--r--include/linux/soc/renesas/rcar-sysc.h14
-rw-r--r--include/linux/soc/samsung/exynos-chipid.h50
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h18
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h394
-rw-r--r--include/linux/soc/samsung/s3c-pm.h36
-rw-r--r--include/linux/soc/sunxi/sunxi_sram.h2
-rw-r--r--include/linux/soc/ti/k3-ringacc.h270
-rw-r--r--include/linux/soc/ti/knav_dma.h24
-rw-r--r--include/linux/soc/ti/knav_qmss.h13
-rw-r--r--include/linux/soc/ti/omap1-io.h143
-rw-r--r--include/linux/soc/ti/omap1-mux.h311
-rw-r--r--include/linux/soc/ti/omap1-soc.h163
-rw-r--r--include/linux/soc/ti/omap1-usb.h116
-rw-r--r--include/linux/soc/ti/ti-msgmgr.h18
-rw-r--r--include/linux/soc/ti/ti_sci_inta_msi.h21
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h483
-rw-r--r--include/linux/sock_diag.h25
-rw-r--r--include/linux/socket.h169
-rw-r--r--include/linux/sockptr.h172
-rw-r--r--include/linux/softirq.h1
-rw-r--r--include/linux/sonet.h1
-rw-r--r--include/linux/sony-laptop.h34
-rw-r--r--include/linux/sonypi.h16
-rw-r--r--include/linux/sort.h31
-rw-r--r--include/linux/sound.h3
-rw-r--r--include/linux/soundwire/sdw.h1202
-rw-r--r--include/linux/soundwire/sdw_amd.h174
-rw-r--r--include/linux/soundwire/sdw_intel.h465
-rw-r--r--include/linux/soundwire/sdw_registers.h360
-rw-r--r--include/linux/soundwire/sdw_type.h37
-rw-r--r--include/linux/spi/ad7877.h1
-rw-r--r--include/linux/spi/adi_spi3.h254
-rw-r--r--include/linux/spi/ads7846.h18
-rw-r--r--include/linux/spi/altera.h50
-rw-r--r--include/linux/spi/at73c213.h1
-rw-r--r--include/linux/spi/at86rf230.h28
-rw-r--r--include/linux/spi/cc2520.h26
-rw-r--r--include/linux/spi/corgi_lcd.h6
-rw-r--r--include/linux/spi/ds1305.h1
-rw-r--r--include/linux/spi/eeprom.h3
-rw-r--r--include/linux/spi/flash.h1
-rw-r--r--include/linux/spi/ifx_modem.h19
-rw-r--r--include/linux/spi/l4f00242t03.h25
-rw-r--r--include/linux/spi/libertas_spi.h6
-rw-r--r--include/linux/spi/lms283gf05.h24
-rw-r--r--include/linux/spi/max7301.h5
-rw-r--r--include/linux/spi/mc33880.h1
-rw-r--r--include/linux/spi/mcp23s08.h17
-rw-r--r--include/linux/spi/mmc_spi.h25
-rw-r--r--include/linux/spi/mxs-spi.h11
-rw-r--r--include/linux/spi/offload/consumer.h39
-rw-r--r--include/linux/spi/offload/provider.h47
-rw-r--r--include/linux/spi/offload/types.h109
-rw-r--r--include/linux/spi/pxa2xx_spi.h59
-rw-r--r--include/linux/spi/rspi.h26
-rw-r--r--include/linux/spi/s3c24xx.h28
-rw-r--r--include/linux/spi/sh_hspi.h10
-rw-r--r--include/linux/spi/sh_msiof.h130
-rw-r--r--include/linux/spi/spi-fsl-dspi.h23
-rw-r--r--include/linux/spi/spi-mem.h470
-rw-r--r--include/linux/spi/spi.h1047
-rw-r--r--include/linux/spi/spi_bitbang.h15
-rw-r--r--include/linux/spi/spi_gpio.h54
-rw-r--r--include/linux/spi/spi_oc_tiny.h5
-rw-r--r--include/linux/spi/tdo24m.h1
-rw-r--r--include/linux/spi/tle62x0.h10
-rw-r--r--include/linux/spi/xilinx_spi.h16
-rw-r--r--include/linux/spinlock.h307
-rw-r--r--include/linux/spinlock_api.h1
-rw-r--r--include/linux/spinlock_api_smp.h26
-rw-r--r--include/linux/spinlock_api_up.h3
-rw-r--r--include/linux/spinlock_rt.h155
-rw-r--r--include/linux/spinlock_types.h92
-rw-r--r--include/linux/spinlock_types_raw.h73
-rw-r--r--include/linux/spinlock_types_up.h4
-rw-r--r--include/linux/spinlock_up.h16
-rw-r--r--include/linux/splice.h44
-rw-r--r--include/linux/spmi.h17
-rw-r--r--include/linux/sprintf.h31
-rw-r--r--include/linux/sram.h14
-rw-r--r--include/linux/srcu.h523
-rw-r--r--include/linux/srcutiny.h108
-rw-r--r--include/linux/srcutree.h324
-rw-r--r--include/linux/ssb/ssb.h17
-rw-r--r--include/linux/ssb/ssb_driver_chipcommon.h3
-rw-r--r--include/linux/ssb/ssb_driver_extif.h5
-rw-r--r--include/linux/ssb/ssb_driver_gige.h17
-rw-r--r--include/linux/ssb/ssb_driver_mips.h1
-rw-r--r--include/linux/ssb/ssb_driver_pci.h1
-rw-r--r--include/linux/ssb/ssb_embedded.h1
-rw-r--r--include/linux/ssb/ssb_regs.h1
-rw-r--r--include/linux/ssbi.h10
-rw-r--r--include/linux/stackdepot.h257
-rw-r--r--include/linux/stackprotector.h22
-rw-r--r--include/linux/stacktrace.h102
-rw-r--r--include/linux/start_kernel.h3
-rw-r--r--include/linux/stat.h36
-rw-r--r--include/linux/statfs.h18
-rw-r--r--include/linux/static_call.h352
-rw-r--r--include/linux/static_call_types.h107
-rw-r--r--include/linux/stdarg.h11
-rw-r--r--include/linux/stddef.h116
-rw-r--r--include/linux/ste_modem_shm.h56
-rw-r--r--include/linux/stm.h24
-rw-r--r--include/linux/stmmac.h190
-rw-r--r--include/linux/stmp3xxx_rtc_wdt.h3
-rw-r--r--include/linux/stmp_device.h6
-rw-r--r--include/linux/stop_machine.h102
-rw-r--r--include/linux/string.h627
-rw-r--r--include/linux/string_choices.h97
-rw-r--r--include/linux/string_helpers.h75
-rw-r--r--include/linux/stringhash.h5
-rw-r--r--include/linux/stringify.h2
-rw-r--r--include/linux/sudmac.h52
-rw-r--r--include/linux/sungem_phy.h5
-rw-r--r--include/linux/sunrpc/addr.h1
-rw-r--r--include/linux/sunrpc/auth.h124
-rw-r--r--include/linux/sunrpc/auth_gss.h4
-rw-r--r--include/linux/sunrpc/bc_xprt.h44
-rw-r--r--include/linux/sunrpc/cache.h115
-rw-r--r--include/linux/sunrpc/clnt.h78
-rw-r--r--include/linux/sunrpc/debug.h31
-rw-r--r--include/linux/sunrpc/gss_api.h14
-rw-r--r--include/linux/sunrpc/gss_asn1.h81
-rw-r--r--include/linux/sunrpc/gss_err.h3
-rw-r--r--include/linux/sunrpc/gss_krb5.h210
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h4
-rw-r--r--include/linux/sunrpc/metrics.h12
-rw-r--r--include/linux/sunrpc/msg_prot.h33
-rw-r--r--include/linux/sunrpc/rdma_rn.h27
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h17
-rw-r--r--include/linux/sunrpc/rpc_rdma.h138
-rw-r--r--include/linux/sunrpc/rpc_rdma_cid.h24
-rw-r--r--include/linux/sunrpc/sched.h100
-rw-r--r--include/linux/sunrpc/stats.h28
-rw-r--r--include/linux/sunrpc/svc.h504
-rw-r--r--include/linux/sunrpc/svc_rdma.h279
-rw-r--r--include/linux/sunrpc/svc_rdma_pcl.h128
-rw-r--r--include/linux/sunrpc/svc_xprt.h106
-rw-r--r--include/linux/sunrpc/svcauth.h72
-rw-r--r--include/linux/sunrpc/svcauth_gss.h6
-rw-r--r--include/linux/sunrpc/svcsock.h33
-rw-r--r--include/linux/sunrpc/timer.h1
-rw-r--r--include/linux/sunrpc/types.h1
-rw-r--r--include/linux/sunrpc/xdr.h483
-rw-r--r--include/linux/sunrpc/xdrgen/_builtins.h243
-rw-r--r--include/linux/sunrpc/xdrgen/_defs.h35
-rw-r--r--include/linux/sunrpc/xdrgen/nfs4_1.h153
-rw-r--r--include/linux/sunrpc/xprt.h171
-rw-r--r--include/linux/sunrpc/xprtmultipath.h19
-rw-r--r--include/linux/sunrpc/xprtrdma.h7
-rw-r--r--include/linux/sunrpc/xprtsock.h53
-rw-r--r--include/linux/sunserialcore.h1
-rw-r--r--include/linux/sunxi-rsb.h2
-rw-r--r--include/linux/superhyway.h107
-rw-r--r--include/linux/surface_acpi_notify.h39
-rw-r--r--include/linux/surface_aggregator/controller.h994
-rw-r--r--include/linux/surface_aggregator/device.h632
-rw-r--r--include/linux/surface_aggregator/serial_hub.h691
-rw-r--r--include/linux/suspend.h330
-rw-r--r--include/linux/svga.h1
-rw-r--r--include/linux/sw842.h1
-rw-r--r--include/linux/swab.h27
-rw-r--r--include/linux/swait.h122
-rw-r--r--include/linux/swait_api.h1
-rw-r--r--include/linux/swap.h548
-rw-r--r--include/linux/swap_cgroup.h19
-rw-r--r--include/linux/swap_slots.h30
-rw-r--r--include/linux/swapfile.h16
-rw-r--r--include/linux/swapops.h321
-rw-r--r--include/linux/swiotlb.h360
-rw-r--r--include/linux/switchtec.h526
-rw-r--r--include/linux/sxgbe_platform.h11
-rw-r--r--include/linux/sync_core.h35
-rw-r--r--include/linux/sync_file.h4
-rw-r--r--include/linux/sys.h1
-rw-r--r--include/linux/sys_info.h28
-rw-r--r--include/linux/sys_soc.h4
-rw-r--r--include/linux/syscall_user_dispatch.h51
-rw-r--r--include/linux/syscall_user_dispatch_types.h22
-rw-r--r--include/linux/syscalls.h1600
-rw-r--r--include/linux/syscalls_api.h1
-rw-r--r--include/linux/syscore_ops.h18
-rw-r--r--include/linux/sysctl.h301
-rw-r--r--include/linux/sysfb.h125
-rw-r--r--include/linux/sysfs.h386
-rw-r--r--include/linux/syslog.h27
-rw-r--r--include/linux/sysrq.h34
-rw-r--r--include/linux/sysv_fs.h213
-rw-r--r--include/linux/t10-pi.h41
-rw-r--r--include/linux/task_io_accounting.h1
-rw-r--r--include/linux/task_io_accounting_ops.h1
-rw-r--r--include/linux/task_work.h23
-rw-r--r--include/linux/taskstats_kern.h1
-rw-r--r--include/linux/tboot.h27
-rw-r--r--include/linux/tc.h5
-rw-r--r--include/linux/tca6416_keypad.h34
-rw-r--r--include/linux/tcp.h506
-rw-r--r--include/linux/tee_core.h427
-rw-r--r--include/linux/tee_drv.h396
-rw-r--r--include/linux/tegra-icc.h65
-rw-r--r--include/linux/termios_internal.h49
-rw-r--r--include/linux/text-patching.h15
-rw-r--r--include/linux/textsearch.h7
-rw-r--r--include/linux/textsearch_fsm.h1
-rw-r--r--include/linux/tfrc.h55
-rw-r--r--include/linux/thermal.h547
-rw-r--r--include/linux/thinkpad_acpi.h15
-rw-r--r--include/linux/thread_info.h177
-rw-r--r--include/linux/threads.h9
-rw-r--r--include/linux/thunderbolt.h698
-rw-r--r--include/linux/ti-emif-sram.h139
-rw-r--r--include/linux/ti_wilink_st.h23
-rw-r--r--include/linux/tick.h152
-rw-r--r--include/linux/tifm.h8
-rw-r--r--include/linux/timb_dma.h14
-rw-r--r--include/linux/timb_gpio.h14
-rw-r--r--include/linux/time.h260
-rw-r--r--include/linux/time32.h72
-rw-r--r--include/linux/time64.h133
-rw-r--r--include/linux/time_namespace.h178
-rw-r--r--include/linux/timecounter.h30
-rw-r--r--include/linux/timekeeper_internal.h179
-rw-r--r--include/linux/timekeeping.h344
-rw-r--r--include/linux/timer.h211
-rw-r--r--include/linux/timer_types.h23
-rw-r--r--include/linux/timerfd.h1
-rw-r--r--include/linux/timeriomem-rng.h10
-rw-r--r--include/linux/timerqueue.h30
-rw-r--r--include/linux/timerqueue_types.h17
-rw-r--r--include/linux/timex.h23
-rw-r--r--include/linux/tnum.h129
-rw-r--r--include/linux/topology.h177
-rw-r--r--include/linux/torture.h88
-rw-r--r--include/linux/toshiba.h12
-rw-r--r--include/linux/tpm.h551
-rw-r--r--include/linux/tpm_command.h1
-rw-r--r--include/linux/tpm_eventlog.h294
-rw-r--r--include/linux/tpm_svsm.h149
-rw-r--r--include/linux/trace.h73
-rw-r--r--include/linux/trace_clock.h1
-rw-r--r--include/linux/trace_events.h617
-rw-r--r--include/linux/trace_recursion.h189
-rw-r--r--include/linux/trace_seq.h53
-rw-r--r--include/linux/tracefs.h76
-rw-r--r--include/linux/tracehook.h196
-rw-r--r--include/linux/tracepoint-defs.h66
-rw-r--r--include/linux/tracepoint.h414
-rw-r--r--include/linux/transport_class.h15
-rw-r--r--include/linux/ts-nbus.h18
-rw-r--r--include/linux/tsacct_kern.h1
-rw-r--r--include/linux/tsm-mr.h89
-rw-r--r--include/linux/tsm.h129
-rw-r--r--include/linux/tty.h765
-rw-r--r--include/linux/tty_buffer.h57
-rw-r--r--include/linux/tty_driver.h716
-rw-r--r--include/linux/tty_flip.h94
-rw-r--r--include/linux/tty_ldisc.h378
-rw-r--r--include/linux/tty_port.h287
-rw-r--r--include/linux/turris-omnia-mcu-interface.h397
-rw-r--r--include/linux/turris-signing-key.h35
-rw-r--r--include/linux/typecheck.h10
-rw-r--r--include/linux/types.h112
-rw-r--r--include/linux/u64_stats_sync.h212
-rw-r--r--include/linux/u64_stats_sync_api.h1
-rw-r--r--include/linux/uacce.h161
-rw-r--r--include/linux/uaccess.h765
-rw-r--r--include/linux/ubsan.h14
-rw-r--r--include/linux/ucb1400.h165
-rw-r--r--include/linux/ucopysize.h63
-rw-r--r--include/linux/ucs2_string.h2
-rw-r--r--include/linux/udp.h187
-rw-r--r--include/linux/uidgid.h31
-rw-r--r--include/linux/uidgid_types.h15
-rw-r--r--include/linux/uinput.h81
-rw-r--r--include/linux/uio.h380
-rw-r--r--include/linux/uio_driver.h70
-rw-r--r--include/linux/ulpi/driver.h3
-rw-r--r--include/linux/ulpi/interface.h1
-rw-r--r--include/linux/ulpi/regs.h1
-rw-r--r--include/linux/umh.h68
-rw-r--r--include/linux/unaligned.h146
-rw-r--r--include/linux/unaligned/access_ok.h67
-rw-r--r--include/linux/unaligned/be_byteshift.h70
-rw-r--r--include/linux/unaligned/be_memmove.h36
-rw-r--r--include/linux/unaligned/be_struct.h36
-rw-r--r--include/linux/unaligned/generic.h68
-rw-r--r--include/linux/unaligned/le_byteshift.h70
-rw-r--r--include/linux/unaligned/le_memmove.h36
-rw-r--r--include/linux/unaligned/le_struct.h36
-rw-r--r--include/linux/unaligned/memmove.h45
-rw-r--r--include/linux/unaligned/packed_struct.h2
-rw-r--r--include/linux/unicode.h83
-rw-r--r--include/linux/union_find.h41
-rw-r--r--include/linux/units.h119
-rw-r--r--include/linux/unroll.h78
-rw-r--r--include/linux/unwind_deferred.h79
-rw-r--r--include/linux/unwind_deferred_types.h55
-rw-r--r--include/linux/unwind_user.h14
-rw-r--r--include/linux/unwind_user_types.h46
-rw-r--r--include/linux/uprobes.h186
-rw-r--r--include/linux/usb.h338
-rw-r--r--include/linux/usb/association.h150
-rw-r--r--include/linux/usb/atmel_usba_udc.h23
-rw-r--r--include/linux/usb/audio-v2.h51
-rw-r--r--include/linux/usb/audio-v3.h454
-rw-r--r--include/linux/usb/audio.h4
-rw-r--r--include/linux/usb/c67x00.h16
-rw-r--r--include/linux/usb/ccid.h39
-rw-r--r--include/linux/usb/cdc-wdm.h8
-rw-r--r--include/linux/usb/cdc.h5
-rw-r--r--include/linux/usb/cdc_ncm.h23
-rw-r--r--include/linux/usb/ch9.h58
-rw-r--r--include/linux/usb/chipidea.h26
-rw-r--r--include/linux/usb/composite.h91
-rw-r--r--include/linux/usb/ehci-dbgp.h1
-rw-r--r--include/linux/usb/ehci_def.h50
-rw-r--r--include/linux/usb/ehci_pdriver.h16
-rw-r--r--include/linux/usb/ezusb.h1
-rw-r--r--include/linux/usb/func_utils.h86
-rw-r--r--include/linux/usb/functionfs.h1
-rw-r--r--include/linux/usb/g_hid.h15
-rw-r--r--include/linux/usb/gadget.h204
-rw-r--r--include/linux/usb/gadget_configfs.h8
-rw-r--r--include/linux/usb/gpio_vbus.h32
-rw-r--r--include/linux/usb/hcd.h129
-rw-r--r--include/linux/usb/input.h5
-rw-r--r--include/linux/usb/iowarrior.h1
-rw-r--r--include/linux/usb/irda.h14
-rw-r--r--include/linux/usb/isp116x.h1
-rw-r--r--include/linux/usb/isp1301.h11
-rw-r--r--include/linux/usb/isp1362.h1
-rw-r--r--include/linux/usb/isp1760.h18
-rw-r--r--include/linux/usb/ljca.h145
-rw-r--r--include/linux/usb/m66592.h15
-rw-r--r--include/linux/usb/mctp-usb.h30
-rw-r--r--include/linux/usb/midi-v2.h94
-rw-r--r--include/linux/usb/msm_hsusb_hw.h77
-rw-r--r--include/linux/usb/musb-ux500.h11
-rw-r--r--include/linux/usb/musb.h40
-rw-r--r--include/linux/usb/net2280.h15
-rw-r--r--include/linux/usb/of.h31
-rw-r--r--include/linux/usb/ohci_pdriver.h15
-rw-r--r--include/linux/usb/onboard_dev.h18
-rw-r--r--include/linux/usb/otg-fsm.h25
-rw-r--r--include/linux/usb/otg.h4
-rw-r--r--include/linux/usb/pd.h623
-rw-r--r--include/linux/usb/pd_ado.h42
-rw-r--r--include/linux/usb/pd_bdo.h22
-rw-r--r--include/linux/usb/pd_ext_sdb.h27
-rw-r--r--include/linux/usb/pd_vdo.h527
-rw-r--r--include/linux/usb/phy.h42
-rw-r--r--include/linux/usb/phy_companion.h13
-rw-r--r--include/linux/usb/quirks.h21
-rw-r--r--include/linux/usb/r8152.h40
-rw-r--r--include/linux/usb/r8a66597.h15
-rw-r--r--include/linux/usb/renesas_usbhs.h63
-rw-r--r--include/linux/usb/rndis_host.h16
-rw-r--r--include/linux/usb/role.h126
-rw-r--r--include/linux/usb/rzv2m_usb3drd.h20
-rw-r--r--include/linux/usb/samsung_usb_phy.h16
-rw-r--r--include/linux/usb/serial.h144
-rw-r--r--include/linux/usb/sl811.h1
-rw-r--r--include/linux/usb/storage.h13
-rw-r--r--include/linux/usb/tcpci.h255
-rw-r--r--include/linux/usb/tcpm.h195
-rw-r--r--include/linux/usb/tegra_usb_phy.h34
-rw-r--r--include/linux/usb/tilegx.h34
-rw-r--r--include/linux/usb/typec.h275
-rw-r--r--include/linux/usb/typec_altmode.h234
-rw-r--r--include/linux/usb/typec_dp.h131
-rw-r--r--include/linux/usb/typec_mux.h144
-rw-r--r--include/linux/usb/typec_retimer.h45
-rw-r--r--include/linux/usb/typec_tbt.h61
-rw-r--r--include/linux/usb/uas.h1
-rw-r--r--include/linux/usb/ulpi.h15
-rw-r--r--include/linux/usb/usb338x.h47
-rw-r--r--include/linux/usb/usb_phy_generic.h13
-rw-r--r--include/linux/usb/usbio.h177
-rw-r--r--include/linux/usb/usbnet.h62
-rw-r--r--include/linux/usb/uvc.h189
-rw-r--r--include/linux/usb/webusb.h80
-rw-r--r--include/linux/usb/wusb-wa.h303
-rw-r--r--include/linux/usb/wusb.h377
-rw-r--r--include/linux/usb/xhci-dbgp.h7
-rw-r--r--include/linux/usb/xhci-sideband.h111
-rw-r--r--include/linux/usb_usual.h7
-rw-r--r--include/linux/usbdevice_fs.h3
-rw-r--r--include/linux/user-return-notifier.h1
-rw-r--r--include/linux/user_events.h84
-rw-r--r--include/linux/user_namespace.h129
-rw-r--r--include/linux/userfaultfd_k.h357
-rw-r--r--include/linux/util_macros.h153
-rw-r--r--include/linux/uts.h1
-rw-r--r--include/linux/uts_namespace.h65
-rw-r--r--include/linux/utsname.h50
-rw-r--r--include/linux/uuid.h53
-rw-r--r--include/linux/uwb.h831
-rw-r--r--include/linux/uwb/debug-cmd.h68
-rw-r--r--include/linux/uwb/spec.h781
-rw-r--r--include/linux/uwb/umc.h193
-rw-r--r--include/linux/uwb/whci.h117
-rw-r--r--include/linux/vbox_utils.h59
-rw-r--r--include/linux/vdpa.h630
-rw-r--r--include/linux/vdso_datastore.h10
-rw-r--r--include/linux/verification.h37
-rw-r--r--include/linux/vermagic.h26
-rw-r--r--include/linux/vexpress.h39
-rw-r--r--include/linux/vfio.h438
-rw-r--r--include/linux/vfio_pci_core.h233
-rw-r--r--include/linux/vfs.h1
-rw-r--r--include/linux/vfsdebug.h45
-rw-r--r--include/linux/vga_switcheroo.h17
-rw-r--r--include/linux/vgaarb.h147
-rw-r--r--include/linux/vhost_iotlb.h52
-rw-r--r--include/linux/via-core.h19
-rw-r--r--include/linux/via-gpio.h14
-rw-r--r--include/linux/via.h1
-rw-r--r--include/linux/via_i2c.h16
-rw-r--r--include/linux/videodev2.h3
-rw-r--r--include/linux/virtio.h222
-rw-r--r--include/linux/virtio_anchor.h19
-rw-r--r--include/linux/virtio_byteorder.h1
-rw-r--r--include/linux/virtio_caif.h6
-rw-r--r--include/linux/virtio_config.h487
-rw-r--r--include/linux/virtio_console.h38
-rw-r--r--include/linux/virtio_dma_buf.h37
-rw-r--r--include/linux/virtio_features.h89
-rw-r--r--include/linux/virtio_net.h377
-rw-r--r--include/linux/virtio_pci_admin.h34
-rw-r--r--include/linux/virtio_pci_legacy.h40
-rw-r--r--include/linux/virtio_pci_modern.h165
-rw-r--r--include/linux/virtio_ring.h54
-rw-r--r--include/linux/virtio_vsock.h203
-rw-r--r--include/linux/vlynq.h162
-rw-r--r--include/linux/vm_event_item.h100
-rw-r--r--include/linux/vm_sockets.h23
-rw-r--r--include/linux/vmacache.h38
-rw-r--r--include/linux/vmalloc.h302
-rw-r--r--include/linux/vmcore_info.h88
-rw-r--r--include/linux/vme.h189
-rw-r--r--include/linux/vmpressure.h5
-rw-r--r--include/linux/vmstat.h290
-rw-r--r--include/linux/vmw_vmci_api.h19
-rw-r--r--include/linux/vmw_vmci_defs.h195
-rw-r--r--include/linux/vringh.h111
-rw-r--r--include/linux/vt.h1
-rw-r--r--include/linux/vt_buffer.h22
-rw-r--r--include/linux/vt_kern.h97
-rw-r--r--include/linux/vtime.h169
-rw-r--r--include/linux/w1-gpio.h26
-rw-r--r--include/linux/w1.h31
-rw-r--r--include/linux/wait.h299
-rw-r--r--include/linux/wait_api.h1
-rw-r--r--include/linux/wait_bit.h563
-rw-r--r--include/linux/wanrouter.h10
-rw-r--r--include/linux/watch_queue.h133
-rw-r--r--include/linux/watchdog.h32
-rw-r--r--include/linux/wimax/debug.h526
-rw-r--r--include/linux/win_minmax.h5
-rw-r--r--include/linux/wireless.h16
-rw-r--r--include/linux/wkup_m3_ipc.h34
-rw-r--r--include/linux/wl12xx.h58
-rw-r--r--include/linux/wm97xx.h6
-rw-r--r--include/linux/wmi.h97
-rw-r--r--include/linux/wordpart.h57
-rw-r--r--include/linux/workqueue.h547
-rw-r--r--include/linux/workqueue_api.h1
-rw-r--r--include/linux/workqueue_types.h25
-rw-r--r--include/linux/writeback.h234
-rw-r--r--include/linux/ww_mutex.h147
-rw-r--r--include/linux/wwan.h205
-rw-r--r--include/linux/xarray.h1915
-rw-r--r--include/linux/xattr.h109
-rw-r--r--include/linux/xxhash.h189
-rw-r--r--include/linux/xz.h129
-rw-r--r--include/linux/yam.h17
-rw-r--r--include/linux/z2_battery.h17
-rw-r--r--include/linux/zbud.h22
-rw-r--r--include/linux/zlib.h8
-rw-r--r--include/linux/zorro.h15
-rw-r--r--include/linux/zpool.h110
-rw-r--r--include/linux/zsmalloc.h39
-rw-r--r--include/linux/zstd.h691
-rw-r--r--include/linux/zstd_errors.h87
-rw-r--r--include/linux/zstd_lib.h3160
-rw-r--r--include/linux/zswap.h74
3137 files changed, 344154 insertions, 114824 deletions
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h
index b24ff086a662..9c777d2c98f5 100644
--- a/include/linux/8250_pci.h
+++ b/include/linux/8250_pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for PCI support.
*/
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
deleted file mode 100644
index ee884168989f..000000000000
--- a/include/linux/a.out.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __A_OUT_GNU_H__
-#define __A_OUT_GNU_H__
-
-#include <uapi/linux/a.out.h>
-
-#ifndef __ASSEMBLY__
-#ifdef linux
-#include <asm/page.h>
-#if defined(__i386__) || defined(__mc68000__)
-#else
-#ifndef SEGMENT_SIZE
-#define SEGMENT_SIZE PAGE_SIZE
-#endif
-#endif
-#endif
-#endif /*__ASSEMBLY__ */
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acct.h b/include/linux/acct.h
index dccc2d4fe7de..2718c4854815 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* BSD Process Accounting for Linux - Definitions
*
@@ -19,11 +20,7 @@
#ifdef CONFIG_BSD_PROCESS_ACCT
-struct vfsmount;
-struct super_block;
-struct pacct_struct;
struct pid_namespace;
-extern int acct_parm[]; /* for sysctl */
extern void acct_collect(long exitcode, int group_dead);
extern void acct_process(void);
extern void acct_exit_ns(struct pid_namespace *);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 502af53ec012..fbf0c3a65f59 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1,52 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* acpi.h - ACPI Interface
*
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef _LINUX_ACPI_H
#define _LINUX_ACPI_H
+#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
#include <linux/resource_ext.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
+#include <linux/node.h>
+
+struct irq_domain;
+struct irq_domain_ops;
#ifndef _LINUX
#define _LINUX
#endif
#include <acpi/acpi.h>
+#include <acpi/acpi_numa.h>
#ifdef CONFIG_ACPI
#include <linux/list.h>
-#include <linux/mod_devicetable.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/fw_table.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_numa.h>
#include <acpi/acpi_io.h>
#include <asm/acpi.h>
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "ACPI")
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
{
return adev ? adev->handle : NULL;
@@ -56,6 +59,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+#define ACPI_HANDLE_FWNODE(fwnode) \
+ acpi_device_handle(to_acpi_device_node(fwnode))
static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
{
@@ -65,7 +70,7 @@ static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
if (!fwnode)
return NULL;
- fwnode->ops = &acpi_static_fwnode_ops;
+ fwnode_init(fwnode, &acpi_static_fwnode_ops);
return fwnode;
}
@@ -78,19 +83,6 @@ static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
kfree(fwnode);
}
-/**
- * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
- * the PCI-defined class-code information
- *
- * @_cls : the class, subclass, prog-if triple for this device
- * @_msk : the class mask for this device
- *
- * This macro is used to create a struct acpi_device_id that matches a
- * specific PCI class. The .id and .driver_data fields will be left
- * initialized with the default value.
- */
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
-
static inline bool has_acpi_companion(struct device *dev)
{
return is_acpi_device_node(dev->fwnode);
@@ -99,7 +91,7 @@ static inline bool has_acpi_companion(struct device *dev)
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
- ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL));
+ ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false));
}
static inline const char *acpi_dev_name(struct acpi_device *adev)
@@ -115,6 +107,8 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
+ ACPI_IRQ_MODEL_LPIC,
+ ACPI_IRQ_MODEL_RINTC,
ACPI_IRQ_MODEL_COUNT
};
@@ -139,12 +133,8 @@ enum acpi_address_range_id {
/* Table Handlers */
-
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
-typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header,
- const unsigned long end);
-
/* Debugger support */
struct acpi_debugger_ops {
@@ -218,39 +208,49 @@ static inline int acpi_debugger_notify_command_complete(void)
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-struct acpi_subtable_proc {
- int id;
- acpi_tbl_entry_handler handler;
- int count;
-};
-
void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
int acpi_table_init (void);
+
+static inline struct acpi_table_header *acpi_get_table_pointer(char *signature, u32 instance)
+{
+ struct acpi_table_header *table;
+ int status = acpi_get_table(signature, instance, &table);
+
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENOENT);
+ return table;
+}
+DEFINE_FREE(acpi_put_table, struct acpi_table_header *, if (!IS_ERR_OR_NULL(_T)) acpi_put_table(_T))
+
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
-int __init acpi_table_parse_entries(char *id, unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries);
-int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries(char *id,
+ unsigned long table_size, int entry_id,
+ acpi_tbl_entry_handler handler, unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries_array(char *id,
+ unsigned long table_size, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
int acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
+int __init_or_acpilib
+acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg);
+
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
-/* the following numa functions are architecture-dependent */
-void acpi_numa_slit_init (struct acpi_table_slit *slit);
-
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
static inline void
@@ -259,6 +259,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { }
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
+#if defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+void acpi_arch_dma_setup(struct device *dev);
+#else
+static inline void acpi_arch_dma_setup(struct device *dev) { }
+#endif
+
#ifdef CONFIG_ARM64
void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa);
#else
@@ -266,7 +272,11 @@ static inline void
acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
#endif
-int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
+#ifdef CONFIG_RISCV
+void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa);
+#else
+static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { }
+#endif
#ifndef PHYS_CPUID_INVALID
typedef u32 phys_cpuid_t;
@@ -283,8 +293,26 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
return phys_id == PHYS_CPUID_INVALID;
}
+
+int __init acpi_get_madt_revision(void);
+
/* Validate the processor object's proc_id */
bool acpi_duplicate_processor_id(int proc_id);
+/* Processor _CTS control */
+struct acpi_processor_power;
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+bool acpi_processor_claim_cst_control(void);
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info);
+#else
+static inline bool acpi_processor_claim_cst_control(void) { return false; }
+static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info)
+{
+ return -ENODEV;
+}
+#endif
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
@@ -293,6 +321,8 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+acpi_handle acpi_get_processor_handle(int cpu);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
@@ -312,19 +342,31 @@ static inline bool acpi_sci_irq_valid(void)
}
extern int sbf_port;
-extern unsigned long acpi_realmode_flags;
int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
+typedef struct fwnode_handle *(*acpi_gsi_domain_disp_fn)(u32);
+
void acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *fwnode);
+ acpi_gsi_domain_disp_fn fn);
+acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void);
+void acpi_set_gsi_to_irq_fallback(u32 (*)(u32));
+
+struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
+ unsigned int size,
+ struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data);
#ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
+{
+ return -1;
+}
#endif
/*
* This function undoes the effect of one call to acpi_register_gsi().
@@ -335,10 +377,18 @@ void acpi_unregister_gsi (u32 gsi);
struct pci_dev;
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin);
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
+#ifdef CONFIG_PCI
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
+#else
+static inline void acpi_penalize_sci_irq(int irq, int trigger,
+ int polarity)
+{
+}
+#endif
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -352,7 +402,9 @@ extern bool acpi_is_pnp_device(struct acpi_device *);
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
-typedef void (*wmi_notify_handler) (u32 value, void *context);
+typedef void (*wmi_notify_handler) (union acpi_object *data, void *context);
+
+int wmi_instance_count(const char *guid);
extern acpi_status wmi_evaluate_method(const char *guid, u8 instance,
u32 method_id,
@@ -365,8 +417,8 @@ extern acpi_status wmi_set_block(const char *guid, u8 instance,
extern acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler, void *data);
extern acpi_status wmi_remove_notify_handler(const char *guid);
-extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
extern bool wmi_has_guid(const char *guid);
+extern char *wmi_get_acpi_device_uid(const char *guid);
#endif /* CONFIG_ACPI_WMI */
@@ -385,15 +437,57 @@ extern bool wmi_has_guid(const char *guid);
extern char acpi_video_backlight_string[];
extern long acpi_is_video_device(acpi_handle handle);
-extern int acpi_blacklisted(void);
+
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
+#ifdef CONFIG_ACPI_THERMAL_LIB
+int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
+int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
+#endif
+
+#ifdef CONFIG_ACPI_HMAT
+int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord);
+#else
+static inline int acpi_get_genport_coordinates(u32 uid,
+ struct access_coordinate *coord)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
#ifdef CONFIG_ACPI_NUMA
-int acpi_map_pxm_to_online_node(int pxm);
+int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
+
+/**
+ * pxm_to_online_node - Map proximity ID to online node
+ * @pxm: ACPI proximity ID
+ *
+ * This is similar to pxm_to_node(), but always returns an online
+ * node. When the mapped node from a given proximity ID is offline, it
+ * looks up the node distance table and returns the nearest online node.
+ *
+ * ACPI device drivers, which are called after the NUMA initialization has
+ * completed in the kernel, can call this interface to obtain their device
+ * NUMA topology from ACPI tables. Such drivers do not have to deal with
+ * offline nodes. A node may be offline when SRAT memory entry does not exist,
+ * or NUMA is disabled, ex. "numa=off" on x86.
+ */
+static inline int pxm_to_online_node(int pxm)
+{
+ int node = pxm_to_node(pxm);
+
+ return numa_map_to_online_node(node);
+}
#else
-static inline int acpi_map_pxm_to_online_node(int pxm)
+static inline int pxm_to_online_node(int pxm)
+{
+ return 0;
+}
+static inline int acpi_map_pxm_to_node(int pxm)
{
return 0;
}
@@ -402,8 +496,6 @@ static inline int acpi_get_node(acpi_handle handle)
return 0;
}
#endif
-extern int acpi_paddr_to_node(u64 start_addr, u64 size);
-
extern int pnpacpi_disabled;
#define PXM_INVAL (-1)
@@ -414,7 +506,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
struct resource_win *win);
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
struct resource_win *win);
-unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable);
unsigned int acpi_dev_get_irq_type(int triggering, int polarity);
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res);
@@ -425,6 +517,7 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
void *preproc_data);
int acpi_dev_get_dma_resources(struct acpi_device *adev,
struct list_head *list);
+int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list);
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
unsigned long types);
@@ -444,15 +537,21 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
int acpi_resources_are_enforced(void);
#ifdef CONFIG_HIBERNATION
-void __init acpi_no_s4_hw_signature(void);
+extern int acpi_check_s4_hw_signature;
#endif
#ifdef CONFIG_PM_SLEEP
void __init acpi_old_suspend_ordering(void);
void __init acpi_nvs_nosave(void);
void __init acpi_nvs_nosave_s3(void);
+void __init acpi_sleep_no_blacklist(void);
#endif /* CONFIG_PM_SLEEP */
+int acpi_register_wakeup_handler(
+ int wake_irq, bool (*wakeup)(void *context), void *context);
+void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context);
+
struct acpi_osc_context {
char *uuid_str; /* UUID string */
int rev;
@@ -462,10 +561,16 @@ struct acpi_osc_context {
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
-/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+/* Number of _OSC capability DWORDS depends on bridge type */
+#define OSC_PCI_CAPABILITY_DWORDS 3
+#define OSC_CXL_CAPABILITY_DWORDS 5
+
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 to 5 are device-specific) */
#define OSC_QUERY_DWORD 0 /* DWORD 1 */
#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
+#define OSC_EXT_SUPPORT_DWORD 3 /* DWORD 4 */
+#define OSC_EXT_CONTROL_DWORD 4 /* DWORD 5 */
/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
#define OSC_QUERY_ENABLE 0x00000001 /* input */
@@ -484,10 +589,31 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPCV2_SUPPORT 0x00000040
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
+#define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200
+#define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400
+#define OSC_SB_GED_SUPPORT 0x00000800
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
+#define OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT 0x00002000
+#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
+#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000
+#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
+#define OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT 0x00080000
+#define OSC_SB_PRM_SUPPORT 0x00200000
+#define OSC_SB_FFH_OPR_SUPPORT 0x00400000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
+extern bool osc_sb_native_usb4_support_confirmed;
+extern bool osc_sb_cppc2_support_acked;
+extern bool osc_cpc_flexible_adr_space_confirmed;
+
+/* USB4 Capabilities */
+#define OSC_USB_USB3_TUNNELING 0x00000001
+#define OSC_USB_DP_TUNNELING 0x00000002
+#define OSC_USB_PCIE_TUNNELING 0x00000004
+#define OSC_USB_XDOMAIN 0x00000008
+
+extern u32 osc_sb_native_usb4_control;
/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
@@ -495,7 +621,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
#define OSC_PCI_MSI_SUPPORT 0x00000010
-#define OSC_PCI_SUPPORT_MASKS 0x0000001f
+#define OSC_PCI_EDR_SUPPORT 0x00000080
+#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
@@ -503,7 +630,31 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
-#define OSC_PCI_CONTROL_MASKS 0x0000001f
+#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
+#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080
+
+/* CXL _OSC: Capabilities DWORD 4: Support Field */
+#define OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT 0x00000001
+#define OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT 0x00000002
+#define OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT 0x00000004
+#define OSC_CXL_NATIVE_HP_SUPPORT 0x00000008
+
+/* CXL _OSC: Capabilities DWORD 5: Control Field */
+#define OSC_CXL_ERROR_REPORTING_CONTROL 0x00000001
+
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_CONTROL_DWORD];
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_EXT_CONTROL_DWORD];
+}
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
@@ -516,9 +667,6 @@ extern bool osc_pc_lpi_support_confirmed;
#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E
#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
- u32 *mask, u32 req);
-
/* Enable _OST when all relevant hotplug operations are enabled */
#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \
@@ -581,17 +729,20 @@ extern int acpi_nvs_register(__u64 start, __u64 size);
extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
void *data);
+const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids,
+ const struct acpi_device *adev);
+
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
+const void *acpi_device_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
-int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
+int acpi_device_uevent_modalias(const struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
-void acpi_walk_dep_device_list(acpi_handle handle);
struct platform_device *acpi_create_platform_device(struct acpi_device *,
- struct property_entry *);
+ const struct property_entry *);
#define ACPI_PTR(_ptr) (_ptr)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -616,7 +767,27 @@ int acpi_reconfig_notifier_unregister(struct notifier_block *nb);
int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count);
int acpi_gtdt_map_ppi(int type);
bool acpi_gtdt_c3stop(int type);
-int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
+#endif
+
+#ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER
+static __always_inline void acpi_arch_set_root_pointer(u64 addr)
+{
+}
+#endif
+
+#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER
+static __always_inline u64 acpi_arch_get_root_pointer(void)
+{
+ return 0;
+}
+#endif
+
+int acpi_get_local_u64_address(acpi_handle handle, u64 *addr);
+int acpi_get_local_address(acpi_handle handle, u32 *addr);
+const char *acpi_get_subsystem_id(acpi_handle handle);
+
+#ifdef CONFIG_ACPI_MRRM
+int acpi_mrrm_max_mem_region(void);
#endif
#else /* !CONFIG_ACPI */
@@ -626,7 +797,11 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count)
#define ACPI_COMPANION(dev) (NULL)
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
+#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
+
+/* Get rid of the -Wunused-variable for adev */
+#define acpi_dev_uid_match(adev, uid2) (adev && false)
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false)
struct fwnode_handle;
@@ -640,32 +815,52 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
-static inline bool is_acpi_node(struct fwnode_handle *fwnode)
+struct acpi_device;
+
+static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
+{
+ return -ENODEV;
+}
+
+static inline struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
+{
+ return NULL;
+}
+
+static inline bool acpi_reduced_hardware(void)
+{
+ return false;
+}
+
+static inline void acpi_dev_put(struct acpi_device *adev) {}
+
+static inline bool is_acpi_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool is_acpi_data_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode)
+static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode,
const char *name)
{
return false;
@@ -676,6 +871,11 @@ static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
return NULL;
}
+static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline bool has_acpi_companion(struct device *dev)
{
return false;
@@ -708,9 +908,12 @@ static inline int acpi_boot_init(void)
return 0;
}
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
static inline void acpi_boot_table_init(void)
{
- return;
}
static inline int acpi_mps_check(void)
@@ -749,27 +952,53 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
struct acpi_device_id;
+static inline const struct acpi_device_id *acpi_match_acpi_device(
+ const struct acpi_device_id *ids, const struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline const struct acpi_device_id *acpi_match_device(
const struct acpi_device_id *ids, const struct device *dev)
{
return NULL;
}
+static inline const void *acpi_device_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
static inline bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
return false;
}
+static inline bool acpi_check_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 funcs)
+{
+ return false;
+}
+
static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
const guid_t *guid,
- int rev, int func,
+ u64 rev, u64 func,
union acpi_object *argv4)
{
return NULL;
}
-static inline int acpi_device_uevent_modalias(struct device *dev,
+static inline union acpi_object *acpi_evaluate_dsm_typed(acpi_handle handle,
+ const guid_t *guid,
+ u64 rev, u64 func,
+ union acpi_object *argv4,
+ acpi_object_type type)
+{
+ return NULL;
+}
+
+static inline int acpi_device_uevent_modalias(const struct device *dev,
struct kobj_uevent_env *env)
{
return -ENODEV;
@@ -781,7 +1010,14 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
-static inline bool acpi_dma_supported(struct acpi_device *adev)
+static inline struct platform_device *
+acpi_create_platform_device(struct acpi_device *adev,
+ const struct property_entry *properties)
+{
+ return NULL;
+}
+
+static inline bool acpi_dma_supported(const struct acpi_device *adev)
{
return false;
}
@@ -791,8 +1027,7 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
-static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr,
- u64 *offset, u64 *size)
+static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
{
return -ENODEV;
}
@@ -803,7 +1038,12 @@ static inline int acpi_dma_configure(struct device *dev,
return 0;
}
-static inline void acpi_dma_deconfigure(struct device *dev) { }
+static inline int acpi_dma_configure_id(struct device *dev,
+ enum dev_dma_attr attr,
+ const u32 *input_id)
+{
+ return 0;
+}
#define ACPI_PTR(_ptr) (NULL)
@@ -830,8 +1070,66 @@ static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
return NULL;
}
+static inline int acpi_get_local_address(acpi_handle handle, u32 *addr)
+{
+ return -ENODEV;
+}
+
+static inline const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int acpi_register_wakeup_handler(int wake_irq,
+ bool (*wakeup)(void *context), void *context)
+{
+ return -ENXIO;
+}
+
+static inline void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context) { }
+
+struct acpi_osc_context;
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline bool acpi_sleep_state_supported(u8 sleep_state)
+{
+ return false;
+}
+
+static inline acpi_handle acpi_get_processor_handle(int cpu)
+{
+ return NULL;
+}
+
+static inline int acpi_mrrm_max_mem_region(void)
+{
+ return 1;
+}
+
#endif /* !CONFIG_ACPI */
+#ifdef CONFIG_ACPI_HMAT
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *size);
+#else
+static inline int hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *size)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+extern void arch_post_acpi_subsys_init(void);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_ioapic_add(acpi_handle root);
#else
@@ -850,76 +1148,100 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-
-#ifdef CONFIG_X86
-void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
-#else
-static inline void arch_reserve_mem_area(acpi_physical_address addr,
- size_t size)
+struct acpi_s2idle_dev_ops {
+ struct list_head list_node;
+ void (*prepare)(void);
+ void (*check)(void);
+ void (*restore)(void);
+};
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
+int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+#else /* CONFIG_SUSPEND && CONFIG_X86 */
+static inline int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
+{
+ return -ENODEV;
+}
+static inline void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
{
}
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_SUSPEND && CONFIG_X86 */
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
-int acpi_dev_runtime_suspend(struct device *dev);
-int acpi_dev_runtime_resume(struct device *dev);
+int acpi_dev_suspend(struct device *dev, bool wakeup);
+int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
-struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
+bool acpi_storage_d3(struct device *dev);
+bool acpi_dev_state_d0(struct device *dev);
#else
-static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
-static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
-static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
+static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
- return NULL;
+ return 0;
}
-static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
+static inline bool acpi_storage_d3(struct device *dev)
{
- return -ENODEV;
+ return false;
+}
+static inline bool acpi_dev_state_d0(struct device *dev)
+{
+ return true;
}
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
-int acpi_dev_suspend_late(struct device *dev);
-int acpi_dev_resume_early(struct device *dev);
int acpi_subsys_prepare(struct device *dev);
void acpi_subsys_complete(struct device *dev);
int acpi_subsys_suspend_late(struct device *dev);
-int acpi_subsys_resume_early(struct device *dev);
+int acpi_subsys_suspend_noirq(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
+int acpi_subsys_poweroff(struct device *dev);
+int acpi_subsys_restore_early(struct device *dev);
#else
-static inline int acpi_dev_suspend_late(struct device *dev) { return 0; }
-static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
-static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
+static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
+static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
+static inline int acpi_subsys_restore_early(struct device *dev) { return 0; }
+#endif
+
+#if defined(CONFIG_ACPI_EC) && defined(CONFIG_PM_SLEEP)
+void acpi_ec_mark_gpe_for_wake(void);
+void acpi_ec_set_gpe_wake_mask(u8 action);
+#else
+static inline void acpi_ec_mark_gpe_for_wake(void) {}
+static inline void acpi_ec_set_gpe_wake_mask(u8 action) {}
#endif
#ifdef CONFIG_ACPI
+char *acpi_handle_path(acpi_handle handle);
__printf(3, 4)
void acpi_handle_printk(const char *level, acpi_handle handle,
const char *fmt, ...);
+void acpi_evaluation_failure_warn(acpi_handle handle, const char *name,
+ acpi_status status);
#else /* !CONFIG_ACPI */
static inline __printf(3, 4) void
acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
+static inline void acpi_evaluation_failure_warn(acpi_handle handle,
+ const char *name,
+ acpi_status status) {}
#endif /* !CONFIG_ACPI */
#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
__printf(3, 4)
void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...);
-#else
-#define __acpi_handle_debug(descriptor, handle, fmt, ...) \
- acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__);
#endif
/*
@@ -949,12 +1271,8 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c
#else
#if defined(CONFIG_DYNAMIC_DEBUG)
#define acpi_handle_debug(handle, fmt, ...) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
- __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \
- ##__VA_ARGS__); \
-} while (0)
+ _dynamic_func_call(fmt, __acpi_handle_debug, \
+ handle, pr_fmt(fmt), ##__VA_ARGS__)
#else
#define acpi_handle_debug(handle, fmt, ...) \
({ \
@@ -965,108 +1283,77 @@ do { \
#endif
#endif
-struct acpi_gpio_params {
- unsigned int crs_entry_index;
- unsigned int line_index;
- bool active_low;
-};
-
-struct acpi_gpio_mapping {
- const char *name;
- const struct acpi_gpio_params *data;
- unsigned int size;
-};
-
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
-int acpi_dev_add_driver_gpios(struct acpi_device *adev,
- const struct acpi_gpio_mapping *gpios);
-
-static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
-{
- if (adev)
- adev->driver_gpios = NULL;
-}
-
-int devm_acpi_dev_add_driver_gpios(struct device *dev,
- const struct acpi_gpio_mapping *gpios);
-void devm_acpi_dev_remove_driver_gpios(struct device *dev);
-
bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
-int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
+bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio);
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index,
+ bool *wake_capable);
#else
-static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
- const struct acpi_gpio_mapping *gpios)
+static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ return false;
+}
+static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ return false;
+}
+static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
+ int index, bool *wake_capable)
{
return -ENXIO;
}
-static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+#endif
-static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
- const struct acpi_gpio_mapping *gpios)
+static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index,
+ bool *wake_capable)
{
- return -ENXIO;
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable);
}
-static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
-static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
- struct acpi_resource_gpio **agpio)
+static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *con_id,
+ int index)
{
- return false;
+ return acpi_dev_gpio_irq_wake_get_by(adev, con_id, index, NULL);
}
+
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
- return -ENXIO;
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, NULL);
}
-#endif
/* Device properties */
-#define MAX_ACPI_REFERENCE_ARGS 8
-struct acpi_reference_args {
- struct acpi_device *adev;
- size_t nargs;
- u64 args[MAX_ACPI_REFERENCE_ARGS];
-};
-
#ifdef CONFIG_ACPI
int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args);
+ struct fwnode_reference_args *args);
static inline int acpi_node_get_property_reference(
const struct fwnode_handle *fwnode,
const char *name, size_t index,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return __acpi_node_get_property_reference(fwnode, name, index,
- MAX_ACPI_REFERENCE_ARGS, args);
+ NR_FWNODE_REFERENCE_ARGS, args);
+}
+
+static inline bool acpi_dev_has_props(const struct acpi_device *adev)
+{
+ return !list_empty(&adev->data.properties);
}
+struct acpi_device_properties *
+acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
+ union acpi_object *properties);
+
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
-int acpi_dev_prop_read_single(struct acpi_device *adev,
- const char *propname, enum dev_prop_type proptype,
- void *val);
-int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname, enum dev_prop_type proptype,
- void *val, size_t nval);
-int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval);
-
-struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
- struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
-
-struct fwnode_handle *
-acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle *prev);
-int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle **remote,
- struct fwnode_handle **port,
- struct fwnode_handle **endpoint);
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1098,16 +1385,29 @@ struct acpi_probe_entry {
kernel_ulong_t driver_data;
};
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
+void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr);
+
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
+ valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
- __used __section(__##table##_acpi_probe_table) \
- = { \
+ __used __section("__" #table "_acpi_probe_table") = { \
.id = table_id, \
.type = subtable, \
.subtable_valid = valid, \
- .probe_table = (acpi_tbl_table_handler)fn, \
- .driver_data = data, \
- }
+ .probe_table = fn, \
+ .driver_data = data, \
+ }
+
+#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \
+ subtable, valid, data, fn) \
+ static const struct acpi_probe_entry __acpi_probe_##name \
+ __used __section("__" #table "_acpi_probe_table") = { \
+ .id = table_id, \
+ .type = subtable, \
+ .subtable_valid = valid, \
+ .probe_subtbl = fn, \
+ .driver_data = data, \
+ }
#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table
#define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end
@@ -1133,7 +1433,7 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
static inline int
__acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
@@ -1141,7 +1441,7 @@ __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
static inline int
acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
@@ -1153,50 +1453,6 @@ static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode,
return -ENXIO;
}
-static inline int acpi_dev_prop_get(const struct acpi_device *adev,
- const char *propname,
- void **valptr)
-{
- return -ENXIO;
-}
-
-static inline int acpi_dev_prop_read_single(const struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val)
-{
- return -ENXIO;
-}
-
-static inline int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
-static inline int acpi_dev_prop_read(const struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
-static inline struct fwnode_handle *
-acpi_get_next_subnode(const struct fwnode_handle *fwnode,
- struct fwnode_handle *child)
-{
- return NULL;
-}
-
-static inline struct fwnode_handle *
-acpi_node_get_parent(const struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
static inline struct fwnode_handle *
acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
@@ -1239,19 +1495,130 @@ static inline bool acpi_has_watchdog(void) { return false; }
#ifdef CONFIG_ACPI_SPCR_TABLE
extern bool qdf2400_e44_present;
-int parse_spcr(bool earlycon);
+int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
#else
-static inline int parse_spcr(bool earlycon) { return 0; }
+static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
+{
+ return -ENODEV;
+}
#endif
#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res);
+const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index);
#else
static inline
int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
{
return -EINVAL;
}
+static inline const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_ACPI_LPIT
+int lpit_read_residency_count_address(u64 *address);
+#else
+static inline int lpit_read_residency_count_address(u64 *address)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+#ifndef arch_get_idle_state_flags
+static inline unsigned int arch_get_idle_state_flags(u32 arch_flags)
+{
+ return 0;
+}
+#endif
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
+#ifdef CONFIG_ACPI_PPTT
+int acpi_pptt_cpu_is_thread(unsigned int cpu);
+int find_acpi_cpu_topology(unsigned int cpu, int level);
+int find_acpi_cpu_topology_cluster(unsigned int cpu);
+int find_acpi_cpu_topology_package(unsigned int cpu);
+int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
+void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus);
+int find_acpi_cache_level_from_id(u32 cache_id);
+int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus);
+#else
+static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology_cluster(unsigned int cpu)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology_package(unsigned int cpu)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
+{
+ return -EINVAL;
+}
+static inline void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id,
+ cpumask_t *cpus) { }
+static inline int find_acpi_cache_level_from_id(u32 cache_id)
+{
+ return -ENOENT;
+}
+static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id,
+ cpumask_t *cpus)
+{
+ return -ENOENT;
+}
+#endif
+
+void acpi_arch_init(void);
+
+#ifdef CONFIG_ACPI_PCC
+void acpi_init_pcc(void);
+#else
+static inline void acpi_init_pcc(void) { }
+#endif
+
+#ifdef CONFIG_ACPI_FFH
+void acpi_init_ffh(void);
+extern int acpi_ffh_address_space_arch_setup(void *handler_ctxt,
+ void **region_ctxt);
+extern int acpi_ffh_address_space_arch_handler(acpi_integer *value,
+ void *region_context);
+#else
+static inline void acpi_init_ffh(void) { }
+#endif
+
+#ifdef CONFIG_ACPI
+extern void acpi_device_notify(struct device *dev);
+extern void acpi_device_notify_remove(struct device *dev);
+#else
+static inline void acpi_device_notify(struct device *dev) { }
+static inline void acpi_device_notify_remove(struct device *dev) { }
+#endif
+
+static inline void acpi_use_parent_companion(struct device *dev)
+{
+ ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent));
+}
+
+#ifdef CONFIG_ACPI_NUMA
+bool acpi_node_backed_by_real_pxm(int nid);
+#else
+static inline bool acpi_node_backed_by_real_pxm(int nid)
+{
+ return false;
+}
#endif
#endif /*_LINUX_ACPI_H*/
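
A minimal sketch of a consumer of the wake-capable GPIO IRQ API added above; the device, the "irq" connection ID, and the helper name are hypothetical, and the `-gpios` suffixing of the con_id follows the usual ACPI GPIO lookup convention:

#include <linux/acpi.h>
#include <linux/pm_wakeup.h>

static int example_get_wake_irq(struct acpi_device *adev)
{
	bool wake_capable = false;
	int irq;

	/* Look up GpioInt 0 under the hypothetical "irq-gpios" mapping */
	irq = acpi_dev_gpio_irq_wake_get_by(adev, "irq", 0, &wake_capable);
	if (irq < 0)
		return irq;	/* -ENXIO without CONFIG_ACPI && CONFIG_GPIOLIB */

	if (wake_capable)	/* set from the GpioInt() wake flag */
		device_set_wakeup_capable(&adev->dev, true);

	return irq;
}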
diff --git a/include/linux/acpi_amd_wbrf.h b/include/linux/acpi_amd_wbrf.h
new file mode 100644
index 000000000000..898f31d536d4
--- /dev/null
+++ b/include/linux/acpi_amd_wbrf.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wifi Band Exclusion Interface (AMD ACPI Implementation)
+ * Copyright (C) 2023 Advanced Micro Devices
+ */
+
+#ifndef _ACPI_AMD_WBRF_H
+#define _ACPI_AMD_WBRF_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+/* The maximum number of frequency band ranges */
+#define MAX_NUM_OF_WBRF_RANGES 11
+
+/* Record actions */
+#define WBRF_RECORD_ADD 0x0
+#define WBRF_RECORD_REMOVE 0x1
+
+/**
+ * struct freq_band_range - Wifi frequency band range definition
+ * @start: start frequency point (in Hz)
+ * @end: end frequency point (in Hz)
+ */
+struct freq_band_range {
+ u64 start;
+ u64 end;
+};
+
+/**
+ * struct wbrf_ranges_in_out - wbrf ranges info
+ * @num_of_ranges: total number of band ranges in this struct
+ * @band_list: array of Wifi band ranges
+ */
+struct wbrf_ranges_in_out {
+ u64 num_of_ranges;
+ struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES];
+};
+
+/**
+ * enum wbrf_notifier_actions - wbrf notifier actions index
+ * @WBRF_CHANGED: there were frequency band updates. Consumers
+ * should retrieve the latest active frequency bands.
+ */
+enum wbrf_notifier_actions {
+ WBRF_CHANGED,
+};
+
+#if IS_ENABLED(CONFIG_AMD_WBRF)
+bool acpi_amd_wbrf_supported_producer(struct device *dev);
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in);
+bool acpi_amd_wbrf_supported_consumer(struct device *dev);
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out);
+int amd_wbrf_register_notifier(struct notifier_block *nb);
+int amd_wbrf_unregister_notifier(struct notifier_block *nb);
+#else
+static inline
+bool acpi_amd_wbrf_supported_consumer(struct device *dev)
+{
+ return false;
+}
+
+static inline
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in)
+{
+ return -ENODEV;
+}
+
+static inline
+bool acpi_amd_wbrf_supported_producer(struct device *dev)
+{
+ return false;
+}
+static inline
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_AMD_WBRF */
+
+#endif /* _ACPI_AMD_WBRF_H */
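
A hedged sketch of the consumer side of this interface, assuming a hypothetical Wifi driver that already owns a struct device; the notifier body is illustrative:

#include <linux/acpi_amd_wbrf.h>

static int example_wbrf_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	/* On WBRF_CHANGED, re-read the active bands with
	 * amd_wbrf_retrieve_freq_band() and reprogram the radio. */
	return NOTIFY_OK;
}

static struct notifier_block example_wbrf_nb = {
	.notifier_call = example_wbrf_notify,
};

static int example_wbrf_setup(struct device *dev)
{
	if (!acpi_amd_wbrf_supported_consumer(dev))
		return 0;	/* platform has no WBRF support */

	return amd_wbrf_register_notifier(&example_wbrf_nb);
}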
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
index 329436d38e66..e748b2877602 100644
--- a/include/linux/acpi_dma.h
+++ b/include/linux/acpi_dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ACPI helpers for DMA request / controller
*
@@ -5,19 +6,16 @@
*
* Copyright (C) 2013, Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_ACPI_DMA_H
#define __LINUX_ACPI_DMA_H
-#include <linux/list.h>
-#include <linux/device.h>
#include <linux/err.h>
#include <linux/dmaengine.h>
+#include <linux/types.h>
+
+struct device;
/**
* struct acpi_dma_spec - slave device DMA resources
@@ -68,7 +66,6 @@ int devm_acpi_dma_controller_register(struct device *dev,
struct dma_chan *(*acpi_dma_xlate)
(struct acpi_dma_spec *, struct acpi_dma *),
void *data);
-void devm_acpi_dma_controller_free(struct device *dev);
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
size_t index);
@@ -97,9 +94,6 @@ static inline int devm_acpi_dma_controller_register(struct device *dev,
{
return -ENODEV;
}
-static inline void devm_acpi_dma_controller_free(struct device *dev)
-{
-}
static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
struct device *dev, size_t index)
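
With devm_acpi_dma_controller_free() gone, the devres core alone tears the registration down. A sketch of a controller driver's probe fragment, assuming pdev and filter_info from the enclosing function (acpi_dma_simple_xlate is the generic translator declared by this header):

	ret = devm_acpi_dma_controller_register(&pdev->dev,
						acpi_dma_simple_xlate,
						filter_info);
	if (ret)
		dev_dbg(&pdev->dev, "failed to register ACPI DMA controller\n");
	/* no explicit free on remove: devres drops the registration */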
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 8d3f0bf80379..d4ed5622cf2b 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016, Semihalf
* Author: Tomasz Nowicki <tn@semihalf.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#ifndef __ACPI_IORT_H__
@@ -26,32 +14,57 @@
#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
-int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
+/*
+ * PMCG model identifiers for use in the SMMU PMU driver. Note that
+ * these are purely software-defined and have nothing to do with the
+ * hardware or the IORT specification.
+ */
+#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP09 0x00000002 /* HiSilicon HIP09 PMCG */
+
+int iort_register_domain_token(int trans_id, phys_addr_t base,
+ struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+
#ifdef CONFIG_ACPI_IORT
-void acpi_iort_init(void);
-u32 iort_msi_map_rid(struct device *dev, u32 req_id);
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+u32 iort_msi_map_id(struct device *dev, u32 id);
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+ enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
-int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
/* IOMMU interface */
-void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
-const struct iommu_ops *iort_iommu_configure(struct device *dev);
+int iort_dma_get_ranges(struct device *dev, u64 *limit);
+int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
+phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
-static inline void acpi_iort_init(void) { }
-static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
-{ return req_id; }
-static inline struct irq_domain *iort_get_device_domain(struct device *dev,
- u32 req_id)
+static inline u32 iort_msi_map_id(struct device *dev, u32 id)
+{ return id; }
+static inline struct irq_domain *iort_get_device_domain(
+ struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
+static inline
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
+static inline
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
/* IOMMU interface */
-static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
- u64 *size) { }
+static inline int iort_dma_get_ranges(struct device *dev, u64 *limit)
+{ return -ENODEV; }
+static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
+{ return -ENODEV; }
static inline
-const struct iommu_ops *iort_iommu_configure(struct device *dev)
-{ return NULL; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
+
+static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
+{ return PHYS_ADDR_MAX; }
#endif
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h
new file mode 100644
index 000000000000..8e2eefa9fbc0
--- /dev/null
+++ b/include/linux/acpi_mdio.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ACPI helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_ACPI_MDIO_H
+#define __LINUX_ACPI_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_ACPI_MDIO)
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+ struct module *owner);
+
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle)
+{
+ return __acpi_mdiobus_register(mdio, handle, THIS_MODULE);
+}
+#else /* CONFIG_ACPI_MDIO */
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+{
+ /*
+ * Fall back to mdiobus_register() function to register a bus.
+ * This way, we don't have to keep compat bits around in drivers.
+ */
+
+ return mdiobus_register(mdio);
+}
+#endif
+
+#endif /* __LINUX_ACPI_MDIO_H */
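
Usage is a single call from the MDIO bus provider; a sketch, assuming bus and dev come from the enclosing probe function:

#include <linux/acpi_mdio.h>

	/* Registers the bus and its ACPI-described PHYs; with
	 * CONFIG_ACPI_MDIO disabled this degrades to a plain
	 * mdiobus_register(). */
	ret = acpi_mdiobus_register(bus, dev_fwnode(dev));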
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
index 1d0ef1ae8036..0ded9220d379 100644
--- a/include/linux/acpi_pmtmr.h
+++ b/include/linux/acpi_pmtmr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ACPI_PMTMR_H_
#define _ACPI_PMTMR_H_
@@ -25,6 +26,19 @@ static inline u32 acpi_pm_read_early(void)
return acpi_pm_read_verified() & ACPI_PM_MASK;
}
+/**
+ * Register a callback for suspend and resume events
+ *
+ * @cb: Callback triggered on suspend and resume
+ * @data: Data passed with the callback
+ */
+void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data);
+
+/**
+ * Remove the registered callback for suspend and resume events
+ */
+void acpi_pmtmr_unregister_suspend_resume_callback(void);
+
#else
static inline u32 acpi_pm_read_early(void)
diff --git a/include/linux/acpi_rimt.h b/include/linux/acpi_rimt.h
new file mode 100644
index 000000000000..fad3adc4d899
--- /dev/null
+++ b/include/linux/acpi_rimt.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc.
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#ifndef _ACPI_RIMT_H
+#define _ACPI_RIMT_H
+
+#ifdef CONFIG_ACPI_RIMT
+int rimt_iommu_register(struct device *dev);
+#else
+static inline int rimt_iommu_register(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_IOMMU_API) && defined(CONFIG_ACPI_RIMT)
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in);
+#else
+static inline int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _ACPI_RIMT_H */
diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h
new file mode 100644
index 000000000000..a5a122431563
--- /dev/null
+++ b/include/linux/acpi_viot.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ACPI_VIOT_H__
+#define __ACPI_VIOT_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_VIOT
+void __init acpi_viot_early_init(void);
+void __init acpi_viot_init(void);
+int viot_iommu_configure(struct device *dev);
+#else
+static inline void acpi_viot_early_init(void) {}
+static inline void acpi_viot_init(void) {}
+static inline int viot_iommu_configure(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ACPI_VIOT_H__ */
diff --git a/include/linux/adb.h b/include/linux/adb.h
index cde41300c7ad..f6306fc86015 100644
--- a/include/linux/adb.h
+++ b/include/linux/adb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for ADB (Apple Desktop Bus) support.
*/
diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h
index 0d991071a9d4..4836e382ad52 100644
--- a/include/linux/adfs_fs.h
+++ b/include/linux/adfs_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ADFS_FS_H
#define _ADFS_FS_H
diff --git a/include/linux/adi-axi-common.h b/include/linux/adi-axi-common.h
new file mode 100644
index 000000000000..37962ba530df
--- /dev/null
+++ b/include/linux/adi-axi-common.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices AXI common registers & definitions
+ *
+ * Copyright 2019 Analog Devices Inc.
+ *
+ * https://wiki.analog.com/resources/fpga/docs/axi_ip
+ * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
+ */
+
+#include <linux/types.h>
+
+#ifndef ADI_AXI_COMMON_H_
+#define ADI_AXI_COMMON_H_
+
+#define ADI_AXI_REG_VERSION 0x0000
+#define ADI_AXI_REG_FPGA_INFO 0x001C
+
+#define ADI_AXI_PCORE_VER(major, minor, patch) \
+ (((major) << 16) | ((minor) << 8) | (patch))
+
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
+/**
+ * adi_axi_pcore_ver_gteq() - check if a version is satisfied
+ * @version: the full version read from the hardware
+ * @major: the major version to compare against
+ * @minor: the minor version to compare against
+ *
+ * ADI AXI IP Cores use semantic versioning, so this can be used to check for
+ * feature availability.
+ *
+ * Return: true if the version is greater than or equal to the specified
+ * major and minor version, false otherwise.
+ */
+static inline bool adi_axi_pcore_ver_gteq(u32 version, u32 major, u32 minor)
+{
+ return ADI_AXI_PCORE_VER_MAJOR(version) > (major) ||
+ (ADI_AXI_PCORE_VER_MAJOR(version) == (major) &&
+ ADI_AXI_PCORE_VER_MINOR(version) >= (minor));
+}
+
+#define ADI_AXI_INFO_FPGA_TECH(info) (((info) >> 24) & 0xff)
+#define ADI_AXI_INFO_FPGA_FAMILY(info) (((info) >> 16) & 0xff)
+#define ADI_AXI_INFO_FPGA_SPEED_GRADE(info) (((info) >> 8) & 0xff)
+
+enum adi_axi_fpga_technology {
+ ADI_AXI_FPGA_TECH_UNKNOWN = 0,
+ ADI_AXI_FPGA_TECH_SERIES7,
+ ADI_AXI_FPGA_TECH_ULTRASCALE,
+ ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS,
+};
+
+enum adi_axi_fpga_family {
+ ADI_AXI_FPGA_FAMILY_UNKNOWN = 0,
+ ADI_AXI_FPGA_FAMILY_ARTIX,
+ ADI_AXI_FPGA_FAMILY_KINTEX,
+ ADI_AXI_FPGA_FAMILY_VIRTEX,
+ ADI_AXI_FPGA_FAMILY_ZYNQ,
+};
+
+enum adi_axi_fpga_speed_grade {
+ ADI_AXI_FPGA_SPEED_UNKNOWN = 0,
+ ADI_AXI_FPGA_SPEED_1 = 10,
+ ADI_AXI_FPGA_SPEED_1L = 11,
+ ADI_AXI_FPGA_SPEED_1H = 12,
+ ADI_AXI_FPGA_SPEED_1HV = 13,
+ ADI_AXI_FPGA_SPEED_1LV = 14,
+ ADI_AXI_FPGA_SPEED_2 = 20,
+ ADI_AXI_FPGA_SPEED_2L = 21,
+ ADI_AXI_FPGA_SPEED_2LV = 22,
+ ADI_AXI_FPGA_SPEED_3 = 30,
+};
+
+#endif /* ADI_AXI_COMMON_H_ */
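
A usage sketch, assuming an ioremapped register block @base: read the core version once and gate an optional feature on a hypothetical 4.1 minimum:

	u32 version = readl(base + ADI_AXI_REG_VERSION);

	if (adi_axi_pcore_ver_gteq(version, 4, 1)) {
		/* the IP core is new enough for the optional feature */
	}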
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
new file mode 100644
index 000000000000..d83c9175828f
--- /dev/null
+++ b/include/linux/adreno-smmu-priv.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef __ADRENO_SMMU_PRIV_H
+#define __ADRENO_SMMU_PRIV_H
+
+#include <linux/io-pgtable.h>
+
+/**
+ * struct adreno_smmu_fault_info - container for key fault information
+ *
+ * @far: The faulting IOVA from ARM_SMMU_CB_FAR
+ * @ttbr0: The current TTBR0 pagetable from ARM_SMMU_CB_TTBR0
+ * @contextidr: The value of ARM_SMMU_CB_CONTEXTIDR
+ * @fsr: The fault status from ARM_SMMU_CB_FSR
+ * @fsynr0: The value of FSYNR0 from ARM_SMMU_CB_FSYNR0
+ * @fsynr1: The value of FSYNR1 from ARM_SMMU_CB_FSYNR0
+ * @cbfrsynra: The value of CBFRSYNRA from ARM_SMMU_GR1_CBFRSYNRA(idx)
+ *
+ * This struct passes back key page fault information to the GPU driver
+ * through the get_fault_info function pointer.
+ * The GPU driver can use this information to print informative
+ * log messages and provide deeper GPU specific insight into the fault.
+ */
+struct adreno_smmu_fault_info {
+ u64 far;
+ u64 ttbr0;
+ u32 contextidr;
+ u32 fsr;
+ u32 fsynr0;
+ u32 fsynr1;
+ u32 cbfrsynra;
+};
+
+/**
+ * struct adreno_smmu_priv - private interface between adreno-smmu and GPU
+ *
+ * @cookie: An opaque token provided by adreno-smmu and passed
+ * back into the callbacks
+ * @get_ttbr1_cfg: Get the TTBR1 config for the GPUs context-bank
+ * @set_ttbr0_cfg: Set the TTBR0 config for the GPUs context bank. A
+ * NULL config disables TTBR0 translation, otherwise
+ * TTBR0 translation is enabled with the specified cfg
+ * @get_fault_info: Called by the GPU fault handler to get information about
+ * the fault
+ * @set_stall: Configure whether stall on fault (CFCFG) is enabled. If
+ * stalling on fault is enabled, the GPU driver must call
+ * resume_translation()
+ * @resume_translation: Resume translation after a fault
+ *
+ * @set_prr_bit: [optional] Configure the GPU's Partially Resident
+ * Region (PRR) bit in the ACTLR register.
+ * @set_prr_addr: [optional] Configure the PRR_CFG_*ADDR register with
+ * the physical address of PRR page passed from GPU
+ * driver.
+ *
+ * The GPU driver (drm/msm) and adreno-smmu work together for controlling
+ * the GPU's SMMU instance. This is by necessity, as the GPU is directly
+ * updating the SMMU for context switches, while on the other hand we do
+ * not want to duplicate all of the initial setup logic from arm-smmu.
+ *
+ * This private interface is used for the two drivers to coordinate. The
+ * cookie and callback functions are populated when the GPU driver attaches
+ * its domain.
+ */
+struct adreno_smmu_priv {
+ const void *cookie;
+ const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
+ int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg);
+ void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
+ void (*set_stall)(const void *cookie, bool enabled);
+ void (*resume_translation)(const void *cookie, bool terminate);
+ void (*set_prr_bit)(const void *cookie, bool set);
+ void (*set_prr_addr)(const void *cookie, phys_addr_t page_addr);
+};
+
+#endif /* __ADRENO_SMMU_PRIV_H */
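
On the GPU side the interface is reached through the SMMU device's platform data. A hedged sketch: the platdata lookup mirrors what drm/msm does, the rest is illustrative:

	struct adreno_smmu_priv *priv = dev_get_platdata(smmu_dev);
	const struct io_pgtable_cfg *ttbr1_cfg;

	ttbr1_cfg = priv->get_ttbr1_cfg(priv->cookie);

	/* ... later, from the fault handler ... */
	if (priv->get_fault_info) {
		struct adreno_smmu_fault_info info;

		priv->get_fault_info(priv->cookie, &info);
		pr_err("gpu fault at IOVA %#llx, fsr %#x\n", info.far, info.fsr);
	}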
diff --git a/include/linux/adxl.h b/include/linux/adxl.h
new file mode 100644
index 000000000000..2a629acb4c3f
--- /dev/null
+++ b/include/linux/adxl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Address translation interface via ACPI DSM.
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef _LINUX_ADXL_H
+#define _LINUX_ADXL_H
+
+const char * const *adxl_get_component_names(void);
+int adxl_decode(u64 addr, u64 component_values[]);
+
+#endif /* _LINUX_ADXL_H */
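
A decode sketch. Two assumptions are made here: that adxl_decode() returns 0 on success, and that 16 slots suffice for the platform's component list (the names array is NULL-terminated, which bounds the loop):

	const char * const *names = adxl_get_component_names();
	u64 values[16];	/* assumption: enough slots for this platform */
	int i;

	if (adxl_decode(addr, values) == 0)	/* assumption: 0 on success */
		for (i = 0; names[i]; i++)
			pr_info("adxl: %s = %#llx\n", names[i], values[i]);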
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 04602cbe85dc..02940be66324 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
@@ -13,14 +14,28 @@
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
+#define DPC_FATAL 3
+
+/*
+ * AER and DPC capabilities TLP Logging register sizes (PCIe r6.2, sec 7.8.4
+ * & 7.9.14).
+ */
+#define PCIE_STD_NUM_TLP_HEADERLOG 4
+#define PCIE_STD_MAX_TLP_PREFIXLOG 4
+#define PCIE_STD_MAX_TLP_HEADERLOG (PCIE_STD_NUM_TLP_HEADERLOG + 10)
struct pci_dev;
-struct aer_header_log_regs {
- unsigned int dw0;
- unsigned int dw1;
- unsigned int dw2;
- unsigned int dw3;
+struct pcie_tlp_log {
+ union {
+ u32 dw[PCIE_STD_MAX_TLP_HEADERLOG];
+ struct {
+ u32 _do_not_use[PCIE_STD_NUM_TLP_HEADERLOG];
+ u32 prefix[PCIE_STD_MAX_TLP_PREFIXLOG];
+ };
+ };
+ u8 header_len; /* Length of the Logged TLP Header in DWORDs */
+ bool flit; /* TLP was logged when in Flit mode */
};
struct aer_capability_regs {
@@ -31,7 +46,7 @@ struct aer_capability_regs {
u32 cor_status;
u32 cor_mask;
u32 cap_control;
- struct aer_header_log_regs header_log;
+ struct pcie_tlp_log header_log;
u32 root_command;
u32 root_status;
u16 cor_err_source;
@@ -39,35 +54,20 @@ struct aer_capability_regs {
};
#if defined(CONFIG_PCIEAER)
-/* pci-e port driver needs this function to enable aer */
-int pci_enable_pcie_error_reporting(struct pci_dev *dev);
-int pci_disable_pcie_error_reporting(struct pci_dev *dev);
-int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
-int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
+int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
- int severity,
- struct aer_capability_regs *aer_regs);
+ int severity, struct aer_capability_regs *aer_regs);
#endif //_AER_H_
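
A sketch of consuming the variable-length log: header_len (in DWORDs) replaces the old fixed four-dword assumption, so the dump loop is bounded by what the hardware actually logged:

static void example_dump_tlp(struct pci_dev *dev,
			     const struct pcie_tlp_log *log)
{
	int i;

	for (i = 0; i < log->header_len; i++)
		pci_err(dev, "TLP header dw%d: %#010x\n", i, log->dw[i]);
}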
diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h
index c6b61ca97053..21b34a96cfd8 100644
--- a/include/linux/agpgart.h
+++ b/include/linux/agpgart.h
@@ -30,8 +30,6 @@
#include <linux/agp_backend.h>
#include <uapi/linux/agpgart.h>
-#define AGPGART_MINOR 175
-
struct agp_info {
struct agp_version version; /* version of the driver */
u32 bridge_id; /* bridge vendor/device */
diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h
index 62be3a40239d..230c871ba084 100644
--- a/include/linux/ahci-remap.h
+++ b/include/linux/ahci-remap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_AHCI_REMAP_H
#define _LINUX_AHCI_REMAP_H
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index a270f25ee7c7..fe0760ce34c8 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* AHCI SATA platform driver
*
@@ -5,11 +6,6 @@
* Jeff Garzik <jgarzik@pobox.com>
* Copyright 2010 MontaVista Software, LLC.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
*/
#ifndef _AHCI_PLATFORM_H
@@ -17,28 +13,40 @@
#include <linux/compiler.h>
+struct clk;
struct device;
struct ata_port_info;
struct ahci_host_priv;
struct platform_device;
struct scsi_host_template;
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
+struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv,
+ const char *con_id);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv);
+int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
struct ahci_host_priv *ahci_platform_get_resources(
- struct platform_device *pdev);
+ struct platform_device *pdev, unsigned int flags);
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
+
+void ahci_platform_shutdown(struct platform_device *pdev);
int ahci_platform_suspend_host(struct device *dev);
int ahci_platform_resume_host(struct device *dev);
int ahci_platform_suspend(struct device *dev);
int ahci_platform_resume(struct device *dev);
+#define AHCI_PLATFORM_GET_RESETS BIT(0)
+#define AHCI_PLATFORM_RST_TRIGGER BIT(1)
+
#endif /* _AHCI_PLATFORM_H */
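
A probe skeleton against the flag-based resource API; the flags ask the core to also acquire the reset controls and to trigger them across enable/disable (hpriv, pdev and rc are assumed from the surrounding probe):

	hpriv = ahci_platform_get_resources(pdev,
			AHCI_PLATFORM_GET_RESETS | AHCI_PLATFORM_RST_TRIGGER);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);	/* phys, clks, rsts, regulators */
	if (rc)
		return rc;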
diff --git a/include/linux/aio.h b/include/linux/aio.h
index fdd0a343f455..86892a4fe7c8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H
@@ -7,8 +8,6 @@ struct kioctx;
struct kiocb;
struct mm_struct;
-#define KIOCB_KEY 0
-
typedef int (kiocb_cancel_fn)(struct kiocb *);
/* prototypes */
@@ -21,8 +20,4 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
-/* for sysctl: */
-extern unsigned long aio_nr;
-extern unsigned long aio_max_nr;
-
#endif /* __LINUX__AIO_H */
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index c70aac13244a..3ffa5341dce2 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -1,10 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ALARMTIMER_H
#define _LINUX_ALARMTIMER_H
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/timerqueue.h>
-#include <linux/rtc.h>
+
+struct rtc_device;
enum alarmtimer_type {
ALARM_REALTIME,
@@ -18,12 +20,6 @@ enum alarmtimer_type {
ALARM_BOOTTIME_FREEZER,
};
-enum alarmtimer_restart {
- ALARMTIMER_NORESTART,
- ALARMTIMER_RESTART,
-};
-
-
#define ALARMTIMER_STATE_INACTIVE 0x00
#define ALARMTIMER_STATE_ENQUEUED 0x01
@@ -40,14 +36,14 @@ enum alarmtimer_restart {
struct alarm {
struct timerqueue_node node;
struct hrtimer timer;
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t now);
+ void (*function)(struct alarm *, ktime_t now);
enum alarmtimer_type type;
int state;
void *data;
};
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
+ void (*function)(struct alarm *, ktime_t));
void alarm_start(struct alarm *alarm, ktime_t start);
void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
@@ -58,7 +54,11 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
ktime_t alarm_expires_remaining(const struct alarm *alarm);
+#ifdef CONFIG_RTC_CLASS
/* Provide way to access the rtc device being used by alarmtimers */
struct rtc_device *alarmtimer_get_rtcdev(void);
+#else
+static inline struct rtc_device *alarmtimer_get_rtcdev(void) { return NULL; }
+#endif
#endif
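
With the void-returning callback, a handler no longer signals restart through its return value; a minimal one-shot sketch (names are hypothetical):

static struct alarm example_alarm;

static void example_alarm_fn(struct alarm *alarm, ktime_t now)
{
	pr_info("alarm fired\n");	/* one-shot: no restart value to return */
}

	/* setup: fire once, five seconds from now on the REALTIME base */
	alarm_init(&example_alarm, ALARM_REALTIME, example_alarm_fn);
	alarm_start_relative(&example_alarm, ktime_set(5, 0));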
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
new file mode 100644
index 000000000000..dcb1d37dabc2
--- /dev/null
+++ b/include/linux/alcor_pci.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * Driver for Alcor Micro AU6601 and AU6621 controllers
+ */
+
+#ifndef __ALCOR_PCI_H
+#define __ALCOR_PCI_H
+
+#define ALCOR_SD_CARD 0
+#define ALCOR_MS_CARD 1
+
+#define DRV_NAME_ALCOR_PCI "alcor_pci"
+#define DRV_NAME_ALCOR_PCI_SDMMC "alcor_sdmmc"
+#define DRV_NAME_ALCOR_PCI_MS "alcor_ms"
+
+#define PCI_ID_ALCOR_MICRO 0x1AEA
+#define PCI_ID_AU6601 0x6601
+#define PCI_ID_AU6621 0x6621
+#define PCI_ID_AU6625 0x6625
+
+#define MHZ_TO_HZ(freq) ((freq) * 1000 * 1000)
+
+#define AU6601_BASE_CLOCK 31000000
+#define AU6601_MIN_CLOCK 150000
+#define AU6601_MAX_CLOCK 208000000
+#define AU6601_MAX_DMA_SEGMENTS 64
+#define AU6601_MAX_PIO_SEGMENTS 1
+#define AU6601_MAX_DMA_BLOCK_SIZE 0x1000
+#define AU6601_MAX_PIO_BLOCK_SIZE 0x200
+#define AU6601_MAX_DMA_BLOCKS 1
+#define AU6601_DMA_LOCAL_SEGMENTS 1
+
+/* registers spotted by reverse engineering but still
+ * with unknown functionality:
+ * 0x10 - ADMA phy address. AU6621 only?
+ * 0x51 - LED ctrl?
+ * 0x52 - unknown
+ * 0x61 - LED related? Always toggled BIT0
+ * 0x63 - Same as 0x61?
+ * 0x77 - unknown
+ */
+
+/* SDMA phy address. Higher than 0x0800.0000?
+ * The au6601 and au6621 have different DMA engines with different issues.
+ * For example, the au6621 engine is triggered by an address change; no other
+ * interaction is needed. This means that if we get two buffers with the same
+ * address, the engine will stall.
+ */
+#define AU6601_REG_SDMA_ADDR 0x00
+#define AU6601_SDMA_MASK 0xffffffff
+
+#define AU6601_DMA_BOUNDARY 0x05
+#define AU6621_DMA_PAGE_CNT 0x05
+/* PIO */
+#define AU6601_REG_BUFFER 0x08
+/* ADMA ctrl? AU6621 only. */
+#define AU6621_DMA_CTRL 0x0c
+#define AU6621_DMA_ENABLE BIT(0)
+/* CMD index */
+#define AU6601_REG_CMD_OPCODE 0x23
+/* CMD parameter */
+#define AU6601_REG_CMD_ARG 0x24
+/* CMD response 4x4 Bytes */
+#define AU6601_REG_CMD_RSP0 0x30
+#define AU6601_REG_CMD_RSP1 0x34
+#define AU6601_REG_CMD_RSP2 0x38
+#define AU6601_REG_CMD_RSP3 0x3C
+/* default timeout set to 125: 125 * 40ms = 5 sec.
+ * How exactly is it calculated?
+ */
+#define AU6601_TIME_OUT_CTRL 0x69
+/* Block size for SDMA or PIO */
+#define AU6601_REG_BLOCK_SIZE 0x6c
+/* Some power related reg, used together with AU6601_OUTPUT_ENABLE */
+#define AU6601_POWER_CONTROL 0x70
+
+/* PLL ctrl */
+#define AU6601_CLK_SELECT 0x72
+#define AU6601_CLK_OVER_CLK 0x80
+#define AU6601_CLK_384_MHZ 0x30
+#define AU6601_CLK_125_MHZ 0x20
+#define AU6601_CLK_48_MHZ 0x10
+#define AU6601_CLK_EXT_PLL 0x04
+#define AU6601_CLK_X2_MODE 0x02
+#define AU6601_CLK_ENABLE 0x01
+#define AU6601_CLK_31_25_MHZ 0x00
+
+#define AU6601_CLK_DIVIDER 0x73
+
+#define AU6601_INTERFACE_MODE_CTRL 0x74
+#define AU6601_DLINK_MODE 0x80
+#define AU6601_INTERRUPT_DELAY_TIME 0x40
+#define AU6601_SIGNAL_REQ_CTRL 0x30
+#define AU6601_MS_CARD_WP BIT(3)
+#define AU6601_SD_CARD_WP BIT(0)
+
+/* same register values are used for:
+ * - AU6601_OUTPUT_ENABLE
+ * - AU6601_POWER_CONTROL
+ */
+#define AU6601_ACTIVE_CTRL 0x75
+#define AU6601_XD_CARD BIT(4)
+/* AU6601_MS_CARD_ACTIVE - will activate the MS card section? */
+#define AU6601_MS_CARD BIT(3)
+#define AU6601_SD_CARD BIT(0)
+
+/* card slot state. It should automatically detect the type of
+ * the card.
+ */
+#define AU6601_DETECT_STATUS 0x76
+#define AU6601_DETECT_EN BIT(7)
+#define AU6601_MS_DETECTED BIT(3)
+#define AU6601_SD_DETECTED BIT(0)
+#define AU6601_DETECT_STATUS_M 0xf
+
+#define AU6601_REG_SW_RESET 0x79
+#define AU6601_BUF_CTRL_RESET BIT(7)
+#define AU6601_RESET_DATA BIT(3)
+#define AU6601_RESET_CMD BIT(0)
+
+#define AU6601_OUTPUT_ENABLE 0x7a
+
+#define AU6601_PAD_DRIVE0 0x7b
+#define AU6601_PAD_DRIVE1 0x7c
+#define AU6601_PAD_DRIVE2 0x7d
+/* read EEPROM? */
+#define AU6601_FUNCTION 0x7f
+
+#define AU6601_CMD_XFER_CTRL 0x81
+#define AU6601_CMD_17_BYTE_CRC 0xc0
+#define AU6601_CMD_6_BYTE_WO_CRC 0x80
+#define AU6601_CMD_6_BYTE_CRC 0x40
+#define AU6601_CMD_START_XFER 0x20
+#define AU6601_CMD_STOP_WAIT_RDY 0x10
+#define AU6601_CMD_NO_RESP 0x00
+
+#define AU6601_REG_BUS_CTRL 0x82
+#define AU6601_BUS_WIDTH_4BIT 0x20
+#define AU6601_BUS_WIDTH_8BIT 0x10
+#define AU6601_BUS_WIDTH_1BIT 0x00
+
+#define AU6601_DATA_XFER_CTRL 0x83
+#define AU6601_DATA_WRITE BIT(7)
+#define AU6601_DATA_DMA_MODE BIT(6)
+#define AU6601_DATA_START_XFER BIT(0)
+
+#define AU6601_DATA_PIN_STATE 0x84
+#define AU6601_BUS_STAT_CMD BIT(15)
+/* BIT(4) - BIT(7) are permanently 1.
+ * They may be reserved, or DAT4-DAT7 may not be attached.
+ */
+#define AU6601_BUS_STAT_DAT3 BIT(3)
+#define AU6601_BUS_STAT_DAT2 BIT(2)
+#define AU6601_BUS_STAT_DAT1 BIT(1)
+#define AU6601_BUS_STAT_DAT0 BIT(0)
+#define AU6601_BUS_STAT_DAT_MASK 0xf
+
+#define AU6601_OPT 0x85
+#define AU6601_OPT_CMD_LINE_LEVEL 0x80
+#define AU6601_OPT_NCRC_16_CLK BIT(4)
+#define AU6601_OPT_CMD_NWT BIT(3)
+#define AU6601_OPT_STOP_CLK BIT(2)
+#define AU6601_OPT_DDR_MODE BIT(1)
+#define AU6601_OPT_SD_18V BIT(0)
+
+#define AU6601_CLK_DELAY 0x86
+#define AU6601_CLK_DATA_POSITIVE_EDGE 0x80
+#define AU6601_CLK_CMD_POSITIVE_EDGE 0x40
+#define AU6601_CLK_POSITIVE_EDGE_ALL (AU6601_CLK_CMD_POSITIVE_EDGE \
+ | AU6601_CLK_DATA_POSITIVE_EDGE)
+
+
+#define AU6601_REG_INT_STATUS 0x90
+#define AU6601_REG_INT_ENABLE 0x94
+#define AU6601_INT_DATA_END_BIT_ERR BIT(22)
+#define AU6601_INT_DATA_CRC_ERR BIT(21)
+#define AU6601_INT_DATA_TIMEOUT_ERR BIT(20)
+#define AU6601_INT_CMD_INDEX_ERR BIT(19)
+#define AU6601_INT_CMD_END_BIT_ERR BIT(18)
+#define AU6601_INT_CMD_CRC_ERR BIT(17)
+#define AU6601_INT_CMD_TIMEOUT_ERR BIT(16)
+#define AU6601_INT_ERROR BIT(15)
+#define AU6601_INT_OVER_CURRENT_ERR BIT(8)
+#define AU6601_INT_CARD_INSERT BIT(7)
+#define AU6601_INT_CARD_REMOVE BIT(6)
+#define AU6601_INT_READ_BUF_RDY BIT(5)
+#define AU6601_INT_WRITE_BUF_RDY BIT(4)
+#define AU6601_INT_DMA_END BIT(3)
+#define AU6601_INT_DATA_END BIT(1)
+#define AU6601_INT_CMD_END BIT(0)
+
+#define AU6601_INT_NORMAL_MASK 0x00007FFF
+#define AU6601_INT_ERROR_MASK 0xFFFF8000
+
+#define AU6601_INT_CMD_MASK (AU6601_INT_CMD_END | \
+ AU6601_INT_CMD_TIMEOUT_ERR | AU6601_INT_CMD_CRC_ERR | \
+ AU6601_INT_CMD_END_BIT_ERR | AU6601_INT_CMD_INDEX_ERR)
+#define AU6601_INT_DATA_MASK (AU6601_INT_DATA_END | AU6601_INT_DMA_END | \
+ AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY | \
+ AU6601_INT_DATA_TIMEOUT_ERR | AU6601_INT_DATA_CRC_ERR | \
+ AU6601_INT_DATA_END_BIT_ERR)
+#define AU6601_INT_ALL_MASK ((u32)-1)
+
+/* MS_CARD mode registers */
+
+#define AU6601_MS_STATUS 0xa0
+
+#define AU6601_MS_BUS_MODE_CTRL 0xa1
+#define AU6601_MS_BUS_8BIT_MODE 0x03
+#define AU6601_MS_BUS_4BIT_MODE 0x01
+#define AU6601_MS_BUS_1BIT_MODE 0x00
+
+#define AU6601_MS_TPC_CMD 0xa2
+#define AU6601_MS_TPC_READ_PAGE_DATA 0x02
+#define AU6601_MS_TPC_READ_REG 0x04
+#define AU6601_MS_TPC_GET_INT 0x07
+#define AU6601_MS_TPC_WRITE_PAGE_DATA 0x0D
+#define AU6601_MS_TPC_WRITE_REG 0x0B
+#define AU6601_MS_TPC_SET_RW_REG_ADRS 0x08
+#define AU6601_MS_TPC_SET_CMD 0x0E
+#define AU6601_MS_TPC_EX_SET_CMD 0x09
+#define AU6601_MS_TPC_READ_SHORT_DATA 0x03
+#define AU6601_MS_TPC_WRITE_SHORT_DATA 0x0C
+
+#define AU6601_MS_TRANSFER_MODE 0xa3
+#define AU6601_MS_XFER_INT_TIMEOUT_CHK BIT(2)
+#define AU6601_MS_XFER_DMA_ENABLE BIT(1)
+#define AU6601_MS_XFER_START BIT(0)
+
+#define AU6601_MS_DATA_PIN_STATE 0xa4
+
+#define AU6601_MS_INT_STATUS 0xb0
+#define AU6601_MS_INT_ENABLE 0xb4
+#define AU6601_MS_INT_OVER_CURRENT_ERROR BIT(23)
+#define AU6601_MS_INT_DATA_CRC_ERROR BIT(21)
+#define AU6601_MS_INT_INT_TIMEOUT BIT(20)
+#define AU6601_MS_INT_INT_RESP_ERROR BIT(19)
+#define AU6601_MS_INT_CED_ERROR BIT(18)
+#define AU6601_MS_INT_TPC_TIMEOUT BIT(16)
+#define AU6601_MS_INT_ERROR BIT(15)
+#define AU6601_MS_INT_CARD_INSERT BIT(7)
+#define AU6601_MS_INT_CARD_REMOVE BIT(6)
+#define AU6601_MS_INT_BUF_READ_RDY BIT(5)
+#define AU6601_MS_INT_BUF_WRITE_RDY BIT(4)
+#define AU6601_MS_INT_DMA_END BIT(3)
+#define AU6601_MS_INT_TPC_END BIT(1)
+
+#define AU6601_MS_INT_DATA_MASK 0x00000038
+#define AU6601_MS_INT_TPC_MASK 0x003d8002
+#define AU6601_MS_INT_TPC_ERROR 0x003d0000
+
+#define ALCOR_PCIE_LINK_CTRL_OFFSET 0x10
+#define ALCOR_PCIE_LINK_CAP_OFFSET 0x0c
+#define ALCOR_CAP_START_OFFSET 0x34
+
+struct alcor_dev_cfg {
+ u8 dma;
+};
+
+struct alcor_pci_priv {
+ struct pci_dev *pdev;
+ struct pci_dev *parent_pdev;
+ struct device *dev;
+ void __iomem *iobase;
+ unsigned int irq;
+
+ unsigned long id; /* idr id */
+
+ struct alcor_dev_cfg *cfg;
+};
+
+void alcor_write8(struct alcor_pci_priv *priv, u8 val, unsigned int addr);
+void alcor_write16(struct alcor_pci_priv *priv, u16 val, unsigned int addr);
+void alcor_write32(struct alcor_pci_priv *priv, u32 val, unsigned int addr);
+void alcor_write32be(struct alcor_pci_priv *priv, u32 val, unsigned int addr);
+u8 alcor_read8(struct alcor_pci_priv *priv, unsigned int addr);
+u32 alcor_read32(struct alcor_pci_priv *priv, unsigned int addr);
+u32 alcor_read32be(struct alcor_pci_priv *priv, unsigned int addr);
+#endif
diff --git a/include/linux/align.h b/include/linux/align.h
new file mode 100644
index 000000000000..55debf105a5d
--- /dev/null
+++ b/include/linux/align.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ALIGN_H
+#define _LINUX_ALIGN_H
+
+#include <vdso/align.h>
+
+#endif /* _LINUX_ALIGN_H */
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
new file mode 100644
index 000000000000..d40ac39bfbe8
--- /dev/null
+++ b/include/linux/alloc_tag.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * allocation tagging
+ */
+#ifndef _LINUX_ALLOC_TAG_H
+#define _LINUX_ALLOC_TAG_H
+
+#include <linux/bug.h>
+#include <linux/codetag.h>
+#include <linux/container_of.h>
+#include <linux/preempt.h>
+#include <asm/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/static_key.h>
+#include <linux/irqflags.h>
+
+struct alloc_tag_counters {
+ u64 bytes;
+ u64 calls;
+};
+
+/*
+ * An instance of this structure is created in a special ELF section at every
+ * allocation callsite. At runtime, the special section is treated as
+ * an array of these. The embedded codetag uses the codetag framework.
+ */
+struct alloc_tag {
+ struct codetag ct;
+ struct alloc_tag_counters __percpu *counters;
+} __aligned(8);
+
+struct alloc_tag_kernel_section {
+ struct alloc_tag *first_tag;
+ unsigned long count;
+};
+
+struct alloc_tag_module_section {
+ union {
+ unsigned long start_addr;
+ struct alloc_tag *first_tag;
+ };
+ unsigned long end_addr;
+ /* used size */
+ unsigned long size;
+};
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+
+#define CODETAG_EMPTY ((void *)1)
+
+static inline bool is_codetag_empty(union codetag_ref *ref)
+{
+ return ref->ct == CODETAG_EMPTY;
+}
+
+static inline void set_codetag_empty(union codetag_ref *ref)
+{
+ if (ref)
+ ref->ct = CODETAG_EMPTY;
+}
+
+#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+
+static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
+
+static inline void set_codetag_empty(union codetag_ref *ref)
+{
+ if (ref)
+ ref->ct = NULL;
+}
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+#define ALLOC_TAG_SECTION_NAME "alloc_tags"
+
+struct codetag_bytes {
+ struct codetag *ct;
+ s64 bytes;
+};
+
+size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);
+
+static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
+{
+ return container_of(ct, struct alloc_tag, ct);
+}
+
+#if defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)
+/*
+ * When percpu variables are required to be defined as weak, static percpu
+ * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
+ * Instead we will account all module allocations to a single counter.
+ */
+DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = &_shared_alloc_tag };
+
+#else /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
+
+#ifdef MODULE
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = NULL };
+
+#else /* MODULE */
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = &_alloc_tag_cntr };
+
+#endif /* MODULE */
+
+#endif /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
+ mem_alloc_profiling_key);
+
+static inline bool mem_alloc_profiling_enabled(void)
+{
+ return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
+ &mem_alloc_profiling_key);
+}
+
+static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
+{
+ struct alloc_tag_counters v = { 0, 0 };
+ struct alloc_tag_counters *counter;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ counter = per_cpu_ptr(tag->counters, cpu);
+ v.bytes += counter->bytes;
+ v.calls += counter->calls;
+ }
+
+ return v;
+}
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ WARN_ONCE(ref && ref->ct && !is_codetag_empty(ref),
+ "alloc_tag was not cleared (got tag for %s:%u)\n",
+ ref->ct->filename, ref->ct->lineno);
+
+ WARN_ONCE(!tag, "current->alloc_tag not set\n");
+}
+
+static inline void alloc_tag_sub_check(union codetag_ref *ref)
+{
+ WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
+}
+#else
+static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
+static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
+#endif
+
+/* Caller should verify both ref and tag to be valid */
+static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ alloc_tag_add_check(ref, tag);
+ if (!ref || !tag)
+ return false;
+
+ ref->ct = &tag->ct;
+ return true;
+}
+
+static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ if (unlikely(!__alloc_tag_ref_set(ref, tag)))
+ return false;
+
+ /*
+ * We need to increment the call counter every time we have a new
+ * allocation or when we split a large allocation into smaller ones.
+ * Each new reference for every sub-allocation needs to increment the
+ * call counter, because the counter is decremented when each part is freed.
+ */
+ this_cpu_inc(tag->counters->calls);
+ return true;
+}
+
+static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
+{
+ if (likely(alloc_tag_ref_set(ref, tag)))
+ this_cpu_add(tag->counters->bytes, bytes);
+}
+
+static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
+{
+ struct alloc_tag *tag;
+
+ alloc_tag_sub_check(ref);
+ if (!ref || !ref->ct)
+ return;
+
+ if (is_codetag_empty(ref)) {
+ ref->ct = NULL;
+ return;
+ }
+
+ tag = ct_to_alloc_tag(ref->ct);
+
+ this_cpu_sub(tag->counters->bytes, bytes);
+ this_cpu_dec(tag->counters->calls);
+
+ ref->ct = NULL;
+}
+
+static inline void alloc_tag_set_inaccurate(struct alloc_tag *tag)
+{
+ tag->ct.flags |= CODETAG_FLAG_INACCURATE;
+}
+
+static inline bool alloc_tag_is_inaccurate(struct alloc_tag *tag)
+{
+ return !!(tag->ct.flags & CODETAG_FLAG_INACCURATE);
+}
+
+#define alloc_tag_record(p) ((p) = current->alloc_tag)
+
+#else /* CONFIG_MEM_ALLOC_PROFILING */
+
+#define DEFINE_ALLOC_TAG(_alloc_tag)
+static inline bool mem_alloc_profiling_enabled(void) { return false; }
+static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
+ size_t bytes) {}
+static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
+static inline void alloc_tag_set_inaccurate(struct alloc_tag *tag) {}
+static inline bool alloc_tag_is_inaccurate(struct alloc_tag *tag) { return false; }
+#define alloc_tag_record(p) do {} while (0)
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
+#define alloc_hooks_tag(_tag, _do_alloc) \
+({ \
+ typeof(_do_alloc) _res; \
+ if (mem_alloc_profiling_enabled()) { \
+ struct alloc_tag * __maybe_unused _old; \
+ _old = alloc_tag_save(_tag); \
+ _res = _do_alloc; \
+ alloc_tag_restore(_tag, _old); \
+ } else \
+ _res = _do_alloc; \
+ _res; \
+})
+
+#define alloc_hooks(_do_alloc) \
+({ \
+ DEFINE_ALLOC_TAG(_alloc_tag); \
+ alloc_hooks_tag(&_alloc_tag, _do_alloc); \
+})
+
+#endif /* _LINUX_ALLOC_TAG_H */
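
The intended pattern (as in the kernel's slab wrappers): a _noprof variant performs the real allocation, and alloc_hooks() plants a per-callsite tag around it. my_alloc and my_alloc_noprof here are hypothetical:

void *my_alloc_noprof(size_t size);	/* does the real work, untagged */

/* every expansion site of my_alloc() gets its own struct alloc_tag, so
 * bytes/calls are accounted per callsite */
#define my_alloc(size)	alloc_hooks(my_alloc_noprof(size))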
diff --git a/include/linux/altera_jtaguart.h b/include/linux/altera_jtaguart.h
index 953b178a1650..527a142cd530 100644
--- a/include/linux/altera_jtaguart.h
+++ b/include/linux/altera_jtaguart.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* altera_jtaguart.h -- Altera JTAG UART driver defines.
*/
diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h
index c022c82db7ca..3eb73b8c49c8 100644
--- a/include/linux/altera_uart.h
+++ b/include/linux/altera_uart.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* altera_uart.h -- Altera UART driver defines.
*/
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index d143c13bed26..9946276aff73 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/amba/bus.h
*
@@ -6,10 +7,6 @@
* region or that is derived from a PrimeCell.
*
* Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASMARM_AMBA_H
#define ASMARM_AMBA_H
@@ -25,23 +22,76 @@
#define AMBA_CID 0xb105f00d
#define CORESIGHT_CID 0xb105900d
+/*
+ * CoreSight Architecture specification updates the ID specification
+ * for components on the AMBA bus. (ARM IHI 0029E)
+ *
+ * Bits 15:12 of the CID are the device class.
+ *
+ * Class 0xF remains for PrimeCell and legacy components. (AMBA_CID above)
+ * Class 0x9 defines the component as CoreSight (CORESIGHT_CID above)
+ * Class 0x0, 0x1, 0xB, 0xE define components that do not have driver support
+ * at present.
+ * Class 0x2-0x8, 0xA and 0xC-0xD are presently reserved.
+ *
+ * Remaining CID bits stay as 0xb105-00d
+ */
+
+/**
+ * Class 0x9 components use additional values to form a Unique Component
+ * Identifier (UCI), where peripheral ID values are identical for different
+ * components. Passed to the amba bus code from the component driver via
+ * the amba_id->data pointer.
+ * @devarch: coresight devarch register value
+ * @devarch_mask: mask bits used for matching. 0 indicates UCI not used.
+ * @devtype: coresight device type value
+ * @data: additional driver data. As we have usurped the original
+ * pointer, some devices may still need additional data.
+ */
+struct amba_cs_uci_id {
+ unsigned int devarch;
+ unsigned int devarch_mask;
+ unsigned int devtype;
+ void *data;
+};
+
+/* define offsets for registers used by UCI */
+#define UCI_REG_DEVTYPE_OFFSET 0xFCC
+#define UCI_REG_DEVARCH_OFFSET 0xFBC
+
struct clk;
struct amba_device {
struct device dev;
struct resource res;
struct clk *pclk;
+ struct device_dma_parameters dma_parms;
unsigned int periphid;
+ struct mutex periphid_lock;
+ unsigned int cid;
+ struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
- char *driver_override;
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
};
struct amba_driver {
struct device_driver drv;
int (*probe)(struct amba_device *, const struct amba_id *);
- int (*remove)(struct amba_device *);
+ void (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
const struct amba_id *id_table;
+ /*
+ * For most device drivers, no need to care about this flag as long as
+ * all DMAs are handled through the kernel DMA API. For some special
+ * ones, for example VFIO drivers, they know how to manage the DMA
+ * themselves and set this flag so that the IOMMU layer will allow them
+ * to setup and manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
/*
@@ -53,69 +103,47 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
AMBA_VENDOR_QCOM = 0x51,
AMBA_VENDOR_LSI = 0xb6,
- AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */
};
-/* This is used to generate pseudo-ID for AMBA device */
-#define AMBA_LINUX_ID(conf, rev, part) \
- (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
- AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))
-
-extern struct bus_type amba_bustype;
+extern const struct bus_type amba_bustype;
-#define to_amba_device(d) container_of(d, struct amba_device, dev)
+#define to_amba_device(d) container_of_const(d, struct amba_device, dev)
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
-int amba_driver_register(struct amba_driver *);
-void amba_driver_unregister(struct amba_driver *);
-struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
-void amba_device_put(struct amba_device *);
-int amba_device_add(struct amba_device *, struct resource *);
-int amba_device_register(struct amba_device *, struct resource *);
-struct amba_device *amba_apb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *amba_ahb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *
-amba_apb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
-struct amba_device *
-amba_ahb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
-void amba_device_unregister(struct amba_device *);
-struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
-int amba_request_regions(struct amba_device *, const char *);
-void amba_release_regions(struct amba_device *);
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define amba_driver_register(drv) \
+ __amba_driver_register(drv, THIS_MODULE)
-static inline int amba_pclk_enable(struct amba_device *dev)
+#ifdef CONFIG_ARM_AMBA
+int __amba_driver_register(struct amba_driver *, struct module *);
+void amba_driver_unregister(struct amba_driver *);
+bool dev_is_amba(const struct device *dev);
+#else
+static inline int __amba_driver_register(struct amba_driver *drv,
+ struct module *owner)
{
- return clk_enable(dev->pclk);
+ return -EINVAL;
}
-
-static inline void amba_pclk_disable(struct amba_device *dev)
+static inline void amba_driver_unregister(struct amba_driver *drv)
{
- clk_disable(dev->pclk);
}
-
-static inline int amba_pclk_prepare(struct amba_device *dev)
+static inline bool dev_is_amba(const struct device *dev)
{
- return clk_prepare(dev->pclk);
+ return false;
}
+#endif
-static inline void amba_pclk_unprepare(struct amba_device *dev)
-{
- clk_unprepare(dev->pclk);
-}
+struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
+void amba_device_put(struct amba_device *);
+int amba_device_add(struct amba_device *, struct resource *);
+int amba_device_register(struct amba_device *, struct resource *);
+void amba_device_unregister(struct amba_device *);
+int amba_request_regions(struct amba_device *, const char *);
+void amba_release_regions(struct amba_device *);
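A hedged sketch of the allocation-style registration path these declarations support; base is an assumed bus address and error paths are abbreviated:

struct amba_device *adev;
int ret;

adev = amba_device_alloc("my-dev.0", base, SZ_4K);	/* name, base, size */
if (!adev)
	return -ENOMEM;

adev->periphid = 0x000bb95d;			/* illustrative ID */
ret = amba_device_add(adev, &iomem_resource);	/* parent resource */
if (ret)
	amba_device_put(adev);			/* drop the initial reference */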
/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h
deleted file mode 100644
index 516a6fda83c5..000000000000
--- a/include/linux/amba/clcd-regs.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef AMBA_CLCD_REGS_H
-#define AMBA_CLCD_REGS_H
-
-/*
- * CLCD Controller Internal Register addresses
- */
-#define CLCD_TIM0 0x00000000
-#define CLCD_TIM1 0x00000004
-#define CLCD_TIM2 0x00000008
-#define CLCD_TIM3 0x0000000c
-#define CLCD_UBAS 0x00000010
-#define CLCD_LBAS 0x00000014
-
-#define CLCD_PL110_IENB 0x00000018
-#define CLCD_PL110_CNTL 0x0000001c
-#define CLCD_PL110_STAT 0x00000020
-#define CLCD_PL110_INTR 0x00000024
-#define CLCD_PL110_UCUR 0x00000028
-#define CLCD_PL110_LCUR 0x0000002C
-
-#define CLCD_PL111_CNTL 0x00000018
-#define CLCD_PL111_IENB 0x0000001c
-#define CLCD_PL111_RIS 0x00000020
-#define CLCD_PL111_MIS 0x00000024
-#define CLCD_PL111_ICR 0x00000028
-#define CLCD_PL111_UCUR 0x0000002c
-#define CLCD_PL111_LCUR 0x00000030
-
-#define CLCD_PALL 0x00000200
-#define CLCD_PALETTE 0x00000200
-
-#define TIM2_PCD_LO_MASK GENMASK(4, 0)
-#define TIM2_PCD_LO_BITS 5
-#define TIM2_CLKSEL (1 << 5)
-#define TIM2_IVS (1 << 11)
-#define TIM2_IHS (1 << 12)
-#define TIM2_IPC (1 << 13)
-#define TIM2_IOE (1 << 14)
-#define TIM2_BCD (1 << 26)
-#define TIM2_PCD_HI_MASK GENMASK(31, 27)
-#define TIM2_PCD_HI_BITS 5
-#define TIM2_PCD_HI_SHIFT 27
-
-#define CNTL_LCDEN (1 << 0)
-#define CNTL_LCDBPP1 (0 << 1)
-#define CNTL_LCDBPP2 (1 << 1)
-#define CNTL_LCDBPP4 (2 << 1)
-#define CNTL_LCDBPP8 (3 << 1)
-#define CNTL_LCDBPP16 (4 << 1)
-#define CNTL_LCDBPP16_565 (6 << 1)
-#define CNTL_LCDBPP16_444 (7 << 1)
-#define CNTL_LCDBPP24 (5 << 1)
-#define CNTL_LCDBW (1 << 4)
-#define CNTL_LCDTFT (1 << 5)
-#define CNTL_LCDMONO8 (1 << 6)
-#define CNTL_LCDDUAL (1 << 7)
-#define CNTL_BGR (1 << 8)
-#define CNTL_BEBO (1 << 9)
-#define CNTL_BEPO (1 << 10)
-#define CNTL_LCDPWR (1 << 11)
-#define CNTL_LCDVCOMP(x) ((x) << 12)
-#define CNTL_LDMAFIFOTIME (1 << 15)
-#define CNTL_WATERMARK (1 << 16)
-
-/* ST Microelectronics variant bits */
-#define CNTL_ST_1XBPP_444 0x0
-#define CNTL_ST_1XBPP_5551 (1 << 17)
-#define CNTL_ST_1XBPP_565 (1 << 18)
-#define CNTL_ST_CDWID_12 0x0
-#define CNTL_ST_CDWID_16 (1 << 19)
-#define CNTL_ST_CDWID_18 (1 << 20)
-#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20))
-#define CNTL_ST_CEAEN (1 << 21)
-#define CNTL_ST_LCDBPP24_PACKED (6 << 1)
-
-#endif /* AMBA_CLCD_REGS_H */
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
deleted file mode 100644
index d0c3be77c18e..000000000000
--- a/include/linux/amba/clcd.h
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel.
- *
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-#include <linux/fb.h>
-#include <linux/amba/clcd-regs.h>
-
-enum {
- /* individual formats */
- CLCD_CAP_RGB444 = (1 << 0),
- CLCD_CAP_RGB5551 = (1 << 1),
- CLCD_CAP_RGB565 = (1 << 2),
- CLCD_CAP_RGB888 = (1 << 3),
- CLCD_CAP_BGR444 = (1 << 4),
- CLCD_CAP_BGR5551 = (1 << 5),
- CLCD_CAP_BGR565 = (1 << 6),
- CLCD_CAP_BGR888 = (1 << 7),
-
- /* connection layouts */
- CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444,
- CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551,
- CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565,
- CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888,
-
- /* red/blue ordering */
- CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 |
- CLCD_CAP_RGB565 | CLCD_CAP_RGB888,
- CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 |
- CLCD_CAP_BGR565 | CLCD_CAP_BGR888,
-
- CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB,
-};
-
-struct backlight_device;
-
-struct clcd_panel {
- struct fb_videomode mode;
- signed short width; /* width in mm */
- signed short height; /* height in mm */
- u32 tim2;
- u32 tim3;
- u32 cntl;
- u32 caps;
- unsigned int bpp:8,
- fixedtimings:1,
- grayscale:1;
- unsigned int connector;
- struct backlight_device *backlight;
- /*
- * If the B/R lines are switched between the CLCD
- * and the panel we need to know this and not try to
- * compensate with the BGR bit in the control register.
- */
- bool bgr_connection;
-};
-
-struct clcd_regs {
- u32 tim0;
- u32 tim1;
- u32 tim2;
- u32 tim3;
- u32 cntl;
- unsigned long pixclock;
-};
-
-struct clcd_fb;
-
-/*
- * the board-type specific routines
- */
-struct clcd_board {
- const char *name;
-
- /*
- * Optional. Hardware capability flags.
- */
- u32 caps;
-
- /*
- * Optional. Check whether the var structure is acceptable
- * for this display.
- */
- int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var);
-
- /*
- * Compulsory. Decode fb->fb.var into regs->*. In the case of
- * fixed timing, set regs->* to the register values required.
- */
- void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs);
-
- /*
- * Optional. Disable any extra display hardware.
- */
- void (*disable)(struct clcd_fb *);
-
- /*
- * Optional. Enable any extra display hardware.
- */
- void (*enable)(struct clcd_fb *);
-
- /*
- * Setup platform specific parts of CLCD driver
- */
- int (*setup)(struct clcd_fb *);
-
- /*
- * mmap the framebuffer memory
- */
- int (*mmap)(struct clcd_fb *, struct vm_area_struct *);
-
- /*
- * Remove platform specific parts of CLCD driver
- */
- void (*remove)(struct clcd_fb *);
-};
-
-struct amba_device;
-struct clk;
-
-/**
- * struct clcd_vendor_data - holds hardware (IP-block) vendor-specific
- * variant information
- *
- * @clock_timregs: the CLCD needs to be clocked when accessing the
- * timer registers, or the hardware will hang.
- * @packed_24_bit_pixels: this variant supports 24bit packed pixel data,
- * so that RGB accesses 3 bytes at a time, not just on even 32bit
- * boundaries, packing the pixel data in memory. ST Microelectronics
- * have this.
- * @st_bitmux_control: ST Microelectronics have implemented output
- * bit line multiplexing into the CLCD control register. This indicates
- * that we need to use this.
- * @init_board: custom board init function for this variant
- * @init_panel: custom panel init function for this variant
- */
-struct clcd_vendor_data {
- bool clock_timregs;
- bool packed_24_bit_pixels;
- bool st_bitmux_control;
- int (*init_board)(struct amba_device *adev,
- struct clcd_board *board);
- int (*init_panel)(struct clcd_fb *fb,
- struct device_node *panel);
-};
-
-/* this data structure describes each frame buffer device we find */
-struct clcd_fb {
- struct fb_info fb;
- struct amba_device *dev;
- struct clk *clk;
- struct clcd_vendor_data *vendor;
- struct clcd_panel *panel;
- struct clcd_board *board;
- void *board_data;
- void __iomem *regs;
- u16 off_ienb;
- u16 off_cntl;
- u32 clcd_cntl;
- u32 cmap[16];
- bool clk_enabled;
-};
-
-static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val, cpl;
-
- /*
- * Program the CLCD controller registers and start the CLCD
- */
- val = ((var->xres / 16) - 1) << 2;
- val |= (var->hsync_len - 1) << 8;
- val |= (var->right_margin - 1) << 16;
- val |= (var->left_margin - 1) << 24;
- regs->tim0 = val;
-
- val = var->yres;
- if (fb->panel->cntl & CNTL_LCDDUAL)
- val /= 2;
- val -= 1;
- val |= (var->vsync_len - 1) << 10;
- val |= var->lower_margin << 16;
- val |= var->upper_margin << 24;
- regs->tim1 = val;
-
- val = fb->panel->tim2;
- val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
- val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
-
- cpl = var->xres_virtual;
- if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */
- /* / 1 */;
- else if (!var->grayscale) /* STN color */
- cpl = cpl * 8 / 3;
- else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */
- cpl /= 8;
- else /* STN monochrome, 4bit */
- cpl /= 4;
-
- regs->tim2 = val | ((cpl - 1) << 16);
-
- regs->tim3 = fb->panel->tim3;
-
- val = fb->panel->cntl;
- if (var->grayscale)
- val |= CNTL_LCDBW;
-
- if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) {
- /*
- * if board and panel supply capabilities, we can support
- * changing BGR/RGB depending on supplied parameters. Here
- * we switch to what the framebuffer is providing if need
- * be, so if the framebuffer is BGR but the display connection
- * is RGB (first case) we switch it around. Vice versa mutatis
- * mutandis if the framebuffer is RGB but the display connection
- * is BGR, we flip it around.
- */
- if (var->red.offset == 0)
- val &= ~CNTL_BGR;
- else
- val |= CNTL_BGR;
- if (fb->panel->bgr_connection)
- val ^= CNTL_BGR;
- }
-
- switch (var->bits_per_pixel) {
- case 1:
- val |= CNTL_LCDBPP1;
- break;
- case 2:
- val |= CNTL_LCDBPP2;
- break;
- case 4:
- val |= CNTL_LCDBPP4;
- break;
- case 8:
- val |= CNTL_LCDBPP8;
- break;
- case 16:
- /*
- * PL110 cannot choose between 5551 and 565 modes in its
- * control register. It is possible to use 565 with
- * custom external wiring.
- */
- if (amba_part(fb->dev) == 0x110 ||
- var->green.length == 5)
- val |= CNTL_LCDBPP16;
- else if (var->green.length == 6)
- val |= CNTL_LCDBPP16_565;
- else
- val |= CNTL_LCDBPP16_444;
- break;
- case 24:
- /* Modified variant supporting 24 bit packed pixels */
- val |= CNTL_ST_LCDBPP24_PACKED;
- break;
- case 32:
- val |= CNTL_LCDBPP24;
- break;
- }
-
- regs->cntl = val;
- regs->pixclock = var->pixclock;
-}
-
-static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
-{
- var->xres_virtual = var->xres = (var->xres + 15) & ~15;
- var->yres_virtual = var->yres = (var->yres + 1) & ~1;
-
-#define CHECK(e,l,h) (var->e < l || var->e > h)
- if (CHECK(right_margin, (5+1), 256) || /* back porch */
- CHECK(left_margin, (5+1), 256) || /* front porch */
- CHECK(hsync_len, (5+1), 256) ||
- var->xres > 4096 ||
- var->lower_margin > 255 || /* back porch */
- var->upper_margin > 255 || /* front porch */
- var->vsync_len > 32 ||
- var->yres > 1024)
- return -EINVAL;
-#undef CHECK
-
- /* single panel mode: PCD = max(PCD, 1) */
- /* dual panel mode: PCD = max(PCD, 5) */
-
- /*
- * You can't change the grayscale setting, and
- * we can only do non-interlaced video.
- */
- if (var->grayscale != fb->fb.var.grayscale ||
- (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
- return -EINVAL;
-
-#define CHECK(e) (var->e != fb->fb.var.e)
- if (fb->panel->fixedtimings &&
- (CHECK(xres) ||
- CHECK(yres) ||
- CHECK(bits_per_pixel) ||
- CHECK(pixclock) ||
- CHECK(left_margin) ||
- CHECK(right_margin) ||
- CHECK(upper_margin) ||
- CHECK(lower_margin) ||
- CHECK(hsync_len) ||
- CHECK(vsync_len) ||
- CHECK(sync)))
- return -EINVAL;
-#undef CHECK
-
- var->nonstd = 0;
- var->accel_flags = 0;
-
- return 0;
-}
diff --git a/include/linux/amba/kmi.h b/include/linux/amba/kmi.h
index a39e5be751b3..94dd727f1aea 100644
--- a/include/linux/amba/kmi.h
+++ b/include/linux/amba/kmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/include/asm-arm/hardware/amba_kmi.h
*
@@ -5,21 +6,6 @@
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
* ---------------------------------------------------------------------------
* From ARM PrimeCell(tm) PS2 Keyboard/Mouse Interface (PL050) Technical
* Reference Manual - ARM DDI 0143B - see http://www.arm.com/
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 8c98113069ce..6f96dc2209c0 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/amba/mmci.h
*/
@@ -12,25 +13,12 @@
* @ocr_mask: available voltages on the 4 pins from the block, this
* is ignored if a regulator is used, see the MMC_VDD_* masks in
* mmc/host.h
- * @ios_handler: a callback function to act on specfic ios changes,
- * used for example to control a levelshifter
- * mask into a value to be binary (or set some other custom bits
- * in MMCIPWR) or:ed and written into the MMCIPWR register of the
- * block. May also control external power based on the power_mode.
- * @status: if no GPIO read function was given to the block in
- * gpio_wp (below) this function will be called to determine
- * whether a card is present in the MMC slot or not
- * @gpio_wp: read this GPIO pin to see if the card is write protected
- * @gpio_cd: read this GPIO pin to detect card insertion
- * @cd_invert: true if the gpio_cd pin value is active low
+ * @status: if no GPIO line was given to the block, this function will be
+ * called to determine whether a card is present in the MMC slot or not
*/
struct mmci_platform_data {
unsigned int ocr_mask;
- int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
- int gpio_wp;
- int gpio_cd;
- bool cd_invert;
};
#endif
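With the GPIO fields gone, a board file supplies at most an OCR mask and an optional status callback; a hedged sketch with hypothetical names:

static unsigned int my_mmc_status(struct device *dev)
{
	/* return nonzero when a card is present in the slot */
	return 1;
}

static struct mmci_platform_data my_mmci_pdata = {
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	.status		= my_mmc_status,
};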
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 854b7294f6c6..d7b07d0311e1 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/amba/pl022.h
*
@@ -10,21 +11,12 @@
* linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
* Initial adoption to PL022 by:
* Sachin Verma <sachin.verma@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _SSP_PL022_H
#define _SSP_PL022_H
+#include <linux/dmaengine.h>
#include <linux/types.h>
/**
@@ -232,11 +224,8 @@ struct dma_chan;
/**
* struct pl022_ssp_master - device.platform_data for SPI controller devices.
* @bus_id: identifier for this bus
- * @num_chipselect: chipselects are used to distinguish individual
- * SPI slaves, and are numbered from zero to num_chipselects - 1.
- * each slave has a chipselect signal, but it's common that not
- * every chipselect is connected to a slave.
* @enable_dma: if true enables DMA driven transfers.
+ * @dma_filter: callback filter for dma_request_channel.
* @dma_rx_param: parameter to locate an RX DMA channel.
* @dma_tx_param: parameter to locate a TX DMA channel.
* @autosuspend_delay: delay in ms following transfer completion before the
@@ -244,18 +233,15 @@ struct dma_chan;
* indicates no delay and the device will be suspended immediately.
* @rt: indicates the controller should run the message pump with realtime
* priority to minimise the transfer latency on the bus.
- * @chipselects: list of <num_chipselects> chip select gpios
*/
struct pl022_ssp_controller {
u16 bus_id;
- u8 num_chipselect;
u8 enable_dma:1;
- bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ dma_filter_fn dma_filter;
void *dma_rx_param;
void *dma_tx_param;
int autosuspend_delay;
bool rt;
- int *chipselects;
};
/**
@@ -274,8 +260,6 @@ struct pl022_ssp_controller {
* @duplex: Microwire interface: Full/Half duplex
* @clkdelay: on the PL023 variant, the delay in feedback clock cycles
* before sampling the incoming line
- * @cs_control: function pointer to board-specific function to
- * assert/deassert I/O port to control HW generation of devices chip-select.
*/
struct pl022_config_chip {
enum ssp_interface iface;
@@ -289,7 +273,6 @@ struct pl022_config_chip {
enum ssp_microwire_wait_state wait_state;
enum ssp_duplex duplex;
enum ssp_clkdelay clkdelay;
- void (*cs_control) (u32 control);
};
#endif /* _SSP_PL022_H */
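After the chip-select and cs_control removals, controller platform data reduces to bus and DMA configuration; a hedged sketch that assumes a PL08x DMA engine supplies pl08x_filter_id, with illustrative channel names:

static struct pl022_ssp_controller my_ssp_pdata = {
	.bus_id			= 0,
	.enable_dma		= 1,
	.dma_filter		= pl08x_filter_id,	/* from linux/amba/pl08x.h */
	.dma_rx_param		= "ssp0 rx",		/* illustrative channel names */
	.dma_tx_param		= "ssp0 tx",
	.autosuspend_delay	= 100,			/* ms */
};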
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index ab036b6b1804..e192d546639b 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* include/linux/amba/pl080.h
*
* Copyright 2008 Openmoko, Inc.
@@ -6,10 +7,6 @@
* Ben Dooks <ben@simtec.co.uk>
*
* ARM PrimeCell PL080 DMA controller
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* Note, there are some Samsung updates to this controller block which
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 79d1bcee738d..3100e0debcdd 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
*
* Copyright (C) 2005 ARM Ltd
* Copyright (C) 2010 ST-Ericsson SA
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* pl08x information required by platform code
*
* Please credit ARM.com
diff --git a/include/linux/amba/pl093.h b/include/linux/amba/pl093.h
deleted file mode 100644
index 2983e3671adb..000000000000
--- a/include/linux/amba/pl093.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* linux/amba/pl093.h
- *
- * Copyright (c) 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * AMBA PL093 SSMC (synchronous static memory controller)
- * See DDI0236.pdf (r0p4) for more details
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */
-
-/* Offsets for SMBxxxxRy registers */
-
-#define SMBIDCYR (0x00)
-#define SMBWSTRDR (0x04)
-#define SMBWSTWRR (0x08)
-#define SMBWSTOENR (0x0C)
-#define SMBWSTWENR (0x10)
-#define SMBCR (0x14)
-#define SMBSR (0x18)
-#define SMBWSTBRDR (0x1C)
-
-/* Masks for SMB registers */
-#define IDCY_MASK (0xf)
-#define WSTRD_MASK (0xf)
-#define WSTWR_MASK (0xf)
-#define WSTOEN_MASK (0xf)
-#define WSTWEN_MASK (0xf)
-
-/* Notes from datasheet:
- * WSTOEN <= WSTRD
- * WSTWEN <= WSTWR
- *
- * WSTOEN is not used with nWAIT
- */
-
-/* SMBCR bit definitions */
-#define SMBCR_BIWRITEEN (1 << 21)
-#define SMBCR_ADDRVALIDWRITEEN (1 << 20)
-#define SMBCR_SYNCWRITE (1 << 17)
-#define SMBCR_BMWRITE (1 << 16)
-#define SMBCR_WRAPREAD (1 << 14)
-#define SMBCR_BIREADEN (1 << 13)
-#define SMBCR_ADDRVALIDREADEN (1 << 12)
-#define SMBCR_SYNCREAD (1 << 9)
-#define SMBCR_BMREAD (1 << 8)
-#define SMBCR_SMBLSPOL (1 << 6)
-#define SMBCR_WP (1 << 3)
-#define SMBCR_WAITEN (1 << 2)
-#define SMBCR_WAITPOL (1 << 1)
-#define SMBCR_RBLE (1 << 0)
-
-#define SMBCR_BURSTLENWRITE_MASK (3 << 18)
-#define SMBCR_BURSTLENWRITE_4 (0 << 18)
-#define SMBCR_BURSTLENWRITE_8 (1 << 18)
-#define SMBCR_BURSTLENWRITE_RESERVED (2 << 18)
-#define SMBCR_BURSTLENWRITE_CONTINUOUS (3 << 18)
-
-#define SMBCR_BURSTLENREAD_MASK (3 << 10)
-#define SMBCR_BURSTLENREAD_4 (0 << 10)
-#define SMBCR_BURSTLENREAD_8 (1 << 10)
-#define SMBCR_BURSTLENREAD_16 (2 << 10)
-#define SMBCR_BURSTLENREAD_CONTINUOUS (3 << 10)
-
-#define SMBCR_MW_MASK (3 << 4)
-#define SMBCR_MW_8BIT (0 << 4)
-#define SMBCR_MW_16BIT (1 << 4)
-#define SMBCR_MW_M32BIT (2 << 4)
-
-/* SSMC status registers */
-#define SSMCCSR (0x200)
-#define SSMCCR (0x204)
-#define SSMCITCR (0x208)
-#define SSMCITIP (0x20C)
-#define SSMCITIOP (0x210)
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index ad0965e21a5e..9120de05ead0 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/include/asm-arm/hardware/serial_amba.h
*
@@ -5,24 +6,15 @@
*
* Copyright (C) ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
+#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#endif
+
#include <linux/types.h>
/* -------------------------------------------------------------------------------
@@ -83,141 +75,145 @@
#define ZX_UART011_ICR 0x4c
#define ZX_UART011_DMACR 0x50
-#define UART011_DR_OE (1 << 11)
-#define UART011_DR_BE (1 << 10)
-#define UART011_DR_PE (1 << 9)
-#define UART011_DR_FE (1 << 8)
-
-#define UART01x_RSR_OE 0x08
-#define UART01x_RSR_BE 0x04
-#define UART01x_RSR_PE 0x02
-#define UART01x_RSR_FE 0x01
-
-#define UART011_FR_RI 0x100
-#define UART011_FR_TXFE 0x080
-#define UART011_FR_RXFF 0x040
-#define UART01x_FR_TXFF 0x020
-#define UART01x_FR_RXFE 0x010
-#define UART01x_FR_BUSY 0x008
-#define UART01x_FR_DCD 0x004
-#define UART01x_FR_DSR 0x002
-#define UART01x_FR_CTS 0x001
+#define UART011_DR_OE BIT(11)
+#define UART011_DR_BE BIT(10)
+#define UART011_DR_PE BIT(9)
+#define UART011_DR_FE BIT(8)
+
+#define UART01x_RSR_OE BIT(3)
+#define UART01x_RSR_BE BIT(2)
+#define UART01x_RSR_PE BIT(1)
+#define UART01x_RSR_FE BIT(0)
+
+#define UART011_FR_RI BIT(8)
+#define UART011_FR_TXFE BIT(7)
+#define UART011_FR_RXFF BIT(6)
+#define UART01x_FR_TXFF (1 << 5) /* used in ASM */
+#define UART01x_FR_RXFE BIT(4)
+#define UART01x_FR_BUSY (1 << 3) /* used in ASM */
+#define UART01x_FR_DCD BIT(2)
+#define UART01x_FR_DSR BIT(1)
+#define UART01x_FR_CTS BIT(0)
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
/*
* Some bits of the Flag Register on ZTE devices sit at positions different
* from the standard ones.
*/
-#define ZX_UART01x_FR_BUSY 0x100
-#define ZX_UART01x_FR_DSR 0x008
-#define ZX_UART01x_FR_CTS 0x002
-#define ZX_UART011_FR_RI 0x001
-
-#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
-#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */
-#define UART011_CR_OUT2 0x2000 /* OUT2 */
-#define UART011_CR_OUT1 0x1000 /* OUT1 */
-#define UART011_CR_RTS 0x0800 /* RTS */
-#define UART011_CR_DTR 0x0400 /* DTR */
-#define UART011_CR_RXE 0x0200 /* receive enable */
-#define UART011_CR_TXE 0x0100 /* transmit enable */
-#define UART011_CR_LBE 0x0080 /* loopback enable */
-#define UART010_CR_RTIE 0x0040
-#define UART010_CR_TIE 0x0020
-#define UART010_CR_RIE 0x0010
-#define UART010_CR_MSIE 0x0008
-#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */
-#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */
-#define UART01x_CR_SIREN 0x0002 /* SIR enable */
-#define UART01x_CR_UARTEN 0x0001 /* UART enable */
-
-#define UART011_LCRH_SPS 0x80
+#define ZX_UART01x_FR_BUSY BIT(8)
+#define ZX_UART01x_FR_DSR BIT(3)
+#define ZX_UART01x_FR_CTS BIT(1)
+#define ZX_UART011_FR_RI BIT(0)
+
+#define UART011_CR_CTSEN BIT(15) /* CTS hardware flow control */
+#define UART011_CR_RTSEN BIT(14) /* RTS hardware flow control */
+#define UART011_CR_OUT2 BIT(13) /* OUT2 */
+#define UART011_CR_OUT1 BIT(12) /* OUT1 */
+#define UART011_CR_RTS BIT(11) /* RTS */
+#define UART011_CR_DTR BIT(10) /* DTR */
+#define UART011_CR_RXE BIT(9) /* receive enable */
+#define UART011_CR_TXE BIT(8) /* transmit enable */
+#define UART011_CR_LBE BIT(7) /* loopback enable */
+#define UART010_CR_RTIE BIT(6)
+#define UART010_CR_TIE BIT(5)
+#define UART010_CR_RIE BIT(4)
+#define UART010_CR_MSIE BIT(3)
+#define ST_UART011_CR_OVSFACT BIT(3) /* Oversampling factor */
+#define UART01x_CR_IIRLP BIT(2) /* SIR low power mode */
+#define UART01x_CR_SIREN BIT(1) /* SIR enable */
+#define UART01x_CR_UARTEN BIT(0) /* UART enable */
+
+#define UART011_LCRH_SPS BIT(7)
#define UART01x_LCRH_WLEN_8 0x60
#define UART01x_LCRH_WLEN_7 0x40
#define UART01x_LCRH_WLEN_6 0x20
#define UART01x_LCRH_WLEN_5 0x00
-#define UART01x_LCRH_FEN 0x10
-#define UART01x_LCRH_STP2 0x08
-#define UART01x_LCRH_EPS 0x04
-#define UART01x_LCRH_PEN 0x02
-#define UART01x_LCRH_BRK 0x01
-
-#define ST_UART011_DMAWM_RX_1 (0 << 3)
-#define ST_UART011_DMAWM_RX_2 (1 << 3)
-#define ST_UART011_DMAWM_RX_4 (2 << 3)
-#define ST_UART011_DMAWM_RX_8 (3 << 3)
-#define ST_UART011_DMAWM_RX_16 (4 << 3)
-#define ST_UART011_DMAWM_RX_32 (5 << 3)
-#define ST_UART011_DMAWM_RX_48 (6 << 3)
-#define ST_UART011_DMAWM_TX_1 0
-#define ST_UART011_DMAWM_TX_2 1
-#define ST_UART011_DMAWM_TX_4 2
-#define ST_UART011_DMAWM_TX_8 3
-#define ST_UART011_DMAWM_TX_16 4
-#define ST_UART011_DMAWM_TX_32 5
-#define ST_UART011_DMAWM_TX_48 6
-
-#define UART010_IIR_RTIS 0x08
-#define UART010_IIR_TIS 0x04
-#define UART010_IIR_RIS 0x02
-#define UART010_IIR_MIS 0x01
-
-#define UART011_IFLS_RX1_8 (0 << 3)
-#define UART011_IFLS_RX2_8 (1 << 3)
-#define UART011_IFLS_RX4_8 (2 << 3)
-#define UART011_IFLS_RX6_8 (3 << 3)
-#define UART011_IFLS_RX7_8 (4 << 3)
-#define UART011_IFLS_TX1_8 (0 << 0)
-#define UART011_IFLS_TX2_8 (1 << 0)
-#define UART011_IFLS_TX4_8 (2 << 0)
-#define UART011_IFLS_TX6_8 (3 << 0)
-#define UART011_IFLS_TX7_8 (4 << 0)
+#define UART01x_LCRH_FEN BIT(4)
+#define UART01x_LCRH_STP2 BIT(3)
+#define UART01x_LCRH_EPS BIT(2)
+#define UART01x_LCRH_PEN BIT(1)
+#define UART01x_LCRH_BRK BIT(0)
+
+#define ST_UART011_DMAWM_RX GENMASK(5, 3)
+#define ST_UART011_DMAWM_RX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 0)
+#define ST_UART011_DMAWM_RX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 1)
+#define ST_UART011_DMAWM_RX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 2)
+#define ST_UART011_DMAWM_RX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 3)
+#define ST_UART011_DMAWM_RX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 4)
+#define ST_UART011_DMAWM_RX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 5)
+#define ST_UART011_DMAWM_RX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 6)
+#define ST_UART011_DMAWM_TX GENMASK(2, 0)
+#define ST_UART011_DMAWM_TX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 0)
+#define ST_UART011_DMAWM_TX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 1)
+#define ST_UART011_DMAWM_TX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 2)
+#define ST_UART011_DMAWM_TX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 3)
+#define ST_UART011_DMAWM_TX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 4)
+#define ST_UART011_DMAWM_TX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 5)
+#define ST_UART011_DMAWM_TX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 6)
+
+#define UART010_IIR_RTIS BIT(3)
+#define UART010_IIR_TIS BIT(2)
+#define UART010_IIR_RIS BIT(1)
+#define UART010_IIR_MIS BIT(0)
+
+#define UART011_IFLS_RXIFLSEL GENMASK(5, 3)
+#define UART011_IFLS_RX1_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 0)
+#define UART011_IFLS_RX2_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 1)
+#define UART011_IFLS_RX4_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 2)
+#define UART011_IFLS_RX6_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 3)
+#define UART011_IFLS_RX7_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 4)
+#define UART011_IFLS_TXIFLSEL GENMASK(2, 0)
+#define UART011_IFLS_TX1_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 0)
+#define UART011_IFLS_TX2_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 1)
+#define UART011_IFLS_TX4_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 2)
+#define UART011_IFLS_TX6_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 3)
+#define UART011_IFLS_TX7_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 4)
/* special values for ST vendor with deeper fifo */
-#define UART011_IFLS_RX_HALF (5 << 3)
-#define UART011_IFLS_TX_HALF (5 << 0)
-
-#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */
-#define UART011_BEIM (1 << 9) /* break error interrupt mask */
-#define UART011_PEIM (1 << 8) /* parity error interrupt mask */
-#define UART011_FEIM (1 << 7) /* framing error interrupt mask */
-#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */
-#define UART011_TXIM (1 << 5) /* transmit interrupt mask */
-#define UART011_RXIM (1 << 4) /* receive interrupt mask */
-#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */
-#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */
-#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */
-#define UART011_RIMIM (1 << 0) /* RI interrupt mask */
-
-#define UART011_OEIS (1 << 10) /* overrun error interrupt status */
-#define UART011_BEIS (1 << 9) /* break error interrupt status */
-#define UART011_PEIS (1 << 8) /* parity error interrupt status */
-#define UART011_FEIS (1 << 7) /* framing error interrupt status */
-#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */
-#define UART011_TXIS (1 << 5) /* transmit interrupt status */
-#define UART011_RXIS (1 << 4) /* receive interrupt status */
-#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */
-#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */
-#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */
-#define UART011_RIMIS (1 << 0) /* RI interrupt status */
-
-#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */
-#define UART011_BEIC (1 << 9) /* break error interrupt clear */
-#define UART011_PEIC (1 << 8) /* parity error interrupt clear */
-#define UART011_FEIC (1 << 7) /* framing error interrupt clear */
-#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */
-#define UART011_TXIC (1 << 5) /* transmit interrupt clear */
-#define UART011_RXIC (1 << 4) /* receive interrupt clear */
-#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */
-#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */
-#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */
-#define UART011_RIMIC (1 << 0) /* RI interrupt clear */
-
-#define UART011_DMAONERR (1 << 2) /* disable dma on error */
-#define UART011_TXDMAE (1 << 1) /* enable transmit dma */
-#define UART011_RXDMAE (1 << 0) /* enable receive dma */
-
-#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE)
-#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
+#define UART011_IFLS_RX_HALF FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 5)
+#define UART011_IFLS_TX_HALF FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 5)
+
+#define UART011_OEIM BIT(10) /* overrun error interrupt mask */
+#define UART011_BEIM BIT(9) /* break error interrupt mask */
+#define UART011_PEIM BIT(8) /* parity error interrupt mask */
+#define UART011_FEIM BIT(7) /* framing error interrupt mask */
+#define UART011_RTIM BIT(6) /* receive timeout interrupt mask */
+#define UART011_TXIM BIT(5) /* transmit interrupt mask */
+#define UART011_RXIM BIT(4) /* receive interrupt mask */
+#define UART011_DSRMIM BIT(3) /* DSR interrupt mask */
+#define UART011_DCDMIM BIT(2) /* DCD interrupt mask */
+#define UART011_CTSMIM BIT(1) /* CTS interrupt mask */
+#define UART011_RIMIM BIT(0) /* RI interrupt mask */
+
+#define UART011_OEIS BIT(10) /* overrun error interrupt status */
+#define UART011_BEIS BIT(9) /* break error interrupt status */
+#define UART011_PEIS BIT(8) /* parity error interrupt status */
+#define UART011_FEIS BIT(7) /* framing error interrupt status */
+#define UART011_RTIS BIT(6) /* receive timeout interrupt status */
+#define UART011_TXIS BIT(5) /* transmit interrupt status */
+#define UART011_RXIS BIT(4) /* receive interrupt status */
+#define UART011_DSRMIS BIT(3) /* DSR interrupt status */
+#define UART011_DCDMIS BIT(2) /* DCD interrupt status */
+#define UART011_CTSMIS BIT(1) /* CTS interrupt status */
+#define UART011_RIMIS BIT(0) /* RI interrupt status */
+
+#define UART011_OEIC BIT(10) /* overrun error interrupt clear */
+#define UART011_BEIC BIT(9) /* break error interrupt clear */
+#define UART011_PEIC BIT(8) /* parity error interrupt clear */
+#define UART011_FEIC BIT(7) /* framing error interrupt clear */
+#define UART011_RTIC BIT(6) /* receive timeout interrupt clear */
+#define UART011_TXIC BIT(5) /* transmit interrupt clear */
+#define UART011_RXIC BIT(4) /* receive interrupt clear */
+#define UART011_DSRMIC BIT(3) /* DSR interrupt clear */
+#define UART011_DCDMIC BIT(2) /* DCD interrupt clear */
+#define UART011_CTSMIC BIT(1) /* CTS interrupt clear */
+#define UART011_RIMIC BIT(0) /* RI interrupt clear */
+
+#define UART011_DMAONERR BIT(2) /* disable dma on error */
+#define UART011_TXDMAE BIT(1) /* enable transmit dma */
+#define UART011_RXDMAE BIT(0) /* enable receive dma */
+
+#define UART01x_RSR_ANY (UART01x_RSR_OE | UART01x_RSR_BE | UART01x_RSR_PE | UART01x_RSR_FE)
+#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD | UART01x_FR_DSR | UART01x_FR_CTS)
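The GENMASK()/FIELD_PREP_CONST() encoding makes the field arithmetic explicit; as a sketch, both initializers below yield the same IFLS value:

/* RX interrupt at 4/8 full, TX interrupt at 4/8 empty */
u32 ifls = UART011_IFLS_RX4_8 | UART011_IFLS_TX4_8;

/* identical value built directly from the field masks */
u32 ifls_raw = FIELD_PREP(UART011_IFLS_RXIFLSEL, 2) |
	       FIELD_PREP(UART011_IFLS_TXIFLSEL, 2);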
#ifndef __ASSEMBLY__
struct amba_device; /* in uncompress this is included but amba/bus.h is not */
@@ -233,8 +229,8 @@ struct amba_pl011_data {
bool dma_rx_poll_enable;
unsigned int dma_rx_poll_rate;
unsigned int dma_rx_poll_timeout;
- void (*init) (void);
- void (*exit) (void);
+ void (*init)(void);
+ void (*exit)(void);
};
#endif
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 09751d349963..edcee9f5335a 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_X86_AMD_IOMMU_H
@@ -22,169 +10,18 @@
#include <linux/types.h>
-/*
- * This is mainly used to communicate information back-and-forth
- * between SVM and IOMMU for setting up and tearing down posted
- * interrupt
- */
-struct amd_iommu_pi_data {
- u32 ga_tag;
- u32 prev_ga_tag;
- u64 base;
- bool is_guest_mode;
- struct vcpu_data *vcpu_data;
- void *ir_data;
-};
+struct amd_iommu;
#ifdef CONFIG_AMD_IOMMU
struct task_struct;
struct pci_dev;
-extern int amd_iommu_detect(void);
-extern int amd_iommu_init_hardware(void);
-
-/**
- * amd_iommu_enable_device_erratum() - Enable erratum workaround for device
- * in the IOMMUv2 driver
- * @pdev: The PCI device the workaround is necessary for
- * @erratum: The erratum workaround to enable
- *
- * The function needs to be called before amd_iommu_init_device().
- * Possible values for the erratum number are for now:
- * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI
- * is enabled
- * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI
- * requests to one
- */
-#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0
-#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1
-
-extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum);
-
-/**
- * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
- * @pdev: The PCI device to initialize
- * @pasids: Number of PASIDs to support for this device
- *
- * This function does all setup for the device pdev so that it can be
- * used with IOMMUv2.
- * Returns 0 on success or negative value on error.
- */
-extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids);
-
-/**
- * amd_iommu_free_device() - Free all IOMMUv2 related device resources
- * and disable IOMMUv2 usage for this device
- * @pdev: The PCI device to disable IOMMUv2 usage for'
- */
-extern void amd_iommu_free_device(struct pci_dev *pdev);
-
-/**
- * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device
- * @pdev: The PCI device to bind the task to
- * @pasid: The PASID on the device the task should be bound to
- * @task: the task to bind
- *
- * The function returns 0 on success or a negative value on error.
- */
-extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
- struct task_struct *task);
-
-/**
- * amd_iommu_unbind_pasid() - Unbind a PASID from its task on
- * a device
- * @pdev: The device of the PASID
- * @pasid: The PASID to unbind
- *
- * When this function returns the device is no longer using the PASID
- * and the PASID is no longer bound to its task.
- */
-extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid);
-
-/**
- * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
- * PRI requests
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- *
- * The IOMMUv2 driver invokes this call-back when it is unable to
- * successfully handle a PRI request. The device driver can then decide
- * which PRI response the device should see. Possible return values for
- * the call-back are:
- *
- * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device
- * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device
- * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device,
- * the device is required to disable
- * PRI when it receives this response
- *
- * The function returns 0 on success or negative value on error.
- */
-#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0
-#define AMD_IOMMU_INV_PRI_RSP_INVALID 1
-#define AMD_IOMMU_INV_PRI_RSP_FAIL 2
-
-typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
- int pasid,
- unsigned long address,
- u16);
-
-extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
- amd_iommu_invalid_ppr_cb cb);
-
-#define PPR_FAULT_EXEC (1 << 1)
-#define PPR_FAULT_READ (1 << 2)
-#define PPR_FAULT_WRITE (1 << 5)
-#define PPR_FAULT_USER (1 << 6)
-#define PPR_FAULT_RSVD (1 << 7)
-#define PPR_FAULT_GN (1 << 8)
-
-/**
- * amd_iommu_device_info() - Get information about IOMMUv2 support of a
- * PCI device
- * @pdev: PCI device to query information from
- * @info: A pointer to an amd_iommu_device_info structure which will contain
- * the information about the PCI device
- *
- * Returns 0 on success, negative value on error
- */
-
-#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */
-#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution
- on memory pages */
-#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request
- super-user privileges */
-
-struct amd_iommu_device_info {
- int max_pasids;
- u32 flags;
-};
-
-extern int amd_iommu_device_info(struct pci_dev *pdev,
- struct amd_iommu_device_info *info);
-
-/**
- * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating
- * a pasid context. This call-back is
- * invoked when the IOMMUv2 driver needs to
- * invalidate a PASID context, for example
- * because the task that is bound to that
- * context is about to exit.
- *
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- */
-
-typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
+extern void amd_iommu_detect(void);
-extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
- amd_iommu_invalidate_ctx cb);
#else /* CONFIG_AMD_IOMMU */
-static inline int amd_iommu_detect(void) { return -ENODEV; }
+static inline void amd_iommu_detect(void) { }
#endif /* CONFIG_AMD_IOMMU */
@@ -193,8 +30,9 @@ static inline int amd_iommu_detect(void) { return -ENODEV; }
/* IOMMU AVIC Function */
extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
-extern int
-amd_iommu_update_ga(int cpu, bool is_run, void *data);
+extern int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr);
+extern int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr);
+extern int amd_iommu_deactivate_guest_mode(void *data);
#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
@@ -204,12 +42,38 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
return 0;
}
-static inline int
-amd_iommu_update_ga(int cpu, bool is_run, void *data)
+static inline int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
{
return 0;
}
+static inline int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
+{
+ return 0;
+}
+
+static inline int amd_iommu_deactivate_guest_mode(void *data)
+{
+ return 0;
+}
#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
+int amd_iommu_get_num_iommus(void);
+bool amd_iommu_pc_supported(void);
+u8 amd_iommu_pc_get_max_banks(unsigned int idx);
+u8 amd_iommu_pc_get_max_counters(unsigned int idx);
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
+ u64 *value);
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
+ u64 *value);
+struct amd_iommu *get_amd_iommu(unsigned int idx);
+
+#ifdef CONFIG_KVM_AMD_SEV
+int amd_iommu_snp_disable(void);
+extern bool amd_iommu_sev_tio_supported(void);
+#else
+static inline int amd_iommu_snp_disable(void) { return 0; }
+static inline bool amd_iommu_sev_tio_supported(void) { return false; }
+#endif
+
#endif /* _ASM_X86_AMD_IOMMU_H */
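A hedged sketch of the performance-counter helpers in use; the bank/counter/function encoding (all 0 here) is illustrative only:

u64 value = 0;

if (amd_iommu_pc_supported()) {
	struct amd_iommu *iommu = get_amd_iommu(0);
	u8 banks = amd_iommu_pc_get_max_banks(0);
	u8 cntrs = amd_iommu_pc_get_max_counters(0);

	/* read bank 0, counter 0, function 0 of the first IOMMU */
	if (iommu && banks && cntrs)
		amd_iommu_pc_get_reg(iommu, 0, 0, 0, &value);
}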
diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h
new file mode 100644
index 000000000000..6fa510f419c0
--- /dev/null
+++ b/include/linux/amd-pmf-io.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Platform Management Framework Interface
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#ifndef AMD_PMF_IO_H
+#define AMD_PMF_IO_H
+
+#include <linux/types.h>
+
+/**
+ * enum sfh_message_type - Query the SFH message type
+ * @MT_HPD: Message ID to know the Human presence info from MP2 FW
+ * @MT_ALS: Message ID to know the Ambient light info from MP2 FW
+ * @MT_SRA: Message ID to know the SRA data from MP2 FW
+ */
+enum sfh_message_type {
+ MT_HPD,
+ MT_ALS,
+ MT_SRA,
+};
+
+/**
+ * enum sfh_hpd_info - Query the Human presence information
+ * @SFH_NOT_DETECTED: Check the HPD connection information from MP2 FW
+ * @SFH_USER_PRESENT: Check if the user is present from HPD sensor
+ * @SFH_USER_AWAY: Check if the user is away from HPD sensor
+ */
+enum sfh_hpd_info {
+ SFH_NOT_DETECTED,
+ SFH_USER_PRESENT,
+ SFH_USER_AWAY,
+};
+
+/**
+ * struct amd_sfh_info - get HPD sensor info from MP2 FW
+ * @ambient_light: Populates the ambient light information
+ * @user_present: Populates the user presence information
+ * @platform_type: Operating modes (clamshell, flat, tent, etc.)
+ * @laptop_placement: Device states (ontable, onlap, outbag)
+ */
+struct amd_sfh_info {
+ u32 ambient_light;
+ u8 user_present;
+ u32 platform_type;
+ u32 laptop_placement;
+};
+
+enum laptop_placement {
+ LP_UNKNOWN = 0,
+ ON_TABLE,
+ ON_LAP_MOTION,
+ IN_BAG,
+ OUT_OF_BAG,
+ LP_UNDEFINED,
+};
+
+int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op);
+#endif
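Usage is a single call; a hedged sketch querying human presence (assumes MP2 firmware support is present):

struct amd_sfh_info info = {};
int ret;

ret = amd_get_sfh_info(&info, MT_HPD);
if (!ret && info.user_present == SFH_USER_PRESENT)
	pr_debug("user detected in front of the system\n");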
diff --git a/include/linux/amifd.h b/include/linux/amifd.h
deleted file mode 100644
index 346993268b45..000000000000
--- a/include/linux/amifd.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef _AMIFD_H
-#define _AMIFD_H
-
-/* Definitions for the Amiga floppy driver */
-
-#include <linux/fd.h>
-
-#define FD_MAX_UNITS 4 /* Max. Number of drives */
-#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
-
-#ifndef ASSEMBLER
-
-struct fd_data_type {
- char *name; /* description of data type */
- int sects; /* sectors per track */
-#ifdef __STDC__
- int (*read_fkt)(int);
- void (*write_fkt)(int);
-#else
- int (*read_fkt)(); /* read whole track */
- void (*write_fkt)(); /* write whole track */
-#endif
-};
-
-/*
-** Floppy type descriptions
-*/
-
-struct fd_drive_type {
- unsigned long code; /* code returned from drive */
- char *name; /* description of drive */
- unsigned int tracks; /* number of tracks */
- unsigned int heads; /* number of heads */
- unsigned int read_size; /* raw read size for one track */
- unsigned int write_size; /* raw write size for one track */
- unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
- unsigned int precomp1; /* start track for precomp 1 */
- unsigned int precomp2; /* start track for precomp 2 */
- unsigned int step_delay; /* time (in ms) for delay after step */
- unsigned int settle_time; /* time to settle after dir change */
- unsigned int side_time; /* time needed to change sides */
-};
-
-struct amiga_floppy_struct {
- struct fd_drive_type *type; /* type of floppy for this unit */
- struct fd_data_type *dtype; /* type of floppy for this unit */
- int track; /* current track (-1 == unknown) */
- unsigned char *trackbuf; /* current track (kmaloc()'d */
-
- int blocks; /* total # blocks on disk */
-
- int changed; /* true when not known */
- int disk; /* disk in drive (-1 == unknown) */
- int motor; /* true when motor is at speed */
- int busy; /* true when drive is active */
- int dirty; /* true when trackbuf is not on disk */
- int status; /* current error code for unit */
- struct gendisk *gendisk;
-};
-#endif
-
-#endif
diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h
deleted file mode 100644
index 76188bf48d3b..000000000000
--- a/include/linux/amifdreg.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#ifndef _LINUX_AMIFDREG_H
-#define _LINUX_AMIFDREG_H
-
-/*
-** CIAAPRA bits (read only)
-*/
-
-#define DSKRDY (0x1<<5) /* disk ready when low */
-#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
-#define DSKPROT (0x1<<3) /* disk protected when low */
-#define DSKCHANGE (0x1<<2) /* low when disk removed */
-
-/*
-** CIAAPRB bits (read/write)
-*/
-
-#define DSKMOTOR (0x1<<7) /* motor on when low */
-#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
-#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
-#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
-#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
-#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
-#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
-#define DSKSTEP (0x1) /* pulse low to step head 1 track */
-
-/*
-** DSKBYTR bits (read only)
-*/
-
-#define DSKBYT (1<<15) /* register contains valid byte when set */
-#define DMAON (1<<14) /* disk DMA enabled */
-#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
-#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
-/* bits 7-0 are data */
-
-/*
-** ADKCON/ADKCONR bits
-*/
-
-#ifndef SETCLR
-#define ADK_SETCLR (1<<15) /* control bit */
-#endif
-#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
-#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
-#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
-#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
-#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
-#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
-
-/*
-** DSKLEN bits
-*/
-
-#define DSKLEN_DMAEN (1<<15)
-#define DSKLEN_WRITE (1<<14)
-
-/*
-** INTENA/INTREQ bits
-*/
-
-#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
-
-/*
-** Misc
-*/
-
-#define MFM_SYNC 0x4489 /* standard MFM sync value */
-
-/* Values for FD_COMMAND */
-#define FD_RECALIBRATE 0x07 /* move to track 0 */
-#define FD_SEEK 0x0F /* seek track */
-#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
-#define FD_WRITE 0xC5 /* write with MT, MFM */
-#define FD_SENSEI 0x08 /* Sense Interrupt Status */
-#define FD_SPECIFY 0x03 /* specify HUT etc */
-#define FD_FORMAT 0x4D /* format one track */
-#define FD_VERSION 0x10 /* get version code */
-#define FD_CONFIGURE 0x13 /* configure FIFO operation */
-#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
-
-#endif /* _LINUX_AMIFDREG_H */
diff --git a/include/linux/annotate.h b/include/linux/annotate.h
new file mode 100644
index 000000000000..2f1599c9e573
--- /dev/null
+++ b/include/linux/annotate.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ANNOTATE_H
+#define _LINUX_ANNOTATE_H
+
+#include <linux/objtool_types.h>
+
+#ifdef CONFIG_OBJTOOL
+
+#define __ASM_ANNOTATE(section, label, type) \
+ .pushsection section, "M", @progbits, 8; \
+ .long label - ., type; \
+ .popsection
+
+#ifndef __ASSEMBLY__
+
+#define ASM_ANNOTATE_LABEL(label, type) \
+ __stringify(__ASM_ANNOTATE(.discard.annotate_insn, label, type))
+
+#define ASM_ANNOTATE(type) \
+ "911: " \
+ __stringify(__ASM_ANNOTATE(.discard.annotate_insn, 911b, type))
+
+#define ASM_ANNOTATE_DATA(type) \
+ "912: " \
+ __stringify(__ASM_ANNOTATE(.discard.annotate_data, 912b, type))
+
+#else /* __ASSEMBLY__ */
+
+.macro ANNOTATE type
+.Lhere_\@:
+ __ASM_ANNOTATE(.discard.annotate_insn, .Lhere_\@, \type)
+.endm
+
+.macro ANNOTATE_DATA type
+.Lhere_\@:
+ __ASM_ANNOTATE(.discard.annotate_data, .Lhere_\@, \type)
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#else /* !CONFIG_OBJTOOL */
+#ifndef __ASSEMBLY__
+#define ASM_ANNOTATE_LABEL(label, type) ""
+#define ASM_ANNOTATE(type)
+#define ASM_ANNOTATE_DATA(type)
+#else /* __ASSEMBLY__ */
+.macro ANNOTATE type
+.endm
+.macro ANNOTATE_DATA type
+.endm
+#endif /* __ASSEMBLY__ */
+#endif /* !CONFIG_OBJTOOL */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Annotate away the various 'relocation to !ENDBR' complaints, knowing that
+ * these relocations will never be used for indirect calls.
+ */
+#define ANNOTATE_NOENDBR ASM_ANNOTATE(ANNOTYPE_NOENDBR)
+#define ANNOTATE_NOENDBR_SYM(sym) asm(ASM_ANNOTATE_LABEL(sym, ANNOTYPE_NOENDBR))
+
+/*
+ * This should be used immediately before an indirect jump/call. It tells
+ * objtool the subsequent indirect jump/call is vouched safe for retpoline
+ * builds.
+ */
+#define ANNOTATE_RETPOLINE_SAFE ASM_ANNOTATE(ANNOTYPE_RETPOLINE_SAFE)
+/*
+ * See linux/instrumentation.h
+ */
+#define ANNOTATE_INSTR_BEGIN(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_INSTR_BEGIN)
+#define ANNOTATE_INSTR_END(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_INSTR_END)
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+#define ANNOTATE_IGNORE_ALTERNATIVE ASM_ANNOTATE(ANNOTYPE_IGNORE_ALTS)
+/*
+ * This macro indicates that the following intra-function call is valid.
+ * Any non-annotated intra-function call will cause objtool to issue a warning.
+ */
+#define ANNOTATE_INTRA_FUNCTION_CALL ASM_ANNOTATE(ANNOTYPE_INTRA_FUNCTION_CALL)
+/*
+ * Use objtool to validate the entry requirement that all code paths do
+ * VALIDATE_UNRET_END before RET.
+ *
+ * NOTE: The macro must be used at the beginning of a global symbol, otherwise
+ * it will be ignored.
+ */
+#define ANNOTATE_UNRET_BEGIN ASM_ANNOTATE(ANNOTYPE_UNRET_BEGIN)
+/*
+ * This should be used to refer to an instruction that would normally be
+ * considered terminating, like a noreturn CALL or UD2, when we know it is
+ * in fact not -- e.g. WARN using UD2.
+ */
+#define ANNOTATE_REACHABLE(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_REACHABLE)
+/*
+ * This should not be used; it annotates away CFI violations. There are a few
+ * valid use cases like kexec handover to the next kernel image, and there is
+ * no security concern there.
+ *
+ * There are also a few real issues annotated away, like EFI because we can't
+ * control the EFI code.
+ */
+#define ANNOTATE_NOCFI_SYM(sym) asm(ASM_ANNOTATE_LABEL(sym, ANNOTYPE_NOCFI))
+
+/*
+ * Annotate a special section entry. This enables livepatch module generation
+ * to find and extract individual special section entries as needed.
+ */
+#define ANNOTATE_DATA_SPECIAL ASM_ANNOTATE_DATA(ANNOTYPE_DATA_SPECIAL)
+
+#else /* __ASSEMBLY__ */
+#define ANNOTATE_NOENDBR ANNOTATE type=ANNOTYPE_NOENDBR
+#define ANNOTATE_RETPOLINE_SAFE ANNOTATE type=ANNOTYPE_RETPOLINE_SAFE
+/* ANNOTATE_INSTR_BEGIN ANNOTATE type=ANNOTYPE_INSTR_BEGIN */
+/* ANNOTATE_INSTR_END ANNOTATE type=ANNOTYPE_INSTR_END */
+#define ANNOTATE_IGNORE_ALTERNATIVE ANNOTATE type=ANNOTYPE_IGNORE_ALTS
+#define ANNOTATE_INTRA_FUNCTION_CALL ANNOTATE type=ANNOTYPE_INTRA_FUNCTION_CALL
+#define ANNOTATE_UNRET_BEGIN ANNOTATE type=ANNOTYPE_UNRET_BEGIN
+#define ANNOTATE_REACHABLE ANNOTATE type=ANNOTYPE_REACHABLE
+#define ANNOTATE_NOCFI_SYM ANNOTATE type=ANNOTYPE_NOCFI
+#define ANNOTATE_DATA_SPECIAL ANNOTATE_DATA type=ANNOTYPE_DATA_SPECIAL
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_ANNOTATE_H */
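As a usage illustration, ANNOTATE_RETPOLINE_SAFE sits immediately before the indirect call inside inline asm; a hedged x86-style sketch with the clobber list abbreviated:

static void indirect_call(void (*fn)(void))
{
	/* vouch to objtool that this indirect call is retpoline-safe */
	asm volatile(ANNOTATE_RETPOLINE_SAFE
		     "call *%[fn]\n\t"
		     : : [fn] "r" (fn) : "memory");
}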
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 8013a45242fe..edef565c2a1a 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/anon_inodes.h
*
@@ -8,13 +9,27 @@
#ifndef _LINUX_ANON_INODES_H
#define _LINUX_ANON_INODES_H
+#include <linux/types.h>
+
struct file_operations;
+struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
+struct file *anon_inode_getfile_fmode(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags, fmode_t f_mode);
+struct file *anon_inode_create_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
+int anon_inode_create_getfd(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
#endif /* _LINUX_ANON_INODES_H */
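Typical use of the classic helper is unchanged; a hedged sketch with hypothetical my_ctx_fops and ctx, handing userspace an fd backed by private state:

int fd;

fd = anon_inode_getfd("[my-ctx]", &my_ctx_fops, ctx,
		      O_RDWR | O_CLOEXEC);
if (fd < 0)
	return fd;	/* on failure, ctx is still owned by the caller */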
diff --git a/include/linux/aperture.h b/include/linux/aperture.h
new file mode 100644
index 000000000000..1a9a88b11584
--- /dev/null
+++ b/include/linux/aperture.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _LINUX_APERTURE_H_
+#define _LINUX_APERTURE_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct platform_device;
+
+#if defined(CONFIG_APERTURE_HELPERS)
+int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size);
+
+int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ const char *name);
+
+int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev);
+
+int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name);
+#else
+static inline int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ const char *name)
+{
+ return 0;
+}
+
+static inline int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
+{
+ return 0;
+}
+#endif
+
+/**
+ * aperture_remove_all_conflicting_devices - remove all existing framebuffers
+ * @name: a descriptive name of the requesting driver
+ *
+ * This function removes all graphics device drivers. Use this function on systems
+ * that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int aperture_remove_all_conflicting_devices(const char *name)
+{
+ return aperture_remove_conflicting_devices(0, (resource_size_t)-1, name);
+}
+
+#endif
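A usage sketch (driver names hypothetical): a native PCI graphics driver would
typically evict firmware framebuffer drivers scanning out from its apertures
before taking over the hardware:

static int my_gpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	/* Kick out generic drivers (efifb, vesafb, ...) first. */
	ret = aperture_remove_conflicting_pci_devices(pdev, "my-gpu");
	if (ret)
		return ret;

	/* ... continue with regular device initialization ... */
	return 0;
}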
diff --git a/include/linux/apm-emulation.h b/include/linux/apm-emulation.h
index e6d800358dd6..94c036957948 100644
--- a/include/linux/apm-emulation.h
+++ b/include/linux/apm-emulation.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* -*- linux-c -*-
*
* (C) 2003 zecke@handhelds.org
*
- * GPL version 2
- *
* based on arch/arm/kernel/apm.c
* factor out the information needed by architectures to provide
* apm status
diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h
index 9c3a87184f48..7554192c3ae3 100644
--- a/include/linux/apm_bios.h
+++ b/include/linux/apm_bios.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Include file for the interface to an APM BIOS
* Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _LINUX_APM_H
#define _LINUX_APM_H
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
index 714186de8c36..206d97ffda79 100644
--- a/include/linux/apple-gmux.h
+++ b/include/linux/apple-gmux.h
@@ -1,36 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro
* Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef LINUX_APPLE_GMUX_H
#define LINUX_APPLE_GMUX_H
#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/pnp.h>
#define GMUX_ACPI_HID "APP000B"
+/*
+ * gmux port offsets. Many of these are not yet used, but may be in the
+ * future, and it's useful to have them documented here anyhow.
+ */
+#define GMUX_PORT_VERSION_MAJOR 0x04
+#define GMUX_PORT_VERSION_MINOR 0x05
+#define GMUX_PORT_VERSION_RELEASE 0x06
+#define GMUX_PORT_SWITCH_DISPLAY 0x10
+#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11
+#define GMUX_PORT_INTERRUPT_ENABLE 0x14
+#define GMUX_PORT_INTERRUPT_STATUS 0x16
+#define GMUX_PORT_SWITCH_DDC 0x28
+#define GMUX_PORT_SWITCH_EXTERNAL 0x40
+#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41
+#define GMUX_PORT_DISCRETE_POWER 0x50
+#define GMUX_PORT_MAX_BRIGHTNESS 0x70
+#define GMUX_PORT_BRIGHTNESS 0x74
+#define GMUX_PORT_VALUE 0xc2
+#define GMUX_PORT_READ 0xd0
+#define GMUX_PORT_WRITE 0xd4
+
+#define GMUX_MMIO_PORT_SELECT 0x0e
+#define GMUX_MMIO_COMMAND_SEND 0x0f
+
+#define GMUX_MMIO_READ 0x00
+#define GMUX_MMIO_WRITE 0x40
+
+#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
+
+enum apple_gmux_type {
+ APPLE_GMUX_TYPE_PIO,
+ APPLE_GMUX_TYPE_INDEXED,
+ APPLE_GMUX_TYPE_MMIO,
+};
+
#if IS_ENABLED(CONFIG_APPLE_GMUX)
+static inline bool apple_gmux_is_indexed(unsigned long iostart)
+{
+ u16 val;
+
+ outb(0xaa, iostart + 0xcc);
+ outb(0x55, iostart + 0xcd);
+ outb(0x00, iostart + 0xce);
+
+ val = inb(iostart + 0xcc) | (inb(iostart + 0xcd) << 8);
+ if (val == 0x55aa)
+ return true;
+
+ return false;
+}
+
+static inline bool apple_gmux_is_mmio(unsigned long iostart)
+{
+ u8 __iomem *iomem_base = ioremap(iostart, 16);
+ u8 val;
+
+ if (!iomem_base)
+ return false;
+
+ /*
+ * If this is 0xff, then gmux must not be present, as the gmux would
+ * reset it to 0x00, or it would be one of 0x1, 0x4, 0x41, 0x44 if a
+ * command is currently being processed.
+ */
+ val = ioread8(iomem_base + GMUX_MMIO_COMMAND_SEND);
+ iounmap(iomem_base);
+ return (val != 0xff);
+}
/**
- * apple_gmux_present() - detect if gmux is built into the machine
+ * apple_gmux_detect() - detect if gmux is built into the machine
+ *
+ * @pnp_dev: Device to probe or NULL to use the first matching device
+ * @type_ret: Returns (by reference) the apple_gmux_type of the device
+ *
+ * Detect if a supported gmux device is present by actually probing it.
+ * This avoids the false positives returned on some models by
+ * apple_gmux_present().
+ *
+ * Return: %true if a supported gmux ACPI device is detected and the kernel
+ * was configured with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, enum apple_gmux_type *type_ret)
+{
+ u8 ver_major, ver_minor, ver_release;
+ struct device *dev = NULL;
+ struct acpi_device *adev;
+ struct resource *res;
+ enum apple_gmux_type type = APPLE_GMUX_TYPE_PIO;
+ bool ret = false;
+
+ if (!pnp_dev) {
+ adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+ if (!adev)
+ return false;
+
+ dev = get_device(acpi_get_first_physical_node(adev));
+ acpi_dev_put(adev);
+ if (!dev)
+ return false;
+
+ pnp_dev = to_pnp_dev(dev);
+ }
+
+ res = pnp_get_resource(pnp_dev, IORESOURCE_IO, 0);
+ if (res && resource_size(res) >= GMUX_MIN_IO_LEN) {
+ /*
+ * Invalid version information may indicate either that the gmux
+ * device isn't present or that it's a new one that uses indexed io.
+ */
+ ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR);
+ ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR);
+ ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE);
+ if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
+ if (apple_gmux_is_indexed(res->start))
+ type = APPLE_GMUX_TYPE_INDEXED;
+ else
+ goto out;
+ }
+ } else {
+ res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
+ if (res && apple_gmux_is_mmio(res->start))
+ type = APPLE_GMUX_TYPE_MMIO;
+ else
+ goto out;
+ }
+
+ if (type_ret)
+ *type_ret = type;
+
+ ret = true;
+out:
+ put_device(dev);
+ return ret;
+}
+
+/**
+ * apple_gmux_present() - check if gmux ACPI device is present
*
* Drivers may use this to activate quirks specific to dual GPU MacBook Pros
* and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
*
- * Return: %true if gmux is present and the kernel was configured
+ * Return: %true if gmux ACPI device is present and the kernel was configured
* with CONFIG_APPLE_GMUX, %false otherwise.
*/
static inline bool apple_gmux_present(void)
@@ -45,6 +170,11 @@ static inline bool apple_gmux_present(void)
return false;
}
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, enum apple_gmux_type *type_ret)
+{
+ return false;
+}
+
#endif /* !CONFIG_APPLE_GMUX */
#endif /* LINUX_APPLE_GMUX_H */
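A probing sketch (consumer hypothetical): unlike apple_gmux_present(), which only
checks for the ACPI device, apple_gmux_detect() verifies that a gmux actually
responds and reports which register interface it uses:

static int my_consumer_init(void)
{
	enum apple_gmux_type type;

	if (!apple_gmux_detect(NULL, &type))
		return -ENODEV;

	/* Suppose this consumer only copes with the port-I/O variants. */
	if (type == APPLE_GMUX_TYPE_MMIO)
		return -ENODEV;

	/* ... apply the gmux-specific quirk or register a driver ... */
	return 0;
}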
diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h
deleted file mode 100644
index 0a95e730fcea..000000000000
--- a/include/linux/apple_bl.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * apple_bl exported symbols
- */
-
-#ifndef _LINUX_APPLE_BL_H
-#define _LINUX_APPLE_BL_H
-
-#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
-
-extern int apple_bl_register(void);
-extern void apple_bl_unregister(void);
-
-#else /* !CONFIG_BACKLIGHT_APPLE */
-
-static inline int apple_bl_register(void)
-{
- return 0;
-}
-
-static inline void apple_bl_unregister(void)
-{
-}
-
-#endif /* !CONFIG_BACKLIGHT_APPLE */
-
-#endif /* _LINUX_APPLE_BL_H */
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 716ce587247e..ebd7f8935f96 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/arch_topology.h - arch specific cpu topology information
*/
@@ -5,15 +6,109 @@
#define _LINUX_ARCH_TOPOLOGY_H_
#include <linux/types.h>
+#include <linux/percpu.h>
void topology_normalize_cpu_scale(void);
+int topology_update_cpu_topology(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
-struct sched_domain;
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
+
+static inline unsigned long topology_get_freq_ref(int cpu)
+{
+ return per_cpu(capacity_freq_ref, cpu);
+}
+
+DECLARE_PER_CPU(unsigned long, arch_freq_scale);
+
+static inline unsigned long topology_get_freq_scale(int cpu)
+{
+ return per_cpu(arch_freq_scale, cpu);
+}
+
+void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq);
+bool topology_scale_freq_invariant(void);
+
+enum scale_freq_source {
+ SCALE_FREQ_SOURCE_CPUFREQ = 0,
+ SCALE_FREQ_SOURCE_ARCH,
+ SCALE_FREQ_SOURCE_CPPC,
+ SCALE_FREQ_SOURCE_VIRT,
+};
+
+struct scale_freq_data {
+ enum scale_freq_source source;
+ void (*set_freq_scale)(void);
+};
+
+void topology_scale_freq_tick(void);
+void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus);
+void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus);
+
+DECLARE_PER_CPU(unsigned long, hw_pressure);
+
+static inline unsigned long topology_get_hw_pressure(int cpu)
+{
+ return per_cpu(hw_pressure, cpu);
+}
+
+void topology_update_hw_pressure(const struct cpumask *cpus,
+ unsigned long capped_freq);
+
+struct cpu_topology {
+ int thread_id;
+ int core_id;
+ int cluster_id;
+ int package_id;
+ cpumask_t thread_sibling;
+ cpumask_t core_sibling;
+ cpumask_t cluster_sibling;
+ cpumask_t llc_sibling;
+};
+
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+extern struct cpu_topology cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
+#define topology_cluster_id(cpu) (cpu_topology[cpu].cluster_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
+#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
+
+#ifndef arch_cpu_is_threaded
+#define arch_cpu_is_threaded() (0)
+#endif
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+const struct cpumask *cpu_clustergroup_mask(int cpu);
+void update_siblings_masks(unsigned int cpu);
+void remove_cpu_topology(unsigned int cpuid);
+void reset_cpu_topology(void);
+int parse_acpi_topology(void);
+void freq_inv_set_max_ratio(int cpu, u64 max_rate);
+
+/*
+ * Architectures like ARM64 don't have a reliable architectural way to get SMT
+ * information and depend on what the firmware (ACPI/OF) reports. A non-SMT
+ * core won't initialize thread_id, so we can use that to detect whether SMT
+ * is implemented.
+ */
+static inline bool topology_core_has_smt(int cpu)
+{
+ return cpu_topology[cpu].thread_id != -1;
+}
+
+#else
+
+static inline bool topology_core_has_smt(int cpu) { return false; }
+
+#endif /* CONFIG_GENERIC_ARCH_TOPOLOGY */
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
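A registration sketch for the scale_freq_data machinery above (callback body
hypothetical): an architecture with hardware frequency counters can override
the default cpufreq-based estimate like so:

static void my_arch_set_freq_scale(void)
{
	/*
	 * Runs from topology_scale_freq_tick(); compute the delivered/max
	 * frequency ratio and publish it so that topology_get_freq_scale()
	 * reflects the measured value.
	 */
}

static struct scale_freq_data my_sfd = {
	.source		= SCALE_FREQ_SOURCE_ARCH,
	.set_freq_scale	= my_arch_set_freq_scale,
};

static void my_arch_counters_init(void)
{
	/* Arch-provided counters are preferred over the cpufreq source. */
	topology_set_scale_freq_source(&my_sfd, cpu_possible_mask);
}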
diff --git a/include/linux/args.h b/include/linux/args.h
new file mode 100644
index 000000000000..2e8e65d975c7
--- /dev/null
+++ b/include/linux/args.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ARGS_H
+#define _LINUX_ARGS_H
+
+/*
+ * How do these macros work?
+ *
+ * In __COUNT_ARGS() _0 to _15 are just placeholders from the start
+ * in order to make sure _n is positioned over the correct number
+ * from 15 to 0 (depending on X, which is a variadic argument list).
+ * They serve no purpose other than occupying a position. Since each
+ * macro parameter must have a distinct identifier, those identifiers
+ * are as good as any.
+ *
+ * In COUNT_ARGS() we use actual integers, so __COUNT_ARGS() returns
+ * that as _n.
+ */
+
+/* This counts to 15. Any more than that and it returns the 16th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+/* Concatenate two parameters, but allow them to be expanded beforehand. */
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
+
+#endif /* _LINUX_ARGS_H */
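A worked expansion of the counting trick, shown as comments:

/*
 * COUNT_ARGS(a, b, c)
 *   => __COUNT_ARGS(, a, b, c, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
 *      The placeholders _0.._15 soak up ", a, b, c, 15, ..., 4",
 *      leaving _n positioned over 3.
 *
 * COUNT_ARGS()  => 0   (the ", ##X" drops the leading comma when X is empty)
 *
 * CONCATENATE(do_, COUNT_ARGS(x, y))  => do_2
 *      The extra __CONCAT level lets COUNT_ARGS() expand before pasting.
 */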
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
index 521ec1f2e6bc..7f7a576267bc 100644
--- a/include/linux/arm-cci.h
+++ b/include/linux/arm-cci.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* CCI cache coherent interconnect support
*
* Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_ARM_CCI_H
@@ -56,6 +43,8 @@ static inline int __cci_control_port_by_index(u32 port, bool enable)
}
#endif
+void cci_enable_port_for_self(void);
+
#define cci_disable_port_by_device(dev) \
__cci_control_port_by_device(dev, false)
#define cci_enable_port_by_device(dev) \
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 4c5bca38c653..50b47eba7d01 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -1,27 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, Linaro Limited
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <linux/args.h>
+#include <linux/init.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/uuid.h>
+#endif
+
+#include <uapi/linux/const.h>
+
/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
- * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ * https://developer.arm.com/docs/den0028/latest
+ *
+ * This code is up-to-date with version DEN 0028 C
*/
-#define ARM_SMCCC_STD_CALL 0
-#define ARM_SMCCC_FAST_CALL 1
+#define ARM_SMCCC_STD_CALL _AC(0,U)
+#define ARM_SMCCC_FAST_CALL _AC(1,U)
#define ARM_SMCCC_TYPE_SHIFT 31
#define ARM_SMCCC_SMC_32 0
@@ -52,18 +54,356 @@
#define ARM_SMCCC_OWNER_SIP 2
#define ARM_SMCCC_OWNER_OEM 3
#define ARM_SMCCC_OWNER_STANDARD 4
+#define ARM_SMCCC_OWNER_STANDARD_HYP 5
+#define ARM_SMCCC_OWNER_VENDOR_HYP 6
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01
+
#define ARM_SMCCC_QUIRK_NONE 0
#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
+#define ARM_SMCCC_VERSION_1_0 0x10000
+#define ARM_SMCCC_VERSION_1_1 0x10001
+#define ARM_SMCCC_VERSION_1_2 0x10002
+#define ARM_SMCCC_VERSION_1_3 0x10003
+
+#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+#define ARM_SMCCC_CALL_HINTS ARM_SMCCC_1_3_SVE_HINT
+
+
+#define ARM_SMCCC_VERSION_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 1)
+
+#define ARM_SMCCC_ARCH_SOC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 2)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_2 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x7fff)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_3 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x3fff)
+
+#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_FUNC_QUERY_CALL_UID)
+
+/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM UUID_INIT(\
+ 0x28b46fb6, 0x2ec5, 0x11e9, \
+ 0xa9, 0xca, 0x4b, 0x56, \
+ 0x4d, 0x00, 0x3a, 0x74)
+
+/* KVM "vendor specific" services */
+#define ARM_SMCCC_KVM_FUNC_FEATURES 0
+#define ARM_SMCCC_KVM_FUNC_PTP 1
+/* Start of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_HYP_MEMINFO 2
+#define ARM_SMCCC_KVM_FUNC_MEM_SHARE 3
+#define ARM_SMCCC_KVM_FUNC_MEM_UNSHARE 4
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_5 5
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_6 6
+#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD 7
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_8 8
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_9 9
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_10 10
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_11 11
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_12 12
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_13 13
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_14 14
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_15 15
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_16 16
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_17 17
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_18 18
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_19 19
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_20 20
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_21 21
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_22 22
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_23 23
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_24 24
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_25 25
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_26 26
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_27 27
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_28 28
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_29 29
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_30 30
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_31 31
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_32 32
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_33 33
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_34 34
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_35 35
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_36 36
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_37 37
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_38 38
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_39 39
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_40 40
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_41 41
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_42 42
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_43 43
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_44 44
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_45 45
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_46 46
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_47 47
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_48 48
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_49 49
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_50 50
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_51 51
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_52 52
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_53 53
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_54 54
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_55 55
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_56 56
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_57 57
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_58 58
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_59 59
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_60 60
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_61 61
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_62 62
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_63 63
+/* End of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER 64
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS 65
+
+#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
+#define ARM_SMCCC_KVM_NUM_FUNCS 128
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_FEATURES)
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
+
+/*
+ * ptp_kvm is a feature used for time sync between vm and host.
+ * ptp_kvm module in guest kernel will get service from host using
+ * this hypercall ID.
+ */
+#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_PTP)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_HYP_MEMINFO)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_SHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_UNSHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MMIO_GUARD)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS)
+
+/* ptp_kvm counter type ID */
+#define KVM_PTP_VIRT_COUNTER 0
+#define KVM_PTP_PHYS_COUNTER 1
+
+/* Paravirtualised time calls (defined by ARM DEN0057A) */
+#define ARM_SMCCC_HV_PV_TIME_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x20)
+
+#define ARM_SMCCC_HV_PV_TIME_ST \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x21)
+
+/* TRNG entropy source calls (defined by ARM DEN0098) */
+#define ARM_SMCCC_TRNG_VERSION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x50)
+
+#define ARM_SMCCC_TRNG_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x51)
+
+#define ARM_SMCCC_TRNG_GET_UUID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x52)
+
+#define ARM_SMCCC_TRNG_RND32 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+#define ARM_SMCCC_TRNG_RND64 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+/*
+ * Return codes defined in ARM DEN 0070A
+ * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
+ */
+#define SMCCC_RET_SUCCESS 0
+#define SMCCC_RET_NOT_SUPPORTED -1
+#define SMCCC_RET_NOT_REQUIRED -2
+#define SMCCC_RET_INVALID_PARAMETER -3
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
#include <linux/types.h>
+
+enum arm_smccc_conduit {
+ SMCCC_CONDUIT_NONE,
+ SMCCC_CONDUIT_SMC,
+ SMCCC_CONDUIT_HVC,
+};
+
+/**
+ * arm_smccc_1_1_get_conduit()
+ *
+ * Returns the conduit to be used for SMCCCv1.1 or later.
+ *
+ * When SMCCCv1.1 is not present, returns SMCCC_CONDUIT_NONE.
+ */
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void);
+
+/**
+ * arm_smccc_get_version()
+ *
+ * Returns the version to be used for SMCCCv1.1 or later.
+ *
+ * When SMCCCv1.1 or above is not present, returns SMCCCv1.0, but this
+ * does not imply the presence of firmware or a valid conduit. Callers
+ * handling SMCCCv1.0 must determine the conduit by other means.
+ */
+u32 arm_smccc_get_version(void);
+
+void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
+
+/**
+ * arm_smccc_get_soc_id_version()
+ *
+ * Returns the SOC ID version.
+ *
+ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
+ */
+s32 arm_smccc_get_soc_id_version(void);
+
+/**
+ * arm_smccc_get_soc_id_revision()
+ *
+ * Returns the SOC ID revision.
+ *
+ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
+ */
+s32 arm_smccc_get_soc_id_revision(void);
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Returns whether a specific hypervisor UUID is advertised for the
+ * Vendor Specific Hypervisor Service range.
+ */
+bool arm_smccc_hypervisor_has_uuid(const uuid_t *uuid);
+
+static inline uuid_t smccc_res_to_uuid(u32 r0, u32 r1, u32 r2, u32 r3)
+{
+ uuid_t uuid = {
+ .b = {
+ [0] = (r0 >> 0) & 0xff,
+ [1] = (r0 >> 8) & 0xff,
+ [2] = (r0 >> 16) & 0xff,
+ [3] = (r0 >> 24) & 0xff,
+
+ [4] = (r1 >> 0) & 0xff,
+ [5] = (r1 >> 8) & 0xff,
+ [6] = (r1 >> 16) & 0xff,
+ [7] = (r1 >> 24) & 0xff,
+
+ [8] = (r2 >> 0) & 0xff,
+ [9] = (r2 >> 8) & 0xff,
+ [10] = (r2 >> 16) & 0xff,
+ [11] = (r2 >> 24) & 0xff,
+
+ [12] = (r3 >> 0) & 0xff,
+ [13] = (r3 >> 8) & 0xff,
+ [14] = (r3 >> 16) & 0xff,
+ [15] = (r3 >> 24) & 0xff,
+ },
+ };
+
+ return uuid;
+}
+
+static inline u32 smccc_uuid_to_reg(const uuid_t *uuid, int reg)
+{
+ u32 val = 0;
+
+ val |= (u32)(uuid->b[4 * reg + 0] << 0);
+ val |= (u32)(uuid->b[4 * reg + 1] << 8);
+ val |= (u32)(uuid->b[4 * reg + 2] << 16);
+ val |= (u32)(uuid->b[4 * reg + 3] << 24);
+
+ return val;
+}
+
+#endif /* !__ASSEMBLY__ */
+
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -75,6 +415,61 @@ struct arm_smccc_res {
unsigned long a3;
};
+#ifdef CONFIG_ARM64
+/**
+ * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call
+ * @a0-a17 argument values from registers 0 to 17
+ */
+struct arm_smccc_1_2_regs {
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long a8;
+ unsigned long a9;
+ unsigned long a10;
+ unsigned long a11;
+ unsigned long a12;
+ unsigned long a13;
+ unsigned long a14;
+ unsigned long a15;
+ unsigned long a16;
+ unsigned long a17;
+};
+
+/**
+ * arm_smccc_1_2_hvc() - make HVC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make HVC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied args structure are copied to
+ * registers prior to the HVC instruction. The return values are updated
+ * with the contents of the registers on return from the HVC instruction.
+ */
+asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+
+/**
+ * arm_smccc_1_2_smc() - make SMC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make SMC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied args structure are copied to
+ * registers prior to the SMC instruction. The return values are updated
+ * with the contents of the registers on return from the SMC instruction.
+ */
+asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+#endif
+
/**
* struct arm_smccc_quirk - Contains quirk information
* @id: quirk identification
@@ -100,10 +495,20 @@ struct arm_smccc_quirk {
* from register 0 to 3 on return from the SMC instruction. An optional
* quirk structure provides vendor specific behavior.
*/
+#ifdef CONFIG_HAVE_ARM_SMCCC
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+ *res = (struct arm_smccc_res){};
+}
+#endif
/**
* __arm_smccc_hvc() - make HVC calls
@@ -130,5 +535,220 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
+/* SMCCC v1.1 implementation madness follows */
+#ifdef CONFIG_ARM64
+
+#define SMCCC_SMC_INST "smc #0"
+#define SMCCC_HVC_INST "hvc #0"
+
+#elif defined(CONFIG_ARM)
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+#define SMCCC_SMC_INST __SMC(0)
+#define SMCCC_HVC_INST __HVC(0)
+
+#endif
+
+#define __constraint_read_2 "r" (arg0)
+#define __constraint_read_3 __constraint_read_2, "r" (arg1)
+#define __constraint_read_4 __constraint_read_3, "r" (arg2)
+#define __constraint_read_5 __constraint_read_4, "r" (arg3)
+#define __constraint_read_6 __constraint_read_5, "r" (arg4)
+#define __constraint_read_7 __constraint_read_6, "r" (arg5)
+#define __constraint_read_8 __constraint_read_7, "r" (arg6)
+#define __constraint_read_9 __constraint_read_8, "r" (arg7)
+
+#define __declare_arg_2(a0, res) \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long arg0 asm("r0") = (u32)a0
+
+#define __declare_arg_3(a0, a1, res) \
+ typeof(a1) __a1 = a1; \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1
+
+#define __declare_arg_4(a0, a1, a2, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1; \
+ register typeof(a2) arg2 asm("r2") = __a2
+
+#define __declare_arg_5(a0, a1, a2, a3, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
+ typeof(a3) __a3 = a3; \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1; \
+ register typeof(a2) arg2 asm("r2") = __a2; \
+ register typeof(a3) arg3 asm("r3") = __a3
+
+#define __declare_arg_6(a0, a1, a2, a3, a4, res) \
+ typeof(a4) __a4 = a4; \
+ __declare_arg_5(a0, a1, a2, a3, res); \
+ register typeof(a4) arg4 asm("r4") = __a4
+
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, res) \
+ typeof(a5) __a5 = a5; \
+ __declare_arg_6(a0, a1, a2, a3, a4, res); \
+ register typeof(a5) arg5 asm("r5") = __a5
+
+#define __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res) \
+ typeof(a6) __a6 = a6; \
+ __declare_arg_7(a0, a1, a2, a3, a4, a5, res); \
+ register typeof(a6) arg6 asm("r6") = __a6
+
+#define __declare_arg_9(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ typeof(a7) __a7 = a7; \
+ __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res); \
+ register typeof(a7) arg7 asm("r7") = __a7
+
+/*
+ * We have an output list that is not necessarily used, and GCC feels
+ * entitled to optimise the whole sequence away. "volatile" is what
+ * makes it stick.
+ */
+#define __arm_smccc_1_1(inst, ...) \
+ do { \
+ register unsigned long r0 asm("r0"); \
+ register unsigned long r1 asm("r1"); \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3"); \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
+ asm volatile(inst "\n" : \
+ "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : "memory"); \
+ if (___res) \
+ *___res = (typeof(*___res)){r0, r1, r2, r3}; \
+ } while (0)
+
+/*
+ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7
+ * prior to the SMC instruction. If @res is not NULL, the return values are
+ * updated with the contents of registers 0 to 3 on return from the SMC
+ * instruction.
+ */
+#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
+
+/*
+ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7
+ * prior to the HVC instruction. If @res is not NULL, the return values are
+ * updated with the contents of registers 0 to 3 on return from the HVC
+ * instruction.
+ */
+#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
+/*
+ * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED.
+ * Used when the SMCCC conduit is not defined. The empty asm statement
+ * avoids compiler warnings about unused variables.
+ */
+#define __fail_smccc_1_1(...) \
+ do { \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
+ asm ("" : \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : "memory"); \
+ if (___res) \
+ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
+ } while (0)
+
+/*
+ * arm_smccc_1_1_invoke() - make an SMCCC v1.1 compliant call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro will make either an HVC call or an SMC call depending on the
+ * current SMCCC conduit. If no valid conduit is available then -1
+ * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
+ *
+ * The return value also provides the conduit that was used.
+ */
+#define arm_smccc_1_1_invoke(...) ({ \
+ int method = arm_smccc_1_1_get_conduit(); \
+ switch (method) { \
+ case SMCCC_CONDUIT_HVC: \
+ arm_smccc_1_1_hvc(__VA_ARGS__); \
+ break; \
+ case SMCCC_CONDUIT_SMC: \
+ arm_smccc_1_1_smc(__VA_ARGS__); \
+ break; \
+ default: \
+ __fail_smccc_1_1(__VA_ARGS__); \
+ method = SMCCC_CONDUIT_NONE; \
+ break; \
+ } \
+ method; \
+ })
+
+#ifdef CONFIG_ARM64
+
+#define __fail_smccc_1_2(___res) \
+ do { \
+ if (___res) \
+ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
+ } while (0)
+
+/*
+ * arm_smccc_1_2_invoke() - make an SMCCC v1.2 compliant call
+ *
+ * @args: SMC args are in the a0..a17 fields of the arm_smccc_1_2_regs structure
+ * @res: result values from registers 0 to 17
+ *
+ * This macro will make either an HVC call or an SMC call depending on the
+ * current SMCCC conduit. If no valid conduit is available then -1
+ * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
+ *
+ * The return value also provides the conduit that was used.
+ */
+#define arm_smccc_1_2_invoke(args, res) ({ \
+ struct arm_smccc_1_2_regs *__args = args; \
+ struct arm_smccc_1_2_regs *__res = res; \
+ int method = arm_smccc_1_1_get_conduit(); \
+ switch (method) { \
+ case SMCCC_CONDUIT_HVC: \
+ arm_smccc_1_2_hvc(__args, __res); \
+ break; \
+ case SMCCC_CONDUIT_SMC: \
+ arm_smccc_1_2_smc(__args, __res); \
+ break; \
+ default: \
+ __fail_smccc_1_2(__res); \
+ method = SMCCC_CONDUIT_NONE; \
+ break; \
+ } \
+ method; \
+ })
+#endif /*CONFIG_ARM64*/
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
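A usage sketch for the v1.1 invocation macro (wrapper hypothetical; the function
ID is defined above): querying the firmware's SMCCC version over whichever
conduit the platform registered:

static u32 my_query_smccc_version(void)
{
	struct arm_smccc_res res;

	/* Expands to HVC or SMC as appropriate for the current conduit. */
	arm_smccc_1_1_invoke(ARM_SMCCC_VERSION_FUNC_ID, &res);
	if ((long)res.a0 < 0)	/* SMCCC_RET_NOT_SUPPORTED */
		return ARM_SMCCC_VERSION_1_0;

	return res.a0;
}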
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
new file mode 100644
index 000000000000..81e603839c4a
--- /dev/null
+++ b/include/linux/arm_ffa.h
@@ -0,0 +1,515 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _LINUX_ARM_FFA_H
+#define _LINUX_ARM_FFA_H
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+#define FFA_SMC(calling_convention, func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention), \
+ ARM_SMCCC_OWNER_STANDARD, (func_num))
+
+#define FFA_SMC_32(func_num) FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
+#define FFA_SMC_64(func_num) FFA_SMC(ARM_SMCCC_SMC_64, (func_num))
+
+#define FFA_ERROR FFA_SMC_32(0x60)
+#define FFA_SUCCESS FFA_SMC_32(0x61)
+#define FFA_FN64_SUCCESS FFA_SMC_64(0x61)
+#define FFA_INTERRUPT FFA_SMC_32(0x62)
+#define FFA_VERSION FFA_SMC_32(0x63)
+#define FFA_FEATURES FFA_SMC_32(0x64)
+#define FFA_RX_RELEASE FFA_SMC_32(0x65)
+#define FFA_RXTX_MAP FFA_SMC_32(0x66)
+#define FFA_FN64_RXTX_MAP FFA_SMC_64(0x66)
+#define FFA_RXTX_UNMAP FFA_SMC_32(0x67)
+#define FFA_PARTITION_INFO_GET FFA_SMC_32(0x68)
+#define FFA_ID_GET FFA_SMC_32(0x69)
+#define FFA_MSG_POLL FFA_SMC_32(0x6A)
+#define FFA_MSG_WAIT FFA_SMC_32(0x6B)
+#define FFA_YIELD FFA_SMC_32(0x6C)
+#define FFA_RUN FFA_SMC_32(0x6D)
+#define FFA_MSG_SEND FFA_SMC_32(0x6E)
+#define FFA_MSG_SEND_DIRECT_REQ FFA_SMC_32(0x6F)
+#define FFA_FN64_MSG_SEND_DIRECT_REQ FFA_SMC_64(0x6F)
+#define FFA_MSG_SEND_DIRECT_RESP FFA_SMC_32(0x70)
+#define FFA_FN64_MSG_SEND_DIRECT_RESP FFA_SMC_64(0x70)
+#define FFA_MEM_DONATE FFA_SMC_32(0x71)
+#define FFA_FN64_MEM_DONATE FFA_SMC_64(0x71)
+#define FFA_MEM_LEND FFA_SMC_32(0x72)
+#define FFA_FN64_MEM_LEND FFA_SMC_64(0x72)
+#define FFA_MEM_SHARE FFA_SMC_32(0x73)
+#define FFA_FN64_MEM_SHARE FFA_SMC_64(0x73)
+#define FFA_MEM_RETRIEVE_REQ FFA_SMC_32(0x74)
+#define FFA_FN64_MEM_RETRIEVE_REQ FFA_SMC_64(0x74)
+#define FFA_MEM_RETRIEVE_RESP FFA_SMC_32(0x75)
+#define FFA_MEM_RELINQUISH FFA_SMC_32(0x76)
+#define FFA_MEM_RECLAIM FFA_SMC_32(0x77)
+#define FFA_MEM_OP_PAUSE FFA_SMC_32(0x78)
+#define FFA_MEM_OP_RESUME FFA_SMC_32(0x79)
+#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A)
+#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B)
+#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C)
+#define FFA_NOTIFICATION_BITMAP_CREATE FFA_SMC_32(0x7D)
+#define FFA_NOTIFICATION_BITMAP_DESTROY FFA_SMC_32(0x7E)
+#define FFA_NOTIFICATION_BIND FFA_SMC_32(0x7F)
+#define FFA_NOTIFICATION_UNBIND FFA_SMC_32(0x80)
+#define FFA_NOTIFICATION_SET FFA_SMC_32(0x81)
+#define FFA_NOTIFICATION_GET FFA_SMC_32(0x82)
+#define FFA_NOTIFICATION_INFO_GET FFA_SMC_32(0x83)
+#define FFA_FN64_NOTIFICATION_INFO_GET FFA_SMC_64(0x83)
+#define FFA_RX_ACQUIRE FFA_SMC_32(0x84)
+#define FFA_SPM_ID_GET FFA_SMC_32(0x85)
+#define FFA_MSG_SEND2 FFA_SMC_32(0x86)
+#define FFA_SECONDARY_EP_REGISTER FFA_SMC_32(0x87)
+#define FFA_FN64_SECONDARY_EP_REGISTER FFA_SMC_64(0x87)
+#define FFA_MEM_PERM_GET FFA_SMC_32(0x88)
+#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88)
+#define FFA_MEM_PERM_SET FFA_SMC_32(0x89)
+#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89)
+#define FFA_CONSOLE_LOG FFA_SMC_32(0x8A)
+#define FFA_PARTITION_INFO_GET_REGS FFA_SMC_64(0x8B)
+#define FFA_EL3_INTR_HANDLE FFA_SMC_32(0x8C)
+#define FFA_MSG_SEND_DIRECT_REQ2 FFA_SMC_64(0x8D)
+#define FFA_MSG_SEND_DIRECT_RESP2 FFA_SMC_64(0x8E)
+
+/*
+ * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls FFA_FN_NATIVE(name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define FFA_FN_NATIVE(name) FFA_FN64_##name
+#else
+#define FFA_FN_NATIVE(name) FFA_##name
+#endif
+
+/* FFA error codes. */
+#define FFA_RET_SUCCESS (0)
+#define FFA_RET_NOT_SUPPORTED (-1)
+#define FFA_RET_INVALID_PARAMETERS (-2)
+#define FFA_RET_NO_MEMORY (-3)
+#define FFA_RET_BUSY (-4)
+#define FFA_RET_INTERRUPTED (-5)
+#define FFA_RET_DENIED (-6)
+#define FFA_RET_RETRY (-7)
+#define FFA_RET_ABORTED (-8)
+#define FFA_RET_NO_DATA (-9)
+
+/* FFA version encoding */
+#define FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
+#define FFA_MINOR_VERSION_MASK GENMASK(15, 0)
+#define FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x))))
+#define FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x))))
+#define FFA_PACK_VERSION_INFO(major, minor) \
+ (FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \
+ FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
+#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
+#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1)
+#define FFA_VERSION_1_2 FFA_PACK_VERSION_INFO(1, 2)
+
+/*
+ * The FF-A specification explicitly refers to '4K pages'. This should
+ * not be confused with the kernel PAGE_SIZE, which is the translation
+ * granule the kernel is configured with and may be one of 4K, 16K or 64K.
+ */
+#define FFA_PAGE_SIZE SZ_4K
+
+/*
+ * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
+ * query for FFA_RXTX_MAP.
+ */
+#define FFA_FEAT_RXTX_MIN_SZ_4K 0
+#define FFA_FEAT_RXTX_MIN_SZ_64K 1
+#define FFA_FEAT_RXTX_MIN_SZ_16K 2
+#define FFA_FEAT_RXTX_MIN_SZ_MASK GENMASK(1, 0)
+
+/* FFA Bus/Device/Driver related */
+struct ffa_device {
+ u32 id;
+ u32 properties;
+ int vm_id;
+ bool mode_32bit;
+ uuid_t uuid;
+ struct device dev;
+ const struct ffa_ops *ops;
+};
+
+#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)
+
+struct ffa_device_id {
+ uuid_t uuid;
+};
+
+struct ffa_driver {
+ const char *name;
+ int (*probe)(struct ffa_device *sdev);
+ void (*remove)(struct ffa_device *sdev);
+ const struct ffa_device_id *id_table;
+
+ struct device_driver driver;
+};
+
+#define to_ffa_driver(d) container_of_const(d, struct ffa_driver, driver)
+
+static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
+{
+ dev_set_drvdata(&fdev->dev, data);
+}
+
+static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
+{
+ return dev_get_drvdata(&fdev->dev);
+}
+
+struct ffa_partition_info;
+
+#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
+struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops);
+void ffa_device_unregister(struct ffa_device *ffa_dev);
+int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name);
+void ffa_driver_unregister(struct ffa_driver *driver);
+void ffa_devices_unregister(void);
+bool ffa_device_is_valid(struct ffa_device *ffa_dev);
+
+#else
+static inline struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops)
+{
+ return NULL;
+}
+
+static inline void ffa_device_unregister(struct ffa_device *dev) {}
+
+static inline void ffa_devices_unregister(void) {}
+
+static inline int
+ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ return -EINVAL;
+}
+
+static inline void ffa_driver_unregister(struct ffa_driver *driver) {}
+
+static inline
+bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
+
+#endif /* CONFIG_ARM_FFA_TRANSPORT */
+
+#define ffa_register(driver) \
+ ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define ffa_unregister(driver) \
+ ffa_driver_unregister(driver)
+
+/**
+ * module_ffa_driver() - Helper macro for registering an FF-A driver
+ * @__ffa_driver: ffa_driver structure
+ *
+ * Helper macro for FF-A drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_ffa_driver(__ffa_driver) \
+ module_driver(__ffa_driver, ffa_register, ffa_unregister)
+
+extern const struct bus_type ffa_bus_type;
+
+/* The FF-A 1.0 partition info structure lacks the uuid field */
+#define FFA_1_0_PARTITON_INFO_SZ (8)
+
+/* FFA transport related */
+struct ffa_partition_info {
+ u16 id;
+ u16 exec_ctxt;
+/* partition supports receipt of direct requests */
+#define FFA_PARTITION_DIRECT_RECV BIT(0)
+/* partition can send direct requests. */
+#define FFA_PARTITION_DIRECT_SEND BIT(1)
+/* partition can send and receive indirect messages. */
+#define FFA_PARTITION_INDIRECT_MSG BIT(2)
+/* partition can receive notifications */
+#define FFA_PARTITION_NOTIFICATION_RECV BIT(3)
+/* partition runs in the AArch64 execution state. */
+#define FFA_PARTITION_AARCH64_EXEC BIT(8)
+/* partition supports receipt of direct request2 */
+#define FFA_PARTITION_DIRECT_REQ2_RECV BIT(9)
+/* partition can send direct request2. */
+#define FFA_PARTITION_DIRECT_REQ2_SEND BIT(10)
+ u32 properties;
+ uuid_t uuid;
+};
+
+static inline
+bool ffa_partition_check_property(struct ffa_device *dev, u32 property)
+{
+ return dev->properties & property;
+}
+
+#define ffa_partition_supports_notify_recv(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_NOTIFICATION_RECV)
+
+#define ffa_partition_supports_indirect_msg(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_INDIRECT_MSG)
+
+#define ffa_partition_supports_direct_recv(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV)
+
+#define ffa_partition_supports_direct_req2_recv(dev) \
+ (ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_REQ2_RECV) && \
+ !dev->mode_32bit)
+
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
+struct ffa_send_direct_data {
+ unsigned long data0; /* w3/x3 */
+ unsigned long data1; /* w4/x4 */
+ unsigned long data2; /* w5/x5 */
+ unsigned long data3; /* w6/x6 */
+ unsigned long data4; /* w7/x7 */
+};
+
+struct ffa_indirect_msg_hdr {
+ u32 flags;
+ u32 res0;
+ u32 offset;
+ u32 send_recv_id;
+ u32 size;
+ u32 res1;
+ uuid_t uuid;
+};
+
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */
+struct ffa_send_direct_data2 {
+ unsigned long data[14]; /* x4-x17 */
+};
+
+struct ffa_mem_region_addr_range {
+ /* The base IPA of the constituent memory region, aligned to 4 kiB */
+ u64 address;
+ /* The number of 4 kiB pages in the constituent memory region. */
+ u32 pg_cnt;
+ u32 reserved;
+};
+
+struct ffa_composite_mem_region {
+ /*
+ * The total number of 4 kiB pages included in this memory region. This
+ * must be equal to the sum of page counts specified in each
+ * `struct ffa_mem_region_addr_range`.
+ */
+ u32 total_pg_cnt;
+ /* The number of constituents included in this memory region range */
+ u32 addr_range_cnt;
+ u64 reserved;
+ /** An array of `addr_range_cnt` memory region constituents. */
+ struct ffa_mem_region_addr_range constituents[];
+};
+
+struct ffa_mem_region_attributes {
+ /* The ID of the VM to which the memory is being given or shared. */
+ u16 receiver;
+ /*
+ * The permissions with which the memory region should be mapped in the
+ * receiver's page table.
+ */
+#define FFA_MEM_EXEC BIT(3)
+#define FFA_MEM_NO_EXEC BIT(2)
+#define FFA_MEM_RW BIT(1)
+#define FFA_MEM_RO BIT(0)
+ u8 attrs;
+ /*
+ * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
+ * for memory regions with multiple borrowers.
+ */
+#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0)
+ u8 flag;
+ /*
+ * Offset in bytes from the start of the outer `ffa_memory_region` to
+	 * a `struct ffa_composite_mem_region`.
+ */
+ u32 composite_off;
+ u8 impdef_val[16];
+ u64 reserved;
+};
+
+struct ffa_mem_region {
+ /* The ID of the VM/owner which originally sent the memory region */
+ u16 sender_id;
+#define FFA_MEM_NORMAL BIT(5)
+#define FFA_MEM_DEVICE BIT(4)
+
+#define FFA_MEM_WRITE_BACK (3 << 2)
+#define FFA_MEM_NON_CACHEABLE (1 << 2)
+
+#define FFA_DEV_nGnRnE (0 << 2)
+#define FFA_DEV_nGnRE (1 << 2)
+#define FFA_DEV_nGRE (2 << 2)
+#define FFA_DEV_GRE (3 << 2)
+
+#define FFA_MEM_NON_SHAREABLE (0)
+#define FFA_MEM_OUTER_SHAREABLE (2)
+#define FFA_MEM_INNER_SHAREABLE (3)
+ /* Memory region attributes, upper byte MBZ pre v1.1 */
+ u16 attributes;
+/*
+ * Clear memory region contents after unmapping it from the sender and
+ * before mapping it for any receiver.
+ */
+#define FFA_MEM_CLEAR BIT(0)
+/*
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define FFA_TIME_SLICE_ENABLE BIT(1)
+
+#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3)
+
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9)
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5)
+ /* Flags to control behaviour of the transaction. */
+ u32 flags;
+#define HANDLE_LOW_MASK GENMASK_ULL(31, 0)
+#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32)
+#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
+#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))
+
+#define PACK_HANDLE(l, h) \
+ (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
+ /*
+ * A globally-unique ID assigned by the hypervisor for a region
+ * of memory being sent between VMs.
+ */
+ u64 handle;
+ /*
+ * An implementation defined value associated with the receiver and the
+ * memory region.
+ */
+ u64 tag;
+ /* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
+ u32 ep_mem_size;
+ /*
+ * The number of `ffa_mem_region_attributes` entries included in this
+ * transaction.
+ */
+ u32 ep_count;
+ /*
+ * 16-byte aligned offset from the base address of this descriptor
+ * to the first element of the endpoint memory access descriptor array
+ * Valid only from v1.1
+ */
+ u32 ep_mem_offset;
+ /* MBZ, valid only from v1.1 */
+ u32 reserved[3];
+};
+
+#define CONSTITUENTS_OFFSET(x) \
+ (offsetof(struct ffa_composite_mem_region, constituents[x]))
+
+#define FFA_EMAD_HAS_IMPDEF_FIELD(version) ((version) >= FFA_VERSION_1_2)
+#define FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version) ((version) > FFA_VERSION_1_0)
+
+static inline u32 ffa_emad_size_get(u32 ffa_version)
+{
+ u32 sz;
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(ffa_version))
+ sz = sizeof(*ep_mem_access);
+ else
+ sz = sizeof(*ep_mem_access) - sizeof(ep_mem_access->impdef_val);
+
+ return sz;
+}
+
+static inline u32
+ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
+{
+ u32 offset = count * ffa_emad_size_get(ffa_version);
+ /*
+	 * Prior to v1.1, the endpoint memory descriptor array started at
+	 * offset 32 (i.e. the offset of ep_mem_offset in the current structure).
+ */
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(ffa_version))
+ offset += offsetof(struct ffa_mem_region, ep_mem_offset);
+ else
+ offset += sizeof(struct ffa_mem_region);
+
+ return offset;
+}
+
+struct ffa_mem_ops_args {
+ bool use_txbuf;
+ u32 nattrs;
+ u32 flags;
+ u64 tag;
+ u64 g_handle;
+ struct scatterlist *sg;
+ struct ffa_mem_region_attributes *attrs;
+};
+
+struct ffa_info_ops {
+ u32 (*api_version_get)(void);
+ int (*partition_info_get)(const char *uuid_str,
+ struct ffa_partition_info *buffer);
+};
+
+struct ffa_msg_ops {
+ void (*mode_32bit_set)(struct ffa_device *dev);
+ int (*sync_send_receive)(struct ffa_device *dev,
+ struct ffa_send_direct_data *data);
+ int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz);
+ int (*sync_send_receive2)(struct ffa_device *dev,
+ struct ffa_send_direct_data2 *data);
+};
+
+struct ffa_mem_ops {
+ int (*memory_reclaim)(u64 g_handle, u32 flags);
+ int (*memory_share)(struct ffa_mem_ops_args *args);
+ int (*memory_lend)(struct ffa_mem_ops_args *args);
+};
+
+struct ffa_cpu_ops {
+ int (*run)(struct ffa_device *dev, u16 vcpu);
+};
+
+typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
+typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
+typedef void (*ffa_fwk_notifier_cb)(int notify_id, void *cb_data, void *buf);
+
+struct ffa_notifier_ops {
+ int (*sched_recv_cb_register)(struct ffa_device *dev,
+ ffa_sched_recv_cb cb, void *cb_data);
+ int (*sched_recv_cb_unregister)(struct ffa_device *dev);
+ int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id);
+ int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
+ int (*fwk_notify_request)(struct ffa_device *dev,
+ ffa_fwk_notifier_cb cb, void *cb_data,
+ int notify_id);
+ int (*fwk_notify_relinquish)(struct ffa_device *dev, int notify_id);
+ int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
+ u16 vcpu);
+};
+
+struct ffa_ops {
+ const struct ffa_info_ops *info_ops;
+ const struct ffa_msg_ops *msg_ops;
+ const struct ffa_mem_ops *mem_ops;
+ const struct ffa_cpu_ops *cpu_ops;
+ const struct ffa_notifier_ops *notifier_ops;
+};
+
+#endif /* _LINUX_ARM_FFA_H */
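A skeleton FF-A driver tying the pieces above together (all names and the UUID
hypothetical):

static int my_ffa_probe(struct ffa_device *ffa_dev)
{
	struct ffa_send_direct_data data = { .data0 = 0x1 };

	if (!ffa_partition_supports_direct_recv(ffa_dev))
		return -EOPNOTSUPP;

	/* Ping the partition with a direct message request. */
	return ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
}

static void my_ffa_remove(struct ffa_device *ffa_dev)
{
	/* Tear down any state stored via ffa_dev_set_drvdata(). */
}

static const struct ffa_device_id my_ffa_ids[] = {
	/* UUID of the service partition this driver binds to. */
	{ UUID_INIT(0x12345678, 0x1234, 0x1234,
		    0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34) },
	{}
};

static struct ffa_driver my_ffa_driver = {
	.name		= "my_ffa",
	.probe		= my_ffa_probe,
	.remove		= my_ffa_remove,
	.id_table	= my_ffa_ids,
};
module_ffa_driver(my_ffa_driver);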
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
new file mode 100644
index 000000000000..7f00c5285a32
--- /dev/null
+++ b/include/linux/arm_mpam.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2025 Arm Ltd. */
+
+#ifndef __LINUX_ARM_MPAM_H
+#define __LINUX_ARM_MPAM_H
+
+#include <linux/acpi.h>
+#include <linux/types.h>
+
+struct mpam_msc;
+
+enum mpam_msc_iface {
+ MPAM_IFACE_MMIO, /* a real MPAM MSC */
+ MPAM_IFACE_PCC, /* a fake MPAM MSC */
+};
+
+enum mpam_class_types {
+ MPAM_CLASS_CACHE, /* Caches, e.g. L2, L3 */
+ MPAM_CLASS_MEMORY, /* Main memory */
+ MPAM_CLASS_UNKNOWN, /* Everything else, e.g. SMMU */
+};
+
+#define MPAM_CLASS_ID_DEFAULT 255
+
+#ifdef CONFIG_ACPI_MPAM
+int acpi_mpam_parse_resources(struct mpam_msc *msc,
+ struct acpi_mpam_msc_node *tbl_msc);
+
+int acpi_mpam_count_msc(void);
+#else
+static inline int acpi_mpam_parse_resources(struct mpam_msc *msc,
+ struct acpi_mpam_msc_node *tbl_msc)
+{
+ return -EINVAL;
+}
+
+static inline int acpi_mpam_count_msc(void) { return -EINVAL; }
+#endif
+
+#ifdef CONFIG_ARM64_MPAM_DRIVER
+int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
+ enum mpam_class_types type, u8 class_id, int component_id);
+#else
+static inline int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
+ enum mpam_class_types type, u8 class_id,
+ int component_id)
+{
+ return -EINVAL;
+}
+#endif
+
+/**
+ * mpam_register_requestor() - Register a requestor with the MPAM driver
+ * @partid_max: The maximum PARTID value the requestor can generate.
+ * @pmg_max: The maximum PMG value the requestor can generate.
+ *
+ * Registers a requestor with the MPAM driver to ensure the chosen system-wide
+ * minimum PARTID and PMG values will allow the requestor's features to be used.
+ *
+ * Returns an error if the registration is too late, and a larger PARTID/PMG
+ * value has been advertised to user-space. In this case the requestor should
+ * not use its MPAM features. Returns 0 on success.
+ */
+int mpam_register_requestor(u16 partid_max, u8 pmg_max);
+
+#endif /* __LINUX_ARM_MPAM_H */
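A sketch of the requestor registration contract (limits hypothetical):

static int my_requestor_init(void)
{
	/* This requestor can generate PARTIDs 0..63 and PMGs 0..3. */
	int ret = mpam_register_requestor(63, 3);

	if (ret) {
		/* Too late: user-space already saw larger system-wide limits. */
		return ret;
	}

	/* Safe to start tagging transactions with PARTID/PMG values. */
	return 0;
}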
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
new file mode 100644
index 000000000000..f652a5028b59
--- /dev/null
+++ b/include/linux/arm_sdei.h
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __LINUX_ARM_SDEI_H
+#define __LINUX_ARM_SDEI_H
+
+#include <uapi/linux/arm_sdei.h>
+
+#include <acpi/ghes.h>
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+#include <asm/sdei.h>
+#endif
+
+/* Arch code should override this to set the entry point from firmware... */
+#ifndef sdei_arch_get_entry_point
+#define sdei_arch_get_entry_point(conduit) (0)
+#endif
+
+/*
+ * When an event occurs, sdei_event_handler() will call a user-provided callback
+ * of this type in NMI context on the CPU that received the event.
+ */
+typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg);
+
+/*
+ * Register your callback to claim an event. The event must be described
+ * by firmware.
+ */
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg);
+
+/*
+ * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling
+ * it until it succeeds.
+ */
+int sdei_event_unregister(u32 event_num);
+
+int sdei_event_enable(u32 event_num);
+int sdei_event_disable(u32 event_num);
+
+/* GHES register/unregister helpers */
+int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
+ sdei_event_callback *critical_cb);
+int sdei_unregister_ghes(struct ghes *ghes);
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+/* For use by arch code when CPU hotplug notifiers are not appropriate. */
+int sdei_mask_local_cpu(void);
+int sdei_unmask_local_cpu(void);
+void __init acpi_sdei_init(void);
+void sdei_handler_abort(void);
+#else
+static inline int sdei_mask_local_cpu(void) { return 0; }
+static inline int sdei_unmask_local_cpu(void) { return 0; }
+static inline void acpi_sdei_init(void) { }
+static inline void sdei_handler_abort(void) { }
+#endif /* CONFIG_ARM_SDE_INTERFACE */
+
+
+/*
+ * This struct represents an event that has been registered. The driver
+ * maintains a list of all events, and which ones are registered. (Private
+ * events have one entry in the list, but are registered on each CPU).
+ * A pointer to this struct is passed to firmware, and back to the event
+ * handler. The event handler can then use this to invoke the registered
+ * callback, without having to walk the list.
+ *
+ * For CPU private events, this structure is per-cpu.
+ */
+struct sdei_registered_event {
+ /* For use by arch code: */
+ struct pt_regs interrupted_regs;
+
+ sdei_event_callback *callback;
+ void *callback_arg;
+ u32 event_num;
+ u8 priority;
+};
+
+/* The arch code entry point should then call this when an event arrives. */
+int notrace sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
+/* arch code may use this to retrieve the extra registers. */
+int sdei_api_event_context(u32 query, u64 *result);
+
+#endif /* __LINUX_ARM_SDEI_H */
diff --git a/include/linux/armada-37xx-rwtm-mailbox.h b/include/linux/armada-37xx-rwtm-mailbox.h
new file mode 100644
index 000000000000..ef4bd705eb65
--- /dev/null
+++ b/include/linux/armada-37xx-rwtm-mailbox.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * rWTM BIU Mailbox driver for Armada 37xx
+ *
+ * Author: Marek Behún <kabel@kernel.org>
+ */
+
+#ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
+#define _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
+
+#include <linux/types.h>
+
+struct armada_37xx_rwtm_tx_msg {
+ u16 command;
+ u32 args[16];
+};
+
+struct armada_37xx_rwtm_rx_msg {
+ u32 retval;
+ u32 status[16];
+};
+
+#endif /* _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ */
diff --git a/include/linux/array_size.h b/include/linux/array_size.h
new file mode 100644
index 000000000000..06d7d83196ca
--- /dev/null
+++ b/include/linux/array_size.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ARRAY_SIZE_H
+#define _LINUX_ARRAY_SIZE_H
+
+#include <linux/compiler.h>
+
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
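+
+/*
+ * Illustrative usage (not part of this header):
+ *
+ *	static const int rates[] = { 100, 200, 400 };
+ *
+ *	for (i = 0; i < ARRAY_SIZE(rates); i++)
+ *		...
+ *
+ * Passing a pointer instead of an array fails to build, via
+ * __must_be_array().
+ */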
+
+#endif /* _LINUX_ARRAY_SIZE_H */
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
new file mode 100644
index 000000000000..83ad775ad0aa
--- /dev/null
+++ b/include/linux/ascii85.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ASCII85_H_
+#define _ASCII85_H_
+
+#include <linux/math.h>
+#include <linux/types.h>
+
+#define ASCII85_BUFSZ 6
+
+static inline long
+ascii85_encode_len(long len)
+{
+ return DIV_ROUND_UP(len, 4);
+}
+
+static inline const char *
+ascii85_encode(u32 in, char *out)
+{
+ int i;
+
+ if (in == 0)
+ return "z";
+
+ out[5] = '\0';
+ for (i = 5; i--; ) {
+ out[i] = '!' + in % 85;
+ in /= 85;
+ }
+
+ return out;
+}
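+
+/*
+ * Illustrative sketch (not part of this header): stream a buffer as
+ * ascii85, one 32-bit word at a time, reusing a small stack buffer
+ * (s and in are placeholders for a seq_file and the source words):
+ *
+ *	char out[ASCII85_BUFSZ];
+ *
+ *	for (i = 0; i < ascii85_encode_len(len); i++)
+ *		seq_puts(s, ascii85_encode(in[i], out));
+ */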
+
+#endif
diff --git a/include/linux/asn1.h b/include/linux/asn1.h
index eed6982860ba..a4d0bdd10711 100644
--- a/include/linux/asn1.h
+++ b/include/linux/asn1.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ASN.1 BER/DER/CER encoding definitions
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASN1_H
diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h
index ab3a6c002f7b..b38361953a48 100644
--- a/include/linux/asn1_ber_bytecode.h
+++ b/include/linux/asn1_ber_bytecode.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ASN.1 BER/DER/CER parsing state machine internal definitions
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASN1_BER_BYTECODE_H
diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h
index fa2ff5bc0483..b41bce82a191 100644
--- a/include/linux/asn1_decoder.h
+++ b/include/linux/asn1_decoder.h
@@ -1,18 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ASN.1 decoder
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASN1_DECODER_H
#define _LINUX_ASN1_DECODER_H
#include <linux/asn1.h>
+#include <linux/types.h>
struct asn1_decoder;
diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h
new file mode 100644
index 000000000000..d17484dffb74
--- /dev/null
+++ b/include/linux/asn1_encoder.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_ASN1_ENCODER_H
+#define _LINUX_ASN1_ENCODER_H
+
+#include <linux/types.h>
+#include <linux/asn1.h>
+#include <linux/asn1_ber_bytecode.h>
+
+#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32))
+unsigned char *
+asn1_encode_integer(unsigned char *data, const unsigned char *end_data,
+ s64 integer);
+unsigned char *
+asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
+ u32 oid[], int oid_len);
+unsigned char *
+asn1_encode_tag(unsigned char *data, const unsigned char *end_data,
+ u32 tag, const unsigned char *string, int len);
+unsigned char *
+asn1_encode_octet_string(unsigned char *data,
+ const unsigned char *end_data,
+ const unsigned char *string, u32 len);
+unsigned char *
+asn1_encode_sequence(unsigned char *data, const unsigned char *end_data,
+ const unsigned char *seq, int len);
+unsigned char *
+asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
+ bool val);
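+
+/*
+ * Illustrative sketch (not part of this header): each encoder writes no
+ * further than @end_data and returns the advanced data pointer, or an
+ * error pointer on failure (assuming the ERR_PTR convention of the
+ * encoder implementation):
+ *
+ *	unsigned char buf[64], *p = buf;
+ *
+ *	p = asn1_encode_integer(p, buf + sizeof(buf), 42);
+ *	if (IS_ERR(p))
+ *		return PTR_ERR(p);
+ */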
+
+#endif
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
index a89df3be1686..8b3f230ce894 100644
--- a/include/linux/assoc_array.h
+++ b/include/linux/assoc_array.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic associative array implementation.
*
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASSOC_ARRAY_H
diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h
index 711275e6681c..dca733ef6750 100644
--- a/include/linux/assoc_array_priv.h
+++ b/include/linux/assoc_array_priv.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Private definitions for the generic associative array implementation.
*
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASSOC_ARRAY_PRIV_H
diff --git a/include/linux/async.h b/include/linux/async.h
index 6b0226bdaadc..19b778d08600 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -1,19 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* async.h: Asynchronous function calls for boot performance
*
* (C) Copyright 2009 Intel Corporation
* Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
#ifndef __ASYNC_H__
#define __ASYNC_H__
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/numa.h>
+#include <linux/device.h>
typedef u64 async_cookie_t;
typedef void (*async_func_t) (void *data, async_cookie_t cookie);
@@ -37,14 +35,90 @@ struct async_domain {
struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
.registered = 0 }
-extern async_cookie_t async_schedule(async_func_t func, void *data);
-extern async_cookie_t async_schedule_domain(async_func_t func, void *data,
- struct async_domain *domain);
-void async_unregister_domain(struct async_domain *domain);
+async_cookie_t async_schedule_node(async_func_t func, void *data,
+ int node);
+async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
+ int node,
+ struct async_domain *domain);
+
+/**
+ * async_schedule - schedule a function for asynchronous execution
+ * @func: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t async_schedule(async_func_t func, void *data)
+{
+ return async_schedule_node(func, data, NUMA_NO_NODE);
+}
+
+/**
+ * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
+ * @func: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ * @domain: the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t
+async_schedule_domain(async_func_t func, void *data,
+ struct async_domain *domain)
+{
+ return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain);
+}
+
+/**
+ * async_schedule_dev - A device specific version of async_schedule
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function. Running the function on CPUs
+ * close to the device gives the best possible outcome.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t
+async_schedule_dev(async_func_t func, struct device *dev)
+{
+ return async_schedule_node(func, dev, dev_to_node(dev));
+}
+
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
+
+/**
+ * async_schedule_dev_domain - A device specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ * @domain: the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function. Running the function on CPUs
+ * close to the device gives the best possible outcome.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t
+async_schedule_dev_domain(async_func_t func, struct device *dev,
+ struct async_domain *domain)
+{
+ return async_schedule_node_domain(func, dev, dev_to_node(dev), domain);
+}
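+
+/*
+ * Illustrative sketch (not part of this header): schedule work and wait
+ * for all outstanding asynchronous calls before proceeding (probe_async
+ * is a placeholder):
+ *
+ *	static void probe_async(void *data, async_cookie_t cookie)
+ *	{
+ *		...
+ *	}
+ *
+ *	async_schedule(probe_async, dev);
+ *	...
+ *	async_synchronize_full();
+ */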
+
extern void async_synchronize_full(void);
extern void async_synchronize_full_domain(struct async_domain *domain);
extern void async_synchronize_cookie(async_cookie_t cookie);
extern void async_synchronize_cookie_domain(async_cookie_t cookie,
struct async_domain *domain);
extern bool current_is_async(void);
+extern void async_init(void);
#endif
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 28e3cf1465ab..1ca9f9e05f4f 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright © 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef _ASYNC_TX_H_
#define _ASYNC_TX_H_
@@ -49,7 +36,7 @@ struct dma_chan_ref {
/**
* async_tx_flags - modifiers for the async_* calls
* @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
- * the destination address is not a source. The asynchronous case handles this
+ * destination address is not a source. The asynchronous case handles this
* implicitly, the synchronous case needs to zero the destination block.
* @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
* also one of the source addresses. In the synchronous case the destination
@@ -176,9 +163,15 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum sum_check_flags *result,
- struct async_submit_ctl *submit);
+async_xor_offs(struct page *dest, unsigned int offset,
+ struct page **src_list, unsigned int *src_offset,
+ int src_cnt, size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_xor_val_offs(struct page *dest, unsigned int offset,
+ struct page **src_list, unsigned int *src_offset,
+ int src_cnt, size_t len, enum sum_check_flags *result,
+ struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
@@ -188,21 +181,23 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+async_gen_syndrome(struct page **blocks, unsigned int *offsets, int src_cnt,
size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+async_syndrome_val(struct page **blocks, unsigned int *offsets, int src_cnt,
size_t len, enum sum_check_flags *pqres, struct page *spare,
- struct async_submit_ctl *submit);
+ unsigned int s_off, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
- struct page **ptrs, struct async_submit_ctl *submit);
+ struct page **ptrs, unsigned int *offs,
+ struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
- struct page **ptrs, struct async_submit_ctl *submit);
+ struct page **ptrs, unsigned int *offs,
+ struct async_submit_ctl *submit);
void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
#endif /* _ASYNC_TX_H_ */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c7a353825450..54b416e26995 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -1,38 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
*
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available from http://www.t13.org/
- *
*/
#ifndef __LINUX_ATA_H__
#define __LINUX_ATA_H__
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <asm/byteorder.h>
/* defines only for the constants which don't work well as enums */
#define ATA_DMA_BOUNDARY 0xffffUL
@@ -46,6 +29,7 @@ enum {
ATA_MAX_SECTORS_128 = 128,
ATA_MAX_SECTORS = 256,
ATA_MAX_SECTORS_1024 = 1024,
+ ATA_MAX_SECTORS_8191 = 8191,
ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */
ATA_MAX_SECTORS_TAPE = 65535,
ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */
@@ -339,14 +323,21 @@ enum {
ATA_LOG_SATA_NCQ = 0x10,
ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
+ ATA_LOG_CDL = 0x18,
+ ATA_LOG_CDL_SIZE = ATA_SECT_SIZE,
ATA_LOG_IDENTIFY_DEVICE = 0x30,
+ ATA_LOG_SENSE_NCQ = 0x0F,
+ ATA_LOG_SENSE_NCQ_SIZE = ATA_SECT_SIZE * 2,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device log pages: */
+ ATA_LOG_SUPPORTED_CAPABILITIES = 0x03,
+ ATA_LOG_CURRENT_SETTINGS = 0x04,
ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
- /* Identify device SATA settings log:*/
+ /* Identify device SATA settings log: */
ATA_LOG_DEVSLP_OFFSET = 0x30,
ATA_LOG_DEVSLP_SIZE = 0x08,
ATA_LOG_DEVSLP_MDAT = 0x00,
@@ -431,6 +422,8 @@ enum {
SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
+ SETFEATURES_CDL = 0x0d, /* Enable/disable cmd duration limits */
+
/* SETFEATURE Sector counts for SATA features */
SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */
SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */
@@ -441,6 +434,7 @@ enum {
SATA_DEVSLP = 0x09, /* Device Sleep */
SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
+ SETFEATURE_SENSE_DATA_SUCC_NCQ = 0xC4, /* Sense Data for successful NCQ commands */
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
@@ -448,6 +442,8 @@ enum {
ATA_SET_MAX_LOCK = 0x02,
ATA_SET_MAX_UNLOCK = 0x03,
ATA_SET_MAX_FREEZE_LOCK = 0x04,
+ ATA_SET_MAX_PASSWD_DMA = 0x05,
+ ATA_SET_MAX_UNLOCK_DMA = 0x06,
/* feature values for DEVICE CONFIGURATION OVERLAY */
ATA_DCO_RESTORE = 0xC0,
@@ -571,6 +567,7 @@ struct ata_bmdma_prd {
#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
+#define ata_id_is_locked(id) (((id)[ATA_ID_DLF] & 0x7) == 0x7)
#define ata_id_has_atapi_AN(id) \
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
@@ -579,6 +576,18 @@ struct ata_bmdma_prd {
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 2)))
+#define ata_id_has_devslp(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)))
+#define ata_id_has_ncq_autosense(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)))
+#define ata_id_has_dipm(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3)))
#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
#define ata_id_u32(id,n) \
@@ -591,9 +600,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
-#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -605,17 +611,6 @@ static inline bool ata_id_has_hipm(const u16 *id)
return val & (1 << 9);
}
-static inline bool ata_id_has_dipm(const u16 *id)
-{
- u16 val = id[ATA_ID_FEATURE_SUPP];
-
- if (val == 0 || val == 0xffff)
- return false;
-
- return val & (1 << 3);
-}
-
-
static inline bool ata_id_has_fua(const u16 *id)
{
if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000)
@@ -630,15 +625,6 @@ static inline bool ata_id_has_flush(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 12);
}
-static inline bool ata_id_flush_enabled(const u16 *id)
-{
- if (ata_id_has_flush(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- return id[ATA_ID_CFS_ENABLE_2] & (1 << 12);
-}
-
static inline bool ata_id_has_flush_ext(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
@@ -646,19 +632,6 @@ static inline bool ata_id_has_flush_ext(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 13);
}
-static inline bool ata_id_flush_ext_enabled(const u16 *id)
-{
- if (ata_id_has_flush_ext(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- /*
- * some Maxtor disks have bit 13 defined incorrectly
- * so check bit 10 too
- */
- return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400;
-}
-
static inline u32 ata_id_logical_sector_size(const u16 *id)
{
/* T13/1699-D Revision 6a, Sep 6, 2008. Page 128.
@@ -713,15 +686,6 @@ static inline bool ata_id_has_lba48(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 10);
}
-static inline bool ata_id_lba48_enabled(const u16 *id)
-{
- if (ata_id_has_lba48(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- return id[ATA_ID_CFS_ENABLE_2] & (1 << 10);
-}
-
static inline bool ata_id_hpa_enabled(const u16 *id)
{
/* Yes children, word 83 valid bits cover word 82 data */
@@ -784,16 +748,21 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15)))
+ return false;
+ if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_3] & BIT(6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!ata_id_has_sense_reporting(id))
+ return false;
+ /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */
+ if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_4] & BIT(6);
}
/**
@@ -1058,76 +1027,6 @@ static inline bool atapi_id_dmadir(const u16 *dev_id)
return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000);
}
-/*
- * ata_id_is_lba_capacity_ok() performs a sanity check on
- * the claimed LBA capacity value for the device.
- *
- * Returns 1 if LBA capacity looks sensible, 0 otherwise.
- *
- * It is called only once for each device.
- */
-static inline bool ata_id_is_lba_capacity_ok(u16 *id)
-{
- unsigned long lba_sects, chs_sects, head, tail;
-
- /* No non-LBA info .. so valid! */
- if (id[ATA_ID_CYLS] == 0)
- return true;
-
- lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
-
- /*
- * The ATA spec tells large drives to return
- * C/H/S = 16383/16/63 independent of their size.
- * Some drives can be jumpered to use 15 heads instead of 16.
- * Some drives can be jumpered to use 4092 cyls instead of 16383.
- */
- if ((id[ATA_ID_CYLS] == 16383 ||
- (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) &&
- id[ATA_ID_SECTORS] == 63 &&
- (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) &&
- (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS]))
- return true;
-
- chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS];
-
- /* perform a rough sanity check on lba_sects: within 10% is OK */
- if (lba_sects - chs_sects < chs_sects/10)
- return true;
-
- /* some drives have the word order reversed */
- head = (lba_sects >> 16) & 0xffff;
- tail = lba_sects & 0xffff;
- lba_sects = head | (tail << 16);
-
- if (lba_sects - chs_sects < chs_sects/10) {
- *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects);
- return true; /* LBA capacity is (now) good */
- }
-
- return false; /* LBA capacity value may be bad */
-}
-
-static inline void ata_id_to_hd_driveid(u16 *id)
-{
-#ifdef __BIG_ENDIAN
- /* accessed in struct hd_driveid as 8-bit values */
- id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]);
- id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]);
- id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]);
- id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]);
- id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]);
-
- /* as 32-bit values */
- *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
- *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG);
-
- /* as 64-bit value */
- *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] =
- ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
-#endif
-}
-
static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index 619d9e78e644..b9745cc08e38 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ATA_PLATFORM_H
#define __LINUX_ATA_PLATFORM_H
@@ -18,7 +19,8 @@ extern int __pata_platform_probe(struct device *dev,
struct resource *irq_res,
unsigned int ioport_shift,
int __pio_mask,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht,
+ bool use16bit);
/*
* Marvell SATA private data
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 73fd8b7e9534..a55bfc6567d0 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ATALK_H__
#define __LINUX_ATALK_H__
@@ -107,15 +108,17 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
#define AARP_RESOLVE_TIME (10 * HZ)
extern struct datalink_proto *ddp_dl, *aarp_dl;
-extern void aarp_proto_init(void);
+extern int aarp_proto_init(void);
/* Inter module exports */
/* Give a device find its atif control structure */
+#if IS_ENABLED(CONFIG_ATALK)
static inline struct atalk_iface *atalk_find_dev(struct net_device *dev)
{
return dev->atalk_ptr;
}
+#endif
extern struct atalk_addr *atalk_find_dev_addr(struct net_device *dev);
extern struct net_device *atrtr_get_dev(struct atalk_addr *sa);
@@ -142,7 +145,12 @@ extern rwlock_t atalk_interfaces_lock;
extern struct atalk_route atrtr_default;
-extern const struct file_operations atalk_seq_arp_fops;
+struct aarp_iter_state {
+ int bucket;
+ struct aarp_entry **table;
+};
+
+extern const struct seq_operations aarp_seq_ops;
extern int sysctl_aarp_expiry_time;
extern int sysctl_aarp_tick_time;
@@ -150,19 +158,29 @@ extern int sysctl_aarp_retransmit_limit;
extern int sysctl_aarp_resolve_time;
#ifdef CONFIG_SYSCTL
-extern void atalk_register_sysctl(void);
+extern int atalk_register_sysctl(void);
extern void atalk_unregister_sysctl(void);
#else
-#define atalk_register_sysctl() do { } while(0)
-#define atalk_unregister_sysctl() do { } while(0)
+static inline int atalk_register_sysctl(void)
+{
+ return 0;
+}
+static inline void atalk_unregister_sysctl(void)
+{
+}
#endif
#ifdef CONFIG_PROC_FS
extern int atalk_proc_init(void);
extern void atalk_proc_exit(void);
#else
-#define atalk_proc_init() ({ 0; })
-#define atalk_proc_exit() do { } while(0)
+static inline int atalk_proc_init(void)
+{
+ return 0;
+}
+static inline void atalk_proc_exit(void)
+{
+}
#endif /* CONFIG_PROC_FS */
#endif /* __LINUX_ATALK_H__ */
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
deleted file mode 100644
index 76860a461ed2..000000000000
--- a/include/linux/ath9k_platform.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2008 Atheros Communications Inc.
- * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _LINUX_ATH9K_PLATFORM_H
-#define _LINUX_ATH9K_PLATFORM_H
-
-#define ATH9K_PLAT_EEP_MAX_WORDS 2048
-
-struct ath9k_platform_data {
- const char *eeprom_name;
-
- u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
- u8 *macaddr;
-
- int led_pin;
- u32 gpio_mask;
- u32 gpio_val;
-
- u32 bt_active_pin;
- u32 bt_priority_pin;
- u32 wlan_active_pin;
-
- bool endian_check;
- bool is_clk_25mhz;
- bool tx_gain_buffalo;
- bool disable_2ghz;
- bool disable_5ghz;
- bool led_active_high;
-
- int (*get_mac_revision)(void);
- int (*external_reset)(void);
-
- bool use_eeprom;
-};
-
-#endif /* _LINUX_ATH9K_PLATFORM_H */
diff --git a/include/linux/atm.h b/include/linux/atm.h
index 30006c435951..4b50fd0a6eab 100644
--- a/include/linux/atm.h
+++ b/include/linux/atm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* atm.h - general ATM declarations */
#ifndef _LINUX_ATM_H
#define _LINUX_ATM_H
diff --git a/include/linux/atm_suni.h b/include/linux/atm_suni.h
deleted file mode 100644
index 84f3aab54468..000000000000
--- a/include/linux/atm_suni.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by
- driver-specific utilities) */
-
-/* Written 1998,2000 by Werner Almesberger, EPFL ICA */
-
-
-#ifndef LINUX_ATM_SUNI_H
-#define LINUX_ATM_SUNI_H
-
-/* everything obsoleted */
-
-#endif
diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h
index db6b65fc0aec..2558439d849b 100644
--- a/include/linux/atm_tcp.h
+++ b/include/linux/atm_tcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* atm_tcp.h - Driver-specific declarations of the ATMTCP driver (for use by
driver-specific utilities) */
@@ -8,6 +9,8 @@
#include <uapi/linux/atm_tcp.h>
+struct atm_vcc;
+struct module;
struct atm_tcp_ops {
int (*attach)(struct atm_vcc *vcc,int itf);
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 0ec9bdb1cc9f..70807c679f1a 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* atmdev.h - ATM device driver declarations and various related items */
#ifndef LINUX_ATMDEV_H
#define LINUX_ATMDEV_H
@@ -150,7 +151,7 @@ struct atm_dev {
const char *type; /* device type name */
int number; /* device index */
void *dev_data; /* per-device data */
- void *phy_data; /* private PHY date */
+ void *phy_data; /* private PHY data */
unsigned long flags; /* device flags (ATM_DF_*) */
struct list_head local; /* local ATM addresses */
struct list_head lecs; /* LECS ATM addresses learned via ILMI */
@@ -175,11 +176,6 @@ struct atm_dev {
#define ATM_OF_IMMED 1 /* Attempt immediate delivery */
#define ATM_OF_INRATE 2 /* Attempt in-rate delivery */
-
-/*
- * ioctl, getsockopt, and setsockopt are optional and can be set to NULL.
- */
-
struct atmdev_ops { /* only send is required */
void (*dev_close)(struct atm_dev *dev);
int (*open)(struct atm_vcc *vcc);
@@ -189,11 +185,9 @@ struct atmdev_ops { /* only send is required */
int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
void __user *arg);
#endif
- int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,int optlen);
- int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,unsigned int optlen);
+ int (*pre_send)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
+ int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
void (*phy_put)(struct atm_dev *dev,unsigned char value,
unsigned long addr);
@@ -213,7 +207,8 @@ struct atmphy_ops {
struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
-};
+ unsigned int acct_truesize; /* truesize accounted to vcc */
+} __packed;
#define VCC_HTABLE_SIZE 32
@@ -240,6 +235,26 @@ void vcc_insert_socket(struct sock *sk);
void atm_dev_release_vccs(struct atm_dev *dev);
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ /*
+ * Because ATM skbs may not belong to a sock (and we don't
+ * necessarily want them to), skb->truesize may be adjusted,
+ * escaping the hack in pskb_expand_head() which avoids
+ * doing so for some cases. So stash the value of truesize
+ * at the time we accounted it, and atm_pop_raw() can use
+ * that value later, in case it changes.
+ */
+ refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+ ATM_SKB(skb)->acct_truesize = skb->truesize;
+ ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
+
+static inline void atm_return_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ WARN_ON_ONCE(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize,
+ &sk_atm(vcc)->sk_wmem_alloc));
+}
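+
+/*
+ * Illustrative pairing sketch (not part of this header): a driver accounts
+ * the skb before queueing it to hardware and returns the accounted size
+ * when the transmit completes:
+ *
+ *	atm_account_tx(vcc, skb);
+ *	... hand skb to hardware ...
+ *	atm_return_tx(vcc, skb);
+ *	dev_kfree_skb_any(skb);
+ */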
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
diff --git a/include/linux/atmel-isc-media.h b/include/linux/atmel-isc-media.h
new file mode 100644
index 000000000000..79a320fb724e
--- /dev/null
+++ b/include/linux/atmel-isc-media.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ */
+
+#ifndef __LINUX_ATMEL_ISC_MEDIA_H__
+#define __LINUX_ATMEL_ISC_MEDIA_H__
+
+/*
+ * There are 8 controls available:
+ * 4 gain controls, sliders, for each of the BAYER components: R, B, GR, GB.
+ * These gains are multipliers for each component, in format unsigned 0:4:9 with
+ * a default value of 512 (1.0 multiplier).
+ * 4 offset controls, sliders, for each of the BAYER components: R, B, GR, GB.
+ * These offsets are added/subtracted from each component, in format signed
+ * 1:12:0 with a default value of 0 (+/- 0).
+ *
+ * To expose this to userspace, 8 custom controls are added, in an auto cluster.
+ *
+ * To summarize the functionality:
+ * The auto cluster switch is the auto white balance control, and it works
+ * like this:
+ * AWB == 1: auto white balance is on, the do_white_balance button is
+ * inactive, and the gains/offsets are inactive but volatile and readable.
+ * Thus, the results of the white balance algorithm are available to
+ * userspace to read at any time.
+ * AWB == 0: auto white balance is off, the cluster is in manual mode, and
+ * the user can configure the gains/offsets directly.
+ * Moreover, if the do_white_balance button is pressed, the driver performs
+ * a one-time adjustment (preferably with a color checker card), and
+ * userspace can then read the new values.
+ *
+ * With this feature, userspace can save the coefficients and reinstall
+ * them, for example after a reboot or after reprobing the driver.
+ */
+
+enum atmel_isc_ctrl_id {
+ /* Red component gain control */
+ ISC_CID_R_GAIN = (V4L2_CID_USER_ATMEL_ISC_BASE + 0),
+ /* Blue component gain control */
+ ISC_CID_B_GAIN,
+ /* Green Red component gain control */
+ ISC_CID_GR_GAIN,
+ /* Green Blue gain control */
+ ISC_CID_GB_GAIN,
+ /* Red component offset control */
+ ISC_CID_R_OFFSET,
+ /* Blue component offset control */
+ ISC_CID_B_OFFSET,
+ /* Green Red component offset control */
+ ISC_CID_GR_OFFSET,
+ /* Green Blue component offset control */
+ ISC_CID_GB_OFFSET,
+};
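+
+/*
+ * Illustrative userspace sketch (not part of this header): read back the
+ * red gain computed by the AWB algorithm (assumes <linux/videodev2.h> and
+ * an open video fd):
+ *
+ *	struct v4l2_ext_control c = { .id = ISC_CID_R_GAIN };
+ *	struct v4l2_ext_controls cs = { .count = 1, .controls = &c };
+ *
+ *	ioctl(fd, VIDIOC_G_EXT_CTRLS, &cs);
+ */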
+
+#endif
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
deleted file mode 100644
index 42a9e1884842..000000000000
--- a/include/linux/atmel-mci.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __LINUX_ATMEL_MCI_H
-#define __LINUX_ATMEL_MCI_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-#define ATMCI_MAX_NR_SLOTS 2
-
-/**
- * struct mci_slot_pdata - board-specific per-slot configuration
- * @bus_width: Number of data lines wired up the slot
- * @detect_pin: GPIO pin wired to the card detect switch
- * @wp_pin: GPIO pin wired to the write protect sensor
- * @detect_is_active_high: The state of the detect pin when it is active
- * @non_removable: The slot is not removable, only detect once
- *
- * If a given slot is not present on the board, @bus_width should be
- * set to 0. The other fields are ignored in this case.
- *
- * Any pins that aren't available should be set to a negative value.
- *
- * Note that support for multiple slots is experimental -- some cards
- * might get upset if we don't get the clock management exactly right.
- * But in most cases, it should work just fine.
- */
-struct mci_slot_pdata {
- unsigned int bus_width;
- int detect_pin;
- int wp_pin;
- bool detect_is_active_high;
- bool non_removable;
-};
-
-/**
- * struct mci_platform_data - board-specific MMC/SDcard configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- * @slot: Per-slot configuration data.
- */
-struct mci_platform_data {
- void *dma_slave;
- dma_filter_fn dma_filter;
- struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS];
-};
-
-#endif /* __LINUX_ATMEL_MCI_H */
diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h
index fdb545101ede..6091d2abc1eb 100644
--- a/include/linux/atmel-ssc.h
+++ b/include/linux/atmel-ssc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_ATMEL_SSC_H
#define __INCLUDE_ATMEL_SSC_H
diff --git a/include/linux/atmel_pdc.h b/include/linux/atmel_pdc.h
index 63499ce806ea..00a766b5ee96 100644
--- a/include/linux/atmel_pdc.h
+++ b/include/linux/atmel_pdc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/atmel_pdc.h
*
@@ -6,11 +7,6 @@
*
* Peripheral Data Controller (PDC) registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef ATMEL_PDC_H
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h
deleted file mode 100644
index 468fdfa643f0..000000000000
--- a/include/linux/atmel_tc.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Timer/Counter Unit (TC) registers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef ATMEL_TC_H
-#define ATMEL_TC_H
-
-#include <linux/compiler.h>
-#include <linux/list.h>
-
-/*
- * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
- * three general-purpose 16-bit timers. These timers share one register bank.
- * Depending on the SOC, each timer may have its own clock and IRQ, or those
- * may be shared by the whole TC block.
- *
- * These TC blocks may have up to nine external pins: TCLK0..2 signals for
- * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
- * or triggering. Those pins need to be set up for use with the TC block,
- * else they will be used as GPIOs or for a different controller.
- *
- * Although we expect each TC block to have a platform_device node, those
- * nodes are not what drivers bind to. Instead, they ask for a specific
- * TC block, by number ... which is a common approach on systems with many
- * timers. Then they use clk_get() and platform_get_irq() to get clock and
- * IRQ resources.
- */
-
-struct clk;
-
-/**
- * struct atmel_tcb_config - SoC data for a Timer/Counter Block
- * @counter_width: size in bits of a timer counter register
- */
-struct atmel_tcb_config {
- size_t counter_width;
-};
-
-/**
- * struct atmel_tc - information about a Timer/Counter Block
- * @pdev: physical device
- * @regs: mapping through which the I/O registers can be accessed
- * @id: block id
- * @tcb_config: configuration data from SoC
- * @irq: irq for each of the three channels
- * @clk: internal clock source for each of the three channels
- * @node: list node, for tclib internal use
- * @allocated: if already used, for tclib internal use
- *
- * On some platforms, each TC channel has its own clocks and IRQs,
- * while on others, all TC channels share the same clock and IRQ.
- * Drivers should clk_enable() all the clocks they need even though
- * all the entries in @clk may point to the same physical clock.
- * Likewise, drivers should request irqs independently for each
- * channel, but they must use IRQF_SHARED in case some of the entries
- * in @irq are actually the same IRQ.
- */
-struct atmel_tc {
- struct platform_device *pdev;
- void __iomem *regs;
- int id;
- const struct atmel_tcb_config *tcb_config;
- int irq[3];
- struct clk *clk[3];
- struct clk *slow_clk;
- struct list_head node;
- bool allocated;
-};
-
-extern struct atmel_tc *atmel_tc_alloc(unsigned block);
-extern void atmel_tc_free(struct atmel_tc *tc);
-
-/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
-extern const u8 atmel_tc_divisors[5];
-
-
-/*
- * Two registers have block-wide controls. These are: configuring the three
- * "external" clocks (or event sources) used by the timer channels; and
- * synchronizing the timers by resetting them all at once.
- *
- * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
- * signals. Or, it can mean "external to timer", using the TIOA output from
- * one of the other two timers that's being run in waveform mode.
- */
-
-#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */
-#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */
-
-#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */
-#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */
-#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0)
-#define ATMEL_TC_TC0XC0S_NONE (1 << 0)
-#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0)
-#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0)
-#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */
-#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2)
-#define ATMEL_TC_TC1XC1S_NONE (1 << 2)
-#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2)
-#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2)
-#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */
-#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4)
-#define ATMEL_TC_TC2XC2S_NONE (1 << 4)
-#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4)
-#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4)
-
-
-/*
- * Each TC block has three "channels", each with one counter and controls.
- *
- * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
- * when it's not "external") is silicon-specific. AT91 platforms use one
- * set of definitions; AVR32 platforms use a different set. Don't hard-wire
- * such knowledge into your code, use the global "atmel_tc_divisors" ...
- * where index N is the divisor for clock N+1, else zero to indicate it uses
- * the 32 KiHz clock.
- *
- * The timers can be chained in various ways, and operated in "waveform"
- * generation mode (including PWM) or "capture" mode (to time events). In
- * both modes, behavior can be configured in many ways.
- *
- * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a
- * PWM output, and TIOB as either another PWM or as a trigger. Capture mode
- * uses them only as inputs.
- */
-#define ATMEL_TC_CHAN(idx) ((idx)*0x40)
-#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)
-
-#define ATMEL_TC_CCR 0x00 /* Channel Control Register */
-#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */
-#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */
-#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */
-
-#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */
-
-/* Both modes share some CMR bits */
-#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */
-#define ATMEL_TC_TIMER_CLOCK1 (0 << 0)
-#define ATMEL_TC_TIMER_CLOCK2 (1 << 0)
-#define ATMEL_TC_TIMER_CLOCK3 (2 << 0)
-#define ATMEL_TC_TIMER_CLOCK4 (3 << 0)
-#define ATMEL_TC_TIMER_CLOCK5 (4 << 0)
-#define ATMEL_TC_XC0 (5 << 0)
-#define ATMEL_TC_XC1 (6 << 0)
-#define ATMEL_TC_XC2 (7 << 0)
-#define ATMEL_TC_CLKI (1 << 3) /* clock invert */
-#define ATMEL_TC_BURST (3 << 4) /* clock gating */
-#define ATMEL_TC_GATE_NONE (0 << 4)
-#define ATMEL_TC_GATE_XC0 (1 << 4)
-#define ATMEL_TC_GATE_XC1 (2 << 4)
-#define ATMEL_TC_GATE_XC2 (3 << 4)
-#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */
-
-/* CAPTURE mode CMR bits */
-#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */
-#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */
-#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */
-#define ATMEL_TC_ETRGEDG_NONE (0 << 8)
-#define ATMEL_TC_ETRGEDG_RISING (1 << 8)
-#define ATMEL_TC_ETRGEDG_FALLING (2 << 8)
-#define ATMEL_TC_ETRGEDG_BOTH (3 << 8)
-#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? */
-#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */
-#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */
-#define ATMEL_TC_LDRA_NONE (0 << 16)
-#define ATMEL_TC_LDRA_RISING (1 << 16)
-#define ATMEL_TC_LDRA_FALLING (2 << 16)
-#define ATMEL_TC_LDRA_BOTH (3 << 16)
-#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */
-#define ATMEL_TC_LDRB_NONE (0 << 18)
-#define ATMEL_TC_LDRB_RISING (1 << 18)
-#define ATMEL_TC_LDRB_FALLING (2 << 18)
-#define ATMEL_TC_LDRB_BOTH (3 << 18)
-
-/* WAVEFORM mode CMR bits */
-#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */
-#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */
-#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */
-#define ATMEL_TC_EEVTEDG_NONE (0 << 8)
-#define ATMEL_TC_EEVTEDG_RISING (1 << 8)
-#define ATMEL_TC_EEVTEDG_FALLING (2 << 8)
-#define ATMEL_TC_EEVTEDG_BOTH (3 << 8)
-#define ATMEL_TC_EEVT (3 << 10) /* external event source */
-#define ATMEL_TC_EEVT_TIOB (0 << 10)
-#define ATMEL_TC_EEVT_XC0 (1 << 10)
-#define ATMEL_TC_EEVT_XC1 (2 << 10)
-#define ATMEL_TC_EEVT_XC2 (3 << 10)
-#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */
-#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */
-#define ATMEL_TC_WAVESEL_UP (0 << 13)
-#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13)
-#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13)
-#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13)
-#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */
-#define ATMEL_TC_ACPA_NONE (0 << 16)
-#define ATMEL_TC_ACPA_SET (1 << 16)
-#define ATMEL_TC_ACPA_CLEAR (2 << 16)
-#define ATMEL_TC_ACPA_TOGGLE (3 << 16)
-#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */
-#define ATMEL_TC_ACPC_NONE (0 << 18)
-#define ATMEL_TC_ACPC_SET (1 << 18)
-#define ATMEL_TC_ACPC_CLEAR (2 << 18)
-#define ATMEL_TC_ACPC_TOGGLE (3 << 18)
-#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */
-#define ATMEL_TC_AEEVT_NONE (0 << 20)
-#define ATMEL_TC_AEEVT_SET (1 << 20)
-#define ATMEL_TC_AEEVT_CLEAR (2 << 20)
-#define ATMEL_TC_AEEVT_TOGGLE (3 << 20)
-#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */
-#define ATMEL_TC_ASWTRG_NONE (0 << 22)
-#define ATMEL_TC_ASWTRG_SET (1 << 22)
-#define ATMEL_TC_ASWTRG_CLEAR (2 << 22)
-#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22)
-#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */
-#define ATMEL_TC_BCPB_NONE (0 << 24)
-#define ATMEL_TC_BCPB_SET (1 << 24)
-#define ATMEL_TC_BCPB_CLEAR (2 << 24)
-#define ATMEL_TC_BCPB_TOGGLE (3 << 24)
-#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */
-#define ATMEL_TC_BCPC_NONE (0 << 26)
-#define ATMEL_TC_BCPC_SET (1 << 26)
-#define ATMEL_TC_BCPC_CLEAR (2 << 26)
-#define ATMEL_TC_BCPC_TOGGLE (3 << 26)
-#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */
-#define ATMEL_TC_BEEVT_NONE (0 << 28)
-#define ATMEL_TC_BEEVT_SET (1 << 28)
-#define ATMEL_TC_BEEVT_CLEAR (2 << 28)
-#define ATMEL_TC_BEEVT_TOGGLE (3 << 28)
-#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */
-#define ATMEL_TC_BSWTRG_NONE (0 << 30)
-#define ATMEL_TC_BSWTRG_SET (1 << 30)
-#define ATMEL_TC_BSWTRG_CLEAR (2 << 30)
-#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30)
-
-#define ATMEL_TC_CV 0x10 /* counter Value */
-#define ATMEL_TC_RA 0x14 /* register A */
-#define ATMEL_TC_RB 0x18 /* register B */
-#define ATMEL_TC_RC 0x1c /* register C */
-
-#define ATMEL_TC_SR 0x20 /* status (read-only) */
-/* Status-only flags */
-#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */
-#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */
-#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */
-
-#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */
-#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */
-#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */
-
-/* Status and IRQ flags */
-#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */
-#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */
-#define ATMEL_TC_CPAS (1 << 2) /* RA compare */
-#define ATMEL_TC_CPBS (1 << 3) /* RB compare */
-#define ATMEL_TC_CPCS (1 << 4) /* RC compare */
-#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
-#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
-#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
-#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
- ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
- ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
- ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
- /* all IRQs */
-
-#endif
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 40d6bfec0e0d..8dd57c3a99e9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -22,1056 +25,60 @@
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
-#ifndef atomic_read_acquire
-#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
-#endif
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-#ifndef atomic_set_release
-#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
-#endif
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
- *
- * If an architecture overrides __atomic_op_acquire() it will probably want
- * to define smp_mb__after_spinlock().
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
*/
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence smp_mb__after_atomic
+#endif
+
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_acquire_fence(); \
__ret; \
})
-#endif
-#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
- smp_mb__before_atomic(); \
+ __atomic_release_fence(); \
op##_relaxed(args); \
})
-#endif
-#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
- smp_mb__before_atomic(); \
+ __atomic_pre_full_fence(); \
__ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_post_full_fence(); \
__ret; \
})
-#endif
-
-/* atomic_add_return_relaxed */
-#ifndef atomic_add_return_relaxed
-#define atomic_add_return_relaxed atomic_add_return
-#define atomic_add_return_acquire atomic_add_return
-#define atomic_add_return_release atomic_add_return
-
-#else /* atomic_add_return_relaxed */
-
-#ifndef atomic_add_return_acquire
-#define atomic_add_return_acquire(...) \
- __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_add_return_release
-#define atomic_add_return_release(...) \
- __atomic_op_release(atomic_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_add_return
-#define atomic_add_return(...) \
- __atomic_op_fence(atomic_add_return, __VA_ARGS__)
-#endif
-#endif /* atomic_add_return_relaxed */
-
-/* atomic_inc_return_relaxed */
-#ifndef atomic_inc_return_relaxed
-#define atomic_inc_return_relaxed atomic_inc_return
-#define atomic_inc_return_acquire atomic_inc_return
-#define atomic_inc_return_release atomic_inc_return
-
-#else /* atomic_inc_return_relaxed */
-
-#ifndef atomic_inc_return_acquire
-#define atomic_inc_return_acquire(...) \
- __atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_inc_return_release
-#define atomic_inc_return_release(...) \
- __atomic_op_release(atomic_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_inc_return
-#define atomic_inc_return(...) \
- __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
-#endif
-#endif /* atomic_inc_return_relaxed */
-
-/* atomic_sub_return_relaxed */
-#ifndef atomic_sub_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return
-#define atomic_sub_return_acquire atomic_sub_return
-#define atomic_sub_return_release atomic_sub_return
-
-#else /* atomic_sub_return_relaxed */
-
-#ifndef atomic_sub_return_acquire
-#define atomic_sub_return_acquire(...) \
- __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_sub_return_release
-#define atomic_sub_return_release(...) \
- __atomic_op_release(atomic_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_sub_return
-#define atomic_sub_return(...) \
- __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
-#endif
-#endif /* atomic_sub_return_relaxed */
-
-/* atomic_dec_return_relaxed */
-#ifndef atomic_dec_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return
-#define atomic_dec_return_acquire atomic_dec_return
-#define atomic_dec_return_release atomic_dec_return
-
-#else /* atomic_dec_return_relaxed */
-
-#ifndef atomic_dec_return_acquire
-#define atomic_dec_return_acquire(...) \
- __atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_dec_return_release
-#define atomic_dec_return_release(...) \
- __atomic_op_release(atomic_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_dec_return
-#define atomic_dec_return(...) \
- __atomic_op_fence(atomic_dec_return, __VA_ARGS__)
-#endif
-#endif /* atomic_dec_return_relaxed */
-
-
-/* atomic_fetch_add_relaxed */
-#ifndef atomic_fetch_add_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add
-#define atomic_fetch_add_acquire atomic_fetch_add
-#define atomic_fetch_add_release atomic_fetch_add
-
-#else /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_fetch_add_acquire
-#define atomic_fetch_add_acquire(...) \
- __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_add_release
-#define atomic_fetch_add_release(...) \
- __atomic_op_release(atomic_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(...) \
- __atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_add_relaxed */
-
-/* atomic_fetch_inc_relaxed */
-#ifndef atomic_fetch_inc_relaxed
-
-#ifndef atomic_fetch_inc
-#define atomic_fetch_inc(v) atomic_fetch_add(1, (v))
-#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v))
-#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v))
-#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v))
-#else /* atomic_fetch_inc */
-#define atomic_fetch_inc_relaxed atomic_fetch_inc
-#define atomic_fetch_inc_acquire atomic_fetch_inc
-#define atomic_fetch_inc_release atomic_fetch_inc
-#endif /* atomic_fetch_inc */
-
-#else /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_fetch_inc_acquire
-#define atomic_fetch_inc_acquire(...) \
- __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_inc_release
-#define atomic_fetch_inc_release(...) \
- __atomic_op_release(atomic_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_inc
-#define atomic_fetch_inc(...) \
- __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_inc_relaxed */
-
-/* atomic_fetch_sub_relaxed */
-#ifndef atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub
-#define atomic_fetch_sub_acquire atomic_fetch_sub
-#define atomic_fetch_sub_release atomic_fetch_sub
-
-#else /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_fetch_sub_acquire
-#define atomic_fetch_sub_acquire(...) \
- __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_sub_release
-#define atomic_fetch_sub_release(...) \
- __atomic_op_release(atomic_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_sub
-#define atomic_fetch_sub(...) \
- __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_sub_relaxed */
-
-/* atomic_fetch_dec_relaxed */
-#ifndef atomic_fetch_dec_relaxed
-
-#ifndef atomic_fetch_dec
-#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v))
-#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v))
-#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v))
-#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v))
-#else /* atomic_fetch_dec */
-#define atomic_fetch_dec_relaxed atomic_fetch_dec
-#define atomic_fetch_dec_acquire atomic_fetch_dec
-#define atomic_fetch_dec_release atomic_fetch_dec
-#endif /* atomic_fetch_dec */
-
-#else /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_dec_acquire
-#define atomic_fetch_dec_acquire(...) \
- __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_dec_release
-#define atomic_fetch_dec_release(...) \
- __atomic_op_release(atomic_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_dec
-#define atomic_fetch_dec(...) \
- __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_dec_relaxed */
-
-/* atomic_fetch_or_relaxed */
-#ifndef atomic_fetch_or_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or
-#define atomic_fetch_or_acquire atomic_fetch_or
-#define atomic_fetch_or_release atomic_fetch_or
-
-#else /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_or_acquire
-#define atomic_fetch_or_acquire(...) \
- __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_or_release
-#define atomic_fetch_or_release(...) \
- __atomic_op_release(atomic_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_or
-#define atomic_fetch_or(...) \
- __atomic_op_fence(atomic_fetch_or, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_or_relaxed */
-
-/* atomic_fetch_and_relaxed */
-#ifndef atomic_fetch_and_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and
-#define atomic_fetch_and_acquire atomic_fetch_and
-#define atomic_fetch_and_release atomic_fetch_and
-
-#else /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_fetch_and_acquire
-#define atomic_fetch_and_acquire(...) \
- __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_and_release
-#define atomic_fetch_and_release(...) \
- __atomic_op_release(atomic_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_and
-#define atomic_fetch_and(...) \
- __atomic_op_fence(atomic_fetch_and, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_and_relaxed */
-
-#ifdef atomic_andnot
-/* atomic_fetch_andnot_relaxed */
-#ifndef atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
-
-#else /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_andnot_acquire
-#define atomic_fetch_andnot_acquire(...) \
- __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_andnot_release
-#define atomic_fetch_andnot_release(...) \
- __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_andnot
-#define atomic_fetch_andnot(...) \
- __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_andnot_relaxed */
-#endif /* atomic_andnot */
-
-/* atomic_fetch_xor_relaxed */
-#ifndef atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor
-#define atomic_fetch_xor_acquire atomic_fetch_xor
-#define atomic_fetch_xor_release atomic_fetch_xor
-
-#else /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_fetch_xor_acquire
-#define atomic_fetch_xor_acquire(...) \
- __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_xor_release
-#define atomic_fetch_xor_release(...) \
- __atomic_op_release(atomic_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_xor
-#define atomic_fetch_xor(...) \
- __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_xor_relaxed */
-
-
-/* atomic_xchg_relaxed */
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_relaxed atomic_xchg
-#define atomic_xchg_acquire atomic_xchg
-#define atomic_xchg_release atomic_xchg
-
-#else /* atomic_xchg_relaxed */
-
-#ifndef atomic_xchg_acquire
-#define atomic_xchg_acquire(...) \
- __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_xchg_release
-#define atomic_xchg_release(...) \
- __atomic_op_release(atomic_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_xchg
-#define atomic_xchg(...) \
- __atomic_op_fence(atomic_xchg, __VA_ARGS__)
-#endif
-#endif /* atomic_xchg_relaxed */
-
-/* atomic_cmpxchg_relaxed */
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_relaxed atomic_cmpxchg
-#define atomic_cmpxchg_acquire atomic_cmpxchg
-#define atomic_cmpxchg_release atomic_cmpxchg
-
-#else /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_cmpxchg_acquire
-#define atomic_cmpxchg_acquire(...) \
- __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_cmpxchg_release
-#define atomic_cmpxchg_release(...) \
- __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_cmpxchg
-#define atomic_cmpxchg(...) \
- __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
-#endif
-#endif /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg
-
-#define __atomic_try_cmpxchg(type, _p, _po, _n) \
-({ \
- typeof(_po) __po = (_po); \
- typeof(*(_po)) __r, __o = *__po; \
- __r = atomic_cmpxchg##type((_p), __o, (_n)); \
- if (unlikely(__r != __o)) \
- *__po = __r; \
- likely(__r == __o); \
-})
-
-#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
-#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n)
-#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n)
-#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n)
-
-#else /* atomic_try_cmpxchg */
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg
-#endif /* atomic_try_cmpxchg */
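A usage sketch of the try_cmpxchg() idiom (example_add_clamped() is a
hypothetical helper, illustrative only): because the macro writes the observed
value back into *_po on failure, the classic cmpxchg() retry loop loses one
comparison per iteration.

/* Sketch: bump v, but never past limit (hypothetical clamp policy). */
static inline void example_add_clamped(atomic_t *v, int limit)
{
	int old = atomic_read(v);

	do {
		if (old >= limit)
			return;
	} while (!atomic_try_cmpxchg(v, &old, old + 1));	/* refreshes 'old' on failure */
}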
-
-/* cmpxchg_relaxed */
-#ifndef cmpxchg_relaxed
-#define cmpxchg_relaxed cmpxchg
-#define cmpxchg_acquire cmpxchg
-#define cmpxchg_release cmpxchg
-
-#else /* cmpxchg_relaxed */
-
-#ifndef cmpxchg_acquire
-#define cmpxchg_acquire(...) \
- __atomic_op_acquire(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg_release
-#define cmpxchg_release(...) \
- __atomic_op_release(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg
-#define cmpxchg(...) \
- __atomic_op_fence(cmpxchg, __VA_ARGS__)
-#endif
-#endif /* cmpxchg_relaxed */
-
-/* cmpxchg64_relaxed */
-#ifndef cmpxchg64_relaxed
-#define cmpxchg64_relaxed cmpxchg64
-#define cmpxchg64_acquire cmpxchg64
-#define cmpxchg64_release cmpxchg64
-
-#else /* cmpxchg64_relaxed */
-
-#ifndef cmpxchg64_acquire
-#define cmpxchg64_acquire(...) \
- __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64_release
-#define cmpxchg64_release(...) \
- __atomic_op_release(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64
-#define cmpxchg64(...) \
- __atomic_op_fence(cmpxchg64, __VA_ARGS__)
-#endif
-#endif /* cmpxchg64_relaxed */
-
-/* xchg_relaxed */
-#ifndef xchg_relaxed
-#define xchg_relaxed xchg
-#define xchg_acquire xchg
-#define xchg_release xchg
-
-#else /* xchg_relaxed */
-
-#ifndef xchg_acquire
-#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg_release
-#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg
-#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
-#endif
-#endif /* xchg_relaxed */
-
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
- return __atomic_add_unless(v, a, u) != u;
-}
-
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-#ifndef atomic_inc_not_zero
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#endif
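A usage sketch (struct example_obj and example_tryget() are hypothetical,
illustrative only): atomic_inc_not_zero() is the canonical building block for
lookups that must not resurrect a dying object.

struct example_obj {
	atomic_t refs;
};

/* Sketch: take a reference unless the object is already dying. */
static inline int example_tryget(struct example_obj *obj)
{
	return atomic_inc_not_zero(&obj->refs);	/* 0 => refs had hit zero */
}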
-
-#ifndef atomic_andnot
-static inline void atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return atomic_fetch_and_relaxed(~i, v);
-}
-
-static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return atomic_fetch_and_acquire(~i, v);
-}
-
-static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- return atomic_fetch_and_release(~i, v);
-}
-#endif
-
-/**
- * atomic_inc_not_zero_hint - increment if not null
- * @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
- *
- * This version of atomic_inc_not_zero() gives a hint of the probable
- * value of the atomic. This helps the processor avoid reading the
- * memory before doing the atomic read/modify/write cycle, lowering
- * the number of bus transactions on some arches.
- *
- * Returns: 0 if increment was not done, 1 otherwise.
- */
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
-{
- int val, c = hint;
-
- /* sanity test, should be removed by compiler if hint is a constant */
- if (!hint)
- return atomic_inc_not_zero(v);
-
- do {
- val = atomic_cmpxchg(v, c, c + 1);
- if (val == c)
- return 1;
- c = val;
- } while (c);
-
- return 0;
-}
-#endif
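A caller-side sketch for the hint variant (reuses the hypothetical
example_obj above; illustrative only): the hint is the value the caller
expects to find, so the first cmpxchg() usually succeeds without a prior
read of the cacheline.

/* Sketch: a freshly published object almost certainly has refs == 1. */
static inline int example_tryget_fresh(struct example_obj *obj)
{
	return atomic_inc_not_zero_hint(&obj->refs, 1);
}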
-
-#ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
-{
- int v, v1;
- for (v = 0; v >= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v + 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
-}
-#endif
-
-#ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
-{
- int v, v1;
- for (v = 0; v <= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v - 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
-}
-#endif
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable @v was not decremented.
- */
-#ifndef atomic_dec_if_positive
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
- int c, old, dec;
- c = atomic_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-#endif
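A usage sketch (example_put() is hypothetical, illustrative only): the "old
value minus 1" return convention lets the caller tell a decrement-to-zero
apart from "was already non-positive, left untouched".

/* Sketch: returns 1 when this call dropped the count to zero. */
static inline int example_put(atomic_t *v)
{
	int dec = atomic_dec_if_positive(v);

	return dec == 0;	/* dec < 0 means no decrement happened */
}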
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef atomic64_read_acquire
-#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
-#endif
-
-#ifndef atomic64_set_release
-#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
-#endif
-
-/* atomic64_add_return_relaxed */
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_relaxed atomic64_add_return
-#define atomic64_add_return_acquire atomic64_add_return
-#define atomic64_add_return_release atomic64_add_return
-
-#else /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_add_return_acquire
-#define atomic64_add_return_acquire(...) \
- __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_add_return_release
-#define atomic64_add_return_release(...) \
- __atomic_op_release(atomic64_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_add_return
-#define atomic64_add_return(...) \
- __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_add_return_relaxed */
-
-/* atomic64_inc_return_relaxed */
-#ifndef atomic64_inc_return_relaxed
-#define atomic64_inc_return_relaxed atomic64_inc_return
-#define atomic64_inc_return_acquire atomic64_inc_return
-#define atomic64_inc_return_release atomic64_inc_return
-
-#else /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_inc_return_acquire
-#define atomic64_inc_return_acquire(...) \
- __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_inc_return_release
-#define atomic64_inc_return_release(...) \
- __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_inc_return
-#define atomic64_inc_return(...) \
- __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_inc_return_relaxed */
-
-
-/* atomic64_sub_return_relaxed */
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return
-#define atomic64_sub_return_acquire atomic64_sub_return
-#define atomic64_sub_return_release atomic64_sub_return
-
-#else /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_sub_return_acquire
-#define atomic64_sub_return_acquire(...) \
- __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_sub_return_release
-#define atomic64_sub_return_release(...) \
- __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_sub_return
-#define atomic64_sub_return(...) \
- __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_sub_return_relaxed */
-
-/* atomic64_dec_return_relaxed */
-#ifndef atomic64_dec_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return
-#define atomic64_dec_return_acquire atomic64_dec_return
-#define atomic64_dec_return_release atomic64_dec_return
-
-#else /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_dec_return_acquire
-#define atomic64_dec_return_acquire(...) \
- __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_dec_return_release
-#define atomic64_dec_return_release(...) \
- __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_dec_return
-#define atomic64_dec_return(...) \
- __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_dec_return_relaxed */
-
-
-/* atomic64_fetch_add_relaxed */
-#ifndef atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add
-#define atomic64_fetch_add_acquire atomic64_fetch_add
-#define atomic64_fetch_add_release atomic64_fetch_add
-
-#else /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_fetch_add_acquire
-#define atomic64_fetch_add_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_add_release
-#define atomic64_fetch_add_release(...) \
- __atomic_op_release(atomic64_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_add
-#define atomic64_fetch_add(...) \
- __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_add_relaxed */
-
-/* atomic64_fetch_inc_relaxed */
-#ifndef atomic64_fetch_inc_relaxed
-
-#ifndef atomic64_fetch_inc
-#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v))
-#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v))
-#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v))
-#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v))
-#else /* atomic64_fetch_inc */
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc
-#define atomic64_fetch_inc_release atomic64_fetch_inc
-#endif /* atomic64_fetch_inc */
-
-#else /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_fetch_inc_acquire
-#define atomic64_fetch_inc_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_inc_release
-#define atomic64_fetch_inc_release(...) \
- __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_inc
-#define atomic64_fetch_inc(...) \
- __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_inc_relaxed */
-
-/* atomic64_fetch_sub_relaxed */
-#ifndef atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub
-#define atomic64_fetch_sub_release atomic64_fetch_sub
-
-#else /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_fetch_sub_acquire
-#define atomic64_fetch_sub_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_sub_release
-#define atomic64_fetch_sub_release(...) \
- __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_sub
-#define atomic64_fetch_sub(...) \
- __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_sub_relaxed */
-
-/* atomic64_fetch_dec_relaxed */
-#ifndef atomic64_fetch_dec_relaxed
-
-#ifndef atomic64_fetch_dec
-#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v))
-#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v))
-#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v))
-#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v))
-#else /* atomic64_fetch_dec */
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec
-#define atomic64_fetch_dec_release atomic64_fetch_dec
-#endif /* atomic64_fetch_dec */
-
-#else /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_dec_acquire
-#define atomic64_fetch_dec_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_dec_release
-#define atomic64_fetch_dec_release(...) \
- __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_dec
-#define atomic64_fetch_dec(...) \
- __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_dec_relaxed */
-
-/* atomic64_fetch_or_relaxed */
-#ifndef atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or
-#define atomic64_fetch_or_acquire atomic64_fetch_or
-#define atomic64_fetch_or_release atomic64_fetch_or
-
-#else /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_or_acquire
-#define atomic64_fetch_or_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_or_release
-#define atomic64_fetch_or_release(...) \
- __atomic_op_release(atomic64_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_or
-#define atomic64_fetch_or(...) \
- __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_or_relaxed */
-
-/* atomic64_fetch_and_relaxed */
-#ifndef atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_relaxed atomic64_fetch_and
-#define atomic64_fetch_and_acquire atomic64_fetch_and
-#define atomic64_fetch_and_release atomic64_fetch_and
-
-#else /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_fetch_and_acquire
-#define atomic64_fetch_and_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_and_release
-#define atomic64_fetch_and_release(...) \
- __atomic_op_release(atomic64_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_and
-#define atomic64_fetch_and(...) \
- __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_and_relaxed */
-
-#ifdef atomic64_andnot
-/* atomic64_fetch_andnot_relaxed */
-#ifndef atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
-
-#else /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_andnot_acquire
-#define atomic64_fetch_andnot_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-#define atomic64_fetch_andnot_release(...) \
- __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_andnot
-#define atomic64_fetch_andnot(...) \
- __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_andnot_relaxed */
-#endif /* atomic64_andnot */
-
-/* atomic64_fetch_xor_relaxed */
-#ifndef atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor
-#define atomic64_fetch_xor_release atomic64_fetch_xor
-
-#else /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_fetch_xor_acquire
-#define atomic64_fetch_xor_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_xor_release
-#define atomic64_fetch_xor_release(...) \
- __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_xor
-#define atomic64_fetch_xor(...) \
- __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_xor_relaxed */
-
-
-/* atomic64_xchg_relaxed */
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_relaxed atomic64_xchg
-#define atomic64_xchg_acquire atomic64_xchg
-#define atomic64_xchg_release atomic64_xchg
-
-#else /* atomic64_xchg_relaxed */
-
-#ifndef atomic64_xchg_acquire
-#define atomic64_xchg_acquire(...) \
- __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_xchg_release
-#define atomic64_xchg_release(...) \
- __atomic_op_release(atomic64_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_xchg
-#define atomic64_xchg(...) \
- __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
-#endif
-#endif /* atomic64_xchg_relaxed */
-
-/* atomic64_cmpxchg_relaxed */
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg
-#define atomic64_cmpxchg_release atomic64_cmpxchg
-
-#else /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_cmpxchg_acquire
-#define atomic64_cmpxchg_acquire(...) \
- __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_cmpxchg_release
-#define atomic64_cmpxchg_release(...) \
- __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_cmpxchg
-#define atomic64_cmpxchg(...) \
- __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-#endif /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg
-
-#define __atomic64_try_cmpxchg(type, _p, _po, _n) \
-({ \
- typeof(_po) __po = (_po); \
- typeof(*(_po)) __r, __o = *__po; \
- __r = atomic64_cmpxchg##type((_p), __o, (_n)); \
- if (unlikely(__r != __o)) \
- *__po = __r; \
- likely(__r == __o); \
-})
-
-#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n)
-#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
-#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n)
-#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n)
-
-#else /* atomic64_try_cmpxchg */
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
-#endif /* atomic64_try_cmpxchg */
-
-#ifndef atomic64_andnot
-static inline void atomic64_andnot(long long i, atomic64_t *v)
-{
- atomic64_and(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
-{
- return atomic64_fetch_and(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
-{
- return atomic64_fetch_and_relaxed(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
-{
- return atomic64_fetch_and_acquire(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
-{
- return atomic64_fetch_and_release(~i, v);
-}
-#endif
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
#endif /* _LINUX_ATOMIC_H */
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
new file mode 100644
index 000000000000..2f9d36b72bd8
--- /dev/null
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -0,0 +1,4693 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#if defined(arch_xchg)
+#define raw_xchg arch_xchg
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#else
+extern void raw_xchg_not_implemented(void);
+#define raw_xchg(...) raw_xchg_not_implemented()
+#endif
+
+#if defined(arch_xchg_acquire)
+#define raw_xchg_acquire arch_xchg_acquire
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_acquire arch_xchg
+#else
+extern void raw_xchg_acquire_not_implemented(void);
+#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
+#endif
+
+#if defined(arch_xchg_release)
+#define raw_xchg_release arch_xchg_release
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_release arch_xchg
+#else
+extern void raw_xchg_release_not_implemented(void);
+#define raw_xchg_release(...) raw_xchg_release_not_implemented()
+#endif
+
+#if defined(arch_xchg_relaxed)
+#define raw_xchg_relaxed arch_xchg_relaxed
+#elif defined(arch_xchg)
+#define raw_xchg_relaxed arch_xchg
+#else
+extern void raw_xchg_relaxed_not_implemented(void);
+#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
+#endif
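The preference order above is worth spelling out with a sketch (hypothetical
architecture, illustrative only): an arch defining only arch_xchg_relaxed()
resolves to

	raw_xchg(...)		-> __atomic_op_fence(arch_xchg, ...)
	raw_xchg_acquire(...)	-> __atomic_op_acquire(arch_xchg, ...)
	raw_xchg_release(...)	-> __atomic_op_release(arch_xchg, ...)
	raw_xchg_relaxed	-> arch_xchg_relaxed

while an arch defining only the fully ordered arch_xchg() has every variant
alias it, which is safe because stronger ordering is always permitted.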
+
+#if defined(arch_cmpxchg)
+#define raw_cmpxchg arch_cmpxchg
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#else
+extern void raw_cmpxchg_not_implemented(void);
+#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_acquire)
+#define raw_cmpxchg_acquire arch_cmpxchg_acquire
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_acquire arch_cmpxchg
+#else
+extern void raw_cmpxchg_acquire_not_implemented(void);
+#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_release)
+#define raw_cmpxchg_release arch_cmpxchg_release
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_release(...) \
+ __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_release arch_cmpxchg
+#else
+extern void raw_cmpxchg_release_not_implemented(void);
+#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_relaxed arch_cmpxchg
+#else
+extern void raw_cmpxchg_relaxed_not_implemented(void);
+#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64)
+#define raw_cmpxchg64 arch_cmpxchg64
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#else
+extern void raw_cmpxchg64_not_implemented(void);
+#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_acquire)
+#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_acquire arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_acquire_not_implemented(void);
+#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_release)
+#define raw_cmpxchg64_release arch_cmpxchg64_release
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_release(...) \
+ __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_release arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_release_not_implemented(void);
+#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_relaxed_not_implemented(void);
+#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128)
+#define raw_cmpxchg128 arch_cmpxchg128
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128(...) \
+ __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
+#else
+extern void raw_cmpxchg128_not_implemented(void);
+#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_acquire)
+#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_acquire arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_acquire_not_implemented(void);
+#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_release)
+#define raw_cmpxchg128_release arch_cmpxchg128_release
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_release(...) \
+ __atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_release arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_release_not_implemented(void);
+#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_relaxed_not_implemented(void);
+#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
+#endif
+
+#if defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg arch_try_cmpxchg
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg(...) \
+ __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg_acquire)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg_release)
+#define raw_try_cmpxchg_release arch_try_cmpxchg_release
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_release(...) \
+ __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_release arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64 arch_try_cmpxchg64
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64(...) \
+ __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64_acquire)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64_release)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_release(...) \
+ __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128 arch_try_cmpxchg128
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128(...) \
+ __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128_acquire)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128_release)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_release(...) \
+ __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_cmpxchg_local arch_cmpxchg_local
+
+#ifdef arch_try_cmpxchg_local
+#define raw_try_cmpxchg_local arch_try_cmpxchg_local
+#else
+#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_cmpxchg64_local arch_cmpxchg64_local
+
+#ifdef arch_try_cmpxchg64_local
+#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
+#else
+#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_cmpxchg128_local arch_cmpxchg128_local
+
+#ifdef arch_try_cmpxchg128_local
+#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
+#else
+#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_sync_cmpxchg arch_sync_cmpxchg
+
+#ifdef arch_sync_try_cmpxchg
+#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
+#else
+#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
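Note the asymmetry in the _local and sync_ families above (an observation on
the generated shape; the rationale is an assumption, not stated in this file):
cmpxchg_local(), cmpxchg64_local(), cmpxchg128_local() and sync_cmpxchg() are
mapped to the arch_ ops unconditionally, on the expectation that every
architecture provides them, while only the try_ forms get a generated
fallback built from the corresponding cmpxchg.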
+
+/**
+ * raw_atomic_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+raw_atomic_read(const atomic_t *v)
+{
+ return arch_atomic_read(v);
+}
+
+/**
+ * raw_atomic_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+raw_atomic_read_acquire(const atomic_t *v)
+{
+#if defined(arch_atomic_read_acquire)
+ return arch_atomic_read_acquire(v);
+#else
+ int ret;
+
+ if (__native_word(atomic_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = raw_atomic_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+#endif
+}
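A note on the __native_word() test above (the reasoning is an assumption
drawn from the generic barrier code, not spelled out in this file):

/*
 * Sketch of the common-case expansion (assumes atomic_t is a native
 * machine word, which __native_word() checks because smp_load_acquire()
 * only handles word-sized types):
 *
 *	raw_atomic_read_acquire(v) ~> smp_load_acquire(&v->counter)
 *
 * Only a non-native atomic_t would take the read-then-fence branch.
 */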
+
+/**
+ * raw_atomic_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_set(atomic_t *v, int i)
+{
+ arch_atomic_set(v, i);
+}
+
+/**
+ * raw_atomic_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_set_release(atomic_t *v, int i)
+{
+#if defined(arch_atomic_set_release)
+ arch_atomic_set_release(v, i);
+#else
+ if (__native_word(atomic_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ raw_atomic_set(v, i);
+ }
+#endif
+}
+
+/**
+ * raw_atomic_add() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_add(int i, atomic_t *v)
+{
+ arch_atomic_add(i, v);
+}
+
+/**
+ * raw_atomic_add_return() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_add_return"
+#endif
+}
+
+/**
+ * raw_atomic_add_return_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_acquire)
+ return arch_atomic_add_return_acquire(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ int ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_add_return_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_release)
+ return arch_atomic_add_return_release(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_add_return_relaxed(i, v);
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_release"
+#endif
+}
+
+/**
+ * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_relaxed)
+ return arch_atomic_add_return_relaxed(i, v);
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_relaxed"
+#endif
+}
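The fence placement in the fallbacks above follows one fixed pattern, sketched
here for reference (pseudocode of the generated shapes, not new code):

	acquire:	ret = arch_op_relaxed(...); __atomic_acquire_fence();
	release:	__atomic_release_fence(); arch_op_relaxed(...);
	full:		__atomic_pre_full_fence(); arch_op_relaxed(...); __atomic_post_full_fence();

i.e. an acquire fence trails the access it orders, a release fence precedes
it, and a full fence brackets it on both sides.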
+
+/**
+ * raw_atomic_fetch_add() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_add"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_acquire)
+ return arch_atomic_fetch_add_acquire(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ int ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_release)
+ return arch_atomic_fetch_add_release(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_relaxed)
+ return arch_atomic_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_sub() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_sub(int i, atomic_t *v)
+{
+ arch_atomic_sub(i, v);
+}
+
+/**
+ * raw_atomic_sub_return() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_sub_return"
+#endif
+}
+
+/**
+ * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_acquire)
+ return arch_atomic_sub_return_acquire(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ int ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_sub_return_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_release)
+ return arch_atomic_sub_return_release(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_sub_return_relaxed(i, v);
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_release"
+#endif
+}
+
+/**
+ * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_relaxed)
+ return arch_atomic_sub_return_relaxed(i, v);
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_sub"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub_acquire)
+ return arch_atomic_fetch_sub_acquire(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ int ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub_release)
+ return arch_atomic_fetch_sub_release(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub_relaxed)
+ return arch_atomic_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_inc(atomic_t *v)
+{
+#if defined(arch_atomic_inc)
+ arch_atomic_inc(v);
+#else
+ raw_atomic_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_add_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return_acquire)
+ return arch_atomic_inc_return_acquire(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ int ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return_release(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return_release)
+ return arch_atomic_inc_return_release(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_inc_return_relaxed(v);
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return_relaxed)
+ return arch_atomic_inc_return_relaxed(v);
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc_acquire)
+ return arch_atomic_fetch_inc_acquire(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ int ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc_release(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc_release)
+ return arch_atomic_fetch_inc_release(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_inc_relaxed(v);
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc_relaxed)
+ return arch_atomic_fetch_inc_relaxed(v);
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_dec(atomic_t *v)
+{
+#if defined(arch_atomic_dec)
+ arch_atomic_dec(v);
+#else
+ raw_atomic_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_sub_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_acquire)
+ return arch_atomic_dec_return_acquire(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ int ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_release(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_release)
+ return arch_atomic_dec_return_release(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_dec_return_relaxed(v);
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_relaxed)
+ return arch_atomic_dec_return_relaxed(v);
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec_acquire)
+ return arch_atomic_fetch_dec_acquire(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ int ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec_release(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec_release)
+ return arch_atomic_fetch_dec_release(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_dec_relaxed(v);
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec_relaxed)
+ return arch_atomic_fetch_dec_relaxed(v);
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_and() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_and(int i, atomic_t *v)
+{
+ arch_atomic_and(i, v);
+}
+
+/**
+ * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_and"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and_acquire)
+ return arch_atomic_fetch_and_acquire(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
+ int ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and_release)
+ return arch_atomic_fetch_and_release(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and_relaxed)
+ return arch_atomic_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_andnot(int i, atomic_t *v)
+{
+#if defined(arch_atomic_andnot)
+ arch_atomic_andnot(i, v);
+#else
+ raw_atomic_and(~i, v);
+#endif
+}
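+
+/*
+ * Illustrative sketch (hypothetical names): andnot clears the bits set in
+ * @i in a single atomic op, with no cmpxchg loop in the caller:
+ *
+ *	#define STATE_BUSY	0x1
+ *
+ *	raw_atomic_andnot(STATE_BUSY, &state);
+ */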
+
+/**
+ * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_and(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_acquire)
+ return arch_atomic_fetch_andnot_acquire(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_acquire(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_release)
+ return arch_atomic_fetch_andnot_release(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_release(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_relaxed)
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_relaxed(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_or() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_or(int i, atomic_t *v)
+{
+ arch_atomic_or(i, v);
+}
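+
+/*
+ * Illustrative sketch (hypothetical names): or is the complement of the
+ * andnot example above, setting flag bits atomically:
+ *
+ *	raw_atomic_or(STATE_READY, &state);
+ */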
+
+/**
+ * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_or"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_acquire)
+ return arch_atomic_fetch_or_acquire(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
+ int ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_release)
+ return arch_atomic_fetch_or_release(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_relaxed)
+ return arch_atomic_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_xor(int i, atomic_t *v)
+{
+ arch_atomic_xor(i, v);
+}
+
+/**
+ * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_xor"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_acquire)
+ return arch_atomic_fetch_xor_acquire(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
+ int ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_release)
+ return arch_atomic_fetch_xor_release(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_relaxed)
+ return arch_atomic_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_xchg_relaxed(v, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_xchg(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_acquire(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_acquire)
+ return arch_atomic_xchg_acquire(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ int ret = arch_atomic_xchg_relaxed(v, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_acquire(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_release(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_release)
+ return arch_atomic_xchg_release(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_xchg_relaxed(v, new);
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_release(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_relaxed(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_relaxed)
+ return arch_atomic_xchg_relaxed(v, new);
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_relaxed(&v->counter, new);
+#endif
+}
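+
+/*
+ * Illustrative sketch (hypothetical names): xchg atomically takes ownership
+ * of a value while resetting it, e.g. draining a word of pending-work bits:
+ *
+ *	int pending = raw_atomic_xchg(&pending_work, 0);
+ *
+ *	if (pending)
+ *		process_pending(pending);
+ */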
+
+/**
+ * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_cmpxchg(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg_acquire)
+ return arch_atomic_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
+ int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_acquire(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg_release)
+ return arch_atomic_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_release(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg_relaxed)
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_relaxed(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
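+
+/*
+ * Illustrative sketch (hypothetical names): try_cmpxchg is built for CAS
+ * loops; on failure it writes the current value back through @old, so the
+ * caller does not need to re-read @v on every iteration:
+ *
+ *	int old = raw_atomic_read(&v);
+ *
+ *	do {
+ *		if (old >= LIMIT)
+ *			return false;
+ *	} while (!raw_atomic_try_cmpxchg(&v, &old, old + 1));
+ *
+ *	return true;
+ */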
+
+/**
+ * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg_acquire)
+ return arch_atomic_try_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg_release)
+ return arch_atomic_try_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg_relaxed)
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_sub_and_test(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_and_test)
+ return arch_atomic_sub_and_test(i, v);
+#else
+ return raw_atomic_sub_return(i, v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_dec_and_test(atomic_t *v)
+{
+#if defined(arch_atomic_dec_and_test)
+ return arch_atomic_dec_and_test(v);
+#else
+ return raw_atomic_dec_return(v) == 0;
+#endif
+}
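+
+/*
+ * Illustrative sketch (hypothetical names): the classic refcount-put idiom.
+ * The full ordering ensures the final free is ordered against every
+ * dropper's preceding accesses to the object:
+ *
+ *	if (raw_atomic_dec_and_test(&obj->refs))
+ *		release_obj(obj);
+ */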
+
+/**
+ * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_inc_and_test(atomic_t *v)
+{
+#if defined(arch_atomic_inc_and_test)
+ return arch_atomic_inc_and_test(v);
+#else
+ return raw_atomic_inc_return(v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative() - atomic add and test if negative with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_add_return(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative_acquire)
+ return arch_atomic_add_negative_acquire(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ bool ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_acquire(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative_release)
+ return arch_atomic_add_negative_release(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_add_negative_relaxed(i, v);
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_release(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative_relaxed)
+ return arch_atomic_add_negative_relaxed(i, v);
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_relaxed(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+#if defined(arch_atomic_fetch_add_unless)
+ return arch_atomic_fetch_add_unless(v, a, u);
+#else
+ int c = raw_atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!raw_atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+#endif
+}
+
+/**
+ * raw_atomic_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_unless(atomic_t *v, int a, int u)
+{
+#if defined(arch_atomic_add_unless)
+ return arch_atomic_add_unless(v, a, u);
+#else
+ return raw_atomic_fetch_add_unless(v, a, u) != u;
+#endif
+}
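+
+/*
+ * Illustrative sketch (hypothetical names): add_unless as a saturation
+ * guard, refusing to advance a counter that has reached a sentinel:
+ *
+ *	if (!raw_atomic_add_unless(&budget, 1, INT_MAX))
+ *		return -EBUSY;
+ */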
+
+/**
+ * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_inc_not_zero(atomic_t *v)
+{
+#if defined(arch_atomic_inc_not_zero)
+ return arch_atomic_inc_not_zero(v);
+#else
+ return raw_atomic_add_unless(v, 1, 0);
+#endif
+}
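+
+/*
+ * Illustrative sketch (hypothetical names): the RCU lookup idiom. A lookup
+ * can race with the final put, so a zero refcount means the object is
+ * already being torn down and must not be resurrected:
+ *
+ *	rcu_read_lock();
+ *	obj = lookup_object(key);
+ *	if (obj && !raw_atomic_inc_not_zero(&obj->refs))
+ *		obj = NULL;
+ *	rcu_read_unlock();
+ */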
+
+/**
+ * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_inc_unless_negative(atomic_t *v)
+{
+#if defined(arch_atomic_inc_unless_negative)
+ return arch_atomic_inc_unless_negative(v);
+#else
+ int c = raw_atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_dec_unless_positive(atomic_t *v)
+{
+#if defined(arch_atomic_dec_unless_positive)
+ return arch_atomic_dec_unless_positive(v);
+#else
+ int c = raw_atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
+static __always_inline int
+raw_atomic_dec_if_positive(atomic_t *v)
+{
+#if defined(arch_atomic_dec_if_positive)
+ return arch_atomic_dec_if_positive(v);
+#else
+ int dec, c = raw_atomic_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!raw_atomic_try_cmpxchg(v, &c, dec));
+
+ return dec;
+#endif
+}
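+
+/*
+ * Illustrative sketch (hypothetical names): dec_if_positive behaves like a
+ * semaphore trydown; a non-negative return value means a unit was taken:
+ *
+ *	if (raw_atomic_dec_if_positive(&tokens) < 0)
+ *		return -EAGAIN;
+ */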
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+/**
+ * raw_atomic64_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+raw_atomic64_read(const atomic64_t *v)
+{
+ return arch_atomic64_read(v);
+}
+
+/**
+ * raw_atomic64_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+raw_atomic64_read_acquire(const atomic64_t *v)
+{
+#if defined(arch_atomic64_read_acquire)
+ return arch_atomic64_read_acquire(v);
+#else
+ s64 ret;
+
+ if (__native_word(atomic64_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = raw_atomic64_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+#endif
+}
+
+/**
+ * raw_atomic64_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_set(atomic64_t *v, s64 i)
+{
+ arch_atomic64_set(v, i);
+}
+
+/**
+ * raw_atomic64_set_release() - atomic set with release ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_set_release(atomic64_t *v, s64 i)
+{
+#if defined(arch_atomic64_set_release)
+ arch_atomic64_set_release(v, i);
+#else
+ if (__native_word(atomic64_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ raw_atomic64_set(v, i);
+ }
+#endif
+}
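+
+/*
+ * Illustrative sketch (hypothetical names): set_release pairs with
+ * raw_atomic64_read_acquire() above to publish data written before the
+ * flag:
+ *
+ *	writer:
+ *		obj->payload = compute_payload();
+ *		raw_atomic64_set_release(&obj->ready, 1);
+ *
+ *	reader:
+ *		if (raw_atomic64_read_acquire(&obj->ready))
+ *			consume(obj->payload);
+ */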
+
+/**
+ * raw_atomic64_add() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_add(s64 i, atomic64_t *v)
+{
+ arch_atomic64_add(i, v);
+}
+
+/**
+ * raw_atomic64_add_return() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_add_return"
+#endif
+}
+
+/**
+ * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_acquire)
+ return arch_atomic64_add_return_acquire(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ s64 ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_add_return_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_release)
+ return arch_atomic64_add_return_release(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_add_return_relaxed(i, v);
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_release"
+#endif
+}
+
+/**
+ * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_relaxed)
+ return arch_atomic64_add_return_relaxed(i, v);
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_add"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_acquire)
+ return arch_atomic64_fetch_add_acquire(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_release)
+ return arch_atomic64_fetch_add_release(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_relaxed)
+ return arch_atomic64_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_sub() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_sub(s64 i, atomic64_t *v)
+{
+ arch_atomic64_sub(i, v);
+}
+
+/**
+ * raw_atomic64_sub_return() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_sub_return"
+#endif
+}
+
+/**
+ * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_acquire)
+ return arch_atomic64_sub_return_acquire(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_sub_return_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_release)
+ return arch_atomic64_sub_return_release(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_sub_return_relaxed(i, v);
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_release"
+#endif
+}
+
+/**
+ * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_relaxed)
+ return arch_atomic64_sub_return_relaxed(i, v);
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_sub"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub_acquire)
+ return arch_atomic64_fetch_sub_acquire(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub_release)
+ return arch_atomic64_fetch_sub_release(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub_relaxed)
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_inc(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc)
+ arch_atomic64_inc(v);
+#else
+ raw_atomic64_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_add_return(1, v);
+#endif
+}
+
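[Editorial note: a common use of the fully ordered inc_return is handing out globally ordered sequence numbers; a sketch with a hypothetical helper:]

static atomic64_t next_seq = ATOMIC64_INIT(0);

static u64 alloc_seq(void)
{
	/* full ordering: each allocation acts as a global synchronization point */
	return (u64)atomic64_inc_return(&next_seq);
}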
+/**
+ * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return_acquire)
+ return arch_atomic64_inc_return_acquire(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ s64 ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return_release)
+ return arch_atomic64_inc_return_release(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_inc_return_relaxed(v);
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return_relaxed)
+ return arch_atomic64_inc_return_relaxed(v);
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc_acquire)
+ return arch_atomic64_fetch_inc_acquire(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc_release)
+ return arch_atomic64_fetch_inc_release(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_inc_relaxed(v);
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc_relaxed)
+ return arch_atomic64_fetch_inc_relaxed(v);
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_dec(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec)
+ arch_atomic64_dec(v);
+#else
+ raw_atomic64_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_sub_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_acquire)
+ return arch_atomic64_dec_return_acquire(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ s64 ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_release)
+ return arch_atomic64_dec_return_release(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_dec_return_relaxed(v);
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_relaxed)
+ return arch_atomic64_dec_return_relaxed(v);
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec_acquire)
+ return arch_atomic64_fetch_dec_acquire(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec_release)
+ return arch_atomic64_fetch_dec_release(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_dec_relaxed(v);
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec_relaxed)
+ return arch_atomic64_fetch_dec_relaxed(v);
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_and(s64 i, atomic64_t *v)
+{
+ arch_atomic64_and(i, v);
+}
+
+/**
+ * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_and"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and_acquire)
+ return arch_atomic64_fetch_and_acquire(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
+ s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and_release)
+ return arch_atomic64_fetch_and_release(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and_relaxed)
+ return arch_atomic64_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_andnot(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_andnot)
+ arch_atomic64_andnot(i, v);
+#else
+ raw_atomic64_and(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_and(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_acquire)
+ return arch_atomic64_fetch_andnot_acquire(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_acquire(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_release)
+ return arch_atomic64_fetch_andnot_release(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_release(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_relaxed)
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_relaxed(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_or(s64 i, atomic64_t *v)
+{
+ arch_atomic64_or(i, v);
+}
+
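[Editorial note: the and/andnot/or trio covers atomic bitmask manipulation; andnot exists so callers can clear bits without negating the mask themselves, as the fallback above does. A hedged sketch with made-up flag names:]

#include <linux/bits.h>

#define FLAG_BUSY	BIT_ULL(0)
#define FLAG_DIRTY	BIT_ULL(1)

static atomic64_t flags = ATOMIC64_INIT(0);

static void mark_busy_dirty(void)
{
	atomic64_or(FLAG_BUSY | FLAG_DIRTY, &flags);	/* set both bits */
}

static void clear_dirty(void)
{
	atomic64_andnot(FLAG_DIRTY, &flags);		/* clear DIRTY, leave BUSY untouched */
}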
+/**
+ * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_or"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_acquire)
+ return arch_atomic64_fetch_or_acquire(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
+ s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_release)
+ return arch_atomic64_fetch_or_release(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_relaxed)
+ return arch_atomic64_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_xor(s64 i, atomic64_t *v)
+{
+ arch_atomic64_xor(i, v);
+}
+
+/**
+ * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_xor"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_acquire)
+ return arch_atomic64_fetch_xor_acquire(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
+ s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_release)
+ return arch_atomic64_fetch_xor_release(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_relaxed)
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_xchg_relaxed(v, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_xchg(&v->counter, new);
+#endif
+}
+
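[Editorial note: xchg is handy as a drain-and-reset, grabbing the accumulated value and zeroing the counter in one atomic step. Sketch, helper name hypothetical:]

static atomic64_t nr_pending = ATOMIC64_INIT(0);

static u64 drain_pending(void)
{
	/* full ordering: everything counted before the xchg is covered */
	return (u64)atomic64_xchg(&nr_pending, 0);
}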
+/**
+ * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_acquire)
+ return arch_atomic64_xchg_acquire(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ s64 ret = arch_atomic64_xchg_relaxed(v, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_acquire(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic64_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_release(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_release)
+ return arch_atomic64_xchg_release(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_xchg_relaxed(v, new);
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_release(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_relaxed)
+ return arch_atomic64_xchg_relaxed(v, new);
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_relaxed(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_cmpxchg(&v->counter, old, new);
+#endif
+}
+
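[Editorial note: cmpxchg reports success implicitly, in that the exchange happened iff the returned value equals @old. A sketch of the classic claim-a-slot race, names hypothetical:]

static bool claim(atomic64_t *owner, s64 me)
{
	/* 0 means unowned; only one caller can move 0 -> me */
	return atomic64_cmpxchg(owner, 0, me) == 0;
}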
+/**
+ * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg_acquire)
+ return arch_atomic64_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
+ s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_acquire(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg_release)
+ return arch_atomic64_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_release(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg_relaxed)
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_relaxed(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
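[Editorial note: try_cmpxchg folds the reload into the failure path, since @old is updated in place the retry loop never has to re-read @v. A sketch of the usual idiom, a lock-free running maximum (helper hypothetical); note the generated fallbacks later in this file use the same loop shape:]

static void track_max(atomic64_t *max, s64 sample)
{
	s64 cur = atomic64_read(max);

	while (sample > cur) {
		if (atomic64_try_cmpxchg(max, &cur, sample))
			break;	/* on failure, cur was reloaded for the next pass */
	}
}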
+/**
+ * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg_acquire)
+ return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg_release)
+ return arch_atomic64_try_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg_relaxed)
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_and_test)
+ return arch_atomic64_sub_and_test(i, v);
+#else
+ return raw_atomic64_sub_return(i, v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_dec_and_test(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_and_test)
+ return arch_atomic64_dec_and_test(v);
+#else
+ return raw_atomic64_dec_return(v) == 0;
+#endif
+}
+
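[Editorial note: dec_and_test's full ordering is what makes refcount-style teardown safe, ordering all of the last owner's prior accesses before the test that triggers the free. A hedged sketch, struct and helper made up:]

#include <linux/slab.h>

struct widget {
	atomic64_t refcnt;
};

static void widget_put(struct widget *w)
{
	if (atomic64_dec_and_test(&w->refcnt))
		kfree(w);	/* we dropped the last reference */
}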
+/**
+ * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_inc_and_test(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_and_test)
+ return arch_atomic64_inc_and_test(v);
+#else
+ return raw_atomic64_inc_return(v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_add_return(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative_acquire)
+ return arch_atomic64_add_negative_acquire(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ bool ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_acquire(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative_release)
+ return arch_atomic64_add_negative_release(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_add_negative_relaxed(i, v);
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_release(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative_relaxed)
+ return arch_atomic64_add_negative_relaxed(i, v);
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_relaxed(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+#if defined(arch_atomic64_fetch_add_unless)
+ return arch_atomic64_fetch_add_unless(v, a, u);
+#else
+ s64 c = raw_atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
+#endif
+}
+
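[Editorial note: fetch_add_unless compares against a single sentinel @u, which suffices for bounded counters, since as long as every increment goes through it the count can never pass the limit. Sketch, names and limit hypothetical:]

#define MAX_USERS	1024

static bool user_admit(atomic64_t *nr_users)
{
	/* becomes a no-op once the count sits exactly at the limit */
	return atomic64_fetch_add_unless(nr_users, 1, MAX_USERS) != MAX_USERS;
}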
+/**
+ * raw_atomic64_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+#if defined(arch_atomic64_add_unless)
+ return arch_atomic64_add_unless(v, a, u);
+#else
+ return raw_atomic64_fetch_add_unless(v, a, u) != u;
+#endif
+}
+
+/**
+ * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_inc_not_zero(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_not_zero)
+ return arch_atomic64_inc_not_zero(v);
+#else
+ return raw_atomic64_add_unless(v, 1, 0);
+#endif
+}
+
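[Editorial note: inc_not_zero is the lookup half of the refcount pattern, in that a reference may only be taken while the count is still nonzero, so an object that has begun teardown cannot be resurrected. Sketch, reusing the hypothetical widget above:]

static struct widget *widget_tryget(struct widget *w)
{
	if (!w || !atomic64_inc_not_zero(&w->refcnt))
		return NULL;	/* already dying; do not resurrect */
	return w;
}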
+/**
+ * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_inc_unless_negative(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_unless_negative)
+ return arch_atomic64_inc_unless_negative(v);
+#else
+ s64 c = raw_atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_dec_unless_positive(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_unless_positive)
+ return arch_atomic64_dec_unless_positive(v);
+#else
+ s64 c = raw_atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
+ *
+ * Return: The old value of (@v - 1), regardless of whether @v was updated.
+ */
+static __always_inline s64
+raw_atomic64_dec_if_positive(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_if_positive)
+ return arch_atomic64_dec_if_positive(v);
+#else
+ s64 dec, c = raw_atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
+#endif
+}
+
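[Editorial note: dec_if_positive behaves like a non-blocking semaphore trydown, where the count never goes below zero and the sign of the return value tells the caller whether a unit was consumed. Sketch, helper hypothetical:]

static bool budget_trydown(atomic64_t *budget)
{
	/* returns old - 1, so >= 0 means we actually decremented */
	return atomic64_dec_if_positive(budget) >= 0;
}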
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// b565db590afeeff0d7c9485ccbca5bb6e155749f
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
new file mode 100644
index 000000000000..37ab6314a9f7
--- /dev/null
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -0,0 +1,5053 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides atomic operations with explicit instrumentation (e.g.
+ * KASAN, KCSAN), which should be used unless it is necessary to avoid
+ * instrumentation. Where it is necessary to avoid instrumentation, the
+ * raw_atomic*() operations should be used.
+ */
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
+
+/**
+ * atomic_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+atomic_read(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_read(v);
+}
+
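[Editorial note: every wrapper in this header follows the shape of atomic_read() above, announcing the access to the sanitizer runtimes and then calling down to the raw op. The practical rule for callers, sketched with hypothetical functions:]

static int sample(atomic_t *ctr)
{
	return atomic_read(ctr);	/* KASAN/KCSAN observe this access */
}

static noinstr int sample_noinstr(atomic_t *ctr)
{
	return raw_atomic_read(ctr);	/* no instrumentation calls emitted */
}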
+/**
+ * atomic_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+atomic_read_acquire(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_read_acquire(v);
+}
+
+/**
+ * atomic_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_set() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_set(atomic_t *v, int i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_set(v, i);
+}
+
+/**
+ * atomic_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_set_release() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_set_release(atomic_t *v, int i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_set_release(v, i);
+}
+
+/**
+ * atomic_add() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_add(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_add(i, v);
+}
+
+/**
+ * atomic_add_return() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return(i, v);
+}
+
+/**
+ * atomic_add_return_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return_acquire(i, v);
+}
+
+/**
+ * atomic_add_return_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return_release(i, v);
+}
+
+/**
+ * atomic_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return_relaxed(i, v);
+}
+
+/**
+ * atomic_fetch_add() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add(i, v);
+}
+
+/**
+ * atomic_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_add_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_release(i, v);
+}
+
+/**
+ * atomic_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_relaxed(i, v);
+}
+
+/**
+ * atomic_sub() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_sub(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_sub(i, v);
+}
+
+/**
+ * atomic_sub_return() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return(i, v);
+}
+
+/**
+ * atomic_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return_acquire(i, v);
+}
+
+/**
+ * atomic_sub_return_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return_release(i, v);
+}
+
+/**
+ * atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return_relaxed(i, v);
+}
+
+/**
+ * atomic_fetch_sub() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub(i, v);
+}
+
+/**
+ * atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_sub_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub_release(i, v);
+}
+
+/**
+ * atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub_relaxed(i, v);
+}
+
+/**
+ * atomic_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_inc(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_inc(v);
+}
+
+/**
+ * atomic_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return(v);
+}
+
+/**
+ * atomic_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return_acquire(v);
+}
+
+/**
+ * atomic_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return_release(v);
+}
+
+/**
+ * atomic_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return_relaxed(v);
+}
+
+/**
+ * atomic_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc(v);
+}
+
+/**
+ * atomic_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc_acquire(v);
+}
+
+/**
+ * atomic_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc_release(v);
+}
+
+/**
+ * atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc_relaxed(v);
+}
+
+/**
+ * atomic_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_dec(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_dec(v);
+}
+
+/**
+ * atomic_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return(v);
+}
+
+/**
+ * atomic_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return_acquire(v);
+}
+
+/**
+ * atomic_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return_release(v);
+}
+
+/**
+ * atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return_relaxed(v);
+}
+
+/**
+ * atomic_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec(v);
+}
+
+/**
+ * atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec_acquire(v);
+}
+
+/**
+ * atomic_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec_release(v);
+}
+
+/**
+ * atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec_relaxed(v);
+}
+
+/**
+ * atomic_and() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_and() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_and(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_and(i, v);
+}
+
+/**
+ * atomic_fetch_and() - atomic bitwise AND with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and(i, v);
+}
+
+/**
+ * atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and_release(i, v);
+}
+
+/**
+ * atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and_relaxed(i, v);
+}
+
+/**
+ * atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_andnot() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_andnot(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_andnot(i, v);
+}
+
+/**
+ * atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot(i, v);
+}
+
+/**
+ * atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot_release(i, v);
+}
+
+/**
+ * atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot_relaxed(i, v);
+}
+
+/**
+ * atomic_or() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_or() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_or(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_or(i, v);
+}
+
+/**
+ * atomic_fetch_or() - atomic bitwise OR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or(i, v);
+}
+
+/**
+ * atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or_release(i, v);
+}
+
+/**
+ * atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or_relaxed(i, v);
+}
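
/*
 * [Editorial illustration; not part of the generated header.]
 * The bitwise ops are commonly used on small flag words: atomic_or() sets
 * bits, atomic_andnot() clears them, and atomic_fetch_or() additionally
 * reports the previous value, giving a one-shot "set the flag and learn
 * whether it was already set". EXAMPLE_DIRTY and example_flags are
 * hypothetical names:
 */
#define EXAMPLE_DIRTY	0x1

static atomic_t example_flags = ATOMIC_INIT(0);

static bool example_mark_dirty(void)
{
	/* true only for the caller that actually set the bit */
	return !(atomic_fetch_or(EXAMPLE_DIRTY, &example_flags) & EXAMPLE_DIRTY);
}

static void example_clear_dirty(void)
{
	atomic_andnot(EXAMPLE_DIRTY, &example_flags);
}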
+
+/**
+ * atomic_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xor() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_xor(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_xor(i, v);
+}
+
+/**
+ * atomic_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor(i, v);
+}
+
+/**
+ * atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor_release(i, v);
+}
+
+/**
+ * atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor_relaxed(i, v);
+}
+
+/**
+ * atomic_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg(atomic_t *v, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg(v, new);
+}
+
+/**
+ * atomic_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg_acquire(atomic_t *v, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg_acquire(v, new);
+}
+
+/**
+ * atomic_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg_release(atomic_t *v, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg_release(v, new);
+}
+
+/**
+ * atomic_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg_relaxed(atomic_t *v, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg_relaxed(v, new);
+}
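
/*
 * [Editorial illustration; not part of the generated header.]
 * atomic_xchg() unconditionally installs a new value and hands back the old
 * one, which makes "consume whatever is pending" race-free: no other thread
 * can observe or take the same units. A hypothetical drain of a
 * pending-work counter:
 */
static atomic_t example_pending = ATOMIC_INIT(0);

static int example_drain_pending(void)
{
	/* take all pending units and reset the counter in one atomic step */
	return atomic_xchg(&example_pending, 0);
}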
+
+/**
+ * atomic_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg_relaxed(v, old, new);
+}
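
/*
 * [Editorial illustration; not part of the generated header.]
 * atomic_cmpxchg() always returns the value it found, so the caller must
 * compare it against @old to learn whether the exchange happened. A
 * hypothetical one-shot state transition:
 */
enum { EXAMPLE_IDLE, EXAMPLE_RUNNING };

static atomic_t example_state = ATOMIC_INIT(EXAMPLE_IDLE);

static bool example_try_start(void)
{
	/* succeeds only if we observed IDLE and installed RUNNING */
	return atomic_cmpxchg(&example_state, EXAMPLE_IDLE,
			      EXAMPLE_RUNNING) == EXAMPLE_IDLE;
}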
+
+/**
+ * atomic_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg_relaxed(v, old, new);
+}
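
/*
 * [Editorial illustration; not part of the generated header.]
 * atomic_try_cmpxchg() folds the "reload @old on failure" step into the
 * operation itself (hence the extra instrument_read_write() on @old above),
 * which is why it is the preferred shape for compare-and-swap update loops.
 * A hypothetical saturating increment:
 */
static int example_inc_saturating(atomic_t *v, int max)
{
	int old = atomic_read(v);

	do {
		if (old == max)
			break;	/* already saturated; leave @v untouched */
	} while (!atomic_try_cmpxchg(v, &old, old + 1));

	/* value before the increment, or @max if saturated */
	return old;
}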
+
+/**
+ * atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_and_test(i, v);
+}
+
+/**
+ * atomic_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_and_test(v);
+}
+
+/**
+ * atomic_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_and_test(v);
+}
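
/*
 * [Editorial illustration; not part of the generated header.]
 * atomic_dec_and_test() is the classic refcount-drop shape: the fully
 * ordered decrement ensures that the thread which sees the count hit zero
 * also sees every prior access to the object, so it can safely free it.
 * A hypothetical put path (struct example_obj is made up):
 */
struct example_obj {
	atomic_t refs;
	/* ... payload ... */
};

static void example_obj_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refs))	/* true only for the final put */
		kfree(obj);
}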
+
+/**
+ * atomic_add_negative() - atomic add and test if negative with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative(i, v);
+}
+
+/**
+ * atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative_acquire(i, v);
+}
+
+/**
+ * atomic_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative_release(i, v);
+}
+
+/**
+ * atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative_relaxed(i, v);
+}
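
/*
 * [Editorial illustration; not part of the generated header.]
 * atomic_add_negative() reports the sign of the result rather than the
 * result itself, which suits counters where crossing zero is the event of
 * interest. A hypothetical budget counter:
 */
static atomic_t example_budget = ATOMIC_INIT(16);

static bool example_charge(int units)
{
	/* true means this charge overdrew the budget */
	return atomic_add_negative(-units, &example_budget);
}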
+
+/**
+ * atomic_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_unless(v, a, u);
+}
+
+/**
+ * atomic_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_unless(v, a, u);
+}
+
+/**
+ * atomic_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_not_zero(v);
+}
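
/*
 * [Editorial illustration; not part of the generated header.]
 * atomic_add_unless() and atomic_inc_not_zero() make the update conditional
 * on the current value; atomic_inc_not_zero(v) behaves like
 * atomic_add_unless(v, 1, 0). The canonical use is a lookup-side "try to
 * take a reference, but never resurrect an object whose count already hit
 * zero". A hypothetical tryget, reusing struct example_obj sketched earlier:
 */
static bool example_obj_tryget(struct example_obj *obj)
{
	/* fails, leaving the count at zero, if the object is being freed */
	return atomic_inc_not_zero(&obj->refs);
}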
+
+/**
+ * atomic_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_unless_negative(v);
+}
+
+/**
+ * atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_unless_positive(v);
+}
+
+/**
+ * atomic_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_if_positive() there.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v
+ * was updated.
+ */
+static __always_inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_if_positive(v);
+}
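
/*
 * [Editorial illustration; not part of the generated header.]
 * atomic_dec_if_positive() never drives the counter below zero and returns
 * the would-be result either way, so the sign of the return value tells the
 * caller whether a unit was actually taken. A hypothetical try-acquire on a
 * counted resource:
 */
static atomic_t example_slots = ATOMIC_INIT(4);

static bool example_try_take_slot(void)
{
	/* >= 0 means the decrement happened and a slot is ours */
	return atomic_dec_if_positive(&example_slots) >= 0;
}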
+
+/**
+ * atomic64_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+atomic64_read(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic64_read(v);
+}
+
+/**
+ * atomic64_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic64_read_acquire(v);
+}
+
+/**
+ * atomic64_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_set() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic64_set(v, i);
+}
+
+/**
+ * atomic64_set_release() - atomic set with release ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_set_release() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic64_set_release(v, i);
+}
+
+/**
+ * atomic64_add() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_add(i, v);
+}
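
/*
 * [Editorial illustration; not part of the generated header.]
 * The atomic64_*() family mirrors the atomic_t API with s64 values, for
 * counters that may overflow 32 bits; on 32-bit platforms plain 64-bit
 * loads and stores could tear without it. A hypothetical byte counter:
 */
static atomic64_t example_bytes = ATOMIC64_INIT(0);

static void example_account(size_t len)
{
	atomic64_add(len, &example_bytes);
}

static s64 example_total(void)
{
	return atomic64_read(&example_bytes);
}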
+
+/**
+ * atomic64_add_return() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return(i, v);
+}
+
+/**
+ * atomic64_add_return_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return_acquire(i, v);
+}
+
+/**
+ * atomic64_add_return_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return_release(i, v);
+}
+
+/**
+ * atomic64_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return_relaxed(i, v);
+}
+
+/**
+ * atomic64_fetch_add() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add(i, v);
+}
+
+/**
+ * atomic64_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_add_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_release(i, v);
+}
+
+/**
+ * atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_relaxed(i, v);
+}
+
+/**
+ * atomic64_sub() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_sub(i, v);
+}
+
+/**
+ * atomic64_sub_return() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return(i, v);
+}
+
+/**
+ * atomic64_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return_acquire(i, v);
+}
+
+/**
+ * atomic64_sub_return_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return_release(i, v);
+}
+
+/**
+ * atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return_relaxed(i, v);
+}
+
+/**
+ * atomic64_fetch_sub() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub(i, v);
+}
+
+/**
+ * atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_sub_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub_release(i, v);
+}
+
+/**
+ * atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub_relaxed(i, v);
+}
+
+/**
+ * atomic64_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_inc(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_inc(v);
+}
+
+/**
+ * atomic64_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return(v);
+}
+
+/**
+ * atomic64_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return_acquire(v);
+}
+
+/**
+ * atomic64_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return_release(v);
+}
+
+/**
+ * atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return_relaxed(v);
+}
+
+/**
+ * atomic64_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc(v);
+}
+
+/**
+ * atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc_acquire(v);
+}
+
+/**
+ * atomic64_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc_release(v);
+}
+
+/**
+ * atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc_relaxed(v);
+}
+
+/**
+ * atomic64_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_dec(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_dec(v);
+}
+
+/**
+ * atomic64_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return(v);
+}
+
+/**
+ * atomic64_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return_acquire(v);
+}
+
+/**
+ * atomic64_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return_release(v);
+}
+
+/**
+ * atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return_relaxed(v);
+}
+
+/**
+ * atomic64_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec(v);
+}
+
+/**
+ * atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec_acquire(v);
+}
+
+/**
+ * atomic64_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec_release(v);
+}
+
+/**
+ * atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec_relaxed(v);
+}
+
+/**
+ * atomic64_and() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_and() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_and(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_and(i, v);
+}
+
+/**
+ * atomic64_fetch_and() - atomic bitwise AND with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and(i, v);
+}
+
+/**
+ * atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and_release(i, v);
+}
+
+/**
+ * atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and_relaxed(i, v);
+}
+
+/**
+ * atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_andnot() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_andnot(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot(i, v);
+}
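+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): a
+ * fetch_andnot() clears flag bits and returns the prior value in a single
+ * atomic step, so each posted event is consumed exactly once even when
+ * producers keep setting bits concurrently. The EXAMPLE_* bits and the
+ * handle_*() helpers are hypothetical.
+ */
+#define EXAMPLE_PENDING_RX BIT_ULL(0)
+#define EXAMPLE_PENDING_TX BIT_ULL(1)
+
+static void example_drain_pending(atomic64_t *flags)
+{
+ s64 was_set = atomic64_fetch_andnot(EXAMPLE_PENDING_RX | EXAMPLE_PENDING_TX,
+ flags);
+
+ if (was_set & EXAMPLE_PENDING_RX)
+ handle_rx(); /* hypothetical helper */
+ if (was_set & EXAMPLE_PENDING_TX)
+ handle_tx(); /* hypothetical helper */
+}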
+
+/**
+ * atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot_release(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+/**
+ * atomic64_or() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_or() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_or(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_or(i, v);
+}
+
+/**
+ * atomic64_fetch_or() - atomic bitwise OR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or(i, v);
+}
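+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): a
+ * fetch_or() sets a bit and reports whether it was already set, which
+ * gives a cheap "only the first caller proceeds" gate. The EXAMPLE_
+ * flag bit is hypothetical.
+ */
+#define EXAMPLE_SHUTDOWN BIT_ULL(0)
+
+static bool example_request_shutdown(atomic64_t *state)
+{
+ /* True only for the caller that actually flipped the bit. */
+ return !(atomic64_fetch_or(EXAMPLE_SHUTDOWN, state) & EXAMPLE_SHUTDOWN);
+}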
+
+/**
+ * atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or_release(i, v);
+}
+
+/**
+ * atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or_relaxed(i, v);
+}
+
+/**
+ * atomic64_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xor() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_xor(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_xor(i, v);
+}
+
+/**
+ * atomic64_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor(i, v);
+}
+
+/**
+ * atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor_release(i, v);
+}
+
+/**
+ * atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor_relaxed(i, v);
+}
+
+/**
+ * atomic64_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg(atomic64_t *v, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg(v, new);
+}
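+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): an
+ * xchg() can hand off an accumulated value while atomically resetting it,
+ * e.g. draining a statistics counter that other CPUs keep incrementing.
+ */
+static s64 example_drain_counter(atomic64_t *counter)
+{
+ /* Returns the accumulated total and leaves the counter at zero. */
+ return atomic64_xchg(counter, 0);
+}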
+
+/**
+ * atomic64_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg_acquire(v, new);
+}
+
+/**
+ * atomic64_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg_release(atomic64_t *v, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg_release(v, new);
+}
+
+/**
+ * atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg_relaxed(v, new);
+}
+
+/**
+ * atomic64_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg(v, old, new);
+}
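+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): the
+ * classic cmpxchg() retry loop. cmpxchg() returns the old value, so the
+ * caller must compare it against the expected value itself; the
+ * try_cmpxchg() form further below folds that comparison into a boolean
+ * result.
+ */
+static void example_set_max(atomic64_t *max, s64 candidate)
+{
+ s64 old = atomic64_read(max);
+
+ while (old < candidate) {
+ s64 seen = atomic64_cmpxchg(max, old, candidate);
+
+ if (seen == old)
+ break; /* the exchange happened */
+ old = seen; /* lost the race; retry against the new value */
+ }
+}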
+
+/**
+ * atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg(v, old, new);
+}
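+
+/*
+ * Usage sketch (illustrative only, not part of the generated API):
+ * try_cmpxchg() updates *@old on failure, so the retry loop is shorter
+ * than the cmpxchg() sketch above and avoids an explicit reload on each
+ * iteration.
+ */
+static bool example_inc_below(atomic64_t *v, s64 limit)
+{
+ s64 old = atomic64_read(v);
+
+ do {
+ if (old >= limit)
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &old, old + 1));
+
+ return true;
+}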
+
+/**
+ * atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_and_test(i, v);
+}
+
+/**
+ * atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_and_test(v);
+}
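+
+/*
+ * Usage sketch (illustrative only, not part of the generated API):
+ * dec_and_test() is the classic "drop a reference, last one out cleans
+ * up" primitive; its full ordering ensures all prior accesses to the
+ * object happen before the free. struct example_obj and example_free()
+ * are hypothetical (real code would normally use refcount_t).
+ */
+struct example_obj {
+ atomic64_t refs;
+};
+
+static void example_put(struct example_obj *obj)
+{
+ if (atomic64_dec_and_test(&obj->refs))
+ example_free(obj); /* hypothetical */
+}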
+
+/**
+ * atomic64_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_and_test(v);
+}
+
+/**
+ * atomic64_add_negative() - atomic add and test if negative with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative(i, v);
+}
+
+/**
+ * atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative_acquire(i, v);
+}
+
+/**
+ * atomic64_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative_release(i, v);
+}
+
+/**
+ * atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative_relaxed(i, v);
+}
+
+/**
+ * atomic64_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_unless(v, a, u);
+}
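+
+/*
+ * Usage sketch (illustrative only, not part of the generated API):
+ * fetch_add_unless() adds only while the current value differs from a
+ * sentinel, e.g. a counter that sticks permanently at S64_MAX once it
+ * has been frozen.
+ */
+static bool example_count_event(atomic64_t *counter)
+{
+ /* Returns false once the counter has been frozen at S64_MAX. */
+ return atomic64_fetch_add_unless(counter, 1, S64_MAX) != S64_MAX;
+}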
+
+/**
+ * atomic64_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_unless(v, a, u);
+}
+
+/**
+ * atomic64_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_not_zero(v);
+}
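+
+/*
+ * Usage sketch (illustrative only, not part of the generated API):
+ * inc_not_zero() implements "take a reference only if the object is
+ * still live", refusing to resurrect an object whose count has already
+ * dropped to zero. Pairs with the example_put() sketch above.
+ */
+static bool example_tryget(struct example_obj *obj)
+{
+ return atomic64_inc_not_zero(&obj->refs);
+}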
+
+/**
+ * atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_unless_negative(v);
+}
+
+/**
+ * atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_unless_positive(v);
+}
+
+/**
+ * atomic64_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_if_positive() there.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
+static __always_inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_if_positive(v);
+}
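+
+/*
+ * Usage sketch (illustrative only, not part of the generated API):
+ * dec_if_positive() consumes a token only when one is available and
+ * never drives the count negative; a non-negative result means a token
+ * was taken.
+ */
+static bool example_take_token(atomic64_t *tokens)
+{
+ return atomic64_dec_if_positive(tokens) >= 0;
+}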
+
+/**
+ * atomic_long_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+atomic_long_read(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_long_read(v);
+}
+
+/**
+ * atomic_long_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_long_read_acquire(v);
+}
+
+/**
+ * atomic_long_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_set() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_long_set(v, i);
+}
+
+/**
+ * atomic_long_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_set_release() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_long_set_release(v, i);
+}
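+
+/*
+ * Usage sketch (illustrative only, not part of the generated API):
+ * set_release() pairs with read_acquire() above to publish data. Writes
+ * made before the release store are guaranteed visible to a reader that
+ * observes the flag via the acquire load. The example_payload variable
+ * is hypothetical.
+ */
+static long example_payload;
+
+static void example_publish(atomic_long_t *ready)
+{
+ example_payload = 42; /* plain store, ordered before the release */
+ atomic_long_set_release(ready, 1);
+}
+
+static bool example_consume(atomic_long_t *ready, long *out)
+{
+ if (!atomic_long_read_acquire(ready))
+ return false;
+ *out = example_payload; /* ordered after the acquire load */
+ return true;
+}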
+
+/**
+ * atomic_long_add() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_add(i, v);
+}
+
+/**
+ * atomic_long_add_return() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return(i, v);
+}
+
+/**
+ * atomic_long_add_return_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return_acquire(i, v);
+}
+
+/**
+ * atomic_long_add_return_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return_release(i, v);
+}
+
+/**
+ * atomic_long_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return_relaxed(i, v);
+}
+
+/**
+ * atomic_long_fetch_add() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_relaxed(i, v);
+}
+
+/**
+ * atomic_long_sub() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_sub(i, v);
+}
+
+/**
+ * atomic_long_sub_return() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return(i, v);
+}
+
+/**
+ * atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return_acquire(i, v);
+}
+
+/**
+ * atomic_long_sub_return_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return_release(i, v);
+}
+
+/**
+ * atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return_relaxed(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub_relaxed(i, v);
+}
+
+/**
+ * atomic_long_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_inc(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_inc(v);
+}
+
+/**
+ * atomic_long_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return(v);
+}
+
+/**
+ * atomic_long_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return_acquire(v);
+}
+
+/**
+ * atomic_long_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return_release(v);
+}
+
+/**
+ * atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return_relaxed(v);
+}
+
+/**
+ * atomic_long_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc(v);
+}
+
+/**
+ * atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc_acquire(v);
+}
+
+/**
+ * atomic_long_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc_release(v);
+}
+
+/**
+ * atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc_relaxed(v);
+}
+
+/**
+ * atomic_long_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_dec(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_dec(v);
+}
+
+/**
+ * atomic_long_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return(v);
+}
+
+/**
+ * atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return_acquire(v);
+}
+
+/**
+ * atomic_long_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return_release(v);
+}
+
+/**
+ * atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return_relaxed(v);
+}
+
+/**
+ * atomic_long_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec(v);
+}
+
+/**
+ * atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec_acquire(v);
+}
+
+/**
+ * atomic_long_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec_release(v);
+}
+
+/**
+ * atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec_relaxed(v);
+}
+
+/**
+ * atomic_long_and() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_and() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_and(i, v);
+}
+
+/**
+ * atomic_long_fetch_and() - atomic bitwise AND with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and(i, v);
+}
+
+/**
+ * atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and_relaxed(i, v);
+}
+
+/**
+ * atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_andnot() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_andnot(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot_relaxed(i, v);
+}
+
+/**
+ * atomic_long_or() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_or() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_or(i, v);
+}
+
+/**
+ * atomic_long_fetch_or() - atomic bitwise OR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or(i, v);
+}
+
+/**
+ * atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or_relaxed(i, v);
+}
+
+/**
+ * atomic_long_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xor() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_xor(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor_relaxed(i, v);
+}
+
+/**
+ * atomic_long_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg(atomic_long_t *v, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg(v, new);
+}
+
+/**
+ * atomic_long_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg_acquire(v, new);
+}
+
+/**
+ * atomic_long_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg_release(atomic_long_t *v, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg_release(v, new);
+}
+
+/**
+ * atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg_relaxed(v, new);
+}
+
+/**
+ * atomic_long_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg(v, old, new);
+}
+
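+/*
+ * Illustrative sketch (editorial, not generator output): because @old is
+ * updated to the current value on failure, atomic_long_try_cmpxchg()
+ * needs no explicit re-read in a retry loop; counter_add_capped() is a
+ * hypothetical helper:
+ *
+ *	static bool counter_add_capped(atomic_long_t *v, long cap)
+ *	{
+ *		long old = atomic_long_read(v);
+ *
+ *		do {
+ *			if (old >= cap)
+ *				return false;
+ *		} while (!atomic_long_try_cmpxchg(v, &old, old + 1));
+ *
+ *		return true;
+ *	}
+ */
+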
+/**
+ * atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_and_test(i, v);
+}
+
+/**
+ * atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_and_test(v);
+}
+
+/**
+ * atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_and_test(v);
+}
+
+/**
+ * atomic_long_add_negative() - atomic add and test if negative with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative(i, v);
+}
+
+/**
+ * atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative_acquire(i, v);
+}
+
+/**
+ * atomic_long_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative_release(i, v);
+}
+
+/**
+ * atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative_relaxed(i, v);
+}
+
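+/*
+ * Illustrative sketch (editorial): the add_negative() family suits signed
+ * accounting where crossing zero must trigger a reaction; @pool and
+ * reclaim_pool() are hypothetical:
+ *
+ *	if (atomic_long_add_negative(-nbytes, &pool->balance))
+ *		reclaim_pool(pool);
+ */
+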
+/**
+ * atomic_long_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_unless(v, a, u);
+}
+
+/**
+ * atomic_long_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_unless(v, a, u);
+}
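+/*
+ * Illustrative sketch (editorial): add_unless() acts as a saturation
+ * guard; @pool and MAX_SLOTS are hypothetical:
+ *
+ *	if (!atomic_long_add_unless(&pool->used, 1, MAX_SLOTS))
+ *		return -EBUSY;
+ */
+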
+
+/**
+ * atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_not_zero(v);
+}
+
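+/*
+ * Illustrative sketch (editorial): inc_not_zero() is the classic "take a
+ * reference only while the object is still live" pattern; struct obj is
+ * hypothetical:
+ *
+ *	static bool obj_get(struct obj *o)
+ *	{
+ *		return atomic_long_inc_not_zero(&o->refs);
+ *	}
+ */
+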
+/**
+ * atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_unless_negative(v);
+}
+
+/**
+ * atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_unless_positive(v);
+}
+
+/**
+ * atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_if_positive() there.
+ *
+ * Return: The old value of (@v - 1), regardless of whether @v was updated.
+ */
+static __always_inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_if_positive(v);
+}
+
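+/*
+ * Illustrative sketch (editorial): because the return value is the old
+ * value minus one, a non-negative result means a token was taken:
+ *
+ *	static bool sem_try_down(atomic_long_t *count)
+ *	{
+ *		return atomic_long_dec_if_positive(count) >= 0;
+ *	}
+ */
+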
+#define xchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define cmpxchg_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define sync_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define sync_try_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
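+/*
+ * Illustrative sketch (editorial): unlike the typed atomic_long_*()
+ * helpers above, these macros operate on plain scalars of a supported
+ * size; publish_once() is hypothetical:
+ *
+ *	static bool publish_once(unsigned long *slot, unsigned long val)
+ *	{
+ *		return cmpxchg(slot, 0UL, val) == 0UL;
+ *	}
+ */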
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+// f618ac667f868941a84ce0ab2242f1786e049ed4
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
new file mode 100644
index 000000000000..f86b29d90877
--- /dev/null
+++ b/include/linux/atomic/atomic-long.h
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
+#endif
+
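+/*
+ * Illustrative sketch (editorial): atomic_long_t always tracks the
+ * machine word, so initialisation and sizing follow long:
+ *
+ *	static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);
+ *	static_assert(sizeof(atomic_long_t) == sizeof(long));
+ */
+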
+/**
+ * raw_atomic_long_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+raw_atomic_long_read(const atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_read(v);
+#else
+ return raw_atomic_read(v);
+#endif
+}
+
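+/*
+ * Illustrative sketch (editorial): noinstr code must stick to the raw_*()
+ * forms so no instrumentation calls are emitted; entry_count is
+ * hypothetical:
+ *
+ *	static atomic_long_t entry_count;
+ *
+ *	noinstr long arch_entry_count(void)
+ *	{
+ *		return raw_atomic_long_read(&entry_count);
+ *	}
+ */
+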
+/**
+ * raw_atomic_long_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+raw_atomic_long_read_acquire(const atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_read_acquire(v);
+#else
+ return raw_atomic_read_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_set(atomic_long_t *v, long i)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_set(v, i);
+#else
+ raw_atomic_set(v, i);
+#endif
+}
+
+/**
+ * raw_atomic_long_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_set_release(atomic_long_t *v, long i)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_set_release(v, i);
+#else
+ raw_atomic_set_release(v, i);
+#endif
+}
+
+/**
+ * raw_atomic_long_add() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_add(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_add(i, v);
+#else
+ raw_atomic_add(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return(i, v);
+#else
+ return raw_atomic_add_return(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_acquire(i, v);
+#else
+ return raw_atomic_add_return_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_release(i, v);
+#else
+ return raw_atomic_add_return_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_relaxed(i, v);
+#else
+ return raw_atomic_add_return_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add(i, v);
+#else
+ return raw_atomic_fetch_add(i, v);
+#endif
+}
+
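+/*
+ * Illustrative sketch (editorial): fetch_add() returns the value before
+ * the addition, add_return() the value after; starting from v == 1:
+ *
+ *	old = raw_atomic_long_fetch_add(2, &v);    (old == 1, v == 3)
+ *	ret = raw_atomic_long_add_return(2, &v);   (ret == 5, v == 5)
+ */
+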
+/**
+ * raw_atomic_long_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_acquire(i, v);
+#else
+ return raw_atomic_fetch_add_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_release(i, v);
+#else
+ return raw_atomic_fetch_add_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_relaxed(i, v);
+#else
+ return raw_atomic_fetch_add_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_sub(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_sub(i, v);
+#else
+ raw_atomic_sub(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return(i, v);
+#else
+ return raw_atomic_sub_return(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_acquire(i, v);
+#else
+ return raw_atomic_sub_return_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_release(i, v);
+#else
+ return raw_atomic_sub_return_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_relaxed(i, v);
+#else
+ return raw_atomic_sub_return_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub(i, v);
+#else
+ return raw_atomic_fetch_sub(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_acquire(i, v);
+#else
+ return raw_atomic_fetch_sub_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_release(i, v);
+#else
+ return raw_atomic_fetch_sub_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_relaxed(i, v);
+#else
+ return raw_atomic_fetch_sub_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_inc(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_inc(v);
+#else
+ raw_atomic_inc(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return(v);
+#else
+ return raw_atomic_inc_return(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_acquire(v);
+#else
+ return raw_atomic_inc_return_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_release(v);
+#else
+ return raw_atomic_inc_return_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_relaxed(v);
+#else
+ return raw_atomic_inc_return_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc(v);
+#else
+ return raw_atomic_fetch_inc(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_acquire(v);
+#else
+ return raw_atomic_fetch_inc_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_release(v);
+#else
+ return raw_atomic_fetch_inc_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_relaxed(v);
+#else
+ return raw_atomic_fetch_inc_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_dec(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_dec(v);
+#else
+ raw_atomic_dec(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return(v);
+#else
+ return raw_atomic_dec_return(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_acquire(v);
+#else
+ return raw_atomic_dec_return_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_release(v);
+#else
+ return raw_atomic_dec_return_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_relaxed(v);
+#else
+ return raw_atomic_dec_return_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec(v);
+#else
+ return raw_atomic_fetch_dec(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_acquire(v);
+#else
+ return raw_atomic_fetch_dec_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_release(v);
+#else
+ return raw_atomic_fetch_dec_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_relaxed(v);
+#else
+ return raw_atomic_fetch_dec_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_and() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_and(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_and(i, v);
+#else
+ raw_atomic_and(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and() - atomic bitwise AND with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and(i, v);
+#else
+ return raw_atomic_fetch_and(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_acquire(i, v);
+#else
+ return raw_atomic_fetch_and_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_release(i, v);
+#else
+ return raw_atomic_fetch_and_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_relaxed(i, v);
+#else
+ return raw_atomic_fetch_and_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_andnot(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_andnot(i, v);
+#else
+ raw_atomic_andnot(i, v);
+#endif
+}
+
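+/*
+ * Illustrative sketch (editorial): andnot() clears exactly the bits set
+ * in @i and leaves the rest untouched; OBJ_DIRTY and obj_flags are
+ * hypothetical:
+ *
+ *	raw_atomic_long_andnot(OBJ_DIRTY, &obj_flags);
+ */
+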
+/**
+ * raw_atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_andnot(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_acquire(i, v);
+#else
+ return raw_atomic_fetch_andnot_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_release(i, v);
+#else
+ return raw_atomic_fetch_andnot_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_relaxed(i, v);
+#else
+ return raw_atomic_fetch_andnot_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_or() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_or(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_or(i, v);
+#else
+ raw_atomic_or(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or() - atomic bitwise OR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or(i, v);
+#else
+ return raw_atomic_fetch_or(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_acquire(i, v);
+#else
+ return raw_atomic_fetch_or_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_release(i, v);
+#else
+ return raw_atomic_fetch_or_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_relaxed(i, v);
+#else
+ return raw_atomic_fetch_or_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_xor(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_xor(i, v);
+#else
+ raw_atomic_xor(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor(i, v);
+#else
+ return raw_atomic_fetch_xor(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_acquire(i, v);
+#else
+ return raw_atomic_fetch_xor_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_release(i, v);
+#else
+ return raw_atomic_fetch_xor_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_relaxed(i, v);
+#else
+ return raw_atomic_fetch_xor_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg(v, new);
+#else
+ return raw_atomic_xchg(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg_acquire(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_acquire(v, new);
+#else
+ return raw_atomic_xchg_acquire(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg_release(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_release(v, new);
+#else
+ return raw_atomic_xchg_release(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_relaxed(v, new);
+#else
+ return raw_atomic_xchg_relaxed(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_atomic_cmpxchg(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_acquire(v, old, new);
+#else
+ return raw_atomic_cmpxchg_acquire(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_release(v, old, new);
+#else
+ return raw_atomic_cmpxchg_release(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_relaxed(v, old, new);
+#else
+ return raw_atomic_cmpxchg_relaxed(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg(v, (int *)old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_acquire(v, (int *)old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_release(v, (int *)old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+#endif
+}
+
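The try_cmpxchg() variants are built for retry loops: on failure @old is refreshed to the current value, so the loop body never needs an explicit reload. A minimal sketch, assuming ordinary (instrumented) context and a hypothetical saturating counter:

#include <linux/atomic.h>

static void saturating_inc(atomic_long_t *v, long max)
{
	long old = atomic_long_read(v);

	do {
		if (old >= max)
			return;
		/* on failure, 'old' now holds the current value of *v */
	} while (!atomic_long_try_cmpxchg(v, &old, old + 1));
}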
+/**
+ * raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_and_test(i, v);
+#else
+ return raw_atomic_sub_and_test(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_dec_and_test(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_and_test(v);
+#else
+ return raw_atomic_dec_and_test(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_inc_and_test(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_and_test(v);
+#else
+ return raw_atomic_inc_and_test(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative() - atomic add and test if negative with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative(i, v);
+#else
+ return raw_atomic_add_negative(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_acquire(i, v);
+#else
+ return raw_atomic_add_negative_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_release(i, v);
+#else
+ return raw_atomic_add_negative_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_relaxed(i, v);
+#else
+ return raw_atomic_add_negative_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_unless(v, a, u);
+#else
+ return raw_atomic_fetch_add_unless(v, a, u);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_unless(v, a, u);
+#else
+ return raw_atomic_add_unless(v, a, u);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_not_zero(v);
+#else
+ return raw_atomic_inc_not_zero(v);
+#endif
+}
+
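inc_not_zero() is the usual building block for "take a reference only if the object is still live" schemes. A hedged sketch with a hypothetical object type, again using the instrumented wrapper that non-noinstr code should prefer:

#include <linux/atomic.h>
#include <linux/types.h>

struct obj {
	atomic_long_t refcnt;
	/* payload ... */
};

/* Returns true and holds a new reference iff the count was non-zero. */
static bool obj_tryget(struct obj *o)
{
	return atomic_long_inc_not_zero(&o->refcnt);
}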
+/**
+ * raw_atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_unless_negative(v);
+#else
+ return raw_atomic_inc_unless_negative(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_unless_positive(v);
+#else
+ return raw_atomic_dec_unless_positive(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_if_positive() elsewhere.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
+static __always_inline long
+raw_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_if_positive(v);
+#else
+ return raw_atomic_dec_if_positive(v);
+#endif
+}
+
+#endif /* _LINUX_ATOMIC_LONG_H */
+// eadf183c3600b8b92b91839dd3be6bcc560c752d
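dec_if_positive() is the one operation above whose return value is not simply the old value, so a short sketch may help; the token bucket below is hypothetical:

#include <linux/atomic.h>

static atomic_long_t tokens = ATOMIC_LONG_INIT(16);

/* Consume one token if any remain. The result is the original value
 * minus one, so a negative result means no token was taken. */
static bool take_token(void)
{
	return atomic_long_dec_if_positive(&tokens) >= 0;
}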
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index 896c6892f327..b3643de9931d 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* attribute_container.h - a generic container for all classes
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
- *
- * This file is licensed under GPLv2
*/
#ifndef _ATTRIBUTE_CONTAINER_H_
@@ -55,14 +54,15 @@ void attribute_container_device_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct device *));
-void attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *));
+int attribute_container_device_trigger_safe(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *,
+ struct device *));
int attribute_container_add_attrs(struct device *classdev);
int attribute_container_add_class_device(struct device *classdev);
-int attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev);
void attribute_container_remove_attrs(struct device *classdev);
void attribute_container_class_device_del(struct device *classdev);
struct attribute_container *attribute_container_classdev_to_container(struct device *);
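The new attribute_container_device_trigger_safe() behaves like attribute_container_device_trigger() but accepts an undo callback, so containers already triggered can be rolled back when a later fn call fails. A hedged caller sketch; the callbacks are hypothetical:

#include <linux/attribute_container.h>

static int my_add(struct attribute_container *cont,
		  struct device *dev, struct device *classdev)
{
	/* set up per-container state; may return -errno */
	return 0;
}

static int my_undo(struct attribute_container *cont,
		   struct device *dev, struct device *classdev)
{
	/* tear down whatever my_add() set up */
	return 0;
}

static int my_attach(struct device *dev)
{
	/* on failure, containers already visited are unwound via my_undo() */
	return attribute_container_device_trigger_safe(dev, my_add, my_undo);
}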
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 2150bdccfbab..536f8ee8da81 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -1,31 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* audit.h -- Auditing support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Written by Rickard E. (Rik) Faith <faith@redhat.com>
- *
*/
#ifndef _LINUX_AUDIT_H_
#define _LINUX_AUDIT_H_
#include <linux/sched.h>
#include <linux/ptrace.h>
+#include <linux/audit_arch.h>
#include <uapi/linux/audit.h>
+#include <uapi/linux/netfilter/nf_tables.h>
+#include <uapi/linux/fanotify.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -33,7 +22,7 @@
struct audit_sig_info {
uid_t uid;
pid_t pid;
- char ctx[0];
+ char ctx[];
};
struct audit_buffer;
@@ -47,6 +36,9 @@ struct mqstat;
struct audit_watch;
struct audit_tree;
struct sk_buff;
+struct kern_ipc_perm;
+struct lsm_id;
+struct lsm_prop;
struct audit_krule {
u32 pflags;
@@ -85,7 +77,53 @@ struct audit_field {
u32 op;
};
-extern int is_audit_feature_set(int which);
+enum audit_ntp_type {
+ AUDIT_NTP_OFFSET,
+ AUDIT_NTP_FREQ,
+ AUDIT_NTP_STATUS,
+ AUDIT_NTP_TAI,
+ AUDIT_NTP_TICK,
+ AUDIT_NTP_ADJUST,
+
+ AUDIT_NTP_NVALS /* count */
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+struct audit_ntp_val {
+ long long oldval, newval;
+};
+
+struct audit_ntp_data {
+ struct audit_ntp_val vals[AUDIT_NTP_NVALS];
+};
+#else
+struct audit_ntp_data {};
+#endif
+
+enum audit_nfcfgop {
+ AUDIT_XT_OP_REGISTER,
+ AUDIT_XT_OP_REPLACE,
+ AUDIT_XT_OP_UNREGISTER,
+ AUDIT_NFT_OP_TABLE_REGISTER,
+ AUDIT_NFT_OP_TABLE_UNREGISTER,
+ AUDIT_NFT_OP_CHAIN_REGISTER,
+ AUDIT_NFT_OP_CHAIN_UNREGISTER,
+ AUDIT_NFT_OP_RULE_REGISTER,
+ AUDIT_NFT_OP_RULE_UNREGISTER,
+ AUDIT_NFT_OP_SET_REGISTER,
+ AUDIT_NFT_OP_SET_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_REGISTER,
+ AUDIT_NFT_OP_SETELEM_UNREGISTER,
+ AUDIT_NFT_OP_GEN_REGISTER,
+ AUDIT_NFT_OP_OBJ_REGISTER,
+ AUDIT_NFT_OP_OBJ_UNREGISTER,
+ AUDIT_NFT_OP_OBJ_RESET,
+ AUDIT_NFT_OP_FLOWTABLE_REGISTER,
+ AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_RESET,
+ AUDIT_NFT_OP_RULE_RESET,
+ AUDIT_NFT_OP_INVALID,
+};
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
@@ -97,8 +135,6 @@ extern unsigned compat_dir_class[];
extern unsigned compat_chattr_class[];
extern unsigned compat_signal_class[];
-extern int audit_classify_compat_syscall(int abi, unsigned syscall);
-
/* audit_names->type values */
#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
#define AUDIT_TYPE_NORMAL 1 /* a "normal" audit record */
@@ -113,10 +149,15 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
#define AUDIT_TTY_ENABLE BIT(0)
#define AUDIT_TTY_LOG_PASSWD BIT(1)
-struct filename;
+/* bit values for audit_cfg_lsm */
+#define AUDIT_CFG_LSM_SECCTX_SUBJECT BIT(0)
+#define AUDIT_CFG_LSM_SECCTX_OBJECT BIT(1)
-extern void audit_log_session_info(struct audit_buffer *ab);
+struct filename;
+#define AUDIT_OFF 0
+#define AUDIT_ON 1
+#define AUDIT_LOCKED 2
#ifdef CONFIG_AUDIT
/* These are defined in audit.c */
/* Public API */
@@ -146,19 +187,14 @@ extern void audit_log_d_path(struct audit_buffer *ab,
const struct path *path);
extern void audit_log_key(struct audit_buffer *ab,
char *key);
-extern void audit_log_link_denied(const char *operation,
- const struct path *link);
+extern void audit_log_path_denied(int type,
+ const char *operation);
extern void audit_log_lost(const char *message);
-#ifdef CONFIG_SECURITY
-extern void audit_log_secctx(struct audit_buffer *ab, u32 secid);
-#else
-static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
-{ }
-#endif
+extern int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
+extern int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk);
+extern void audit_log_task_info(struct audit_buffer *ab);
extern int audit_update_lsm_rules(void);
@@ -166,7 +202,24 @@ extern int audit_update_lsm_rules(void);
extern int audit_rule_change(int type, int seq, void *data, size_t datasz);
extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
+extern int audit_set_loginuid(kuid_t loginuid);
+
+static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
+{
+ return tsk->loginuid;
+}
+
+static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
+{
+ return tsk->sessionid;
+}
+
extern u32 audit_enabled;
+
+extern int audit_signal_info(int sig, struct task_struct *t);
+
+extern void audit_cfg_lsm(const struct lsm_id *lsmid, int flags);
+
#else /* CONFIG_AUDIT */
static inline __printf(4, 5)
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
@@ -200,19 +253,45 @@ static inline void audit_log_d_path(struct audit_buffer *ab,
{ }
static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
-static inline void audit_log_link_denied(const char *string,
- const struct path *link)
-{ }
-static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
+static inline void audit_log_path_denied(int type, const char *operation)
{ }
+static inline int audit_log_subj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
+static inline int audit_log_obj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
}
-static inline void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk)
+static inline void audit_log_task_info(struct audit_buffer *ab)
{ }
-#define audit_enabled 0
+
+static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
+{
+ return INVALID_UID;
+}
+
+static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
+{
+ return AUDIT_SID_UNSET;
+}
+
+#define audit_enabled AUDIT_OFF
+
+static inline int audit_signal_info(int sig, struct task_struct *t)
+{
+ return 0;
+}
+
+static inline void audit_cfg_lsm(const struct lsm_id *lsmid, int flags)
+{ }
+
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
@@ -221,6 +300,10 @@ static inline void audit_log_task_info(struct audit_buffer *ab,
#define audit_is_compat(arch) false
#endif
+#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */
+#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */
+#define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */
+
#ifdef CONFIG_AUDITSYSCALL
#include <asm/syscall.h> /* for syscall_get_arch() */
@@ -228,26 +311,37 @@ static inline void audit_log_task_info(struct audit_buffer *ab,
/* Public API */
extern int audit_alloc(struct task_struct *task);
extern void __audit_free(struct task_struct *task);
+extern void __audit_uring_entry(u8 op);
+extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
-
-#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */
-#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
extern void __audit_file(const struct file *);
extern void __audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type);
-extern void __audit_seccomp(unsigned long syscall, long signr, int code);
+extern void audit_seccomp(unsigned long syscall, long signr, int code);
+extern void audit_seccomp_actions_logged(const char *names,
+ const char *old_names, int res);
extern void __audit_ptrace(struct task_struct *t);
+static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
+{
+ task->audit_context = ctx;
+}
+
+static inline struct audit_context *audit_context(void)
+{
+ return current->audit_context;
+}
+
static inline bool audit_dummy_context(void)
{
- void *p = current->audit_context;
+ void *p = audit_context();
return !p || *(int *)p;
}
static inline void audit_free(struct task_struct *task)
@@ -255,16 +349,31 @@ static inline void audit_free(struct task_struct *task)
if (unlikely(task->audit_context))
__audit_free(task);
}
+static inline void audit_uring_entry(u8 op)
+{
+ /*
+ * We intentionally check audit_context() before audit_enabled as most
+ * Linux systems (as of ~2021) rely on systemd which forces audit to
+ * be enabled regardless of the user's audit configuration.
+ */
+ if (unlikely(audit_context() && audit_enabled))
+ __audit_uring_entry(op);
+}
+static inline void audit_uring_exit(int success, long code)
+{
+ if (unlikely(audit_context()))
+ __audit_uring_exit(success, code);
+}
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
{
- if (unlikely(current->audit_context))
+ if (unlikely(audit_context()))
__audit_syscall_entry(major, a0, a1, a2, a3);
}
static inline void audit_syscall_exit(void *pt_regs)
{
- if (unlikely(current->audit_context)) {
+ if (unlikely(audit_context())) {
int success = is_syscall_success(pt_regs);
long return_code = regs_return_value(pt_regs);
@@ -284,13 +393,9 @@ static inline void audit_getname(struct filename *name)
}
static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
- unsigned int parent) {
- if (unlikely(!audit_dummy_context())) {
- unsigned int flags = 0;
- if (parent)
- flags |= AUDIT_INODE_PARENT;
- __audit_inode(name, dentry, flags);
- }
+ unsigned int aflags) {
+ if (unlikely(!audit_dummy_context()))
+ __audit_inode(name, dentry, aflags);
}
static inline void audit_file(struct file *file)
{
@@ -312,16 +417,6 @@ static inline void audit_inode_child(struct inode *parent,
}
void audit_core_dumps(long signr);
-static inline void audit_seccomp(unsigned long syscall, long signr, int code)
-{
- if (!audit_enabled)
- return;
-
- /* Force a record to be reported if a signal was delivered. */
- if (signr || unlikely(!audit_dummy_context()))
- __audit_seccomp(syscall, signr, code);
-}
-
static inline void audit_ptrace(struct task_struct *t)
{
if (unlikely(!audit_dummy_context()))
@@ -329,21 +424,6 @@ static inline void audit_ptrace(struct task_struct *t)
}
/* Private API (for audit.c only) */
-extern unsigned int audit_serial(void);
-extern int auditsc_get_stamp(struct audit_context *ctx,
- struct timespec64 *t, unsigned int *serial);
-extern int audit_set_loginuid(kuid_t loginuid);
-
-static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
-{
- return tsk->loginuid;
-}
-
-static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
-{
- return tsk->sessionid;
-}
-
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
extern void __audit_bprm(struct linux_binprm *bprm);
@@ -351,7 +431,7 @@ extern int __audit_socketcall(int nargs, unsigned long *args);
extern int __audit_sockaddr(int len, void *addr);
extern void __audit_fd_pair(int fd1, int fd2);
extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
-extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout);
+extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout);
extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification);
extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
@@ -359,7 +439,13 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *old);
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
-extern void __audit_log_kern_module(char *name);
+extern void __audit_openat2_how(struct open_how *how);
+extern void __audit_log_kern_module(const char *name);
+extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar);
+extern void __audit_tk_injoffset(struct timespec64 offset);
+extern void __audit_ntp_log(const struct audit_ntp_data *ad);
+extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
+ enum audit_nfcfgop op, gfp_t gfp);
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
@@ -412,7 +498,7 @@ static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
if (unlikely(!audit_dummy_context()))
__audit_mq_open(oflag, mode, attr);
}
-static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout)
+static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout)
{
if (unlikely(!audit_dummy_context()))
__audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout);
@@ -450,12 +536,65 @@ static inline void audit_mmap_fd(int fd, int flags)
__audit_mmap_fd(fd, flags);
}
-static inline void audit_log_kern_module(char *name)
+static inline void audit_openat2_how(struct open_how *how)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_openat2_how(how);
+}
+
+static inline void audit_log_kern_module(const char *name)
{
if (!audit_dummy_context())
__audit_log_kern_module(name);
}
+static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
+{
+ if (audit_enabled)
+ __audit_fanotify(response, friar);
+}
+
+static inline void audit_tk_injoffset(struct timespec64 offset)
+{
+ /* ignore no-op events */
+ if (offset.tv_sec == 0 && offset.tv_nsec == 0)
+ return;
+
+ if (!audit_dummy_context())
+ __audit_tk_injoffset(offset);
+}
+
+static inline void audit_ntp_init(struct audit_ntp_data *ad)
+{
+ memset(ad, 0, sizeof(*ad));
+}
+
+static inline void audit_ntp_set_old(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{
+ ad->vals[type].oldval = val;
+}
+
+static inline void audit_ntp_set_new(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{
+ ad->vals[type].newval = val;
+}
+
+static inline void audit_ntp_log(const struct audit_ntp_data *ad)
+{
+ if (!audit_dummy_context())
+ __audit_ntp_log(ad);
+}
+
+static inline void audit_log_nfcfg(const char *name, u8 af,
+ unsigned int nentries,
+ enum audit_nfcfgop op, gfp_t gfp)
+{
+ if (audit_enabled)
+ __audit_log_nfcfg(name, af, nentries, op, gfp);
+}
+
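The audit_ntp_*() helpers are designed to bracket an adjtimex-style update: initialize, record the old value, apply the change, record the new value, then log. A minimal sketch with a hypothetical setter:

#include <linux/audit.h>

static void set_ntp_freq(long long *freq, long long newfreq)
{
	struct audit_ntp_data ad;

	audit_ntp_init(&ad);
	audit_ntp_set_old(&ad, AUDIT_NTP_FREQ, *freq);

	*freq = newfreq;	/* the actual update */

	audit_ntp_set_new(&ad, AUDIT_NTP_FREQ, *freq);
	audit_ntp_log(&ad);	/* no-op when the context is dummy */
}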
extern int audit_n_rules;
extern int audit_signals;
#else /* CONFIG_AUDITSYSCALL */
@@ -465,6 +604,10 @@ static inline int audit_alloc(struct task_struct *task)
}
static inline void audit_free(struct task_struct *task)
{ }
+static inline void audit_uring_entry(u8 op)
+{ }
+static inline void audit_uring_exit(int success, long code)
+{ }
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -475,23 +618,21 @@ static inline bool audit_dummy_context(void)
{
return true;
}
+static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
+{ }
+static inline struct audit_context *audit_context(void)
+{
+ return NULL;
+}
static inline struct filename *audit_reusename(const __user char *name)
{
return NULL;
}
static inline void audit_getname(struct filename *name)
{ }
-static inline void __audit_inode(struct filename *name,
- const struct dentry *dentry,
- unsigned int flags)
-{ }
-static inline void __audit_inode_child(struct inode *parent,
- const struct dentry *dentry,
- const unsigned char type)
-{ }
static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
- unsigned int parent)
+ unsigned int aflags)
{ }
static inline void audit_file(struct file *file)
{
@@ -505,23 +646,11 @@ static inline void audit_inode_child(struct inode *parent,
{ }
static inline void audit_core_dumps(long signr)
{ }
-static inline void __audit_seccomp(unsigned long syscall, long signr, int code)
-{ }
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{ }
-static inline int auditsc_get_stamp(struct audit_context *ctx,
- struct timespec64 *t, unsigned int *serial)
-{
- return 0;
-}
-static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
-{
- return INVALID_UID;
-}
-static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
-{
- return -1;
-}
+static inline void audit_seccomp_actions_logged(const char *names,
+ const char *old_names, int res)
+{ }
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
@@ -549,7 +678,7 @@ static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
{ }
static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len,
unsigned int msg_prio,
- const struct timespec *abs_timeout)
+ const struct timespec64 *abs_timeout)
{ }
static inline void audit_mq_notify(mqd_t mqdes,
const struct sigevent *notification)
@@ -568,12 +697,40 @@ static inline void audit_log_capset(const struct cred *new,
static inline void audit_mmap_fd(int fd, int flags)
{ }
-static inline void audit_log_kern_module(char *name)
-{
-}
+static inline void audit_openat2_how(struct open_how *how)
+{ }
+
+static inline void audit_log_kern_module(const char *name)
+{ }
+
+static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
+{ }
+
+static inline void audit_tk_injoffset(struct timespec64 offset)
+{ }
+
+static inline void audit_ntp_init(struct audit_ntp_data *ad)
+{ }
+
+static inline void audit_ntp_set_old(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{ }
+
+static inline void audit_ntp_set_new(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{ }
+
+static inline void audit_ntp_log(const struct audit_ntp_data *ad)
+{ }
static inline void audit_ptrace(struct task_struct *t)
{ }
+
+static inline void audit_log_nfcfg(const char *name, u8 af,
+ unsigned int nentries,
+ enum audit_nfcfgop op, gfp_t gfp)
+{ }
+
#define audit_n_rules 0
#define audit_signals 0
#endif /* CONFIG_AUDITSYSCALL */
@@ -583,9 +740,4 @@ static inline bool audit_loginuid_set(struct task_struct *tsk)
return uid_valid(audit_get_loginuid(tsk));
}
-static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
-{
- audit_log_n_string(ab, buf, strlen(buf));
-}
-
#endif
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
new file mode 100644
index 000000000000..0e34d673ef17
--- /dev/null
+++ b/include/linux/audit_arch.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* audit_arch.h -- Arch layer specific support for audit
+ *
+ * Copyright 2021 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * Author: Richard Guy Briggs <rgb@redhat.com>
+ */
+#ifndef _LINUX_AUDIT_ARCH_H_
+#define _LINUX_AUDIT_ARCH_H_
+
+enum auditsc_class_t {
+ AUDITSC_NATIVE = 0,
+ AUDITSC_COMPAT,
+ AUDITSC_OPEN,
+ AUDITSC_OPENAT,
+ AUDITSC_SOCKETCALL,
+ AUDITSC_EXECVE,
+ AUDITSC_OPENAT2,
+
+ AUDITSC_NVALS /* count */
+};
+
+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+
+#endif
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 28c15050ebe6..6e1ca6f95f80 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2008 Red Hat, Inc. All rights reserved.
* Copyright 2008 Ian Kent <raven@themaw.net>
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
*/
#ifndef _LINUX_AUTO_DEV_IOCTL_H
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index b8f814c95cf5..893f952ca40c 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 1997 Transmeta Corporation - All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
*/
#ifndef _LINUX_AUTO_FS_H
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
new file mode 100644
index 000000000000..4086afd0cc6b
--- /dev/null
+++ b/include/linux/auxiliary_bus.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020 Intel Corporation
+ *
+ * Please see Documentation/driver-api/auxiliary_bus.rst for more information.
+ */
+
+#ifndef _AUXILIARY_BUS_H_
+#define _AUXILIARY_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * DOC: DEVICE_LIFESPAN
+ *
+ * The registering driver is the entity that allocates memory for the
+ * auxiliary_device and registers it on the auxiliary bus. It is important to
+ * note that, as opposed to the platform bus, the registering driver is wholly
+ * responsible for the management of the memory used for the device object.
+ *
+ * To be clear the memory for the auxiliary_device is freed in the release()
+ * callback defined by the registering driver. The registering driver should
+ * only call auxiliary_device_delete() and then auxiliary_device_uninit() when
+ * it is done with the device. The release() function is then called
+ * automatically if and when other code releases its reference to the device.
+ *
+ * A parent object, defined in the shared header file, contains the
+ * auxiliary_device. It also contains a pointer to the shared object(s), which
+ * also is defined in the shared header. Both the parent object and the shared
+ * object(s) are allocated by the registering driver. This layout allows the
+ * auxiliary_driver's registering module to perform a container_of() call to go
+ * from the pointer to the auxiliary_device, that is passed during the call to
+ * the auxiliary_driver's probe function, up to the parent object, and then
+ * have access to the shared object(s).
+ *
+ * The memory for the shared object(s) must have a lifespan equal to, or
+ * greater than, the lifespan of the memory for the auxiliary_device. The
+ * auxiliary_driver should only consider that the shared object is valid as
+ * long as the auxiliary_device is still registered on the auxiliary bus. It
+ * is up to the registering driver to manage (e.g. free or keep available) the
+ * memory for the shared object beyond the life of the auxiliary_device.
+ *
+ * The registering driver must unregister all auxiliary devices before its own
+ * driver.remove() is completed. An easy way to ensure this is to use the
+ * devm_add_action_or_reset() call to register a function against the parent
+ * device which unregisters the auxiliary device object(s).
+ *
+ * Finally, any operations which operate on the auxiliary devices must continue
+ * to function (if only to return an error) after the registering driver
+ * unregisters the auxiliary device.
+ */
+
+/**
+ * struct auxiliary_device - auxiliary device object.
+ * @dev: Device,
+ * The release and parent fields of the device structure must be filled
+ * in
+ * @name: Match name found by the auxiliary device driver,
+ * @id: unique identifier if multiple devices of the same name are exported,
+ * @sysfs: embedded struct which hold all sysfs related fields,
+ * @sysfs.irqs: irqs xarray contains irq indices which are used by the device,
+ * @sysfs.lock: Synchronize irq sysfs creation,
+ * @sysfs.irq_dir_exists: whether "irqs" directory exists,
+ *
+ * An auxiliary_device represents a part of its parent device's functionality.
+ * It is given a name that, combined with the registering driver's
+ * KBUILD_MODNAME, creates a match_name that is used for driver binding, and an
+ * id that, combined with the match_name, provides a unique name to register with
+ * the bus subsystem. For example, a driver registering an auxiliary device is
+ * named 'foo_mod.ko' and the subdevice is named 'foo_dev'. The match name is
+ * therefore 'foo_mod.foo_dev'.
+ *
+ * Registering an auxiliary_device is a three-step process.
+ *
+ * First, a 'struct auxiliary_device' needs to be defined or allocated for each
+ * sub-device desired. The name, id, dev.release, and dev.parent fields of
+ * this structure must be filled in as follows.
+ *
+ * The 'name' field is to be given a name that is recognized by the auxiliary
+ * driver. If two auxiliary_devices with the same match_name, e.g.
+ * "foo_mod.foo_dev", are registered onto the bus, they must have unique id
+ * values (e.g. "x" and "y") so that the registered devices' names are
+ * "foo_mod.foo_dev.x" and "foo_mod.foo_dev.y". If match_name + id are not
+ * unique, then the device_add fails and generates an error message.
+ *
+ * The auxiliary_device.dev.type.release or auxiliary_device.dev.release must
+ * be populated with a non-NULL pointer to successfully register the
+ * auxiliary_device. This release call is where resources associated with the
+ * auxiliary device must be freed, because once the device is placed on the
+ * bus the parent driver cannot tell what other code may have a reference to
+ * this data.
+ *
+ * The auxiliary_device.dev.parent should also be set, typically to the
+ * registering driver's device.
+ *
+ * Second, call auxiliary_device_init(), which checks several aspects of the
+ * auxiliary_device struct and performs a device_initialize(). After this step
+ * completes, any error state must have a call to auxiliary_device_uninit() in
+ * its resolution path.
+ *
+ * The third and final step in registering an auxiliary_device is to perform a
+ * call to auxiliary_device_add(), which sets the name of the device and adds
+ * the device to the bus.
+ *
+ * .. code-block:: c
+ *
+ * #define MY_DEVICE_NAME "foo_dev"
+ *
+ * ...
+ *
+ * struct auxiliary_device *my_aux_dev = my_aux_dev_alloc(xxx);
+ *
+ * // Step 1:
+ * my_aux_dev->name = MY_DEVICE_NAME;
+ * my_aux_dev->id = my_unique_id_alloc(xxx);
+ * my_aux_dev->dev.release = my_aux_dev_release;
+ * my_aux_dev->dev.parent = my_dev;
+ *
+ * // Step 2:
+ * if (auxiliary_device_init(my_aux_dev))
+ * goto fail;
+ *
+ * // Step 3:
+ * if (auxiliary_device_add(my_aux_dev)) {
+ * auxiliary_device_uninit(my_aux_dev);
+ * goto fail;
+ * }
+ *
+ * ...
+ *
+ *
+ * Unregistering an auxiliary_device is a two-step process to mirror the
+ * register process. First call auxiliary_device_delete(), then call
+ * auxiliary_device_uninit().
+ *
+ * .. code-block:: c
+ *
+ * auxiliary_device_delete(my_dev->my_aux_dev);
+ * auxiliary_device_uninit(my_dev->my_aux_dev);
+ */
+struct auxiliary_device {
+ struct device dev;
+ const char *name;
+ u32 id;
+ struct {
+ struct xarray irqs;
+ struct mutex lock; /* Synchronize irq sysfs creation */
+ bool irq_dir_exists;
+ } sysfs;
+};
+
+/**
+ * struct auxiliary_driver - Definition of an auxiliary bus driver
+ * @probe: Called when a matching device is added to the bus.
+ * @remove: Called when device is removed from the bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device to sleep mode. Usually to a power state.
+ * @resume: Called to bring a device from sleep mode.
+ * @name: Driver name.
+ * @driver: Core driver structure.
+ * @id_table: Table of devices this driver should match on the bus.
+ *
+ * Auxiliary drivers follow the standard driver model convention, where
+ * discovery/enumeration is handled by the core, and drivers provide probe()
+ * and remove() methods. They support power management and shutdown
+ * notifications using the standard conventions.
+ *
+ * Auxiliary drivers register themselves with the bus by calling
+ * auxiliary_driver_register(). The id_table contains the match_names of
+ * auxiliary devices that a driver can bind with.
+ *
+ * .. code-block:: c
+ *
+ * static const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * {},
+ * };
+ *
+ * MODULE_DEVICE_TABLE(auxiliary, my_auxiliary_id_table);
+ *
+ * struct auxiliary_driver my_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_drv_probe,
+ * .remove = my_drv_remove
+ * };
+ */
+struct auxiliary_driver {
+ int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
+ void (*remove)(struct auxiliary_device *auxdev);
+ void (*shutdown)(struct auxiliary_device *auxdev);
+ int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state);
+ int (*resume)(struct auxiliary_device *auxdev);
+ const char *name;
+ struct device_driver driver;
+ const struct auxiliary_device_id *id_table;
+};
+
+static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
+{
+ return dev_get_drvdata(&auxdev->dev);
+}
+
+static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
+{
+ dev_set_drvdata(&auxdev->dev, data);
+}
+
+static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
+{
+ return container_of(dev, struct auxiliary_device, dev);
+}
+
+static inline const struct auxiliary_driver *to_auxiliary_drv(const struct device_driver *drv)
+{
+ return container_of(drv, struct auxiliary_driver, driver);
+}
+
+int auxiliary_device_init(struct auxiliary_device *auxdev);
+int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname);
+#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME)
+
+#ifdef CONFIG_SYSFS
+int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq);
+void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev,
+ int irq);
+#else /* CONFIG_SYSFS */
+static inline int
+auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq)
+{
+ return 0;
+}
+
+static inline void
+auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq) {}
+#endif
+
+static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev)
+{
+ mutex_destroy(&auxdev->sysfs.lock);
+ put_device(&auxdev->dev);
+}
+
+static inline void auxiliary_device_delete(struct auxiliary_device *auxdev)
+{
+ device_del(&auxdev->dev);
+}
+
+int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner,
+ const char *modname);
+#define auxiliary_driver_register(auxdrv) \
+ __auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME)
+
+void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+
+struct auxiliary_device *auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id);
+void auxiliary_device_destroy(void *auxdev);
+
+struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id);
+
+#define devm_auxiliary_device_create(dev, devname, platform_data) \
+ __devm_auxiliary_device_create(dev, KBUILD_MODNAME, devname, \
+ platform_data, 0)
+
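A hedged sketch of the managed path: devm_auxiliary_device_create() binds the auxiliary device's lifetime to @dev, so the caller skips the delete/uninit pair. The probe function and shared data are hypothetical, and the sketch assumes the helper returns NULL on failure:

#include <linux/auxiliary_bus.h>
#include <linux/errno.h>

static int parent_probe_aux(struct device *dev, void *shared)
{
	struct auxiliary_device *adev;

	/* match_name becomes "<KBUILD_MODNAME>.foo_dev" with id 0 */
	adev = devm_auxiliary_device_create(dev, "foo_dev", shared);
	if (!adev)
		return -ENODEV;

	return 0;
}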
+/**
+ * module_auxiliary_driver() - Helper macro for registering an auxiliary driver
+ * @__auxiliary_driver: auxiliary driver struct
+ *
+ * Helper macro for auxiliary drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ *
+ * .. code-block:: c
+ *
+ * module_auxiliary_driver(my_drv);
+ */
+#define module_auxiliary_driver(__auxiliary_driver) \
+ module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister)
+
+#endif /* _AUXILIARY_BUS_H_ */
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 3e0fbe441763..407f7005e6d6 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -1,8 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_AUXVEC_H
#define _LINUX_AUXVEC_H
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 22 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/average.h b/include/linux/average.h
index 7ddaf340d2ac..a1a8f09631ce 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -1,6 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_AVERAGE_H
#define _LINUX_AVERAGE_H
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
/*
* Exponentially weighted moving average (EWMA)
*
@@ -48,7 +53,7 @@
static inline void ewma_##name##_add(struct ewma_##name *e, \
unsigned long val) \
{ \
- unsigned long internal = ACCESS_ONCE(e->internal); \
+ unsigned long internal = READ_ONCE(e->internal); \
unsigned long weight_rcp = ilog2(_weight_rcp); \
unsigned long precision = _precision; \
\
@@ -57,10 +62,10 @@
BUILD_BUG_ON((_precision) > 30); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
\
- ACCESS_ONCE(e->internal) = internal ? \
+ WRITE_ONCE(e->internal, internal ? \
(((internal << weight_rcp) - internal) + \
(val << precision)) >> weight_rcp : \
- (val << precision); \
+ (val << precision)); \
}
#endif /* _LINUX_AVERAGE_H */
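For context on the hunk above: DECLARE_EWMA(name, _precision, _weight_rcp) generates the struct and the init/read/add helpers being patched. A hedged usage sketch; the 'rssi' instance is hypothetical, though similar ones exist in wireless drivers:

#include <linux/average.h>

/* 8 fractional bits; each new sample carries weight 1/16 */
DECLARE_EWMA(rssi, 8, 16)

static struct ewma_rssi avg_rssi;	/* ewma_rssi_init() before first use */

static void record_rssi(unsigned long sample)
{
	ewma_rssi_add(&avg_rssi, sample);
}

static unsigned long current_rssi(void)
{
	return ewma_rssi_read(&avg_rssi);
}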
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index c893b9520a67..11bdab5522fd 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -1,35 +1,18 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2022, Intel Corporation. */
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/overflow.h>
+#include <uapi/linux/if_ether.h>
+
/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the drivers for all devices starting from our 40G product line
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
@@ -43,8 +26,8 @@
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value
- * is of status_code type, defined in the shared type.h.
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined here.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
@@ -62,19 +45,27 @@
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
- VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
- VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
@@ -84,6 +75,8 @@ enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};
/* for hsplit_0 field of Rx HMC context */
@@ -96,6 +89,9 @@ enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
+enum virtchnl_bw_limit_type {
+ VIRTCHNL_BW_SHAPER = 0,
+};
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
@@ -126,37 +122,60 @@ enum virtchnl_ops {
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
+ /* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HASHCFG = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+ /* opcode 34 - 43 are reserved */
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
+ VIRTCHNL_OP_ADD_RSS_CFG = 45,
+ VIRTCHNL_OP_DEL_RSS_CFG = 46,
+ VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
+ VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
+ VIRTCHNL_OP_ADD_VLAN_V2 = 52,
+ VIRTCHNL_OP_DEL_VLAN_V2 = 53,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+ /* opcode 58 and 59 are reserved */
+ VIRTCHNL_OP_1588_PTP_GET_CAPS = 60,
+ VIRTCHNL_OP_1588_PTP_GET_TIME = 61,
+ /* opcode 62 - 65 are reserved */
+ VIRTCHNL_OP_GET_QOS_CAPS = 66,
+ /* opcode 68 through 111 are reserved */
+ VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+ VIRTCHNL_OP_CONFIG_QUANTA = 113,
+ VIRTCHNL_OP_MAX,
};
-/* This macro is used to generate a compilation error if a structure
+/* These macros are used to generate compilation errors if a structure/union
* is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum virtchnl_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-
-/* Message descriptions and data structures.*/
+/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
@@ -216,30 +235,45 @@ enum virtchnl_vsi_type {
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
- enum virtchnl_vsi_type vsi_type;
+
+ /* see enum virtchnl_vsi_type */
+ s32 vsi_type;
u16 qset_handle;
u8 default_mac_addr[ETH_ALEN];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
-/* VF offload flags
+/* VF capability flags
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
-#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
-#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
+#define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
+#define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
+/* used to negotiate communicating link speeds in Mbps */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11)
+#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
+#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
+#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
+#define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
+#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
+#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29)
+#define VIRTCHNL_VF_CAP_PTP BIT(31)
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -251,14 +285,15 @@ struct virtchnl_vf_resource {
u16 max_vectors;
u16 max_mtu;
- u32 vf_offload_flags;
+ u32 vf_cap_flags;
u32 rss_key_size;
u32 rss_lut_size;
- struct virtchnl_vsi_resource vsi_res[1];
+ struct virtchnl_vsi_resource vsi_res[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
+#define virtchnl_vf_resource_LEGACY_SIZEOF 36
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
@@ -278,10 +313,70 @@ struct virtchnl_txq_info {
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+/* RX descriptor IDs (range from 0 to 63) */
+enum virtchnl_rx_desc_ids {
+ VIRTCHNL_RXDID_0_16B_BASE = 0,
+ VIRTCHNL_RXDID_1_32B_BASE = 1,
+ VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
+ VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
+ VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
+ VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
+ VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
+ VIRTCHNL_RXDID_7_HW_RSVD = 7,
+ /* 8 through 15 are reserved */
+ VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
+ VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
+ VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
+ VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 = 19,
+ VIRTCHNL_RXDID_20_COMMS_AUX_FLOW = 20,
+ VIRTCHNL_RXDID_21_COMMS_AUX_TCP = 21,
+ /* 22 through 63 are reserved */
+};
+
+#define VIRTCHNL_RXDID_BIT(x) BIT_ULL(VIRTCHNL_RXDID_##x)
+
+/* RX descriptor ID bitmasks */
+enum virtchnl_rx_desc_id_bitmasks {
+ VIRTCHNL_RXDID_0_16B_BASE_M = VIRTCHNL_RXDID_BIT(0_16B_BASE),
+ VIRTCHNL_RXDID_1_32B_BASE_M = VIRTCHNL_RXDID_BIT(1_32B_BASE),
+ VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL_RXDID_BIT(2_FLEX_SQ_NIC),
+ VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = VIRTCHNL_RXDID_BIT(3_FLEX_SQ_SW),
+ VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = VIRTCHNL_RXDID_BIT(4_FLEX_SQ_NIC_VEB),
+ VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M = VIRTCHNL_RXDID_BIT(5_FLEX_SQ_NIC_ACL),
+ VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M = VIRTCHNL_RXDID_BIT(6_FLEX_SQ_NIC_2),
+ VIRTCHNL_RXDID_7_HW_RSVD_M = VIRTCHNL_RXDID_BIT(7_HW_RSVD),
+ /* 8 through 15 are reserved */
+ VIRTCHNL_RXDID_16_COMMS_GENERIC_M = VIRTCHNL_RXDID_BIT(16_COMMS_GENERIC),
+ VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M = VIRTCHNL_RXDID_BIT(17_COMMS_AUX_VLAN),
+ VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M = VIRTCHNL_RXDID_BIT(18_COMMS_AUX_IPV4),
+ VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M = VIRTCHNL_RXDID_BIT(19_COMMS_AUX_IPV6),
+ VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M = VIRTCHNL_RXDID_BIT(20_COMMS_AUX_FLOW),
+ VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M = VIRTCHNL_RXDID_BIT(21_COMMS_AUX_TCP),
+ /* 22 through 63 are reserved */
+};
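
As a sketch (the helper name is illustrative, not from this header), a VF
would test the 64-bit bitmap returned by VIRTCHNL_OP_GET_SUPPORTED_RXDIDS
against these masks before programming a flexible descriptor format:

	/* true if the PF exposes the flex NIC descriptor layout */
	static bool vf_flex_rxdid_supported(u64 supported_rxdids)
	{
		return !!(supported_rxdids & VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M);
	}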
+
+/* virtchnl_rxq_info_flags - definition of bits in the flags field of the
+ * virtchnl_rxq_info structure.
+ *
+ * @VIRTCHNL_PTP_RX_TSTAMP: request to enable Rx timestamping
+ *
+ * Other flag bits are currently reserved and they may be extended in the
+ * future.
+ */
+enum virtchnl_rxq_info_flags {
+ VIRTCHNL_PTP_RX_TSTAMP = BIT(0),
+};
+
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
+ * PF configures requested queue and returns a status code. Setting
+ * the crc_disable flag to 1 disables CRC stripping for each queue in
+ * the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC offload
+ * must have been negotiated prior to sending this info or the PF will
+ * ignore the request. The flag should be set identically for all of
+ * the queues of a VF.
*/
/* Rx queue config info */
@@ -293,9 +388,19 @@ struct virtchnl_rxq_info {
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
- u32 pad1;
+ u8 crc_disable;
+ /* see enum virtchnl_rx_desc_ids;
+ * only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported. Note
+ * that when the offload is not supported, the descriptor format aligns
+ * with VIRTCHNL_RXDID_1_32B_BASE.
+ */
+ enum virtchnl_rx_desc_ids rxdid:8;
+ enum virtchnl_rxq_info_flags flags:8; /* see virtchnl_rxq_info_flags */
+ u8 pad1;
u64 dma_ring_addr;
- enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+ /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+ s32 rx_split_pos;
u32 pad2;
};
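
A minimal sketch of filling the new fields (the function and the numeric
values are illustrative; the struct's leading members are elided from this
hunk): fall back to the 32-byte base descriptor unless
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC was negotiated.

	static void vf_fill_rxq_info(struct virtchnl_rxq_info *rxq,
				     u64 ring_dma, bool flex_negotiated)
	{
		rxq->databuffer_size = 2048;	/* illustrative values */
		rxq->max_pkt_size = 1514;
		rxq->crc_disable = 0;		/* keep CRC stripping enabled */
		rxq->rxdid = flex_negotiated ? VIRTCHNL_RXDID_2_FLEX_SQ_NIC :
					       VIRTCHNL_RXDID_1_32B_BASE;
		rxq->dma_ring_addr = ring_dma;
	}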
@@ -307,6 +412,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
@@ -320,17 +428,38 @@ struct virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
u32 pad;
- struct virtchnl_queue_pair_info qpair[1];
+ struct virtchnl_queue_pair_info qpair[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
+#define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init, but additional
+ * queues must be negotiated. This is a best-effort request, as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
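
A hedged sketch of issuing the request (vf_send_msg() is a placeholder for
the driver's mailbox send routine, and the queue count is illustrative).
Since a successful grant triggers a VF reset, the driver must be prepared
to reinitialize afterwards.

	struct virtchnl_vf_res_request req = {
		.num_queue_pairs = 8,	/* illustrative; subject to PF policy */
	};

	vf_send_msg(VIRTCHNL_OP_REQUEST_QUEUES, &req, sizeof(req));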
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
* PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
*/
struct virtchnl_vector_map {
u16 vsi_id;
@@ -345,10 +474,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
struct virtchnl_irq_map_info {
u16 num_vectors;
- struct virtchnl_vector_map vecmap[1];
+ struct virtchnl_vector_map vecmap[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
+#define virtchnl_irq_map_info_LEGACY_SIZEOF 14
/* VIRTCHNL_OP_ENABLE_QUEUES
* VIRTCHNL_OP_DISABLE_QUEUES
@@ -357,6 +487,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
*/
struct virtchnl_queue_select {
u16 vsi_id;
@@ -379,9 +512,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
* PF removes the filters and returns status.
*/
+/* VIRTCHNL_ETHER_ADDR_LEGACY
+ * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
+ * bytes. Moving forward all VF drivers should not set type to
+ * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
+ * behavior. The control plane function (i.e. PF) can use a best effort method
+ * of tracking the primary/device unicast in this case, but there is no
+ * guarantee and functionality depends on the implementation of the PF.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_PRIMARY
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
+ * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
+ * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
+ * function (i.e. PF) to accurately track and use this MAC address for
+ * displaying on the host and for VM/function reset.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_EXTRA
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
+ * unicast and/or multicast filters that are being added/deleted via
+ * VIRTCHNL_OP_ADD_ETH_ADDR/VIRTCHNL_OP_DEL_ETH_ADDR respectively.
+ */
struct virtchnl_ether_addr {
u8 addr[ETH_ALEN];
- u8 pad[2];
+ u8 type;
+#define VIRTCHNL_ETHER_ADDR_LEGACY 0
+#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
+#define VIRTCHNL_ETHER_ADDR_EXTRA 2
+#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
+ u8 pad;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
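
A minimal sketch of tagging the device's own MAC as primary when building
an add request (the helper is illustrative and assumes the kernel's usual
memcpy from <linux/string.h>); the PF masks @type with
VIRTCHNL_ETHER_ADDR_TYPE_MASK when parsing.

	static void vf_fill_primary_mac(struct virtchnl_ether_addr *entry,
					const u8 mac[ETH_ALEN])
	{
		memcpy(entry->addr, mac, ETH_ALEN);
		entry->type = VIRTCHNL_ETHER_ADDR_PRIMARY;
	}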
@@ -389,10 +549,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
struct virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
- struct virtchnl_ether_addr list[1];
+ struct virtchnl_ether_addr list[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
+#define virtchnl_ether_addr_list_LEGACY_SIZEOF 12
/* VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
@@ -411,10 +572,357 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
struct virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
- u16 vlan_id[1];
+ u16 vlan_id[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
+#define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6
+
+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
+ * structures and opcodes.
+ *
+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
+ * populates it, the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
+ * would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this as VLAN filtering can be supported on both 0x8100
+ * and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
+ * supported by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this as VLAN stripping can be supported on either
+ * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
+ * the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
+ * certain VLAN capability can be toggled. For example if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
+ * set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+ VIRTCHNL_VLAN_UNSUPPORTED = 0,
+ VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
+ VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
+ VIRTCHNL_VLAN_PRIO = BIT(24),
+ VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
+ VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
+ VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
+ VIRTCHNL_VLAN_TOGGLE = BIT(31),
+};
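
A sketch of applying the AND/XOR convention described above (the helper
name is illustrative): two ethertypes can be active at once only when the
AND bit accompanies them in the capability field.

	static bool vlan_8100_88a8_concurrent(u32 caps)
	{
		const u32 both = VIRTCHNL_VLAN_ETHERTYPE_8100 |
				 VIRTCHNL_VLAN_ETHERTYPE_88A8;

		return (caps & both) == both &&
		       (caps & VIRTCHNL_VLAN_ETHERTYPE_AND);
	}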
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer or inner are never
+ * referring to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+ u32 outer;
+ u32 inner;
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN as only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If it exceeds this amount and tries to add another filter,
+ * then the request will be rejected by the PF. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+ struct virtchnl_vlan_supported_caps filtering_support;
+ u32 ethertype_init;
+ u16 max_filters;
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping affect the ethertype(s) specified for insertion and vice versa
+ * as well. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+ VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields. See the definition of the
+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
+ * enumeration.
+ */
+struct virtchnl_vlan_offload_caps {
+ struct virtchnl_vlan_supported_caps stripping_support;
+ struct virtchnl_vlan_supported_caps insertion_support;
+ u32 ethertype_init;
+ u8 ethertype_match;
+ u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
+
+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * VF sends this message to determine its VLAN capabilities.
+ *
+ * PF will mark which capabilities it supports based on hardware support and
+ * current configuration. For example, if a port VLAN is configured the PF will
+ * not allow outer VLAN filtering, stripping, or insertion to be configured so
+ * it will block these features from the VF.
+ *
+ * The VF will need to cross-reference its capabilities with the PF's
+ * capabilities in the response message from the PF to determine the VLAN
+ * support.
+ */
+struct virtchnl_vlan_caps {
+ struct virtchnl_vlan_filtering_caps filtering;
+ struct virtchnl_vlan_offload_caps offloads;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
+
+struct virtchnl_vlan {
+ u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
+ u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
+ * filtering caps
+ */
+ u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
+ * filtering caps. Note that tpid here does not refer to
+ * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
+ * actual 2-byte VLAN TPID
+ */
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
+
+struct virtchnl_vlan_filter {
+ struct virtchnl_vlan inner;
+ struct virtchnl_vlan outer;
+ u8 pad[16];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
+
+/* VIRTCHNL_OP_ADD_VLAN_V2
+ * VIRTCHNL_OP_DEL_VLAN_V2
+ *
+ * VF sends these messages to add/del one or more VLAN tag filters for Rx
+ * traffic.
+ *
+ * The PF attempts to add the filters and returns status.
+ *
+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
+ */
+struct virtchnl_vlan_filter_list_v2 {
+ u16 vport_id;
+ u16 num_elements;
+ u8 pad[4];
+ struct virtchnl_vlan_filter filters[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
+#define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
+
+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ *
+ * VF sends this message to enable or disable VLAN stripping or insertion. It
+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
+ * allowed and whether or not it's allowed to enable/disable the specific
+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload
+ * messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner
+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
+ * case means the outer most or single VLAN from the VF's perspective. This is
+ * because no outer offloads are supported. See the comments above the
+ * virtchnl_vlan_supported_caps structure for more details.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * The reason that VLAN TPID(s) are not being used for the
+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's
+ * possible a device could support VLAN insertion and/or stripping offload on
+ * multiple ethertypes concurrently, so this method allows a VF to request
+ * multiple ethertypes in one message using the virtchnl_vlan_support
+ * enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ */
+struct virtchnl_vlan_setting {
+ u32 outer_ethertype_setting;
+ u32 inner_ethertype_setting;
+ u16 vport_id;
+ u8 pad[6];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
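
The last scenario above, written as a compilable initializer (the vport_id
value is illustrative):

	struct virtchnl_vlan_setting setting = {
		.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8,
		.vport_id = 5,	/* as assigned to the VF on initialization */
	};
	/* sent with VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 */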
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
@@ -451,31 +959,143 @@ VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
struct virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
+ u8 key[]; /* RSS hash key, packed bytes */
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
+#define virtchnl_rss_key_LEGACY_SIZEOF 6
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
+ u8 lut[]; /* RSS lookup table */
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
+#define virtchnl_rss_lut_LEGACY_SIZEOF 6
-/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
+/* VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS
+ * VIRTCHNL_OP_SET_RSS_HASHCFG
+ * VF sends these messages to get and set the hash filter configuration for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
*/
-struct virtchnl_rss_hena {
- u64 hena;
+struct virtchnl_rss_hashcfg {
+ /* Bits defined by enum libie_filter_pctype */
+ u64 hashcfg;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hashcfg);
+
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_HFUNC
+ * VF sends this message to configure the RSS hash function. Only supported
+ * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation.
+ * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
+ * by the PF.
+ */
+struct virtchnl_rss_hfunc {
+ u16 vsi_id;
+ u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
+ u32 reserved;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
+
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+ u16 count; /* number of queues in a channel */
+ u16 offset; /* queues in a channel start from 'offset' */
+ u32 pad;
+ u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+ u32 num_tc;
+ u32 pad;
+ struct virtchnl_channel_info list[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
+#define virtchnl_tc_info_LEGACY_SIZEOF 24
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ __be16 vlan_id;
+ __be16 pad; /* reserved for future use */
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+ struct virtchnl_l4_spec tcp_spec;
+ u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+ VIRTCHNL_ACTION_PASSTHRU,
+ VIRTCHNL_ACTION_QUEUE,
+ VIRTCHNL_ACTION_Q_REGION,
+ VIRTCHNL_ACTION_MARK,
+ VIRTCHNL_ACTION_COUNT,
+};
+
+enum virtchnl_flow_type {
+ /* flow types */
+ VIRTCHNL_TCP_V4_FLOW = 0,
+ VIRTCHNL_TCP_V6_FLOW,
};
-VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+struct virtchnl_filter {
+ union virtchnl_flow_spec data;
+ union virtchnl_flow_spec mask;
+
+ /* see enum virtchnl_flow_type */
+ s32 flow_type;
+
+ /* see enum virtchnl_action */
+ s32 action;
+ u32 action_meta;
+ u8 field_flags;
+ u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
@@ -493,46 +1113,66 @@ enum virtchnl_event_codes {
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
- enum virtchnl_event_codes event;
+ /* see enum virtchnl_event_codes */
+ s32 event;
union {
+ /* If the PF driver does not support the new speed reporting
+ * capabilities then use link_event else use link_event_adv to
+ * get the speed and link information. The ability to understand
+ * new speeds is indicated by setting the capability flag
+ * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the vf_cap_flags field of the
+ * virtchnl_vf_resource struct, and can be used to determine
+ * which link event struct to use below.
+ */
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
+ u8 pad[3];
} link_event;
+ struct {
+ /* link_speed provided in Mbps */
+ u32 link_speed;
+ u8 link_status;
+ u8 pad[3];
+ } link_event_adv;
} event_data;
- int severity;
+ s32 severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
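
A sketch of consuming the event (legacy_speed_to_mbps() is a placeholder
for the driver's enum virtchnl_link_speed to Mbps conversion): pick the
layout based on the negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED capability.

	static u32 vf_link_speed_mbps(const struct virtchnl_pf_event *ev,
				      u32 vf_cap_flags)
	{
		if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
			return ev->event_data.link_event_adv.link_speed;

		return legacy_speed_to_mbps(ev->event_data.link_event.link_speed);
	}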
-/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
+/* used to specify if a ceq_idx or aeq_idx is invalid */
+#define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
+/* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
+ * VF uses this message to request PF to map RDMA vectors to RDMA queues.
+ * The request for this originates from the VF RDMA driver through
+ * a client interface between VF LAN and VF RDMA driver.
* A vector could have an AEQ and CEQ attached to it although
- * there is a single AEQ per VF IWARP instance in which case
- * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
+ * there is a single AEQ per VF RDMA instance in which case
+ * most vectors will have a VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq and a
+ * valid idx for ceq. There will never be a case where there are multiple CEQs
+ * attached to a single vector.
* PF configures interrupt mapping and returns status.
*/
-struct virtchnl_iwarp_qv_info {
+struct virtchnl_rdma_qv_info {
u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
+ u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
+ u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
u8 itr_idx;
+ u8 pad[3];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
-struct virtchnl_iwarp_qvlist_info {
+struct virtchnl_rdma_qvlist_info {
u32 num_vectors;
- struct virtchnl_iwarp_qv_info qv_info[1];
+ struct virtchnl_rdma_qv_info qv_info[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
+#define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
@@ -551,6 +1191,513 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
+#define PROTO_HDR_SHIFT 5
+#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
+
+/* VFs use these macros to configure each protocol header.
+ * They specify which protocol headers and protocol header fields, based on
+ * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
+ * @param hdr: a struct of virtchnl_proto_hdr
+ * @param hdr_type: ETH/IPV4/TCP, etc
+ * @param field: SRC/DST/TEID/SPI, etc
+ */
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
+ ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
+
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+
+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
+ ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
+ (((hdr)->type) >> PROTO_HDR_SHIFT)
+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
+ ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
+ (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
+ VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
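
A minimal usage sketch (using struct virtchnl_proto_hdr, defined further
below; the helper name is illustrative): select an IPv4 header and hash on
its source and destination addresses.

	static void vf_select_ipv4_addrs(struct virtchnl_proto_hdr *hdr)
	{
		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
	}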
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl_proto_hdr_type {
+ VIRTCHNL_PROTO_HDR_NONE,
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_S_VLAN,
+ VIRTCHNL_PROTO_HDR_C_VLAN,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ VIRTCHNL_PROTO_HDR_PPPOE,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_PFCP,
+ VIRTCHNL_PROTO_HDR_GTPC,
+ VIRTCHNL_PROTO_HDR_ECPRI,
+ VIRTCHNL_PROTO_HDR_L2TPV2,
+ VIRTCHNL_PROTO_HDR_PPP,
+ /* IPv4 and IPv6 Fragment header types are only associated to
+ * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
+ * cannot be used independently.
+ */
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+ VIRTCHNL_PROTO_HDR_GRE,
+};
+
+/* Protocol header field within a protocol header. */
+enum virtchnl_proto_hdr_field {
+ /* ETHER */
+ VIRTCHNL_PROTO_HDR_ETH_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
+ VIRTCHNL_PROTO_HDR_ETH_DST,
+ VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
+ /* S-VLAN */
+ VIRTCHNL_PROTO_HDR_S_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
+ /* C-VLAN */
+ VIRTCHNL_PROTO_HDR_C_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
+ /* IPV4 */
+ VIRTCHNL_PROTO_HDR_IPV4_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
+ VIRTCHNL_PROTO_HDR_IPV4_DST,
+ VIRTCHNL_PROTO_HDR_IPV4_DSCP,
+ VIRTCHNL_PROTO_HDR_IPV4_TTL,
+ VIRTCHNL_PROTO_HDR_IPV4_PROT,
+ VIRTCHNL_PROTO_HDR_IPV4_CHKSUM,
+ /* IPV6 */
+ VIRTCHNL_PROTO_HDR_IPV6_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
+ VIRTCHNL_PROTO_HDR_IPV6_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_TC,
+ VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
+ VIRTCHNL_PROTO_HDR_IPV6_PROT,
+ /* IPV6 Prefix */
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,
+ /* TCP */
+ VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
+ VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+ VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
+ /* UDP */
+ VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
+ VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+ VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
+ /* SCTP */
+ VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
+ VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+ VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
+ /* GTPU_IP */
+ VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
+ /* GTPU_EH */
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
+ VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
+ /* PPPOE */
+ VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
+ /* L2TPV3 */
+ VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
+ /* ESP */
+ VIRTCHNL_PROTO_HDR_ESP_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
+ /* AH */
+ VIRTCHNL_PROTO_HDR_AH_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
+ /* PFCP */
+ VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
+ VIRTCHNL_PROTO_HDR_PFCP_SEID,
+ /* GTPC */
+ VIRTCHNL_PROTO_HDR_GTPC_TEID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
+ /* ECPRI */
+ VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
+ VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
+ /* IPv4 Dummy Fragment */
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
+ /* IPv6 Extension Fragment */
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
+ /* GTPU_DWN/UP */
+ VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
+ VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
+ /* L2TPv2 */
+ VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
+ VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
+};
+
+struct virtchnl_proto_hdr {
+ /* see enum virtchnl_proto_hdr_type */
+ s32 type;
+ u32 field_selector; /* a bit mask to select field for header type */
+ u8 buffer[64];
+ /**
+ * binary buffer in network order for specific header type.
+ * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+ * header is expected to be copied into the buffer.
+ */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
+
+struct virtchnl_proto_hdrs {
+ u8 tunnel_level;
+ u8 pad[3];
+ /**
+ * Specifies where the protocol headers start from.
+ * Must be 0 when sending a raw packet request.
+ * 0 - from the outer layer
+ * 1 - from the first inner layer
+ * 2 - from the second inner layer
+ * ....
+ **/
+ u32 count; /* num of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
+ union {
+ struct virtchnl_proto_hdr
+ proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ struct {
+ u16 pkt_len;
+ u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
+
+struct virtchnl_rss_cfg {
+ struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
+
+ /* see enum virtchnl_rss_algorithm; rss algorithm type */
+ s32 rss_algorithm;
+ u8 reserved[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
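
A sketch of a complete VIRTCHNL_OP_ADD_RSS_CFG payload (the function name
is illustrative): hash TCP-over-IPv4 flows on the 4-tuple with symmetric
Toeplitz. The caller is assumed to have zeroed the structure first.

	static void vf_build_tcp4_rss_cfg(struct virtchnl_rss_cfg *cfg)
	{
		struct virtchnl_proto_hdr *ip = &cfg->proto_hdrs.proto_hdr[0];
		struct virtchnl_proto_hdr *tcp = &cfg->proto_hdrs.proto_hdr[1];

		cfg->proto_hdrs.tunnel_level = 0;	/* outer layer */
		cfg->proto_hdrs.count = 2;

		VIRTCHNL_SET_PROTO_HDR_TYPE(ip, IPV4);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ip, IPV4, SRC);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ip, IPV4, DST);

		VIRTCHNL_SET_PROTO_HDR_TYPE(tcp, TCP);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(tcp, TCP, SRC_PORT);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(tcp, TCP, DST_PORT);

		cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
	}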
+
+/* action configuration for FDIR */
+struct virtchnl_filter_action {
+ /* see enum virtchnl_action type */
+ s32 type;
+ union {
+ /* used for queue and qgroup action */
+ struct {
+ u16 index;
+ u8 region;
+ } queue;
+ /* used for count action */
+ struct {
+ /* share counter ID with other flow rules */
+ u8 shared;
+ u32 id; /* counter ID */
+ } count;
+ /* used for mark action */
+ u32 mark_id;
+ u8 reserve[32];
+ } act_conf;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
+
+#define VIRTCHNL_MAX_NUM_ACTIONS 8
+
+struct virtchnl_filter_action_set {
+ /* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
+ u32 count;
+ struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
+
+/* pattern and action for FDIR rule */
+struct virtchnl_fdir_rule {
+ struct virtchnl_proto_hdrs proto_hdrs;
+ struct virtchnl_filter_action_set action_set;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
+
+/* Status returned to VF after VF requests FDIR commands
+ * VIRTCHNL_FDIR_SUCCESS
+ * VF FDIR related request was successfully completed by the PF
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ * OP_ADD_FDIR_FILTER request failed due to no hardware resource.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
+ * OP_ADD_FDIR_FILTER request failed because the rule already exists.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
+ * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
+ * OP_ADD_FDIR_FILTER request failed due to parameter validation or
+ * lack of HW support.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
+ * OP_ADD/DEL_FDIR_FILTER request failed because programming timed out.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request failed due to parameter validation,
+ * for example, the VF queries the counter of a rule that has no counter
+ * action.
+ */
+enum virtchnl_fdir_prgm_status {
+ VIRTCHNL_FDIR_SUCCESS = 0,
+ VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
+ VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
+ VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
+};
+
+/* VIRTCHNL_OP_ADD_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only and rule_cfg. PF will return flow_id
+ * if the request is successfully done and return add_status to VF.
+ */
+struct virtchnl_fdir_add {
+ u16 vsi_id; /* INPUT */
+ /*
+ * 1 for validating a fdir rule, 0 for creating a fdir rule.
+ * Validation and creation share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
+ */
+ u16 validate_only; /* INPUT */
+ u32 flow_id; /* OUTPUT */
+ struct virtchnl_fdir_rule rule_cfg; /* INPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
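
A sketch of the validate-then-commit pattern, both steps sharing
VIRTCHNL_OP_ADD_FDIR_FILTER (vsi and the send path are placeholders):

	struct virtchnl_fdir_add add = {
		.vsi_id = vsi,		/* placeholder */
		.validate_only = 1,	/* dry run first */
	};

	/* fill add.rule_cfg, send VIRTCHNL_OP_ADD_FDIR_FILTER and check
	 * add.status; on VIRTCHNL_FDIR_SUCCESS, resend with
	 * validate_only = 0 and keep add.flow_id for a later
	 * VIRTCHNL_OP_DEL_FDIR_FILTER.
	 */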
+
+/* VIRTCHNL_OP_DEL_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return del_status to VF.
+ */
+struct virtchnl_fdir_del {
+ u16 vsi_id; /* INPUT */
+ u16 pad;
+ u32 flow_id; /* INPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+
+#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP BIT(1)
+#define VIRTCHNL_1588_PTP_CAP_READ_PHC BIT(2)
+
+/**
+ * struct virtchnl_ptp_caps - Defines the PTP caps available to the VF.
+ * @caps: On send, VF sets what capabilities it requests. On reply, PF
+ * indicates what has been enabled for this VF. The PF shall not set
+ * bits which were not requested by the VF.
+ * @rsvd: Reserved bits for future extension.
+ *
+ * Structure that defines the PTP capabilities available to the VF. The VF
+ * sends VIRTCHNL_OP_1588_PTP_GET_CAPS, and must fill in the ptp_caps field
+ * indicating what capabilities it is requesting. The PF will respond with the
+ * same message with the virtchnl_ptp_caps structure indicating what is
+ * enabled for the VF.
+ *
+ * VIRTCHNL_1588_PTP_CAP_RX_TSTAMP indicates that the VF receive queues have
+ * receive timestamps enabled in the flexible descriptors. Note that this
+ * requires a VF to also negotiate to enable advanced flexible descriptors in
+ * the receive path instead of the default legacy descriptor format.
+ *
+ * VIRTCHNL_1588_PTP_CAP_READ_PHC indicates that the VF may read the PHC time
+ * via the VIRTCHNL_OP_1588_PTP_GET_TIME command.
+ *
+ * Note that in the future, additional capability flags may be added which
+ * indicate additional extended support. All fields marked as reserved by this
+ * header will be set to zero. VF implementations should verify this to ensure
+ * that future extensions do not break compatibility.
+ */
+struct virtchnl_ptp_caps {
+ u32 caps;
+ u8 rsvd[44];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);
+
+/**
+ * struct virtchnl_phc_time - Contains the 64bits of PHC clock time in ns.
+ * @time: PHC time in nanoseconds
+ * @rsvd: Reserved for future extension
+ *
+ * Structure received with VIRTCHNL_OP_1588_PTP_GET_TIME. Contains the 64bits
+ * of PHC clock time in nanoseconds.
+ *
+ * VIRTCHNL_OP_1588_PTP_GET_TIME may be sent to request the current time of
+ * the PHC. This op is available in case direct access via the PHC registers
+ * is not available.
+ */
+struct virtchnl_phc_time {
+ u64 time;
+ u8 rsvd[8];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);
+
+struct virtchnl_shaper_bw {
+ /* Unit is Kbps */
+ u32 committed;
+ u32 peak;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
+
+/* VIRTCHNL_OP_GET_QOS_CAPS
+ * VF sends this message to get its QoS Caps, such as
+ * TC number, Arbiter and Bandwidth.
+ */
+struct virtchnl_qos_cap_elem {
+ u8 tc_num;
+ u8 tc_prio;
+#define VIRTCHNL_ABITER_STRICT 0
+#define VIRTCHNL_ABITER_ETS 2
+ u8 arbiter;
+#define VIRTCHNL_STRICT_WEIGHT 1
+ u8 weight;
+ enum virtchnl_bw_limit_type type;
+ union {
+ struct virtchnl_shaper_bw shaper;
+ u8 pad2[32];
+ };
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
+
+struct virtchnl_qos_cap_list {
+ u16 vsi_id;
+ u16 num_elem;
+ struct virtchnl_qos_cap_elem cap[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_qos_cap_list);
+#define virtchnl_qos_cap_list_LEGACY_SIZEOF 44
+
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+ u16 queue_id;
+ u8 tc;
+ u8 pad;
+ struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+ u16 vsi_id;
+ u16 num_queues;
+ struct virtchnl_queue_bw cfg[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queues_bw_cfg);
+#define virtchnl_queues_bw_cfg_LEGACY_SIZEOF 16
+
+enum virtchnl_queue_type {
+ VIRTCHNL_QUEUE_TYPE_TX = 0,
+ VIRTCHNL_QUEUE_TYPE_RX = 1,
+};
+
+/* structure to specify a chunk of contiguous queues */
+struct virtchnl_queue_chunk {
+ /* see enum virtchnl_queue_type */
+ s32 type;
+ u16 start_queue_id;
+ u16 num_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
+
+struct virtchnl_quanta_cfg {
+ u16 quanta_size;
+ u16 pad;
+ struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
+
+#define __vss_byone(p, member, count, old) \
+ (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
+
+#define __vss_byelem(p, member, count, old) \
+ (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))
+
+#define __vss_full(p, member, count, old) \
+ (struct_size(p, member, count) + (old - struct_size(p, member, 0)))
+
+#define __vss(type, func, p, member, count) \
+ struct type: func(p, member, count, type##_LEGACY_SIZEOF)
+
+#define virtchnl_struct_size(p, m, c) \
+ _Generic(*p, \
+ __vss(virtchnl_vf_resource, __vss_full, p, m, c), \
+ __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \
+ __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \
+ __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \
+ __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \
+ __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
+ __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_qos_cap_list, __vss_byelem, p, m, c), \
+ __vss(virtchnl_queues_bw_cfg, __vss_byelem, p, m, c), \
+ __vss(virtchnl_rss_key, __vss_byone, p, m, c), \
+ __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
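
A usage sketch of the sizing macro (relying on the kernel's struct_size()
helper from <linux/overflow.h>): because the legacy wire format reserved
room for one trailing element, virtchnl_struct_size() folds the legacy
size back in, so the result matches the old valid_len arithmetic.

	struct virtchnl_vlan_filter_list *vfl = NULL;	/* type carrier only */
	size_t len = virtchnl_struct_size(vfl, vlan_id, 2);
	/* len == virtchnl_vlan_filter_list_LEGACY_SIZEOF + 2 * sizeof(u16)
	 * == 10
	 */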
+
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
@@ -565,7 +1712,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len = 0;
+ u32 valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
@@ -585,24 +1732,23 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
- valid_len += (vqc->num_queue_pairs *
- sizeof(struct
- virtchnl_queue_pair_info));
+ valid_len = virtchnl_struct_size(vqc, qpair,
+ vqc->num_queue_pairs);
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_irq_map_info);
+ valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
- valid_len += (vimi->num_vectors *
- sizeof(struct virtchnl_vector_map));
+ valid_len = virtchnl_struct_size(vimi, vecmap,
+ vimi->num_vectors);
if (vimi->num_vectors == 0)
err_msg_format = true;
}
@@ -613,23 +1759,24 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
- valid_len = sizeof(struct virtchnl_ether_addr_list);
+ valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
- valid_len += veal->num_elements *
- sizeof(struct virtchnl_ether_addr);
+ valid_len = virtchnl_struct_size(veal, list,
+ veal->num_elements);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
- valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
- valid_len += vfl->num_elements * sizeof(u16);
+ valid_len = virtchnl_struct_size(vfl, vlan_id,
+ vfl->num_elements);
if (vfl->num_elements == 0)
err_msg_format = true;
}
@@ -640,7 +1787,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
- case VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_RDMA:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
@@ -650,50 +1797,146 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
else
err_msg_format = true;
break;
- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
+ valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
- struct virtchnl_iwarp_qvlist_info *qv =
- (struct virtchnl_iwarp_qvlist_info *)msg;
- if (qv->num_vectors == 0) {
- err_msg_format = true;
- break;
- }
- valid_len += ((qv->num_vectors - 1) *
- sizeof(struct virtchnl_iwarp_qv_info));
+ struct virtchnl_rdma_qvlist_info *qv =
+ (struct virtchnl_rdma_qvlist_info *)msg;
+
+ valid_len = virtchnl_struct_size(qv, qv_info,
+ qv->num_vectors);
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- valid_len = sizeof(struct virtchnl_rss_key);
+ valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
- valid_len += vrk->key_len - 1;
+ valid_len = virtchnl_struct_size(vrk, key,
+ vrk->key_len);
}
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- valid_len = sizeof(struct virtchnl_rss_lut);
+ valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
- valid_len += vrl->lut_entries - 1;
+ valid_len = virtchnl_struct_size(vrl, lut,
+ vrl->lut_entries);
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ valid_len = sizeof(struct virtchnl_rss_hfunc);
+ break;
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ valid_len = sizeof(struct virtchnl_rss_hashcfg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
+ if (msglen >= valid_len) {
+ struct virtchnl_tc_info *vti =
+ (struct virtchnl_tc_info *)msg;
+ valid_len = virtchnl_struct_size(vti, list,
+ vti->num_tc);
+ if (vti->num_tc == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG:
+ case VIRTCHNL_OP_DEL_RSS_CFG:
+ valid_len = sizeof(struct virtchnl_rss_cfg);
+ break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_add);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_del);
+ break;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ case VIRTCHNL_OP_DEL_VLAN_V2:
+ valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+
+ valid_len = virtchnl_struct_size(vfl, filters,
+ vfl->num_elements);
+
+ if (vfl->num_elements == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ valid_len = sizeof(struct virtchnl_vlan_setting);
+ break;
+ case VIRTCHNL_OP_GET_QOS_CAPS:
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+ valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF;
+ if (msglen >= valid_len) {
+ struct virtchnl_queues_bw_cfg *q_bw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+
+ valid_len = virtchnl_struct_size(q_bw, cfg,
+ q_bw->num_queues);
+ if (q_bw->num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ valid_len = sizeof(struct virtchnl_quanta_cfg);
+ if (msglen >= valid_len) {
+ struct virtchnl_quanta_cfg *q_quanta =
+ (struct virtchnl_quanta_cfg *)msg;
+
+ if (q_quanta->quanta_size == 0 ||
+ q_quanta->queue_select.num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
}
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ valid_len = sizeof(struct virtchnl_ptp_caps);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- valid_len = sizeof(struct virtchnl_rss_hena);
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ valid_len = sizeof(struct virtchnl_phc_time);
break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
- return VIRTCHNL_ERR_PARAM;
+ return VIRTCHNL_STATUS_ERR_PARAM;
}
/* few more checks */
- if ((valid_len != msglen) || (err_msg_format))
+ if (err_msg_format || valid_len != msglen)
return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
return 0;
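
A brief aside on the sizing idiom this hunk adopts. The legacy code modelled each variable-length message with a one-element placeholder array and patched the expected length by hand ("valid_len += n - 1"); virtchnl_struct_size() follows the struct_size() pattern from <linux/overflow.h>, which sizes a struct with a flexible array member and saturates on overflow. A minimal sketch with a hypothetical struct, not the real virtchnl ones:

    #include <linux/overflow.h>     /* struct_size() */
    #include <linux/types.h>

    struct demo_rss_key {
            u16 key_len;            /* number of key bytes that follow */
            u8 key[];               /* flexible array, adds nothing to sizeof */
    };

    static size_t demo_expected_len(const struct demo_rss_key *msg)
    {
            /*
             * Expands to sizeof(*msg) + msg->key_len * sizeof(msg->key[0]),
             * with overflow checking -- no manual "- 1" correction needed
             * because key[] contributes no bytes to sizeof(*msg).
             */
            return struct_size(msg, key, msg->key_len);
    }
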
diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h
deleted file mode 100644
index 12a867c6061e..000000000000
--- a/include/linux/b1pcmcia.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $
- *
- * Exported functions of module b1pcmcia to be called by
- * avm_cs card services module.
- *
- * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _B1PCMCIA_H_
-#define _B1PCMCIA_H_
-
-int b1pcmcia_addcard_b1(unsigned int port, unsigned irq);
-int b1pcmcia_addcard_m1(unsigned int port, unsigned irq);
-int b1pcmcia_addcard_m2(unsigned int port, unsigned irq);
-int b1pcmcia_delcard(unsigned int port, unsigned irq);
-
-#endif /* _B1PCMCIA_H_ */
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 866c433e7d32..0217c1073735 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H
@@ -11,6 +12,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
struct page;
struct device;
@@ -21,18 +23,11 @@ struct dentry;
*/
enum wb_state {
WB_registered, /* bdi_register() was done */
- WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
+ WB_start_all, /* nr_pages == 0 (all) work pending */
};
-enum wb_congested_state {
- WB_async_congested, /* The async (write) queue is getting full */
- WB_sync_congested, /* The sync queue is getting full */
-};
-
-typedef int (congested_fn)(void *, int);
-
enum wb_stat_item {
WB_RECLAIMABLE,
WB_WRITEBACK,
@@ -44,25 +39,49 @@ enum wb_stat_item {
#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
/*
- * For cgroup writeback, multiple wb's may map to the same blkcg. Those
- * wb's can operate mostly independently but should share the congested
- * state. To facilitate such sharing, the congested state is tracked using
- * the following struct which is created on demand, indexed by blkcg ID on
- * its bdi, and refcounted.
+ * why some writeback work was initiated
*/
-struct bdi_writeback_congested {
- unsigned long state; /* WB_[a]sync_congested flags */
- atomic_t refcnt; /* nr of attached wb's and blkg */
+enum wb_reason {
+ WB_REASON_BACKGROUND,
+ WB_REASON_VMSCAN,
+ WB_REASON_SYNC,
+ WB_REASON_PERIODIC,
+ WB_REASON_LAPTOP_TIMER,
+ WB_REASON_FS_FREE_SPACE,
+ /*
+ * There is no bdi forker thread any more and the work is done
+ * by an emergency worker instead; however, this reason is
+ * visible to userland via tracepoints and we keep exposing
+ * exactly the same information, so the mismatched name stays.
+ */
+ WB_REASON_FORKER_THREAD,
+ WB_REASON_FOREIGN_FLUSH,
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
- * on bdi unregistration. For memcg-wb
- * internal use only! */
- int blkcg_id; /* ID of the associated blkcg */
- struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
-#endif
+ WB_REASON_MAX,
+};
+
+struct wb_completion {
+ atomic_t cnt;
+ wait_queue_head_t *waitq;
+ unsigned long progress_stamp; /* The jiffies when slow progress is detected */
+ unsigned long wait_start; /* The jiffies when waiting for the writeback work to finish */
};
+#define __WB_COMPLETION_INIT(_waitq) \
+ (struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }
+
+/*
+ * If one wants to wait for one or more wb_writeback_works, each work's
+ * ->done should be set to a wb_completion defined using the following
+ * macro. Once all work items are issued with wb_queue_work(), the caller
+ * can wait for the completion of all using wb_wait_for_completion(). Work
+ * items which are waited upon aren't freed automatically on completion.
+ */
+#define WB_COMPLETION_INIT(bdi) __WB_COMPLETION_INIT(&(bdi)->wb_waitq)
+
+#define DEFINE_WB_COMPLETION(cmpl, bdi) \
+ struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
+
/*
* Each wb (bdi_writeback) can perform writeback operations, is measured
* and throttled, independently. Without cgroup writeback, each bdi
@@ -81,6 +100,9 @@ struct bdi_writeback_congested {
* change as blkcg is disabled and enabled higher up in the hierarchy, a wb
* is tested for blkcg after lookup and removed from index on mismatch so
* that a new wb for the combination can be created.
+ *
+ * Each bdi_writeback that is not embedded into the backing_dev_info must hold
+ * a reference to the parent backing_dev_info. See cgwb_create() for details.
*/
struct bdi_writeback {
struct backing_dev_info *bdi; /* our parent bdi */
@@ -94,10 +116,9 @@ struct bdi_writeback {
struct list_head b_dirty_time; /* time stamps are dirty */
spinlock_t list_lock; /* protects the b_* lists */
+ atomic_t writeback_inodes; /* number of inodes under writeback */
struct percpu_counter stat[NR_WB_STAT_ITEMS];
- struct bdi_writeback_congested *congested;
-
unsigned long bw_time_stamp; /* last time write bw is updated */
unsigned long dirtied_stamp;
unsigned long written_stamp; /* pages written at bw_time_stamp */
@@ -115,12 +136,12 @@ struct bdi_writeback {
struct fprop_local_percpu completions;
int dirty_exceeded;
+ enum wb_reason start_all_reason;
spinlock_t work_lock; /* protects work_list & dwork scheduling */
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
-
- unsigned long dirty_sleep; /* last wait */
+ struct delayed_work bw_dwork; /* work item used for bandwidth estimate */
struct list_head bdi_node; /* anchored at bdi->wb_list */
@@ -131,6 +152,12 @@ struct bdi_writeback {
struct cgroup_subsys_state *blkcg_css; /* and blkcg */
struct list_head memcg_node; /* anchored at memcg->cgwb_list */
struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
+ struct list_head b_attached; /* attached inodes, protected by list_lock */
+ struct list_head offline_node; /* anchored at offline_cgwbs */
+ struct work_struct switch_work; /* work used to perform inode switching
+ * to this wb */
+ struct llist_head switch_wbs_ctxs; /* queued contexts for
+ * writeback switching */
union {
struct work_struct release_work;
@@ -140,13 +167,13 @@ struct bdi_writeback {
};
struct backing_dev_info {
+ u64 id;
+ struct rb_node rb_node; /* keyed by ->id */
struct list_head bdi_list;
- unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
- unsigned long io_pages; /* max allowed IO size */
- congested_fn *congested_fn; /* Function pointer if device is md/dm */
- void *congested_data; /* Pointer to aux data for congested func */
+ /* max readahead in PAGE_SIZE units */
+ unsigned long __data_racy ra_pages;
- const char *name;
+ unsigned long io_pages; /* max allowed IO size */
struct kref refcnt; /* Reference counter for the structure */
unsigned int capabilities; /* Device capabilities */
@@ -158,46 +185,37 @@ struct backing_dev_info {
* any dirty wbs, which is depended upon by bdi_has_dirty().
*/
atomic_long_t tot_write_bandwidth;
+ /*
+ * Jiffies when last process was dirty throttled on this bdi. Used by
+ * blk-wbt.
+ */
+ unsigned long last_bdp_sleep;
struct bdi_writeback wb; /* the root writeback info for this bdi */
struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
- struct rb_root cgwb_congested_tree; /* their congested states */
-#else
- struct bdi_writeback_congested *wb_congested;
+ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
+ struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#endif
wait_queue_head_t wb_waitq;
struct device *dev;
+ char dev_name[64];
struct device *owner;
struct timer_list laptop_mode_wb_timer;
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_dir;
- struct dentry *debug_stats;
#endif
};
-enum {
- BLK_RW_ASYNC = 0,
- BLK_RW_SYNC = 1,
+struct wb_lock_cookie {
+ bool locked;
+ unsigned long flags;
};
-void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
-void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
-
-static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
- clear_wb_congested(bdi->wb.congested, sync);
-}
-
-static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
- set_wb_congested(bdi->wb.congested, sync);
-}
-
#ifdef CONFIG_CGROUP_WRITEBACK
/**
@@ -224,11 +242,29 @@ static inline void wb_get(struct bdi_writeback *wb)
/**
* wb_put - decrement a wb's refcount
* @wb: bdi_writeback to put
+ * @nr: number of references to put
*/
-static inline void wb_put(struct bdi_writeback *wb)
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
+ if (WARN_ON_ONCE(!wb->bdi)) {
+ /*
+ * A driver bug might cause a file to be removed before bdi was
+ * initialized.
+ */
+ return;
+ }
+
if (wb != &wb->bdi->wb)
- percpu_ref_put(&wb->refcnt);
+ percpu_ref_put_many(&wb->refcnt, nr);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+ wb_put_many(wb, 1);
}
/**
@@ -257,6 +293,10 @@ static inline void wb_put(struct bdi_writeback *wb)
{
}
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
+{
+}
+
static inline bool wb_dying(struct bdi_writeback *wb)
{
return false;
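
A hedged sketch of the completion pattern documented above. Note that wb_queue_work() and struct wb_writeback_work are internal to fs/fs-writeback.c; they appear here only to illustrate how DEFINE_WB_COMPLETION() and wb_wait_for_completion() (declared in backing-dev.h below) fit together.

    #include <linux/backing-dev.h>

    /* Hypothetical caller inside the writeback core. */
    static void demo_flush_and_wait(struct backing_dev_info *bdi,
                                    struct wb_writeback_work *work)
    {
            DEFINE_WB_COMPLETION(done, bdi);  /* cnt = 1, waitq = &bdi->wb_waitq */

            work->done = &done;
            wb_queue_work(&bdi->wb, work);    /* each queued work holds a count */

            wb_wait_for_completion(&done);    /* sleeps until the count drains */
    }
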
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 854e1bdd0b2a..0c8342747cab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/backing-dev.h
*
@@ -11,9 +12,8 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/blkdev.h>
+#include <linux/device.h>
#include <linux/writeback.h>
-#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
@@ -23,26 +23,23 @@ static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
return bdi;
}
+struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);
__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
+__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
va_list args);
-int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
+void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);
-struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
-static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
-{
- return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
-}
+struct backing_dev_info *bdi_alloc(int node_id);
-void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
- bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
-void wb_wakeup_delayed(struct bdi_writeback *wb);
+
+void wb_wait_for_completion(struct wb_completion *done);
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
@@ -63,22 +60,12 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
return atomic_long_read(&bdi->tot_write_bandwidth);
}
-static inline void __add_wb_stat(struct bdi_writeback *wb,
+static inline void wb_stat_mod(struct bdi_writeback *wb,
enum wb_stat_item item, s64 amount)
{
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}
-static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
-{
- __add_wb_stat(wb, item, 1);
-}
-
-static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
-{
- __add_wb_stat(wb, item, -1);
-}
-
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
return percpu_counter_read_positive(&wb->stat[item]);
@@ -94,7 +81,7 @@ extern void wb_writeout_inc(struct bdi_writeback *wb);
/*
* maximal error of a stat counter.
*/
-static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
+static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
return nr_cpu_ids * WB_STAT_BATCH;
@@ -103,39 +90,33 @@ static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
#endif
}
+/* BDI ratio is expressed as part per 1000000 for finer granularity. */
+#define BDI_RATIO_SCALE 10000
+
+u64 bdi_get_min_bytes(struct backing_dev_info *bdi);
+u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
+int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio);
+int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio);
+int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes);
+int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
+int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);
/*
* Flags in backing_dev_info::capability
*
- * The first three flags control whether dirty pages will contribute to the
- * VM's accounting and whether writepages() should be called for dirty pages
- * (something that would not, for example, be appropriate for ramfs)
- *
- * WARNING: these flags are closely related and should not normally be
- * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
- * three flags into a single convenience macro.
- *
- * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
- * BDI_CAP_NO_WRITEBACK: Don't write pages back
- * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
- * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
- *
- * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
+ * BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages
+ * should contribute to accounting
+ * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
*/
-#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
-#define BDI_CAP_NO_WRITEBACK 0x00000002
-#define BDI_CAP_NO_ACCT_WB 0x00000004
-#define BDI_CAP_STABLE_WRITES 0x00000008
-#define BDI_CAP_STRICTLIMIT 0x00000010
-#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
-
-#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
- (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
+#define BDI_CAP_WRITEBACK (1 << 0)
+#define BDI_CAP_STRICTLIMIT (1 << 1)
extern struct backing_dev_info noop_backing_dev_info;
+int bdi_init(struct backing_dev_info *bdi);
+
/**
* writeback_in_progress - determine whether there is writeback in progress
* @wb: bdi_writeback of interest
@@ -148,92 +129,30 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb)
return test_bit(WB_writeback_running, &wb->state);
}
-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
-{
- struct super_block *sb;
-
- if (!inode)
- return &noop_backing_dev_info;
-
- sb = inode->i_sb;
-#ifdef CONFIG_BLOCK
- if (sb_is_blkdev_sb(sb))
- return I_BDEV(inode)->bd_bdi;
-#endif
- return sb->s_bdi;
-}
-
-static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
-{
- struct backing_dev_info *bdi = wb->bdi;
-
- if (bdi->congested_fn)
- return bdi->congested_fn(bdi->congested_data, cong_bits);
- return wb->congested->state & cong_bits;
-}
-
-long congestion_wait(int sync, long timeout);
-long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
-int pdflush_proc_obsolete(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
-static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
-{
- return bdi->capabilities & BDI_CAP_STABLE_WRITES;
-}
-
-static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
-{
- return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
-}
-
-static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
-{
- return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
-}
+struct backing_dev_info *inode_to_bdi(struct inode *inode);
-static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
+static inline bool mapping_can_writeback(struct address_space *mapping)
{
- /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
- return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
- BDI_CAP_NO_WRITEBACK));
-}
-
-static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
-{
- return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
-}
-
-static inline bool mapping_cap_account_dirty(struct address_space *mapping)
-{
- return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
-}
-
-static inline int bdi_sched_wait(void *word)
-{
- schedule();
- return 0;
+ return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}
#ifdef CONFIG_CGROUP_WRITEBACK
-struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
-void wb_congested_put(struct bdi_writeback_congested *congested);
+struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
+ struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
struct cgroup_subsys_state *memcg_css,
gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
-void wb_blkcg_offline(struct blkcg *blkcg);
-int inode_congested(struct inode *inode, int cong_bits);
+void wb_blkcg_offline(struct cgroup_subsys_state *css);
/**
* inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
* @inode: inode of interest
*
- * cgroup writeback requires support from both the bdi and filesystem.
- * Also, both memcg and iocg have to be on the default hierarchy. Test
- * whether all conditions are met.
+ * Cgroup writeback requires support from the filesystem. Also, both memcg and
+ * iocg have to be on the default hierarchy. Test whether all conditions are
+ * met.
*
* Note that the test result may change dynamically on the same inode
* depending on how memcg and iocg are configured.
@@ -244,8 +163,7 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
cgroup_subsys_on_dfl(io_cgrp_subsys) &&
- bdi_cap_account_dirty(bdi) &&
- (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
+ (bdi->capabilities & BDI_CAP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
@@ -308,68 +226,69 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
}
/**
- * inode_to_wb_is_valid - test whether an inode has a wb associated
- * @inode: inode of interest
- *
- * Returns %true if @inode has a wb associated. May be called without any
- * locking.
- */
-static inline bool inode_to_wb_is_valid(struct inode *inode)
-{
- return inode->i_wb;
-}
-
-/**
* inode_to_wb - determine the wb of an inode
* @inode: inode of interest
*
* Returns the wb @inode is currently associated with. The caller must be
- * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
+ * holding either @inode->i_lock, the i_pages lock, or the
* associated wb's list_lock.
*/
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
+ (inode->i_sb->s_iflags & SB_I_CGROUPWB) &&
(!lockdep_is_held(&inode->i_lock) &&
- !lockdep_is_held(&inode->i_mapping->tree_lock) &&
+ !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
!lockdep_is_held(&inode->i_wb->list_lock)));
#endif
return inode->i_wb;
}
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
+{
+ /*
+ * If wbc does not have inode attached, it means cgroup writeback was
+ * disabled when wbc started. Just use the default wb in that case.
+ */
+ return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
+}
+
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
*
* The caller wants to access the wb associated with @inode but isn't
- * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
+ * holding inode->i_lock, the i_pages lock or wb->list_lock. This
* function determines the wb associated with @inode and ensures that the
* association doesn't change until the transaction is finished with
* unlocked_inode_to_wb_end().
*
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction. IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction. IRQs may or may not be disabled on
+ * return.
*/
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
rcu_read_lock();
/*
- * Paired with store_release in inode_switch_wb_work_fn() and
+ * Paired with a release fence in inode_do_switch_wbs() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
- *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ cookie->locked = inode_state_read_once(inode) & I_WB_SWITCH;
+ smp_rmb();
- if (unlikely(*lockedp))
- spin_lock_irq(&inode->i_mapping->tree_lock);
+ if (unlikely(cookie->locked))
+ xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
/*
- * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
- * inode_to_wb() will bark. Deref directly.
+ * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
+ * lock. inode_to_wb() will bark. Deref directly.
*/
return inode->i_wb;
}
@@ -377,12 +296,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
/**
* unlocked_inode_to_wb_end - end inode wb access transaction
* @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
*/
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
- if (unlikely(locked))
- spin_unlock_irq(&inode->i_mapping->tree_lock);
+ if (unlikely(cookie->locked))
+ xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);
rcu_read_unlock();
}
@@ -394,19 +314,6 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return false;
}
-static inline struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
-{
- atomic_inc(&bdi->wb_congested->refcnt);
- return bdi->wb_congested;
-}
-
-static inline void wb_congested_put(struct bdi_writeback_congested *congested)
-{
- if (atomic_dec_and_test(&congested->refcnt))
- kfree(congested);
-}
-
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
return &bdi->wb;
@@ -418,76 +325,40 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
return &bdi->wb;
}
-static inline bool inode_to_wb_is_valid(struct inode *inode)
-{
- return true;
-}
-
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
return &inode_to_bdi(inode)->wb;
}
-static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
{
return inode_to_wb(inode);
}
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
-{
-}
-
-static inline void wb_memcg_offline(struct mem_cgroup *memcg)
-{
-}
-
-static inline void wb_blkcg_offline(struct blkcg *blkcg)
-{
-}
-
-static inline int inode_congested(struct inode *inode, int cong_bits)
-{
- return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
-}
-
-#endif /* CONFIG_CGROUP_WRITEBACK */
-static inline int inode_read_congested(struct inode *inode)
-{
- return inode_congested(inode, 1 << WB_sync_congested);
-}
-
-static inline int inode_write_congested(struct inode *inode)
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
- return inode_congested(inode, 1 << WB_async_congested);
+ return inode_to_wb(inode);
}
-static inline int inode_rw_congested(struct inode *inode)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
- return inode_congested(inode, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
}
-static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
+static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
- return wb_congested(&bdi->wb, cong_bits);
}
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
+static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
- return bdi_congested(bdi, 1 << WB_sync_congested);
}
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << WB_async_congested);
-}
+#endif /* CONFIG_CGROUP_WRITEBACK */
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
-}
+const char *bdi_dev_name(struct backing_dev_info *bdi);
#endif /* _LINUX_BACKING_DEV_H */
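
A minimal sketch of the wb_lock_cookie transaction that replaces the old bool-based one, assuming only the helpers declared above; the stat update stands in for real work and must not sleep inside the transaction.

    #include <linux/backing-dev.h>

    static void demo_account_reclaimable(struct inode *inode)
    {
            struct wb_lock_cookie cookie = {};
            struct bdi_writeback *wb;

            wb = unlocked_inode_to_wb_begin(inode, &cookie);
            wb_stat_mod(wb, WB_RECLAIMABLE, 1); /* wb association is stable here */
            unlocked_inode_to_wb_end(inode, &cookie);
    }
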
diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h
new file mode 100644
index 000000000000..1476a6ed1bfd
--- /dev/null
+++ b/include/linux/backing-file.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common helpers for stackable filesystems and backing files.
+ *
+ * Copyright (C) 2023 CTERA Networks.
+ */
+
+#ifndef _LINUX_BACKING_FILE_H
+#define _LINUX_BACKING_FILE_H
+
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/fs.h>
+
+struct backing_file_ctx {
+ const struct cred *cred;
+ void (*accessed)(struct file *file);
+ void (*end_write)(struct kiocb *iocb, ssize_t);
+};
+
+struct file *backing_file_open(const struct path *user_path, int flags,
+ const struct path *real_path,
+ const struct cred *cred);
+struct file *backing_tmpfile_open(const struct path *user_path, int flags,
+ const struct path *real_parentpath,
+ umode_t mode, const struct cred *cred);
+ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, struct kiocb *iocb,
+ size_t len, unsigned int flags,
+ struct backing_file_ctx *ctx);
+int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
+ struct backing_file_ctx *ctx);
+
+#endif /* _LINUX_BACKING_FILE_H */
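
A sketch of how a stackable filesystem might use the new header, assuming the usual overlayfs-style arrangement where the backing file and the mounter's credentials were stashed at open time; all names here are hypothetical.

    #include <linux/backing-file.h>

    static const struct cred *demo_creds;   /* mounter creds, set at mount time */

    static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *iter)
    {
            struct file *realfile = iocb->ki_filp->private_data; /* backing file */
            struct backing_file_ctx ctx = { .cred = demo_creds };

            return backing_file_read_iter(realfile, iter, iocb,
                                          iocb->ki_flags, &ctx);
    }
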
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 5f2fd61ef4fb..f29a9ef1052e 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Backlight Lowlevel Control Abstraction
*
@@ -9,114 +10,292 @@
#define _LINUX_BACKLIGHT_H
#include <linux/device.h>
-#include <linux/fb.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
+#include <linux/types.h>
-/* Notes on locking:
+/**
+ * enum backlight_update_reason - what method was used to update backlight
*
- * backlight_device->ops_lock is an internal backlight lock protecting the
- * ops pointer and no code outside the core should need to touch it.
- *
- * Access to update_status() is serialised by the update_lock mutex since
- * most drivers seem to need this and historically get it wrong.
- *
- * Most drivers don't need locking on their get_brightness() method.
- * If yours does, you need to implement it in the driver. You can use the
- * update_lock mutex if appropriate.
- *
- * Any other use of the locks below is probably wrong.
+ * A driver indicates the method (reason) used for updating the backlight
+ * when calling backlight_force_update().
*/
-
enum backlight_update_reason {
+ /**
+ * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key.
+ */
BACKLIGHT_UPDATE_HOTKEY,
+
+ /**
+ * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs.
+ */
BACKLIGHT_UPDATE_SYSFS,
};
+/**
+ * enum backlight_type - the type of backlight control
+ *
+ * The type of interface used to control the backlight.
+ */
enum backlight_type {
+ /**
+ * @BACKLIGHT_RAW:
+ *
+ * The backlight is controlled using hardware registers.
+ */
BACKLIGHT_RAW = 1,
+
+ /**
+ * @BACKLIGHT_PLATFORM:
+ *
+ * The backlight is controlled using a platform-specific interface.
+ */
BACKLIGHT_PLATFORM,
+
+ /**
+ * @BACKLIGHT_FIRMWARE:
+ *
+ * The backlight is controlled using a standard firmware interface.
+ */
BACKLIGHT_FIRMWARE,
+
+ /**
+ * @BACKLIGHT_TYPE_MAX: Number of entries.
+ */
BACKLIGHT_TYPE_MAX,
};
-enum backlight_notification {
- BACKLIGHT_REGISTERED,
- BACKLIGHT_UNREGISTERED,
+/**
+ * enum backlight_scale - the type of scale used for brightness values
+ *
+ * The type of scale used for brightness values.
+ */
+enum backlight_scale {
+ /**
+ * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown.
+ */
+ BACKLIGHT_SCALE_UNKNOWN = 0,
+
+ /**
+ * @BACKLIGHT_SCALE_LINEAR: The scale is linear.
+ *
+ * The linear scale will increase brightness the same for each step.
+ */
+ BACKLIGHT_SCALE_LINEAR,
+
+ /**
+ * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear.
+ *
+ * This is often used when the brightness values try to match the
+ * eye's relative perception of brightness, which demands a
+ * non-linear scale.
+ */
+ BACKLIGHT_SCALE_NON_LINEAR,
};
struct backlight_device;
-struct fb_info;
+/**
+ * struct backlight_ops - backlight operations
+ *
+ * The backlight operations are specified when the backlight device is registered.
+ */
struct backlight_ops {
+ /**
+ * @options: Configure how operations are called from the core.
+ *
+ * The options parameter is used to adjust the behaviour of the core.
+ * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called
+ * upon suspend and resume.
+ */
unsigned int options;
#define BL_CORE_SUSPENDRESUME (1 << 0)
- /* Notify the backlight driver some property has changed */
+ /**
+ * @update_status: Operation called when properties have changed.
+ *
+ * Notify the backlight driver some property has changed.
+ * The update_status operation is protected by the update_lock.
+ *
+ * The backlight driver is expected to use backlight_is_blank()
+ * to check if the display is blanked and set brightness accordingly.
+ * update_status() is called when any of the properties has changed.
+ *
+ * RETURNS:
+ *
+ * 0 on success, negative error code if any failure occurred.
+ */
int (*update_status)(struct backlight_device *);
- /* Return the current backlight brightness (accounting for power,
- fb_blank etc.) */
+
+ /**
+ * @get_brightness: Return the current backlight brightness.
+ *
+ * The driver may implement this as a readback from the HW.
+ * This operation is optional and if not present then the current
+ * brightness property value is used.
+ *
+ * RETURNS:
+ *
+ * A brightness value which is 0 or a positive number.
+ * On failure a negative error code is returned.
+ */
int (*get_brightness)(struct backlight_device *);
- /* Check if given framebuffer device is the one bound to this backlight;
- return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
- int (*check_fb)(struct backlight_device *, struct fb_info *);
+
+ /**
+ * @controls_device: Check against the display device
+ *
+ * Check if the backlight controls the given display device. This
+ * operation is optional and if not implemented it is assumed that
+ * the display is always the one controlled by the backlight.
+ *
+ * RETURNS:
+ *
+ * If display_dev is NULL or display_dev matches the device controlled by
+ * the backlight, return true. Otherwise return false.
+ */
+ bool (*controls_device)(struct backlight_device *bd, struct device *display_dev);
};
-/* This structure defines all the properties of a backlight */
+/**
+ * struct backlight_properties - backlight properties
+ *
+ * This structure defines all the properties of a backlight.
+ */
struct backlight_properties {
- /* Current User requested brightness (0 - max_brightness) */
+ /**
+ * @brightness: The current brightness requested by the user.
+ *
+ * The backlight core makes sure the range is (0 to max_brightness)
+ * when the brightness is set via the sysfs attribute:
+ * /sys/class/backlight/<backlight>/brightness.
+ *
+ * This value can be set in the backlight_properties passed
+ * to devm_backlight_device_register() to set a default brightness
+ * value.
+ */
int brightness;
- /* Maximal value for brightness (read-only) */
+
+ /**
+ * @max_brightness: The maximum brightness value.
+ *
+ * This value must be set in the backlight_properties passed to
+ * devm_backlight_device_register() and shall not be modified by the
+ * driver after registration.
+ */
int max_brightness;
- /* Current FB Power mode (0: full on, 1..3: power saving
- modes; 4: full off), see FB_BLANK_XXX */
+
+ /**
+ * @power: The current power mode.
+ *
+ * User space can configure the power mode using the sysfs
+ * attribute: /sys/class/backlight/<backlight>/bl_power
+ * When the power property is updated update_status() is called.
+ *
+ * The possible values are: (0: full on, 4: full off), see
+ * BACKLIGHT_POWER constants.
+ *
+ * When the backlight device is enabled, @power is set to
+ * BACKLIGHT_POWER_ON. When the backlight device is disabled,
+ * @power is set to BACKLIGHT_POWER_OFF.
+ */
int power;
- /* FB Blanking active? (values as for power) */
- /* Due to be removed, please use (state & BL_CORE_FBBLANK) */
- int fb_blank;
- /* Backlight type */
+
+#define BACKLIGHT_POWER_ON (0)
+#define BACKLIGHT_POWER_OFF (4)
+#define BACKLIGHT_POWER_REDUCED (1) // deprecated; don't use in new code
+
+ /**
+ * @type: The type of backlight supported.
+ *
+ * The backlight type allows userspace to make appropriate
+ * policy decisions based on the backlight type.
+ *
+ * This value must be set in the backlight_properties
+ * passed to devm_backlight_device_register().
+ */
enum backlight_type type;
- /* Flags used to signal drivers of state changes */
- /* Upper 4 bits are reserved for driver internal use */
+
+ /**
+ * @state: The state of the backlight core.
+ *
+ * The state is a bitmask. BL_CORE_FBBLANK is set when the display
+ * is expected to be blank. BL_CORE_SUSPENDED is set when the
+ * driver is suspended.
+ *
+ * backlight drivers are expected to use backlight_is_blank()
+ * in their update_status() operation rather than reading the
+ * state property.
+ *
+ * The state is maintained by the core and drivers may not modify it.
+ */
unsigned int state;
#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
-#define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */
-#define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */
-#define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */
-#define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */
+ /**
+ * @scale: The type of the brightness scale.
+ */
+ enum backlight_scale scale;
};
+/**
+ * struct backlight_device - backlight device data
+ *
+ * This structure holds all data required by a backlight device.
+ */
struct backlight_device {
- /* Backlight properties */
+ /**
+ * @props: Backlight properties
+ */
struct backlight_properties props;
- /* Serialise access to update_status method */
+ /**
+ * @update_lock: The lock used when calling the update_status() operation.
+ *
+ * update_lock is an internal backlight lock that serialises access
+ * to the update_status() operation. The backlight core holds the update_lock
+ * when calling the update_status() operation. The update_lock shall not
+ * be used by backlight drivers.
+ */
struct mutex update_lock;
- /* This protects the 'ops' field. If 'ops' is NULL, the driver that
- registered this device has been unloaded, and if class_get_devdata()
- points to something in the body of that driver, it is also invalid. */
+ /**
+ * @ops_lock: The lock used around everything related to backlight_ops.
+ *
+ * ops_lock is an internal backlight lock that protects the ops pointer
+ * and is used around all accesses to ops and when the operations are
+ * invoked. The ops_lock shall not be used by backlight drivers.
+ */
struct mutex ops_lock;
- const struct backlight_ops *ops;
- /* The framebuffer notifier block */
- struct notifier_block fb_notif;
+ /**
+ * @ops: Pointer to the backlight operations.
+ *
+ * If ops is NULL, the driver that registered this device has been unloaded,
+ * and if class_get_devdata() points to something in the body of that driver,
+ * it is also invalid.
+ */
+ const struct backlight_ops *ops;
- /* list entry of all registered backlight devices */
+ /**
+ * @entry: List entry of all registered backlight devices
+ */
struct list_head entry;
+ /**
+ * @dev: Parent device.
+ */
struct device dev;
- /* Multiple framebuffers may share one backlight device */
- bool fb_bl_on[FB_MAX];
-
+ /**
+ * @use_count: The number of unblanked displays.
+ */
int use_count;
};
+/**
+ * backlight_update_status - force an update of the backlight device status
+ * @bd: the backlight device
+ */
static inline int backlight_update_status(struct backlight_device *bd)
{
int ret = -ENOENT;
@@ -129,39 +308,126 @@ static inline int backlight_update_status(struct backlight_device *bd)
return ret;
}
-extern struct backlight_device *backlight_device_register(const char *name,
- struct device *dev, void *devdata, const struct backlight_ops *ops,
- const struct backlight_properties *props);
-extern struct backlight_device *devm_backlight_device_register(
- struct device *dev, const char *name, struct device *parent,
- void *devdata, const struct backlight_ops *ops,
- const struct backlight_properties *props);
-extern void backlight_device_unregister(struct backlight_device *bd);
-extern void devm_backlight_device_unregister(struct device *dev,
- struct backlight_device *bd);
-extern void backlight_force_update(struct backlight_device *bd,
- enum backlight_update_reason reason);
-extern int backlight_register_notifier(struct notifier_block *nb);
-extern int backlight_unregister_notifier(struct notifier_block *nb);
-extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
-extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
+/**
+ * backlight_enable - Enable backlight
+ * @bd: the backlight device to enable
+ */
+static inline int backlight_enable(struct backlight_device *bd)
+{
+ if (!bd)
+ return 0;
+
+ bd->props.power = BACKLIGHT_POWER_ON;
+ bd->props.state &= ~BL_CORE_FBBLANK;
+
+ return backlight_update_status(bd);
+}
+
+/**
+ * backlight_disable - Disable backlight
+ * @bd: the backlight device to disable
+ */
+static inline int backlight_disable(struct backlight_device *bd)
+{
+ if (!bd)
+ return 0;
+
+ bd->props.power = BACKLIGHT_POWER_OFF;
+ bd->props.state |= BL_CORE_FBBLANK;
+
+ return backlight_update_status(bd);
+}
+
+/**
+ * backlight_is_blank - Return true if display is expected to be blank
+ * @bd: the backlight device
+ *
+ * Display is expected to be blank if any of these is true::
+ *
+ * 1) if power is not UNBLANK
+ * 2) if state indicates BLANK or SUSPENDED
+ *
+ * Returns true if display is expected to be blank, false otherwise.
+ */
+static inline bool backlight_is_blank(const struct backlight_device *bd)
+{
+ return bd->props.power != BACKLIGHT_POWER_ON ||
+ bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
+}
+
+/**
+ * backlight_get_brightness - Returns the current brightness value
+ * @bd: the backlight device
+ *
+ * Returns the current brightness value, taking into consideration the
+ * current state. If backlight_is_blank() returns true, 0 is returned;
+ * otherwise the current brightness property value is returned.
+ *
+ * Backlight drivers are expected to use this function in their update_status()
+ * operation to get the brightness value.
+ */
+static inline int backlight_get_brightness(const struct backlight_device *bd)
+{
+ if (backlight_is_blank(bd))
+ return 0;
+ else
+ return bd->props.brightness;
+}
+
+struct backlight_device *
+backlight_device_register(const char *name, struct device *dev, void *devdata,
+ const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+struct backlight_device *
+devm_backlight_device_register(struct device *dev, const char *name,
+ struct device *parent, void *devdata,
+ const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+void backlight_device_unregister(struct backlight_device *bd);
+void devm_backlight_device_unregister(struct device *dev,
+ struct backlight_device *bd);
+void backlight_force_update(struct backlight_device *bd,
+ enum backlight_update_reason reason);
+struct backlight_device *backlight_device_get_by_name(const char *name);
+struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+int backlight_device_set_brightness(struct backlight_device *bd,
+ unsigned long brightness);
+
+#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+void backlight_notify_blank(struct backlight_device *bd,
+ struct device *display_dev,
+ bool fb_on, bool prev_fb_on);
+void backlight_notify_blank_all(struct device *display_dev,
+ bool fb_on, bool prev_fb_on);
+#else
+static inline void backlight_notify_blank(struct backlight_device *bd,
+ struct device *display_dev,
+ bool fb_on, bool prev_fb_on)
+{ }
+static inline void backlight_notify_blank_all(struct device *display_dev,
+ bool fb_on, bool prev_fb_on)
+{ }
+#endif
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
+/**
+ * bl_get_data - access devdata
+ * @bl_dev: pointer to backlight device
+ *
+ * When a backlight device is registered the driver has the possibility
+ * to supply a void * devdata. bl_get_data() return a pointer to the
+ * devdata.
+ *
+ * RETURNS:
+ *
+ * pointer to devdata stored while registering the backlight device.
+ */
static inline void * bl_get_data(struct backlight_device *bl_dev)
{
return dev_get_drvdata(&bl_dev->dev);
}
-struct generic_bl_info {
- const char *name;
- int max_intensity;
- int default_intensity;
- int limit_mask;
- void (*set_bl_intensity)(int intensity);
- void (*kick_battery)(void);
-};
-
#ifdef CONFIG_OF
struct backlight_device *of_find_backlight_by_node(struct device_node *node);
#else
@@ -172,4 +438,14 @@ of_find_backlight_by_node(struct device_node *node)
}
#endif
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+struct backlight_device *devm_of_find_backlight(struct device *dev);
+#else
+static inline struct backlight_device *
+devm_of_find_backlight(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
#endif
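
A minimal driver-side sketch of the documented contract, assuming a hypothetical demo_hw_set() register write: update_status() reads the effective level through backlight_get_brightness() so that blanking and suspend are honoured, and registration passes type and max_brightness in the properties as required above.

    #include <linux/backlight.h>
    #include <linux/err.h>

    static int demo_hw_set(struct backlight_device *bd, int level); /* hypothetical */

    static int demo_update_status(struct backlight_device *bd)
    {
            return demo_hw_set(bd, backlight_get_brightness(bd)); /* 0 when blanked */
    }

    static const struct backlight_ops demo_ops = {
            .options        = BL_CORE_SUSPENDRESUME,
            .update_status  = demo_update_status,
    };

    static int demo_probe(struct device *dev)
    {
            struct backlight_properties props = {
                    .type = BACKLIGHT_RAW,
                    .scale = BACKLIGHT_SCALE_LINEAR,
                    .max_brightness = 255,
                    .brightness = 128,
            };

            return PTR_ERR_OR_ZERO(devm_backlight_device_register(dev, "demo-bl",
                                            dev, NULL, &demo_ops, &props));
    }
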
diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h
index c3bdf8c59480..996493917f36 100644
--- a/include/linux/badblocks.h
+++ b/include/linux/badblocks.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BADBLOCKS_H
#define _LINUX_BADBLOCKS_H
@@ -14,6 +15,7 @@
#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x) (!!((x) & BB_ACK_MASK))
+#define BB_END(x) (BB_OFFSET(x) + BB_LEN(x))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
/* Bad block numbers are stored sorted in a single page.
@@ -40,11 +42,17 @@ struct badblocks {
sector_t size; /* in sectors */
};
-int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
- sector_t *first_bad, int *bad_sectors);
-int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
- int acknowledged);
-int badblocks_clear(struct badblocks *bb, sector_t s, int sectors);
+struct badblocks_context {
+ sector_t start;
+ sector_t len;
+ int ack;
+};
+
+int badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors,
+ sector_t *first_bad, sector_t *bad_sectors);
+bool badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors,
+ int acknowledged);
+bool badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors);
void ack_all_badblocks(struct badblocks *bb);
ssize_t badblocks_show(struct badblocks *bb, char *page, int unack);
ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
@@ -62,4 +70,27 @@ static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb)
}
badblocks_exit(bb);
}
+
+static inline int badblocks_full(struct badblocks *bb)
+{
+ return (bb->count >= MAX_BADBLOCKS);
+}
+
+static inline int badblocks_empty(struct badblocks *bb)
+{
+ return (bb->count == 0);
+}
+
+static inline void set_changed(struct badblocks *bb)
+{
+ if (bb->changed != 1)
+ bb->changed = 1;
+}
+
+static inline void clear_changed(struct badblocks *bb)
+{
+ if (bb->changed != 0)
+ bb->changed = 0;
+}
+
#endif
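
Worked arithmetic for the packing macros above, including the new BB_END(): each u64 entry stores the start sector shifted left by 9, a 9-bit (length - 1) field, and the ack flag in bit 63.

    #include <linux/badblocks.h>

    static void demo_bb_entry(void)
    {
            u64 e = BB_MAKE(4096, 8, 1);    /* start 4096, 8 sectors, acked */

            /* e == (4096 << 9) | (8 - 1) | (1ULL << 63) */
            WARN_ON(BB_OFFSET(e) != 4096);
            WARN_ON(BB_LEN(e) != 8);
            WARN_ON(!BB_ACK(e));
            WARN_ON(BB_END(e) != 4104);     /* first sector past the range */
    }
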
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 79542b2698ec..7cfe48769239 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -1,38 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/balloon_compaction.h
*
* Common interface definitions for making balloon pages movable by compaction.
*
- * Despite being perfectly possible to perform ballooned pages migration, they
- * make a special corner case to compaction scans because balloon pages are not
- * enlisted at any LRU list like the other pages we do compact / migrate.
+ * Balloon page migration makes use of the general "movable_ops page migration"
+ * feature.
+ *
+ * page->private is used to reference the responsible balloon device.
+ * That these pages have movable_ops, and which movable_ops apply,
+ * is derived from the page type (PageOffline()) combined with the
+ * PG_movable_ops flag (PageMovableOps()).
*
* As the page isolation scanning step a compaction thread does is a lockless
* procedure (from a page standpoint), it might bring some racy situations while
* performing balloon page compaction. In order to sort out these racy scenarios
* and safely perform balloon's page compaction and migration we must, always,
- * ensure following these three simple rules:
+ * ensure following these simple rules:
*
- * i. when updating a balloon's page ->mapping element, strictly do it under
- * the following lock order, independently of the far superior
- * locking scheme (lru_lock, balloon_lock):
+ * i. Setting the PG_movable_ops flag and page->private with the following
+ * lock order
* +-page_lock(page);
* +--spin_lock_irq(&b_dev_info->pages_lock);
- * ... page->mapping updates here ...
- *
- * ii. before isolating or dequeueing a balloon page from the balloon device
- * pages list, the page reference counter must be raised by one and the
- * extra refcount must be dropped when the page is enqueued back into
- * the balloon device page list, thus a balloon page keeps its reference
- * counter raised only while it is under our special handling;
*
- * iii. after the lockless scan step have selected a potential balloon page for
- * isolation, re-test the PageBalloon mark and the PagePrivate flag
- * under the proper page lock, to ensure isolating a valid balloon page
- * (not yet isolated, nor under release procedure)
- *
- * iv. isolation or dequeueing procedure must clear PagePrivate flag under
- * page lock together with removing page from balloon device page list.
+ * ii. isolation or dequeueing procedure must remove the page from balloon
+ * device page list under b_dev_info->pages_lock.
*
* The functions provided by this interface are placed to help on coping with
* the aforementioned balloon page corner case, as well as to ensure the simple
@@ -49,6 +41,7 @@
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/fs.h>
+#include <linux/list.h>
/*
* Balloon device information descriptor.
@@ -63,11 +56,16 @@ struct balloon_dev_info {
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
- struct inode *inode;
};
-extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
+extern struct page *balloon_page_alloc(void);
+extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
+ struct page *page);
extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
+extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages);
+extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages, size_t n_req_pages);
static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
{
@@ -75,17 +73,19 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
- balloon->inode = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
-extern const struct address_space_operations balloon_aops;
-extern bool balloon_page_isolate(struct page *page,
- isolate_mode_t mode);
-extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct address_space *mapping,
- struct page *newpage,
- struct page *page, enum migrate_mode mode);
+extern const struct movable_operations balloon_mops;
+/*
+ * balloon_page_device - get the b_dev_info descriptor for the balloon device
+ * that enqueues the given page.
+ */
+static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+{
+ return (struct balloon_dev_info *)page_private(page);
+}
+#endif /* CONFIG_BALLOON_COMPACTION */
/*
* balloon_page_insert - insert a page into the balloon's page list and make
@@ -99,97 +99,62 @@ extern int balloon_page_migrate(struct address_space *mapping,
static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
- __SetPageBalloon(page);
- __SetPageMovable(page, balloon->inode->i_mapping);
- set_page_private(page, (unsigned long)balloon);
+ __SetPageOffline(page);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) {
+ SetPageMovableOps(page);
+ set_page_private(page, (unsigned long)balloon);
+ }
list_add(&page->lru, &balloon->pages);
}
+static inline gfp_t balloon_mapping_gfp_mask(void)
+{
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ return GFP_HIGHUSER_MOVABLE;
+ return GFP_HIGHUSER;
+}
+
/*
- * balloon_page_delete - delete a page from balloon's page list and clear
- * the page->private assignement accordingly.
- * @page : page to be released from balloon's page list
+ * balloon_page_finalize - prepare a balloon page that was removed from the
+ * balloon list for release to the page allocator
+ * @page: page to be released to the page allocator
*
- * Caller must ensure the page is locked and the spin_lock protecting balloon
- * pages list is held before deleting a page from the balloon device.
+ * Caller must ensure that the page is locked.
*/
-static inline void balloon_page_delete(struct page *page)
+static inline void balloon_page_finalize(struct page *page)
{
- __ClearPageBalloon(page);
- __ClearPageMovable(page);
- set_page_private(page, 0);
- /*
- * No touch page.lru field once @page has been isolated
- * because VM is using the field.
- */
- if (!PageIsolated(page))
- list_del(&page->lru);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ set_page_private(page, 0);
+ /* PageOffline is sticky until the page is freed to the buddy. */
}
/*
- * balloon_page_device - get the b_dev_info descriptor for the balloon device
- * that enqueues the given page.
+ * balloon_page_push - insert a page into a page list.
+ * @pages : pointer to list
+ * @page : page to be added
+ *
+ * Caller must ensure the page is private and protect the list.
*/
-static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+static inline void balloon_page_push(struct list_head *pages, struct page *page)
{
- return (struct balloon_dev_info *)page_private(page);
+ list_add(&page->lru, pages);
}
-static inline gfp_t balloon_mapping_gfp_mask(void)
+/*
+ * balloon_page_pop - remove a page from a page list.
+ * @pages : pointer to list
+ *
+ * Caller must ensure the page is private and protect the list.
+ */
+static inline struct page *balloon_page_pop(struct list_head *pages)
{
- return GFP_HIGHUSER_MOVABLE;
-}
-
-#else /* !CONFIG_BALLOON_COMPACTION */
+ struct page *page = list_first_entry_or_null(pages, struct page, lru);
-static inline void balloon_page_insert(struct balloon_dev_info *balloon,
- struct page *page)
-{
- __SetPageBalloon(page);
- list_add(&page->lru, &balloon->pages);
-}
+ if (!page)
+ return NULL;
-static inline void balloon_page_delete(struct page *page)
-{
- __ClearPageBalloon(page);
list_del(&page->lru);
+ return page;
}
-
-static inline bool __is_movable_balloon_page(struct page *page)
-{
- return false;
-}
-
-static inline bool balloon_page_movable(struct page *page)
-{
- return false;
-}
-
-static inline bool isolated_balloon_page(struct page *page)
-{
- return false;
-}
-
-static inline bool balloon_page_isolate(struct page *page)
-{
- return false;
-}
-
-static inline void balloon_page_putback(struct page *page)
-{
- return;
-}
-
-static inline int balloon_page_migrate(struct page *newpage,
- struct page *page, enum migrate_mode mode)
-{
- return 0;
-}
-
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER;
-}
-
-#endif /* CONFIG_BALLOON_COMPACTION */
#endif /* _LINUX_BALLOON_COMPACTION_H */
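
A sketch of an inflate path using the list helpers above, assuming a driver-private notification step elsewhere: balloon_page_push() works on a private list with no locking, and balloon_page_list_enqueue() then moves the whole batch onto b_dev_info->pages under pages_lock.

    #include <linux/balloon_compaction.h>

    static void demo_inflate(struct balloon_dev_info *b_dev_info, size_t nr)
    {
            LIST_HEAD(pages);       /* private list, no locking needed yet */
            size_t i;

            for (i = 0; i < nr; i++) {
                    struct page *page = balloon_page_alloc();

                    if (!page)
                            break;
                    balloon_page_push(&pages, page);
            }

            /* takes pages_lock and calls balloon_page_insert() per page */
            balloon_page_list_enqueue(b_dev_info, &pages);
    }
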
diff --git a/include/linux/base64.h b/include/linux/base64.h
new file mode 100644
index 000000000000..a2c6c9222da3
--- /dev/null
+++ b/include/linux/base64.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64 encoding, lifted from fs/crypto/fname.c.
+ */
+
+#ifndef _LINUX_BASE64_H
+#define _LINUX_BASE64_H
+
+#include <linux/types.h>
+
+enum base64_variant {
+ BASE64_STD, /* RFC 4648 (standard) */
+ BASE64_URLSAFE, /* RFC 4648 (base64url) */
+ BASE64_IMAP, /* RFC 3501 */
+};
+
+#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
+int base64_encode(const u8 *src, int len, char *dst, bool padding, enum base64_variant variant);
+int base64_decode(const char *src, int len, u8 *dst, bool padding, enum base64_variant variant);
+
+#endif /* _LINUX_BASE64_H */
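
A usage sketch, assuming a 16-byte input. BASE64_CHARS() gives the unpadded output length (ceil(4n/3), i.e. 22 characters for 16 bytes), so this example disables padding; base64_encode() returns the number of characters written.

    #include <linux/base64.h>

    static void demo_b64(const u8 key[16])
    {
            char out[BASE64_CHARS(16) + 1];         /* 22 chars + NUL */
            int n;

            n = base64_encode(key, 16, out, false, BASE64_STD);
            out[n] = '\0';                          /* NUL-terminate for printing */
    }
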
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
index 18fff11fb3ea..abbc8149178e 100644
--- a/include/linux/bcd.h
+++ b/include/linux/bcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCD_H
#define _BCD_H
@@ -13,8 +14,12 @@
const_bin2bcd(x) : \
_bin2bcd(x))
+#define bcd_is_valid(x) \
+ const_bcd_is_valid(x)
+
#define const_bcd2bin(x) (((x) & 0x0f) + ((x) >> 4) * 10)
#define const_bin2bcd(x) ((((x) / 10) << 4) + (x) % 10)
+#define const_bcd_is_valid(x) (((x) & 0x0f) < 10 && ((x) >> 4) < 10)
unsigned _bcd2bin(unsigned char val) __attribute_const__;
unsigned char _bin2bcd(unsigned val) __attribute_const__;
diff --git a/include/linux/bch.h b/include/linux/bch.h
index 295b4ef153bb..85fdce83d4e2 100644
--- a/include/linux/bch.h
+++ b/include/linux/bch.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Generic binary BCH encoding/decoding library
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 51
- * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Copyright © 2011 Parrot S.A.
*
* Author: Ivan Djelic <ivan.djelic@parrot.com>
@@ -45,6 +33,7 @@
* @cache: log-based polynomial representation buffer
* @elp: error locator polynomial
* @poly_2t: temporary polynomials of degree 2t
+ * @swap_bits: swap bits within data and syndrome bytes
*/
struct bch_control {
unsigned int m;
@@ -63,16 +52,18 @@ struct bch_control {
int *cache;
struct gf_poly *elp;
struct gf_poly *poly_2t[4];
+ bool swap_bits;
};
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits);
-void free_bch(struct bch_control *bch);
+void bch_free(struct bch_control *bch);
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc);
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc);
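A sketch of the renamed API in a NAND-style setting. All values here (m=13, t=4, 512-byte sectors) are illustrative; prim_poly == 0 requests the library's default polynomial, and zero-initializing the ecc buffer reflects the encoder's accumulate (XOR) behaviour:

#include <linux/bch.h>
#include <linux/errno.h>
#include <linux/string.h>

static int demo_bch_roundtrip(const u8 *data, u8 *ecc, unsigned int ecc_bytes)
{
	struct bch_control *bch = bch_init(13, 4, 0, false);
	unsigned int errloc[4];	/* up to t = 4 error locations */
	int nerr;

	if (!bch)
		return -ENOMEM;

	/* The encoder XORs parity into @ecc, so start from zero. */
	memset(ecc, 0, ecc_bytes);
	bch_encode(bch, data, 512, ecc);

	/* On read-back, locate up to t bit errors in data + ecc. */
	nerr = bch_decode(bch, data, 512, ecc, NULL, NULL, errloc);

	bch_free(bch);
	return nerr;	/* >= 0: corrected bit errors; < 0: uncorrectable */
}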
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index a414a2b53e41..e4b6ce953ddb 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -1,8 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __BCM47XX_NVRAM_H
@@ -10,10 +7,10 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_BCM47XX_NVRAM
+int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start, size_t res_size);
int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
int bcm47xx_nvram_gpio_pin(const char *name);
@@ -23,6 +20,11 @@ static inline void bcm47xx_nvram_release_contents(char *nvram)
vfree(nvram);
};
#else
+static inline int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start,
+ size_t res_size)
+{
+ return -ENOTSUPP;
+}
static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
{
return -ENOTSUPP;
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
index c06b47c84e1a..40a7da3ef50e 100644
--- a/include/linux/bcm47xx_sprom.h
+++ b/include/linux/bcm47xx_sprom.h
@@ -1,20 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __BCM47XX_SPROM_H
#define __BCM47XX_SPROM_H
+#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
+struct ssb_sprom;
+
#ifdef CONFIG_BCM47XX_SPROM
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback);
int bcm47xx_sprom_register_fallbacks(void);
#else
+static inline void bcm47xx_fill_sprom(struct ssb_sprom *sprom,
+ const char *prefix,
+ bool fallback)
+{
+}
+
static inline int bcm47xx_sprom_register_fallbacks(void)
{
return -ENOTSUPP;
diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h
index 8d9d07ec22a5..fc9dcdb4b979 100644
--- a/include/linux/bcm47xx_wdt.h
+++ b/include/linux/bcm47xx_wdt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCM47XX_WDT_H_
#define LINUX_BCM47XX_WDT_H_
diff --git a/include/linux/bcm963xx_nvram.h b/include/linux/bcm963xx_nvram.h
index 290c231b8cf1..48830bf18042 100644
--- a/include/linux/bcm963xx_nvram.h
+++ b/include/linux/bcm963xx_nvram.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BCM963XX_NVRAM_H__
#define __LINUX_BCM963XX_NVRAM_H__
@@ -80,25 +81,21 @@ static int __maybe_unused bcm963xx_nvram_checksum(
const struct bcm963xx_nvram *nvram,
u32 *expected_out, u32 *actual_out)
{
+ const u32 zero = 0;
u32 expected, actual;
size_t len;
if (nvram->version <= 4) {
expected = nvram->checksum_v4;
- len = BCM963XX_NVRAM_V4_SIZE - sizeof(u32);
+ len = BCM963XX_NVRAM_V4_SIZE;
} else {
expected = nvram->checksum_v5;
- len = BCM963XX_NVRAM_V5_SIZE - sizeof(u32);
+ len = BCM963XX_NVRAM_V5_SIZE;
}
- /*
- * Calculate the CRC32 value for the nvram with a checksum value
- * of 0 without modifying or copying the nvram by combining:
- * - The CRC32 of the nvram without the checksum value
- * - The CRC32 of a zero checksum value (which is also 0)
- */
- actual = crc32_le_combine(
- crc32_le(~0, (u8 *)nvram, len), 0, sizeof(u32));
+ /* Calculate the CRC32 of the nvram with the checksum field set to 0. */
+ actual = crc32_le(~0, nvram, len - sizeof(u32));
+ actual = crc32_le(actual, &zero, sizeof(u32));
if (expected_out)
*expected_out = expected;
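The rewritten checksum above drops crc32_le_combine() in favour of feeding an explicit zero word; the two forms are equivalent because the CRC of zero bytes continued from a zero CRC is itself zero. Side by side (hypothetical helper names, same layout assumption as the struct: payload followed by a 32-bit checksum field counted in len):

#include <linux/crc32.h>

static u32 demo_crc_combine(const void *nvram, size_t len)
{
	/* Old form: CRC of the payload, extended by 4 zero bytes. */
	return crc32_le_combine(crc32_le(~0, nvram, len - sizeof(u32)),
				0, sizeof(u32));
}

static u32 demo_crc_direct(const void *nvram, size_t len)
{
	const u32 zero = 0;
	u32 crc = crc32_le(~0, nvram, len - sizeof(u32));

	/* New form: feed the zeroed checksum field directly. */
	return crc32_le(crc, (const u8 *)&zero, sizeof(u32));
}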
diff --git a/include/linux/bcm963xx_tag.h b/include/linux/bcm963xx_tag.h
index 161c7b37a77b..7edb809a2586 100644
--- a/include/linux/bcm963xx_tag.h
+++ b/include/linux/bcm963xx_tag.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BCM963XX_TAG_H__
#define __LINUX_BCM963XX_TAG_H__
@@ -83,7 +84,7 @@ struct bcm_tag {
char flash_layout_ver[FLASHLAYOUTVER_LEN];
/* 196-199: kernel+rootfs CRC32 */
__u32 fskernel_crc;
- /* 200-215: Unused except on Alice Gate where is is information */
+ /* 200-215: Unused except on Alice Gate where it is information */
char information2[TAGINFO2_LEN];
/* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
__u32 image_crc;
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 8eeedb2db924..60b94b944e9f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_H_
#define LINUX_BCMA_H_
@@ -331,6 +332,8 @@ extern int bcma_arch_register_fallback_sprom(
struct ssb_sprom *out));
struct bcma_bus {
+ struct device *dev;
+
/* The MMIO area. */
void __iomem *mmio;
@@ -338,14 +341,7 @@ struct bcma_bus {
enum bcma_hosttype hosttype;
bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */
- union {
- /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */
- struct pci_dev *host_pci;
- /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
- struct sdio_func *host_sdio;
- /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
- struct platform_device *host_pdev;
- };
+ struct pci_dev *host_pci; /* PCI bus pointer (BCMA_HOSTTYPE_PCI only) */
struct bcma_chipinfo chipinfo;
diff --git a/include/linux/bcma/bcma_driver_arm_c9.h b/include/linux/bcma/bcma_driver_arm_c9.h
index 93bd73d670d5..688cf590c99b 100644
--- a/include/linux/bcma/bcma_driver_arm_c9.h
+++ b/include/linux/bcma/bcma_driver_arm_c9.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_ARM_C9_H_
#define LINUX_BCMA_DRIVER_ARM_C9_H_
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 2f1c690a3e66..0cb6638b55e5 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_CC_H_
#define LINUX_BCMA_DRIVER_CC_H_
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/platform_data/brcmnand.h>
+#include <linux/gpio/driver.h>
/** ChipCommon core registers. **/
#define BCMA_CC_ID 0x0000
@@ -269,6 +271,7 @@
#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000
#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000
#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010
+#define BCMA_CC_SROM_CONTROL_OTP_PRESENT 0x00000020
#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008
#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006
#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000
@@ -598,6 +601,10 @@ struct bcma_sflash {
#ifdef CONFIG_BCMA_NFLASH
struct bcma_nflash {
+ /* Must be the first member so that the brcmnand driver can
+ * dereference this structure.
+ */
+ struct brcmnand_platform_data brcmnand_info;
bool present;
bool boot; /* This is the flash the SoC boots from */
};
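The comment above relies on a C guarantee: a pointer to a structure points to its first member, so the brcmnand driver can reinterpret the platform-data pointer it is handed. A standalone userspace illustration (all names invented):

#include <assert.h>
#include <stddef.h>

struct platform_data { int chip_select; };

struct nflash {
	struct platform_data pdata;	/* must stay first */
	int present;
};

int main(void)
{
	struct nflash nf = { .pdata = { .chip_select = 1 } };
	/* Valid only because pdata is at offset 0 (C11 6.7.2.1p15). */
	struct platform_data *pd = (struct platform_data *)&nf;

	assert(offsetof(struct nflash, pdata) == 0);
	return pd->chip_select == 1 ? 0 : 1;
}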
diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h
index 4354d4ea6713..420e222d7a22 100644
--- a/include/linux/bcma/bcma_driver_gmac_cmn.h
+++ b/include/linux/bcma/bcma_driver_gmac_cmn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_GMAC_CMN_H_
#define LINUX_BCMA_DRIVER_GMAC_CMN_H_
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index 8eea7f9e33b4..798013fab54f 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_MIPS_H_
#define LINUX_BCMA_DRIVER_MIPS_H_
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index bca6a5e4ca3d..dba41b65ae0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_PCI_H_
#define LINUX_BCMA_DRIVER_PCI_H_
@@ -202,7 +203,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
/* PCIE Root Capability Register bits (Host mode only) */
-#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
+#define BCMA_CORE_PCI_RC_RRS_VISIBILITY 0x0001
struct bcma_drv_pci;
struct bcma_bus;
diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h
index 31e6d17ab798..91ce515e3a77 100644
--- a/include/linux/bcma/bcma_driver_pcie2.h
+++ b/include/linux/bcma/bcma_driver_pcie2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_PCIE2_H_
#define LINUX_BCMA_DRIVER_PCIE2_H_
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 9986f8288d01..944105cbd671 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_REGS_H_
#define LINUX_BCMA_REGS_H_
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 1b5fc0c3b1b5..f3c43519baa7 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_SOC_H_
#define LINUX_BCMA_SOC_H_
@@ -5,6 +6,7 @@
struct bcma_soc {
struct bcma_bus bus;
+ struct device *dev;
};
int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h
deleted file mode 100644
index a69554ef8476..000000000000
--- a/include/linux/bfin_mac.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Blackfin On-Chip MAC Driver
- *
- * Copyright 2004-2010 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _LINUX_BFIN_MAC_H_
-#define _LINUX_BFIN_MAC_H_
-
-#include <linux/phy.h>
-
-struct bfin_phydev_platform_data {
- unsigned short addr;
- int irq;
-};
-
-struct bfin_mii_bus_platform_data {
- int phydev_number;
- struct bfin_phydev_platform_data *phydev_data;
- const unsigned short *mac_peripherals;
- int phy_mode;
- unsigned int phy_mask;
- unsigned short vlan1_mask, vlan2_mask;
-};
-
-#endif
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 3ae9013eeaaa..65abd5ab8836 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BINFMTS_H
#define _LINUX_BINFMTS_H
@@ -7,6 +8,7 @@
#include <uapi/linux/binfmts.h>
struct filename;
+struct coredump_params;
#define CORENAME_MAX_SIZE 128
@@ -14,10 +16,10 @@ struct filename;
* This structure is used to hold the arguments that are used when loading binaries.
*/
struct linux_binprm {
- char buf[BINPRM_BUF_SIZE];
#ifdef CONFIG_MMU
struct vm_area_struct *vma;
unsigned long vma_pages;
+ unsigned long argmin; /* rlimit marker for copy_strings() */
#else
# define MAX_ARG_PAGES 32
struct page *page[MAX_ARG_PAGES];
@@ -25,50 +27,60 @@ struct linux_binprm {
struct mm_struct *mm;
unsigned long p; /* current top of mem */
unsigned int
- cred_prepared:1,/* true if creds already prepared (multiple
- * preps happen for interpreters) */
- cap_effective:1;/* true if has elevated effective capabilities,
- * false if not; except for init which inherits
- * its parent's caps anyway */
-#ifdef __alpha__
- unsigned int taso:1;
-#endif
- unsigned int recursion_depth; /* only for search_binary_handler() */
- struct file * file;
+ /* Should an execfd be passed to userspace? */
+ have_execfd:1,
+
+ /* Use the creds of a script (see binfmt_misc) */
+ execfd_creds:1,
+ /*
+ * Set by bprm_creds_for_exec hook to indicate a
+ * privilege-gaining exec has happened. Used to set
+ * AT_SECURE auxv for glibc.
+ */
+ secureexec:1,
+ /*
+ * Set when errors can no longer be returned to the
+ * original userspace.
+ */
+ point_of_no_return:1,
+ /* Set when "comm" must come from the dentry. */
+ comm_from_dentry:1,
+ /*
+ * Set by user space to check executability according to the
+ * caller's environment.
+ */
+ is_check:1;
+ struct file *executable; /* Executable to pass to the interpreter */
+ struct file *interpreter;
+ struct file *file;
struct cred *cred; /* new credentials */
int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
unsigned int per_clear; /* bits to clear in current->personality */
int argc, envc;
- const char * filename; /* Name of binary as seen by procps */
- const char * interp; /* Name of the binary really executed. Most
+ const char *filename; /* Name of binary as seen by procps */
+ const char *interp; /* Name of the binary really executed. Most
of the time same as filename, but could be
different for binfmt_{misc,script} */
+ const char *fdpath; /* generated filename for execveat */
unsigned interp_flags;
- unsigned interp_data;
- unsigned long loader, exec;
+ int execfd; /* File descriptor of the executable */
+ unsigned long exec;
+
+ struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
+
+ char buf[BINPRM_BUF_SIZE];
} __randomize_layout;
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
-/* fd of the binary should be passed to the interpreter */
-#define BINPRM_FLAGS_EXECFD_BIT 1
-#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
-
/* filename of the binary will be inaccessible after exec */
#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
-/* Function parameter for binfmt->coredump */
-struct coredump_params {
- const siginfo_t *siginfo;
- struct pt_regs *regs;
- struct file *file;
- unsigned long limit;
- unsigned long mm_flags;
- loff_t written;
- loff_t pos;
-};
+/* preserve argv0 for the interpreter */
+#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3
+#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT)
/*
* This structure defines the functions that are used to load the binary formats that
@@ -78,11 +90,22 @@ struct linux_binfmt {
struct list_head lh;
struct module *module;
int (*load_binary)(struct linux_binprm *);
- int (*load_shlib)(struct file *);
+#ifdef CONFIG_COREDUMP
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
+#endif
+} __randomize_layout;
+
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+struct binfmt_misc {
+ struct list_head entries;
+ rwlock_t entries_lock;
+ bool enabled;
} __randomize_layout;
+extern struct binfmt_misc init_binfmt_misc;
+#endif
+
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
/* Registration of default binfmt handlers */
@@ -98,11 +121,10 @@ static inline void insert_binfmt(struct linux_binfmt *fmt)
extern void unregister_binfmt(struct linux_binfmt *);
-extern int prepare_binprm(struct linux_binprm *);
extern int __must_check remove_arg_zero(struct linux_binprm *);
-extern int search_binary_handler(struct linux_binprm *);
-extern int flush_old_exec(struct linux_binprm * bprm);
+extern int begin_new_exec(struct linux_binprm * bprm);
extern void setup_new_exec(struct linux_binprm * bprm);
+extern void finalize_exec(struct linux_binprm *bprm);
extern void would_dump(struct linux_binprm *, struct file *);
extern int suid_dumpable;
@@ -117,20 +139,12 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
int executable_stack);
extern int transfer_args_to_stack(struct linux_binprm *bprm,
unsigned long *sp_location);
-extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
-extern int copy_strings_kernel(int argc, const char *const *argv,
- struct linux_binprm *bprm);
-extern int prepare_bprm_creds(struct linux_binprm *bprm);
-extern void install_exec_creds(struct linux_binprm *bprm);
+extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
+int copy_string_kernel(const char *arg, struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
-extern int do_execve(struct filename *,
- const char __user * const __user *,
- const char __user * const __user *);
-extern int do_execveat(int, struct filename *,
- const char __user * const __user *,
- const char __user * const __user *,
- int);
+int kernel_execve(const char *filename,
+ const char *const *argv, const char *const *envp);
#endif /* _LINUX_BINFMTS_H */
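With the reshuffled struct linux_binfmt above (core_dump now behind CONFIG_COREDUMP), a minimal format handler registers like this. This is a hypothetical module sketch, not an existing loader:

#include <linux/binfmts.h>
#include <linux/module.h>
#include <linux/string.h>

static int demo_load_binary(struct linux_binprm *bprm)
{
	/* A real loader parses bprm->buf; this sketch always declines,
	 * so the core moves on to the next registered format. */
	if (memcmp(bprm->buf, "DEMO", 4) != 0)
		return -ENOEXEC;
	/* ...would call begin_new_exec()/setup_new_exec() here... */
	return -ENOEXEC;
}

static struct linux_binfmt demo_format = {
	.module		= THIS_MODULE,
	.load_binary	= demo_load_binary,
};

static int __init demo_init(void)
{
	register_binfmt(&demo_format);
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_binfmt(&demo_format);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("binfmt registration sketch");
MODULE_LICENSE("GPL");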
diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h
new file mode 100644
index 000000000000..21e4652dcfd2
--- /dev/null
+++ b/include/linux/bio-integrity.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BIO_INTEGRITY_H
+#define _LINUX_BIO_INTEGRITY_H
+
+#include <linux/bio.h>
+
+enum bip_flags {
+ BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
+ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
+ BIP_DISK_NOCHECK = 1 << 2, /* disable disk integrity checking */
+ BIP_IP_CHECKSUM = 1 << 3, /* IP checksum */
+ BIP_COPY_USER = 1 << 4, /* Kernel bounce buffer in use */
+ BIP_CHECK_GUARD = 1 << 5, /* guard check */
+ BIP_CHECK_REFTAG = 1 << 6, /* reftag check */
+ BIP_CHECK_APPTAG = 1 << 7, /* apptag check */
+
+ BIP_MEMPOOL = 1 << 15, /* buffer backed by mempool */
+};
+
+struct bio_integrity_payload {
+ struct bvec_iter bip_iter;
+
+ unsigned short bip_vcnt; /* # of integrity bio_vecs */
+ unsigned short bip_max_vcnt; /* integrity bio_vec slots */
+ unsigned short bip_flags; /* control flags */
+ u16 app_tag; /* application tag value */
+
+ struct bio_vec *bip_vec;
+};
+
+#define BIP_CLONE_FLAGS (BIP_MAPPED_INTEGRITY | BIP_IP_CHECKSUM | \
+ BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG)
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+
+#define bip_for_each_vec(bvl, bip, iter) \
+ for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
+
+#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
+ for_each_bio(_bio) \
+ bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+ if (bio->bi_opf & REQ_INTEGRITY)
+ return bio->bi_integrity;
+
+ return NULL;
+}
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+
+ if (bip)
+ return bip->bip_flags & flag;
+
+ return false;
+}
+
+static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
+{
+ return bip->bip_iter.bi_sector;
+}
+
+static inline void bip_set_seed(struct bio_integrity_payload *bip,
+ sector_t seed)
+{
+ bip->bip_iter.bi_sector = seed;
+}
+
+void bio_integrity_init(struct bio *bio, struct bio_integrity_payload *bip,
+ struct bio_vec *bvecs, unsigned int nr_vecs);
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
+ unsigned int nr);
+int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
+ unsigned int offset);
+int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter);
+int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta);
+void bio_integrity_unmap_user(struct bio *bio);
+bool bio_integrity_prep(struct bio *bio);
+void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
+void bio_integrity_trim(struct bio *bio);
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+ return NULL;
+}
+
+static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
+{
+ return -EINVAL;
+}
+
+static inline int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
+{
+ return -EINVAL;
+}
+
+static inline void bio_integrity_unmap_user(struct bio *bio)
+{
+}
+
+static inline bool bio_integrity_prep(struct bio *bio)
+{
+ return true;
+}
+
+static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline void bio_integrity_advance(struct bio *bio,
+ unsigned int bytes_done)
+{
+}
+
+static inline void bio_integrity_trim(struct bio *bio)
+{
+}
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ return false;
+}
+
+static inline struct bio_integrity_payload *
+bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ return 0;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer);
+void bio_integrity_free_buf(struct bio_integrity_payload *bip);
+
+#endif /* _LINUX_BIO_INTEGRITY_H */
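A quick consumer of the interface above. The helper is hypothetical and exercises only bio_integrity() and bio_integrity_flagged() as declared in the new header:

#include <linux/bio-integrity.h>

/* True when @bio carries integrity data with guard-tag checking on. */
static bool demo_bio_has_guarded_pi(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (!bip)	/* REQ_INTEGRITY not set on this bio */
		return false;
	return bio_integrity_flagged(bio, BIP_CHECK_GUARD);
}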
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1f0720de8990..ad2d57908c1c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -1,55 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
*/
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
-#include <linux/highmem.h>
#include <linux/mempool.h>
-#include <linux/ioprio.h>
-#include <linux/bug.h>
-
-#ifdef CONFIG_BLOCK
-
-#include <asm/io.h>
-
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
+#include <linux/uio.h>
-#define BIO_DEBUG
-
-#ifdef BIO_DEBUG
-#define BIO_BUG_ON BUG_ON
-#else
-#define BIO_BUG_ON
-#endif
+#define BIO_MAX_VECS 256U
+#define BIO_MAX_INLINE_VECS UIO_MAXIOV
-#ifdef CONFIG_THP_SWAP
-#if HPAGE_PMD_NR > 256
-#define BIO_MAX_PAGES HPAGE_PMD_NR
-#else
-#define BIO_MAX_PAGES 256
-#endif
-#else
-#define BIO_MAX_PAGES 256
-#endif
+struct queue_limits;
-#define bio_prio(bio) (bio)->bi_ioprio
-#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
+static inline unsigned int bio_max_segs(unsigned int nr_segs)
+{
+ return min(nr_segs, BIO_MAX_VECS);
+}
#define bio_iter_iovec(bio, iter) \
bvec_iter_bvec((bio)->bi_io_vec, (iter))
@@ -65,10 +34,11 @@
#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
-#define bio_multiple_segments(bio) \
- ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
-#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
-#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
+#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
+
+#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)
+#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)
/*
* Return the data direction, READ or WRITE.
@@ -91,30 +61,13 @@ static inline bool bio_has_data(struct bio *bio)
return false;
}
-static inline bool bio_no_advance_iter(struct bio *bio)
+static inline bool bio_no_advance_iter(const struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
bio_op(bio) == REQ_OP_SECURE_ERASE ||
- bio_op(bio) == REQ_OP_WRITE_SAME ||
bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
-static inline bool bio_mergeable(struct bio *bio)
-{
- if (bio->bi_opf & REQ_NOMERGE_FLAGS)
- return false;
-
- return true;
-}
-
-static inline unsigned int bio_cur_bytes(struct bio *bio)
-{
- if (bio_has_data(bio))
- return bio_iovec(bio).bv_len;
- else /* dataless requests such as discard */
- return bio->bi_iter.bi_size;
-}
-
static inline void *bio_data(struct bio *bio)
{
if (bio_has_data(bio))
@@ -123,88 +76,97 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
-/*
- * will die
- */
-#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
-
-/*
- * queues that have highmem support enabled may still need to revert to
- * PIO transfers occasionally and thus map high pages temporarily. For
- * permanent PIO fall back, user is probably better off disabling highmem
- * I/O completely on that queue (see ide-dma for example)
- */
-#define __bio_kmap_atomic(bio, iter) \
- (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
- bio_iter_iovec((bio), (iter)).bv_offset)
-
-#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
-
-/*
- * merge helpers etc
- */
-
-/* Default implementation of BIOVEC_PHYS_MERGEABLE */
-#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
- * allow arch override, for eg virtualized architectures (put in asm/io.h)
- */
-#ifndef BIOVEC_PHYS_MERGEABLE
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
-#endif
+static inline bool bio_next_segment(const struct bio *bio,
+ struct bvec_iter_all *iter)
+{
+ if (iter->idx >= bio->bi_vcnt)
+ return false;
-#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
- (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
-#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
+ bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+ return true;
+}
/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
-#define bio_for_each_segment_all(bvl, bio, i) \
- for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+#define bio_for_each_segment_all(bvl, bio, iter) \
+ for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
-static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
- unsigned bytes)
+static inline void bio_advance_iter(const struct bio *bio,
+ struct bvec_iter *iter, unsigned int bytes)
{
iter->bi_sector += bytes >> 9;
- if (bio_no_advance_iter(bio)) {
+ if (bio_no_advance_iter(bio))
iter->bi_size -= bytes;
- iter->bi_done += bytes;
- } else {
+ else
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
/* TODO: It is reasonable to complete bio with error here. */
- }
}
-static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
- unsigned int bytes)
+/* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
+static inline void bio_advance_iter_single(const struct bio *bio,
+ struct bvec_iter *iter,
+ unsigned int bytes)
{
- iter->bi_sector -= bytes >> 9;
+ iter->bi_sector += bytes >> 9;
- if (bio_no_advance_iter(bio)) {
- iter->bi_size += bytes;
- iter->bi_done -= bytes;
- return true;
- }
+ if (bio_no_advance_iter(bio))
+ iter->bi_size -= bytes;
+ else
+ bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
+}
+
+void __bio_advance(struct bio *, unsigned bytes);
- return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio: bio to advance
+ * @nbytes: number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+static inline void bio_advance(struct bio *bio, unsigned int nbytes)
+{
+ if (nbytes == bio->bi_iter.bi_size) {
+ bio->bi_iter.bi_size = 0;
+ return;
+ }
+ __bio_advance(bio, nbytes);
}
#define __bio_for_each_segment(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bio_iter_iovec((bio), (iter))), 1); \
- bio_advance_iter((bio), &(iter), (bvl).bv_len))
+ bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
#define bio_for_each_segment(bvl, bio, iter) \
__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+#define __bio_for_each_bvec(bvl, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
+ bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
+
+/* iterate over multi-page bvec */
+#define bio_for_each_bvec(bvl, bio, iter) \
+ __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
+
+/*
+ * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
+ * same reasons as bio_for_each_segment_all().
+ */
+#define bio_for_each_bvec_all(bvl, bio, i) \
+ for (i = 0, bvl = bio_first_bvec_all(bio); \
+ i < (bio)->bi_vcnt; i++, bvl++)
+
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
static inline unsigned bio_segments(struct bio *bio)
@@ -223,8 +185,6 @@ static inline unsigned bio_segments(struct bio *bio)
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
return 0;
- case REQ_OP_WRITE_SAME:
- return 1;
default:
break;
}
@@ -260,14 +220,14 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
if (count != 1) {
bio->bi_flags |= (1 << BIO_REFFED);
- smp_mb__before_atomic();
+ smp_mb();
}
atomic_set(&bio->__bi_cnt, count);
}
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
- return (bio->bi_flags & (1U << bit)) != 0;
+ return bio->bi_flags & (1U << bit);
}
static inline void bio_set_flag(struct bio *bio, unsigned int bit)
@@ -280,101 +240,92 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio->bi_flags &= ~(1U << bit);
}
-static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
- *bv = bio_iovec(bio);
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_io_vec;
}
-static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+static inline struct page *bio_first_page_all(struct bio *bio)
{
- struct bvec_iter iter = bio->bi_iter;
- int idx;
-
- if (unlikely(!bio_multiple_segments(bio))) {
- *bv = bio_iovec(bio);
- return;
- }
-
- bio_advance_iter(bio, &iter, iter.bi_size);
-
- if (!iter.bi_bvec_done)
- idx = iter.bi_idx - 1;
- else /* in the middle of bvec */
- idx = iter.bi_idx;
-
- *bv = bio->bi_io_vec[idx];
+ return bio_first_bvec_all(bio)->bv_page;
+}
- /*
- * iter.bi_bvec_done records actual length of the last bvec
- * if this bio ends in the middle of one io vector
- */
- if (iter.bi_bvec_done)
- bv->bv_len = iter.bi_bvec_done;
+static inline struct folio *bio_first_folio_all(struct bio *bio)
+{
+ return page_folio(bio_first_page_all(bio));
}
-enum bip_flags {
- BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
- BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
- BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
- BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
- BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
-};
+static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return &bio->bi_io_vec[bio->bi_vcnt - 1];
+}
-/*
- * bio integrity payload
+/**
+ * struct folio_iter - State for iterating all folios in a bio.
+ * @folio: The current folio we're iterating. NULL after the last folio.
+ * @offset: The byte offset within the current folio.
+ * @length: The number of bytes in this iteration (will not cross folio
+ * boundary).
*/
-struct bio_integrity_payload {
- struct bio *bip_bio; /* parent bio */
-
- struct bvec_iter bip_iter;
-
- unsigned short bip_slab; /* slab the bip came from */
- unsigned short bip_vcnt; /* # of integrity bio_vecs */
- unsigned short bip_max_vcnt; /* integrity bio_vec slots */
- unsigned short bip_flags; /* control flags */
-
- struct work_struct bip_work; /* I/O completion */
-
- struct bio_vec *bip_vec;
- struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
+struct folio_iter {
+ struct folio *folio;
+ size_t offset;
+ size_t length;
+ /* private: for use by the iterator */
+ struct folio *_next;
+ size_t _seg_count;
+ int _i;
};
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ int i)
{
- if (bio->bi_opf & REQ_INTEGRITY)
- return bio->bi_integrity;
+ struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
- return NULL;
-}
-
-static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
-{
- struct bio_integrity_payload *bip = bio_integrity(bio);
-
- if (bip)
- return bip->bip_flags & flag;
-
- return false;
-}
+ if (unlikely(i >= bio->bi_vcnt)) {
+ fi->folio = NULL;
+ return;
+ }
-static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
-{
- return bip->bip_iter.bi_sector;
+ fi->folio = page_folio(bvec->bv_page);
+ fi->offset = bvec->bv_offset +
+ PAGE_SIZE * folio_page_idx(fi->folio, bvec->bv_page);
+ fi->_seg_count = bvec->bv_len;
+ fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ fi->_i = i;
}
-static inline void bip_set_seed(struct bio_integrity_payload *bip,
- sector_t seed)
+static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
- bip->bip_iter.bi_sector = seed;
+ fi->_seg_count -= fi->length;
+ if (fi->_seg_count) {
+ fi->folio = fi->_next;
+ fi->offset = 0;
+ fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ } else {
+ bio_first_folio(fi, bio, fi->_i + 1);
+ }
}
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+/**
+ * bio_for_each_folio_all - Iterate over each folio in a bio.
+ * @fi: struct folio_iter which is updated for each folio.
+ * @bio: struct bio to iterate over.
+ */
+#define bio_for_each_folio_all(fi, bio) \
+ for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
-extern void bio_trim(struct bio *bio, int offset, int size);
+void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
+int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes, unsigned len_align);
+u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
+ u8 gaps_bit);
/**
* bio_next_split - get next @sectors from a bio, splitting if necessary
@@ -383,7 +334,7 @@ extern struct bio *bio_split(struct bio *bio, int sectors,
* @gfp: gfp mask
* @bs: bio set to allocate from
*
- * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * Return: a bio representing the next @sectors of @bio - if the bio is smaller
* than @sectors, returns the original bio unchanged.
*/
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
@@ -395,40 +346,35 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
return bio_split(bio, sectors, gfp, bs);
}
-extern struct bio_set *bioset_create(unsigned int, unsigned int, int flags);
enum {
BIOSET_NEED_BVECS = BIT(0),
BIOSET_NEED_RESCUER = BIT(1),
+ BIOSET_PERCPU_CACHE = BIT(2),
};
-extern void bioset_free(struct bio_set *);
-extern mempool_t *biovec_create_pool(int pool_entries);
-
-extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
+extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
+extern void bioset_exit(struct bio_set *);
+extern int biovec_init_pool(mempool_t *pool, int pool_entries);
+
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
+ blk_opf_t opf, gfp_t gfp_mask,
+ struct bio_set *bs);
+struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);
-extern void __bio_clone_fast(struct bio *, struct bio *);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
-
-extern struct bio_set *fs_bio_set;
-
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-}
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+ gfp_t gfp, struct bio_set *bs);
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+ struct bio *bio_src, gfp_t gfp);
-static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
-}
+extern struct bio_set fs_bio_set;
-static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
+static inline struct bio *bio_alloc(struct block_device *bdev,
+ unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
- return bio_clone_bioset(bio, gfp_mask, NULL);
-
+ return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
-extern blk_qc_t submit_bio(struct bio *);
+void submit_bio(struct bio *bio);
extern void bio_endio(struct bio *);
@@ -440,132 +386,124 @@ static inline void bio_io_error(struct bio *bio)
static inline void bio_wouldblock_error(struct bio *bio)
{
+ bio_set_flag(bio, BIO_QUIET);
bio->bi_status = BLK_STS_AGAIN;
bio_endio(bio);
}
-struct request_queue;
-extern int bio_phys_segments(struct request_queue *, struct bio *);
+/*
+ * Calculate number of bvec segments that should be allocated to fit data
+ * pointed by @iter. If @iter is backed by bvec it's going to be reused
+ * instead of allocating a new one.
+ */
+static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
+{
+ if (iov_iter_is_bvec(iter))
+ return 0;
+ return iov_iter_npages(iter, max_segs);
+}
-extern int submit_bio_wait(struct bio *bio);
-extern void bio_advance(struct bio *, unsigned);
+struct request_queue;
-extern void bio_init(struct bio *bio, struct bio_vec *table,
- unsigned short max_vecs);
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+ unsigned short max_vecs, blk_opf_t opf);
+static inline void bio_init_inline(struct bio *bio, struct block_device *bdev,
+ unsigned short max_vecs, blk_opf_t opf)
+{
+ bio_init(bio, bdev, bio_inline_vecs(bio), max_vecs, opf);
+}
extern void bio_uninit(struct bio *);
-extern void bio_reset(struct bio *);
+void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
-extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
-extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
- unsigned int, unsigned int);
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-struct rq_map_data;
-extern struct bio *bio_map_user_iov(struct request_queue *,
- const struct iov_iter *, gfp_t);
-extern void bio_unmap_user(struct bio *);
-extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
- gfp_t);
-extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
- gfp_t, int);
-extern void bio_set_pages_dirty(struct bio *bio);
-extern void bio_check_pages_dirty(struct bio *bio);
-
-void generic_start_io_acct(int rw, unsigned long sectors,
- struct hd_struct *part);
-void generic_end_io_acct(int rw, struct hd_struct *part,
- unsigned long start_time);
+int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
+ unsigned off);
+bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
+ size_t len, size_t off);
+void __bio_add_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int off);
+void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
+ size_t off);
+void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len);
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void bio_flush_dcache_pages(struct bio *bi);
-#else
-static inline void bio_flush_dcache_pages(struct bio *bi)
+/**
+ * bio_add_max_vecs - number of bio_vecs needed to add data to a bio
+ * @kaddr: kernel virtual address to add
+ * @len: length in bytes to add
+ *
+ * Calculate how many bio_vecs need to be allocated to add the kernel virtual
+ * address range in [@kaddr:@len] in the worst case.
+ */
+static inline unsigned int bio_add_max_vecs(void *kaddr, unsigned int len)
{
+ if (is_vmalloc_addr(kaddr))
+ return DIV_ROUND_UP(offset_in_page(kaddr) + len, PAGE_SIZE);
+ return 1;
}
-#endif
-
-extern void bio_copy_data(struct bio *dst, struct bio *src);
-extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
-extern void bio_free_pages(struct bio *bio);
-extern struct bio *bio_copy_user_iov(struct request_queue *,
- struct rq_map_data *,
- const struct iov_iter *,
- gfp_t);
-extern int bio_uncopy_user(struct bio *);
-void zero_fill_bio(struct bio *bio);
-extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
-extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
-extern unsigned int bvec_nr_vecs(unsigned short idx);
+unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len);
+bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len);
-#ifdef CONFIG_BLK_CGROUP
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
-int bio_associate_current(struct bio *bio);
-void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
-#else /* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkcg(struct bio *bio,
- struct cgroup_subsys_state *blkcg_css) { return 0; }
-static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
-static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
- struct bio *src) { }
-#endif /* CONFIG_BLK_CGROUP */
+int submit_bio_wait(struct bio *bio);
+int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
+ size_t len, enum req_op op);
-#ifdef CONFIG_HIGHMEM
-/*
- * remember never ever reenable interrupts between a bvec_kmap_irq and
- * bvec_kunmap_irq!
- */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
- unsigned long addr;
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
+ unsigned len_align_mask);
- /*
- * might not be a highmem page, but the preempt/irq count
- * balancing is a lot nicer this way
- */
- local_irq_save(*flags);
- addr = (unsigned long) kmap_atomic(bvec->bv_page);
-
- BUG_ON(addr & ~PAGE_MASK);
+void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
+void __bio_release_pages(struct bio *bio, bool mark_dirty);
+extern void bio_set_pages_dirty(struct bio *bio);
+extern void bio_check_pages_dirty(struct bio *bio);
- return (char *) addr + bvec->bv_offset;
-}
+extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
+ struct bio *src, struct bvec_iter *src_iter);
+extern void bio_copy_data(struct bio *dst, struct bio *src);
+extern void bio_free_pages(struct bio *bio);
+void guard_bio_eod(struct bio *bio);
+void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static inline void zero_fill_bio(struct bio *bio)
{
- unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
- kunmap_atomic((void *) ptr);
- local_irq_restore(*flags);
+ zero_fill_bio_iter(bio, bio->bi_iter);
}
-#else
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
- return page_address(bvec->bv_page) + bvec->bv_offset;
+ if (bio_flagged(bio, BIO_PAGE_PINNED))
+ __bio_release_pages(bio, mark_dirty);
}
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+#define bio_dev(bio) \
+ disk_devt((bio)->bi_bdev->bd_disk)
+
+#ifdef CONFIG_BLK_CGROUP
+void bio_associate_blkg(struct bio *bio);
+void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
+void blkcg_punt_bio_submit(struct bio *bio);
+#else /* CONFIG_BLK_CGROUP */
+static inline void bio_associate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{ }
+static inline void bio_clone_blkg_association(struct bio *dst,
+ struct bio *src) { }
+static inline void blkcg_punt_bio_submit(struct bio *bio)
{
- *flags = 0;
+ submit_bio(bio);
}
-#endif
+#endif /* CONFIG_BLK_CGROUP */
-static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
- unsigned long *flags)
+static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
- return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
+ bio_clear_flag(bio, BIO_REMAPPED);
+ if (bio->bi_bdev != bdev)
+ bio_clear_flag(bio, BIO_BPS_THROTTLED);
+ bio->bi_bdev = bdev;
+ bio_associate_blkg(bio);
}
-#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)
-
-#define bio_kmap_irq(bio, flags) \
- __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
-#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
@@ -640,6 +578,13 @@ static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
bl->tail = bl2->tail;
}
+static inline void bio_list_merge_init(struct bio_list *bl,
+ struct bio_list *bl2)
+{
+ bio_list_merge(bl, bl2);
+ bio_list_init(bl2);
+}
+
static inline void bio_list_merge_head(struct bio_list *bl,
struct bio_list *bl2)
{
@@ -706,13 +651,15 @@ struct bio_set {
struct kmem_cache *bio_slab;
unsigned int front_pad;
- mempool_t *bio_pool;
- mempool_t *bvec_pool;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- mempool_t *bio_integrity_pool;
- mempool_t *bvec_integrity_pool;
-#endif
+ /*
+ * per-cpu bio alloc cache
+ */
+ struct bio_alloc_cache __percpu *cache;
+
+ mempool_t bio_pool;
+ mempool_t bvec_pool;
+ unsigned int back_pad;
/*
* Deadlock avoidance for stacking block drivers: see comments in
* bio_alloc_bioset() for details
@@ -721,101 +668,59 @@ struct bio_set {
struct bio_list rescue_list;
struct work_struct rescue_work;
struct workqueue_struct *rescue_workqueue;
-};
-struct biovec_slab {
- int nr_vecs;
- char *name;
- struct kmem_cache *slab;
+ /*
+ * Hot un-plug notifier for the per-cpu cache, if used
+ */
+ struct hlist_node cpuhp_dead;
};
-/*
- * a small number of entries is fine, not going to be performance critical.
- * basically we just need to survive
- */
-#define BIO_SPLIT_ENTRIES 2
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-#define bip_for_each_vec(bvl, bip, iter) \
- for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
-
-#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
- for_each_bio(_bio) \
- bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
-
-extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
-extern bool bio_integrity_prep(struct bio *);
-extern void bio_integrity_advance(struct bio *, unsigned int);
-extern void bio_integrity_trim(struct bio *);
-extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
-extern int bioset_integrity_create(struct bio_set *, int);
-extern void bioset_integrity_free(struct bio_set *);
-extern void bio_integrity_init(void);
-
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-
-static inline void *bio_integrity(struct bio *bio)
+static inline bool bioset_initialized(struct bio_set *bs)
{
- return NULL;
-}
-
-static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
-{
- return 0;
+ return bs->bio_slab != NULL;
}
-static inline void bioset_integrity_free (struct bio_set *bs)
-{
- return;
-}
-
-static inline bool bio_integrity_prep(struct bio *bio)
-{
- return true;
-}
-
-static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
- gfp_t gfp_mask)
-{
- return 0;
-}
-
-static inline void bio_integrity_advance(struct bio *bio,
- unsigned int bytes_done)
-{
- return;
-}
-
-static inline void bio_integrity_trim(struct bio *bio)
-{
- return;
-}
-
-static inline void bio_integrity_init(void)
+/*
+ * Mark a bio as polled. Note that for async polled IO, the caller must
+ * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
+ * We cannot block waiting for requests on polled IO, as those completions
+ * must be found by the caller. This is different than IRQ driven IO, where
+ * it's safe to wait for IO to complete.
+ */
+static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
- return;
+ bio->bi_opf |= REQ_POLLED;
+ if (kiocb->ki_flags & IOCB_NOWAIT)
+ bio->bi_opf |= REQ_NOWAIT;
}
-static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+static inline void bio_clear_polled(struct bio *bio)
{
- return false;
+ bio->bi_opf &= ~REQ_POLLED;
}
-static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
- unsigned int nr)
+/**
+ * bio_is_zone_append - is this a zone append bio?
+ * @bio: bio to check
+ *
+ * Check if @bio is a zone append operation. Core block layer code and end_io
+ * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check
+ * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if
+ * it is not natively supported.
+ */
+static inline bool bio_is_zone_append(struct bio *bio)
{
- return ERR_PTR(-EINVAL);
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return false;
+ return bio_op(bio) == REQ_OP_ZONE_APPEND ||
+ bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
-static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int offset)
-{
- return 0;
-}
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
+struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+struct bio *blk_alloc_discard_bio(struct block_device *bdev,
+ sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);
-#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */
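The bdev-first bio_alloc() signature above changes every call site; a synchronous one-page read now looks roughly like this (hypothetical helper; only the bio calls come from the header):

#include <linux/bio.h>
#include <linux/blkdev.h>

static int demo_read_page(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	/* Does not fail for GFP_KERNEL allocations from fs_bio_set. */
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	int ret;

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);	/* vec reserved above */
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}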
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 3b5bafce4337..c0989b5b0407 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H
@@ -12,7 +13,7 @@
* Don't use this unless you really need to: spin_lock() and spin_unlock()
* are significantly faster.
*/
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
/*
* Assuming the lock is uncontended, this never enters
@@ -37,7 +38,7 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
/*
* Return true if it was acquired
*/
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -53,7 +54,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
/*
* bit-based spin_unlock()
*/
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -70,7 +71,7 @@ static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
* non-atomic version, which can be used eg. if the bit lock itself is
* protecting the rest of the flags in the word.
*/
-static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
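Usage of the API is unchanged by the __always_inline conversion above; the idiom is to dedicate one bit of a word as the lock protecting the remaining bits (hypothetical sketch):

#include <linux/bit_spinlock.h>

#define DEMO_LOCK_BIT	0
#define DEMO_DATA_BIT	1

static unsigned long demo_word;

static void demo_set_data_bit(void)
{
	bit_spin_lock(DEMO_LOCK_BIT, &demo_word);
	/* Plain RMW is safe: every writer serializes on bit 0. */
	demo_word |= 1UL << DEMO_DATA_BIT;
	/* Non-atomic unlock is legal here, per the header comment:
	 * the lock bit itself protects the rest of the word. */
	__bit_spin_unlock(DEMO_LOCK_BIT, &demo_word);
}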
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 8b9d6fff002d..126dc5b380af 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -1,21 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H
-#include <linux/bug.h>
+#include <linux/build_bug.h>
+#include <linux/typecheck.h>
+#include <asm/byteorder.h>
/*
* Bitfield access macros
@@ -23,9 +17,13 @@
* FIELD_{GET,PREP} macros take as first parameter shifted mask
* from which they extract the base mask and shift amount.
* Mask must be a compilation time constant.
+ * field_{get,prep} are variants that take a non-const mask.
*
* Example:
*
+ * #include <linux/bitfield.h>
+ * #include <linux/bits.h>
+ *
* #define REG_FIELD_A GENMASK(6, 0)
* #define REG_FIELD_B BIT(7)
* #define REG_FIELD_C GENMASK(15, 8)
@@ -42,26 +40,76 @@
* FIELD_PREP(REG_FIELD_D, 0x40);
*
* Modify:
- * reg &= ~REG_FIELD_C;
- * reg |= FIELD_PREP(REG_FIELD_C, c);
+ * FIELD_MODIFY(REG_FIELD_C, &reg, c);
*/
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
-#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+#define __scalar_type_to_unsigned_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (unsigned type)0
+
+#define __unsigned_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (unsigned char)0, \
+ __scalar_type_to_unsigned_cases(char), \
+ __scalar_type_to_unsigned_cases(short), \
+ __scalar_type_to_unsigned_cases(int), \
+ __scalar_type_to_unsigned_cases(long), \
+ __scalar_type_to_unsigned_cases(long long), \
+ default: (x)))
+
+#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
+
+#define __BF_FIELD_CHECK_MASK(_mask, _val, _pfx) \
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
_pfx "mask is not constant"); \
- BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
+ BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \
BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
- ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ ~((_mask) >> __bf_shf(_mask)) & \
+ (0 + (_val)) : 0, \
_pfx "value too large for the field"); \
- BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
- _pfx "type of reg too small for mask"); \
__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
(1ULL << __bf_shf(_mask))); \
})
+#define __BF_FIELD_CHECK_REG(mask, reg, pfx) \
+ BUILD_BUG_ON_MSG(__bf_cast_unsigned(mask, mask) > \
+ __bf_cast_unsigned(reg, ~0ull), \
+ pfx "type of reg too small for mask")
+
+#define __BF_FIELD_CHECK(mask, reg, val, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, val, pfx); \
+ __BF_FIELD_CHECK_REG(mask, reg, pfx); \
+ })
+
+#define __FIELD_PREP(mask, val, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, val, pfx); \
+ ((typeof(mask))(val) << __bf_shf(mask)) & (mask); \
+ })
+
+#define __FIELD_GET(mask, reg, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, 0U, pfx); \
+ (typeof(mask))(((reg) & (mask)) >> __bf_shf(mask)); \
+ })
+
+/**
+ * FIELD_MAX() - produce the maximum value representable by a field
+ * @_mask: shifted mask defining the field's length and position
+ *
+ * FIELD_MAX() returns the maximum value that can be held in the field
+ * specified by @_mask.
+ */
+#define FIELD_MAX(_mask) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \
+ (typeof(_mask))((_mask) >> __bf_shf(_mask)); \
+ })
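/*
 * Usage sketch (editor's illustration, not part of this diff), reusing the
 * REG_FIELD_C example from the header comment above:
 *
 *	c = min_t(u32, c, FIELD_MAX(REG_FIELD_C));	// FIELD_MAX() == 0xff
 */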
+
/**
* FIELD_FIT() - check if value fits in the field
* @_mask: shifted mask defining the field's length and position
@@ -71,7 +119,7 @@
*/
#define FIELD_FIT(_mask, _val) \
({ \
- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \
!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
})
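/*
 * Usage sketch (editor's illustration, not part of this diff): reject a
 * value that would otherwise be silently truncated by FIELD_PREP().
 *
 *	if (!FIELD_FIT(REG_FIELD_C, c))
 *		return -ERANGE;
 *	reg |= FIELD_PREP(REG_FIELD_C, c);
 */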
@@ -85,22 +133,170 @@
*/
#define FIELD_PREP(_mask, _val) \
({ \
- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
- ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ __BF_FIELD_CHECK_REG(_mask, 0ULL, "FIELD_PREP: "); \
+ __FIELD_PREP(_mask, _val, "FIELD_PREP: "); \
})
+#define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
+
+/**
+ * FIELD_PREP_CONST() - prepare a constant bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_CONST() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ *
+ * Unlike FIELD_PREP() this is a constant expression and can therefore
+ * be used in initializers. Error reporting is less descriptive for this
+ * version, and non-constant masks cannot be used.
+ */
+#define FIELD_PREP_CONST(_mask, _val) \
+ ( \
+ /* mask must be non-zero */ \
+ BUILD_BUG_ON_ZERO((_mask) == 0) + \
+ /* check if value fits */ \
+ BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \
+ /* check if mask is contiguous */ \
+ __BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) + \
+ /* and create the value */ \
+ (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)) \
+ )
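/*
 * Usage sketch (editor's illustration, not part of this diff): being a
 * constant expression, FIELD_PREP_CONST() is valid in static initializers
 * where FIELD_PREP() is not.
 *
 *	static const u32 reg_default = FIELD_PREP_CONST(REG_FIELD_C, 0x2a) |
 *				       FIELD_PREP_CONST(REG_FIELD_B, 1);
 */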
+
/**
* FIELD_GET() - extract a bitfield element
* @_mask: shifted mask defining the field's length and position
- * @_reg: 32bit value of entire bitfield
+ * @_reg: value of entire bitfield
*
* FIELD_GET() extracts the field specified by @_mask from the
* bitfield passed in as @_reg by masking and shifting it down.
*/
#define FIELD_GET(_mask, _reg) \
({ \
- __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
- (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ __BF_FIELD_CHECK_REG(_mask, _reg, "FIELD_GET: "); \
+ __FIELD_GET(_mask, _reg, "FIELD_GET: "); \
+ })
+
+/**
+ * FIELD_MODIFY() - modify a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg_p: pointer to the memory that should be updated
+ * @_val: value to store in the bitfield
+ *
+ * FIELD_MODIFY() modifies the set of bits in @_reg_p specified by @_mask,
+ * by replacing them with the bitfield value passed in as @_val.
+ */
+#define FIELD_MODIFY(_mask, _reg_p, _val) \
+ ({ \
+ typecheck_pointer(_reg_p); \
+ __BF_FIELD_CHECK(_mask, *(_reg_p), _val, "FIELD_MODIFY: "); \
+ *(_reg_p) &= ~(_mask); \
+ *(_reg_p) |= (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)); \
+ })
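/*
 * Usage sketch (editor's illustration, not part of this diff): FIELD_MODIFY()
 * replaces the open-coded clear-and-OR sequence shown in the header comment.
 * read_reg()/write_reg() are hypothetical accessors.
 *
 *	u32 reg = read_reg();
 *	FIELD_MODIFY(REG_FIELD_C, &reg, c);
 *	write_reg(reg);
 */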
+
+extern void __compiletime_error("value doesn't fit into mask")
+__field_overflow(void);
+extern void __compiletime_error("bad bitfield mask")
+__bad_mask(void);
+static __always_inline u64 field_multiplier(u64 field)
+{
+ if ((field | (field - 1)) & ((field | (field - 1)) + 1))
+ __bad_mask();
+ return field & -field;
+}
+static __always_inline u64 field_mask(u64 field)
+{
+ return field / field_multiplier(field);
+}
+#define field_max(field) ((typeof(field))field_mask(field))
+#define ____MAKE_OP(type,base,to,from) \
+static __always_inline __##type __must_check type##_encode_bits(base v, base field) \
+{ \
+ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
+ __field_overflow(); \
+ return to((v & field_mask(field)) * field_multiplier(field)); \
+} \
+static __always_inline __##type __must_check type##_replace_bits(__##type old, \
+ base val, base field) \
+{ \
+ return (old & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline void type##p_replace_bits(__##type *p, \
+ base val, base field) \
+{ \
+ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline base __must_check type##_get_bits(__##type v, base field) \
+{ \
+ return (from(v) & field)/field_multiplier(field); \
+}
+#define __MAKE_OP(size) \
+ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
+ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
+ ____MAKE_OP(u##size,u##size,,)
+____MAKE_OP(u8,u8,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
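/*
 * Usage sketch (editor's illustration, not part of this diff): the generated
 * fixed-endian helpers avoid manual cpu_to_le32()/le32_to_cpu() calls when
 * packing hardware descriptors. DESC_LEN is a hypothetical field mask.
 *
 *	#define DESC_LEN	GENMASK(11, 0)
 *
 *	__le32 desc = le32_encode_bits(len, DESC_LEN);
 *	u32 out = le32_get_bits(desc, DESC_LEN);
 *	le32p_replace_bits(&desc, new_len, DESC_LEN);
 */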
+
+#define __field_prep(mask, val) \
+ ({ \
+ __auto_type __mask = (mask); \
+ typeof(__mask) __val = (val); \
+ unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+ __ffs(__mask) : __ffs64(__mask); \
+ (__val << __shift) & __mask; \
})
+#define __field_get(mask, reg) \
+ ({ \
+ __auto_type __mask = (mask); \
+ typeof(__mask) __reg = (reg); \
+ unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+ __ffs(__mask) : __ffs64(__mask); \
+ (__reg & __mask) >> __shift; \
+ })
+
+/**
+ * field_prep() - prepare a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ * non-zero
+ * @val: value to put in the field
+ *
+ * Return: field value masked and shifted to its final destination
+ *
+ * field_prep() masks and shifts up the value. The result should be
+ * combined with other fields of the bitfield using logical OR.
+ * Unlike FIELD_PREP(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_PREP() directly instead.
+ */
+#define field_prep(mask, val) \
+ (__builtin_constant_p(mask) ? __FIELD_PREP(mask, val, "field_prep: ") \
+ : __field_prep(mask, val))
+
+/**
+ * field_get() - extract a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ * non-zero
+ * @reg: value of entire bitfield
+ *
+ * Return: extracted field value
+ *
+ * field_get() extracts the field specified by @mask from the
+ * bitfield passed in as @reg by masking and shifting it down.
+ * Unlike FIELD_GET(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_GET() directly instead.
+ */
+#define field_get(mask, reg) \
+ (__builtin_constant_p(mask) ? __FIELD_GET(mask, reg, "field_get: ") \
+ : __field_get(mask, reg))
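/*
 * Usage sketch (editor's illustration, not part of this diff): the lower-case
 * variants take a mask that is only known at run time, e.g. looked up per
 * chip revision; 'mask_tbl' is a hypothetical table.
 *
 *	u32 mask = mask_tbl[chip->rev];		// non-constant mask
 *	u32 val = field_get(mask, reg);
 *	reg = (reg & ~mask) | field_prep(mask, val + 1);
 */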
+
#endif
diff --git a/include/linux/bitmap-str.h b/include/linux/bitmap-str.h
new file mode 100644
index 000000000000..53d3e1b32d3d
--- /dev/null
+++ b/include/linux/bitmap-str.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITMAP_STR_H
+#define __LINUX_BITMAP_STR_H
+
+#include <linux/types.h>
+
+int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits);
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits);
+int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, int nmaskbits,
+ loff_t off, size_t count);
+int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, int nmaskbits,
+ loff_t off, size_t count);
+int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits);
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits);
+int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
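/*
 * Usage sketch (editor's illustration, not part of this diff):
 * bitmap_parselist() accepts cpulist-style syntax such as "0-3,8".
 *
 *	DECLARE_BITMAP(mask, 16);
 *	int err = bitmap_parselist("0-3,8", mask, 16);
 *	if (err)
 *		return err;	// on success, bits 0..3 and 8 are set
 */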
+
+#endif /* __LINUX_BITMAP_STR_H */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 5797ca6fdfe2..b0395e4ccf90 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -1,12 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BITMAP_H
#define __LINUX_BITMAP_H
#ifndef __ASSEMBLY__
-#include <linux/types.h>
+#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/find.h>
+#include <linux/limits.h>
#include <linux/string.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitmap-str.h>
+
+struct device;
/*
* bitmaps provide bit arrays that consume one or more unsigned
@@ -15,112 +23,176 @@
*
* Function implementations generic to all architectures are in
* lib/bitmap.c. Functions implementations that are architecture
- * specific are in various include/asm-<arch>/bitops.h headers
+ * specific are in various arch/<arch>/include/asm/bitops.h headers
* and other arch/<arch> specific files.
*
* See lib/bitmap.c for more details.
*/
-/*
+/**
+ * DOC: bitmap overview
+ *
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
- * Note that nbits should be always a compile time evaluable constant.
- * Otherwise many inlines will generate horrible code.
- *
- * bitmap_zero(dst, nbits) *dst = 0UL
- * bitmap_fill(dst, nbits) *dst = ~0UL
- * bitmap_copy(dst, src, nbits) *dst = *src
- * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
- * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
- * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
- * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
- * bitmap_complement(dst, src, nbits) *dst = ~(*src)
- * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
- * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
- * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
- * bitmap_empty(src, nbits) Are all bits zero in *src?
- * bitmap_full(src, nbits) Are all bits set in *src?
- * bitmap_weight(src, nbits) Hamming Weight: number set bits
- * bitmap_set(dst, pos, nbits) Set specified bit area
- * bitmap_clear(dst, pos, nbits) Clear specified bit area
- * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
- * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
- * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
- * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
- * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
- * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
- * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
- * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
- * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
- * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
- * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
- * bitmap_release_region(bitmap, pos, order) Free specified bit region
- * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
- * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
- * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
+ * The generated code is more efficient when nbits is known at
+ * compile-time and at most BITS_PER_LONG.
+ *
+ * ::
+ *
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_weighted_or(dst, src1, src2, nbits) *dst = *src1 | *src2. Returns Hamming Weight of dst
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
+ * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
+ * bitmap_weight_andnot(src1, src2, nbits) Hamming Weight of andnot'ed bitmap
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above
+ * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
+ * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
+ * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
+ * bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask)
+ * bitmap_scatter(dst, src, mask, nbits) *dst = map(dense, sparse)(src)
+ * bitmap_gather(dst, src, mask, nbits) *dst = map(sparse, dense)(src)
+ * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
+ * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
+ * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
+ * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
+ * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
+ * bitmap_release_region(bitmap, pos, order) Free specified bit region
+ * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
+ * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst
+ * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
+ * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
+ * bitmap_get_value8(map, start) Get 8bit value from map at start
+ * bitmap_set_value8(map, value, start) Set 8bit value to map at start
+ * bitmap_read(map, start, nbits) Read an nbits-sized value from
+ * map at start
+ * bitmap_write(map, value, start, nbits) Write an nbits-sized value to
+ * map at start
+ *
+ * Note: bitmap_zero() and bitmap_fill() operate over whole unsigned longs,
+ * that is, bits beyond @nbits up to the next unsigned long boundary are
+ * zeroed or filled as well. Consider using bitmap_clear() or bitmap_set()
+ * instead when explicit zeroing or filling is needed, respectively.
*/
-/*
- * Also the following operations in asm/bitops.h apply to bitmaps.
- *
- * set_bit(bit, addr) *addr |= bit
- * clear_bit(bit, addr) *addr &= ~bit
- * change_bit(bit, addr) *addr ^= bit
- * test_bit(bit, addr) Is bit set in *addr?
- * test_and_set_bit(bit, addr) Set bit and return old value
- * test_and_clear_bit(bit, addr) Clear bit and return old value
- * test_and_change_bit(bit, addr) Change bit and return old value
- * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
- * find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
- * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+/**
+ * DOC: bitmap bitops
+ *
+ * Also the following operations in asm/bitops.h apply to bitmaps.::
+ *
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit)
+ * Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+ * find_next_and_bit(addr1, addr2, nbits, bit)
+ * Same as find_next_bit, but in
+ * (*addr1 & *addr2)
+ *
*/
-/*
+/**
+ * DOC: declare bitmap
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
* to declare an array named 'name' of just enough unsigned longs to
* contain all bit positions from 0 to 'bits' - 1.
*/
/*
+ * Allocation and deallocation of bitmap.
+ * Provided in lib/bitmap.c to avoid circular dependency.
+ */
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
+unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
+void bitmap_free(const unsigned long *bitmap);
+
+DEFINE_FREE(bitmap, unsigned long *, if (_T) bitmap_free(_T))
+
+/* Managed variants of the above. */
+unsigned long *devm_bitmap_alloc(struct device *dev,
+ unsigned int nbits, gfp_t flags);
+unsigned long *devm_bitmap_zalloc(struct device *dev,
+ unsigned int nbits, gfp_t flags);
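/*
 * Usage sketch (editor's illustration, not part of this diff): scoped
 * allocation using the DEFINE_FREE(bitmap, ...) hook declared above.
 *
 *	unsigned long *map __free(bitmap) = bitmap_zalloc(nbits, GFP_KERNEL);
 *	if (!map)
 *		return -ENOMEM;
 *	// ... use map; it is freed automatically when it goes out of scope
 */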
+
+/*
* lib/bitmap.c provides these functions:
*/
-extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
-extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
-extern int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits);
-extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits);
-extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits);
-extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
-extern void __bitmap_set(unsigned long *map, unsigned int start, int len);
-extern void __bitmap_clear(unsigned long *map, unsigned int start, int len);
-
-extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask,
- unsigned long align_offset);
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __pure __bitmap_or_equal(const unsigned long *src1,
+ const unsigned long *src2,
+ const unsigned long *src3,
+ unsigned int nbits);
+void __bitmap_complement(unsigned long *dst, const unsigned long *src,
+ unsigned int nbits);
+void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+void bitmap_cut(unsigned long *dst, const unsigned long *src,
+ unsigned int first, unsigned int cut, unsigned int nbits);
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weighted_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_replace(unsigned long *dst,
+ const unsigned long *old, const unsigned long *new,
+ const unsigned long *mask, unsigned int nbits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
+unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_set(unsigned long *map, unsigned int start, int len);
+void __bitmap_clear(unsigned long *map, unsigned int start, int len);
+
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset);
/**
* bitmap_find_next_zero_area - find a contiguous aligned zero area
@@ -134,100 +206,133 @@ extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
* the bit offset of all zero areas this function finds is multiples of that
* power of 2. A @align_mask of 0 means no alignment is required.
*/
-static inline unsigned long
-bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
+static __always_inline
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
}
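/*
 * Usage sketch (editor's illustration, not part of this diff): claim four
 * contiguous free bits aligned to a multiple of four (align_mask = 3).
 *
 *	pos = bitmap_find_next_zero_area(map, size, 0, 4, 3);
 *	if (pos >= size)
 *		return -ENOSPC;
 *	bitmap_set(map, pos, 4);
 */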
-extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
- unsigned long *dst, int nbits);
-extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
-extern int bitmap_parselist(const char *buf, unsigned long *maskp,
- int nmaskbits);
-extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
-extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
+void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
-extern int bitmap_bitremap(int oldbit,
+int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
-extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
-extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
-extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
-extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
-extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
- unsigned int nbits,
- const u32 *buf,
- unsigned int nwords);
-extern unsigned int bitmap_to_u32array(u32 *buf,
- unsigned int nwords,
- const unsigned long *bitmap,
- unsigned int nbits);
-#ifdef __BIG_ENDIAN
-extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
-#else
-#define bitmap_copy_le bitmap_copy
-#endif
-extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
-extern int bitmap_print_to_pagebuf(bool list, char *buf,
- const unsigned long *maskp, int nmaskbits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
-#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
-static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
+ unsigned int len = bitmap_size(nbits);
+
if (small_const_nbits(nbits))
- *dst = 0UL;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ *dst = 0;
+ else
memset(dst, 0, len);
- }
}
-static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- unsigned int nlongs = BITS_TO_LONGS(nbits);
- if (!small_const_nbits(nbits)) {
- unsigned int len = (nlongs - 1) * sizeof(unsigned long);
- memset(dst, 0xff, len);
- }
- dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+ unsigned int len = bitmap_size(nbits);
+
+ if (small_const_nbits(nbits))
+ *dst = ~0UL;
+ else
+ memset(dst, 0xff, len);
}
-static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
+ unsigned int len = bitmap_size(nbits);
+
if (small_const_nbits(nbits))
*dst = *src;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ else
memcpy(dst, src, len);
- }
}
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+/*
+ * Copy bitmap and clear tail bits in last word.
+ */
+static __always_inline
+void bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits)
+{
+ bitmap_copy(dst, src, nbits);
+ if (nbits % BITS_PER_LONG)
+ dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+
+static inline void bitmap_copy_and_extend(unsigned long *to,
+ const unsigned long *from,
+ unsigned int count, unsigned int size)
+{
+ unsigned int copy = BITS_TO_LONGS(count);
+
+ memcpy(to, from, copy * sizeof(long));
+ if (count % BITS_PER_LONG)
+ to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
+ memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
+}
+
+/*
+ * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
+ * machines the order of hi and lo parts of numbers matches the bitmap
+ * structure, so in both cases no conversion is needed when copying data
+ * from/to arrays of u32. In the LE64 case, however, the typecast in
+ * bitmap_copy_clear_tail() may lead to out-of-bounds access. To avoid that,
+ * neither the LE nor the BE variant of 64-bit architectures uses
+ * bitmap_copy_clear_tail().
+ */
+#if BITS_PER_LONG == 64
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
+ unsigned int nbits);
+void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
+ unsigned int nbits);
+#else
+#define bitmap_from_arr32(bitmap, buf, nbits) \
+ bitmap_copy_clear_tail((unsigned long *) (bitmap), \
+ (const unsigned long *) (buf), (nbits))
+#define bitmap_to_arr32(buf, bitmap, nbits) \
+ bitmap_copy_clear_tail((unsigned long *) (buf), \
+ (const unsigned long *) (bitmap), (nbits))
+#endif
+
+/*
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. So,
+ * the conversion is not needed when copying data from/to arrays of u64.
+ */
+#if BITS_PER_LONG == 32
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
+#else
+#define bitmap_from_arr64(bitmap, buf, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
+#define bitmap_to_arr64(buf, bitmap, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
+#endif
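/*
 * Usage sketch (editor's illustration, not part of this diff): round-tripping
 * a bitmap through a fixed u64[] buffer works regardless of BITS_PER_LONG.
 *
 *	DECLARE_BITMAP(map, 128);
 *	u64 buf[BITS_TO_U64(128)];
 *
 *	bitmap_to_arr64(buf, map, 128);
 *	bitmap_from_arr64(map, buf, 128);
 */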
+
+static __always_inline
+bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
-static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
@@ -235,8 +340,21 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
__bitmap_or(dst, src1, src2, nbits);
}
-static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+unsigned int bitmap_weighted_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits)) {
+ *dst = *src1 | *src2;
+ return hweight_long(*dst & BITMAP_LAST_WORD_MASK(nbits));
+ } else {
+ return __bitmap_weighted_or(dst, src1, src2, nbits);
+ }
+}
+
+static __always_inline
+void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
@@ -244,16 +362,17 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
-static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src);
@@ -261,18 +380,45 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
__bitmap_complement(dst, src, nbits);
}
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+#ifdef __LITTLE_ENDIAN
+#define BITMAP_MEM_ALIGNMENT 8
+#else
+#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+#endif
+#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+
+static __always_inline
+bool bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
- if (__builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
+ if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
return !memcmp(src1, src2, nbits / 8);
return __bitmap_equal(src1, src2, nbits);
}
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+/**
+ * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
+ * @src1: Pointer to bitmap 1
+ * @src2: Pointer to bitmap 2, which will be or'ed with bitmap 1
+ * @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
+ */
+static __always_inline
+bool bitmap_or_equal(const unsigned long *src1, const unsigned long *src2,
+ const unsigned long *src3, unsigned int nbits)
+{
+ if (!small_const_nbits(nbits))
+ return __bitmap_or_equal(src1, src2, src3, nbits);
+
+ return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
+}
+
+static __always_inline
+bool bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -280,8 +426,8 @@ static inline int bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline int bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_subset(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -289,7 +435,8 @@ static inline int bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
-static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+static __always_inline
+bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -297,7 +444,8 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+static __always_inline
+bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -305,39 +453,67 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline
+unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
-static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+unsigned long bitmap_weight_and(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_and(src1, src2, nbits);
+}
+
+static __always_inline
+unsigned long bitmap_weight_andnot(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_andnot(src1, src2, nbits);
+}
+
+static __always_inline
+void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
- else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) &&
- __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
+ else if (small_const_nbits(start + nbits))
+ *map |= GENMASK(start + nbits - 1, start);
+ else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
+ __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
memset((char *)map + start / 8, 0xff, nbits / 8);
else
__bitmap_set(map, start, nbits);
}
-static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
- else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) &&
- __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
+ else if (small_const_nbits(start + nbits))
+ *map &= ~GENMASK(start + nbits - 1, start);
+ else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
+ __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
memset((char *)map + start / 8, 0, nbits / 8);
else
__bitmap_clear(map, start, nbits);
}
-static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, int nbits)
+static __always_inline
+void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
@@ -345,8 +521,9 @@ static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *s
__bitmap_shift_right(dst, src, shift, nbits);
}
-static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
@@ -354,30 +531,321 @@ static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *sr
__bitmap_shift_left(dst, src, shift, nbits);
}
-static inline int bitmap_parse(const char *buf, unsigned int buflen,
- unsigned long *maskp, int nmaskbits)
+static __always_inline
+void bitmap_replace(unsigned long *dst,
+ const unsigned long *old,
+ const unsigned long *new,
+ const unsigned long *mask,
+ unsigned int nbits)
{
- return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
+ if (small_const_nbits(nbits))
+ *dst = (*old & ~(*mask)) | (*new & *mask);
+ else
+ __bitmap_replace(dst, old, new, mask, nbits);
}
-/*
+/**
+ * bitmap_scatter - Scatter a bitmap according to the given mask
+ * @dst: scattered bitmap
+ * @src: gathered bitmap
+ * @mask: mask representing bits to assign to in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Scatters bitmap with sequential bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x005a, with @mask = 0x1313, @dst will be 0x0302.
+ *
+ * Or in binary form
+ *	@src			@mask			@dst
+ *	0000000001011010	0001001100010011	0000001100000010
+ *
+ * (Bits 0, 1, 2, 3, 4, 5 are copied to the bits 0, 1, 4, 8, 9, 12)
+ *
+ * A more 'visual' description of the operation::
+ *
+ * src: 0000000001011010
+ * ||||||
+ * +------+|||||
+ * | +----+||||
+ * | |+----+|||
+ * | || +-+||
+ * | || | ||
+ * mask: ...v..vv...v..vv
+ * ...0..11...0..10
+ * dst: 0000001100000010
+ *
+ * A relationship exists between bitmap_scatter() and bitmap_gather(). See
+ * bitmap_gather() for the bitmap gather detailed operations. TL;DR:
+ * bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
+ */
+static __always_inline
+void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
+{
+ unsigned int n = 0;
+ unsigned int bit;
+
+ bitmap_zero(dst, nbits);
+
+ for_each_set_bit(bit, mask, nbits)
+ __assign_bit(bit, dst, test_bit(n++, src));
+}
+
+/**
+ * bitmap_gather - Gather a bitmap according to given mask
+ * @dst: gathered bitmap
+ * @src: scattered bitmap
+ * @mask: mask representing bits to extract from in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Gathers bitmap with sparse bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x0302, with @mask = 0x1313, @dst will be 0x001a.
+ *
+ * Or in binary form
+ *	@src			@mask			@dst
+ *	0000001100000010	0001001100010011	0000000000011010
+ *
+ * (Bits 0, 1, 4, 8, 9, 12 are copied to the bits 0, 1, 2, 3, 4, 5)
+ *
+ * A more 'visual' description of the operation::
+ *
+ * mask: ...v..vv...v..vv
+ * src: 0000001100000010
+ * ^ ^^ ^ 0
+ * | || | 10
+ * | || > 010
+ * | |+--> 1010
+ * | +--> 11010
+ * +----> 011010
+ * dst: 0000000000011010
+ *
+ * A relationship exists between bitmap_gather() and bitmap_scatter(). See
+ * bitmap_scatter() for the bitmap scatter detailed operations. TL;DR:
+ * bitmap_scatter() can be seen as the 'reverse' bitmap_gather() operation.
+ *
+ * Suppose @scattered was computed using bitmap_scatter(scattered, src, mask, n).
+ * Then bitmap_gather(result, scattered, mask, n) produces a result equal or
+ * equivalent to @src.
+ *
+ * The result may only be 'equivalent' because bitmap_scatter() and
+ * bitmap_gather() are not bijective.
+ * The result and @src are equivalent in the sense that a call to
+ * bitmap_scatter(res, src, mask, n) and a call to
+ * bitmap_scatter(res, result, mask, n) will produce the same res value.
+ */
+static __always_inline
+void bitmap_gather(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
+{
+ unsigned int n = 0;
+ unsigned int bit;
+
+ bitmap_zero(dst, nbits);
+
+ for_each_set_bit(bit, mask, nbits)
+ __assign_bit(n++, dst, test_bit(bit, src));
+}
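/*
 * Round-trip sketch (editor's illustration, not part of this diff), using
 * the values from the kernel-doc examples above (@mask = 0x1313):
 *
 *	bitmap_scatter(scattered, src, mask, 16);	// 0x005a -> 0x0302
 *	bitmap_gather(result, scattered, mask, 16);	// 0x0302 -> 0x001a
 *	// 0x001a is equivalent to the original 0x005a w.r.t. a further scatter
 */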
+
+static __always_inline
+void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs,
+ unsigned int *re, unsigned int end)
+{
+ *rs = find_next_bit(bitmap, end, *rs);
+ *re = find_next_zero_bit(bitmap, end, *rs + 1);
+}
+
+/**
+ * bitmap_release_region - release allocated bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to release
+ * @order: region size (log base 2 of number of bits) to release
+ *
+ * This is the complement to __bitmap_find_free_region() and releases
+ * the found region (by clearing it in the bitmap).
+ */
+static __always_inline
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ bitmap_clear(bitmap, pos, BIT(order));
+}
+
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to allocate
+ * @order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Returns: 0 on success, or %-EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
+static __always_inline
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ unsigned int len = BIT(order);
+
+ if (find_next_bit(bitmap, pos + len, pos) < pos + len)
+ return -EBUSY;
+ bitmap_set(bitmap, pos, len);
+ return 0;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @bits: number of bits in the bitmap
+ * @order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one). Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Returns: the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+static __always_inline
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+{
+ unsigned int pos, end; /* scans bitmap by regions of size order */
+
+ for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) {
+ if (!bitmap_allocate_region(bitmap, pos, order))
+ return pos;
+ }
+ return -ENOMEM;
+}
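/*
 * Usage sketch (editor's illustration, not part of this diff): carve out an
 * order-2 region (four aligned bits) and release it later.
 *
 *	int pos = bitmap_find_free_region(map, bits, 2);
 *	if (pos < 0)
 *		return pos;
 *	// ... bits pos..pos+3 are now allocated ...
 *	bitmap_release_region(map, pos, 2);
 */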
+
+/**
+ * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
+ * @n: u64 value
+ *
+ * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
+ * integers in 32-bit environment, and 64-bit integers in 64-bit one.
+ *
+ * There are four combinations of endianness and length of the word in linux
+ * ABIs: LE64, BE64, LE32 and BE32.
+ *
+ * On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in
+ * bitmaps and therefore don't require any special handling.
+ *
+ * On 32-bit kernels 32-bit LE ABI orders lo word of 64-bit number in memory
+ * prior to hi, and 32-bit BE orders hi word prior to lo. The bitmap on the
+ * other hand is represented as an array of 32-bit words and the position of
+ * bit N may therefore be calculated as: word #(N/32) and bit #(N%32) in that
+ * word. For example, bit #42 is located at 10th position of 2nd word.
+ * It matches 32-bit LE ABI, and we can simply let the compiler store 64-bit
+ * values in memory as it usually does. But for BE we need to swap hi and lo
+ * words manually.
+ *
+ * With all that, the macro BITMAP_FROM_U64() does explicit reordering of hi and
+ * lo parts of the u64. For LE32 it does nothing, and for a BE environment it
+ * swaps hi and lo words, as expected by the bitmap layout.
+ */
+#if __BITS_PER_LONG == 64
+#define BITMAP_FROM_U64(n) (n)
+#else
+#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \
+ ((unsigned long) ((u64)(n) >> 32))
+#endif
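/*
 * Usage sketch (editor's illustration, not part of this diff): a bitmap
 * initializer built from 64-bit constants that lays out correctly on all
 * four ABI combinations described above.
 *
 *	static const unsigned long initial[] = {
 *		BITMAP_FROM_U64(0x00000000ffffffffULL),
 *		BITMAP_FROM_U64(0xdeadbeef00000000ULL),
 *	};
 */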
+
+/**
* bitmap_from_u64 - Check and swap words within u64.
* @mask: source bitmap
* @dst: destination bitmap
*
- * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*]
+ * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]``
* to read u64 mask, we will get the wrong word.
- * That is "(u32 *)(&val)[0]" gets the upper 32 bits,
+ * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
-static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+static __always_inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+ bitmap_from_arr64(dst, &mask, 64);
+}
+
+/**
+ * bitmap_read - read a value of n-bits from the memory region
+ * @map: address to the bitmap memory region
+ * @start: bit offset of the n-bit value
+ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG
+ *
+ * Returns: value of @nbits bits located at the @start bit offset within the
+ * @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
+ * value is undefined.
+ */
+static __always_inline
+unsigned long bitmap_read(const unsigned long *map, unsigned long start, unsigned long nbits)
{
- dst[0] = mask & ULONG_MAX;
+ size_t index = BIT_WORD(start);
+ unsigned long offset = start % BITS_PER_LONG;
+ unsigned long space = BITS_PER_LONG - offset;
+ unsigned long value_low, value_high;
+
+ if (unlikely(!nbits || nbits > BITS_PER_LONG))
+ return 0;
- if (sizeof(mask) > sizeof(unsigned long))
- dst[1] = mask >> 32;
+ if (space >= nbits)
+ return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits);
+
+ value_low = map[index] & BITMAP_FIRST_WORD_MASK(start);
+ value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits);
+ return (value_low >> offset) | (value_high << space);
+}
+
+/**
+ * bitmap_write - write n-bit value within a memory region
+ * @map: address to the bitmap memory region
+ * @value: value to write, clamped to nbits
+ * @start: bit offset of the n-bit value
+ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG.
+ *
+ * bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(),
+ * i.e. bits beyond @nbits are ignored:
+ *
+ * for (bit = 0; bit < nbits; bit++)
+ * __assign_bit(start + bit, bitmap, val & BIT(bit));
+ *
+ * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
+ */
+static __always_inline
+void bitmap_write(unsigned long *map, unsigned long value,
+ unsigned long start, unsigned long nbits)
+{
+ size_t index;
+ unsigned long offset;
+ unsigned long space;
+ unsigned long mask;
+ bool fit;
+
+ if (unlikely(!nbits || nbits > BITS_PER_LONG))
+ return;
+
+ mask = BITMAP_LAST_WORD_MASK(nbits);
+ value &= mask;
+ offset = start % BITS_PER_LONG;
+ space = BITS_PER_LONG - offset;
+ fit = space >= nbits;
+ index = BIT_WORD(start);
+
+ map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start));
+ map[index] |= value << offset;
+ if (fit)
+ return;
+
+ map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits);
+ map[index + 1] |= (value >> space);
}
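/*
 * Usage sketch (editor's illustration, not part of this diff): a 5-bit value
 * written at bit offset 61 straddles a word boundary on 64-bit kernels;
 * the offsets here are made up.
 *
 *	bitmap_write(map, 0x15, 61, 5);
 *	val = bitmap_read(map, 61, 5);	// val == 0x15
 */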
+#define bitmap_get_value8(map, start) \
+ bitmap_read(map, start, BITS_PER_BYTE)
+#define bitmap_set_value8(map, value, start) \
+ bitmap_write(map, value, start, BITS_PER_BYTE)
+
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a83c822c35c2..ea7898cc5903 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -1,28 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
+
#include <asm/types.h>
+#include <linux/bits.h>
+#include <linux/typecheck.h>
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE 8
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
+#include <uapi/linux/kernel.h>
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
- (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
-#define GENMASK_ULL(h, l) \
- (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
@@ -30,32 +21,67 @@ extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
+ */
+
+#include <asm-generic/bitops/generic-non-atomic.h>
+
+/*
+ * Many architecture-specific non-atomic bitops contain inline asm code and due
+ * to that the compiler can't optimize them to compile-time expressions or
+ * constants. In contrast, generic_*() helpers are defined in pure C, which
+ * compilers optimize well.
+ * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
+ * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
+ * the arguments can be resolved at compile time. That expression itself is a
+ * constant and doesn't bring any functional changes to the rest of cases.
+ * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
+ * passing a bitmap from .bss or .data (-> `!!addr` is always true).
+ */
+#define bitop(op, nr, addr) \
+ ((__builtin_constant_p(nr) && \
+ __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
+ (uintptr_t)(addr) != (uintptr_t)NULL && \
+ __builtin_constant_p(*(const unsigned long *)(addr))) ? \
+ const##op(nr, addr) : op(nr, addr))
+
+/*
+ * The following macros are non-atomic versions of their non-underscored
+ * counterparts.
+ */
+#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+
+#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
+
+/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*/
#include <asm/bitops.h>
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_from(bit, addr, size) \
- for ((bit) = find_next_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+/* Check that the bitops prototypes are sane */
+#define __check_bitop_pr(name) \
+ static_assert(__same_type(arch_##name, generic_##name) && \
+ __same_type(const_##name, generic_##name) && \
+ __same_type(_##name, generic_##name))
-#define for_each_clear_bit(bit, addr, size) \
- for ((bit) = find_first_zero_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+__check_bitop_pr(__set_bit);
+__check_bitop_pr(__clear_bit);
+__check_bitop_pr(__change_bit);
+__check_bitop_pr(__test_and_set_bit);
+__check_bitop_pr(__test_and_clear_bit);
+__check_bitop_pr(__test_and_change_bit);
+__check_bitop_pr(test_bit);
+__check_bitop_pr(test_bit_acquire);
-/* same as for_each_clear_bit() but use bit as value to start with */
-#define for_each_clear_bit_from(bit, addr, size) \
- for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+#undef __check_bitop_pr
static inline int get_bitmask_order(unsigned int count)
{
@@ -67,7 +93,7 @@ static inline int get_bitmask_order(unsigned int count)
static __always_inline unsigned long hweight_long(unsigned long w)
{
- return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+ return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
/**
@@ -77,7 +103,7 @@ static __always_inline unsigned long hweight_long(unsigned long w)
*/
static inline __u64 rol64(__u64 word, unsigned int shift)
{
- return (word << shift) | (word >> (64 - shift));
+ return (word << (shift & 63)) | (word >> ((-shift) & 63));
}
/**
@@ -87,7 +113,7 @@ static inline __u64 rol64(__u64 word, unsigned int shift)
*/
static inline __u64 ror64(__u64 word, unsigned int shift)
{
- return (word >> shift) | (word << (64 - shift));
+ return (word >> (shift & 63)) | (word << ((-shift) & 63));
}
/**
@@ -97,7 +123,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
- return (word << shift) | (word >> ((-shift) & 31));
+ return (word << (shift & 31)) | (word >> ((-shift) & 31));
}
/**
@@ -107,7 +133,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
*/
static inline __u32 ror32(__u32 word, unsigned int shift)
{
- return (word >> shift) | (word << (32 - shift));
+ return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
/**
@@ -117,7 +143,7 @@ static inline __u32 ror32(__u32 word, unsigned int shift)
*/
static inline __u16 rol16(__u16 word, unsigned int shift)
{
- return (word << shift) | (word >> (16 - shift));
+ return (word << (shift & 15)) | (word >> ((-shift) & 15));
}
/**
@@ -127,7 +153,7 @@ static inline __u16 rol16(__u16 word, unsigned int shift)
*/
static inline __u16 ror16(__u16 word, unsigned int shift)
{
- return (word >> shift) | (word << (16 - shift));
+ return (word >> (shift & 15)) | (word << ((-shift) & 15));
}
/**
@@ -137,7 +163,7 @@ static inline __u16 ror16(__u16 word, unsigned int shift)
*/
static inline __u8 rol8(__u8 word, unsigned int shift)
{
- return (word << shift) | (word >> (8 - shift));
+ return (word << (shift & 7)) | (word >> ((-shift) & 7));
}
/**
@@ -147,7 +173,7 @@ static inline __u8 rol8(__u8 word, unsigned int shift)
*/
static inline __u8 ror8(__u8 word, unsigned int shift)
{
- return (word >> shift) | (word << (8 - shift));
+ return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
/**
@@ -157,7 +183,7 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
*
* This is safe to use for 16- and 8-bit types as well.
*/
-static inline __s32 sign_extend32(__u32 value, int index)
+static __always_inline __s32 sign_extend32(__u32 value, int index)
{
__u8 shift = 31 - index;
return (__s32)(value << shift) >> shift;
@@ -168,13 +194,13 @@ static inline __s32 sign_extend32(__u32 value, int index)
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
-static inline __s64 sign_extend64(__u64 value, int index)
+static __always_inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
}
-static inline unsigned fls_long(unsigned long l)
+static inline unsigned int fls_long(unsigned long l)
{
if (sizeof(l) == 4)
return fls(l);
@@ -183,12 +209,10 @@ static inline unsigned fls_long(unsigned long l)
static inline int get_count_order(unsigned int count)
{
- int order;
+ if (count == 0)
+ return -1;
- order = fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
+ return fls(--count);
}
/**
@@ -201,21 +225,49 @@ static inline int get_count_order_long(unsigned long l)
{
if (l == 0UL)
return -1;
- else if (l & (l - 1UL))
- return (int)fls_long(l);
- else
- return (int)fls_long(l) - 1;
+ return (int)fls_long(--l);
+}
+
+/**
+ * parity8 - get the parity of an u8 value
+ * @value: the value to be examined
+ *
+ * Determine the parity of the u8 argument.
+ *
+ * Returns:
+ * 0 for even parity, 1 for odd parity
+ *
+ * Note: This function informs you about the current parity. Example to bail
+ * out when parity is odd:
+ *
+ * if (parity8(val) == 1)
+ * return -EBADMSG;
+ *
+ * If you need to calculate a parity bit, you need to draw the conclusion from
+ * this result yourself. Example to enforce odd parity, parity bit is bit 7:
+ *
+ * if (parity8(val) == 0)
+ * val ^= BIT(7);
+ */
+static inline int parity8(u8 val)
+{
+ /*
+ * One explanation of this algorithm:
+ * https://funloop.org/codex/problem/parity/README.html
+ */
+ val ^= val >> 4;
+ return (0x6996 >> (val & 0xf)) & 1;
}
/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
- * On 64 bit arches this is a synomyn for __ffs
+ * On 64 bit arches this is a synonym for __ffs
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
-static inline unsigned long __ffs64(u64 word)
+static inline __attribute_const__ unsigned int __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
if (((u32)word) == 0UL)
@@ -226,50 +278,113 @@ static inline unsigned long __ffs64(u64 word)
return __ffs((unsigned long)word);
}
+/**
+ * fns - find N'th set bit in a word
+ * @word: The word to search
+ * @n: The 0-based index of the set bit to find
+ *
+ * Returns the bit number of the N'th set bit, or BITS_PER_LONG if fewer
+ * than @n + 1 bits are set in @word.
+ */
+static inline unsigned int fns(unsigned long word, unsigned int n)
+{
+ while (word && n--)
+ word &= word - 1;
+
+ return word ? __ffs(word) : BITS_PER_LONG;
+}
+
+/**
+ * assign_bit - Assign value to a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ * @value: the value to assign
+ */
+#define assign_bit(nr, addr, value) \
+ ((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr)))
+
+#define __assign_bit(nr, addr, value) \
+ ((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr)))
+
+/**
+ * __ptr_set_bit - Set bit in a pointer's value
+ * @nr: the bit to set
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_set_bit(bit, &p);
+ */
+#define __ptr_set_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __set_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_clear_bit - Clear bit in a pointer's value
+ * @nr: the bit to clear
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_clear_bit(bit, &p);
+ */
+#define __ptr_clear_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __clear_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_test_bit - Test bit in a pointer's value
+ * @nr: the bit to test
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * if (__ptr_test_bit(bit, &p)) {
+ * ...
+ * } else {
+ * ...
+ * }
+ */
+#define __ptr_test_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ test_bit(nr, (unsigned long *)(addr)); \
+ })
+
#ifdef __KERNEL__
#ifndef set_mask_bits
-#define set_mask_bits(ptr, _mask, _bits) \
+#define set_mask_bits(ptr, mask, bits) \
({ \
- const typeof(*ptr) mask = (_mask), bits = (_bits); \
- typeof(*ptr) old, new; \
+ const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
+ typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old = ACCESS_ONCE(*ptr); \
- new = (old & ~mask) | bits; \
- } while (cmpxchg(ptr, old, new) != old); \
+ new__ = (old__ & ~mask__) | bits__; \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
- new; \
+ old__; \
})
#endif
#ifndef bit_clear_unless
-#define bit_clear_unless(ptr, _clear, _test) \
+#define bit_clear_unless(ptr, clear, test) \
({ \
- const typeof(*ptr) clear = (_clear), test = (_test); \
- typeof(*ptr) old, new; \
+ const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
+ typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old = ACCESS_ONCE(*ptr); \
- new = old & ~clear; \
- } while (!(old & test) && \
- cmpxchg(ptr, old, new) != old); \
+ if (old__ & test__) \
+ break; \
+ new__ = old__ & ~clear__; \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
- !(old & test); \
+ !(old__ & test__); \
})
#endif
-#ifndef find_last_bit
-/**
- * find_last_bit - find the last set bit in a memory region
- * @addr: The address to start the search at
- * @size: The number of bits to search
- *
- * Returns the bit number of the last set bit, or size.
- */
-extern unsigned long find_last_bit(const unsigned long *addr,
- unsigned long size);
-#endif
-
#endif /* __KERNEL__ */
#endif
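
The rotate fixes above are easiest to check in a standalone build. Below is a
plain userspace C sketch, not kernel code, showing why the (shift & 31) /
((-shift) & 31) masking matters: both shift counts stay inside the type width,
so a rotate by 0 is well defined instead of shifting by the full word size.

#include <stdint.h>
#include <stdio.h>

/* Same masked-rotation pattern as the kernel rol32() above. */
static uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/* Same nibble-fold trick as parity8(): 0x6996 is a 16-entry parity table. */
static int parity8(uint8_t val)
{
	val ^= val >> 4;
	return (0x6996 >> (val & 0xf)) & 1;
}

int main(void)
{
	printf("%08x\n", rol32(0x80000001u, 0));	/* 80000001, no UB */
	printf("%08x\n", rol32(0x80000001u, 4));	/* 00000018 */
	printf("%d\n", parity8(0x07));			/* 1: three bits set */
	return 0;
}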
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index b97be27e5a85..d35b8ec1c485 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITREV_H
#define _LINUX_BITREV_H
@@ -33,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
#define __constant_bitrev32(x) \
({ \
- u32 __x = x; \
- __x = (__x >> 16) | (__x << 16); \
- __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
- __x; \
+ u32 ___x = x; \
+ ___x = (___x >> 16) | (___x << 16); \
+ ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
+ ___x; \
})
#define __constant_bitrev16(x) \
({ \
- u16 __x = x; \
- __x = (__x >> 8) | (__x << 8); \
- __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
- __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
- __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
- __x; \
+ u16 ___x = x; \
+ ___x = (___x >> 8) | (___x << 8); \
+ ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
+ ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
+ ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
+ ___x; \
})
#define __constant_bitrev8x4(x) \
({ \
- u32 __x = x; \
- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
- __x; \
+ u32 ___x = x; \
+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
+ ___x; \
})
#define __constant_bitrev8(x) \
({ \
- u8 __x = x; \
- __x = (__x >> 4) | (__x << 4); \
- __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
- __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
- __x; \
+ u8 ___x = x; \
+ ___x = (___x >> 4) | (___x << 4); \
+ ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
+ ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
+ ___x; \
})
#define bitrev32(x) \
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..a40cc861b3a7
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+
+#include <vdso/bits.h>
+#include <uapi/linux/bits.h>
+
+#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example,
+ * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
+ */
+#if !defined(__ASSEMBLY__)
+
+/*
+ * Missing asm support
+ *
+ * GENMASK_U*() and BIT_U*() depend on BITS_PER_TYPE() which relies on sizeof(),
+ * something not available in asm. Nevertheless, fixed-width integers are a C
+ * concept. Assembly code can rely on the long and long long versions instead.
+ */
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/overflow.h>
+
+#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
+
+/*
+ * Generate a mask for the specified type @t. Additional checks are made to
+ * guarantee the value returned fits in that type, relying on the
+ * -Wshift-count-overflow compiler check to detect incompatible arguments.
+ * For example, all these create build errors or warnings:
+ *
+ * - GENMASK(15, 20): wrong argument order
+ * - GENMASK(72, 15): doesn't fit unsigned long
+ * - GENMASK_U32(33, 15): doesn't fit in a u32
+ */
+#define GENMASK_TYPE(t, h, l) \
+ ((t)(GENMASK_INPUT_CHECK(h, l) + \
+ (type_max(t) << (l) & \
+ type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
+
+#define GENMASK(h, l) GENMASK_TYPE(unsigned long, h, l)
+#define GENMASK_ULL(h, l) GENMASK_TYPE(unsigned long long, h, l)
+
+#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l)
+#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l)
+#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l)
+#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l)
+#define GENMASK_U128(h, l) GENMASK_TYPE(u128, h, l)
+
+/*
+ * Fixed-type variants of BIT(), with additional checks like GENMASK_TYPE(). The
+ * following examples generate compiler warnings due to -Wshift-count-overflow:
+ *
+ * - BIT_U8(8)
+ * - BIT_U32(-1)
+ * - BIT_U32(40)
+ */
+#define BIT_INPUT_CHECK(type, nr) \
+ BUILD_BUG_ON_ZERO(const_true((nr) >= BITS_PER_TYPE(type)))
+
+#define BIT_TYPE(type, nr) ((type)(BIT_INPUT_CHECK(type, nr) + BIT_ULL(nr)))
+
+#define BIT_U8(nr) BIT_TYPE(u8, nr)
+#define BIT_U16(nr) BIT_TYPE(u16, nr)
+#define BIT_U32(nr) BIT_TYPE(u32, nr)
+#define BIT_U64(nr) BIT_TYPE(u64, nr)
+
+#else /* defined(__ASSEMBLY__) */
+
+/*
+ * BUILD_BUG_ON_ZERO is not available in header files included from asm
+ * files, so disable the input check in that case.
+ */
+#define GENMASK(h, l) __GENMASK(h, l)
+#define GENMASK_ULL(h, l) __GENMASK_ULL(h, l)
+
+#endif /* !defined(__ASSEMBLY__) */
+
+#endif /* __LINUX_BITS_H */
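
The GENMASK_TYPE() expression is dense; a userspace rendering of the same
construction, assuming a 64-bit unsigned long, makes the two half-masks
visible (illustrative only, not the kernel macro):

#include <stdio.h>

/*
 * ~0UL << l clears bits below l; ~0UL >> (63 - h) clears bits above h.
 * Their AND leaves exactly bits h..l set, matching GENMASK_ULL(h, l).
 */
#define GENMASK_SKETCH(h, l)	((~0UL << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	printf("%#018lx\n", GENMASK_SKETCH(39, 21)); /* 0x000000ffffe00000 */
	return 0;
}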
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 7104bea8dab1..dd5841a42c33 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
@@ -13,765 +14,38 @@
* Nauman Rafique <nauman@google.com>
*/
-#include <linux/cgroup.h>
-#include <linux/percpu_counter.h>
-#include <linux/seq_file.h>
-#include <linux/radix-tree.h>
-#include <linux/blkdev.h>
-#include <linux/atomic.h>
+#include <linux/types.h>
-/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
-#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
+struct bio;
+struct cgroup_subsys_state;
+struct gendisk;
-/* Max limits for throttle policy */
-#define THROTL_IOPS_MAX UINT_MAX
+#define FC_APPID_LEN 129
#ifdef CONFIG_BLK_CGROUP
-
-enum blkg_rwstat_type {
- BLKG_RWSTAT_READ,
- BLKG_RWSTAT_WRITE,
- BLKG_RWSTAT_SYNC,
- BLKG_RWSTAT_ASYNC,
-
- BLKG_RWSTAT_NR,
- BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
-};
-
-struct blkcg_gq;
-
-struct blkcg {
- struct cgroup_subsys_state css;
- spinlock_t lock;
-
- struct radix_tree_root blkg_tree;
- struct blkcg_gq __rcu *blkg_hint;
- struct hlist_head blkg_list;
-
- struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
-
- struct list_head all_blkcgs_node;
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct list_head cgwb_list;
-#endif
-};
-
-/*
- * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
- * recursive. Used to carry stats of dead children, and, for blkg_rwstat,
- * to carry result values from read and sum operations.
- */
-struct blkg_stat {
- struct percpu_counter cpu_cnt;
- atomic64_t aux_cnt;
-};
-
-struct blkg_rwstat {
- struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
- atomic64_t aux_cnt[BLKG_RWSTAT_NR];
-};
-
-/*
- * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
- * request_queue (q). This is used by blkcg policies which need to track
- * information per blkcg - q pair.
- *
- * There can be multiple active blkcg policies and each blkg:policy pair is
- * represented by a blkg_policy_data which is allocated and freed by each
- * policy's pd_alloc/free_fn() methods. A policy can allocate private data
- * area by allocating larger data structure which embeds blkg_policy_data
- * at the beginning.
- */
-struct blkg_policy_data {
- /* the blkg and policy id this per-policy data belongs to */
- struct blkcg_gq *blkg;
- int plid;
-};
-
-/*
- * Policies that need to keep per-blkcg data which is independent from any
- * request_queue associated to it should implement cpd_alloc/free_fn()
- * methods. A policy can allocate private data area by allocating larger
- * data structure which embeds blkcg_policy_data at the beginning.
- * cpd_init() is invoked to let each policy handle per-blkcg data.
- */
-struct blkcg_policy_data {
- /* the blkcg and policy id this per-policy data belongs to */
- struct blkcg *blkcg;
- int plid;
-};
-
-/* association between a blk cgroup and a request queue */
-struct blkcg_gq {
- /* Pointer to the associated request_queue */
- struct request_queue *q;
- struct list_head q_node;
- struct hlist_node blkcg_node;
- struct blkcg *blkcg;
-
- /*
- * Each blkg gets congested separately and the congestion state is
- * propagated to the matching bdi_writeback_congested.
- */
- struct bdi_writeback_congested *wb_congested;
-
- /* all non-root blkcg_gq's are guaranteed to have access to parent */
- struct blkcg_gq *parent;
-
- /* request allocation list for this blkcg-q pair */
- struct request_list rl;
-
- /* reference count */
- atomic_t refcnt;
-
- /* is this blkg online? protected by both blkcg and q locks */
- bool online;
-
- struct blkg_rwstat stat_bytes;
- struct blkg_rwstat stat_ios;
-
- struct blkg_policy_data *pd[BLKCG_MAX_POLS];
-
- struct rcu_head rcu_head;
-};
-
-typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
-typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
-typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-
-struct blkcg_policy {
- int plid;
- /* cgroup files for the policy */
- struct cftype *dfl_cftypes;
- struct cftype *legacy_cftypes;
-
- /* operations */
- blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
- blkcg_pol_init_cpd_fn *cpd_init_fn;
- blkcg_pol_free_cpd_fn *cpd_free_fn;
- blkcg_pol_bind_cpd_fn *cpd_bind_fn;
-
- blkcg_pol_alloc_pd_fn *pd_alloc_fn;
- blkcg_pol_init_pd_fn *pd_init_fn;
- blkcg_pol_online_pd_fn *pd_online_fn;
- blkcg_pol_offline_pd_fn *pd_offline_fn;
- blkcg_pol_free_pd_fn *pd_free_fn;
- blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
-};
-
-extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
-struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
- struct request_queue *q, bool update_hint);
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q);
-int blkcg_init_queue(struct request_queue *q);
-void blkcg_drain_queue(struct request_queue *q);
-void blkcg_exit_queue(struct request_queue *q);
-
-/* Blkio controller policy registration */
-int blkcg_policy_register(struct blkcg_policy *pol);
-void blkcg_policy_unregister(struct blkcg_policy *pol);
-int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-
-const char *blkg_dev_name(struct blkcg_gq *blkg);
-void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
- u64 (*prfill)(struct seq_file *,
- struct blkg_policy_data *, int),
- const struct blkcg_policy *pol, int data,
- bool show_total);
-u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
-u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- const struct blkg_rwstat *rwstat);
-u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
-u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- int off);
-int blkg_print_stat_bytes(struct seq_file *sf, void *v);
-int blkg_print_stat_ios(struct seq_file *sf, void *v);
-int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
-int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
-
-u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
- struct blkcg_policy *pol, int off);
-struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
- struct blkcg_policy *pol, int off);
-
-struct blkg_conf_ctx {
- struct gendisk *disk;
- struct blkcg_gq *blkg;
- char *body;
-};
-
-int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- char *input, struct blkg_conf_ctx *ctx);
-void blkg_conf_finish(struct blkg_conf_ctx *ctx);
-
-
-static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct blkcg, css) : NULL;
-}
-
-static inline struct blkcg *task_blkcg(struct task_struct *tsk)
-{
- return css_to_blkcg(task_css(tsk, io_cgrp_id));
-}
-
-static inline struct blkcg *bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_css)
- return css_to_blkcg(bio->bi_css);
- return task_blkcg(current);
-}
-
-static inline struct cgroup_subsys_state *
-task_get_blkcg_css(struct task_struct *task)
-{
- return task_get_css(task, io_cgrp_id);
-}
-
-/**
- * blkcg_parent - get the parent of a blkcg
- * @blkcg: blkcg of interest
- *
- * Return the parent blkcg of @blkcg. Can be called anytime.
- */
-static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
-{
- return css_to_blkcg(blkcg->css.parent);
-}
-
-/**
- * __blkg_lookup - internal version of blkg_lookup()
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- * @update_hint: whether to update lookup hint with the result or not
- *
- * This is internal version and shouldn't be used by policy
- * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
- * @q's bypass state. If @update_hint is %true, the caller should be
- * holding @q->queue_lock and lookup hint is updated on success.
- */
-static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q,
- bool update_hint)
-{
- struct blkcg_gq *blkg;
-
- if (blkcg == &blkcg_root)
- return q->root_blkg;
-
- blkg = rcu_dereference(blkcg->blkg_hint);
- if (blkg && blkg->q == q)
- return blkg;
-
- return blkg_lookup_slowpath(blkcg, q, update_hint);
-}
-
-/**
- * blkg_lookup - lookup blkg for the specified blkcg - q pair
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- *
- * Lookup blkg for the @blkcg - @q pair. This function should be called
- * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
- * - see blk_queue_bypass_start() for details.
- */
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- if (unlikely(blk_queue_bypass(q)))
- return NULL;
- return __blkg_lookup(blkcg, q, false);
-}
-
-/**
- * blkg_to_pdata - get policy private data
- * @blkg: blkg of interest
- * @pol: policy of interest
- *
- * Return pointer to private data associated with the @blkg-@pol pair.
- */
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol)
-{
- return blkg ? blkg->pd[pol->plid] : NULL;
-}
-
-static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
- struct blkcg_policy *pol)
-{
- return blkcg ? blkcg->cpd[pol->plid] : NULL;
-}
-
-/**
- * pdata_to_blkg - get blkg associated with policy private data
- * @pd: policy private data of interest
- *
- * @pd is policy private data. Determine the blkg it's associated with.
- */
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
-{
- return pd ? pd->blkg : NULL;
-}
-
-static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
-{
- return cpd ? cpd->blkcg : NULL;
-}
-
-/**
- * blkg_path - format cgroup path of blkg
- * @blkg: blkg of interest
- * @buf: target buffer
- * @buflen: target buffer length
- *
- * Format the path of the cgroup of @blkg into @buf.
- */
-static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
-{
- return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-}
-
-/**
- * blkg_get - get a blkg reference
- * @blkg: blkg to get
- *
- * The caller should be holding an existing reference.
- */
-static inline void blkg_get(struct blkcg_gq *blkg)
-{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- atomic_inc(&blkg->refcnt);
-}
-
-void __blkg_release_rcu(struct rcu_head *rcu);
-
-/**
- * blkg_put - put a blkg reference
- * @blkg: blkg to put
- */
-static inline void blkg_put(struct blkcg_gq *blkg)
-{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- if (atomic_dec_and_test(&blkg->refcnt))
- call_rcu(&blkg->rcu_head, __blkg_release_rcu);
-}
-
-/**
- * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
- * read locked. If called under either blkcg or queue lock, the iteration
- * is guaranteed to include all and only online blkgs. The caller may
- * update @pos_css by calling css_rightmost_descendant() to skip subtree.
- * @p_blkg is included in the iteration and the first node to be visited.
- */
-#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-/**
- * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead. Synchronization rules are the same. @p_blkg is
- * included in the iteration and the last node to be visited.
- */
-#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-/**
- * blk_get_rl - get request_list to use
- * @q: request_queue of interest
- * @bio: bio which will be attached to the allocated request (may be %NULL)
- *
- * The caller wants to allocate a request from @q to use for @bio. Find
- * the request_list to use and obtain a reference on it. Should be called
- * under queue_lock. This function is guaranteed to return non-%NULL
- * request_list.
- */
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio)
-{
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
-
- rcu_read_lock();
-
- blkcg = bio_blkcg(bio);
-
- /* bypass blkg lookup and use @q->root_rl directly for root */
- if (blkcg == &blkcg_root)
- goto root_rl;
-
- /*
- * Try to use blkg->rl. blkg lookup may fail under memory pressure
- * or if either the blkcg or queue is going away. Fall back to
- * root_rl in such cases.
- */
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg))
- goto root_rl;
-
- blkg_get(blkg);
- rcu_read_unlock();
- return &blkg->rl;
-root_rl:
- rcu_read_unlock();
- return &q->root_rl;
-}
-
-/**
- * blk_put_rl - put request_list
- * @rl: request_list to put
- *
- * Put the reference acquired by blk_get_rl(). Should be called under
- * queue_lock.
- */
-static inline void blk_put_rl(struct request_list *rl)
-{
- if (rl->blkg->blkcg != &blkcg_root)
- blkg_put(rl->blkg);
-}
-
-/**
- * blk_rq_set_rl - associate a request with a request_list
- * @rq: request of interest
- * @rl: target request_list
- *
- * Associate @rq with @rl so that accounting and freeing can know the
- * request_list @rq came from.
- */
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
-{
- rq->rl = rl;
-}
-
-/**
- * blk_rq_rl - return the request_list a request came from
- * @rq: request of interest
- *
- * Return the request_list @rq is allocated from.
- */
-static inline struct request_list *blk_rq_rl(struct request *rq)
-{
- return rq->rl;
-}
-
-struct request_list *__blk_queue_next_rl(struct request_list *rl,
- struct request_queue *q);
-/**
- * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
- *
- * Should be used under queue_lock.
- */
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
-
-static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
-{
- int ret;
-
- ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
- if (ret)
- return ret;
-
- atomic64_set(&stat->aux_cnt, 0);
- return 0;
-}
-
-static inline void blkg_stat_exit(struct blkg_stat *stat)
-{
- percpu_counter_destroy(&stat->cpu_cnt);
-}
-
-/**
- * blkg_stat_add - add a value to a blkg_stat
- * @stat: target blkg_stat
- * @val: value to add
- *
- * Add @val to @stat. The caller must ensure that IRQ on the same CPU
- * don't re-enter this function for the same counter.
- */
-static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
-{
- percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
-}
-
-/**
- * blkg_stat_read - read the current value of a blkg_stat
- * @stat: blkg_stat to read
- */
-static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
-{
- return percpu_counter_sum_positive(&stat->cpu_cnt);
-}
-
-/**
- * blkg_stat_reset - reset a blkg_stat
- * @stat: blkg_stat to reset
- */
-static inline void blkg_stat_reset(struct blkg_stat *stat)
-{
- percpu_counter_set(&stat->cpu_cnt, 0);
- atomic64_set(&stat->aux_cnt, 0);
-}
-
-/**
- * blkg_stat_add_aux - add a blkg_stat into another's aux count
- * @to: the destination blkg_stat
- * @from: the source
- *
- * Add @from's count including the aux one to @to's aux count.
- */
-static inline void blkg_stat_add_aux(struct blkg_stat *to,
- struct blkg_stat *from)
-{
- atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
- &to->aux_cnt);
-}
-
-static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
-{
- int i, ret;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
- if (ret) {
- while (--i >= 0)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
- return ret;
- }
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
- return 0;
-}
-
-static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
-}
-
-/**
- * blkg_rwstat_add - add a value to a blkg_rwstat
- * @rwstat: target blkg_rwstat
- * @op: REQ_OP and flags
- * @val: value to add
- *
- * Add @val to @rwstat. The counters are chosen according to @rw. The
- * caller is responsible for synchronizing calls to this function.
- */
-static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
- unsigned int op, uint64_t val)
-{
- struct percpu_counter *cnt;
-
- if (op_is_write(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-
- if (op_is_sync(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-}
-
-/**
- * blkg_rwstat_read - read the current values of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Read the current snapshot of @rwstat and return it in the aux counts.
- */
-static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
-{
- struct blkg_rwstat result;
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_set(&result.aux_cnt[i],
- percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
- return result;
-}
-
-/**
- * blkg_rwstat_total - read the total count of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Return the total count of @rwstat regardless of the IO direction. This
- * function can be called without synchronization and takes care of u64
- * atomicity.
- */
-static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
-{
- struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
-
- return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
- atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
-}
-
-/**
- * blkg_rwstat_reset - reset a blkg_rwstat
- * @rwstat: blkg_rwstat to reset
- */
-static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- percpu_counter_set(&rwstat->cpu_cnt[i], 0);
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
-}
-
-/**
- * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
- * @to: the destination blkg_rwstat
- * @from: the source
- *
- * Add @from's count including the aux one to @to's aux count.
- */
-static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
- struct blkg_rwstat *from)
-{
- struct blkg_rwstat v = blkg_rwstat_read(from);
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(atomic64_read(&v.aux_cnt[i]) +
- atomic64_read(&from->aux_cnt[i]),
- &to->aux_cnt[i]);
-}
-
-#ifdef CONFIG_BLK_DEV_THROTTLING
-extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
- struct bio *bio);
-#else
-static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
- struct bio *bio) { return false; }
-#endif
-
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
- struct bio *bio)
-{
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
- bool throtl = false;
-
- rcu_read_lock();
- blkcg = bio_blkcg(bio);
-
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- spin_unlock_irq(q->queue_lock);
- }
-
- throtl = blk_throtl_bio(q, blkg, bio);
-
- if (!throtl) {
- blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
- bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
- }
-
- rcu_read_unlock();
- return !throtl;
-}
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
+void blkcg_maybe_throttle_current(void);
+bool blk_cgroup_congested(void);
+void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
+void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css);
+struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
-struct blkcg {
-};
-
-struct blkg_policy_data {
-};
-
-struct blkcg_policy_data {
-};
-
-struct blkcg_gq {
-};
-
-struct blkcg_policy {
-};
-
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
-static inline struct cgroup_subsys_state *
-task_get_blkcg_css(struct task_struct *task)
+static inline void blkcg_maybe_throttle_current(void) { }
+static inline bool blk_cgroup_congested(void) { return false; }
+static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
return NULL;
}
+#endif /* CONFIG_BLK_CGROUP */
-#ifdef CONFIG_BLOCK
-
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
-static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_drain_queue(struct request_queue *q) { }
-static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
-static inline int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { }
-
-static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
-
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol) { return NULL; }
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
-static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
-static inline void blkg_get(struct blkcg_gq *blkg) { }
-static inline void blkg_put(struct blkcg_gq *blkg) { }
-
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio) { return &q->root_rl; }
-static inline void blk_put_rl(struct request_list *rl) { }
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
-static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
-
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
- struct bio *bio) { return true; }
-
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len);
+char *blkcg_get_fc_appid(struct bio *bio);
-#endif /* CONFIG_BLOCK */
-#endif /* CONFIG_BLK_CGROUP */
#endif /* _BLK_CGROUP_H */
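
After the rewrite, the header exposes only the high-level congestion and
throttling entry points. A hedged sketch of a caller: optional work such as
readahead can be skipped when the submitting task's blkcg is already being
throttled. my_readahead_one() is a made-up helper, not a kernel API.

#include <linux/blk-cgroup.h>

static void my_opportunistic_readahead(unsigned long nr_pages)
{
	while (nr_pages--) {
		/* Best-effort I/O: bail out rather than add to congestion. */
		if (blk_cgroup_congested())
			break;
		my_readahead_one();
	}
}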
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
new file mode 100644
index 000000000000..4f39e9cd7576
--- /dev/null
+++ b/include/linux/blk-crypto-profile.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_PROFILE_H
+#define __LINUX_BLK_CRYPTO_PROFILE_H
+
+#include <linux/bio.h>
+#include <linux/blk-crypto.h>
+
+struct blk_crypto_profile;
+
+/**
+ * struct blk_crypto_ll_ops - functions to control inline encryption hardware
+ *
+ * Low-level operations for controlling inline encryption hardware. This
+ * interface must be implemented by storage drivers that support inline
+ * encryption. All functions may sleep, are serialized by profile->lock, and
+ * are never called while profile->dev (if set) is runtime-suspended.
+ */
+struct blk_crypto_ll_ops {
+
+ /**
+ * @keyslot_program: Program a key into the inline encryption hardware.
+ *
+ * Program @key into the specified @slot in the inline encryption
+ * hardware, overwriting any key that the keyslot may already contain.
+ * The keyslot is guaranteed not to be in use by any I/O.
+ *
+ * This is required if the device has keyslots. Otherwise (i.e. if the
+ * device is a layered device, or if the device is real hardware that
+ * simply doesn't have the concept of keyslots) it is never called.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_program)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+
+ /**
+ * @keyslot_evict: Evict a key from the inline encryption hardware.
+ *
+ * If the device has keyslots, this function must evict the key from the
+ * specified @slot. The slot will contain @key, but the @key argument
+ * normally need not be used, as @slot alone should be sufficient.
+ * The keyslot is guaranteed not to be in use by any I/O.
+ *
+ * If the device doesn't have keyslots itself, this function must evict
+ * @key from any underlying devices. @slot won't be valid in this case.
+ *
+ * If there are no keyslots and no underlying devices, this function
+ * isn't required.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_evict)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+
+ /**
+ * @derive_sw_secret: Derive the software secret from a hardware-wrapped
+ * key in ephemerally-wrapped form.
+ *
+ * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+ * is supported.
+ *
+ * Must return 0 on success, -EBADMSG if the key is invalid, or another
+ * -errno code on other errors.
+ */
+ int (*derive_sw_secret)(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
+ /**
+ * @import_key: Create a hardware-wrapped key by importing a raw key.
+ *
+ * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+ * is supported.
+ *
+ * On success, must write the new key in long-term wrapped form to
+ * @lt_key and return its size in bytes. On failure, must return a
+ * -errno value.
+ */
+ int (*import_key)(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+ /**
+ * @generate_key: Generate a hardware-wrapped key.
+ *
+ * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+ * is supported.
+ *
+ * On success, must write the new key in long-term wrapped form to
+ * @lt_key and return its size in bytes. On failure, must return a
+ * -errno value.
+ */
+ int (*generate_key)(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+ /**
+ * @prepare_key: Prepare a hardware-wrapped key to be used.
+ *
+ * Prepare a hardware-wrapped key to be used by converting it from
+ * long-term wrapped form to ephemerally-wrapped form. This only needs
+ * to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED is supported.
+ *
+ * On success, must write the key in ephemerally-wrapped form to
+ * @eph_key and return its size in bytes. On failure, must return
+ * -EBADMSG if the key is invalid, or another -errno on other error.
+ */
+ int (*prepare_key)(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+};
+
+/**
+ * struct blk_crypto_profile - inline encryption profile for a device
+ *
+ * This struct contains a storage device's inline encryption capabilities (e.g.
+ * the supported crypto algorithms), driver-provided functions to control the
+ * inline encryption hardware (e.g. programming and evicting keys), and optional
+ * device-independent keyslot management data.
+ */
+struct blk_crypto_profile {
+
+ /* public: Drivers must initialize the following fields. */
+
+ /**
+ * @ll_ops: Driver-provided functions to control the inline encryption
+ * hardware, e.g. program and evict keys.
+ */
+ struct blk_crypto_ll_ops ll_ops;
+
+ /**
+ * @max_dun_bytes_supported: The maximum number of bytes supported for
+ * specifying the data unit number (DUN). Specifically, the range of
+ * supported DUNs is 0 through (1 << (8 * max_dun_bytes_supported)) - 1.
+ */
+ unsigned int max_dun_bytes_supported;
+
+ /**
+ * @key_types_supported: A bitmask of the supported key types:
+ * BLK_CRYPTO_KEY_TYPE_RAW and/or BLK_CRYPTO_KEY_TYPE_HW_WRAPPED.
+ */
+ unsigned int key_types_supported;
+
+ /**
+ * @modes_supported: Array of bitmasks that specifies whether each
+ * combination of crypto mode and data unit size is supported.
+ * Specifically, the i'th bit of modes_supported[crypto_mode] is set if
+ * crypto_mode can be used with a data unit size of (1 << i). Note that
+ * only data unit sizes that are powers of 2 can be supported.
+ */
+ unsigned int modes_supported[BLK_ENCRYPTION_MODE_MAX];
+
+ /**
+ * @dev: An optional device for runtime power management. If the driver
+ * provides this device, it will be runtime-resumed before any function
+ * in @ll_ops is called and will remain resumed during the call.
+ */
+ struct device *dev;
+
+ /* private: The following fields shouldn't be accessed by drivers. */
+
+ /* Number of keyslots, or 0 if not applicable */
+ unsigned int num_slots;
+
+ /*
+ * Serializes all calls to functions in @ll_ops as well as all changes
+ * to @slot_hashtable. This can also be taken in read mode to look up
+ * keyslots while ensuring that they can't be changed concurrently.
+ */
+ struct rw_semaphore lock;
+ struct lock_class_key lockdep_key;
+
+ /* List of idle slots, with least recently used slot at front */
+ wait_queue_head_t idle_slots_wait_queue;
+ struct list_head idle_slots;
+ spinlock_t idle_slots_lock;
+
+ /*
+ * Hash table which maps struct *blk_crypto_key to keyslots, so that we
+ * can find a key's keyslot in O(1) time rather than O(num_slots).
+ * Protected by 'lock'.
+ */
+ struct hlist_head *slot_hashtable;
+ unsigned int log_slot_ht_size;
+
+ /* Per-keyslot data */
+ struct blk_crypto_keyslot *slots;
+};
+
+int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+int devm_blk_crypto_profile_init(struct device *dev,
+ struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot);
+
+void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);
+
+void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
+
+int blk_crypto_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+int blk_crypto_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
+ const struct blk_crypto_profile *child);
+
+bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
+ const struct blk_crypto_profile *reference);
+
+void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
+ const struct blk_crypto_profile *src);
+
+#endif /* __LINUX_BLK_CRYPTO_PROFILE_H */
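
Read as a sketch rather than a reference implementation, a driver for inline
encryption hardware with a handful of keyslots would wire the profile up
roughly as follows; my_hw_program_key() and my_hw_evict_key() are hypothetical
register-level helpers, not real APIs.

#include <linux/blk-crypto-profile.h>

static int my_keyslot_program(struct blk_crypto_profile *profile,
			      const struct blk_crypto_key *key,
			      unsigned int slot)
{
	return my_hw_program_key(slot, key->bytes, key->size);
}

static int my_keyslot_evict(struct blk_crypto_profile *profile,
			    const struct blk_crypto_key *key,
			    unsigned int slot)
{
	return my_hw_evict_key(slot);
}

static const struct blk_crypto_ll_ops my_crypto_ops = {
	.keyslot_program	= my_keyslot_program,
	.keyslot_evict		= my_keyslot_evict,
};

static int my_init_crypto(struct device *dev,
			  struct blk_crypto_profile *profile)
{
	int err = devm_blk_crypto_profile_init(dev, profile, 8);

	if (err)
		return err;
	profile->ll_ops = my_crypto_ops;
	profile->max_dun_bytes_supported = 8;
	profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
	/* AES-256-XTS with 4096-byte data units: bit 12 = 1 << 12 = 4096. */
	profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] = 1 << 12;
	profile->dev = dev;	/* optional, for runtime PM */
	return 0;
}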
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
new file mode 100644
index 000000000000..58b0c5254a67
--- /dev/null
+++ b/include/linux/blk-crypto.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_H
+#define __LINUX_BLK_CRYPTO_H
+
+#include <linux/minmax.h>
+#include <linux/types.h>
+#include <uapi/linux/blk-crypto.h>
+
+enum blk_crypto_mode_num {
+ BLK_ENCRYPTION_MODE_INVALID,
+ BLK_ENCRYPTION_MODE_AES_256_XTS,
+ BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
+ BLK_ENCRYPTION_MODE_ADIANTUM,
+ BLK_ENCRYPTION_MODE_SM4_XTS,
+ BLK_ENCRYPTION_MODE_MAX,
+};
+
+/*
+ * Supported types of keys. Must be bitflags due to their use in
+ * blk_crypto_profile::key_types_supported.
+ */
+enum blk_crypto_key_type {
+ /*
+ * Raw keys (i.e. "software keys"). These keys are simply kept in raw,
+ * plaintext form in kernel memory.
+ */
+ BLK_CRYPTO_KEY_TYPE_RAW = 0x1,
+
+ /*
+ * Hardware-wrapped keys. These keys are only present in kernel memory
+ * in ephemerally-wrapped form, and they can only be unwrapped by
+ * dedicated hardware. For details, see the "Hardware-wrapped keys"
+ * section of Documentation/block/inline-encryption.rst.
+ */
+ BLK_CRYPTO_KEY_TYPE_HW_WRAPPED = 0x2,
+};
+
+/*
+ * Currently the maximum raw key size is 64 bytes, as that is the key size of
+ * BLK_ENCRYPTION_MODE_AES_256_XTS which takes the longest key.
+ *
+ * The maximum hardware-wrapped key size depends on the hardware's key wrapping
+ * algorithm, which is a hardware implementation detail, so it isn't precisely
+ * specified. But currently 128 bytes is plenty in practice. Implementations
+ * are recommended to wrap a 32-byte key for the hardware KDF with AES-256-GCM,
+ * which should result in a size closer to 64 bytes than 128.
+ *
+ * Both of these values can trivially be increased if ever needed.
+ */
+#define BLK_CRYPTO_MAX_RAW_KEY_SIZE 64
+#define BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE 128
+
+#define BLK_CRYPTO_MAX_ANY_KEY_SIZE \
+ MAX(BLK_CRYPTO_MAX_RAW_KEY_SIZE, BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE)
+
+/*
+ * Size of the "software secret" which can be derived from a hardware-wrapped
+ * key. This is currently always 32 bytes. Note, the choice of 32 bytes
+ * assumes that the software secret is only used directly for algorithms that
+ * don't require more than a 256-bit key to get the desired security strength.
+ * If it were to be used e.g. directly as an AES-256-XTS key, then this would
+ * need to be increased (which is possible if hardware supports it, but care
+ * would need to be taken to avoid breaking users who need exactly 32 bytes).
+ */
+#define BLK_CRYPTO_SW_SECRET_SIZE 32
+
+/**
+ * struct blk_crypto_config - an inline encryption key's crypto configuration
+ * @crypto_mode: encryption algorithm this key is for
+ * @data_unit_size: the data unit size for all encryption/decryptions with this
+ * key. This is the size in bytes of each individual plaintext and
+ * ciphertext. This is always a power of 2. It might be e.g. the
+ * filesystem block size or the disk sector size.
+ * @dun_bytes: the maximum number of bytes of DUN used when using this key
+ * @key_type: the type of this key -- either raw or hardware-wrapped
+ */
+struct blk_crypto_config {
+ enum blk_crypto_mode_num crypto_mode;
+ unsigned int data_unit_size;
+ unsigned int dun_bytes;
+ enum blk_crypto_key_type key_type;
+};
+
+/**
+ * struct blk_crypto_key - an inline encryption key
+ * @crypto_cfg: the crypto mode, data unit size, key type, and other
+ * characteristics of this key and how it will be used
+ * @data_unit_size_bits: log2 of data_unit_size
+ * @size: size of this key in bytes. The size of a raw key is fixed for a given
+ * crypto mode, but the size of a hardware-wrapped key can vary.
+ * @bytes: the bytes of this key. Only the first @size bytes are significant.
+ *
+ * A blk_crypto_key is immutable once created, and many bios can reference it at
+ * the same time. It must not be freed until all bios using it have completed
+ * and it has been evicted from all devices on which it may have been used.
+ */
+struct blk_crypto_key {
+ struct blk_crypto_config crypto_cfg;
+ unsigned int data_unit_size_bits;
+ unsigned int size;
+ u8 bytes[BLK_CRYPTO_MAX_ANY_KEY_SIZE];
+};
+
+#define BLK_CRYPTO_MAX_IV_SIZE 32
+#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE / sizeof(u64))
+
+/**
+ * struct bio_crypt_ctx - an inline encryption context
+ * @bc_key: the key, algorithm, and data unit size to use
+ * @bc_dun: the data unit number (starting IV) to use
+ *
+ * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for
+ * write requests) or decrypted (for read requests) inline by the storage device
+ * or controller, or by the crypto API fallback.
+ */
+struct bio_crypt_ctx {
+ const struct blk_crypto_key *bc_key;
+ u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+};
+
+#include <linux/blk_types.h>
+#include <linux/blkdev.h>
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+
+static inline bool bio_has_crypt_ctx(struct bio *bio)
+{
+ return bio->bi_crypt_context;
+}
+
+void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
+ const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
+ gfp_t gfp_mask);
+
+bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
+ unsigned int bytes,
+ const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]);
+
+int blk_crypto_init_key(struct blk_crypto_key *blk_key,
+ const u8 *key_bytes, size_t key_size,
+ enum blk_crypto_key_type key_type,
+ enum blk_crypto_mode_num crypto_mode,
+ unsigned int dun_bytes,
+ unsigned int data_unit_size);
+
+int blk_crypto_start_using_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);
+
+void blk_crypto_evict_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);
+
+bool blk_crypto_config_supported_natively(struct block_device *bdev,
+ const struct blk_crypto_config *cfg);
+bool blk_crypto_config_supported(struct block_device *bdev,
+ const struct blk_crypto_config *cfg);
+
+int blk_crypto_derive_sw_secret(struct block_device *bdev,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+static inline bool bio_has_crypt_ctx(struct bio *bio)
+{
+ return false;
+}
+
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
+/**
+ * bio_crypt_clone - clone bio encryption context
+ * @dst: destination bio
+ * @src: source bio
+ * @gfp_mask: memory allocation flags
+ *
+ * If @src has an encryption context, clone it to @dst.
+ *
+ * Return: 0 on success, -ENOMEM if out of memory. -ENOMEM is only possible if
+ * @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
+ */
+static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
+ gfp_t gfp_mask)
+{
+ if (bio_has_crypt_ctx(src))
+ return __bio_crypt_clone(dst, src, gfp_mask);
+ return 0;
+}
+
+#endif /* __LINUX_BLK_CRYPTO_H */
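
A matching sketch from the key user's side (filesystem-style; the function
names are hypothetical). Note that the blk_crypto_key must outlive every bio
that references it, e.g. by living in the inode, since bio_crypt_set_ctx()
stores a pointer to it rather than a copy.

#include <linux/bio.h>
#include <linux/blk-crypto.h>

static int my_setup_key(struct block_device *bdev,
			struct blk_crypto_key *key, const u8 raw_key[64])
{
	int err;

	err = blk_crypto_init_key(key, raw_key, 64, BLK_CRYPTO_KEY_TYPE_RAW,
				  BLK_ENCRYPTION_MODE_AES_256_XTS,
				  8, 4096);	/* dun_bytes, data_unit_size */
	if (err)
		return err;

	/* Verifies support; sets up the crypto API fallback if needed. */
	return blk_crypto_start_using_key(bdev, key);
}

static void my_submit_encrypted(struct bio *bio,
				const struct blk_crypto_key *key, u64 dun0)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { dun0 };

	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
	submit_bio(bio);
}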
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
new file mode 100644
index 000000000000..a6b84206eb94
--- /dev/null
+++ b/include/linux/blk-integrity.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BLK_INTEGRITY_H
+#define _LINUX_BLK_INTEGRITY_H
+
+#include <linux/blk-mq.h>
+#include <linux/bio-integrity.h>
+#include <linux/blk-mq-dma.h>
+
+struct request;
+
+/*
+ * Maximum contiguous integrity buffer allocation.
+ */
+#define BLK_INTEGRITY_MAX_SIZE SZ_2M
+
+enum blk_integrity_flags {
+ BLK_INTEGRITY_NOVERIFY = 1 << 0,
+ BLK_INTEGRITY_NOGENERATE = 1 << 1,
+ BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
+ BLK_INTEGRITY_REF_TAG = 1 << 3,
+ BLK_INTEGRITY_STACKED = 1 << 4,
+};
+
+const char *blk_integrity_profile_name(struct blk_integrity *bi);
+bool queue_limits_stack_integrity(struct queue_limits *t,
+ struct queue_limits *b);
+static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
+ struct block_device *bdev)
+{
+ return queue_limits_stack_integrity(t, &bdev->bd_disk->queue->limits);
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
+
+int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes);
+int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
+ struct logical_block_metadata_cap __user *argp);
+bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter);
+bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter);
+
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return q->limits.integrity.metadata_size;
+}
+
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ if (!blk_integrity_queue_supports_integrity(disk->queue))
+ return NULL;
+ return &disk->queue->limits.integrity;
+}
+
+static inline struct blk_integrity *
+bdev_get_integrity(struct block_device *bdev)
+{
+ return blk_get_integrity(bdev->bd_disk);
+}
+
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return q->limits.max_integrity_segments;
+}
+
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512 byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_intervals(bi, sectors) * bi->metadata_size;
+}
+
+static inline bool blk_integrity_rq(struct request *rq)
+{
+ return rq->cmd_flags & REQ_INTEGRITY;
+}
+
+/*
+ * Return the current bvec that contains the integrity data. bip_iter may be
+ * advanced to iterate over the integrity data.
+ */
+static inline struct bio_vec rq_integrity_vec(struct request *rq)
+{
+ return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
+ rq->bio->bi_integrity->bip_iter);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
+ struct logical_block_metadata_cap __user *argp)
+{
+ return -ENOIOCTLCMD;
+}
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+ struct bio *b)
+{
+ return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request *q,
+ struct scatterlist *s)
+{
+ return 0;
+}
+static inline int blk_rq_integrity_map_user(struct request *rq,
+ void __user *ubuf,
+ ssize_t bytes)
+{
+ return -EINVAL;
+}
+static inline bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter)
+{
+ return false;
+}
+static inline bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter)
+{
+ return false;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+ return NULL;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ return NULL;
+}
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return false;
+}
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+static inline int blk_integrity_rq(struct request *rq)
+{
+ return 0;
+}
+
+static inline struct bio_vec rq_integrity_vec(struct request *rq)
+{
+ /* the optimizer will remove all calls to this function */
+ return (struct bio_vec){ };
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+#endif /* _LINUX_BLK_INTEGRITY_H */
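
A standalone arithmetic check of the interval conversion above (plain C, not
kernel code): a device with 4096-byte protection intervals (interval_exp = 12)
and 8 bytes of metadata per interval.

#include <stdio.h>

static unsigned int intervals(unsigned int interval_exp, unsigned int sectors)
{
	/* Mirrors bio_integrity_intervals(): sectors are 512 bytes each. */
	return sectors >> (interval_exp - 9);
}

int main(void)
{
	unsigned int n = intervals(12, 64);	/* a 64-sector (32 KiB) bio */

	printf("%u intervals, %u metadata bytes\n", n, n * 8);
	/* prints: 8 intervals, 64 metadata bytes */
	return 0;
}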
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
new file mode 100644
index 000000000000..cb88fc791fbd
--- /dev/null
+++ b/include/linux/blk-mq-dma.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef BLK_MQ_DMA_H
+#define BLK_MQ_DMA_H
+
+#include <linux/blk-mq.h>
+#include <linux/pci-p2pdma.h>
+
+struct blk_map_iter {
+ struct bvec_iter iter;
+ struct bio *bio;
+ struct bio_vec *bvecs;
+ bool is_integrity;
+};
+
+struct blk_dma_iter {
+ /* Output address range for this iteration */
+ dma_addr_t addr;
+ u32 len;
+ struct pci_p2pdma_map_state p2pdma;
+
+ /* Status code. Only valid when blk_rq_dma_map_iter_* returned false */
+ blk_status_t status;
+
+ /* Internal to blk_rq_dma_map_iter_* */
+ struct blk_map_iter iter;
+};
+
+bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter);
+bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter);
+
+/**
+ * blk_rq_dma_map_coalesce - were all segments coalesced?
+ * @state: DMA state to check
+ *
+ * Returns true if blk_rq_dma_map_iter_start coalesced all segments into a
+ * single DMA range.
+ */
+static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
+{
+ return dma_use_iova(state);
+}
+
+/**
+ * blk_rq_dma_unmap - try to DMA unmap a request
+ * @req: request to unmap
+ * @dma_dev: device to unmap from
+ * @state: DMA IOVA state
+ * @mapped_len: number of bytes to unmap
+ * @map: peer-to-peer mapping type
+ *
+ * Returns %false if the caller needs to manually unmap every DMA segment
+ * mapped using @iter, or %true if no work is left to be done.
+ */
+static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len,
+ enum pci_p2pdma_map_type map)
+{
+ if (map == PCI_P2PDMA_MAP_BUS_ADDR)
+ return true;
+
+ if (dma_use_iova(state)) {
+ unsigned int attrs = 0;
+
+ if (map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
+ attrs |= DMA_ATTR_MMIO;
+
+ dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
+ attrs);
+ return true;
+ }
+
+ return !dma_need_unmap(dma_dev);
+}
+#endif /* BLK_MQ_DMA_H */
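
The intended calling convention for the iterator pair above is a simple
start/next loop. This hedged sketch assumes a hypothetical my_add_entry()
that records each mapped range in a driver-specific table.

static blk_status_t my_map_rq(struct request *req, struct device *dma_dev,
			      struct dma_iova_state *state)
{
	struct blk_dma_iter iter;

	if (!blk_rq_dma_map_iter_start(req, dma_dev, state, &iter))
		return iter.status;	/* mapping error, or nothing to map */

	do {
		/* Each successful iteration yields one range in addr/len. */
		my_add_entry(req, iter.addr, iter.len);
	} while (blk_rq_dma_map_iter_next(req, dma_dev, state, &iter));

	/* BLK_STS_OK if the iterator ran to completion, an error otherwise. */
	return iter.status;
}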
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
deleted file mode 100644
index 6ab595259112..000000000000
--- a/include/linux/blk-mq-pci.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _LINUX_BLK_MQ_PCI_H
-#define _LINUX_BLK_MQ_PCI_H
-
-struct blk_mq_tag_set;
-struct pci_dev;
-
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev);
-
-#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
deleted file mode 100644
index b4ade198007d..000000000000
--- a/include/linux/blk-mq-rdma.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _LINUX_BLK_MQ_RDMA_H
-#define _LINUX_BLK_MQ_RDMA_H
-
-struct blk_mq_tag_set;
-struct ib_device;
-
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
- struct ib_device *dev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
deleted file mode 100644
index b1ef6e14744f..000000000000
--- a/include/linux/blk-mq-virtio.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _LINUX_BLK_MQ_VIRTIO_H
-#define _LINUX_BLK_MQ_VIRTIO_H
-
-struct blk_mq_tag_set;
-struct virtio_device;
-
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
- struct virtio_device *vdev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 14542308d25b..cae9e857aea4 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -1,214 +1,804 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
+#include <linux/lockdep.h>
+#include <linux/scatterlist.h>
+#include <linux/prefetch.h>
#include <linux/srcu.h>
+#include <linux/rw_hint.h>
+#include <linux/rwsem.h>
struct blk_mq_tags;
struct blk_flush_queue;
+#define BLKDEV_MIN_RQ 4
+#define BLKDEV_DEFAULT_RQ 128
+
+enum rq_end_io_ret {
+ RQ_END_IO_NONE,
+ RQ_END_IO_FREE,
+};
+
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+
+/* request flags */
+typedef __u32 __bitwise req_flags_t;
+
+/* Keep rqf_name[] in sync with the definitions below */
+enum rqf_flags {
+ /* drive already may have started this one */
+ __RQF_STARTED,
+ /* request for flush sequence */
+ __RQF_FLUSH_SEQ,
+ /* merge of different types, fail separately */
+ __RQF_MIXED_MERGE,
+ /* don't call prep for this one */
+ __RQF_DONTPREP,
+ /* use hctx->sched_tags */
+ __RQF_SCHED_TAGS,
+ /* use an I/O scheduler for this request */
+ __RQF_USE_SCHED,
+ /* vaguely specified driver internal error. Ignored by block layer */
+ __RQF_FAILED,
+ /* don't warn about errors */
+ __RQF_QUIET,
+ /* account into disk and partition IO statistics */
+ __RQF_IO_STAT,
+ /* runtime pm request */
+ __RQF_PM,
+ /* on IO scheduler merge hash */
+ __RQF_HASHED,
+ /* track IO completion time */
+ __RQF_STATS,
+ /* Look at ->special_vec for the actual data payload instead of the
+ bio chain. */
+ __RQF_SPECIAL_PAYLOAD,
+ /* request completion needs to be signaled to zone write plugging. */
+ __RQF_ZONE_WRITE_PLUGGING,
+ /* ->timeout has been called, don't expire again */
+ __RQF_TIMED_OUT,
+ __RQF_RESV,
+ __RQF_BITS
+};
+
+#define RQF_STARTED ((__force req_flags_t)(1 << __RQF_STARTED))
+#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << __RQF_FLUSH_SEQ))
+#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << __RQF_MIXED_MERGE))
+#define RQF_DONTPREP ((__force req_flags_t)(1 << __RQF_DONTPREP))
+#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << __RQF_SCHED_TAGS))
+#define RQF_USE_SCHED ((__force req_flags_t)(1 << __RQF_USE_SCHED))
+#define RQF_FAILED ((__force req_flags_t)(1 << __RQF_FAILED))
+#define RQF_QUIET ((__force req_flags_t)(1 << __RQF_QUIET))
+#define RQF_IO_STAT ((__force req_flags_t)(1 << __RQF_IO_STAT))
+#define RQF_PM ((__force req_flags_t)(1 << __RQF_PM))
+#define RQF_HASHED ((__force req_flags_t)(1 << __RQF_HASHED))
+#define RQF_STATS ((__force req_flags_t)(1 << __RQF_STATS))
+#define RQF_SPECIAL_PAYLOAD \
+ ((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD))
+#define RQF_ZONE_WRITE_PLUGGING \
+ ((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING))
+#define RQF_TIMED_OUT ((__force req_flags_t)(1 << __RQF_TIMED_OUT))
+#define RQF_RESV ((__force req_flags_t)(1 << __RQF_RESV))
+
+/* flags that prevent us from merging requests: */
+#define RQF_NOMERGE_FLAGS \
+ (RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+};
+
+/*
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
+ */
+struct request {
+ struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
+
+ blk_opf_t cmd_flags; /* op and common flags */
+ req_flags_t rq_flags;
+
+ int tag;
+ int internal_tag;
+
+ unsigned int timeout;
+
+ /* the following two fields are internal, NEVER access directly */
+ unsigned int __data_len; /* total data len */
+ sector_t __sector; /* sector cursor */
+
+ struct bio *bio;
+ struct bio *biotail;
+
+ union {
+ struct list_head queuelist;
+ struct request *rq_next;
+ };
+
+ struct block_device *part;
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+ /* Time that the first bio started allocating this request. */
+ u64 alloc_time_ns;
+#endif
+ /* Time that this request was allocated for this IO. */
+ u64 start_time_ns;
+ /* Time that I/O was submitted to the device. */
+ u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+ unsigned short wbt_flags;
+#endif
+ /*
+ * rq sectors used for blk stats. It has the same value
+ * as blk_rq_sectors(rq), except that it is never zeroed
+ * by completion.
+ */
+ unsigned short stats_sectors;
+
+ /*
+ * Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+ unsigned short nr_integrity_segments;
+
+ /*
+ * The lowest set bit for address gaps between physical segments. This
+ * provides information necessary for dma optimization opportunities,
+ * such as testing whether the segments can be coalesced against the
+ * device's iommu granule.
+ */
+ unsigned char phys_gap_bit;
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *crypt_ctx;
+ struct blk_crypto_keyslot *crypt_keyslot;
+#endif
+
+ enum mq_rq_state state;
+ atomic_t ref;
+
+ unsigned long deadline;
+
+ /*
+ * The hash is used inside the scheduler, and killed once the
+ * request reaches the dispatch list. The ipi_list is only used
+ * to queue the request for softirq completion, which is long
+ * after the request has been unhashed (and even removed from
+ * the dispatch list).
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+ struct llist_node ipi_list;
+ };
+
+ /*
+ * The rb_node is only used inside the io scheduler, requests
+ * are pruned when moved to the dispatch queue. special_vec must
+ * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be
+ * inserted into an IO scheduler.
+ */
+ union {
+ struct rb_node rb_node; /* sort/lookup */
+ struct bio_vec special_vec;
+ };
+
+ /*
+ * Three pointers are available for the IO schedulers, if they need
+ * more they have to dynamically allocate it.
+ */
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
+
+ struct {
+ unsigned int seq;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+
+ u64 fifo_time;
+
+ /*
+ * completion callback.
+ */
+ rq_end_io_fn *end_io;
+ void *end_io_data;
+};
+
+/*
+ * Returns a mask with all bits starting at req->phys_gap_bit set to 1.
+ */
+static inline unsigned long req_phys_gap_mask(const struct request *req)
+{
+ return ~(((1UL << req->phys_gap_bit) >> 1) - 1);
+}
+
+static inline enum req_op req_op(const struct request *req)
+{
+ return req->cmd_flags & REQ_OP_MASK;
+}
+
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return blk_op_is_passthrough(rq->cmd_flags);
+}
+
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+ if (req->bio)
+ return req->bio->bi_ioprio;
+ return 0;
+}
+
+#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
+
+#define rq_dma_dir(rq) \
+ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+static inline int rq_list_empty(const struct rq_list *rl)
+{
+ return rl->head == NULL;
+}
+
+static inline void rq_list_init(struct rq_list *rl)
+{
+ rl->head = NULL;
+ rl->tail = NULL;
+}
+
+static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
+{
+ rq->rq_next = NULL;
+ if (rl->tail)
+ rl->tail->rq_next = rq;
+ else
+ rl->head = rq;
+ rl->tail = rq;
+}
+
+static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
+{
+ rq->rq_next = rl->head;
+ rl->head = rq;
+ if (!rl->tail)
+ rl->tail = rq;
+}
+
+static inline struct request *rq_list_pop(struct rq_list *rl)
+{
+ struct request *rq = rl->head;
+
+ if (rq) {
+ rl->head = rl->head->rq_next;
+ if (!rl->head)
+ rl->tail = NULL;
+ rq->rq_next = NULL;
+ }
+
+ return rq;
+}
+
+static inline struct request *rq_list_peek(struct rq_list *rl)
+{
+ return rl->head;
+}
+
+#define rq_list_for_each(rl, pos) \
+ for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)
+
+#define rq_list_for_each_safe(rl, pos, nxt) \
+ for (pos = rq_list_peek((rl)), nxt = pos ? pos->rq_next : NULL; \
+ pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
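
Editor's note: a small usage sketch for the rq_list helpers above; handle_one() is a hypothetical per-request handler.

static void handle_one(struct request *rq);

static void my_drain(struct rq_list *rl)
{
	struct request *rq;

	/* pop requests in FIFO order until the list is empty */
	while ((rq = rq_list_pop(rl)))
		handle_one(rq);
}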
+
+/**
+ * enum blk_eh_timer_return - How the timeout handler should proceed
+ * @BLK_EH_DONE: The block driver completed the command or will complete it at
+ * a later time.
+ * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
+ * request to complete.
+ */
+enum blk_eh_timer_return {
+ BLK_EH_DONE,
+ BLK_EH_RESET_TIMER,
+};
+
+/**
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
+ * block device
+ */
struct blk_mq_hw_ctx {
struct {
+ /** @lock: Protects the dispatch list. */
spinlock_t lock;
+ /**
+ * @dispatch: Used for requests that are ready to be
+ * dispatched to the hardware but for some reason (e.g. lack of
+ * resources) could not be sent to the hardware. As soon as the
+ * driver can send new requests, requests on this list will
+ * be sent first, for fairer dispatch.
+ */
struct list_head dispatch;
- unsigned long state; /* BLK_MQ_S_* flags */
+ /**
+ * @state: BLK_MQ_S_* flags. Defines the state of the hw
+ * queue (active, scheduled to restart, stopped).
+ */
+ unsigned long state;
} ____cacheline_aligned_in_smp;
+ /**
+ * @run_work: Used for scheduling a hardware queue run at a later time.
+ */
struct delayed_work run_work;
+ /** @cpumask: Map of available CPUs where this hctx can run. */
cpumask_var_t cpumask;
+ /**
+ * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
+ * selection from @cpumask.
+ */
int next_cpu;
+ /**
+ * @next_cpu_batch: Counter of how many queue runs are left in the batch
+ * before switching to the next CPU.
+ */
int next_cpu_batch;
- unsigned long flags; /* BLK_MQ_F_* flags */
+ /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
+ unsigned long flags;
+ /**
+ * @sched_data: Pointer owned by the IO scheduler attached to a request
+ * queue. It's up to the IO scheduler how to use this pointer.
+ */
void *sched_data;
+ /**
+ * @queue: Pointer to the request queue that owns this hardware context.
+ */
struct request_queue *queue;
+ /** @fq: Queue of requests that need to perform a flush operation. */
struct blk_flush_queue *fq;
+ /**
+ * @driver_data: Pointer to data owned by the block driver that created
+ * this hctx.
+ */
void *driver_data;
+ /**
+ * @ctx_map: Bitmap for each software queue. If bit is on, there is a
+ * pending request in that software queue.
+ */
struct sbitmap ctx_map;
+ /**
+ * @dispatch_from: Software queue to be used when no scheduler was
+ * selected.
+ */
+ struct blk_mq_ctx *dispatch_from;
+ /**
+ * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
+ * decide if the hw_queue is busy using Exponential Weighted Moving
+ * Average algorithm.
+ */
+ unsigned int dispatch_busy;
+
+ /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
+ unsigned short type;
+ /** @nr_ctx: Number of software queues. */
+ unsigned short nr_ctx;
+ /** @ctxs: Array of software queues. */
struct blk_mq_ctx **ctxs;
- unsigned int nr_ctx;
- wait_queue_entry_t dispatch_wait;
+ /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
+ spinlock_t dispatch_wait_lock;
+ /**
+ * @dispatch_wait: Waitqueue to put requests when there is no tag
+ * available at the moment, to wait for another try in the future.
+ */
+ wait_queue_entry_t dispatch_wait;
+
+ /**
+ * @wait_index: Index of next available dispatch_wait queue to insert
+ * requests.
+ */
atomic_t wait_index;
+ /**
+ * @tags: Tags owned by the block driver. A tag at this set is only
+ * assigned when a request is dispatched from a hardware queue.
+ */
struct blk_mq_tags *tags;
+ /**
+ * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
+ * scheduler associated with a request queue, a tag is assigned when
+ * a request is allocated. Otherwise, this member is not used.
+ */
struct blk_mq_tags *sched_tags;
- unsigned long queued;
- unsigned long run;
-#define BLK_MQ_MAX_DISPATCH_ORDER 7
- unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
-
+ /** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
+ /** @queue_num: Index of this hardware queue. */
unsigned int queue_num;
+ /**
+ * @nr_active: Number of active requests. Only used when a tag set is
+ * shared across request queues.
+ */
atomic_t nr_active;
+ /** @cpuhp_online: List to store requests if a CPU is going to die */
+ struct hlist_node cpuhp_online;
+ /** @cpuhp_dead: List to store requests if some CPU dies. */
struct hlist_node cpuhp_dead;
+ /** @kobj: Kernel object for sysfs. */
struct kobject kobj;
- unsigned long poll_considered;
- unsigned long poll_invoked;
- unsigned long poll_success;
-
#ifdef CONFIG_BLK_DEBUG_FS
+ /**
+ * @debugfs_dir: debugfs directory for this hardware queue. Named
+ * as cpu<cpu_number>.
+ */
struct dentry *debugfs_dir;
+ /** @sched_debugfs_dir: debugfs directory for the scheduler. */
struct dentry *sched_debugfs_dir;
#endif
- /* Must be the last member - see also blk_mq_hw_ctx_size(). */
- struct srcu_struct queue_rq_srcu[0];
+ /**
+ * @hctx_list: if this hctx is not in use, this is an entry in
+ * q->unused_hctx_list.
+ */
+ struct list_head hctx_list;
+};
+
+/**
+ * struct blk_mq_queue_map - Map software queues to hardware queues
+ * @mq_map: CPU ID to hardware queue index map. This is an array
+ * with nr_cpu_ids elements. Each element has a value in the range
+ * [@queue_offset, @queue_offset + @nr_queues).
+ * @nr_queues: Number of hardware queues to map CPU IDs onto.
+ * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
+ * driver to map each hardware queue type (enum hctx_type) onto a distinct
+ * set of hardware queues.
+ */
+struct blk_mq_queue_map {
+ unsigned int *mq_map;
+ unsigned int nr_queues;
+ unsigned int queue_offset;
+};
+
+/**
+ * enum hctx_type - Type of hardware queue
+ * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
+ * @HCTX_TYPE_READ: Just for READ I/O.
+ * @HCTX_TYPE_POLL: Polled I/O of any kind.
+ * @HCTX_MAX_TYPES: Number of types of hctx.
+ */
+enum hctx_type {
+ HCTX_TYPE_DEFAULT,
+ HCTX_TYPE_READ,
+ HCTX_TYPE_POLL,
+
+ HCTX_MAX_TYPES,
};
+/**
+ * struct blk_mq_tag_set - tag set that can be shared between request queues
+ * @ops: Pointers to functions that implement block driver behavior.
+ * @map: One or more ctx -> hctx mappings. One map exists for each
+ * hardware queue type (enum hctx_type) that the driver wishes
+ * to support. There are no restrictions on maps being of the
+ * same size, and it's perfectly legal to share maps between
+ * types.
+ * @nr_maps: Number of elements in the @map array. A number in the range
+ * [1, HCTX_MAX_TYPES].
+ * @nr_hw_queues: Number of hardware queues supported by the block driver that
+ * owns this data structure.
+ * @queue_depth: Number of tags per hardware queue, reserved tags included.
+ * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
+ * allocations.
+ * @cmd_size: Number of additional bytes to allocate per request. The block
+ * driver owns these additional bytes.
+ * @numa_node: NUMA node the storage adapter has been connected to.
+ * @timeout: Request processing timeout in jiffies.
+ * @flags: Zero or more BLK_MQ_F_* flags.
+ * @driver_data: Pointer to data owned by the block driver that created this
+ * tag set.
+ * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
+ * elements.
+ * @shared_tags:
+ * Shared set of tags. Has @nr_hw_queues elements. If set,
+ * shared by all @tags.
+ * @tag_list_lock: Serializes tag_list accesses.
+ * @tag_list: List of the request queues that use this tag set. See also
+ * request_queue.tag_set_list.
+ * @srcu: Use as lock when type of the request queue is blocking
+ * (BLK_MQ_F_BLOCKING).
+ * @tags_srcu: SRCU used to defer freeing of tags page_list to prevent
+ * use-after-free when iterating tags.
+ * @update_nr_hwq_lock:
+ * Synchronize updating nr_hw_queues with add/del disk &
+ * switching elevator.
+ */
struct blk_mq_tag_set {
- unsigned int *mq_map;
const struct blk_mq_ops *ops;
+ struct blk_mq_queue_map map[HCTX_MAX_TYPES];
+ unsigned int nr_maps;
unsigned int nr_hw_queues;
- unsigned int queue_depth; /* max hw supported */
+ unsigned int queue_depth;
unsigned int reserved_tags;
- unsigned int cmd_size; /* per-request extra data */
+ unsigned int cmd_size;
int numa_node;
unsigned int timeout;
- unsigned int flags; /* BLK_MQ_F_* */
+ unsigned int flags;
void *driver_data;
struct blk_mq_tags **tags;
+ struct blk_mq_tags *shared_tags;
+
struct mutex tag_list_lock;
struct list_head tag_list;
+ struct srcu_struct *srcu;
+ struct srcu_struct tags_srcu;
+
+ struct rw_semaphore update_nr_hwq_lock;
};
+/**
+ * struct blk_mq_queue_data - Data about a request inserted in a queue
+ *
+ * @rq: Request pointer.
+ * @last: If it is the last request in the queue.
+ */
struct blk_mq_queue_data {
struct request *rq;
bool last;
};
-typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
- const struct blk_mq_queue_data *);
-typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
-typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
-typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
- unsigned int, unsigned int);
-typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
- unsigned int);
-typedef int (reinit_request_fn)(void *, struct request *);
+typedef bool (busy_tag_iter_fn)(struct request *, void *);
-typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
- bool);
-typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
-typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
+/**
+ * struct blk_mq_ops - Callback functions that implement block driver
+ * behaviour.
+ */
+struct blk_mq_ops {
+ /**
+ * @queue_rq: Queue a new request from block IO.
+ */
+ blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
+ const struct blk_mq_queue_data *);
+ /**
+ * @commit_rqs: If a driver uses bd->last to judge when to submit
+ * requests to hardware, it must define this function. In case of errors
+ * that make us stop issuing further requests, this hook serves the
+ * purpose of kicking the hardware (which the last request otherwise
+ * would have done).
+ */
+ void (*commit_rqs)(struct blk_mq_hw_ctx *);
-struct blk_mq_ops {
- /*
- * Queue request
+ /**
+ * @queue_rqs: Queue a list of new requests. Driver is guaranteed
+ * that each request belongs to the same queue. If the driver doesn't
+ * empty the @rqlist completely, then the rest will be queued
+ * individually by the block layer upon return.
*/
- queue_rq_fn *queue_rq;
+ void (*queue_rqs)(struct rq_list *rqlist);
- /*
- * Called on request timeout
+ /**
+ * @get_budget: Reserve budget before queueing a request; once .queue_rq
+ * is run, it is the driver's responsibility to release the
+ * reserved budget. The failure case of .get_budget must also
+ * be handled to avoid I/O deadlock.
*/
- timeout_fn *timeout;
+ int (*get_budget)(struct request_queue *);
- /*
- * Called to poll for completion of a specific tag.
+ /**
+ * @put_budget: Release the reserved budget.
*/
- poll_fn *poll;
+ void (*put_budget)(struct request_queue *, int);
- softirq_done_fn *complete;
+ /**
+ * @set_rq_budget_token: store rq's budget token
+ */
+ void (*set_rq_budget_token)(struct request *, int);
+ /**
+ * @get_rq_budget_token: retrieve rq's budget token
+ */
+ int (*get_rq_budget_token)(struct request *);
- /*
- * Called when the block layer side of a hardware queue has been
- * set up, allowing the driver to allocate/init matching structures.
- * Ditto for exit/teardown.
+ /**
+ * @timeout: Called on request timeout.
*/
- init_hctx_fn *init_hctx;
- exit_hctx_fn *exit_hctx;
+ enum blk_eh_timer_return (*timeout)(struct request *);
- /*
- * Called for every command allocated by the block layer to allow
- * the driver to set up driver specific data.
+ /**
+ * @poll: Called to poll for completion of a specific tag.
+ */
+ int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
+
+ /**
+ * @complete: Mark the request as complete.
+ */
+ void (*complete)(struct request *);
+
+ /**
+ * @init_hctx: Called when the block layer side of a hardware queue has
+ * been set up, allowing the driver to allocate/init matching
+ * structures.
+ */
+ int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
+ /**
+ * @exit_hctx: Ditto for exit/teardown.
+ */
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+
+ /**
+ * @init_request: Called for every command allocated by the block layer
+ * to allow the driver to set up driver specific data.
*
 * A tag greater than or equal to queue_depth is used for setting up
 * a flush request.
- *
- * Ditto for exit/teardown.
*/
- init_request_fn *init_request;
- exit_request_fn *exit_request;
- reinit_request_fn *reinit_request;
- /* Called from inside blk_get_request() */
- void (*initialize_rq_fn)(struct request *rq);
+ int (*init_request)(struct blk_mq_tag_set *set, struct request *,
+ unsigned int, unsigned int);
+ /**
+ * @exit_request: Ditto for exit/teardown.
+ */
+ void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
+ unsigned int);
- map_queues_fn *map_queues;
+ /**
+ * @cleanup_rq: Called before freeing one request which isn't completed
+ * yet, and usually for freeing the driver private data.
+ */
+ void (*cleanup_rq)(struct request *);
+
+ /**
+ * @busy: If set, returns whether or not this queue currently is busy.
+ */
+ bool (*busy)(struct request_queue *);
+
+ /**
+ * @map_queues: This allows drivers to specify their own queue mapping by
+ * overriding the setup-time function that builds the mq_map.
+ */
+ void (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
- /*
- * Used by the debugfs implementation to show driver-specific
+ /**
+ * @show_rq: Used by the debugfs implementation to show driver-specific
* information about a request.
*/
void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
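
Editor's note: a sketch of the minimal ops table a simple driver might register with the struct above; both callbacks are hypothetical.

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd);
static void my_complete(struct request *rq);

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.complete	= my_complete,
};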
+/* Keep hctx_flag_name[] in sync with the definitions below */
+enum {
+ BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
+ /*
+ * Set when this device requires underlying blk-mq device for
+ * completing IO:
+ */
+ BLK_MQ_F_STACKING = 1 << 2,
+ BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
+ BLK_MQ_F_BLOCKING = 1 << 4,
+
+ /*
+ * Alloc tags on a round-robin base instead of the first available one.
+ */
+ BLK_MQ_F_TAG_RR = 1 << 5,
+
+ /*
+ * Select 'none' instead of 'mq-deadline' during queue registration,
+ * in case of a single hwq or shared hwqs.
+ */
+ BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6,
+
+ BLK_MQ_F_MAX = 1 << 7,
+};
+
+#define BLK_MQ_MAX_DEPTH (10240)
+#define BLK_MQ_NO_HCTX_IDX (-1U)
+
enum {
- BLK_MQ_F_SHOULD_MERGE = 1 << 0,
- BLK_MQ_F_TAG_SHARED = 1 << 1,
- BLK_MQ_F_SG_MERGE = 1 << 2,
- BLK_MQ_F_BLOCKING = 1 << 5,
- BLK_MQ_F_NO_SCHED = 1 << 6,
- BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
- BLK_MQ_F_ALLOC_POLICY_BITS = 1,
-
- BLK_MQ_S_STOPPED = 0,
- BLK_MQ_S_TAG_ACTIVE = 1,
- BLK_MQ_S_SCHED_RESTART = 2,
- BLK_MQ_S_TAG_WAITING = 3,
- BLK_MQ_S_START_ON_RUN = 4,
-
- BLK_MQ_MAX_DEPTH = 10240,
-
- BLK_MQ_CPU_WORK_BATCH = 8,
+ /* Keep hctx_state_name[] in sync with the definitions below */
+ BLK_MQ_S_STOPPED,
+ BLK_MQ_S_TAG_ACTIVE,
+ BLK_MQ_S_SCHED_RESTART,
+ /* hw queue is inactive after all its CPUs become offline */
+ BLK_MQ_S_INACTIVE,
+ BLK_MQ_S_MAX
};
-#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
- ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
- ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
-#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
- ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
- << BLK_MQ_F_ALLOC_POLICY_START_BIT)
-
-struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q);
-int blk_mq_register_dev(struct device *, struct request_queue *);
-void blk_mq_unregister_dev(struct device *, struct request_queue *);
+
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
+ struct queue_limits *lim, void *queuedata,
+ struct lock_class_key *lkclass);
+#define blk_mq_alloc_disk(set, lim, queuedata) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_mq_alloc_disk(set, lim, queuedata, &__key); \
+})
+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+ struct lock_class_key *lkclass);
+struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
+ struct queue_limits *lim, void *queuedata);
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
+void blk_mq_destroy_queue(struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int queue_depth,
+ unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
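
Editor's note: a sketch, with error unwinding trimmed, of typical tag-set and disk setup through the interfaces above; my_mq_ops and the queue depth of 128 are illustrative only.

static struct blk_mq_tag_set my_set;

static int my_probe(void)
{
	struct gendisk *disk;
	int ret;

	ret = blk_mq_alloc_sq_tag_set(&my_set, &my_mq_ops, 128, 0);
	if (ret)
		return ret;

	disk = blk_mq_alloc_disk(&my_set, NULL, NULL);
	if (IS_ERR(disk)) {
		blk_mq_free_tag_set(&my_set);
		return PTR_ERR(disk);
	}
	/* set disk->fops and capacity, then add_disk(disk) */
	return 0;
}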
-void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-
void blk_mq_free_request(struct request *rq);
-bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+ unsigned int poll_flags);
+
+bool blk_mq_queue_inflight(struct request_queue *q);
enum {
- BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */
- BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */
- BLK_MQ_REQ_INTERNAL = (1 << 2), /* allocate internal/sched tag */
+ /* return when out of requests */
+ BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
+ /* allocate from reserved pool */
+ BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
+ /* set RQF_PM */
+ BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};
-struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
- unsigned int flags);
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
+ blk_mq_req_flags_t flags);
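
Editor's note: a sketch of allocating and executing a driver-private passthrough request with the allocation flags above.

static blk_status_t my_send_internal(struct request_queue *q)
{
	struct request *rq;
	blk_status_t sts;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return BLK_STS_RESOURCE;

	sts = blk_execute_rq(rq, false);	/* false: queue at the tail */
	blk_mq_free_request(rq);
	return sts;
}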
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
- unsigned int op, unsigned int flags, unsigned int hctx_idx);
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+ blk_opf_t opf, blk_mq_req_flags_t flags,
+ unsigned int hctx_idx);
+
+/*
+ * Tag address space map.
+ */
+struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+ unsigned int active_queues;
+
+ struct sbitmap_queue bitmap_tags;
+ struct sbitmap_queue breserved_tags;
+
+ struct request **rqs;
+ struct request **static_rqs;
+ struct list_head page_list;
+
+ /*
+ * used to clear request reference in rqs[] before freeing one
+ * request pool
+ */
+ spinlock_t lock;
+ struct rcu_head rcu_head;
+};
+
+static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
+ unsigned int tag)
+{
+ if (tag < tags->nr_tags) {
+ prefetch(tags->rqs[tag]);
+ return tags->rqs[tag];
+ }
+
+ return NULL;
+}
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -227,20 +817,115 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
+/**
+ * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
+ * @rq: target request.
+ */
+static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
+{
+ return READ_ONCE(rq->state);
+}
+
+static inline int blk_mq_request_started(struct request *rq)
+{
+ return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
+}
+
+static inline int blk_mq_request_completed(struct request *rq)
+{
+ return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
+}
+
+/*
+ * Set the state to complete when completing a request from inside ->queue_rq.
+ * This is used by drivers that want to ensure special complete actions that
+ * need access to the request are called on failure, e.g. by nvme for
+ * multipathing.
+ */
+static inline void blk_mq_set_request_complete(struct request *rq)
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+}
+
+/*
+ * Complete the request directly instead of deferring it to softirq or
+ * completing it on another CPU. Useful in preemptible context instead of
+ * interrupt context.
+ */
+static inline void blk_mq_complete_request_direct(struct request *rq,
+ void (*complete)(struct request *rq))
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+ complete(rq);
+}
-int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
+void blk_mq_end_request_batch(struct io_comp_batch *ib);
+
+/*
+ * Only need start/end time stamping if we have iostat or
+ * blk stats enabled, or using an IO scheduler.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+ return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
+}
+
+static inline bool blk_mq_is_reserved_rq(struct request *rq)
+{
+ return rq->rq_flags & RQF_RESV;
+}
+
+/**
+ * blk_mq_add_to_batch() - add a request to the completion batch
+ * @req: The request to add to the batch
+ * @iob: The batch to add the request to
+ * @is_error: Specify true if the request failed with an error
+ * @complete: The completion handler for the request
+ *
+ * Batched completions only work when there is no I/O error and no special
+ * ->end_io handler.
+ *
+ * Return: true when the request was added to the batch, otherwise false
+ */
+static inline bool blk_mq_add_to_batch(struct request *req,
+ struct io_comp_batch *iob, bool is_error,
+ void (*complete)(struct io_comp_batch *))
+{
+ /*
+ * Check various conditions that exclude batch processing:
+ * 1) No batch container
+ * 2) Has scheduler data attached
+ * 3) Not a passthrough request and end_io set
+ * 4) Not a passthrough request and failed with an error
+ */
+ if (!iob)
+ return false;
+ if (req->rq_flags & RQF_SCHED_TAGS)
+ return false;
+ if (!blk_rq_is_passthrough(req)) {
+ if (req->end_io)
+ return false;
+ if (is_error)
+ return false;
+ }
+
+ if (!iob->complete)
+ iob->complete = complete;
+ else if (iob->complete != complete)
+ return false;
+ iob->need_ts |= blk_mq_need_time_stamp(req);
+ rq_list_add_tail(&iob->req_list, req);
+ return true;
+}
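
Editor's note: a sketch of a completion path feeding the batch helper above; my_complete_batch() is a hypothetical handler that would end with blk_mq_end_request_batch().

static void my_complete_batch(struct io_comp_batch *iob);

static void my_finish_cmd(struct request *req, struct io_comp_batch *iob,
			  bool failed)
{
	/* fall back to a regular completion if batching is not possible */
	if (!blk_mq_add_to_batch(req, iob, failed, my_complete_batch))
		blk_mq_complete_request(req);
}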
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
- bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
-
-bool blk_mq_queue_stopped(struct request_queue *q);
+bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
@@ -248,45 +933,312 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
+void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
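
Editor's note: a sketch contrasting quiesce with freeze; quiesce only guarantees that no ->queue_rq() invocation is in progress once it returns, it does not drain I/O that is already in flight.

static void my_pause_dispatch(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);
	/* no ->queue_rq() runs past this point; update dispatch state */
	blk_mq_unquiesce_queue(q);
}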
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
+void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
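
Editor's note: a simplified sketch of walking all in-flight requests with the tagset iterator, as an error handler might; real drivers must serialize this against normal completion.

static bool my_abort_rq(struct request *rq, void *data)
{
	if (blk_mq_request_started(rq))
		blk_mq_complete_request(rq);
	return true;	/* continue iterating */
}

/* usage: blk_mq_tagset_busy_iter(&my_set, my_abort_rq, NULL); */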
-void blk_mq_freeze_queue(struct request_queue *q);
-void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
+void blk_mq_freeze_queue_nomemsave(struct request_queue *q);
+void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q);
+static inline unsigned int __must_check
+blk_mq_freeze_queue(struct request_queue *q)
+{
+ unsigned int memflags = memalloc_noio_save();
+
+ blk_mq_freeze_queue_nomemsave(q);
+ return memflags;
+}
+static inline void
+blk_mq_unfreeze_queue(struct request_queue *q, unsigned int memflags)
+{
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ memalloc_noio_restore(memflags);
+}
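
Editor's note: a usage sketch for the freeze pair above; the memflags cookie returned by the freeze side must be handed back on unfreeze.

static void my_update_queue(struct request_queue *q)
{
	unsigned int memflags;

	memflags = blk_mq_freeze_queue(q);
	/* queue is drained here: no I/O is in flight */
	blk_mq_unfreeze_queue(q, memflags);
}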
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
+void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
+void blk_freeze_queue_start_non_owner(struct request_queue *q);
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+unsigned int blk_mq_num_possible_queues(unsigned int max_queues);
+unsigned int blk_mq_num_online_queues(unsigned int max_queues);
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
+ struct device *dev, unsigned int offset);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
-/*
+unsigned int blk_mq_rq_cpu(struct request *rq);
+
+bool __blk_should_fake_timeout(struct request_queue *q);
+static inline bool blk_should_fake_timeout(struct request_queue *q)
+{
+ if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
+ test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+ return __blk_should_fake_timeout(q);
+ return false;
+}
+
+/**
+ * blk_mq_rq_from_pdu - cast a PDU to a request
+ * @pdu: the PDU (Protocol Data Unit) to be casted
+ *
+ * Return: request
+ *
* Driver command data is immediately after the request. So subtract request
- * size to get back to the original request, add request size to get the PDU.
+ * size to get back to the original request.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
return pdu - sizeof(struct request);
}
+
+/**
+ * blk_mq_rq_to_pdu - cast a request to a PDU
+ * @rq: the request to be casted
+ *
+ * Return: pointer to the PDU
+ *
+ * Driver command data is immediately after the request. So add request to get
+ * the PDU.
+ */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
return rq + 1;
}
+static inline struct blk_mq_hw_ctx *queue_hctx(struct request_queue *q, int id)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ rcu_read_lock();
+ hctx = rcu_dereference(q->queue_hw_ctx)[id];
+ rcu_read_unlock();
+
+ return hctx;
+}
+
#define queue_for_each_hw_ctx(q, hctx, i) \
for ((i) = 0; (i) < (q)->nr_hw_queues && \
- ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+ ({ hctx = queue_hctx((q), i); 1; }); (i)++)
#define hctx_for_each_ctx(hctx, ctx, i) \
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
-#endif
+static inline void blk_mq_cleanup_rq(struct request *rq)
+{
+ if (rq->q->mq_ops->cleanup_rq)
+ rq->q->mq_ops->cleanup_rq(rq);
+}
+
+void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
+ struct lock_class_key *key);
+
+static inline bool rq_is_sync(struct request *rq)
+{
+ return op_is_sync(rq->cmd_flags);
+}
+
+void blk_rq_init(struct request_queue *q, struct request *rq);
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
+void blk_rq_unprep_clone(struct request *rq);
+blk_status_t blk_insert_cloned_request(struct request *rq);
+
+struct rq_map_data {
+ struct page **pages;
+ unsigned long offset;
+ unsigned short page_order;
+ unsigned short nr_entries;
+ bool null_mapped;
+ bool from_user;
+};
+
+int blk_rq_map_user(struct request_queue *, struct request *,
+ struct rq_map_data *, void __user *, unsigned long, gfp_t);
+int blk_rq_map_user_io(struct request *, struct rq_map_data *,
+ void __user *, unsigned long, gfp_t, bool, int, bool, int);
+int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, const struct iov_iter *, gfp_t);
+int blk_rq_unmap_user(struct bio *);
+int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
+ gfp_t gfp);
+int blk_rq_append_bio(struct request *rq, struct bio *bio);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
+blk_status_t blk_execute_rq(struct request *rq, bool at_head);
+bool blk_rq_is_poll(struct request *rq);
+
+struct req_iterator {
+ struct bvec_iter iter;
+ struct bio *bio;
+};
+
+#define __rq_for_each_bio(_bio, rq) \
+ if ((rq->bio)) \
+ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+
+#define rq_for_each_segment(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+
+#define rq_for_each_bvec(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
+
+#define rq_iter_last(bvec, _iter) \
+ (_iter.bio->bi_next == NULL && \
+ bio_iter_last(bvec, _iter.iter))
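
Editor's note: a sketch using the iterators above to count the data bytes of a request segment by segment.

static unsigned int my_rq_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bv;
	unsigned int bytes = 0;

	rq_for_each_segment(bv, rq, iter)
		bytes += bv.bv_len;

	return bytes;	/* should match blk_rq_bytes(rq) for data requests */
}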
+
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ * blk_rq_stats_sectors() : sectors of the entire request used for stats
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ if (!rq->bio)
+ return 0;
+ if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
+ return rq->bio->bi_iter.bi_size;
+ return bio_iovec(rq->bio).bv_len;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+ return rq->stats_sectors;
+}
+
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec.bv_len;
+ return blk_rq_bytes(rq);
+}
+
+/*
+ * Return the first full biovec in the request. The caller needs to check that
+ * there are any bvecs before calling this helper.
+ */
+static inline struct bio_vec req_bvec(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+}
+
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+ unsigned int nr_bios = 0;
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ nr_bios++;
+
+ return nr_bios;
+}
+
+void blk_steal_bios(struct bio_list *list, struct request *rq);
+
+/*
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ */
+bool blk_update_request(struct request *rq, blk_status_t error,
+ unsigned int nr_bytes);
+void blk_abort_request(struct request *);
+
+/*
+ * Number of physical segments as sent to the device.
+ *
+ * Normally this is the number of discontiguous data segments sent by the
+ * submitter. But for data-less commands like discard we might have no
+ * actual data segments submitted, but the driver might have to add its
+ * own special payload. In that case we still return 1 here so that this
+ * special payload will be mapped.
+ */
+static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return 1;
+ return rq->nr_phys_segments;
+}
+
+/*
+ * Number of discard segments (or ranges) the driver needs to fill in.
+ * Each discard bio merged into a request is counted as one segment.
+ */
+static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
+{
+ return max_t(unsigned short, rq->nr_phys_segments, 1);
+}
+
+/**
+ * blk_rq_nr_bvec - return number of bvecs in a request
+ * @rq: request to calculate bvecs for
+ *
+ * Returns the number of bvecs.
+ */
+static inline unsigned int blk_rq_nr_bvec(struct request *rq)
+{
+ struct req_iterator rq_iter;
+ struct bio_vec bv;
+ unsigned int nr_bvec = 0;
+
+ rq_for_each_bvec(bv, rq, rq_iter)
+ nr_bvec++;
+
+ return nr_bvec;
+}
+
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+ struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
+{
+ struct scatterlist *last_sg = NULL;
+
+ return __blk_rq_map_sg(rq, sglist, &last_sg);
+}
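
Editor's note: a sketch of scatterlist mapping in a driver using the helpers above; allocation and sizing of the sg table are omitted.

static int my_build_sgl(struct request *rq, struct scatterlist *sgl)
{
	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));

	/* returns the number of sg entries actually used */
	return blk_rq_map_sg(rq, sgl);
}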
+void blk_dump_rq_flags(struct request *, char *);
+
+#endif /* BLK_MQ_H */
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
new file mode 100644
index 000000000000..004b38a538ff
--- /dev/null
+++ b/include/linux/blk-pm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLK_PM_H_
+#define _BLK_PM_H_
+
+struct device;
+struct request_queue;
+
+/*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+ struct device *dev) {}
+#endif
+
+#endif /* _BLK_PM_H_ */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d2eb87c84d82..5dc061d318a4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Block data types and constants. Directly include this file only to
* break include dependency loop.
@@ -7,27 +8,100 @@
#include <linux/types.h>
#include <linux/bvec.h>
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <linux/rw_hint.h>
struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
-struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
+struct bio_crypt_ctx;
+
+/*
+ * The basic unit of block I/O is a sector. It is used in a number of contexts
+ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
+ * bytes. Variables of type sector_t represent an offset or size that is a
+ * multiple of 512 bytes. Hence these two constants.
+ */
+#ifndef SECTOR_SHIFT
+#define SECTOR_SHIFT 9
+#endif
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#endif
+
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
+
+struct block_device {
+ sector_t bd_start_sect;
+ sector_t bd_nr_sectors;
+ struct gendisk * bd_disk;
+ struct request_queue * bd_queue;
+ struct disk_stats __percpu *bd_stats;
+ unsigned long bd_stamp;
+ atomic_t __bd_flags; // partition number + flags
+#define BD_PARTNO 255 // lower 8 bits; assign-once
+#define BD_READ_ONLY (1u<<8) // read-only policy
+#define BD_WRITE_HOLDER (1u<<9)
+#define BD_HAS_SUBMIT_BIO (1u<<10)
+#define BD_RO_WARNED (1u<<11)
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+#define BD_MAKE_IT_FAIL (1u<<12)
+#endif
+ dev_t bd_dev;
+ struct address_space *bd_mapping; /* page cache */
+
+ atomic_t bd_openers;
+ spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
+ void * bd_claiming;
+ void * bd_holder;
+ const struct blk_holder_ops *bd_holder_ops;
+ struct mutex bd_holder_lock;
+ int bd_holders;
+ struct kobject *bd_holder_dir;
+
+ atomic_t bd_fsfreeze_count; /* number of freeze requests */
+ struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */
+
+ struct partition_meta_info *bd_meta_info;
+ int bd_writers;
+#ifdef CONFIG_SECURITY
+ void *bd_security;
+#endif
+ /*
+ * keep this out-of-line as it's both big and not needed in the fast
+ * path
+ */
+ struct device bd_device;
+} __randomize_layout;
+
+#define bdev_whole(_bdev) \
+ ((_bdev)->bd_disk->part0)
+
+#define dev_to_bdev(device) \
+ container_of((device), struct block_device, bd_device)
+
+#define bdev_kobj(_bdev) \
+ (&((_bdev)->bd_device.kobj))
/*
* Block error status values. See block/blk-core:blk_errors for the details.
*/
typedef u8 __bitwise blk_status_t;
+typedef u16 blk_short_t;
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT ((__force blk_status_t)2)
#define BLK_STS_NOSPC ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT ((__force blk_status_t)4)
#define BLK_STS_TARGET ((__force blk_status_t)5)
-#define BLK_STS_NEXUS ((__force blk_status_t)6)
+#define BLK_STS_RESV_CONFLICT ((__force blk_status_t)6)
#define BLK_STS_MEDIUM ((__force blk_status_t)7)
#define BLK_STS_PROTECTION ((__force blk_status_t)8)
#define BLK_STS_RESOURCE ((__force blk_status_t)9)
@@ -36,11 +110,98 @@ typedef u8 __bitwise blk_status_t;
/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11)
+/*
+ * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
+ * and the bio would block (cf bio_wouldblock_error())
+ */
#define BLK_STS_AGAIN ((__force blk_status_t)12)
-struct blk_issue_stat {
- u64 stat;
-};
+/*
+ * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
+ * device related resources are unavailable, but the driver can guarantee
+ * that the queue will be rerun in the future once resources become
+ * available again. This is typically the case for device specific
+ * resources that are consumed for IO. If the driver fails allocating these
+ * resources, we know that inflight (or pending) IO will free these
+ * resource upon completion.
+ *
+ * This is different from BLK_STS_RESOURCE in that it explicitly references
+ * a device specific resource. For resources of wider scope, allocation
+ * failure can happen without having pending IO. This means that we can't
+ * rely on request completions freeing these resources, as IO may not be in
+ * flight. Examples of that are kernel memory allocations, DMA mappings, or
+ * any other system wide resources.
+ */
+#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
+
+/*
+ * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
+ * path if the device returns a status indicating that too many zone resources
+ * are currently open. The same command should be successful if resubmitted
+ * after the number of open zones decreases below the device's limits, which is
+ * reported in the request_queue's max_open_zones.
+ */
+#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)14)
+
+/*
+ * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
+ * path if the device returns a status indicating that too many zone resources
+ * are currently active. The same command should be successful if resubmitted
+ * after the number of active zones decreases below the device's limits, which
+ * is reported in the request_queue's max_active_zones.
+ */
+#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)15)
+
+/*
+ * BLK_STS_OFFLINE is returned from the driver when the target device is offline
+ * or is being taken offline. This could help differentiate the case where a
+ * device is intentionally being shut down from a real I/O error.
+ */
+#define BLK_STS_OFFLINE ((__force blk_status_t)16)
+
+/*
+ * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
+ * aborted the command because it exceeded one of its Command Duration Limits.
+ */
+#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17)
+
+/*
+ * Invalid size or alignment.
+ */
+#define BLK_STS_INVAL ((__force blk_status_t)19)
+
+/**
+ * blk_path_error - returns true if error may be path related
+ * @error: status the request was completed with
+ *
+ * Description:
+ * This classifies block error status into non-retryable errors and ones
+ * that may be successful if retried on a failover path.
+ *
+ * Return:
+ * %false - retrying failover path will not help
+ * %true - may succeed if retried
+ */
+static inline bool blk_path_error(blk_status_t error)
+{
+ switch (error) {
+ case BLK_STS_NOTSUPP:
+ case BLK_STS_NOSPC:
+ case BLK_STS_TARGET:
+ case BLK_STS_RESV_CONFLICT:
+ case BLK_STS_MEDIUM:
+ case BLK_STS_PROTECTION:
+ return false;
+ }
+
+ /* Anything else could be a path failure, so should be retried */
+ return true;
+}
+
+typedef __u32 __bitwise blk_opf_t;
+
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE -1U
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
@@ -49,51 +210,60 @@ struct blk_issue_stat {
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
- blk_status_t bi_status;
- unsigned int bi_opf; /* bottom bits req flags,
- * top bits REQ_OP. Use
- * accessors.
+ blk_opf_t bi_opf; /* bottom bits REQ_OP, top bits
+ * req_flags.
*/
- unsigned short bi_flags; /* status, etc and bvec pool number */
+ unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
- unsigned short bi_write_hint;
-
- struct bvec_iter bi_iter;
-
- /* Number of segments in this BIO after
- * physical address coalescing is performed.
- */
- unsigned int bi_phys_segments;
+ enum rw_hint bi_write_hint;
+ u8 bi_write_stream;
+ blk_status_t bi_status;
/*
- * To keep track of the max segment size, we account for the
- * sizes of the first and last mergeable segments in this bio.
+ * The bvec gap bit indicates the lowest set bit in any address offset
+ * between all bi_io_vecs. This field is initialized only after the bio
+ * is split to the hardware limits (see bio_split_io_at()). The value
+ * may be used to consider DMA optimization when performing that
+ * mapping. The value is compared to a power of two mask where the
+ * result depends on any bit set within the mask, so saving the lowest
+ * bit is sufficient to know if any segment gap collides with the mask.
*/
- unsigned int bi_seg_front_size;
- unsigned int bi_seg_back_size;
+ u8 bi_bvec_gap_bit;
atomic_t __bi_remaining;
- bio_end_io_t *bi_end_io;
+ struct bvec_iter bi_iter;
+ union {
+ /* for polled bios: */
+ blk_qc_t bi_cookie;
+ /* for plugged zoned writes only: */
+ unsigned int __bi_nr_segments;
+ };
+ bio_end_io_t *bi_end_io;
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
- * Optional ioc and css associated with this bio. Put on bio
- * release. Read comment on top of bio_associate_current().
+ * Represents the association of the css and request_queue for the bio.
+ * If a bio goes direct to device, it will not have a blkg as it will
+ * not have a request_queue associated with it. The reference is put
+ * on release of the bio.
*/
- struct io_context *bi_ioc;
- struct cgroup_subsys_state *bi_css;
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- void *bi_cg_private;
- struct blk_issue_stat bi_issue_stat;
+ struct blkcg_gq *bi_blkg;
+ /* Time that this bio was issued. */
+ u64 issue_time_ns;
+#ifdef CONFIG_BLK_CGROUP_IOCOST
+ u64 bi_iocost_cost;
#endif
#endif
- union {
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *bi_crypt_context;
+#endif
+
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- struct bio_integrity_payload *bi_integrity; /* data integrity */
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
- };
unsigned short bi_vcnt; /* how many bio_vec's */
@@ -108,61 +278,54 @@ struct bio {
struct bio_vec *bi_io_vec; /* the actual vec list */
struct bio_set *bi_pool;
-
- /*
- * We can inline a number of vecs at the end of the bio, to avoid
- * double allocations for a small number of bio_vecs. This member
- * MUST obviously be kept at the very end of the bio.
- */
- struct bio_vec bi_inline_vecs[0];
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
+#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
+
+static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
+{
+ return (struct bio_vec *)(bio + 1);
+}
/*
* bio flags
*/
-#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
-#define BIO_CLONED 2 /* doesn't own data */
-#define BIO_BOUNCED 3 /* bio is a bounce bio */
-#define BIO_USER_MAPPED 4 /* contains user pages */
-#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
-#define BIO_QUIET 6 /* Make BIO Quiet */
-#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
-#define BIO_THROTTLED 9 /* This bio has already been subjected to
+enum {
+ BIO_PAGE_PINNED, /* Unpin pages in bio_release_pages() */
+ BIO_CLONED, /* doesn't own data */
+ BIO_QUIET, /* Make BIO Quiet */
+ BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
+ BIO_REFFED, /* bio has elevated ->bi_cnt */
+ BIO_BPS_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
-#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
+ BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
* of this bio. */
-/* See BVEC_POOL_OFFSET below before adding new flags */
+ BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
+ BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
+ /*
+ * This bio has completed bps throttling at the single tg granularity,
+ * which is different from BIO_BPS_THROTTLED. When the bio is enqueued
+ * into the sq->queued of the upper tg, or is about to be dispatched,
+ * this flag needs to be cleared. Since blk-throttle and rq_qos are not
+ * on the same hierarchical level, reuse the value.
+ */
+ BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
+ BIO_QOS_MERGED, /* but went through rq_qos merge path */
+ BIO_REMAPPED,
+ BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
+ BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
+ BIO_FLAG_LAST
+};
-/*
- * We support 6 different bvec pools, the last one is magic in that it
- * is backed by a mempool.
- */
-#define BVEC_POOL_NR 6
-#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
+typedef __u32 __bitwise blk_mq_req_flags_t;
-/*
- * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
- * 1 to the actual index so that 0 indicates that there are no bvecs to be
- * freed.
- */
-#define BVEC_POOL_BITS (3)
-#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
-#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
-#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
-# error "BVEC_POOL_BITS is too small"
-#endif
-
-/*
- * Flags starting here get preserved by bio_reset() - this includes
- * only BVEC_POOL_IDX()
- */
-#define BIO_RESET_BITS BVEC_POOL_OFFSET
+#define REQ_OP_BITS 8
+#define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS 24
-/*
- * Operations and flags common to the bio and request structures.
+/**
+ * enum req_op - Operations common to the bio and request structures.
* We use 8 bits for encoding the operation, and the remaining 24 for flags.
*
* The least significant bit of the operation number indicates the data
@@ -174,40 +337,40 @@ struct bio {
 * If an operation does not transfer data the least significant bit has no
* meaning.
*/
-#define REQ_OP_BITS 8
-#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
-#define REQ_FLAG_BITS 24
-
-enum req_opf {
+enum req_op {
/* read sectors from the device */
- REQ_OP_READ = 0,
+ REQ_OP_READ = (__force blk_opf_t)0,
/* write sectors to the device */
- REQ_OP_WRITE = 1,
+ REQ_OP_WRITE = (__force blk_opf_t)1,
/* flush the volatile write cache */
- REQ_OP_FLUSH = 2,
+ REQ_OP_FLUSH = (__force blk_opf_t)2,
/* discard sectors */
- REQ_OP_DISCARD = 3,
- /* get zone information */
- REQ_OP_ZONE_REPORT = 4,
+ REQ_OP_DISCARD = (__force blk_opf_t)3,
/* securely erase sectors */
- REQ_OP_SECURE_ERASE = 5,
- /* seset a zone write pointer */
- REQ_OP_ZONE_RESET = 6,
- /* write the same sector many times */
- REQ_OP_WRITE_SAME = 7,
+ REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
+ /* write data at the current zone write pointer */
+ REQ_OP_ZONE_APPEND = (__force blk_opf_t)7,
/* write the zero filled sector many times */
- REQ_OP_WRITE_ZEROES = 9,
+ REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
+ /* Open a zone */
+ REQ_OP_ZONE_OPEN = (__force blk_opf_t)11,
+ /* Close a zone */
+ REQ_OP_ZONE_CLOSE = (__force blk_opf_t)13,
+ /* Transition a zone to full */
+ REQ_OP_ZONE_FINISH = (__force blk_opf_t)15,
+ /* reset a zone write pointer */
+ REQ_OP_ZONE_RESET = (__force blk_opf_t)17,
+	/* reset all the zones present on the device */
+ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)19,
- /* SCSI passthrough using struct scsi_request */
- REQ_OP_SCSI_IN = 32,
- REQ_OP_SCSI_OUT = 33,
/* Driver private requests */
- REQ_OP_DRV_IN = 34,
- REQ_OP_DRV_OUT = 35,
+ REQ_OP_DRV_IN = (__force blk_opf_t)34,
+ REQ_OP_DRV_OUT = (__force blk_opf_t)35,
- REQ_OP_LAST,
+ REQ_OP_LAST = (__force blk_opf_t)36,
};
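
The odd/even opcode assignment above is load-bearing: write-type operations get odd opcodes and reads even ones, so the data direction can be recovered from bit 0 alone. A small illustration (not part of the header):

/* Sketch: the direction falls out of the opcode's least significant bit. */
static inline bool my_op_writes_to_device(enum req_op op)
{
	/* REQ_OP_WRITE (1), REQ_OP_DISCARD (3), ... are odd; reads are even. */
	return (__force unsigned int)op & 1;
}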
+/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
__REQ_FAILFAST_DEV = /* no driver retries of device errors */
REQ_OP_BITS,
@@ -223,30 +386,47 @@ enum req_flag_bits {
__REQ_PREFLUSH, /* request for cache flush */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
-
- /* command specific flags for REQ_OP_WRITE_ZEROES: */
+ __REQ_NOWAIT, /* Don't wait if request will block */
+ __REQ_POLLED, /* caller polls for completion using bio_poll */
+ __REQ_ALLOC_CACHE, /* allocate IO from cache if available */
+ __REQ_SWAP, /* swap I/O */
+ __REQ_DRV, /* for driver use */
+ __REQ_FS_PRIVATE, /* for file system (submitter) use */
+ __REQ_ATOMIC, /* for atomic write operations */
+ /*
+ * Command specific flags, keep last:
+ */
+ /* for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
- __REQ_NOWAIT, /* Don't wait if request will block */
__REQ_NR_BITS, /* stops here */
};
-#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC (1ULL << __REQ_SYNC)
-#define REQ_META (1ULL << __REQ_META)
-#define REQ_PRIO (1ULL << __REQ_PRIO)
-#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
-#define REQ_IDLE (1ULL << __REQ_IDLE)
-#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
-#define REQ_FUA (1ULL << __REQ_FUA)
-#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
-#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
-#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
-
-#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
-#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
+#define REQ_FAILFAST_DEV \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC (__force blk_opf_t)(1ULL << __REQ_SYNC)
+#define REQ_META (__force blk_opf_t)(1ULL << __REQ_META)
+#define REQ_PRIO (__force blk_opf_t)(1ULL << __REQ_PRIO)
+#define REQ_NOMERGE (__force blk_opf_t)(1ULL << __REQ_NOMERGE)
+#define REQ_IDLE (__force blk_opf_t)(1ULL << __REQ_IDLE)
+#define REQ_INTEGRITY (__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
+#define REQ_FUA (__force blk_opf_t)(1ULL << __REQ_FUA)
+#define REQ_PREFLUSH (__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD (__force blk_opf_t)(1ULL << __REQ_RAHEAD)
+#define REQ_BACKGROUND (__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
+#define REQ_NOWAIT (__force blk_opf_t)(1ULL << __REQ_NOWAIT)
+#define REQ_POLLED (__force blk_opf_t)(1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
+#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP)
+#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
+#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
+#define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
+
+#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -254,28 +434,30 @@ enum req_flag_bits {
#define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
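
An operation word is simply an opcode OR'd with any number of the flag bits above; for example, a hedged sketch of building the opf value for a synchronous write that must reach stable media:

/* Sketch: a synchronous FUA write; REQ_FUA also makes it non-mergeable. */
static inline blk_opf_t my_sync_fua_write(void)
{
	return REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
}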
-#define bio_op(bio) \
- ((bio)->bi_opf & REQ_OP_MASK)
-#define req_op(req) \
- ((req)->cmd_flags & REQ_OP_MASK)
+enum stat_group {
+ STAT_READ,
+ STAT_WRITE,
+ STAT_DISCARD,
+ STAT_FLUSH,
-/* obsolete, don't use in new code */
-static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
- unsigned op_flags)
+ NR_STAT_GROUPS
+};
+
+static inline enum req_op bio_op(const struct bio *bio)
{
- bio->bi_opf = op | op_flags;
+ return bio->bi_opf & REQ_OP_MASK;
}
-static inline bool op_is_write(unsigned int op)
+static inline bool op_is_write(blk_opf_t op)
{
- return (op & 1);
+ return !!(op & (__force blk_opf_t)1);
}
/*
* Check if the bio or request is one that needs special treatment in the
* flush state machine.
*/
-static inline bool op_is_flush(unsigned int op)
+static inline bool op_is_flush(blk_opf_t op)
{
return op & (REQ_FUA | REQ_PREFLUSH);
}
@@ -285,54 +467,46 @@ static inline bool op_is_flush(unsigned int op)
* PREFLUSH flag. Other operations may be marked as synchronous using the
* REQ_SYNC flag.
*/
-static inline bool op_is_sync(unsigned int op)
+static inline bool op_is_sync(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_READ ||
(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
-typedef unsigned int blk_qc_t;
-#define BLK_QC_T_NONE -1U
-#define BLK_QC_T_SHIFT 16
-#define BLK_QC_T_INTERNAL (1U << 31)
-
-static inline bool blk_qc_t_valid(blk_qc_t cookie)
-{
- return cookie != BLK_QC_T_NONE;
-}
-
-static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
- bool internal)
-{
- blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);
-
- if (internal)
- ret |= BLK_QC_T_INTERNAL;
-
- return ret;
-}
-
-static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
+static inline bool op_is_discard(blk_opf_t op)
{
- return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
+ return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
-static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
+/*
+ * Check if a bio or request operation is a zone management operation.
+ */
+static inline bool op_is_zone_mgmt(enum req_op op)
{
- return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
+ switch (op & REQ_OP_MASK) {
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return true;
+ default:
+ return false;
+ }
}
-static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
+static inline int op_stat_group(enum req_op op)
{
- return (cookie & BLK_QC_T_INTERNAL) != 0;
+ if (op_is_discard(op))
+ return STAT_DISCARD;
+ return op_is_write(op);
}
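
op_stat_group() works because op_is_write() returns 0 or 1, matching STAT_READ and STAT_WRITE, while discards are special-cased. A hypothetical per-group counter built on it:

/* Sketch: bump a per-group I/O counter for a completed bio. */
static unsigned long my_ios[NR_STAT_GROUPS];

static void my_account_bio(struct bio *bio)
{
	my_ios[op_stat_group(bio_op(bio))]++;
}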
struct blk_rq_stat {
- s64 mean;
+ u64 mean;
u64 min;
u64 max;
- s32 nr_samples;
- s32 nr_batch;
+ u32 nr_samples;
u64 batch;
};
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4b99b13c7e68..72e34acd439c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,321 +1,376 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions Copyright (C) 1992 Drew Eckhardt
+ */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-
-#ifdef CONFIG_BLOCK
-
-#include <linux/major.h>
-#include <linux/genhd.h>
+#include <linux/types.h>
+#include <linux/blk_types.h>
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <linux/pagemap.h>
-#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
-#include <linux/mempool.h>
-#include <linux/pfn.h>
#include <linux/bio.h>
-#include <linux/stringify.h>
#include <linux/gfp.h>
-#include <linux/bsg.h>
-#include <linux/smp.h>
+#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
-#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/sched.h>
+#include <linux/sbitmap.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
+#include <linux/file.h>
+#include <linux/lockdep.h>
struct module;
-struct scsi_ioctl_command;
-
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
-struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
+struct kiocb;
struct pr_ops;
-struct rq_wb;
+struct rq_qos;
+struct blk_report_zones_args;
struct blk_queue_stats;
struct blk_stat_callback;
+struct blk_crypto_profile;
-#define BLKDEV_MIN_RQ 4
-#define BLKDEV_MAX_RQ 128 /* Default maximum */
-
-/* Must be consisitent with blk_mq_poll_stats_bkt() */
-#define BLK_MQ_POLL_STATS_BKTS 16
+extern const struct device_type disk_type;
+extern const struct device_type part_type;
+extern const struct class block_class;
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 3
+#define BLKCG_MAX_POLS 6
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+#define DISK_MAX_PARTS 256
+#define DISK_NAME_LEN 32
-#define BLK_RL_SYNCFULL (1U << 0)
-#define BLK_RL_ASYNCFULL (1U << 1)
+#define PARTITION_META_INFO_VOLNAMELTH 64
+/*
+ * Enough for the string representation of any kind of UUID plus NULL.
+ * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
+ */
+#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
-struct request_list {
- struct request_queue *q; /* the queue this rl belongs to */
-#ifdef CONFIG_BLK_CGROUP
- struct blkcg_gq *blkg; /* blkg this request pool belongs to */
-#endif
- /*
- * count[], starved[], and wait[] are indexed by
- * BLK_RW_SYNC/BLK_RW_ASYNC
- */
- int count[2];
- int starved[2];
- mempool_t *rq_pool;
- wait_queue_head_t wait[2];
- unsigned int flags;
+struct partition_meta_info {
+ char uuid[PARTITION_META_INFO_UUIDLTH];
+ u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};
-/*
- * request flags */
-typedef __u32 __bitwise req_flags_t;
-
-/* elevator knows about this request */
-#define RQF_SORTED ((__force req_flags_t)(1 << 0))
-/* drive already may have started this one */
-#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* uses tagged queueing */
-#define RQF_QUEUED ((__force req_flags_t)(1 << 2))
-/* may not be passed by ioscheduler */
-#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
-/* request for flush sequence */
-#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
-/* merge of different types, fail separately */
-#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
-/* track inflight for MQ */
-#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
-/* don't call prep for this one */
-#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
-/* set for "ide_preempt" requests and also for requests for which the SCSI
- "quiesce" state must be ignored. */
-#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
-/* contains copies of user pages */
-#define RQF_COPY_USER ((__force req_flags_t)(1 << 9))
-/* vaguely specified driver internal error. Ignored by the block layer */
-#define RQF_FAILED ((__force req_flags_t)(1 << 10))
-/* don't warn about errors */
-#define RQF_QUIET ((__force req_flags_t)(1 << 11))
-/* elevator private data attached */
-#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
-/* account I/O stat */
-#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
-/* request came from our alloc pool */
-#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
-/* runtime pm request */
-#define RQF_PM ((__force req_flags_t)(1 << 15))
-/* on IO scheduler merge hash */
-#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* IO stats tracking on */
-#define RQF_STATS ((__force req_flags_t)(1 << 17))
-/* Look at ->special_vec for the actual data payload instead of the
- bio chain. */
-#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
-
-/* flags that prevent us from merging requests: */
-#define RQF_NOMERGE_FLAGS \
- (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
-
-/*
- * Try to put the fields that are referenced together in the same cacheline.
+/**
+ * DOC: genhd capability flags
+ *
+ * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
+ * removable media. When set, the device remains present even when media is not
+ * inserted. Shall not be set for devices which are removed entirely when the
+ * media is removed.
+ *
+ * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
+ * doesn't appear in sysfs, and can't be opened from userspace or using
+ * blkdev_get*. Used for the underlying components of multipath devices.
+ *
+ * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
+ * scan for partitions from add_disk, and users can't add partitions manually.
*
- * If you modify this structure, make sure to update blk_rq_init() and
- * especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
-struct request {
- struct list_head queuelist;
- union {
- call_single_data_t csd;
- u64 fifo_time;
- };
-
- struct request_queue *q;
- struct blk_mq_ctx *mq_ctx;
-
- int cpu;
- unsigned int cmd_flags; /* op and common flags */
- req_flags_t rq_flags;
-
- int internal_tag;
+enum {
+ GENHD_FL_REMOVABLE = 1 << 0,
+ GENHD_FL_HIDDEN = 1 << 1,
+ GENHD_FL_NO_PART = 1 << 2,
+};
- unsigned long atomic_flags;
+enum {
+ DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
+ DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
+};
- /* the following two fields are internal, NEVER access directly */
- unsigned int __data_len; /* total data len */
- int tag;
- sector_t __sector; /* sector cursor */
+enum {
+ /* Poll even if events_poll_msecs is unset */
+ DISK_EVENT_FLAG_POLL = 1 << 0,
+ /* Forward events to udev */
+ DISK_EVENT_FLAG_UEVENT = 1 << 1,
+ /* Block event polling when open for exclusive write */
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
+};
- struct bio *bio;
- struct bio *biotail;
+struct disk_events;
+struct badblocks;
+
+enum blk_integrity_checksum {
+ BLK_INTEGRITY_CSUM_NONE = 0,
+ BLK_INTEGRITY_CSUM_IP = 1,
+ BLK_INTEGRITY_CSUM_CRC = 2,
+ BLK_INTEGRITY_CSUM_CRC64 = 3,
+} __packed;
+
+struct blk_integrity {
+ unsigned char flags;
+ enum blk_integrity_checksum csum_type;
+ unsigned char metadata_size;
+ unsigned char pi_offset;
+ unsigned char interval_exp;
+ unsigned char tag_size;
+ unsigned char pi_tuple_size;
+};
+typedef unsigned int __bitwise blk_mode_t;
+
+/* open for reading */
+#define BLK_OPEN_READ ((__force blk_mode_t)(1 << 0))
+/* open for writing */
+#define BLK_OPEN_WRITE ((__force blk_mode_t)(1 << 1))
+/* open exclusively (vs other exclusive openers) */
+#define BLK_OPEN_EXCL ((__force blk_mode_t)(1 << 2))
+/* opened with O_NDELAY */
+#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3))
+/* open for "writes" only for ioctls (special hack for floppy.c) */
+#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
+/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
+#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
+/* return partition scanning errors */
+#define BLK_OPEN_STRICT_SCAN ((__force blk_mode_t)(1 << 6))
+
+struct gendisk {
/*
- * The hash is used inside the scheduler, and killed once the
- * request reaches the dispatch list. The ipi_list is only used
- * to queue the request for softirq completion, which is long
- * after the request has been unhashed (and even removed from
- * the dispatch list).
+ * major/first_minor/minors should not be set by any new driver, the
+ * block core will take care of allocating them automatically.
*/
- union {
- struct hlist_node hash; /* merge hash */
- struct list_head ipi_list;
- };
+ int major;
+ int first_minor;
+ int minors;
+
+ char disk_name[DISK_NAME_LEN]; /* name of major driver */
+
+ unsigned short events; /* supported events */
+ unsigned short event_flags; /* flags related to event processing */
+
+ struct xarray part_tbl;
+ struct block_device *part0;
+
+ const struct block_device_operations *fops;
+ struct request_queue *queue;
+ void *private_data;
+
+ struct bio_set bio_split;
+
+ int flags;
+ unsigned long state;
+#define GD_NEED_PART_SCAN 0
+#define GD_READ_ONLY 1
+#define GD_DEAD 2
+#define GD_NATIVE_CAPACITY 3
+#define GD_ADDED 4
+#define GD_SUPPRESS_PART_SCAN 5
+#define GD_OWNS_QUEUE 6
+#define GD_ZONE_APPEND_USED 7
+
+ struct mutex open_mutex; /* open/close mutex */
+ unsigned open_partitions; /* number of open partitions */
+
+ struct backing_dev_info *bdi;
+ struct kobject queue_kobj; /* the queue/ directory */
+ struct kobject *slave_dir;
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+ struct list_head slave_bdevs;
+#endif
+ struct timer_rand_state *random;
+ struct disk_events *ev;
+#ifdef CONFIG_BLK_DEV_ZONED
/*
- * The rb_node is only used inside the io scheduler, requests
- * are pruned when moved to the dispatch queue. So let the
- * completion_data share space with the rb_node.
+ * Zoned block device information. Reads of this information must be
+ * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
+ * information is only allowed while no requests are being processed.
+ * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
*/
- union {
- struct rb_node rb_node; /* sort/lookup */
- struct bio_vec special_vec;
- void *completion_data;
- int error_count; /* for legacy drivers, don't use */
- };
+ unsigned int nr_zones;
+ unsigned int zone_capacity;
+ unsigned int last_zone_capacity;
+ u8 __rcu *zones_cond;
+ unsigned int zone_wplugs_hash_bits;
+ atomic_t nr_zone_wplugs;
+ spinlock_t zone_wplugs_lock;
+ struct mempool *zone_wplugs_pool;
+ struct hlist_head *zone_wplugs_hash;
+ struct workqueue_struct *zone_wplugs_wq;
+#endif /* CONFIG_BLK_DEV_ZONED */
- /*
- * Three pointers are available for the IO schedulers, if they need
- * more they have to dynamically allocate it. Flush requests are
- * never put on the IO scheduler. So let the flush fields share
- * space with the elevator data.
- */
- union {
- struct {
- struct io_cq *icq;
- void *priv[2];
- } elv;
-
- struct {
- unsigned int seq;
- struct list_head list;
- rq_end_io_fn *saved_end_io;
- } flush;
- };
-
- struct gendisk *rq_disk;
- struct hd_struct *part;
- unsigned long start_time;
- struct blk_issue_stat issue_stat;
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
- unsigned long long start_time_ns;
- unsigned long long io_start_time_ns; /* when passed to hardware */
-#endif
- /* Number of scatter-gather DMA addr+len pairs after
- * physical address coalescing is performed.
- */
- unsigned short nr_phys_segments;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- unsigned short nr_integrity_segments;
+#if IS_ENABLED(CONFIG_CDROM)
+ struct cdrom_device_info *cdi;
#endif
-
- unsigned short ioprio;
-
- unsigned int timeout;
-
- void *special; /* opaque pointer available for LLD use */
-
- unsigned int extra_len; /* length of alignment and padding */
-
- unsigned short write_hint;
-
- unsigned long deadline;
- struct list_head timeout_list;
+ int node_id;
+ struct badblocks *bb;
+ struct lockdep_map lockdep_map;
+ u64 diskseq;
+ blk_mode_t open_mode;
/*
- * completion callback.
+ * Independent sector access ranges. This is always NULL for
+ * devices that do not have multiple independent access ranges.
*/
- rq_end_io_fn *end_io;
- void *end_io_data;
+ struct blk_independent_access_ranges *ia_ranges;
- /* for bidi */
- struct request *next_rq;
+ struct mutex rqos_state_mutex; /* rqos state change mutex */
};
-static inline bool blk_rq_is_scsi(struct request *rq)
+/**
+ * disk_openers - returns how many openers there are for a disk
+ * @disk: disk to check
+ *
+ * This returns the number of openers for a disk. Note that this value is only
+ * stable if disk->open_mutex is held.
+ *
+ * Note: Due to a quirk in the block layer open code, each open partition is
+ * only counted once even if there are multiple openers.
+ */
+static inline unsigned int disk_openers(struct gendisk *disk)
+{
+ return atomic_read(&disk->part0->bd_openers);
+}
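
Since the count is only stable under open_mutex, callers that act on it must hold the lock; a hedged sketch (the function name is illustrative):

/* Sketch: refuse a driver-side reconfiguration while others hold opens. */
static int my_reconfigure(struct gendisk *disk)
{
	int ret = 0;

	mutex_lock(&disk->open_mutex);
	if (disk_openers(disk) > 1)
		ret = -EBUSY;
	mutex_unlock(&disk->open_mutex);
	return ret;
}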
+
+/**
+ * disk_has_partscan - return %true if partition scanning is enabled on a disk
+ * @disk: disk to check
+ *
+ * Returns %true if partition scanning is enabled for @disk, or %false if
+ * partition scanning is disabled either permanently or temporarily.
+ */
+static inline bool disk_has_partscan(struct gendisk *disk)
{
- return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+ return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
+ !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}
-static inline bool blk_rq_is_private(struct request *rq)
+/*
+ * The gendisk is refcounted by the part0 block_device, and the bd_device
+ * therein is also used for device model presentation in sysfs.
+ */
+#define dev_to_disk(device) \
+ (dev_to_bdev(device)->bd_disk)
+#define disk_to_dev(disk) \
+ (&((disk)->part0->bd_device))
+
+#if IS_REACHABLE(CONFIG_CDROM)
+#define disk_to_cdi(disk) ((disk)->cdi)
+#else
+#define disk_to_cdi(disk) NULL
+#endif
+
+static inline dev_t disk_devt(struct gendisk *disk)
{
- return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+ return MKDEV(disk->major, disk->first_minor);
}
-static inline bool blk_rq_is_passthrough(struct request *rq)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER), but we
+ * constrain this to what we can validate and test.
+ */
+#define BLK_MAX_BLOCK_SIZE SZ_64K
+#else
+#define BLK_MAX_BLOCK_SIZE PAGE_SIZE
+#endif
+
+/* blk_validate_limits() validates bsize, so drivers don't usually need to */
+static inline int blk_validate_block_size(unsigned long bsize)
{
- return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
+ if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize))
+ return -EINVAL;
+
+ return 0;
}
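
So 512, 1024, and 4096 pass, while 0, 520 (not a power of two), or anything above BLK_MAX_BLOCK_SIZE is rejected; a sketch of the typical call site for a user-supplied block size:

/* Sketch: vet a module-parameter block size before trusting it. */
static int my_set_blocksize(unsigned long bsize)
{
	int ret = blk_validate_block_size(bsize);

	if (ret)
		return ret;	/* -EINVAL */
	/* bsize is a power of two in [512, BLK_MAX_BLOCK_SIZE] here */
	return 0;
}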
-static inline unsigned short req_get_ioprio(struct request *req)
+static inline bool blk_op_is_passthrough(blk_opf_t op)
{
- return req->ioprio;
+ op &= REQ_OP_MASK;
+ return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
-#include <linux/elevator.h>
+/* flags set by the driver in queue_limits.features */
+typedef unsigned int __bitwise blk_features_t;
-struct blk_queue_ctx;
+/* supports a volatile write cache */
+#define BLK_FEAT_WRITE_CACHE ((__force blk_features_t)(1u << 0))
-typedef void (request_fn_proc) (struct request_queue *q);
-typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef int (prep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
+/* supports passing on the FUA bit */
+#define BLK_FEAT_FUA ((__force blk_features_t)(1u << 1))
-struct bio_vec;
-typedef void (softirq_done_fn)(struct request *);
-typedef int (dma_drain_needed_fn)(struct request *);
-typedef int (lld_busy_fn) (struct request_queue *q);
-typedef int (bsg_job_fn) (struct bsg_job *);
-typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
-typedef void (exit_rq_fn)(struct request_queue *, struct request *);
+/* rotational device (hard drive or floppy) */
+#define BLK_FEAT_ROTATIONAL ((__force blk_features_t)(1u << 2))
-enum blk_eh_timer_return {
- BLK_EH_NOT_HANDLED,
- BLK_EH_HANDLED,
- BLK_EH_RESET_TIMER,
-};
+/* contributes to the random number pool */
+#define BLK_FEAT_ADD_RANDOM ((__force blk_features_t)(1u << 3))
-typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
+/* do disk/partitions IO accounting */
+#define BLK_FEAT_IO_STAT ((__force blk_features_t)(1u << 4))
-enum blk_queue_state {
- Queue_down,
- Queue_up,
-};
+/* don't modify data until writeback is done */
+#define BLK_FEAT_STABLE_WRITES ((__force blk_features_t)(1u << 5))
-struct blk_queue_tag {
- struct request **tag_index; /* map of busy tags */
- unsigned long *tag_map; /* bit map of free/busy tags */
- int max_depth; /* what we will send to device */
- int real_max_depth; /* what the array can hold */
- atomic_t refcnt; /* map can be shared */
- int alloc_policy; /* tag allocation policy */
- int next_tag; /* next tag */
-};
-#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
-#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
+/* always completes in submit context */
+#define BLK_FEAT_SYNCHRONOUS ((__force blk_features_t)(1u << 6))
+
+/* supports REQ_NOWAIT */
+#define BLK_FEAT_NOWAIT ((__force blk_features_t)(1u << 7))
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+/* supports DAX */
+#define BLK_FEAT_DAX ((__force blk_features_t)(1u << 8))
+
+/* supports I/O polling */
+#define BLK_FEAT_POLL ((__force blk_features_t)(1u << 9))
+
+/* is a zoned device */
+#define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10))
+
+/* supports PCI(e) p2p requests */
+#define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12))
+
+/* skip this queue in blk_mq_(un)quiesce_tagset */
+#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
+
+/* undocumented magic for bcache */
+#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
+ ((__force blk_features_t)(1u << 15))
+
+/* atomic writes enabled */
+#define BLK_FEAT_ATOMIC_WRITES \
+ ((__force blk_features_t)(1u << 16))
/*
- * Zoned block device models (zoned limit).
+ * Flags automatically inherited when stacking limits.
*/
-enum blk_zoned_model {
- BLK_ZONED_NONE, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
-};
+#define BLK_FEAT_INHERIT_MASK \
+ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
+ BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
+ BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
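
Drivers declare these bits in the queue_limits they hand to the block core; a hedged sketch for a device with a volatile write cache that honors FUA (the block sizes are illustrative):

/* Sketch: feature declaration inside a driver's limits. */
struct queue_limits lim = {
	.features		= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
	.logical_block_size	= 4096,
	.physical_block_size	= 4096,
};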
+
+/* internal flags in queue_limits.flags */
+typedef unsigned int __bitwise blk_flags_t;
+
+/* do not send FLUSH/FUA commands despite advertising a write cache */
+#define BLK_FLAG_WRITE_CACHE_DISABLED ((__force blk_flags_t)(1u << 0))
+
+/* I/O topology is misaligned */
+#define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1))
+
+/* passthrough command IO accounting */
+#define BLK_FLAG_IOSTATS_PASSTHROUGH ((__force blk_flags_t)(1u << 2))
struct queue_limits {
- unsigned long bounce_pfn;
+ blk_features_t features;
+ blk_flags_t flags;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;
@@ -323,938 +378,786 @@ struct queue_limits {
unsigned int max_dev_sectors;
unsigned int chunk_sectors;
unsigned int max_sectors;
+ unsigned int max_user_sectors;
unsigned int max_segment_size;
+ unsigned int max_fast_segment_size;
unsigned int physical_block_size;
+ unsigned int logical_block_size;
unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
- unsigned int max_write_same_sectors;
+ unsigned int max_user_discard_sectors;
+ unsigned int max_secure_erase_sectors;
unsigned int max_write_zeroes_sectors;
+ unsigned int max_wzeroes_unmap_sectors;
+ unsigned int max_hw_wzeroes_unmap_sectors;
+ unsigned int max_user_wzeroes_unmap_sectors;
+ unsigned int max_hw_zone_append_sectors;
+ unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int zone_write_granularity;
+
+ /* atomic write limits */
+ unsigned int atomic_write_hw_max;
+ unsigned int atomic_write_max_sectors;
+ unsigned int atomic_write_hw_boundary;
+ unsigned int atomic_write_boundary_sectors;
+ unsigned int atomic_write_hw_unit_min;
+ unsigned int atomic_write_unit_min;
+ unsigned int atomic_write_hw_unit_max;
+ unsigned int atomic_write_unit_max;
- unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
- unsigned char misaligned;
- unsigned char discard_misaligned;
- unsigned char cluster;
- unsigned char raid_partial_stripes_expensive;
- enum blk_zoned_model zoned;
-};
+ unsigned short max_write_streams;
+ unsigned int write_stream_granularity;
-#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
-struct blk_zone_report_hdr {
- unsigned int nr_zones;
- u8 padding[60];
+ /*
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+	 * handle individual bvecs that are not a multiple of SECTOR_SIZE
+ * due to possible offsets.
+ */
+ unsigned int dma_alignment;
+ unsigned int dma_pad_mask;
+
+ struct blk_integrity integrity;
};
-extern int blkdev_report_zones(struct block_device *bdev,
- sector_t sector, struct blk_zone *zones,
- unsigned int *nr_zones, gfp_t gfp_mask);
-extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
- sector_t nr_sectors, gfp_t gfp_mask);
+typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
+ void *data);
-extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
+int disk_report_zone(struct gendisk *disk, struct blk_zone *zone,
+ unsigned int idx, struct blk_report_zones_args *args);
-#else /* CONFIG_BLK_DEV_ZONED */
+int blkdev_get_zone_info(struct block_device *bdev, sector_t sector,
+ struct blk_zone *zone);
-static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
+#define BLK_ALL_ZONES ((unsigned int)-1)
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_report_zones_cached(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
+ sector_t sectors, sector_t nr_sectors);
+int blk_revalidate_disk_zones(struct gendisk *disk);
-static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
+/*
+ * Independent access ranges: struct blk_independent_access_range describes
+ * a range of contiguous sectors that can be accessed using device command
+ * execution resources that are independent from the resources used for
+ * other access ranges. This is typically found with single-LUN multi-actuator
+ * HDDs where each access range is served by a different set of heads.
+ * The set of independent ranges supported by the device is defined using
+ * struct blk_independent_access_ranges. The independent ranges must not overlap
+ * and must include all sectors within the disk capacity (no sector holes
+ * allowed).
+ * For a device with multiple ranges, requests targeting sectors in different
+ * ranges can be executed in parallel. A request can straddle an access range
+ * boundary.
+ */
+struct blk_independent_access_range {
+ struct kobject kobj;
+ sector_t sector;
+ sector_t nr_sectors;
+};
-#endif /* CONFIG_BLK_DEV_ZONED */
+struct blk_independent_access_ranges {
+ struct kobject kobj;
+ bool sysfs_registered;
+ unsigned int nr_ia_ranges;
+ struct blk_independent_access_range ia_range[];
+};
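
A consumer walks the flexible ia_range[] array; since the ranges must cover the whole capacity without holes, a lookup always succeeds. A sketch (assuming a non-NULL pointer taken from gendisk->ia_ranges):

/* Sketch: find the independent access range serving @sector. */
static int my_ia_range_index(struct blk_independent_access_ranges *iars,
			     sector_t sector)
{
	unsigned int i;

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		struct blk_independent_access_range *r = &iars->ia_range[i];

		if (sector >= r->sector && sector < r->sector + r->nr_sectors)
			return i;
	}
	return -1;	/* unreachable if the ranges are well formed */
}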
struct request_queue {
/*
- * Together with queue_head for cacheline sharing
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
*/
- struct list_head queue_head;
- struct request *last_merge;
+ void *queuedata;
+
struct elevator_queue *elevator;
- int nr_rqs[2]; /* # allocated [a]sync rqs */
- int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
- atomic_t shared_hctx_restart;
+ const struct blk_mq_ops *mq_ops;
- struct blk_queue_stats *stats;
- struct rq_wb *rq_wb;
+ /* sw queues */
+ struct blk_mq_ctx __percpu *queue_ctx;
/*
- * If blkcg is not used, @q->root_rl serves all requests. If blkcg
- * is used, root blkg allocates from @q->root_rl and all other
- * blkgs from their own blkg->rl. Which one to use should be
- * determined using bio_request_list().
+ * various queue flags, see QUEUE_* below
*/
- struct request_list root_rl;
-
- request_fn_proc *request_fn;
- make_request_fn *make_request_fn;
- prep_rq_fn *prep_rq_fn;
- unprep_rq_fn *unprep_rq_fn;
- softirq_done_fn *softirq_done_fn;
- rq_timed_out_fn *rq_timed_out_fn;
- dma_drain_needed_fn *dma_drain_needed;
- lld_busy_fn *lld_busy_fn;
- /* Called just after a request is allocated */
- init_rq_fn *init_rq_fn;
- /* Called just before a request is freed */
- exit_rq_fn *exit_rq_fn;
- /* Called from inside blk_get_request() */
- void (*initialize_rq_fn)(struct request *rq);
-
- const struct blk_mq_ops *mq_ops;
-
- unsigned int *mq_map;
+ unsigned long queue_flags;
- /* sw queues */
- struct blk_mq_ctx __percpu *queue_ctx;
- unsigned int nr_queues;
+ unsigned int __data_racy rq_timeout;
unsigned int queue_depth;
+ refcount_t refs;
+
/* hw dispatch queues */
- struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
+ struct blk_mq_hw_ctx * __rcu *queue_hw_ctx;
- /*
- * Dispatch queue sorting
- */
- sector_t end_sector;
- struct request *boundary_rq;
-
- /*
- * Delayed queue handling
- */
- struct delayed_work delay_work;
-
- struct backing_dev_info *backing_dev_info;
-
- /*
- * The queue owner gets to use this for whatever they like.
- * ll_rw_blk doesn't touch it.
- */
- void *queuedata;
+ struct percpu_ref q_usage_counter;
+ struct lock_class_key io_lock_cls_key;
+ struct lockdep_map io_lockdep_map;
- /*
- * various queue flags, see QUEUE_* below
- */
- unsigned long queue_flags;
+ struct lock_class_key q_lock_cls_key;
+ struct lockdep_map q_lockdep_map;
- /*
- * ida allocated id for this queue. Used to index queues from
- * ioctx.
- */
- int id;
+ struct request *last_merge;
- /*
- * queue needs bounce pages for pages above this limit
- */
- gfp_t bounce_gfp;
+ spinlock_t queue_lock;
- /*
- * protects queue structures from reentrancy. ->__queue_lock should
- * _never_ be used directly, it is queue private. always use
- * ->queue_lock.
- */
- spinlock_t __queue_lock;
- spinlock_t *queue_lock;
+ int quiesce_depth;
- /*
- * queue kobject
- */
- struct kobject kobj;
+ struct gendisk *disk;
/*
* mq queue kobject
*/
- struct kobject mq_kobj;
+ struct kobject *mq_kobj;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct blk_integrity integrity;
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+ struct queue_limits limits;
#ifdef CONFIG_PM
struct device *dev;
- int rpm_status;
- unsigned int nr_pending;
+ enum rpm_status rpm_status;
#endif
/*
- * queue settings
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM requests are processed.
*/
- unsigned long nr_requests; /* Max # of requests */
- unsigned int nr_congestion_on;
- unsigned int nr_congestion_off;
- unsigned int nr_batching;
-
- unsigned int dma_drain_size;
- void *dma_drain_buffer;
- unsigned int dma_pad_mask;
- unsigned int dma_alignment;
-
- struct blk_queue_tag *queue_tags;
- struct list_head tag_busy_list;
+ atomic_t pm_only;
- unsigned int nr_sorted;
- unsigned int in_flight[2];
+ struct blk_queue_stats *stats;
+ struct rq_qos *rq_qos;
+ struct mutex rq_qos_mutex;
/*
- * Number of active block driver functions for which blk_drain_queue()
- * must wait. Must be incremented around functions that unlock the
- * queue_lock internally, e.g. scsi_request_fn().
+ * ida allocated id for this queue. Used to index queues from
+ * ioctx.
*/
- unsigned int request_fn_active;
+ int id;
- unsigned int rq_timeout;
- int poll_nsec;
+ /*
+ * queue settings
+ */
+ unsigned long nr_requests; /* Max # of requests */
- struct blk_stat_callback *poll_cb;
- struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct blk_crypto_profile *crypto_profile;
+ struct kobject *crypto_kobject;
+#endif
struct timer_list timeout;
struct work_struct timeout_work;
- struct list_head timeout_list;
+
+ atomic_t nr_active_requests_shared_tags;
+
+ struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
+ struct mutex blkcg_mutex;
#endif
- struct queue_limits limits;
-
- /*
- * sg stuff
- */
- unsigned int sg_timeout;
- unsigned int sg_reserved_size;
int node;
+
+ spinlock_t requeue_lock;
+ struct list_head requeue_list;
+ struct delayed_work requeue_work;
+
#ifdef CONFIG_BLK_DEV_IO_TRACE
- struct blk_trace *blk_trace;
+ struct blk_trace __rcu *blk_trace;
#endif
/*
* for flush operations
*/
struct blk_flush_queue *fq;
+ struct list_head flush_list;
- struct list_head requeue_list;
- spinlock_t requeue_lock;
- struct delayed_work requeue_work;
+ /*
+ * Protects against I/O scheduler switching, particularly when updating
+	 * q->elevator. Since the elevator update code path may also modify
+	 * q->nr_requests and wbt latency, this lock also protects the sysfs attrs
+ * nr_requests and wbt_lat_usec. Additionally the nr_hw_queues update
+ * may modify hctx tags, reserved-tags and cpumask, so this lock also
+ * helps protect the hctx sysfs/debugfs attrs. To ensure proper locking
+ * order during an elevator or nr_hw_queue update, first freeze the
+ * queue, then acquire ->elevator_lock.
+ */
+ struct mutex elevator_lock;
struct mutex sysfs_lock;
+ /*
+ * Protects queue limits and also sysfs attribute read_ahead_kb.
+ */
+ struct mutex limits_lock;
- int bypass_depth;
- atomic_t mq_freeze_depth;
+ /*
+ * for reusing dead hctx instance in case of updating
+ * nr_hw_queues
+ */
+ struct list_head unused_hctx_list;
+ spinlock_t unused_hctx_lock;
-#if defined(CONFIG_BLK_DEV_BSG)
- bsg_job_fn *bsg_job_fn;
- struct bsg_class_device bsg_dev;
-#endif
+ int mq_freeze_depth;
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
+#ifdef CONFIG_LOCKDEP
+ struct task_struct *mq_freeze_owner;
+ int mq_freeze_owner_depth;
+ /*
+ * Records disk & queue state in current context, used in unfreeze
+ * queue
+ */
+ bool mq_freeze_disk_dead;
+ bool mq_freeze_queue_dying;
+#endif
wait_queue_head_t mq_freeze_wq;
- struct percpu_ref q_usage_counter;
- struct list_head all_q_node;
+ /*
+ * Protect concurrent access to q_usage_counter by
+ * percpu_ref_kill() and percpu_ref_reinit().
+ */
+ struct mutex mq_freeze_lock;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
- struct bio_set *bio_split;
-#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
-#endif
+ struct dentry *rqos_debugfs_dir;
+ /*
+ * Serializes all debugfs metadata operations using the above dentries.
+ */
+ struct mutex debugfs_mutex;
+};
- bool mq_sysfs_init_done;
+/* Keep blk_queue_flag_name[] in sync with the definitions below */
+enum {
+ QUEUE_FLAG_DYING, /* queue being torn down */
+ QUEUE_FLAG_NOMERGES, /* disable merge attempts */
+ QUEUE_FLAG_SAME_COMP, /* complete on same CPU-group */
+ QUEUE_FLAG_FAIL_IO, /* fake timeout */
+ QUEUE_FLAG_NOXMERGES, /* No extended merges */
+ QUEUE_FLAG_SAME_FORCE, /* force complete on same CPU */
+ QUEUE_FLAG_INIT_DONE, /* queue is initialized */
+ QUEUE_FLAG_STATS, /* track IO start and completion times */
+ QUEUE_FLAG_REGISTERED, /* queue has been registered to a disk */
+ QUEUE_FLAG_QUIESCED, /* queue has been quiesced */
+ QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */
+ QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */
+ QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
+ QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
+ QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
+ QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */
+ QUEUE_FLAG_BIO_ISSUE_TIME, /* record bio->issue_time_ns */
+ QUEUE_FLAG_MAX
+};
- size_t cmd_size;
- void *rq_alloc_data;
+#define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP)
- struct work_struct release_work;
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
-#define BLK_MAX_WRITE_HINTS 5
- u64 write_hints[BLK_MAX_WRITE_HINTS];
-};
+#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
+#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_noxmerges(q) \
+ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
+#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
+#define blk_queue_passthrough_stat(q) \
+ ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
+#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
+#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+#define blk_queue_rq_alloc_time(q) \
+ test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
+#else
+#define blk_queue_rq_alloc_time(q) false
+#endif
-#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
-#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
-#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
-#define QUEUE_FLAG_DYING 5 /* queue being torn down */
-#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
-#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
-#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
-#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
-#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */
-#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
-#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
-#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
-#define QUEUE_FLAG_WC 23 /* Write back caching */
-#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
-#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
-#define QUEUE_FLAG_DAX 26 /* device supports DAX */
-#define QUEUE_FLAG_STATS 27 /* track rq completion times */
-#define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */
-#define QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 30 /* queue supports SCSI commands */
-#define QUEUE_FLAG_QUIESCED 31 /* queue has been quiesced */
-
-#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_STACKABLE) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_ADD_RANDOM))
-
-#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_STACKABLE) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_POLL))
+#define blk_noretry_request(rq) \
+ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+ REQ_FAILFAST_DRIVER))
+#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
+#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
+#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
+#define blk_queue_skip_tagset_quiesce(q) \
+ ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
+#define blk_queue_disable_wbt(q) \
+ test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
+#define blk_queue_no_elv_switch(q) \
+ test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)
+
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
+#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
+
+#define dma_map_bvec(dev, bv, dir, attrs) \
+ dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
+ (dir), (attrs))
+
+static inline bool queue_is_mq(struct request_queue *q)
{
- if (q->queue_lock)
- lockdep_assert_held(q->queue_lock);
+ return q->mq_ops;
}
-static inline void queue_flag_set_unlocked(unsigned int flag,
- struct request_queue *q)
+#ifdef CONFIG_PM
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
- __set_bit(flag, &q->queue_flags);
+ return q->rpm_status;
}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
- struct request_queue *q)
+#else
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
- queue_lockdep_assert_held(q);
-
- if (test_bit(flag, &q->queue_flags)) {
- __clear_bit(flag, &q->queue_flags);
- return 1;
- }
-
- return 0;
+ return RPM_ACTIVE;
}
+#endif
-static inline int queue_flag_test_and_set(unsigned int flag,
- struct request_queue *q)
+static inline bool blk_queue_is_zoned(struct request_queue *q)
{
- queue_lockdep_assert_held(q);
-
- if (!test_bit(flag, &q->queue_flags)) {
- __set_bit(flag, &q->queue_flags);
- return 0;
- }
-
- return 1;
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (q->limits.features & BLK_FEAT_ZONED);
}
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
- queue_lockdep_assert_held(q);
- __set_bit(flag, &q->queue_flags);
+ if (!blk_queue_is_zoned(disk->queue))
+ return 0;
+ return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
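
Because chunk_sectors (the zone size) is a power of two on zoned devices, the zone number is a plain shift; with 524288-sector (256 MiB) zones, sector 1048576 lands in zone 2. A sketch mapping a bio to its zone:

/* Sketch: zone number of the zone a bio starts in. */
static unsigned int my_bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}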
-static inline void queue_flag_clear_unlocked(unsigned int flag,
- struct request_queue *q)
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
- __clear_bit(flag, &q->queue_flags);
+ return bdev->bd_disk->queue->limits.max_open_zones;
}
-static inline int queue_in_flight(struct request_queue *q)
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
- return q->in_flight[0] + q->in_flight[1];
+ return bdev->bd_disk->queue->limits.max_active_zones;
}
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+static inline unsigned int blk_queue_depth(struct request_queue *q)
{
- queue_lockdep_assert_held(q);
- __clear_bit(flag, &q->queue_flags);
+ if (q->queue_depth)
+ return q->queue_depth;
+
+ return q->nr_requests;
}
-#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
-#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
-#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
-#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
-#define blk_queue_noxmerges(q) \
- test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
-#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
-#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_stackable(q) \
- test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
-#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
-#define blk_queue_secure_erase(q) \
- (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
-#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
-#define blk_queue_scsi_passthrough(q) \
- test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
+/*
+ * default timeout for SG_IO if none specified
+ */
+#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
+#define BLK_MIN_SG_TIMEOUT (7 * HZ)
-#define blk_noretry_request(rq) \
- ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
- REQ_FAILFAST_DRIVER))
-#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+/* This should not be used directly - use rq_for_each_segment */
+#define for_each_bio(_bio) \
+ for (; _bio; _bio = _bio->bi_next)
-static inline bool blk_account_rq(struct request *rq)
+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
+ struct fwnode_handle *fwnode);
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
+static inline int __must_check add_disk(struct gendisk *disk)
{
- return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
+ return device_add_disk(NULL, disk, NULL);
}
+void del_gendisk(struct gendisk *gp);
+void invalidate_disk(struct gendisk *disk);
+void set_disk_ro(struct gendisk *disk, bool read_only);
+void disk_uevent(struct gendisk *disk, enum kobject_action action);
-#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
-#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-/* rq->queuelist of dequeued request must be list_empty() */
-#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
-
-#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-
-#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
-
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
+static inline u8 bdev_partno(const struct block_device *bdev)
{
- return q->request_fn || q->mq_ops;
+ return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}
-static inline unsigned int blk_queue_cluster(struct request_queue *q)
+static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{
- return q->limits.cluster;
+ return atomic_read(&bdev->__bd_flags) & flag;
}
-static inline enum blk_zoned_model
-blk_queue_zoned_model(struct request_queue *q)
+static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
- return q->limits.zoned;
+ atomic_or(flag, &bdev->__bd_flags);
}
-static inline bool blk_queue_is_zoned(struct request_queue *q)
+static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
- switch (blk_queue_zoned_model(q)) {
- case BLK_ZONED_HA:
- case BLK_ZONED_HM:
- return true;
- default:
- return false;
- }
+ atomic_andnot(flag, &bdev->__bd_flags);
}
-static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
+static inline bool get_disk_ro(struct gendisk *disk)
{
- return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
+ return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
+ test_bit(GD_READ_ONLY, &disk->state);
}
-static inline bool rq_is_sync(struct request *rq)
+static inline bool bdev_read_only(struct block_device *bdev)
{
- return op_is_sync(rq->cmd_flags);
+ return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}
-static inline bool blk_rl_full(struct request_list *rl, bool sync)
+bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
+void disk_force_media_change(struct gendisk *disk);
+void bdev_mark_dead(struct block_device *bdev, bool surprise);
+
+void add_disk_randomness(struct gendisk *disk) __latent_entropy;
+void rand_initialize_disk(struct gendisk *disk);
+
+static inline sector_t get_start_sect(struct block_device *bdev)
{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+ return bdev->bd_start_sect;
+}
- return rl->flags & flag;
+static inline sector_t bdev_nr_sectors(struct block_device *bdev)
+{
+ return bdev->bd_nr_sectors;
}
-static inline void blk_set_rl_full(struct request_list *rl, bool sync)
+static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+ return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
+}
- rl->flags |= flag;
+static inline sector_t get_capacity(struct gendisk *disk)
+{
+ return bdev_nr_sectors(disk->part0);
}
-static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
+static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+ return bdev_nr_sectors(sb->s_bdev) >>
+ (sb->s_blocksize_bits - SECTOR_SHIFT);
+}
- rl->flags &= ~flag;
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return disk->nr_zones;
}
-static inline bool rq_mergeable(struct request *rq)
+/**
+ * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
+ * write plugging
+ * @bio: The BIO being submitted
+ *
+ * Return true whenever @bio execution needs to be handled through zone
+ * write plugging (using blk_zone_plug_bio()). Return false otherwise.
+ */
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
{
- if (blk_rq_is_passthrough(rq))
- return false;
+ enum req_op op = bio_op(bio);
- if (req_op(rq) == REQ_OP_FLUSH)
+ /*
+ * Only zoned block devices have a zone write plug hash table. But not
+ * all of them have one (e.g. DM devices may not need one).
+ */
+ if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
return false;
- if (req_op(rq) == REQ_OP_WRITE_ZEROES)
+ /* Only write operations need zone write plugging. */
+ if (!op_is_write(op))
return false;
- if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+ /* Ignore empty flush */
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return false;
- if (rq->rq_flags & RQF_NOMERGE_FLAGS)
+
+ /* Ignore BIOs that already have been handled by zone write plugging. */
+ if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
return false;
- return true;
+ /*
+ * All zone write operations must be handled through zone write plugging
+ * using blk_zone_plug_bio().
+ */
+ switch (op) {
+ case REQ_OP_ZONE_APPEND:
+ case REQ_OP_WRITE:
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_ZONE_FINISH:
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ return true;
+ default:
+ return false;
+ }
}
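
The intended submission-path pattern, per the kernel-doc above, is to run the cheap check first and hand qualifying bios to blk_zone_plug_bio(); if the plug takes ownership, submission stops there. A hedged sketch for a BIO-based driver (an nr_segs of 0 is what such drivers pass):

/* Sketch: zone write plugging hook in a ->submit_bio path. */
static void my_submit_bio(struct bio *bio)
{
	if (bio_needs_zone_write_plugging(bio) &&
	    blk_zone_plug_bio(bio, 0))
		return;	/* plugged; the bio will be resubmitted later */

	/* ... normal processing of the bio ... */
}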
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+
+/**
+ * disk_zone_capacity - returns the capacity of the zone containing @sector
+ * @disk: disk to work with
+ * @sector: sector number within the zone being queried
+ *
+ * Returns the zone capacity of the zone containing @sector. @sector can be
+ * any sector in the zone.
+ */
+static inline unsigned int disk_zone_capacity(struct gendisk *disk,
+ sector_t sector)
{
- if (bio_page(a) == bio_page(b) &&
- bio_offset(a) == bio_offset(b))
- return true;
+ sector_t zone_sectors = disk->queue->limits.chunk_sectors;
- return false;
+ if (sector + zone_sectors >= get_capacity(disk))
+ return disk->last_zone_capacity;
+ return disk->zone_capacity;
}
-
-static inline unsigned int blk_queue_depth(struct request_queue *q)
+static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
+ sector_t pos)
{
- if (q->queue_depth)
- return q->queue_depth;
-
- return q->nr_requests;
+ return disk_zone_capacity(bdev->bd_disk, pos);
}
-/*
- * q->prep_rq_fn return values
- */
-enum {
- BLKPREP_OK, /* serve it */
- BLKPREP_KILL, /* fatal error, kill, return -EIO */
- BLKPREP_DEFER, /* leave on queue */
- BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
-};
-
-extern unsigned long blk_max_low_pfn, blk_max_pfn;
-
-/*
- * standard bounce addresses:
- *
- * BLK_BOUNCE_HIGH : bounce all highmem pages
- * BLK_BOUNCE_ANY : don't bounce anything
- * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
- */
+bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector);
-#if BITS_PER_LONG == 32
-#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
-#else
-#define BLK_BOUNCE_HIGH -1ULL
-#endif
-#define BLK_BOUNCE_ANY (-1ULL)
-#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return 0;
+}
-/*
- * default timeout for SG_IO if none specified
- */
-#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
-#define BLK_MIN_SG_TIMEOUT (7 * HZ)
+static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
+{
+ return false;
+}
-struct rq_map_data {
- struct page **pages;
- int page_order;
- int nr_entries;
- unsigned long offset;
- int null_mapped;
- int from_user;
-};
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
+{
+ return false;
+}
-struct req_iterator {
- struct bvec_iter iter;
- struct bio *bio;
-};
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
-/* This should not be used directly - use rq_for_each_segment */
-#define for_each_bio(_bio) \
- for (; _bio; _bio = _bio->bi_next)
-#define __rq_for_each_bio(_bio, rq) \
- if ((rq->bio)) \
- for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return disk_nr_zones(bdev->bd_disk);
+}
-#define rq_for_each_segment(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+int bdev_disk_changed(struct gendisk *disk, bool invalidate);
-#define rq_iter_last(bvec, _iter) \
- (_iter.bio->bi_next == NULL && \
- bio_iter_last(bvec, _iter.iter))
+void put_disk(struct gendisk *disk);
+struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
+ struct lock_class_key *lkclass);
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void rq_flush_dcache_pages(struct request *rq);
+/**
+ * blk_alloc_disk - allocate a gendisk structure
+ * @lim: queue limits to be used for this disk.
+ * @node_id: numa node to allocate on
+ *
+ * Allocate and pre-initialize a gendisk structure for use with BIO-based
+ * drivers.
+ *
+ * Returns an ERR_PTR on error, else the allocated disk.
+ *
+ * Context: can sleep
+ */
+#define blk_alloc_disk(lim, node_id) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_alloc_disk(lim, node_id, &__key); \
+})
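A minimal allocation sketch for a BIO-based driver, with an illustrative limit value; per the comment above, the return value must be checked with IS_ERR():

	struct queue_limits lim = {
		.logical_block_size	= 4096,	/* illustrative */
	};
	struct gendisk *disk;

	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk))
		return PTR_ERR(disk);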
+
+int __register_blkdev(unsigned int major, const char *name,
+ void (*probe)(dev_t devt));
+#define register_blkdev(major, name) \
+ __register_blkdev(major, name, NULL)
+void unregister_blkdev(unsigned int major, const char *name);
+
+bool disk_check_media_change(struct gendisk *disk);
+void set_capacity(struct gendisk *disk, sector_t size);
+
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
-static inline void rq_flush_dcache_pages(struct request *rq)
+static inline int bd_link_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
{
+ return 0;
}
-#endif
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
+{
+}
+#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
-#ifdef CONFIG_PRINTK
-#define vfs_msg(sb, level, fmt, ...) \
- __vfs_msg(sb, level, fmt, ##__VA_ARGS__)
-#else
-#define vfs_msg(sb, level, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __vfs_msg(sb, "", " "); \
-} while (0)
-#endif
+dev_t part_devt(struct gendisk *disk, u8 partno);
+void inc_diskseq(struct gendisk *disk);
+void blk_request_module(dev_t devt);
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-extern blk_qc_t generic_make_request(struct bio *bio);
-extern void blk_rq_init(struct request_queue *q, struct request *rq);
-extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
-extern void blk_put_request(struct request *);
-extern void __blk_put_request(struct request_queue *, struct request *);
-extern struct request *blk_get_request(struct request_queue *, unsigned int op,
- gfp_t gfp_mask);
-extern void blk_requeue_request(struct request_queue *, struct request *);
+void submit_bio_noacct(struct bio *bio);
+struct bio *bio_split_to_limits(struct bio *bio);
+struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
+ struct bio_set *bs);
+
extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
-extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
- struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
-extern void blk_delay_queue(struct request_queue *, unsigned long);
-extern void blk_queue_split(struct request_queue *, struct bio **);
-extern void blk_recount_segments(struct request_queue *, struct bio *);
-extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
-extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
- unsigned int, void __user *);
-extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- struct scsi_ioctl_command __user *);
-
-extern int blk_queue_enter(struct request_queue *q, bool nowait);
+extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
-extern void blk_start_queue(struct request_queue *q);
-extern void blk_start_queue_async(struct request_queue *q);
-extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q);
-extern void __blk_run_queue_uncond(struct request_queue *q);
-extern void blk_run_queue(struct request_queue *);
-extern void blk_run_queue_async(struct request_queue *q);
-extern int blk_rq_map_user(struct request_queue *, struct request *,
- struct rq_map_data *, void __user *, unsigned long,
- gfp_t);
-extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct iov_iter *,
- gfp_t);
-extern void blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
-extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
- struct request *, int, rq_end_io_fn *);
+
+/* Helper to convert REQ_OP_XXX to its string format XXX */
+extern const char *blk_op_str(enum req_op op);
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
+const char *blk_status_to_str(blk_status_t status);
-bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+/* poll the hardware only once instead of looping until a completion is found */
+#define BLK_POLL_ONESHOT (1 << 0)
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
+int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
- return bdev->bd_disk->queue; /* this is never NULL */
+ return bdev->bd_queue; /* this is never NULL */
}
-/*
- * blk_rq_pos() : the current sector
- * blk_rq_bytes() : bytes left in the entire request
- * blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_err_bytes() : bytes left till the next error boundary
- * blk_rq_sectors() : sectors left in the entire request
- * blk_rq_cur_sectors() : sectors left in the current segment
- */
-static inline sector_t blk_rq_pos(const struct request *rq)
-{
- return rq->__sector;
-}
-
-static inline unsigned int blk_rq_bytes(const struct request *rq)
-{
- return rq->__data_len;
-}
-
-static inline int blk_rq_cur_bytes(const struct request *rq)
-{
- return rq->bio ? bio_cur_bytes(rq->bio) : 0;
-}
-
-extern unsigned int blk_rq_err_bytes(const struct request *rq);
+/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
+const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
-static inline unsigned int blk_rq_sectors(const struct request *rq)
+static inline unsigned int bio_zone_no(struct bio *bio)
{
- return blk_rq_bytes(rq) >> 9;
+ return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
-static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+static inline bool bio_straddles_zones(struct bio *bio)
{
- return blk_rq_cur_bytes(rq) >> 9;
+ return bio_sectors(bio) &&
+ bio_zone_no(bio) !=
+ disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}
/*
- * Some commands like WRITE SAME have a payload or data transfer size which
- * is different from the size of the request. Any driver that supports such
- * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
- * calculate the data transfer size.
+ * Return how many sectors are left for I/O within the boundary, starting at
+ * the given offset.
*/
-static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+static inline unsigned int blk_boundary_sectors_left(sector_t offset,
+ unsigned int boundary_sectors)
{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec.bv_len;
- return blk_rq_bytes(rq);
+ if (unlikely(!is_power_of_2(boundary_sectors)))
+ return boundary_sectors - sector_div(offset, boundary_sectors);
+ return boundary_sectors - (offset & (boundary_sectors - 1));
}
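For example, with boundary_sectors = 256 (a power of two) and offset = 300, the fast path yields 256 - (300 & 255) = 256 - 44 = 212 sectors left before the next boundary; the sector_div() branch computes the same result when the boundary is not a power of two.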
-static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
- int op)
+/**
+ * queue_limits_start_update - start an atomic update of queue limits
+ * @q: queue to update
+ *
+ * This function starts an atomic update of the queue limits. It takes a lock
+ * to prevent other updates and returns a snapshot of the current limits that
+ * the caller can modify. The caller must call queue_limits_commit_update()
+ * to finish the update.
+ *
+ * Context: process context.
+ */
+static inline struct queue_limits
+queue_limits_start_update(struct request_queue *q)
+{
+ mutex_lock(&q->limits_lock);
+ return q->limits;
+}
+int queue_limits_commit_update_frozen(struct request_queue *q,
+ struct queue_limits *lim);
+int queue_limits_commit_update(struct request_queue *q,
+ struct queue_limits *lim);
+int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
+int blk_validate_limits(struct queue_limits *lim);
+
+/**
+ * queue_limits_cancel_update - cancel an atomic update of queue limits
+ * @q: queue to update
+ *
+ * This function cancels an atomic update of the queue limits started by
+ * queue_limits_start_update() and should be used when an error occurs after
+ * starting the update.
+ */
+static inline void queue_limits_cancel_update(struct request_queue *q)
{
- if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
- return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-
- if (unlikely(op == REQ_OP_WRITE_SAME))
- return q->limits.max_write_same_sectors;
-
- if (unlikely(op == REQ_OP_WRITE_ZEROES))
- return q->limits.max_write_zeroes_sectors;
-
- return q->limits.max_sectors;
+ mutex_unlock(&q->limits_lock);
}
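The intended start/modify/commit-or-cancel pattern, sketched with an illustrative limit value and an invented driver-specific validation condition:

	struct queue_limits lim;

	lim = queue_limits_start_update(q);
	lim.max_hw_sectors = 512;		/* illustrative value */
	if (new_limits_unusable) {		/* driver-specific check (assumed) */
		queue_limits_cancel_update(q);
		return -EINVAL;
	}
	return queue_limits_commit_update(q, &lim);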
/*
- * Return maximum size of a request at given offset. Only valid for
- * file system requests.
+ * These helpers are for drivers that have sloppy feature negotiation and might
+ * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
+ * completion handler when the device returned an indicator that the respective
+ * feature is not actually supported. They are racy and the driver needs to
+ * cope with that. Try to avoid this scheme if you can.
*/
-static inline unsigned int blk_max_size_offset(struct request_queue *q,
- sector_t offset)
+static inline void blk_queue_disable_discard(struct request_queue *q)
{
- if (!q->limits.chunk_sectors)
- return q->limits.max_sectors;
-
- return q->limits.chunk_sectors -
- (offset & (q->limits.chunk_sectors - 1));
+ q->limits.max_discard_sectors = 0;
}
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
- sector_t offset)
+static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
- struct request_queue *q = rq->q;
-
- if (blk_rq_is_passthrough(rq))
- return q->limits.max_hw_sectors;
-
- if (!q->limits.chunk_sectors ||
- req_op(rq) == REQ_OP_DISCARD ||
- req_op(rq) == REQ_OP_SECURE_ERASE)
- return blk_queue_get_max_sectors(q, req_op(rq));
-
- return min(blk_max_size_offset(q, offset),
- blk_queue_get_max_sectors(q, req_op(rq)));
+ q->limits.max_secure_erase_sectors = 0;
}
-static inline unsigned int blk_rq_count_bios(struct request *rq)
+static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
- unsigned int nr_bios = 0;
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- nr_bios++;
-
- return nr_bios;
+ q->limits.max_write_zeroes_sectors = 0;
+ q->limits.max_wzeroes_unmap_sectors = 0;
}
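A hedged sketch of the scheme described above; the condition for detecting an unsupported command is entirely device-specific and invented here:

	/* in a request completion handler */
	if (req_op(rq) == REQ_OP_DISCARD && status_means_unsupported(rq))
		blk_queue_disable_discard(rq->q);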
/*
- * Request issue related functions.
- */
-extern struct request *blk_peek_request(struct request_queue *q);
-extern void blk_start_request(struct request *rq);
-extern struct request *blk_fetch_request(struct request_queue *q);
-
-/*
- * Request completion related functions.
- *
- * blk_update_request() completes given number of bytes and updates
- * the request without completing it.
- *
- * blk_end_request() and friends. __blk_end_request() must be called
- * with the request queue spinlock acquired.
- *
- * Several drivers define their own end_request and call
- * blk_end_request() for parts of the original function.
- * This prevents code duplication in drivers.
- */
-extern bool blk_update_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, blk_status_t error);
-extern bool blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
-extern void blk_end_request_all(struct request *rq, blk_status_t error);
-extern bool __blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
-extern void __blk_end_request_all(struct request *rq, blk_status_t error);
-extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
-
-extern void blk_complete_request(struct request *);
-extern void __blk_complete_request(struct request *);
-extern void blk_abort_request(struct request *);
-extern void blk_unprep_request(struct request *);
-
-/*
* Access functions for manipulating queue properties
*/
-extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
- spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern int blk_init_allocated_queue(struct request_queue *);
-extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
-extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_discard_segments(struct request_queue *,
- unsigned short);
-extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_max_discard_sectors(struct request_queue *q,
- unsigned int max_discard_sectors);
-extern void blk_queue_max_write_same_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
-extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
-extern void blk_queue_alignment_offset(struct request_queue *q,
- unsigned int alignment);
-extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
-extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
-extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
-extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
-extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
- sector_t offset);
-extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
- sector_t offset);
-extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
-extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
-extern int blk_queue_dma_drain(struct request_queue *q,
- dma_drain_needed_fn *dma_drain_needed,
- void *buf, unsigned int size);
-extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
-extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
-extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
-extern void blk_queue_dma_alignment(struct request_queue *, int);
-extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
+ sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
-extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
-
-/*
- * Number of physical segments as sent to the device.
- *
- * Normally this is the number of discontiguous data segments sent by the
- * submitter. But for data-less command like discard we might have no
- * actual data segments submitted, but the driver might have to add it's
- * own special payload. In that case we still return 1 here so that this
- * special payload will be mapped.
- */
-static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
-{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return 1;
- return rq->nr_phys_segments;
-}
-
-/*
- * Number of discard segments (or ranges) the driver needs to fill in.
- * Each discard bio merged into a request is counted as one segment.
- */
-static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
-{
- return max_t(unsigned short, rq->nr_phys_segments, 1);
-}
-extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
-extern void blk_dump_rq_flags(struct request *, char *);
-extern long nr_blockdev_pages(void);
+struct blk_independent_access_ranges *
+disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
+void disk_set_independent_access_ranges(struct gendisk *disk,
+ struct blk_independent_access_ranges *iars);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
-extern void blk_set_queue_dying(struct request_queue *);
-/*
- * block layer runtime pm functions
- */
-#ifdef CONFIG_PM
-extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
-extern int blk_pre_runtime_suspend(struct request_queue *q);
-extern void blk_post_runtime_suspend(struct request_queue *q, int err);
-extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-extern void blk_set_runtime_active(struct request_queue *q);
-#else
-static inline void blk_pm_runtime_init(struct request_queue *q,
- struct device *dev) {}
-static inline int blk_pre_runtime_suspend(struct request_queue *q)
-{
- return -ENOSYS;
-}
-static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
-static inline void blk_pre_runtime_resume(struct request_queue *q) {}
-static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-static inline void blk_set_runtime_active(struct request_queue *q) {}
-#endif
+void blk_mark_disk_dead(struct gendisk *disk);
+
+struct rq_list {
+ struct request *head;
+ struct request *tail;
+};
+#ifdef CONFIG_BLOCK
/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
@@ -1263,17 +1166,24 @@ static inline void blk_set_runtime_active(struct request_queue *q) {}
* as the lock contention for request_queue lock is reduced.
*
* It is ok not to disable preemption when adding the request to the plug list
- * or when attempting a merge, because blk_schedule_flush_list() will only flush
- * the plug list when the task sleeps by itself. For details, please see
- * schedule() where blk_schedule_flush_plug() is called.
+ * or when attempting a merge. For details, please see schedule() where
+ * blk_flush_plug() is called.
*/
struct blk_plug {
- struct list_head list; /* requests */
- struct list_head mq_list; /* blk-mq requests */
+ struct rq_list mq_list; /* blk-mq requests */
+
+	/* if nr_ios is > 1, we can batch tag/rq allocations */
+ struct rq_list cached_rqs;
+ u64 cur_ktime;
+ unsigned short nr_ios;
+
+ unsigned short rq_count;
+
+ bool multiple_queues;
+ bool has_elevator;
+
struct list_head cb_list; /* md requires an unplug callback */
};
-#define BLK_MAX_REQUEST_COUNT 16
-#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1285,70 +1195,78 @@ struct blk_plug_cb {
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
void *data, int size);
extern void blk_start_plug(struct blk_plug *);
+extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
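Typical usage brackets a batch of related submissions so they can be merged and dispatched together; a minimal sketch (the submission loop itself is assumed):

	struct blk_plug plug;

	blk_start_plug(&plug);
	/* submit a batch of related BIOs here, e.g. via submit_bio() */
	blk_finish_plug(&plug);		/* flushes the plugged requests */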
-extern void blk_flush_plug_list(struct blk_plug *, bool);
-static inline void blk_flush_plug(struct task_struct *tsk)
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
- struct blk_plug *plug = tsk->plug;
-
if (plug)
- blk_flush_plug_list(plug, false);
+ __blk_flush_plug(plug, async);
}
-static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+/*
+ * tsk == current here
+ */
+static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
if (plug)
- blk_flush_plug_list(plug, true);
+ plug->cur_ktime = 0;
+ current->flags &= ~PF_BLOCK_TS;
}
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+int blkdev_issue_flush(struct block_device *bdev);
+long nr_blockdev_pages(void);
+#else /* CONFIG_BLOCK */
+struct blk_plug {
+};
+
+static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
+ unsigned short nr_ios)
{
- struct blk_plug *plug = tsk->plug;
+}
+
+static inline void blk_start_plug(struct blk_plug *plug)
+{
+}
- return plug &&
- (!list_empty(&plug->list) ||
- !list_empty(&plug->mq_list) ||
- !list_empty(&plug->cb_list));
+static inline void blk_finish_plug(struct blk_plug *plug)
+{
}
-/*
- * tag stuff
- */
-extern int blk_queue_start_tag(struct request_queue *, struct request *);
-extern struct request *blk_queue_find_tag(struct request_queue *, int);
-extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
-extern void blk_queue_free_tags(struct request_queue *);
-extern int blk_queue_resize_tags(struct request_queue *, int);
-extern void blk_queue_invalidate_tags(struct request_queue *);
-extern struct blk_queue_tag *blk_init_tags(int, int);
-extern void blk_free_tags(struct blk_queue_tag *);
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
+{
+}
+
+static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
+{
+}
-static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
- int tag)
+static inline int blkdev_issue_flush(struct block_device *bdev)
{
- if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
- return NULL;
- return bqt->tag_index[tag];
+ return 0;
}
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+static inline long nr_blockdev_pages(void)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
-#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
+extern void blk_io_schedule(void);
-extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int flags,
- struct bio **biop);
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp);
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
+#define BLKDEV_ZERO_KILLABLE (1 << 2) /* interruptible by fatal signals */
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
@@ -1359,82 +1277,120 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
- return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
- gfp_mask, flags);
+ return blkdev_issue_discard(sb->s_bdev,
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
{
return blkdev_issue_zeroout(sb->s_bdev,
- block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
gfp_mask, 0);
}
-extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
+static inline bool bdev_is_partition(struct block_device *bdev)
+{
+ return bdev_partno(bdev) != 0;
+}
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
- BLK_DEF_MAX_SECTORS = 2560,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
-#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
+static inline struct queue_limits *bdev_limits(struct block_device *bdev)
+{
+ return &bdev_get_queue(bdev)->limits;
+}
-static inline unsigned long queue_segment_boundary(struct request_queue *q)
+static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
return q->limits.seg_boundary_mask;
}
-static inline unsigned long queue_virt_boundary(struct request_queue *q)
+static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
return q->limits.virt_boundary_mask;
}
-static inline unsigned int queue_max_sectors(struct request_queue *q)
+static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
return q->limits.max_sectors;
}
-static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+static inline unsigned int queue_max_bytes(struct request_queue *q)
+{
+ return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
+}
+
+static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
return q->limits.max_hw_sectors;
}
-static inline unsigned short queue_max_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(const struct request_queue *q)
{
return q->limits.max_segments;
}
-static inline unsigned short queue_max_discard_segments(struct request_queue *q)
+static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
return q->limits.max_discard_segments;
}
-static inline unsigned int queue_max_segment_size(struct request_queue *q)
+static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
return q->limits.max_segment_size;
}
-static inline unsigned short queue_logical_block_size(struct request_queue *q)
+static inline bool queue_emulates_zone_append(struct request_queue *q)
+{
+ return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
+}
+
+static inline bool bdev_emulates_zone_append(struct block_device *bdev)
+{
+ return queue_emulates_zone_append(bdev_get_queue(bdev));
+}
+
+static inline unsigned int
+bdev_max_zone_append_sectors(struct block_device *bdev)
{
- int retval = 512;
+ return bdev_limits(bdev)->max_zone_append_sectors;
+}
- if (q && q->limits.logical_block_size)
- retval = q->limits.logical_block_size;
+static inline unsigned int bdev_max_segments(struct block_device *bdev)
+{
+ return queue_max_segments(bdev_get_queue(bdev));
+}
+
+static inline unsigned short bdev_max_write_streams(struct block_device *bdev)
+{
+ if (bdev_is_partition(bdev))
+ return 0;
+ return bdev_limits(bdev)->max_write_streams;
+}
- return retval;
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
+{
+ return q->limits.logical_block_size;
}
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
return queue_logical_block_size(bdev_get_queue(bdev));
}
-static inline unsigned int queue_physical_block_size(struct request_queue *q)
+static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
return q->limits.physical_block_size;
}
@@ -1444,563 +1400,477 @@ static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
return queue_physical_block_size(bdev_get_queue(bdev));
}
-static inline unsigned int queue_io_min(struct request_queue *q)
+static inline unsigned int queue_io_min(const struct request_queue *q)
{
return q->limits.io_min;
}
-static inline int bdev_io_min(struct block_device *bdev)
+static inline unsigned int bdev_io_min(struct block_device *bdev)
{
return queue_io_min(bdev_get_queue(bdev));
}
-static inline unsigned int queue_io_opt(struct request_queue *q)
+static inline unsigned int queue_io_opt(const struct request_queue *q)
{
return q->limits.io_opt;
}
-static inline int bdev_io_opt(struct block_device *bdev)
+static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
return queue_io_opt(bdev_get_queue(bdev));
}
-static inline int queue_alignment_offset(struct request_queue *q)
+static inline unsigned int
+queue_zone_write_granularity(const struct request_queue *q)
{
- if (q->limits.misaligned)
- return -1;
-
- return q->limits.alignment_offset;
+ return q->limits.zone_write_granularity;
}
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+static inline unsigned int
+bdev_zone_write_granularity(struct block_device *bdev)
{
- unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
-
- return (granularity + lim->alignment_offset - alignment) % granularity;
+ return queue_zone_write_granularity(bdev_get_queue(bdev));
}
-static inline int bdev_alignment_offset(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q->limits.misaligned)
- return -1;
+int bdev_alignment_offset(struct block_device *bdev);
+unsigned int bdev_discard_alignment(struct block_device *bdev);
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->alignment_offset;
-
- return q->limits.alignment_offset;
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
+{
+ return bdev_limits(bdev)->max_discard_sectors;
}
-static inline int queue_discard_alignment(struct request_queue *q)
+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
- if (q->limits.discard_misaligned)
- return -1;
-
- return q->limits.discard_alignment;
+ return bdev_limits(bdev)->discard_granularity;
}
-static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+static inline unsigned int
+bdev_max_secure_erase_sectors(struct block_device *bdev)
{
- unsigned int alignment, granularity, offset;
-
- if (!lim->max_discard_sectors)
- return 0;
-
- /* Why are these in bytes, not sectors? */
- alignment = lim->discard_alignment >> 9;
- granularity = lim->discard_granularity >> 9;
- if (!granularity)
- return 0;
-
- /* Offset of the partition start in 'granularity' sectors */
- offset = sector_div(sector, granularity);
-
- /* And why do we do this modulus *again* in blkdev_issue_discard()? */
- offset = (granularity + alignment - offset) % granularity;
-
- /* Turn it back into bytes, gaah */
- return offset << 9;
+ return bdev_limits(bdev)->max_secure_erase_sectors;
}
-static inline int bdev_discard_alignment(struct block_device *bdev)
+static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->discard_alignment;
-
- return q->limits.discard_alignment;
+ return bdev_limits(bdev)->max_write_zeroes_sectors;
}
-static inline unsigned int bdev_write_same(struct block_device *bdev)
+static inline unsigned int
+bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_same_sectors;
-
- return 0;
+ return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
}
-static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
+static inline bool bdev_nonrot(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_zeroes_sectors;
-
- return 0;
+ return blk_queue_nonrot(bdev_get_queue(bdev));
}
-static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
+static inline bool bdev_synchronous(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_zoned_model(q);
-
- return BLK_ZONED_NONE;
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}
-static inline bool bdev_is_zoned(struct block_device *bdev)
+static inline bool bdev_stable_writes(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (q)
- return blk_queue_is_zoned(q);
-
- return false;
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
+ return true;
+ return q->limits.features & BLK_FEAT_STABLE_WRITES;
}
-static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
+static inline bool blk_queue_write_cache(struct request_queue *q)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_zone_sectors(q);
-
- return 0;
+ return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
+ !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}
-static inline int queue_dma_alignment(struct request_queue *q)
+static inline bool bdev_write_cache(struct block_device *bdev)
{
- return q ? q->dma_alignment : 511;
+ return blk_queue_write_cache(bdev_get_queue(bdev));
}
-static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
- unsigned int len)
+static inline bool bdev_fua(struct block_device *bdev)
{
- unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- return !(addr & alignment) && !(len & alignment);
+ return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}
-/* assumes size > 256 */
-static inline unsigned int blksize_bits(unsigned int size)
+static inline bool bdev_nowait(struct block_device *bdev)
{
- unsigned int bits = 8;
- do {
- bits++;
- size >>= 1;
- } while (size > 256);
- return bits;
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}
-static inline unsigned int block_size(struct block_device *bdev)
+static inline bool bdev_is_zoned(struct block_device *bdev)
{
- return bdev->bd_block_size;
+ return blk_queue_is_zoned(bdev_get_queue(bdev));
}
-static inline bool queue_flush_queueable(struct request_queue *q)
+static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
- return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ return disk_zone_no(bdev->bd_disk, sec);
}
-typedef struct {struct page *v;} Sector;
+static inline sector_t bdev_zone_sectors(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
-unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return q->limits.chunk_sectors;
+}
-static inline void put_dev_sector(Sector p)
+static inline sector_t bdev_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- put_page(p.v);
+ return sector & ~(bdev_zone_sectors(bdev) - 1);
}
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
+static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- return offset ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+ return sector & (bdev_zone_sectors(bdev) - 1);
}
-/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
+static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
- if (!queue_virt_boundary(q))
- return false;
- return __bvec_gap_to_prev(q, bprv, offset);
+ return bdev_offset_from_zone_start(bio->bi_bdev,
+ bio->bi_iter.bi_sector);
}
-/*
- * Check if the two bvecs from two bios can be merged to one segment.
- * If yes, no need to check gap between the two bios since the 1st bio
- * and the 1st bvec in the 2nd bio can be handled in one segment.
- */
-static inline bool bios_segs_mergeable(struct request_queue *q,
- struct bio *prev, struct bio_vec *prev_last_bv,
- struct bio_vec *next_first_bv)
+static inline bool bdev_is_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
- return false;
- if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
- return false;
- if (prev->bi_seg_back_size + next_first_bv->bv_len >
- queue_max_segment_size(q))
- return false;
- return true;
+ return bdev_offset_from_zone_start(bdev, sector) == 0;
}
-static inline bool bio_will_gap(struct request_queue *q,
- struct request *prev_rq,
- struct bio *prev,
- struct bio *next)
-{
- if (bio_has_data(prev) && queue_virt_boundary(q)) {
- struct bio_vec pb, nb;
-
- /*
- * don't merge if the 1st bio starts with non-zero
- * offset, otherwise it is quite difficult to respect
- * sg gap limit. We work hard to merge a huge number of small
- * single bios in case of mkfs.
- */
- if (prev_rq)
- bio_get_first_bvec(prev_rq->bio, &pb);
- else
- bio_get_first_bvec(prev, &pb);
- if (pb.bv_offset)
- return true;
-
- /*
- * We don't need to worry about the situation that the
- * merged segment ends in unaligned virt boundary:
- *
- * - if 'pb' ends aligned, the merged segment ends aligned
- * - if 'pb' ends unaligned, the next bio must include
- * one single bvec of 'nb', otherwise the 'nb' can't
- * merge with 'pb'
- */
- bio_get_last_bvec(prev, &pb);
- bio_get_first_bvec(next, &nb);
-
- if (!bios_segs_mergeable(q, prev, &pb, &nb))
- return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
- }
-
- return false;
+/* Check whether @sector is a multiple of the zone size. */
+static inline bool bdev_is_zone_aligned(struct block_device *bdev,
+ sector_t sector)
+{
+ return bdev_is_zone_start(bdev, sector);
}
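As a worked example, assume a power-of-two zone size of zone_sectors = 524288 (256 MiB with 512-byte sectors): for sector = 1000000, bdev_zone_start() yields 524288, bdev_offset_from_zone_start() yields 1000000 - 524288 = 475712, and bdev_is_zone_start() is true only for sectors 0, 524288, 1048576, and so on.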
-static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+
+static inline unsigned int queue_dma_alignment(const struct request_queue *q)
{
- return bio_will_gap(req->q, req, req->biotail, bio);
+ return q->limits.dma_alignment;
}
-static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+static inline unsigned int
+queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{
- return bio_will_gap(req->q, NULL, bio, req->bio);
+ return q->limits.atomic_write_unit_max;
}
-int kblockd_schedule_work(struct work_struct *work);
-int kblockd_schedule_work_on(int cpu, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
-int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
-int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
+static inline unsigned int
+queue_atomic_write_unit_min_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_min;
+}
-#ifdef CONFIG_BLK_CGROUP
-/*
- * This should not be using sched_clock(). A real patch is in progress
- * to fix this up, until that is in place we need to disable preemption
- * around sched_clock() in this function and set_io_start_time_ns().
- */
-static inline void set_start_time_ns(struct request *req)
+static inline unsigned int
+queue_atomic_write_boundary_bytes(const struct request_queue *q)
{
- preempt_disable();
- req->start_time_ns = sched_clock();
- preempt_enable();
+ return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
}
-static inline void set_io_start_time_ns(struct request *req)
+static inline unsigned int
+queue_atomic_write_max_bytes(const struct request_queue *q)
{
- preempt_disable();
- req->io_start_time_ns = sched_clock();
- preempt_enable();
+ return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}
-static inline uint64_t rq_start_time_ns(struct request *req)
+static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
- return req->start_time_ns;
+ return queue_dma_alignment(bdev_get_queue(bdev));
}
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+static inline unsigned int
+blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
- return req->io_start_time_ns;
+ return lim->dma_alignment | lim->dma_pad_mask;
}
-#else
-static inline void set_start_time_ns(struct request *req) {}
-static inline void set_io_start_time_ns(struct request *req) {}
-static inline uint64_t rq_start_time_ns(struct request *req)
+
+static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
+ unsigned int len)
{
- return 0;
+ unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
+
+ return !(addr & alignment) && !(len & alignment);
}
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+
+/* assumes size > 256 */
+static inline unsigned int blksize_bits(unsigned int size)
{
- return 0;
+ return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}
-#endif
+
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
MODULE_ALIAS("block-major-" __stringify(major) "-*")
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-enum blk_integrity_flags {
- BLK_INTEGRITY_VERIFY = 1 << 0,
- BLK_INTEGRITY_GENERATE = 1 << 1,
- BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
- BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
-};
+bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q);
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q)
+{
+ return true;
+}
+
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
-struct blk_integrity_iter {
- void *prot_buf;
- void *data_buf;
- sector_t seed;
- unsigned int data_size;
- unsigned short interval;
- const char *disk_name;
+enum blk_unique_id {
+ /* these match the Designator Types specified in SPC */
+ BLK_UID_T10 = 1,
+ BLK_UID_EUI64 = 2,
+ BLK_UID_NAA = 3,
};
-typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
+struct block_device_operations {
+ void (*submit_bio)(struct bio *bio);
+ int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
+ unsigned int flags);
+ int (*open)(struct gendisk *disk, blk_mode_t mode);
+ void (*release)(struct gendisk *disk);
+ int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
+ int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
+ unsigned int (*check_events) (struct gendisk *disk,
+ unsigned int clearing);
+ void (*unlock_native_capacity) (struct gendisk *);
+ int (*getgeo)(struct gendisk *, struct hd_geometry *);
+ int (*set_read_only)(struct block_device *bdev, bool ro);
+ void (*free_disk)(struct gendisk *disk);
+	/* this callback is called with swap_lock and sometimes page table lock held */
+ void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+ int (*report_zones)(struct gendisk *, sector_t sector,
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args);
+ char *(*devnode)(struct gendisk *disk, umode_t *mode);
+ /* returns the length of the identifier or a negative errno: */
+ int (*get_unique_id)(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id id_type);
+ struct module *owner;
+ const struct pr_ops *pr_ops;
-struct blk_integrity_profile {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
- const char *name;
+ /*
+ * Special callback for probing GPT entry at a given sector.
+ * Needed by Android devices, used by GPT scanner and MMC blk
+ * driver.
+ */
+ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
-extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
-extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
- struct request *);
-extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
- struct bio *);
+#ifdef CONFIG_COMPAT
+extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
+ unsigned int, unsigned long);
+#else
+#define blkdev_compat_ptr_ioctl NULL
+#endif
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline void blk_wake_io_task(struct task_struct *waiter)
{
- struct blk_integrity *bi = &disk->queue->integrity;
+ /*
+ * If we're polling, the task itself is doing the completions. For
+ * that case, we don't need to signal a wakeup, it's enough to just
+ * mark us as RUNNING.
+ */
+ if (waiter == current)
+ __set_current_state(TASK_RUNNING);
+ else
+ wake_up_process(waiter);
+}
- if (!bi->profile)
- return NULL;
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned long start_time);
+void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned int sectors, unsigned long start_time);
- return bi;
-}
+unsigned long bio_start_io_acct(struct bio *bio);
+void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
+ struct block_device *orig_bdev);
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+/**
+ * bio_end_io_acct - end I/O accounting for bio based drivers
+ * @bio: bio to end account for
+ * @start_time: start time returned by bio_start_io_acct()
+ */
+static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
- return blk_get_integrity(bdev->bd_disk);
+ return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
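A minimal accounting sketch for a BIO-based driver that does not remap the BIO; the I/O processing in the middle is assumed:

	unsigned long start_time = bio_start_io_acct(bio);

	/* ... carry out the I/O described by the bio ... */

	bio_end_io_acct(bio, start_time);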
-static inline bool blk_integrity_rq(struct request *rq)
-{
- return rq->cmd_flags & REQ_INTEGRITY;
-}
+int bdev_validate_blocksize(struct block_device *bdev, int block_size);
+int set_blocksize(struct file *file, int size);
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
- q->limits.max_integrity_segments = segs;
-}
+int lookup_bdev(const char *pathname, dev_t *dev);
-static inline unsigned short
-queue_max_integrity_segments(struct request_queue *q)
-{
- return q->limits.max_integrity_segments;
-}
+void blkdev_show(struct seq_file *seqf, off_t offset);
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
-{
- struct bio_integrity_payload *bip = bio_integrity(req->bio);
- struct bio_integrity_payload *bip_next = bio_integrity(next);
+#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
+#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
+#ifdef CONFIG_BLOCK
+#define BLKDEV_MAJOR_MAX 512
+#else
+#define BLKDEV_MAJOR_MAX 0
+#endif
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
+struct blk_holder_ops {
+ void (*mark_dead)(struct block_device *bdev, bool surprise);
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
-{
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+ /*
+ * Sync the file system mounted on the block device.
+ */
+ void (*sync)(struct block_device *bdev);
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
+ /*
+ * Freeze the file system mounted on the block device.
+ */
+ int (*freeze)(struct block_device *bdev);
-#else /* CONFIG_BLK_DEV_INTEGRITY */
+ /*
+ * Thaw the file system mounted on the block device.
+ */
+ int (*thaw)(struct block_device *bdev);
+};
-struct bio;
-struct block_device;
-struct gendisk;
-struct blk_integrity;
+/*
+ * For filesystems using @fs_holder_ops, the @holder argument passed to
+ * helpers used to open and claim block devices via
+ * bd_prepare_to_claim() must point to a superblock.
+ */
+extern const struct blk_holder_ops fs_holder_ops;
-static inline int blk_integrity_rq(struct request *rq)
-{
- return 0;
-}
-static inline int blk_rq_count_integrity_sg(struct request_queue *q,
- struct bio *b)
-{
- return 0;
-}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
- struct scatterlist *s)
-{
- return 0;
-}
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
-{
- return NULL;
-}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+/*
+ * Return the correct open flags for blkdev_get_by_* for super block flags
+ * as stored in sb->s_flags.
+ */
+#define sb_open_mode(flags) \
+ (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
+ (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+
+struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops);
+struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops);
+int bd_prepare_to_claim(struct block_device *bdev, void *holder,
+ const struct blk_holder_ops *hops);
+void bd_abort_claiming(struct block_device *bdev, void *holder);
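An open/use/close sketch; the device path is illustrative, and the holder and holder ops are NULL since no exclusive claim is taken:

	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_path("/dev/sda", BLK_OPEN_READ,
					   NULL, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);
	bdev = file_bdev(bdev_file);

	/* ... read from bdev ... */

	bdev_fput(bdev_file);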
+
+struct block_device *I_BDEV(struct inode *inode);
+struct block_device *file_bdev(struct file *bdev_file);
+bool disk_live(struct gendisk *disk);
+unsigned int block_size(struct block_device *bdev);
+
+#ifdef CONFIG_BLOCK
+void invalidate_bdev(struct block_device *bdev);
+int sync_blockdev(struct block_device *bdev);
+int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
+int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
+void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask);
+void printk_all_partitions(void);
+int __init early_lookup_bdev(const char *pathname, dev_t *dev);
+#else
+static inline void invalidate_bdev(struct block_device *bdev)
{
- return NULL;
}
-static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+static inline int sync_blockdev(struct block_device *bdev)
{
return 0;
}
-static inline void blk_integrity_register(struct gendisk *d,
- struct blk_integrity *b)
-{
-}
-static inline void blk_integrity_unregister(struct gendisk *d)
-{
-}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
-}
-static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
+static inline int sync_blockdev_nowait(struct block_device *bdev)
{
return 0;
}
-static inline bool blk_integrity_merge_rq(struct request_queue *rq,
- struct request *r1,
- struct request *r2)
+static inline void sync_bdevs(bool wait)
{
- return true;
}
-static inline bool blk_integrity_merge_bio(struct request_queue *rq,
- struct request *r,
- struct bio *b)
+static inline void bdev_statx(const struct path *path, struct kstat *stat,
+ u32 request_mask)
{
- return true;
}
-
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
+static inline void printk_all_partitions(void)
{
- return false;
}
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
+static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
- return false;
+ return -EINVAL;
}
+#endif /* CONFIG_BLOCK */
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+int bdev_freeze(struct block_device *bdev);
+int bdev_thaw(struct block_device *bdev);
+void bdev_fput(struct file *bdev_file);
-struct block_device_operations {
- int (*open) (struct block_device *, fmode_t);
- void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- unsigned int (*check_events) (struct gendisk *disk,
- unsigned int clearing);
- /* ->media_changed() is DEPRECATED, use ->check_events() instead */
- int (*media_changed) (struct gendisk *);
- void (*unlock_native_capacity) (struct gendisk *);
- int (*revalidate_disk) (struct gendisk *);
- int (*getgeo)(struct block_device *, struct hd_geometry *);
- /* this callback is with swap_lock and sometimes page table lock held */
- void (*swap_slot_free_notify) (struct block_device *, unsigned long);
- struct module *owner;
- const struct pr_ops *pr_ops;
+struct io_comp_batch {
+ struct rq_list req_list;
+ bool need_ts;
+ void (*complete)(struct io_comp_batch *);
};
-extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
- unsigned long);
-extern int bdev_read_page(struct block_device *, sector_t, struct page *);
-extern int bdev_write_page(struct block_device *, sector_t, struct page *,
- struct writeback_control *);
-#else /* CONFIG_BLOCK */
-
-struct block_device;
-
-/*
- * stubs for when the block layer is configured out
- */
-#define buffer_heads_over_limit 0
-
-static inline long nr_blockdev_pages(void)
+static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
+ struct queue_limits *limits)
{
- return 0;
-}
+ unsigned int alignment = max(limits->atomic_write_hw_unit_min,
+ limits->atomic_write_hw_boundary);
-struct blk_plug {
-};
-
-static inline void blk_start_plug(struct blk_plug *plug)
-{
+ return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
}
-static inline void blk_finish_plug(struct blk_plug *plug)
+static inline bool bdev_can_atomic_write(struct block_device *bdev)
{
-}
+ struct request_queue *bd_queue = bdev->bd_queue;
+ struct queue_limits *limits = &bd_queue->limits;
-static inline void blk_flush_plug(struct task_struct *task)
-{
+ if (!limits->atomic_write_unit_min)
+ return false;
+
+ if (bdev_is_partition(bdev))
+ return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
+ limits);
+
+ return true;
}
-static inline void blk_schedule_flush_plug(struct task_struct *task)
+static inline unsigned int
+bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
{
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
}
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+static inline unsigned int
+bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
{
- return false;
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
}
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
- sector_t *error_sector)
+static inline int bio_split_rw_at(struct bio *bio,
+ const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes)
{
- return 0;
+ return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
}
-#endif /* CONFIG_BLOCK */
+#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
-#endif
+#endif /* _LINUX_BLKDEV_H */
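
For readers of the blkdev.h changes above: the new atomic-write helpers are meant to be consumed by a caller that validates length and alignment before submitting I/O. Below is a minimal sketch of such a check in a hypothetical caller; example_atomic_write_ok() is not part of this patch, it only combines the helpers introduced above with the usual power-of-two/natural-alignment rule for atomic writes.

#include <linux/align.h>
#include <linux/blkdev.h>
#include <linux/log2.h>

/* Illustrative only: can a write of @len bytes at byte offset @pos
 * be issued atomically on @bdev?
 */
static bool example_atomic_write_ok(struct block_device *bdev,
				    loff_t pos, size_t len)
{
	/* These return 0 when the device (or a misaligned partition)
	 * cannot do atomic writes at all.
	 */
	unsigned int min_bytes = bdev_atomic_write_unit_min_bytes(bdev);
	unsigned int max_bytes = bdev_atomic_write_unit_max_bytes(bdev);

	if (!min_bytes)
		return false;

	/* Length must be a power of two within [min, max], and the
	 * write must be naturally aligned to its own length.
	 */
	if (len < min_bytes || len > max_bytes || !is_power_of_2(len))
		return false;

	return IS_ALIGNED(pos, len);
}
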
diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h
index bef124fde61e..1c91753c3c28 100644
--- a/include/linux/blkpg.h
+++ b/include/linux/blkpg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKPG_H
#define _LINUX_BLKPG_H
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index d2e908586e3d..05c8754456aa 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -1,37 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLKTRACE_H
#define BLKTRACE_H
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
#include <linux/list.h>
+#include <linux/blk_types.h>
#if defined(CONFIG_BLK_DEV_IO_TRACE)
#include <linux/sysfs.h>
struct blk_trace {
+ int version;
int trace_state;
struct rchan *rchan;
unsigned long __percpu *sequence;
unsigned char __percpu *msg_data;
- u16 act_mask;
+ u64 act_mask;
u64 start_lba;
u64 end_lba;
u32 pid;
u32 dev;
struct dentry *dir;
- struct dentry *dropped_file;
- struct dentry *msg_file;
struct list_head running_list;
atomic_t dropped;
};
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern __printf(2, 3)
-void __trace_note_message(struct blk_trace *, const char *fmt, ...);
+__printf(3, 4) void __blk_trace_note_message(struct blk_trace *bt,
+ struct cgroup_subsys_state *css, const char *fmt, ...);
/**
* blk_add_trace_msg - Add a (simple) message to the blktrace stream
@@ -46,49 +47,53 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...);
* NOTE: Can not use 'static inline' due to presence of var args...
*
**/
-#define blk_add_trace_msg(q, fmt, ...) \
+#define blk_add_cgroup_trace_msg(q, css, fmt, ...) \
do { \
- struct blk_trace *bt = (q)->blk_trace; \
+ struct blk_trace *bt; \
+ \
+ rcu_read_lock(); \
+ bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
- __trace_note_message(bt, fmt, ##__VA_ARGS__); \
+ __blk_trace_note_message(bt, css, fmt, ##__VA_ARGS__);\
+ rcu_read_unlock(); \
} while (0)
+#define blk_add_trace_msg(q, fmt, ...) \
+ blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
#define BLK_TN_MAX_MSG 128
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
{
- struct blk_trace *bt = q->blk_trace;
- if (likely(!bt))
- return false;
- return bt->act_mask & BLK_TC_NOTIFY;
+ struct blk_trace *bt;
+ bool ret;
+
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
+ rcu_read_unlock();
+ return ret;
}
-extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
- void *data, size_t len);
+extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
struct block_device *bdev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
-extern void blk_trace_remove_sysfs(struct device *dev);
-extern int blk_trace_init_sysfs(struct device *dev);
-
-extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
# define blk_trace_shutdown(q) do { } while (0)
-# define blk_add_driver_data(q, rq, data, len) do {} while (0)
+# define blk_add_driver_data(rq, data, len) do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
# define blk_trace_startstop(q, start) (-ENOTTY)
-# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
-# define blk_trace_remove_sysfs(dev) do { } while (0)
+# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
# define blk_trace_note_message_enabled(q) (false)
-static inline int blk_trace_init_sysfs(struct device *dev)
+
+static inline int blk_trace_remove(struct request_queue *q)
{
- return 0;
+ return -ENOTTY;
}
-
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_COMPAT
@@ -106,11 +111,17 @@ struct compat_blk_user_trace_setup {
#endif
-extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{
- return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
+ /*
+ * Tracing should ignore starting sector for passthrough requests and
+ * requests where starting sector didn't get set.
+ */
+ if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
+ return 0;
+ return blk_rq_pos(rq);
}
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
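
Note on the blktrace_api.h rework above: q->blk_trace is now accessed under RCU, so any reader outside the setup/teardown paths must follow the same lock/dereference pattern used by blk_add_cgroup_trace_msg() and blk_trace_note_message_enabled(). A minimal sketch of a hypothetical reader (illustrative only, assuming the struct blk_trace layout from this patch):

#include <linux/blktrace_api.h>
#include <linux/rcupdate.h>

/* Illustrative only: report the configured trace start LBA,
 * or 0 when tracing is disabled.
 */
static u64 example_blk_trace_start_lba(struct request_queue *q)
{
	struct blk_trace *bt;
	u64 lba = 0;

	rcu_read_lock();			/* pin the blk_trace instance */
	bt = rcu_dereference(q->blk_trace);	/* NULL when tracing is off */
	if (bt)
		lba = bt->start_lba;
	rcu_read_unlock();			/* bt may be freed after this */

	return lba;
}

The same pattern guards against blk_trace_remove() freeing the structure while a tracepoint handler or sysfs reader is mid-access.
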
diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h
index 225bdb7daec7..511ab123a822 100644
--- a/include/linux/blockgroup_lock.h
+++ b/include/linux/blockgroup_lock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
diff --git a/include/linux/bma150.h b/include/linux/bma150.h
index 97ade7cdc870..4d4a62d49341 100644
--- a/include/linux/bma150.h
+++ b/include/linux/bma150.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2011 Bosch Sensortec GmbH
* Copyright (c) 2011 Unixphere
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _BMA150_H_
@@ -46,8 +33,8 @@ struct bma150_cfg {
 unsigned char lg_hyst; /* Low-G hysteresis */
unsigned char lg_dur; /* Low-G duration */
unsigned char lg_thres; /* Low-G threshold */
- unsigned char range; /* one of BMA0150_RANGE_xxx */
- unsigned char bandwidth; /* one of BMA0150_BW_xxx */
+ unsigned char range; /* one of BMA150_RANGE_xxx */
+ unsigned char bandwidth; /* one of BMA150_BW_xxx */
};
struct bma150_platform_data {
diff --git a/include/linux/bnxt/hsi.h b/include/linux/bnxt/hsi.h
new file mode 100644
index 000000000000..47c34990cf23
--- /dev/null
+++ b/include/linux/bnxt/hsi.h
@@ -0,0 +1,11166 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * Copyright (c) 2018-2025 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
+ */
+
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT 0x8UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT 0x9UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT 0xaUL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT 0xbUL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 target_id;
+ #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+ #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
+ #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
+ __le16 size;
+ __le64 req_addr;
+};
+
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
+ #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
+ #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
+ #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
+ #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
+ #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
+ #define HWRM_QUEUE_QCAPS 0x8cUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
+ #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_PORT_QSTATS_EXT 0xb4UL
+ #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
+ #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
+ #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
+ #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_RESERVED7 0xbaUL
+ #define HWRM_PORT_TX_FIR_CFG 0xbbUL
+ #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
+ #define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
+ #define HWRM_FW_STATE_QCAPS 0xc4UL
+ #define HWRM_FW_STATE_QUIESCE 0xc5UL
+ #define HWRM_FW_STATE_BACKUP 0xc6UL
+ #define HWRM_FW_STATE_RESTORE 0xc7UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_OEM_CMD 0xd4UL
+ #define HWRM_PORT_PRBS_TEST 0xd5UL
+ #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
+ #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
+ #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
+ #define HWRM_PORT_DSC_DUMP 0xd9UL
+ #define HWRM_PORT_EP_TX_QCFG 0xdaUL
+ #define HWRM_PORT_EP_TX_CFG 0xdbUL
+ #define HWRM_PORT_CFG 0xdcUL
+ #define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_MAC_QCAPS 0xdfUL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_REG_POWER_QUERY 0xe1UL
+ #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_START 0xe4UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_COLLECT 0xe5UL
+ #define HWRM_STAT_QUERY_ROCE_STATS 0xe6UL
+ #define HWRM_STAT_QUERY_ROCE_STATS_EXT 0xe7UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_QCAPS 0xf4UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
+ #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
+ #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
+ #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
+ #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
+ #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
+ #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
+ #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
+ #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
+ #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
+ #define HWRM_CFA_COUNTER_CFG 0x11cUL
+ #define HWRM_CFA_COUNTER_QCFG 0x11dUL
+ #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
+ #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
+ #define HWRM_CFA_EEM_QCAPS 0x120UL
+ #define HWRM_CFA_EEM_CFG 0x121UL
+ #define HWRM_CFA_EEM_QCFG 0x122UL
+ #define HWRM_CFA_EEM_OP 0x123UL
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
+ #define HWRM_CFA_TFLIB 0x125UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
+ #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_ENGINE_FUNC_QCFG 0x165UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
+ #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
+ #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
+ #define HWRM_FUNC_PTP_CFG 0x19eUL
+ #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
+ #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
+ #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
+ #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
+ #define HWRM_FUNC_SYNCE_CFG 0x1abUL
+ #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
+ #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
+ #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
+ #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
+ #define HWRM_FUNC_LAG_CREATE 0x1b0UL
+ #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
+ #define HWRM_FUNC_LAG_FREE 0x1b2UL
+ #define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_PROF_QUERY 0x1c3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_QUERY 0x1c4UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_PCIE_QSTATS 0x204UL
+ #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
+ #define HWRM_MFG_TIMERS_QUERY 0x206UL
+ #define HWRM_MFG_OTP_CFG 0x207UL
+ #define HWRM_MFG_OTP_QCFG 0x208UL
+ #define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
+ #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
+ #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
+ #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
+ #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
+ #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
+ #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
+ #define HWRM_PORT_PHY_FDRSTAT 0x232UL
+ #define HWRM_UDCC_QCAPS 0x258UL
+ #define HWRM_UDCC_CFG 0x259UL
+ #define HWRM_UDCC_QCFG 0x25aUL
+ #define HWRM_UDCC_SESSION_CFG 0x25bUL
+ #define HWRM_UDCC_SESSION_QCFG 0x25cUL
+ #define HWRM_UDCC_SESSION_QUERY 0x25dUL
+ #define HWRM_UDCC_COMP_CFG 0x25eUL
+ #define HWRM_UDCC_COMP_QCFG 0x25fUL
+ #define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_QCFG 0x264UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_QCFG 0x265UL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
+ #define HWRM_TF_EM_MOVE 0x2edUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
+ #define HWRM_TF_IF_TBL_SET 0x2feUL
+ #define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TF_RESC_USAGE_SET 0x300UL
+ #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
+ #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
+ #define HWRM_TF_TBL_TYPE_FREE 0x303UL
+ #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
+ #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
+ #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
+ #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
+ #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
+ #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
+ #define HWRM_TFC_SESSION_FID_ADD 0x389UL
+ #define HWRM_TFC_SESSION_FID_REM 0x38aUL
+ #define HWRM_TFC_IDENT_ALLOC 0x38bUL
+ #define HWRM_TFC_IDENT_FREE 0x38cUL
+ #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
+ #define HWRM_TFC_IDX_TBL_SET 0x38fUL
+ #define HWRM_TFC_IDX_TBL_GET 0x390UL
+ #define HWRM_TFC_IDX_TBL_FREE 0x391UL
+ #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
+ #define HWRM_TFC_TCAM_SET 0x393UL
+ #define HWRM_TFC_TCAM_GET 0x394UL
+ #define HWRM_TFC_TCAM_ALLOC 0x395UL
+ #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
+ #define HWRM_TFC_TCAM_FREE 0x397UL
+ #define HWRM_TFC_IF_TBL_SET 0x398UL
+ #define HWRM_TFC_IF_TBL_GET 0x399UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
+ #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
+ #define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
+ #define HWRM_DBG_RING_INFO_GET 0xff1cUL
+ #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
+ #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_DBG_USEQ_ALLOC 0xff23UL
+ #define HWRM_DBG_USEQ_FREE 0xff24UL
+ #define HWRM_DBG_USEQ_FLUSH 0xff25UL
+ #define HWRM_DBG_USEQ_QCAPS 0xff26UL
+ #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
+ #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
+ #define HWRM_DBG_USEQ_RUN 0xff29UL
+ #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
+ #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
+ #define HWRM_DBG_TOKEN_QUERY_AUTH_IDS 0xff2fUL
+ #define HWRM_DBG_TOKEN_CFG 0xff30UL
+ #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
+ #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
+ #define HWRM_NVM_DEFRAG 0xffecUL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
+ #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
+ #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
+ #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_BUSY 0x10UL
+ #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
+ #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 704
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_TARGET_ID_BONO 0xFFF8
+#define HWRM_TARGET_ID_KONG 0xFFF9
+#define HWRM_TARGET_ID_APE 0xFFFA
+#define HWRM_TARGET_ID_TOOLS 0xFFFD
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 10
+#define HWRM_VERSION_UPDATE 3
+#define HWRM_VERSION_RSVD 133
+#define HWRM_VERSION_STR "1.10.3.133"
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_DEBUG_TOKEN_SUPPORTED 0x20000UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ char active_pkg_name[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ __le16 max_req_timeout;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* eject_cmpl (size:128b/16B) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
+ #define EJECT_CMPL_FLAGS_SFT 6
+ #define EJECT_CMPL_FLAGS_ERROR 0x40UL
+ __le16 len;
+ __le32 opaque;
+ __le16 v;
+ #define EJECT_CMPL_V 0x1UL
+ #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
+ #define EJECT_CMPL_ERRORS_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
+ __le16 reserved16;
+ __le32 unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ADPTV_QOS 0x51UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x52UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
+};
+
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
+struct hwrm_async_event_cmpl_reset_notify {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
+};
+
+/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_recovery {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
+};
+
+/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
+struct hwrm_async_event_cmpl_ring_monitor_msg {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
+};
+
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
+};
+
+/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
+struct hwrm_async_event_cmpl_hw_flow_aged {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_req {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_done {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
+};
+
+/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
+struct hwrm_async_event_cmpl_deferred_response {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
+};
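
/*
 * Illustrative sketch (editorial, not part of this patch): event_data1
 * of the PHC-update event packs a 4-bit flags field (master, secondary,
 * failover, RTC update) and the 16 MSBs of the PHC time, per the
 * defines above. Assumes le32_to_cpu() has already been applied; the
 * helper name is hypothetical.
 */
static void bnxt_sketch_phc_update(u32 data1)
{
	u32 flags = (data1 & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>
		    ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT;
	u32 msb = (data1 & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>
		  ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT;

	if (flags == ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE)
		pr_debug("PHC RTC update, PHC time MSBs 0x%x\n", msb);
}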
+
+/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
+struct hwrm_async_event_cmpl_pps_timestamp {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
+};
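
/*
 * Illustrative sketch (editorial, not part of this patch): the PPS
 * timestamp is split across the two data words — the lower 32 bits in
 * event_data1 and the upper 16 bits in event_data2 bits 4..19 — so a
 * consumer reassembles a 48-bit value. Assumes both words are already
 * converted with le32_to_cpu(); the helper name is hypothetical.
 */
static u64 bnxt_sketch_pps_ts(u32 data1, u32 data2)
{
	u64 upper = (data2 & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK) >>
		    ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT;

	return (upper << 32) | data1;
}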
+
+/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
+};
+
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+struct hwrm_async_event_cmpl_dbg_buf_producer {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE 0xcUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE
+};
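
/*
 * Illustrative sketch (editorial, not part of this patch): mapping the
 * debug-buffer trace type (event_data1 bits 0..15, extracted with the
 * TYPE_MASK/SFT pair above) to a printable label. Only a few of the
 * defined types are shown and the label strings are invented for
 * illustration.
 */
static const char *bnxt_sketch_trace_name(u32 type)
{
	switch (type) {
	case ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE:
		return "srt_trace";
	case ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE:
		return "crt_trace";
	case ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE:
		return "l2_hwrm_trace";
	default:
		return "unknown";
	}
}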
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
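
/*
 * Illustrative sketch (editorial, not part of this patch): the HWRM
 * error event carries a severity code (warning/nonfatal/fatal) in the
 * low byte of event_data2, per the defines above. Assumes
 * le32_to_cpu() has already been applied; the helper name is
 * hypothetical.
 */
static bool bnxt_sketch_hwrm_err_fatal(u32 data2)
{
	u32 sev = (data2 & ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK) >>
		  ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT;

	return sev == ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL;
}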
+
+/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_base {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUP_UDCC_SES 0x7UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DB_DROP 0x8UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MD_TEMP 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR 0xaUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR
+};
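
/*
 * Illustrative sketch (editorial, not part of this patch): all
 * error-report variants share event ID 0x45 and distinguish themselves
 * by the error type in the low byte of event_data1, so a handler
 * typically decodes the base layout first and then re-reads the
 * completion as the type-specific struct. Assumes le32_to_cpu() has
 * already been applied; the helper name is hypothetical.
 */
static void bnxt_sketch_error_report(u32 data1)
{
	u32 err = (data1 & ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>
		  ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT;

	switch (err) {
	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
		/* handle as hwrm_async_event_cmpl_error_report_pause_storm */
		break;
	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD:
		/* handle as hwrm_async_event_cmpl_error_report_thermal */
		break;
	default:
		break;
	}
}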
+
+/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_pause_storm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
+};
+
+/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_invalid_signal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
+};
+
+/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_nvm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
+};
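
/*
 * Illustrative sketch (editorial, not part of this patch): the NVM
 * error report puts the failed operation (write/erase) in event_data1
 * bits 8..15 and the error address in event_data2, per the defines
 * above. Assumes le32_to_cpu() has already been applied; the helper
 * name is hypothetical.
 */
static void bnxt_sketch_nvm_err(u32 data1, u32 data2)
{
	u32 op = data1 & ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK;

	pr_warn("NVM %s error at 0x%x\n",
		op == ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE ?
		"write" : "erase", data2);
}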
+
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
+};
+
+/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_thermal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
+};
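
/*
 * Illustrative sketch (editorial, not part of this patch): the thermal
 * report carries the current and threshold temperatures in event_data2
 * and the threshold type plus transition direction in event_data1, per
 * the defines above. Assumes le32_to_cpu() has already been applied;
 * the helper name is hypothetical.
 */
static void bnxt_sketch_thermal(u32 data1, u32 data2)
{
	u32 cur = (data2 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) >>
		  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT;
	u32 thr = (data2 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>
		  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT;
	bool rising = data1 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR;

	pr_warn("temp %u crossed threshold %u (%s)\n",
		cur, thr, rising ? "increasing" : "decreasing");
}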
+
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
+
+/* hwrm_func_reset_input (size:192b/24B) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
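
/*
 * Illustrative sketch (editorial, not part of this patch): building a
 * func_reset request. Per the enables field above, vf_id is only
 * consumed by firmware when VF_ID_VALID is set, which pairs with the
 * RESETVF level. The helper name and the surrounding HWRM request
 * plumbing are hypothetical.
 */
static void bnxt_sketch_fill_func_reset(struct hwrm_func_reset_input *req,
					u16 vf_id)
{
	req->enables = cpu_to_le32(FUNC_RESET_REQ_ENABLES_VF_ID_VALID);
	req->vf_id = cpu_to_le16(vf_id);
	req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF;
}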
+
+/* hwrm_func_getfid_input (size:192b/24B) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le16 num_msix;
+ u8 unused[2];
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
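
/*
 * Illustrative sketch (editorial, not part of this patch): a VF
 * requesting ring resources via func_vf_cfg. Each numeric field is
 * only consumed by firmware when its corresponding enables bit is set.
 * The helper name and the surrounding request plumbing are
 * hypothetical.
 */
static void bnxt_sketch_vf_cfg_rings(struct hwrm_func_vf_cfg_input *req,
				     u16 tx, u16 rx, u16 cmpl)
{
	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
				   FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
				   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS);
	req->num_tx_rings = cpu_to_le16(tx);
	req->num_rx_rings = cpu_to_le16(rx);
	req->num_cmpl_rings = cpu_to_le16(cmpl);
}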
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcaps_output (size:1152b/144B) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ __le16 max_msix_vfs;
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
+ u8 max_schqs;
+ u8 mpc_chnls_cap;
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
+ __le16 max_key_ctxs_alloc;
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ __le16 xid_partition_cap;
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
+ u8 device_serial_number[8];
+ __le16 ctxs_per_partition;
+ __le16 max_tso_segs;
+ __le32 roce_vf_max_av;
+ __le32 roce_vf_max_cq;
+ __le32 roce_vf_max_mrw;
+ __le32 roce_vf_max_qp;
+ __le32 roce_vf_max_srq;
+ __le32 roce_vf_max_gid;
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_CHANGE_UDP_SRCPORT_SUPPORT 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_COMPLIANCE_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MULTI_L2_DB_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_SECURE_ATS_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_STATS_SUPPORTED 0x800UL
+ __le16 max_roce_vfs;
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
+ u8 valid;
+};
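
/*
 * Illustrative sketch (editorial, not part of this patch): testing a
 * capability bit from a func_qcaps response. The flags word is
 * little-endian on the wire, so it is byte-swapped before masking; the
 * helper name is hypothetical.
 */
static bool bnxt_sketch_ptp_capable(struct hwrm_func_qcaps_output *resp)
{
	return le32_to_cpu(resp->flags) & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED;
}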
+
+/* hwrm_func_qcfg_input (size:192b/24B) */
+struct hwrm_func_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcfg_output (size:1408b/176B) */
+struct hwrm_func_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
+ #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
+ #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
+ #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
+ #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ __le16 alloc_msix;
+ __le16 registered_vfs;
+ __le16 l2_doorbell_bar_size_kb;
+ u8 active_endpoints;
+ u8 always_1;
+ __le32 reset_addr_poll;
+ __le16 legacy_l2_db_size_kb;
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 mpc_chnls;
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
+ u8 db_page_size;
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
+ __le16 roce_vnic_id;
+ __le32 partition_min_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le16 host_mtu;
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
+ __le16 stag_vid;
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ u8 lag_id;
+ u8 parif;
+ u8 fw_lag_id;
+ u8 unused_6;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
+ __le16 mirror_vnic_id;
+ u8 max_link_width;
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_LAST FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16
+ u8 max_link_speed;
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_LAST FUNC_QCFG_RESP_MAX_LINK_SPEED_G5
+ u8 negotiated_link_width;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16
+ u8 negotiated_link_speed;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5
+ u8 unused_7[2];
+ u8 pcie_compliance;
+ u8 unused_8;
+ __le16 l2_db_multi_page_size_kb;
+ u8 unused_9[5];
+ u8 valid;
+};
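
/*
 * Illustrative sketch (editorial, not part of this patch): unpacking
 * the min_bw composite field from a func_qcfg response. One 32-bit
 * word carries a 28-bit value, a bits/bytes scale bit, and a 3-bit
 * unit code, per the defines above. Assumes le32_to_cpu() has already
 * been applied; the helper name is hypothetical.
 */
static void bnxt_sketch_decode_min_bw(u32 min_bw)
{
	u32 value = (min_bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK) >>
		    FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT;
	bool bytes = (min_bw & FUNC_QCFG_RESP_MIN_BW_SCALE) ==
		     FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES;
	u32 unit = (min_bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK) >>
		   FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT;

	pr_debug("min_bw %u (%s scale, unit code %u)\n",
		 value, bytes ? "bytes" : "bits", unit);
}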
+
+/* hwrm_func_cfg_input (size:1280b/160B) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 num_msix;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
+ #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
+ #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
+ #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
+ #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
+ #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
+ __le16 num_mcast_filters;
+ __le16 schq_id;
+ __le16 mpc_chnls;
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
+ __le32 partition_min_bw;
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __be16 tpid;
+ __le16 host_mtu;
+ __le32 flags2;
+ #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
+ #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
+ #define FUNC_CFG_REQ_ENABLES2_PCIE_COMPLIANCE 0x1000UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 db_page_size;
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
+ __le16 physical_slot_number;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
+ u8 pcie_compliance;
+ u8 unused_2;
+};
+
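+ /*
+  * Illustrative note (not part of the generated interface): a field in
+  * this request is only applied when its FUNC_CFG_REQ_ENABLES_* bit is
+  * set.  min_bw/max_bw pack a 28-bit value, a scale bit and a 3-bit
+  * unit, so a 10 Gbps cap could plausibly be encoded as:
+  *
+  *   req->max_bw = cpu_to_le32((10 << FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT) |
+  *                             FUNC_CFG_REQ_MAX_BW_SCALE_BITS |
+  *                             FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA);
+  */
+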
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_cfg_cmd_err (size:64b/8B) */
+struct hwrm_func_cfg_cmd_err {
+ u8 code;
+ #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_OUT_OF_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_NPAR_PARTITION_DOWN_FAILED 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_TPID_SET_DFLT_VLAN_NOT_SET 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_RES_ARRAY_ALLOC_FAILED 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_ASSET_TEST_FAILED 0x5UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_RES_UPDATE_FAILED 0x6UL
+ #define FUNC_CFG_CMD_ERR_CODE_APPLY_MAX_BW_FAILED 0x7UL
+ #define FUNC_CFG_CMD_ERR_CODE_ENABLE_EVB_FAILED 0x8UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_ASSET_TEST_FAILED 0x9UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_RES_UPDATE_FAILED 0xaUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_ASSET_TEST_FAILED 0xbUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_RES_UPDATE_FAILED 0xcUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_ASSET_TEST_FAILED 0xdUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_RES_UPDATE_FAILED 0xeUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_ASSET_TEST_FAILED 0xfUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_RES_UPDATE_FAILED 0x10UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_ASSET_TEST_FAILED 0x11UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_RES_UPDATE_FAILED 0x12UL
+ #define FUNC_CFG_CMD_ERR_CODE_FAILED_TO_START_STATS_THREAD 0x13UL
+ #define FUNC_CFG_CMD_ERR_CODE_RDMA_SRIOV_DISABLED 0x14UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_DISABLED 0x15UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_ASSET_TEST_FAILED 0x16UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_RES_UPDATE_FAILED 0x17UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_DISABLED 0x18UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_ASSET_TEST_FAILED 0x19UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_RES_UPDATE_FAILED 0x1aUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_DISABLED 0x1bUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_ASSET_TEST_FAILED 0x1cUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_RES_UPDATE_FAILED 0x1dUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_DISABLED 0x1eUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_ASSET_TEST_FAILED 0x1fUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_RES_UPDATE_FAILED 0x20UL
+ #define FUNC_CFG_CMD_ERR_CODE_INVALID_KDNET_MODE 0x21UL
+ #define FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL 0x22UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_func_qstats_input (size:192b/24B) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_output (size:1408b/176B) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 clear_seq;
+ u8 unused_0[6];
+ u8 valid;
+};
+
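+ /*
+  * Illustrative note: the counters above are cumulative since the last
+  * clear (see hwrm_func_clr_stats below).  Judging by its name,
+  * FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK appears to request the roll-over
+  * mask (maximum value) of each counter rather than its current value,
+  * while ROCE_ONLY/L2_ONLY restrict the query to one traffic type.
+  */
+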
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_qstats_ext_output (size:1536b/192B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_clr_stats_output (size:128b/16B) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+};
+
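+ /*
+  * Illustrative note: a driver typically registers once at probe time,
+  * along the lines of:
+  *
+  *   req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+  *                              FUNC_DRV_RGTR_REQ_ENABLES_VER |
+  *                              FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+  *   req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
+  *
+  * async_event_fwd[] is a 256-bit bitmap of async event ids that the
+  * firmware should forward to this function.
+  */
+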
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
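+ /*
+  * Illustrative note: this request is issued by a PF to register host
+  * buffers (up to ten req_buf_page_addr pages) into which the firmware
+  * can place forwarded VF HWRM commands; vf_id and error_buf_addr are
+  * only meaningful when the corresponding FUNC_BUF_RGTR_REQ_ENABLES_*
+  * bit is set.
+  */
+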
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 driver_type;
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
+ u8 unused_0;
+};
+
+/* hwrm_func_drv_qver_output (size:256b/32B) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 max_tx_scheduler_inputs;
+ __le16 flags;
+ #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+ u8 unused_0[3];
+ u8 valid;
+};
+
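+ /*
+  * Illustrative note: each min/max pair above reports the range of a
+  * resource this function may reserve; the values are typically fed
+  * back into the matching fields of hwrm_func_vf_resource_cfg_input
+  * when provisioning VFs, while vf_reservation_strategy says whether
+  * the firmware reserves the maximum ("maximal") or only the minimum
+  * ("minimal") for each VF up front.
+  */
+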
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 flags;
+ #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ __le32 reserved_ktls_tx_key_ctxs;
+ __le32 reserved_ktls_rx_key_ctxs;
+ __le32 reserved_quic_tx_key_ctxs;
+ __le32 reserved_quic_rx_key_ctxs;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ __le16 mrav_num_entries_units;
+ u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
+ u8 tqm_fp_rings_count;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 tkc_init_offset;
+ u8 rkc_init_offset;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ __le32 tkc_max_entries;
+ __le32 rkc_max_entries;
+ __le16 fast_qpmd_qp_num_entries;
+ u8 rsvd1[5];
+ u8 valid;
+};
+
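+ /*
+  * Illustrative note: these per-type entry sizes and entry-count limits
+  * are what a driver would use to size its context backing store before
+  * issuing hwrm_func_backing_store_cfg below; ctx_init_mask together
+  * with the per-type init_offset fields describes which context types
+  * the firmware expects to be pre-initialized with ctx_kind_initializer.
+  */
+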
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
+};
+
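+ /*
+  * Illustrative note: tqm_ring_pg_size_tqm_ring_lvl packs the page-table
+  * depth in its low nibble and the backing page size in its high nibble,
+  * e.g. one level of indirection over 2 MB pages would be:
+  *
+  *   cfg->tqm_ring_pg_size_tqm_ring_lvl =
+  *           TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M |
+  *           TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1;
+  */
+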
+/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
+ __le32 tkc_num_entries;
+ __le32 rkc_num_entries;
+ __le64 tkc_page_dir;
+ __le64 rkc_page_dir;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ u8 tkc_pg_size_tkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
+ u8 rkc_pg_size_rkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
+ __le16 qp_num_fast_qpmd_entries;
+};
+
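+ /*
+  * Illustrative note: for every context type selected in enables, the
+  * driver supplies a page directory, an entry count and an entry size;
+  * the matching packed pg_size/lvl byte describes the page-table layout,
+  * so the host memory needed per type is roughly num_entries multiplied
+  * by entry_size, spread across pages of the configured size.
+  */
+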
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
+struct hwrm_error_recovery_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
+struct hwrm_error_recovery_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
+ __le32 driver_polling_freq;
+ __le32 master_func_wait_period;
+ __le32 normal_func_wait_period;
+ __le32 master_func_wait_period_after_reset;
+ __le32 max_bailout_time_after_reset;
+ __le32 fw_health_status_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
+ __le32 fw_heartbeat_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
+ __le32 fw_reset_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg_mask;
+ u8 unused_0[3];
+ u8 reg_array_cnt;
+ __le32 reset_reg[16];
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
+ __le32 reset_reg_val[16];
+ u8 delay_after_reset[16];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
+ u8 valid;
+};
+
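+ /*
+  * Illustrative note: each register word above encodes an address space
+  * in its low two bits (ADDR_SPACE_MASK) and a 4-byte-aligned offset in
+  * the remaining bits (ADDR_MASK), e.g.:
+  *
+  *   space  = reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK;
+  *   offset = reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK;
+  *
+  * The driver polls fw_heartbeat_reg/fw_health_status_reg at
+  * driver_polling_freq and, on failure, walks the first reg_array_cnt
+  * entries of reset_reg[], reset_reg_val[] and delay_after_reset[] to
+  * reset the device.
+  */
+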
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
+
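+ /*
+  * Illustrative note: this command is the reply to a firmware-generated
+  * echo request async event; the driver is expected to copy event_data1
+  * and event_data2 from that event back to the firmware unchanged, which
+  * lets the firmware verify that the command channel is alive.
+  */
+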
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_pin_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_pins;
+ u8 state;
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0;
+ u8 valid;
+};
+
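+/*
+ * As with other HWRM *_cfg commands, each pinN_state/pinN_usage field in
+ * the request below is only acted on when the matching bit in 'enables'
+ * is set; fields whose enable bit is clear are left unchanged by
+ * firmware.
+ */
+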
+/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_pin_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
+ u8 pin0_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
+ u8 pin1_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
+ u8 pin2_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
+struct hwrm_func_ptp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
+ u8 ptp_pps_event;
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
+ u8 ptp_freq_adj_dll_source;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
+ u8 ptp_freq_adj_dll_phase;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ext_period;
+ __le32 ptp_freq_adj_ext_up;
+ __le32 ptp_freq_adj_ext_phase_lower;
+ __le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
+};
+
+/* hwrm_func_ptp_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
+struct hwrm_func_ptp_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
+struct hwrm_func_ptp_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 pps_event_ts;
+ __le64 ptm_local_ts;
+ __le64 ptm_system_ts;
+ __le32 ptm_link_delay;
+ u8 unused_0[3];
+ u8 valid;
+};
+
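+/*
+ * hwrm_func_ptp_ts_query returns either the latest PPS event timestamp
+ * (FLAGS_PPS_TIME) or a PCIe Precision Time Measurement sample
+ * (FLAGS_PTM_TIME), where ptm_local_ts/ptm_system_ts are the correlated
+ * device and system clock values and ptm_link_delay the measured link
+ * delay.
+ */
+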
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
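+/*
+ * phc_master_fid/phc_sec_fid configure the primary and secondary owners
+ * of the PHC (PTP hardware clock); failover_timer appears to bound how
+ * long firmware waits before failing the clock over to the secondary
+ * function.  The qcfg command below reports the currently active FIDs
+ * and the most recent failover event.
+ */
+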
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL
+ __le32 next_bs_offset;
+};
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
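+/*
+ * page_size_pbl_level in the request above packs two fields, per the
+ * MASK/SFT definitions: the low nibble selects the PBL (page-buffer-list)
+ * indirection depth (0-2 levels) and the high nibble the backing-store
+ * page size (4K up to 1G).  A value is composed along these lines:
+ *
+ *   val = FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 |
+ *         FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M;
+ */
+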
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ERR_QPC_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 qp_num_fast_qpmd_entries;
+ __le32 rsvd;
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* ts_split_entries (size:128b/16B) */
+struct ts_split_entries {
+ __le32 region_num_entries;
+ u8 tsid;
+ u8 lkup_static_bkt_cnt_exp[2];
+ u8 locked;
+ __le32 rsvd2[2];
+};
+
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
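+/*
+ * The *_split_entries structures above describe how the generic
+ * split_entry_0..3 words of the backing-store cfg/qcfg/qcaps_v2 messages
+ * are interpreted for a given type; subtype_valid_cnt indicates how many
+ * of the four words are meaningful.  This mapping is inferred from the
+ * structure names and the bnxt_en driver's usage.
+ */
+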
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 exact_cnt_bit_map;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le16 max_instance_count;
+ u8 rsvd3;
+ u8 valid;
+};
+
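+/*
+ * next_valid_type in the qcaps_v2 response lets a driver enumerate every
+ * supported backing-store type: query type 0 (QP), then re-issue the
+ * command with the returned next_valid_type until it reports
+ * TYPE_INVALID.
+ */
+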
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
+struct hwrm_func_dbr_pacing_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
+struct hwrm_func_dbr_pacing_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[3];
+ __le32 dbr_stat_db_max_fifo_depth;
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
+};
+
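+/*
+ * The doorbell-pacing registers reported above use the same 32-bit
+ * address-space/offset packing as the error-recovery registers: bits
+ * [1:0] select PCIe config/GRC/BAR0/BAR1 and bits [31:2] the offset.
+ * The watermark and fifo_room mask/shift pairs describe how to extract
+ * those fields from the value read at dbr_stat_db_fifo_reg, roughly
+ * (endianness conversion omitted):
+ *
+ *   room = (val & resp->dbr_stat_db_fifo_reg_fifo_room_mask) >>
+ *          resp->dbr_stat_db_fifo_reg_fifo_room_shift;
+ */
+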
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
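+/*
+ * hwrm_func_drv_if_change is issued when the driver's interface state
+ * changes (FLAGS_UP on open).  The response flags tell the driver whether
+ * resources or capabilities changed underneath it, or whether firmware
+ * completed a hot reset while the interface was down; in either case the
+ * driver is expected to re-query and reconfigure before proceeding.
+ */
+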
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_ENABLE 0x800000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_DISABLE 0x1000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_ENABLE 0x2000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_DISABLE 0x4000000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 mgmt_flag;
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
+ u8 unused_2[6];
+};
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+ u8 code;
+ #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ u8 unused_0[7];
+};
+
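+/*
+ * Link speeds in the PHY messages are encoded in units of 100 Mb/s:
+ * 0x1 = 100 Mb, 0xa = 1 Gb, 0x64 = 10 Gb, 0x3e8 = 100 Gb, with 0xffff
+ * special-cased for 10 Mb.  In the *_SPEEDS2 encoding an offset on the
+ * base value distinguishes the signalling mode, e.g. 0x3e8/0x3e9/0x3ea
+ * for 100 Gb NRZ, PAM4-56 and PAM4-112 respectively.  Note also that the
+ * phy_cfg flags come in ENABLE/DISABLE pairs, so leaving both bits clear
+ * means "do not change".
+ */
+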
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_LINK_TRAINING 0x8UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_PRECODING 0x10UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ u8 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 link_down_reason;
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_TX_LASER_DISABLED 0x20UL
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
+ u8 valid;
+};
+
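The xcvr_identifier_type_tx_lpi_timer word above packs a 24-bit TX LPI timer and an 8-bit transceiver identifier into one little-endian field. A minimal host-side sketch of splitting it, using only the masks and shifts defined above (the sample value is hypothetical, and the timer's units are not stated in this header):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical sample: a QSFP28 module (0x11) with a timer value of 20. */
            uint32_t raw = (0x11u << 24) | 20u;     /* already converted from __le32 */

            uint32_t tx_lpi_timer = raw & 0xffffffu;           /* ..._TX_LPI_TIMER_MASK */
            uint32_t xcvr_type = (raw & 0xff000000u) >> 24;    /* ..._XCVR_IDENTIFIER_TYPE_* */

            printf("tx_lpi_timer=%u xcvr_type=0x%x\n",
                   (unsigned int)tx_lpi_timer, (unsigned int)xcvr_type);
            return 0;
    }
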
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ppb;
+ u8 unused_1[3];
+ u8 ptp_load_control;
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
+ __le64 ptp_adj_phase;
+};
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
+
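Note that several MAC features in the request above are toggled through paired ENABLE/DISABLE flag bits rather than a single read-modify-write bit. A sketch of selecting one such pair; the constants are copied from hwrm_port_mac_cfg_input, while the helper name is invented for illustration:

    #include <stdint.h>

    /* Copied from hwrm_port_mac_cfg_input above. */
    #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE  0x10u
    #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20u

    /* Hypothetical helper: host-endian flags word requesting one state
     * change; presumably setting neither bit leaves the feature as-is. */
    static uint32_t ptp_rx_capture_flags(int enable)
    {
            return enable ? PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE
                          : PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
    }
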
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ __le32 ts_ref_clock_reg_lower;
+ __le32 ts_ref_clock_reg_upper;
+ u8 unused_1[7];
+ u8 valid;
+};
+
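The flags in the PTP qcfg response above distinguish how timestamp registers may be reached (direct access, via HWRM, or a partial mix for the reference clock). A small sketch of testing them, assuming the flag semantics follow their names; the helper names are illustrative:

    #include <stdint.h>

    /* Copied from hwrm_port_mac_ptp_qcfg_output above. */
    #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1u
    #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS   0x8u

    /* Sketch: prefer direct register access when the firmware offers it. */
    static int use_direct_ts_access(uint8_t flags)
    {
            return !!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS);
    }

    static int use_hwrm_ts_access(uint8_t flags)
    {
            return !!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS);
    }
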
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_output (size:128b/16B) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
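The qstats exchange writes the two counter blocks above into the host buffers named in the request, and the response reports how many bytes were written. A sanity-check sketch using the byte sizes from the structure comments (408 B for tx_port_stats, 528 B for rx_port_stats); the assumption that a smaller reported size means only a prefix was filled is mine, not the header's:

    #include <stdint.h>

    /* Sizes from the structure comments above. */
    #define TX_PORT_STATS_SIZE 408u
    #define RX_PORT_STATS_SIZE 528u

    /* Sketch: a firmware may fill only a prefix of each block, so treat
     * anything up to the full structure size as acceptable. */
    static int qstats_sizes_ok(uint16_t tx_stat_size, uint16_t rx_stat_size)
    {
            return tx_stat_size <= TX_PORT_STATS_SIZE &&
                   rx_stat_size <= RX_PORT_STATS_SIZE;
    }
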
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:3904b/488B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+ __le64 rx_bits;
+ __le64 rx_buffer_passed_threshold;
+ __le64 rx_pcs_symbol_err;
+ __le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
+ __le64 rx_fec_corrected_blocks;
+ __le64 rx_fec_uncorrectable_blocks;
+ __le64 rx_filter_miss;
+ __le64 rx_fec_symbol_err;
+};
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ __le16 total_active_cos_queues;
+ u8 flags;
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
+ u8 valid;
+};
+
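Unlike the base qstats request, the extended variant carries the buffer sizes in the request, and the response echoes what was actually written; comparing the two shows whether the firmware shortened the extended blocks (the structure comments above give 256 B for tx_port_stats_ext and 488 B for rx_port_stats_ext). A minimal sketch of that comparison, with the truncation interpretation being an assumption:

    #include <stdint.h>

    /* Sketch: nonzero means the firmware wrote less than the caller asked
     * for, e.g. if it implements an older, shorter rx_port_stats_ext. */
    static int qstats_ext_truncated(uint16_t requested, uint16_t written)
    {
            return written < requested;
    }
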
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
+};
+
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
+};
+
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 ecn_stat_buf_size;
+ u8 flags;
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+ __le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ecn_stat_buf_size;
+ u8 mark_en;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+ __le64 mark_cnt_cos0;
+ __le64 mark_cnt_cos1;
+ __le64 mark_cnt_cos2;
+ __le64 mark_cnt_cos3;
+ __le64 mark_cnt_cos4;
+ __le64 mark_cnt_cos5;
+ __le64 mark_cnt_cos6;
+ __le64 mark_cnt_cos7;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+ u8 unused_0[5];
+};
+
+/* hwrm_port_clr_stats_output (size:128b/16B) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_ts_query_input (size:320b/40B) */
+struct hwrm_port_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+ #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
+ __le16 port_id;
+ u8 unused_0[2];
+ __le16 enables;
+ #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
+ __le16 ts_req_timeout;
+ __le32 ptp_seq_id;
+ __le16 ptp_hdr_offset;
+ u8 unused_1[6];
+};
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ptp_msg_ts;
+ __le16 ptp_msg_seqid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
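In the ts_query request above, PATH is a single-bit selector (0 = TX, 1 = RX), and CURRENT_TIME appears to ask for the current clock time rather than a captured packet timestamp; that is a reading of the flag names, not something the header states. A sketch of composing the flags word:

    #include <stdint.h>

    /* Copied from hwrm_port_ts_query_input above. */
    #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX      0x1u
    #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2u

    static uint32_t ts_query_flags(int rx_path, int current_time)
    {
            uint32_t flags = rx_path ? PORT_TS_QUERY_REQ_FLAGS_PATH_RX : 0u;

            if (current_time)
                    flags |= PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME;
            return flags;
    }
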
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
+struct hwrm_port_phy_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
+ u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
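The supported-speed words in the qcaps response above are plain bitmaps, one bit per named rate. A table-driven decode sketch covering a subset of the FORCE_MODE bit values; the sample mask is hypothetical:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Subset of PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_* above. */
    static const struct { uint16_t bit; const char *name; } speeds[] = {
            { 0x2, "100Mb" },  { 0x8, "1Gb" },    { 0x40, "10Gb" },
            { 0x100, "25Gb" }, { 0x200, "40Gb" }, { 0x400, "50Gb" },
            { 0x800, "100Gb" },
    };

    int main(void)
    {
            uint16_t mask = 0x2u | 0x8u | 0x40u;    /* hypothetical sample */
            size_t i;

            for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
                    if (mask & speeds[i].bit)
                            printf("%s\n", speeds[i].name);
            return 0;
    }
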
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
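The i2c read response above returns up to 64 bytes of module data packed into sixteen 32-bit words, with data_length in the request bounding the transfer. A hedged sketch of copying the payload out; it assumes the words carry the bytes in ascending memory order, which the header itself does not state:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Sketch: copy at most the 64 bytes the data[16] array can hold. */
    static size_t i2c_copy_payload(uint8_t *dst, const void *data, uint8_t len)
    {
            size_t n = len > 64 ? 64 : len;

            memcpy(dst, data, n);
            return n;
    }
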
+/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
+struct hwrm_port_phy_mdio_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ __le16 reg_data;
+ u8 cl45_mdio;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
+struct hwrm_port_phy_mdio_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ u8 cl45_mdio;
+ u8 unused_1;
+};
+
+/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reg_data;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
+struct hwrm_port_led_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
+struct hwrm_port_led_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
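The enables bitmap in the LED cfg request above gives each LED six consecutive bits in a fixed field order (ID, STATE, COLOR, BLINK_ON, BLINK_OFF, GROUP_ID). That regularity allows a computed lookup instead of spelling out all 24 constants, as this sketch shows; the field enum is invented for illustration:

    #include <stdint.h>

    /* Field order within each LED's six enable bits, per the definitions above. */
    enum led_field { LED_ID, LED_STATE, LED_COLOR,
                     LED_BLINK_ON, LED_BLINK_OFF, LED_GROUP_ID };

    /* e.g. led_enable_bit(3, LED_GROUP_ID) == 0x800000, matching
     * PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID. */
    static uint32_t led_enable_bit(unsigned int led, enum led_field field)
    {
            return 1u << (led * 6 + (unsigned int)field);
    }
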
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
+struct hwrm_port_led_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
+struct hwrm_port_led_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 unused_4[3];
+ u8 valid;
+};
+
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+ #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 drv_qmap_cap;
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
+ u8 unused_0;
+};
+
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 valid;
+};
+
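The per-queue service profiles in the qportcfg response above share one value set; note that LOSSLESS and LOSSLESS_ROCE are both 0x1, with the separate *_service_profile_type bytes disambiguating the traffic type. A decode sketch; the strings are illustrative:

    #include <stdint.h>

    /* Values from QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_* above. */
    static const char *service_profile_name(uint8_t sp)
    {
            switch (sp) {
            case 0x0:  return "lossy";
            case 0x1:  return "lossless";        /* also LOSSLESS_ROCE */
            case 0x2:  return "lossy RoCE CNP";
            case 0x3:  return "lossless NIC";
            case 0xff: return "unknown";
            default:   return "unrecognized";
            }
    }
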
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
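The PFC-enable flags are laid out so that bit n enables PFC on priority n and bit n+8 enables the watchdog on priority n, which lets a caller pack two 8-bit priority maps directly into the 32-bit flags word. A sketch that relies only on that layout; the helper name and the bitmap parameters are illustrative:

	/* Hedged sketch: pfc_pri_map bit n -> PRIn_PFC_ENABLED,
	 * wd_pri_map bit n -> PRIn_PFC_WATCHDOG_ENABLED. */
	static void bnxt_fill_pfcenable(struct hwrm_queue_pfcenable_cfg_input *req,
					u8 pfc_pri_map, u8 wd_pri_map, u16 port_id)
	{
		u32 flags = pfc_pri_map | ((u32)wd_pri_map << 8);

		req->flags = cpu_to_le32(flags);
		req->port_id = cpu_to_le16(port_id);
	}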
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
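Since the eight per-priority ENABLES bits are consecutive (0x1 through 0x80), enabling all of them is just 0xff, and a uniform priority-to-CoS mapping is a handful of assignments. A hypothetical sketch, not the driver's actual routine:

	/* Illustrative: map all eight priorities to one CoS queue, both
	 * directions.  The send path is assumed and not shown. */
	static void bnxt_fill_pri2cos_uniform(struct hwrm_queue_pri2cos_cfg_input *req,
					      u8 port_id, u8 cos_qid)
	{
		req->enables = cpu_to_le32(0xff);	/* PRI0..PRI7 fields valid */
		req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR);
		req->port_id = port_id;
		req->pri0_cos_queue_id = cos_qid;
		req->pri1_cos_queue_id = cos_qid;
		req->pri2_cos_queue_id = cos_qid;
		req->pri3_cos_queue_id = cos_qid;
		req->pri4_cos_queue_id = cos_qid;
		req->pri5_cos_queue_id = cos_qid;
		req->pri6_cos_queue_id = cos_qid;
		req->pri7_cos_queue_id = cos_qid;
	}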
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
+ u8 unused_2[4];
+ u8 valid;
+};
+
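All of the *_MIN_BW/*_MAX_BW words in this family share one 32-bit encoding: bits 0-27 carry the value, bit 28 selects bits-vs-bytes scale, and bits 29-31 select the unit. A small set of unpacking helpers as a sketch (names are ours; the QUEUE_ID0 macros are used since the per-queue copies define identical masks):

	static u32 bnxt_bw_raw_value(__le32 bw)
	{
		return le32_to_cpu(bw) &
		       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;
	}

	static bool bnxt_bw_scale_is_bytes(__le32 bw)
	{
		return le32_to_cpu(bw) &
		       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE;
	}

	static u32 bnxt_bw_unit(__le32 bw)	/* e.g. 0 = MEGA, 6 = GIGA */
	{
		return (le32_to_cpu(bw) &
			QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK) >>
		       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT;
	}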
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
+ u8 unused_1[5];
+};
+
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+struct hwrm_queue_dscp_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+struct hwrm_queue_dscp_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcaps_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcaps_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_pfcwd_timeout;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcwd_timeout_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
+ __le16 virtio_net_fid;
+ __le16 vnic_id;
+};
+
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
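By HWRM convention the trailing valid byte of a response is DMA'd after the rest of the payload, so a poller checks it is nonzero, with an ordering barrier, before trusting the other fields. A simplified sketch under that assumption; the production driver also bounds the wait with a timeout:

	/* Hedged sketch: returns true once the DMA'd response is complete. */
	static bool hwrm_vnic_alloc_resp_ready(const struct hwrm_vnic_alloc_output *resp)
	{
		if (!READ_ONCE(resp->valid))
			return false;
		dma_rmb();	/* order the 'valid' read before payload reads */
		return true;
	}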
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:384b/48B) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
+ #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
+ __le16 queue_id;
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ __le32 raw_qp_id;
+};
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
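The enables bitmap gates which request fields the firmware consumes, so updating a single attribute means setting exactly one enable plus its field. A sketch of an MRU-only update; the L2 overhead arithmetic (ETH_HLEN/VLAN_HLEN from the usual kernel headers) is our assumption about what a caller would pass:

	static void bnxt_fill_vnic_mru(struct hwrm_vnic_cfg_input *req,
				       u16 vnic_id, unsigned int mtu)
	{
		req->vnic_id = cpu_to_le16(vnic_id);
		req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_MRU);
		req->mru = cpu_to_le16(mtu + ETH_HLEN + VLAN_HLEN);
	}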
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
+ __le16 max_aggs_supported;
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
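Note that the max_agg_segs/max_aggs fields take the encoded constants above (log2-style selectors), not raw counts. A hedged fill sketch enabling plain and encapsulated TPA with 8 segments per aggregation and 16 concurrent aggregations; the helper name is illustrative:

	static void bnxt_fill_tpa(struct hwrm_vnic_tpa_cfg_input *req, u16 vnic_id)
	{
		req->flags = cpu_to_le32(VNIC_TPA_CFG_REQ_FLAGS_TPA |
					 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA);
		req->enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
					   VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
		req->vnic_id = cpu_to_le16(vnic_id);
		req->max_agg_segs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8);
		req->max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_16);
	}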
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 flags;
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
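The hash_type word composes freely from the bits above; the combination most stacks request is 4-tuple hashing for TCP plus the 2-tuple IP fallbacks. A one-field sketch (the indirection table and key are passed by DMA address via ring_grp_tbl_addr/hash_key_tbl_addr, not shown here):

	req->hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6);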
+
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ u8 code;
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNABLE_TO_GET_RSS_CFG 0x2UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_UNSUPPORTED 0x3UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_ERR 0x4UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_MODE_FAIL 0x5UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RING_GRP_TABLE_ALLOC_ERR 0x6UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_KEY_ALLOC_ERR 0x7UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_DMA_FAILED 0x8UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RX_RING_ALLOC_ERR 0x9UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CMPL_RING_ALLOC_ERR 0xaUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HW_SET_RSS_FAILED 0xbUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CTX_INVALID 0xcUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_INVALID 0xdUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID 0xeUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID
+ u8 unused_0[7];
+};
+
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_ctx_idx;
+ __le16 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 hash_type;
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ u8 unused_0[4];
+ __le32 hash_key[10];
+ u8 hash_mode_flags;
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 max_bds;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
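A plausible header-data-split configuration combines the HDS flags with jumbo placement and the matching enables, roughly as below. This is a sketch of the pattern, not the driver's exact routine; thresholds are caller-chosen:

	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
				   VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);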
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:768b/96B) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
+ #define RING_ALLOC_REQ_ENABLES_DPI_VALID 0x2000UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
+ u8 cmpl_coal_cnt;
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
+ __le16 flags;
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
+ #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ __le16 schq_id;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 steering_tag;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 mpc_chnls_type;
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
+ __le64 cq_handle;
+ __le16 dpi;
+ __le16 unused_5[3];
+};
+
+/* hwrm_ring_alloc_output (size:128b/16B) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
+ u8 valid;
+};
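For hwrm_ring_alloc the mandatory fields depend on ring_type; a TX ring, for instance, needs the page-table DMA address, the entry count, and its completion ring, with the statistics context flagged through enables. A hedged sketch (dma_addr_t and the helper name are our assumptions):

	static void bnxt_fill_tx_ring_alloc(struct hwrm_ring_alloc_input *req,
					    dma_addr_t ring_dma, u32 entries,
					    u16 cmpl_ring_id, u32 stat_ctx_id)
	{
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		req->page_tbl_addr = cpu_to_le64(ring_dma);
		req->length = cpu_to_le32(entries);
		req->cmpl_ring_id = cpu_to_le16(cmpl_ring_id);
		req->enables = cpu_to_le32(RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID);
		req->stat_ctx_id = cpu_to_le32(stat_ctx_id);
	}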
+
+/* hwrm_ring_free_input (size:256b/32B) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
+ __le16 ring_id;
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
+ u8 consumer_idx[3];
+ u8 valid;
+};
+
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
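The qcaps response advertises a min/max pair for each coalescing knob, so a driver clamps its requested value to those bounds before issuing hwrm_ring_cmpl_ring_cfg_aggint_params. A sketch for the maximum interrupt-latency timer, using the kernel's clamp_t() helper:

	/* Hedged: 'wanted_tmr' is a caller-chosen value in timer_units ticks. */
	u16 tmr = clamp_t(u16, wanted_tmr,
			  le16_to_cpu(qcaps->int_lat_tmr_max_min),
			  le16_to_cpu(qcaps->int_lat_tmr_max_max));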
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_grp_free_output (size:128b/16B) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
+#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
+#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
+#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
+ u8 l2_addr[6];
+ u8 num_vlans;
+ u8 t_num_vlans;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
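A sketch of unpacking the flow_id word returned by this response, using only the masks defined above (kernel-style C; the decode helper is illustrative, not driver code):

static void decode_l2_flow_id(__le32 raw)
{
	u32 flow_id = le32_to_cpu(raw);
	u32 value = (flow_id & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK) >>
		    CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT;

	/* Bit 30 selects internal vs. external, bit 31 RX vs. TX. */
	pr_debug("flow %u: %s, %s\n", value,
		 (flow_id & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE) ?
			"external" : "internal",
		 (flow_id & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR) ?
			"TX" : "RX");
}

The same VALUE/TYPE/DIR layout recurs in the tunnel-filter, ntuple-filter, and flow-alloc responses further below.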
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+ __le32 prof_func;
+ __le32 l2_context_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_MAX_VLAN_TAGS 0x2UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_VNIC_ID 0x3UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION 0x4UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION
+ u8 unused_0[7];
+};
+
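A hedged sketch of building the set-rx-mask request above, here enabling broadcast plus all-multicast on one VNIC (kernel-style C, not part of the patch; hwrm_send() stands in for the driver's real request transport and is hypothetical):

static int set_rx_mask_example(u32 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};

	req.vnic_id = cpu_to_le32(vnic_id);
	req.mask = cpu_to_le32(CFA_L2_SET_RX_MASK_REQ_MASK_BCAST |
			       CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
	/* req_type/seq_id/resp_addr are assumed to be filled by the
	 * transport layer; hwrm_send() is hypothetical.
	 */
	return hwrm_send(&req, sizeof(req));
}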
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ u8 ver_hlen;
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ u8 tos;
+ __be16 ip_id;
+ __be16 flags_frag_offset;
+ u8 ttl;
+ u8 protocol;
+ __be32 src_ip_addr;
+ __be32 dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ __be32 ver_tc_flow_label;
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 ttl;
+ __be32 src_ip_addr[4];
+ __be32 dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
+struct hwrm_cfa_encap_data_vxlan {
+ u8 src_mac_addr[6];
+ __le16 unused_0;
+ u8 dst_mac_addr[6];
+ u8 num_vlan_tags;
+ u8 unused_1;
+ __be16 ovlan_tpid;
+ __be16 ovlan_tci;
+ __be16 ivlan_tpid;
+ __be16 ivlan_tci;
+ __le32 l3[10];
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 vni;
+ u8 hdr_rsvd0[3];
+ u8 hdr_rsvd1;
+ u8 hdr_flags;
+ u8 unused[3];
+};
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+struct hwrm_cfa_encap_record_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
+ u8 unused_0[3];
+ __le32 encap_data[20];
+};
+
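The encap_data[20] payload above is exactly the 80 bytes of struct hwrm_cfa_encap_data_vxlan defined earlier. A sketch of wrapping a prepared VXLAN template in the allocation request (kernel-style C, not part of the patch; hwrm_send() is a hypothetical transport helper):

static int alloc_vxlan_encap_record(const struct hwrm_cfa_encap_data_vxlan *tpl)
{
	struct hwrm_cfa_encap_record_alloc_input req = {0};

	/* 20 x __le32 == sizeof(struct hwrm_cfa_encap_data_vxlan) (80B) */
	BUILD_BUG_ON(sizeof(*tpl) != sizeof(req.encap_data));
	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
	memcpy(req.encap_data, tpl, sizeof(req.encap_data));
	return hwrm_send(&req, sizeof(req));	/* hypothetical transport */
}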
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 encap_record_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
+struct hwrm_cfa_encap_record_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_record_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
+ __le16 dst_id;
+ __le16 rfs_ring_tbl_idx;
+ u8 tunnel_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 pri_hint;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
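A sketch of an exact IPv4/TCP 4-tuple steering request built from the fields above (kernel-style C, not part of the patch). Addresses and ports stay big-endian to match the __be field types; hwrm_send() is a hypothetical transport helper:

static int add_tcp4_ntuple(__le64 l2_filter_id, __be32 sip, __be32 dip,
			   __be16 sport, __be16 dport, u16 dst_id)
{
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};

	req.l2_filter_id = l2_filter_id;
	req.enables = cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |
				  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |
				  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |
				  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
				  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
				  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |
				  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |
				  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID);
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP;
	req.src_ipaddr[0] = sip;	/* IPv4 uses only word 0 of [4] */
	req.dst_ipaddr[0] = dip;
	req.src_port = sport;
	req.dst_port = dport;
	req.dst_id = cpu_to_le16(dst_id);
	return hwrm_send(&req, sizeof(req));	/* hypothetical transport */
}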
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_MAC 0x65UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_BC_MC_MAC 0x66UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_VNIC 0x67UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_PF_FID 0x68UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L2_CTXT_ID 0x69UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_CTXT_CFG 0x6aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_DATA_FLD 0x6bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_CFA_LAYOUT 0x6cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_CTXT_ALLOC_FAIL 0x6dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ROCE_FLOW_ERR 0x6eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_OWNER_FID 0x6fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_REF_CNT 0x70UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_FLOW_TYPE 0x71UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_IVLAN 0x72UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_MAX_VLAN_ID 0x73UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_TNL_REQ 0x74UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_ADDR 0x75UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_IVLAN 0x76UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR 0x77UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR_TYPE 0x78UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_T_L3_ADDR_TYPE 0x79UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DST_VNIC_ID 0x7aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VNI 0x7bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_DST_ID 0x7cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_FAIL_ROCE_L2_FLOW 0x7dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_NPAR_VLAN 0x7eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ATSP_ADD 0x7fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DFLT_VLAN_FAIL 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L3_TYPE 0x81UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW 0x82UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+struct hwrm_cfa_decap_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+ __le32 enables;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ __be32 tunnel_id;
+ u8 tunnel_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 src_macaddr[6];
+ u8 unused_2[2];
+ u8 dst_macaddr[6];
+ __be16 ovlan_vid;
+ __be16 ivlan_vid;
+ __be16 t_ovlan_vid;
+ __be16 t_ivlan_vid;
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 unused_3;
+ __le32 unused_4;
+ __be32 src_ipaddr[4];
+ __be32 dst_ipaddr[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __le16 dst_id;
+ __le16 l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 decap_filter_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_decap_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 decap_filter_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_flow_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
+ __le16 src_fid;
+ __le32 tunnel_handle;
+ __le16 action_flags;
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
+ __le16 dst_fid;
+ __be16 l2_rewrite_vlan_tpid;
+ __be16 l2_rewrite_vlan_tci;
+ __le16 act_meter_id;
+ __le16 ref_flow_handle;
+ __be16 ethertype;
+ __be16 outer_vlan_tci;
+ __be16 dmac[3];
+ __be16 inner_vlan_tci;
+ __be16 smac[3];
+ u8 ip_dst_mask_len;
+ u8 ip_src_mask_len;
+ __be32 ip_dst[4];
+ __be32 ip_src[4];
+ __be16 l4_src_port;
+ __be16 l4_src_port_mask;
+ __be16 l4_dst_port;
+ __be16 l4_dst_port_mask;
+ __be32 nat_ip_address[4];
+ __be16 l2_rewrite_dmac[3];
+ __be16 nat_port;
+ __be16 l2_rewrite_smac[3];
+ u8 ip_proto;
+ u8 tunnel_type;
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+};
+
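Note that this request carries MAC addresses as three big-endian 16-bit words (dmac[3]/smac[3], and the l2_rewrite variants), not as u8[6]. Since network byte order is already the wire layout, a plain copy fills them correctly; a minimal sketch (kernel-style C, helper name illustrative):

static void pack_mac_be16(__be16 dst[3], const u8 mac[6])
{
	/* u8[6] in network order has the same byte layout as __be16[3] */
	memcpy(dst, mac, 6);
}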
+/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
+struct hwrm_cfa_flow_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flow_handle;
+ u8 unused_0[2];
+ __le32 flow_id;
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
+ __le64 ext_flow_handle;
+ __le32 flow_counter_id;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_flow_alloc_cmd_err {
+ u8 code;
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_flow_free_input (size:256b/32B) */
+struct hwrm_cfa_flow_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ __le16 unused_0;
+ __le32 flow_counter_id;
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
+struct hwrm_cfa_flow_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet;
+ __le64 byte;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_info_input (size:256b/32B) */
+struct hwrm_cfa_flow_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
+ u8 unused_0[6];
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_info_output (size:5632b/704B) */
+struct hwrm_cfa_flow_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
+ u8 profile;
+ __le16 src_fid;
+ __le16 dst_fid;
+ __le16 l2_ctxt_id;
+ __le64 em_info;
+ __le64 tcam_info;
+ __le64 vfp_tcam_info;
+ __le16 ar_id;
+ __le16 flow_handle;
+ __le32 tunnel_handle;
+ __le16 flow_timer;
+ u8 unused_0[6];
+ __le32 flow_key_data[130];
+ __le32 flow_action_info[30];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
+struct hwrm_cfa_flow_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_flows;
+ __le16 flow_handle_0;
+ __le16 flow_handle_1;
+ __le16 flow_handle_2;
+ __le16 flow_handle_3;
+ __le16 flow_handle_4;
+ __le16 flow_handle_5;
+ __le16 flow_handle_6;
+ __le16 flow_handle_7;
+ __le16 flow_handle_8;
+ __le16 flow_handle_9;
+ u8 unused_0[2];
+ __le32 flow_id_0;
+ __le32 flow_id_1;
+ __le32 flow_id_2;
+ __le32 flow_id_3;
+ __le32 flow_id_4;
+ __le32 flow_id_5;
+ __le32 flow_id_6;
+ __le32 flow_id_7;
+ __le32 flow_id_8;
+ __le32 flow_id_9;
+};
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+struct hwrm_cfa_flow_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet_0;
+ __le64 packet_1;
+ __le64 packet_2;
+ __le64 packet_3;
+ __le64 packet_4;
+ __le64 packet_5;
+ __le64 packet_6;
+ __le64 packet_7;
+ __le64 packet_8;
+ __le64 packet_9;
+ __le64 byte_0;
+ __le64 byte_1;
+ __le64 byte_2;
+ __le64 byte_3;
+ __le64 byte_4;
+ __le64 byte_5;
+ __le64 byte_6;
+ __le64 byte_7;
+ __le64 byte_8;
+ __le64 byte_9;
+ __le16 flow_hits;
+ u8 unused_0[5];
+ u8 valid;
+};
+
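The stats query batches up to ten flows per call: handle slot N in the request pairs with packet_N/byte_N in the response. A hedged sketch (kernel-style C, not part of the patch; hwrm_send_resp() is a hypothetical request+response helper, and the extended flow_id_0..9 words are left zero here):

static int query_flow_stats(const __le16 *handles, int n,
			    struct hwrm_cfa_flow_stats_output *resp)
{
	struct hwrm_cfa_flow_stats_input req = {0};
	__le16 *slot = &req.flow_handle_0;
	int i;

	if (n < 1 || n > 10)
		return -EINVAL;
	req.num_flows = cpu_to_le16(n);
	for (i = 0; i < n; i++)
		slot[i] = handles[i];	/* assumes flow_handle_0..9 are contiguous */
	return hwrm_send_resp(&req, sizeof(req), resp, sizeof(*resp));
}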
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+ char vfr_name[32];
+};
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rx_cfa_code;
+ __le16 tx_cfa_action;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
+struct hwrm_cfa_vfr_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ char vfr_name[32];
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
+struct hwrm_cfa_eem_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
+ __le32 unused_0;
+ __le32 supported;
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
+ __le32 max_entries_supported;
+ __le16 key_entry_size;
+ __le16 record_entry_size;
+ __le16 efc_entry_size;
+ __le16 fid_entry_size;
+ u8 unused_1[7];
+ u8 valid;
+};
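The qcaps response reports per-entry sizes so the host can size the external-memory backing tables before issuing hwrm_cfa_eem_cfg. A minimal sketch of that arithmetic (kernel-style C, helper name illustrative; assumes the response has already been validated):

static u64 eem_key_table_bytes(const struct hwrm_cfa_eem_qcaps_output *qcaps,
			       u32 num_entries)
{
	/* num_entries should not exceed le32_to_cpu(qcaps->max_entries_supported) */
	return (u64)num_entries * le16_to_cpu(qcaps->key_entry_size);
}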
+
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
+struct hwrm_cfa_eem_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
+ __le16 group_id;
+ __le16 unused_0;
+ __le32 num_entries;
+ __le32 unused_1;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ __le16 unused_2;
+ __le32 unused_3;
+};
+
+/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
+struct hwrm_cfa_eem_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 num_entries;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ u8 unused_2[5];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_op_input (size:192b/24B) */
+struct hwrm_cfa_eem_op_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
+ __le16 unused_0;
+ __le16 op;
+ #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
+ #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
+ #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
+ #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
+ #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
+};
+
+/* hwrm_cfa_eem_op_output (size:128b/16B) */
+struct hwrm_cfa_eem_op_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[4];
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ u8 unused_0[6];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 status;
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
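A sketch of registering the IANA-assigned VXLAN UDP port (4789) through the request above (kernel-style C, not part of the patch; hwrm_send() is a hypothetical transport helper):

static int alloc_vxlan_port(void)
{
	struct hwrm_tunnel_dst_port_alloc_input req = {0};

	req.tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req.tunnel_dst_port_val = cpu_to_be16(4789);	/* port is big-endian */
	return hwrm_send(&req, sizeof(req));	/* hypothetical transport */
}

On success, tunnel_dst_port_id from the response is the handle later passed to hwrm_tunnel_dst_port_free.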
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
+ u8 unused_1[6];
+ u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* ctx_hw_stats_ext (size:1408b/176B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+};
+
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
+ u8 unused_0;
+ __le16 stats_dma_length;
+ __le16 flags;
+ #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ __le16 steering_tag;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
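A hedged sketch of binding a DMA-able ctx_hw_stats buffer (defined above) to a new stats context with a one-second firmware update period (kernel-style C, not part of the patch; the dma handle comes from the caller and hwrm_send_resp() is a hypothetical request+response helper):

static int alloc_stat_ctx(dma_addr_t dma, u32 *ctx_id)
{
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output resp = {0};
	int rc;

	req.stats_dma_addr = cpu_to_le64(dma);
	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats));
	req.update_period_ms = cpu_to_le32(1000);
	rc = hwrm_send_resp(&req, sizeof(req), &resp, sizeof(resp));
	if (!rc)
		*ctx_id = le32_to_cpu(resp.stat_ctx_id);
	return rc;
}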
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pcie_stat_size;
+ u8 unused_0[6];
+ __le64 pcie_stat_host_addr;
+};
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pcie_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+};
+
+/* pcie_ctx_hw_stats_v2 (size:4544b/568B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+ __le32 pcie_rd_latency_histogram[12];
+ __le32 pcie_rd_latency_all_normal_count;
+ __le32 unused_2;
+};
+
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* generic_sw_hw_stats (size:1472b/184B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+ __le64 hw_db_recov_dbs_dropped;
+ __le64 hw_db_recov_drops_serviced;
+ __le64 hw_db_recov_dbs_recovered;
+ __le64 hw_db_recov_oo_drop_count;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 host_idx;
+ u8 flags;
+ #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
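
As a usage sketch, encoding a UTC wall-clock time into the request above could look like the following (fill_fw_set_time is a hypothetical helper; only the struct and defines above are taken as given):

	static void fill_fw_set_time(struct hwrm_fw_set_time_input *req,
				     u16 year, u8 month, u8 day,
				     u8 hour, u8 minute, u8 second)
	{
		req->year = cpu_to_le16(year);	/* 0 == FW_SET_TIME_REQ_YEAR_UNKNOWN */
		req->month = month;
		req->day = day;
		req->hour = hour;
		req->minute = minute;
		req->second = second;
		req->millisecond = cpu_to_le16(0);
		req->zone = cpu_to_le16(FW_SET_TIME_REQ_ZONE_UTC);
	}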
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
+ #define STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS 0x190UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS
+ __le16 len;
+ u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
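
Structured-data blocks are chained: each header names its payload (struct_id, len, count) and points at the next header through next_offset, with STRUCT_HDR_NEXT_OFFSET_LAST (0) terminating the chain. A traversal sketch, under the working assumption that next_offset is relative to the start of the current header:

	static void walk_struct_chain(void *buf)
	{
		struct hwrm_struct_hdr *hdr = buf;

		for (;;) {
			u16 next = le16_to_cpu(hdr->next_offset);

			/* consume le16_to_cpu(hdr->struct_id), hdr->count, ... here */
			if (next == STRUCT_HDR_NEXT_OFFSET_LAST)
				break;
			hdr = (void *)hdr + next;	/* assumed header-relative offset */
		}
	}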
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
+struct hwrm_fw_set_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
+struct hwrm_fw_set_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_set_structured_data_cmd_err {
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_ALREADY_ADDED 0x4UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG 0x5UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
+struct hwrm_fw_get_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_SUPPORTED 0x320UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE 0x321UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE
+ u8 count;
+ u8 unused_0;
+};
+
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
+struct hwrm_fw_get_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_get_structured_data_cmd_err {
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 fw_target;
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ char install_ver[32];
+ char active_ver[32];
+ __le16 status_flags;
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 opcode;
+ #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+ #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+ #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+ u8 fw_target;
+ #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
+ u8 loadtype;
+ #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+ u8 flags;
+ __le32 patch_len;
+ __le64 host_addr;
+};
+
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
+struct hwrm_fw_livepatch_cmd_err {
+ u8 code;
+ #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
+ u8 unused_0[7];
+};
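
For logging, the command-error code can be mapped to text; a sketch that simply mirrors the defines above (fw_livepatch_err_str is a hypothetical helper name):

	static const char *fw_livepatch_err_str(u8 code)
	{
		switch (code) {
		case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE:	return "invalid opcode";
		case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET:	return "invalid target";
		case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED:	return "not supported";
		case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED:	return "not installed";
		case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED:	return "not patched";
		case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL:	return "authentication failure";
		case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER:	return "invalid header";
		case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE:	return "invalid size";
		case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED:	return "already patched";
		default:					return "unknown";
		}
	}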
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 phy_temp;
+ u8 om_temp;
+ u8 flags;
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
+ u8 temp2;
+ u8 phy_temp2;
+ u8 om_temp2;
+ u8 warn_threshold;
+ u8 critical_threshold;
+ u8 fatal_threshold;
+ u8 shutdown_threshold;
+ u8 unused_0[4];
+ u8 valid;
+};
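
The flags byte gates every reading: a set bit means the corresponding sensor value is not usable, so it must be checked before any temperature field is trusted. A sketch (board_temp is a hypothetical helper; Celsius units are an assumption here):

	static bool board_temp(struct hwrm_temp_monitor_query_output *resp, u8 *temp)
	{
		if (resp->flags & TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE)
			return false;
		*temp = resp->temp;	/* assumed to be degrees Celsius */
		return true;
	}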
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
+struct hwrm_wol_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
+struct hwrm_wol_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
+struct hwrm_wol_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
+};
+
+/* hwrm_wol_filter_free_output (size:128b/16B) */
+struct hwrm_wol_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
+struct hwrm_wol_filter_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
+struct hwrm_wol_filter_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
+struct hwrm_wol_reason_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
+struct hwrm_wol_reason_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_dbg_read_direct_input (size:256b/32B) */
+struct hwrm_dbg_read_direct_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 read_addr;
+ __le32 read_len32;
+};
+
+/* hwrm_dbg_read_direct_output (size:128b/16B) */
+struct hwrm_dbg_read_direct_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 crc32;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_component_disable_caps;
+ #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+ __le32 flags;
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
+ #define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 flags;
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+ __le32 coredump_component_disable_flags;
+ #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_size;
+ __le32 flags;
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+ #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+ #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+ __le16 async_cmpl_ring;
+ u8 unused_2[2];
+ __le32 crashdump_size;
+ u8 unused_3[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[2];
+ __le32 segment_len;
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+ u8 unused_0[1];
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
+ __le32 instance;
+ __le32 next_offset;
+};
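
The _MASK/_SFT define pairs used throughout this header decode packed fields: mask first, then shift right. For the data header above, the actual length and the indirect-access flag both come out of the single flags_length word:

	static u32 coredump_actual_len(const struct coredump_data_hdr *hdr)
	{
		u32 fl = le32_to_cpu(hdr->flags_length);

		return (fl & COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK) >>
		       COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT;
	}

	static bool coredump_is_indirect(const struct coredump_data_hdr *hdr)
	{
		return !!(le32_to_cpu(hdr->flags_length) &
			  COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS);
	}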
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
+ u8 unused_0[3];
+ __le32 fw_ring_id;
+};
+
+/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 producer_index;
+ __le32 consumer_index;
+ __le32 cag_vector_ctrl;
+ __le16 st_tag;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
+struct hwrm_dbg_log_buffer_flush_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE 0x0UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE 0x1UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE 0x2UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE 0x3UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE 0x4UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE 0x5UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE 0x7UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE 0x8UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE 0x9UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE 0xaUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE 0xcUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE
+ u8 unused_1[2];
+ __le32 flags;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL
+};
+
+/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */
+struct hwrm_dbg_log_buffer_flush_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 current_buffer_offset;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_write_input (size:448b/56B) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
+ #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL
+ __le32 dir_item_length;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_WRITE_FAILED 0x3UL
+ #define NVM_WRITE_CMD_ERR_CODE_REQD_ERASE_FAILED 0x4UL
+ #define NVM_WRITE_CMD_ERR_CODE_VERIFY_FAILED 0x5UL
+ #define NVM_WRITE_CMD_ERR_CODE_INVALID_HEADER 0x6UL
+ #define NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED 0x7UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 nvm_cfg_ver_maj;
+ u8 nvm_cfg_ver_min;
+ u8 nvm_cfg_ver_upd;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
+ char pkg_name[16];
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
+ u8 security_soc_fw_major;
+ u8 security_soc_fw_minor;
+ u8 security_soc_fw_build;
+ u8 security_soc_fw_patch;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_DEFRAG_FAILED 0x5UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x6UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ #define NVM_GET_VARIABLE_REQ_FLAGS_VALIDATE_OPT_VALUE 0x2UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 flags;
+ #define NVM_GET_VARIABLE_RESP_FLAGS_VALIDATE_OPT_VALUE 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x4UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x5UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x6UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x7UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x8UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACTION_NOT_SUPPORTED 0x4UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x5UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x6UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x7UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x8UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x9UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_qlist_input (size:128b/16B) */
+struct hwrm_selftest_qlist_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
+struct hwrm_selftest_qlist_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test_name[8][32];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
+ u8 valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
+struct hwrm_selftest_exec_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
+struct hwrm_selftest_exec_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
+struct hwrm_selftest_irq_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_irq_output (size:128b/16B) */
+struct hwrm_selftest_irq_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* dbc_dbc (size:64b/8B) */
+struct dbc_dbc {
+ __le32 index;
+ #define DBC_DBC_INDEX_MASK 0xffffffUL
+ #define DBC_DBC_INDEX_SFT 0
+ #define DBC_DBC_EPOCH 0x1000000UL
+ #define DBC_DBC_TOGGLE_MASK 0x6000000UL
+ #define DBC_DBC_TOGGLE_SFT 25
+ __le32 type_path_xid;
+ #define DBC_DBC_XID_MASK 0xfffffUL
+ #define DBC_DBC_XID_SFT 0
+ #define DBC_DBC_PATH_MASK 0x3000000UL
+ #define DBC_DBC_PATH_SFT 24
+ #define DBC_DBC_PATH_ROCE (0x0UL << 24)
+ #define DBC_DBC_PATH_L2 (0x1UL << 24)
+ #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
+ #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
+ #define DBC_DBC_VALID 0x4000000UL
+ #define DBC_DBC_DEBUG_TRACE 0x8000000UL
+ #define DBC_DBC_TYPE_MASK 0xf0000000UL
+ #define DBC_DBC_TYPE_SFT 28
+ #define DBC_DBC_TYPE_SQ (0x0UL << 28)
+ #define DBC_DBC_TYPE_RQ (0x1UL << 28)
+ #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
+ #define DBC_DBC_TYPE_CQ (0x4UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
+ #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
+ #define DBC_DBC_TYPE_NQ (0xaUL << 28)
+ #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
+ #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
+ #define DBC_DBC_TYPE_NULL (0xfUL << 28)
+ #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
+};
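
A doorbell is a single 64-bit little-endian value: producer index (plus epoch/toggle bits) in the low word, and XID, path, valid bit, and type in the high word. Composing one for a send queue might look like the following sketch (build_sq_db is a hypothetical helper; the actual write to the doorbell BAR is outside this sketch):

	static void build_sq_db(struct dbc_dbc *db, u32 xid, u32 prod_idx)
	{
		db->index = cpu_to_le32(prod_idx & DBC_DBC_INDEX_MASK);
		db->type_path_xid = cpu_to_le32((xid & DBC_DBC_XID_MASK) |
						DBC_DBC_PATH_L2 |
						DBC_DBC_VALID |
						DBC_DBC_TYPE_SQ);
	}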
+
+/* db_push_start (size:64b/8B) */
+struct db_push_start {
+ u64 db;
+ #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_START_DB_INDEX_SFT 0
+ #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_START_DB_PI_LO_SFT 24
+ #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_START_DB_XID_SFT 32
+ #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_START_DB_PI_HI_SFT 52
+ #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_START_DB_TYPE_SFT 60
+ #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
+};
+
+/* db_push_end (size:64b/8B) */
+struct db_push_end {
+ u64 db;
+ #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_END_DB_INDEX_SFT 0
+ #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_END_DB_PI_LO_SFT 24
+ #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_END_DB_XID_SFT 32
+ #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_END_DB_PI_HI_SFT 52
+ #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
+ #define DB_PUSH_END_DB_PATH_SFT 56
+ #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
+ #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
+ #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
+ #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
+ #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_SFT 60
+ #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
+};
+
+/* db_push_info (size:64b/8B) */
+struct db_push_info {
+ u32 push_size_push_index;
+ #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
+ #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
+ #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
+ u32 reserved32;
+};
+
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+ #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+ #define FW_STATUS_REG_RECOVERING 0x400000UL
+ #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
+};
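
The low 16 bits of the register carry the status code (READY, 0x8000, indicating healthy firmware) and the upper bits qualify degraded or crashed states. Two predicate sketches over a raw register value:

	static bool fw_is_ready(u32 fw_status)
	{
		return (fw_status & FW_STATUS_REG_CODE_MASK) ==
		       FW_STATUS_REG_CODE_READY;
	}

	static bool fw_is_recoverable(u32 fw_status)
	{
		return !!(fw_status & FW_STATUS_REG_RECOVERABLE);
	}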
+
+/* hcomm_status (size:64b/8B) */
+struct hcomm_status {
+ u32 sig_ver;
+ #define HCOMM_STATUS_VER_MASK 0xffUL
+ #define HCOMM_STATUS_VER_SFT 0
+ #define HCOMM_STATUS_VER_LATEST 0x1UL
+ #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
+ #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
+ #define HCOMM_STATUS_SIGNATURE_SFT 8
+ #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
+ #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
+ u32 fw_status_loc;
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
+ #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
+ #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
+};
+#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
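
The hcomm_status block is how the host locates the firmware status word: validate the signature, then split fw_status_loc into an address-space selector (low two bits) and a 4-byte-aligned offset. A decode sketch (hcomm_decode is a hypothetical helper; treating the masked value directly as a byte offset is an assumption suggested by the field's alignment):

	static bool hcomm_decode(u32 sig_ver, u32 loc, u32 *space, u32 *offset)
	{
		if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
		    HCOMM_STATUS_SIGNATURE_VAL)
			return false;
		*space = loc & HCOMM_STATUS_TRUE_ADDR_SPACE_MASK;
		*offset = loc & HCOMM_STATUS_TRUE_OFFSET_MASK;	/* assumed byte offset */
		return true;
	}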
+
+#endif /* _BNXT_HSI_H_ */
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
new file mode 100644
index 000000000000..25df9260d206
--- /dev/null
+++ b/include/linux/bootconfig.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_XBC_H
+#define _LINUX_XBC_H
+/*
+ * Extra Boot Config
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/types.h>
+bool __init cmdline_has_extra_options(void);
+#else /* !__KERNEL__ */
+/*
+ * NOTE: This is only for tools/bootconfig, because tools/bootconfig will
+ * run the parser sanity test.
+ * This does NOT mean linux/bootconfig.h is available in user space.
+ * However, if you change this file, please make sure tools/bootconfig
+ * still builds and runs without issue.
+ */
+#endif
+
+#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
+#define BOOTCONFIG_MAGIC_LEN 12
+#define BOOTCONFIG_ALIGN_SHIFT 2
+#define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT)
+#define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1)
+
+/**
+ * xbc_calc_checksum() - Calculate checksum of bootconfig
+ * @data: Bootconfig data.
+ * @size: The size of the bootconfig data.
+ *
+ * Calculate the checksum value of the bootconfig data.
+ * The checksum is used, together with BOOTCONFIG_MAGIC and the size,
+ * when embedding the bootconfig in the initrd image.
+ */
+static inline __init uint32_t xbc_calc_checksum(void *data, uint32_t size)
+{
+ unsigned char *p = data;
+ uint32_t ret = 0;
+
+ while (size--)
+ ret += *p++;
+
+ return ret;
+}
+
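+/*
+ * Illustrative sketch (not part of this header): when a bootconfig is
+ * appended to an initrd image, the checksum computed above is stored in
+ * a trailer together with the data size and BOOTCONFIG_MAGIC, roughly:
+ *
+ *	[bootconfig data][padding][size (u32)][checksum (u32)][magic]
+ *
+ * See the bootconfig documentation and tools/bootconfig for the
+ * authoritative layout.
+ */
+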
+/* XBC tree node */
+struct xbc_node {
+ uint16_t next;
+ uint16_t child;
+ uint16_t parent;
+ uint16_t data;
+} __attribute__ ((__packed__));
+
+#define XBC_KEY 0
+#define XBC_VALUE (1 << 15)
+/* Maximum size of boot config is 32KB - 1 */
+#define XBC_DATA_MAX (XBC_VALUE - 1)
+
+#define XBC_NODE_MAX 8192
+#define XBC_KEYLEN_MAX 256
+#define XBC_DEPTH_MAX 16
+
+/* Node tree access raw APIs */
+struct xbc_node * __init xbc_root_node(void);
+int __init xbc_node_index(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node);
+const char * __init xbc_node_get_data(struct xbc_node *node);
+
+/**
+ * xbc_node_is_value() - Test the node is a value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a value node. Returns true if it is, false if not.
+ */
+static inline __init bool xbc_node_is_value(struct xbc_node *node)
+{
+ return node->data & XBC_VALUE;
+}
+
+/**
+ * xbc_node_is_key() - Test the node is a key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a key node. Returns true if it is, false if not.
+ */
+static inline __init bool xbc_node_is_key(struct xbc_node *node)
+{
+ return !xbc_node_is_value(node);
+}
+
+/**
+ * xbc_node_is_array() - Test the node is an arrayed value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is an arrayed value node, i.e. a value node that
+ * chains further array entries.
+ */
+static inline __init bool xbc_node_is_array(struct xbc_node *node)
+{
+ return xbc_node_is_value(node) && node->child != 0;
+}
+
+/**
+ * xbc_node_is_leaf() - Test the node is a leaf key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a leaf key node, i.e. a key node that has a value
+ * node or no child. Returns true if it is a leaf node, false if not.
+ * Note that a leaf node can have subkey nodes in addition to the
+ * value node.
+ */
+static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
+{
+ return xbc_node_is_key(node) &&
+ (!node->child || xbc_node_is_value(xbc_node_get_child(node)));
+}
+
+/* Tree-based key-value access APIs */
+struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent,
+ const char *key);
+
+const char * __init xbc_node_find_value(struct xbc_node *parent,
+ const char *key,
+ struct xbc_node **vnode);
+
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+ struct xbc_node *leaf);
+
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+ struct xbc_node **leaf);
+
+/**
+ * xbc_find_value() - Find a value which matches the key
+ * @key: Search key
+ * @vnode: A container pointer of XBC value node.
+ *
+ * Search for a value whose key matches @key in the whole XBC tree and return
+ * the value if found. The found value node is stored in *@vnode.
+ * Note that this can return a 0-length string and store NULL in *@vnode for
+ * a key-only (non-value) entry.
+ */
+static inline const char * __init
+xbc_find_value(const char *key, struct xbc_node **vnode)
+{
+ return xbc_node_find_value(NULL, key, vnode);
+}
+
+/**
+ * xbc_find_node() - Find a node which matches the key
+ * @key: Search key
+ *
+ * Search for a (key) node whose key matches @key in the whole XBC tree and
+ * return the node if found. If not found, returns NULL.
+ */
+static inline struct xbc_node * __init xbc_find_node(const char *key)
+{
+ return xbc_node_find_subkey(NULL, key);
+}
+
+/**
+ * xbc_node_get_subkey() - Return the first subkey node if exists
+ * @node: Parent node
+ *
+ * Return the first subkey node of @node. If @node has no child, or only
+ * a value node, this will return NULL.
+ */
+static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node)
+{
+ struct xbc_node *child = xbc_node_get_child(node);
+
+ if (child && xbc_node_is_value(child))
+ return xbc_node_get_next(child);
+ else
+ return child;
+}
+
+/**
+ * xbc_array_for_each_value() - Iterate value nodes on an array
+ * @anode: An XBC arrayed value node
+ * @value: A value
+ *
+ * Iterate array value nodes and values, starting from @anode. This is expected
+ * to be used with xbc_find_value() and xbc_node_find_value(), so that the user
+ * can process each array entry node.
+ */
+#define xbc_array_for_each_value(anode, value) \
+ for (value = xbc_node_get_data(anode); anode != NULL ; \
+ anode = xbc_node_get_child(anode), \
+ value = anode ? xbc_node_get_data(anode) : NULL)
+
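+/*
+ * Example usage (illustrative only; the "ftrace.event" key is made up):
+ *
+ *	struct xbc_node *anode;
+ *	const char *val;
+ *
+ *	val = xbc_find_value("ftrace.event", &anode);
+ *	if (val && xbc_node_is_array(anode))
+ *		xbc_array_for_each_value(anode, val)
+ *			pr_info("event: %s\n", val);
+ */
+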
+/**
+ * xbc_node_for_each_child() - Iterate child nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate the child nodes of @parent. Each child node is stored in @child.
+ * The @child can be a mixture of a value node and subkey nodes.
+ */
+#define xbc_node_for_each_child(parent, child) \
+ for (child = xbc_node_get_child(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
+/**
+ * xbc_node_for_each_subkey() - Iterate child subkey nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate the subkey nodes of @parent. Each subkey node is stored in @child.
+ * Unlike xbc_node_for_each_child(), @child is always a subkey node.
+ */
+#define xbc_node_for_each_subkey(parent, child) \
+ for (child = xbc_node_get_subkey(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
+/**
+ * xbc_node_for_each_array_value() - Iterate array entries of a given key
+ * @node: An XBC node.
+ * @key: A key string searched under @node
+ * @anode: Iterated XBC node of array entry.
+ * @value: Iterated value of array entry.
+ *
+ * Iterate the array entries of the given @key under @node. Each array entry
+ * node and its value are stored in @anode and @value. If @node doesn't have
+ * a @key node, this does nothing.
+ * Note that even if the found key node has only one value (not an array),
+ * this executes the block once. However, if the found key node has no value
+ * (a key-only node), this does nothing. So don't use this to test for the
+ * existence of a key-value pair.
+ */
+#define xbc_node_for_each_array_value(node, key, anode, value) \
+ for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
+ anode = xbc_node_get_child(anode), \
+ value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_key_value() - Iterate key-value pairs under a node
+ * @node: An XBC node.
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs under @node. Each key node and value string are
+ * stored in @knode and @value respectively.
+ */
+#define xbc_node_for_each_key_value(node, knode, value) \
+ for (knode = NULL, value = xbc_node_find_next_key_value(node, &knode);\
+ knode != NULL; value = xbc_node_find_next_key_value(node, &knode))
+
+/**
+ * xbc_for_each_key_value() - Iterate key-value pairs
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs in the whole XBC tree. Each key node and value string
+ * are stored in @knode and @value respectively.
+ */
+#define xbc_for_each_key_value(knode, value) \
+ xbc_node_for_each_key_value(NULL, knode, value)
+
+/* Compose partial key */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+ struct xbc_node *node, char *buf, size_t size);
+
+/**
+ * xbc_node_compose_key() - Compose full key string of the XBC node
+ * @node: An XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the full-length key of @node into @buf. Returns the total
+ * length of the key stored in @buf, -EINVAL if @node is NULL, or
+ * -ERANGE if the key depth exceeds the maximum depth.
+ */
+static inline int __init xbc_node_compose_key(struct xbc_node *node,
+ char *buf, size_t size)
+{
+ return xbc_node_compose_key_after(NULL, node, buf, size);
+}
+
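+/*
+ * Illustrative sketch (not part of this header): dump every key-value
+ * pair in the tree with its full key composed into a local buffer.
+ *
+ *	struct xbc_node *knode;
+ *	const char *val;
+ *	char key[XBC_KEYLEN_MAX];
+ *
+ *	xbc_for_each_key_value(knode, val)
+ *		if (xbc_node_compose_key(knode, key, sizeof(key)) > 0)
+ *			pr_info("%s = %s\n", key, val);
+ */
+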
+/* XBC node initializer */
+int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
+
+/* XBC node and size information */
+int __init xbc_get_info(int *node_size, size_t *data_size);
+
+/* XBC cleanup data structures */
+void __init _xbc_exit(bool early);
+
+static __always_inline void xbc_exit(void)
+{
+ _xbc_exit(false);
+}
+
+/* XBC embedded bootconfig data in kernel */
+#ifdef CONFIG_BOOT_CONFIG_EMBED
+const char * __init xbc_get_embedded_bootconfig(size_t *size);
+#else
+static inline const char *xbc_get_embedded_bootconfig(size_t *size)
+{
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
deleted file mode 100644
index e223d91b6439..000000000000
--- a/include/linux/bootmem.h
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- */
-#ifndef _LINUX_BOOTMEM_H
-#define _LINUX_BOOTMEM_H
-
-#include <linux/mmzone.h>
-#include <linux/mm_types.h>
-#include <asm/dma.h>
-#include <asm/processor.h>
-
-/*
- * simple boot-time physical memory area allocator.
- */
-
-extern unsigned long max_low_pfn;
-extern unsigned long min_low_pfn;
-
-/*
- * highest page
- */
-extern unsigned long max_pfn;
-/*
- * highest possible page
- */
-extern unsigned long long max_possible_pfn;
-
-#ifndef CONFIG_NO_BOOTMEM
-/*
- * node_bootmem_map is a map pointer - the bits represent all physical
- * memory pages (including holes) on the node.
- */
-typedef struct bootmem_data {
- unsigned long node_min_pfn;
- unsigned long node_low_pfn;
- void *node_bootmem_map;
- unsigned long last_end_off;
- unsigned long hint_idx;
- struct list_head list;
-} bootmem_data_t;
-
-extern bootmem_data_t bootmem_node_data[];
-#endif
-
-extern unsigned long bootmem_bootmap_pages(unsigned long);
-
-extern unsigned long init_bootmem_node(pg_data_t *pgdat,
- unsigned long freepfn,
- unsigned long startpfn,
- unsigned long endpfn);
-extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-
-extern unsigned long free_all_bootmem(void);
-extern void reset_node_managed_pages(pg_data_t *pgdat);
-extern void reset_all_zones_managed_pages(void);
-
-extern void free_bootmem_node(pg_data_t *pgdat,
- unsigned long addr,
- unsigned long size);
-extern void free_bootmem(unsigned long physaddr, unsigned long size);
-extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
-
-/*
- * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
- * the architecture-specific code should honor this).
- *
- * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
- * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
- * already was reserved.
- */
-#define BOOTMEM_DEFAULT 0
-#define BOOTMEM_EXCLUSIVE (1<<0)
-
-extern int reserve_bootmem(unsigned long addr,
- unsigned long size,
- int flags);
-extern int reserve_bootmem_node(pg_data_t *pgdat,
- unsigned long physaddr,
- unsigned long size,
- int flags);
-
-extern void *__alloc_bootmem(unsigned long size,
- unsigned long align,
- unsigned long goal);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_node_high(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit) __malloc;
-extern void *__alloc_bootmem_low(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_low_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-
-#ifdef CONFIG_NO_BOOTMEM
-/* We are using top down, so it is safe to use 0 here */
-#define BOOTMEM_LOW_LIMIT 0
-#else
-#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
-#endif
-
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
-
-#define alloc_bootmem(x) \
- __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_align(x, align) \
- __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_nopanic(x) \
- __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages(x) \
- __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_nopanic(x) \
- __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-
-#define alloc_bootmem_low(x) \
- __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_low_pages_nopanic(x) \
- __alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem_low(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages_node(pgdat, x) \
- __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-
-
-#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
-
-/* FIXME: use MEMBLOCK_ALLOC_* variants here */
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
-
-/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
-void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr,
- phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid);
-void __memblock_free_early(phys_addr_t base, phys_addr_t size);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
-
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- nid);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
-#else
-
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-
-
-/* Fall back to all the existing bootmem APIs */
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low_nopanic(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return __alloc_bootmem_nopanic(size, align, min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
- SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
- min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_nopanic(
- phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
- min_addr, max_addr);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- free_bootmem_node(NODE_DATA(nid), base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem_late(base, size);
-}
-#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
-
-#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
-extern void *alloc_remap(int nid, unsigned long size);
-#else
-static inline void *alloc_remap(int nid, unsigned long size)
-{
- return NULL;
-}
-#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
-
-extern void *alloc_large_system_hash(const char *tablename,
- unsigned long bucketsize,
- unsigned long numentries,
- int scale,
- int flags,
- unsigned int *_hash_shift,
- unsigned int *_hash_mask,
- unsigned long low_limit,
- unsigned long high_limit);
-
-#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
- * shift passed via *_hash_shift */
-#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
-
-/* Only NUMA needs hash distribution. 64bit NUMA architectures have
- * sufficient vmalloc space.
- */
-#ifdef CONFIG_NUMA
-#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
-#else
-#define hashdist (0)
-#endif
-
-
-#endif /* _LINUX_BOOTMEM_H */
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
new file mode 100644
index 000000000000..4c506e76a808
--- /dev/null
+++ b/include/linux/bootmem_info.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BOOTMEM_INFO_H
+#define __LINUX_BOOTMEM_INFO_H
+
+#include <linux/mm.h>
+#include <linux/kmemleak.h>
+
+/*
+ * Types for free bootmem stored in the low bits of page->private.
+ */
+enum bootmem_type {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 1,
+ SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
+ MIX_SECTION_INFO,
+ NODE_INFO,
+ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
+};
+
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
+void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
+ unsigned long nr_pages);
+
+void get_page_bootmem(unsigned long info, struct page *page,
+ enum bootmem_type type);
+void put_page_bootmem(struct page *page);
+
+static inline enum bootmem_type bootmem_type(const struct page *page)
+{
+ return (unsigned long)page->private & 0xf;
+}
+
+static inline unsigned long bootmem_info(const struct page *page)
+{
+ return (unsigned long)page->private >> 4;
+}
+
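+/*
+ * Note (added for clarity): the encode side lives in get_page_bootmem(),
+ * which presumably packs the info and type into page->private roughly as
+ *
+ *	set_page_private(page, (info << 4) | type);
+ *
+ * matching the two accessors above. See the get_page_bootmem()
+ * implementation for the authoritative layout.
+ */
+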
+/*
+ * Any memory allocated via the memblock allocator and not via the
+ * buddy allocator will already be marked reserved in the memmap. For
+ * such pages, we can call this function to free them to the buddy
+ * allocator.
+ */
+static inline void free_bootmem_page(struct page *page)
+{
+ enum bootmem_type type = bootmem_type(page);
+
+ /*
+ * reserve_bootmem_region() sets the reserved flag on bootmem
+ * pages.
+ */
+ VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
+
+ if (type == SECTION_INFO || type == MIX_SECTION_INFO)
+ put_page_bootmem(page);
+ else
+ VM_BUG_ON_PAGE(1, page);
+}
+#else
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
+static inline void register_page_bootmem_memmap(unsigned long section_nr,
+ struct page *map, unsigned long nr_pages)
+{
+}
+
+static inline void put_page_bootmem(struct page *page)
+{
+}
+
+static inline enum bootmem_type bootmem_type(const struct page *page)
+{
+ return SECTION_INFO;
+}
+
+static inline unsigned long bootmem_info(const struct page *page)
+{
+ return 0;
+}
+
+static inline void get_page_bootmem(unsigned long info, struct page *page,
+ enum bootmem_type type)
+{
+}
+
+static inline void free_bootmem_page(struct page *page)
+{
+ kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
+ free_reserved_page(page);
+}
+#endif
+
+#endif /* __LINUX_BOOTMEM_INFO_H */
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 8fdcb783197d..fc53e0ad56d9 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -1,9 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BH_H
#define _LINUX_BH_H
+#include <linux/instruction_pointer.h>
#include <linux/preempt.h>
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
@@ -31,4 +33,10 @@ static inline void local_bh_enable(void)
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
+#ifdef CONFIG_PREEMPT_RT
+extern bool local_bh_blocked(void);
+#else
+static inline bool local_bh_blocked(void) { return false; }
+#endif
+
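+/*
+ * Typical usage, unchanged by this patch (illustrative only):
+ *
+ *	local_bh_disable();
+ *	... section that must not race with softirq handlers ...
+ *	local_bh_enable();
+ *
+ * On PREEMPT_RT, local_bh_blocked() is expected to tell the softirq
+ * core whether softirq processing is currently blocked by such a
+ * section on this CPU.
+ */
+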
#endif /* _LINUX_BH_H */
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
new file mode 100644
index 000000000000..c9e6b26abab6
--- /dev/null
+++ b/include/linux/bpf-cgroup-defs.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_CGROUP_DEFS_H
+#define _BPF_CGROUP_DEFS_H
+
+#ifdef CONFIG_CGROUP_BPF
+
+#include <linux/list.h>
+#include <linux/percpu-refcount.h>
+#include <linux/workqueue.h>
+
+struct bpf_prog_array;
+
+#ifdef CONFIG_BPF_LSM
+/* Maximum number of concurrently attachable per-cgroup LSM hooks. */
+#define CGROUP_LSM_NUM 10
+#else
+#define CGROUP_LSM_NUM 0
+#endif
+
+enum cgroup_bpf_attach_type {
+ CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
+ CGROUP_INET_INGRESS = 0,
+ CGROUP_INET_EGRESS,
+ CGROUP_INET_SOCK_CREATE,
+ CGROUP_SOCK_OPS,
+ CGROUP_DEVICE,
+ CGROUP_INET4_BIND,
+ CGROUP_INET6_BIND,
+ CGROUP_INET4_CONNECT,
+ CGROUP_INET6_CONNECT,
+ CGROUP_UNIX_CONNECT,
+ CGROUP_INET4_POST_BIND,
+ CGROUP_INET6_POST_BIND,
+ CGROUP_UDP4_SENDMSG,
+ CGROUP_UDP6_SENDMSG,
+ CGROUP_UNIX_SENDMSG,
+ CGROUP_SYSCTL,
+ CGROUP_UDP4_RECVMSG,
+ CGROUP_UDP6_RECVMSG,
+ CGROUP_UNIX_RECVMSG,
+ CGROUP_GETSOCKOPT,
+ CGROUP_SETSOCKOPT,
+ CGROUP_INET4_GETPEERNAME,
+ CGROUP_INET6_GETPEERNAME,
+ CGROUP_UNIX_GETPEERNAME,
+ CGROUP_INET4_GETSOCKNAME,
+ CGROUP_INET6_GETSOCKNAME,
+ CGROUP_UNIX_GETSOCKNAME,
+ CGROUP_INET_SOCK_RELEASE,
+ CGROUP_LSM_START,
+ CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1,
+ MAX_CGROUP_BPF_ATTACH_TYPE
+};
+
+struct cgroup_bpf {
+ /* array of effective progs in this cgroup */
+ struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* Programs attached to this cgroup and their attach flags.
+ * When flags == 0 or BPF_F_ALLOW_OVERRIDE, the progs list will
+ * have either zero or one element; when BPF_F_ALLOW_MULTI, the
+ * list can have up to BPF_CGROUP_MAX_PROGS elements.
+ */
+ struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u64 revisions[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* list of cgroup shared storages */
+ struct list_head storages;
+
+ /* temp storage for effective prog array used by prog_attach/detach */
+ struct bpf_prog_array *inactive;
+
+ /* reference counter used to detach bpf programs after cgroup removal */
+ struct percpu_ref refcnt;
+
+ /* cgroup_bpf is released using a work queue */
+ struct work_struct release_work;
+};
+
+#else /* CONFIG_CGROUP_BPF */
+struct cgroup_bpf {};
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index d41d40ac3efd..d1eb5c7729cb 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -1,59 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H
+#include <linux/bpf.h>
+#include <linux/bpf-cgroup-defs.h>
+#include <linux/errno.h>
#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/rbtree.h>
+#include <net/sock.h>
#include <uapi/linux/bpf.h>
struct sock;
+struct sockaddr;
struct cgroup;
struct sk_buff;
+struct bpf_map;
+struct bpf_prog;
struct bpf_sock_ops_kern;
+struct bpf_cgroup_storage;
+struct ctl_table;
+struct ctl_table_header;
+struct task_struct;
+
+unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
+ const struct bpf_insn *insn);
#ifdef CONFIG_CGROUP_BPF
-extern struct static_key_false cgroup_bpf_enabled_key;
-#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
-
-struct cgroup_bpf {
- /*
- * Store two sets of bpf_prog pointers, one for programs that are
- * pinned directly to this cgroup, and one for those that are effective
- * when this cgroup is accessed.
- */
- struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
- struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
- bool disallow_override[MAX_BPF_ATTACH_TYPE];
+#define CGROUP_ATYPE(type) \
+ case BPF_##type: return type
+
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ CGROUP_ATYPE(CGROUP_INET_INGRESS);
+ CGROUP_ATYPE(CGROUP_INET_EGRESS);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+ CGROUP_ATYPE(CGROUP_SOCK_OPS);
+ CGROUP_ATYPE(CGROUP_DEVICE);
+ CGROUP_ATYPE(CGROUP_INET4_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_BIND);
+ CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+ CGROUP_ATYPE(CGROUP_UNIX_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+ CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_SENDMSG);
+ CGROUP_ATYPE(CGROUP_SYSCTL);
+ CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_RECVMSG);
+ CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+ default:
+ return CGROUP_BPF_ATTACH_TYPE_INVALID;
+ }
+}
+
+#undef CGROUP_ATYPE
+
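+/*
+ * Illustrative only: callers convert the uapi attach type before
+ * indexing any of the per-attach-type arrays, e.g.
+ *
+ *	enum cgroup_bpf_attach_type atype;
+ *
+ *	atype = to_cgroup_bpf_attach_type(attr->attach_type);
+ *	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
+ *		return -EINVAL;
+ */
+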
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
+
+struct bpf_cgroup_storage_map;
+
+struct bpf_storage_buffer {
+ struct rcu_head rcu;
+ char data[];
};
-void cgroup_bpf_put(struct cgroup *cgrp);
-void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
+struct bpf_cgroup_storage {
+ union {
+ struct bpf_storage_buffer *buf;
+ void __percpu *percpu_buf;
+ };
+ struct bpf_cgroup_storage_map *map;
+ struct bpf_cgroup_storage_key key;
+ struct list_head list_map;
+ struct list_head list_cg;
+ struct rb_node node;
+ struct rcu_head rcu;
+};
+
+struct bpf_cgroup_link {
+ struct bpf_link link;
+ struct cgroup *cgroup;
+};
-int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
- struct bpf_prog *prog, enum bpf_attach_type type,
- bool overridable);
+struct bpf_prog_list {
+ struct hlist_node node;
+ struct bpf_prog *prog;
+ struct bpf_cgroup_link *link;
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ u32 flags;
+};
-/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
-int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, bool overridable);
+void __init cgroup_bpf_lifetime_notifier_init(void);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
+
+int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ struct sockaddr_unsized *uaddr,
+ int *uaddrlen,
+ enum cgroup_bpf_attach_type atype,
+ void *t_ctx,
+ u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
+
+int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
+ short access, enum cgroup_bpf_attach_type atype);
+
+int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
+ const struct ctl_table *table, int write,
+ char **buf, size_t *pcount, loff_t *ppos,
+ enum cgroup_bpf_attach_type atype);
+
+int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
+ int *optname, sockptr_t optval,
+ int *optlen, char **kernel_optval);
+
+int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ int optname, sockptr_t optval,
+ sockptr_t optlen, int max_optlen,
+ int retval);
+
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+ int optname, void *optval,
+ int *optlen, int retval);
+
+static inline enum bpf_cgroup_storage_type cgroup_storage_type(
+ struct bpf_map *map)
+{
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
+ return BPF_CGROUP_STORAGE_PERCPU;
+
+ return BPF_CGROUP_STORAGE_SHARED;
+}
+
+struct bpf_cgroup_storage *
+cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
+ void *key, bool locked);
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
+ enum bpf_cgroup_storage_type stype);
+void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
+void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
+ struct cgroup *cgroup,
+ enum bpf_attach_type type);
+void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
+int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
+
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
+ void *value, u64 flags);
+
+/* Opportunistic check to see whether we have any BPF program attached */
+static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ enum cgroup_bpf_attach_type type)
+{
+ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ struct bpf_prog_array *array;
+
+ array = rcu_access_pointer(cgrp->bpf.effective[type]);
+ return array != &bpf_empty_prog_array.hdr;
+}
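+
+/*
+ * Descriptive note (added for clarity): the fast path is two cheap
+ * checks. cgroup_bpf_enabled() is a static key that is patched out when
+ * no program of this attach type exists system-wide, and
+ * cgroup_bpf_sock_enabled() then skips the run-time machinery when this
+ * particular socket's cgroup has an empty effective array.
+ */
+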
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
+ cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS) && sk && \
+ sk_fullsock(sk)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
- BPF_CGROUP_INET_INGRESS); \
+ CGROUP_INET_INGRESS); \
\
__ret; \
})
@@ -61,48 +203,312 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk)) \
+ if (__sk && __sk == skb_to_full_sk(skb) && \
+ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
- BPF_CGROUP_INET_EGRESS); \
+ CGROUP_INET_EGRESS); \
+ } \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, atype); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)
+
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)
+
+#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)
+
+#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
+
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, \
+ (struct sockaddr_unsized *)uaddr, uaddrlen, \
+ atype, NULL, NULL); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk) { \
- __ret = __cgroup_bpf_run_filter_sk(sk, \
- BPF_CGROUP_INET_SOCK_CREATE); \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, \
+ (struct sockaddr_unsized *)uaddr, uaddrlen, \
+ atype, t_ctx, NULL); \
+ release_sock(sk); \
} \
__ret; \
})
+/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
+ * via the upper bits of the return code. The only flag that is supported
+ * (at bit position 0) indicates that the CAP_NET_BIND_SERVICE capability
+ * check should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
+ */
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
+({ \
+ u32 __flags = 0; \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, \
+ (struct sockaddr_unsized *)uaddr, uaddrlen, \
+ atype, NULL, &__flags); \
+ release_sock(sk); \
+ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
+ *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
+ } \
+ __ret; \
+})
+
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
+ ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
+ cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
+ (sk)->sk_prot->pre_connect)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)
+
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)
+
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx)
+
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL)
+
+/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
+ * fullsock and its parent fullsock cannot be traced by
+ * sk_to_full_sk().
+ *
+ * e.g. sock_ops->sk is a request_sock under syncookie mode, whose
+ * listener-sk is not attached to the rsk_listener.
+ * In this case, the caller holds the listener-sk (unlocked),
+ * sets sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro
+ * with the listener-sk so that the cgroup-bpf progs of the
+ * listener-sk will be run.
+ *
+ * Regardless of syncookie mode,
+ * calling bpf_setsockopt on the listener-sk would not make sense anyway,
+ * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
+ */
+#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
+ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \
+ sock_ops, \
+ CGROUP_SOCK_OPS); \
+ __ret; \
+})
+
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && (sock_ops)->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
- BPF_CGROUP_SOCK_OPS); \
+ CGROUP_SOCK_OPS); \
} \
__ret; \
})
+
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
+ __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
+ access, \
+ CGROUP_DEVICE); \
+ \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
+ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
+ buf, count, pos, \
+ CGROUP_SYSCTL); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
+ kernel_optval) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
+ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
+ optname, optval, \
+ optlen, \
+ kernel_optval); \
+ __ret; \
+})
+
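+/*
+ * Descriptive note (added for clarity): the bpf_bypass_getsockopt hook
+ * below lets a protocol (TCP, via tcp_bpf_bypass_getsockopt) declare
+ * certain (level, optname) pairs as not interceptable, so the BPF
+ * program is skipped for them and the kernel's result is returned
+ * unchanged.
+ */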
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
+ max_optlen, retval) \
+({ \
+ int __ret = retval; \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
+ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
+ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
+ tcp_bpf_bypass_getsockopt, \
+ level, optname)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt( \
+ sock, level, optname, optval, optlen, \
+ max_optlen, retval); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) \
+({ \
+ int __ret = retval; \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \
+ sock, level, optname, optval, optlen, retval); \
+ __ret; \
+})
+
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype);
+int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+
+const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
-struct cgroup_bpf {};
-static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
-static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
- struct cgroup *parent) {}
+static inline void cgroup_bpf_lifetime_notifier_init(void)
+{
+ return;
+}
+
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
+static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
+ struct bpf_map *map) { return 0; }
+static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
+ struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
+static inline void bpf_cgroup_storage_free(
+ struct bpf_cgroup_storage *storage) {}
+static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
+ void *value) {
+ return 0;
+}
+static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ void *key, void *value, u64 flags) {
+ return 0;
+}
+#define cgroup_bpf_enabled(atype) (0)
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
+ optlen, max_optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
+ kernel_optval) ({ 0; })
#endif /* CONFIG_CGROUP_BPF */
diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h
new file mode 100644
index 000000000000..413cfa5e4b07
--- /dev/null
+++ b/include/linux/bpf-netns.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_NETNS_H
+#define _BPF_NETNS_H
+
+#include <linux/mutex.h>
+#include <net/netns/bpf.h>
+#include <uapi/linux/bpf.h>
+
+static inline enum netns_bpf_attach_type
+to_netns_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ case BPF_FLOW_DISSECTOR:
+ return NETNS_BPF_FLOW_DISSECTOR;
+ case BPF_SK_LOOKUP:
+ return NETNS_BPF_SK_LOOKUP;
+ default:
+ return NETNS_BPF_INVALID;
+ }
+}
+
+/* Protects updates to netns_bpf */
+extern struct mutex netns_bpf_mutex;
+
+union bpf_attr;
+struct bpf_prog;
+
+#ifdef CONFIG_NET
+int netns_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int netns_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog);
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int netns_bpf_link_create(const union bpf_attr *attr,
+ struct bpf_prog *prog);
+#else
+static inline int netns_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netns_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netns_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netns_bpf_link_create(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* _BPF_NETNS_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b69e7a5869ff..6498be4c44f8 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1,60 +1,816 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1
#include <uapi/linux/bpf.h>
+#include <uapi/linux/filter.h>
+#include <crypto/sha2.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
+#include <linux/numa.h>
+#include <linux/mm_types.h>
+#include <linux/wait.h>
+#include <linux/refcount.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/capability.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+#include <linux/percpu-refcount.h>
+#include <linux/stddef.h>
+#include <linux/bpfptr.h>
+#include <linux/btf.h>
+#include <linux/rcupdate_trace.h>
+#include <linux/static_call.h>
+#include <linux/memcontrol.h>
+#include <linux/cfi.h>
+#include <asm/rqspinlock.h>
+struct bpf_verifier_env;
+struct bpf_verifier_log;
struct perf_event;
+struct bpf_prog;
+struct bpf_prog_aux;
struct bpf_map;
+struct bpf_arena;
+struct sock;
+struct seq_file;
+struct btf;
+struct btf_type;
+struct exception_table_entry;
+struct seq_operations;
+struct bpf_iter_aux_info;
+struct bpf_local_storage;
+struct bpf_local_storage_map;
+struct kobject;
+struct mem_cgroup;
+struct module;
+struct bpf_func_state;
+struct ftrace_ops;
+struct cgroup;
+struct bpf_token;
+struct user_namespace;
+struct super_block;
+struct inode;
+
+extern struct idr btf_idr;
+extern spinlock_t btf_idr_lock;
+extern struct kobject *btf_kobj;
+extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
+extern bool bpf_global_ma_set;
+
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
+typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
+ struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
+typedef unsigned int (*bpf_func_t)(const void *,
+ const struct bpf_insn *);
+struct bpf_iter_seq_info {
+ const struct seq_operations *seq_ops;
+ bpf_iter_init_seq_priv_t init_seq_private;
+ bpf_iter_fini_seq_priv_t fini_seq_private;
+ u32 seq_priv_size;
+};
-/* map is generic key/value storage optionally accesible by eBPF programs */
+/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
+ int (*map_alloc_check)(union bpf_attr *attr);
struct bpf_map *(*map_alloc)(union bpf_attr *attr);
void (*map_release)(struct bpf_map *map, struct file *map_file);
void (*map_free)(struct bpf_map *map);
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
+ void (*map_release_uref)(struct bpf_map *map);
+ void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
+ int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
+ void *value, u64 flags);
+ int (*map_lookup_and_delete_batch)(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
- int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
- int (*map_delete_elem)(struct bpf_map *map, void *key);
+ long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
+ long (*map_delete_elem)(struct bpf_map *map, void *key);
+ long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
+ long (*map_pop_elem)(struct bpf_map *map, void *value);
+ long (*map_peek_elem)(struct bpf_map *map, void *value);
+ void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
+ int (*map_get_hash)(struct bpf_map *map, u32 hash_buf_size, void *hash_buf);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
- void (*map_fd_put_ptr)(void *ptr);
- u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
+ /* If need_defer is true, the implementation should guarantee that
+ * the to-be-put element is still alive before the bpf program, which
+ * may manipulate it, exits.
+ */
+ void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
+ int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
+ void (*map_seq_show_elem)(struct bpf_map *map, void *key,
+ struct seq_file *m);
+ int (*map_check_btf)(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
+ /* Prog poke tracking helpers. */
+ int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
+ void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
+ void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
+ struct bpf_prog *new);
+
+ /* Direct value access helpers. */
+ int (*map_direct_value_addr)(const struct bpf_map *map,
+ u64 *imm, u32 off);
+ int (*map_direct_value_meta)(const struct bpf_map *map,
+ u64 imm, u32 *off);
+ int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
+ __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
+ struct poll_table_struct *pts);
+ unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+ /* Functions called by bpf_local_storage maps */
+ int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
+
+ /* Misc helpers. */
+ long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
+
+ /* map_meta_equal must be implemented for maps that can be
+ * used as an inner map. It is a runtime check to ensure
+ * an inner map can be inserted into an outer map.
+ *
+ * Some properties of the inner map are used during
+ * verification. When inserting an inner map at runtime,
+ * map_meta_equal has to ensure the map being inserted has the same
+ * properties that the verifier used earlier.
+ */
+ bool (*map_meta_equal)(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
+ int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
+ long (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
+ void *callback_ctx, u64 flags);
+
+ u64 (*map_mem_usage)(const struct bpf_map *map);
+
+ /* BTF id of struct allocated by map_alloc */
+ int *map_btf_id;
+
+ /* bpf_iter info used to open a seq_file */
+ const struct bpf_iter_seq_info *iter_seq_info;
+};
+
+enum {
+ /* Support at most 11 fields in a BTF type */
+ BTF_FIELDS_MAX = 11,
+};
+
+enum btf_field_type {
+ BPF_SPIN_LOCK = (1 << 0),
+ BPF_TIMER = (1 << 1),
+ BPF_KPTR_UNREF = (1 << 2),
+ BPF_KPTR_REF = (1 << 3),
+ BPF_KPTR_PERCPU = (1 << 4),
+ BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
+ BPF_LIST_HEAD = (1 << 5),
+ BPF_LIST_NODE = (1 << 6),
+ BPF_RB_ROOT = (1 << 7),
+ BPF_RB_NODE = (1 << 8),
+ BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
+ BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
+ BPF_REFCOUNT = (1 << 9),
+ BPF_WORKQUEUE = (1 << 10),
+ BPF_UPTR = (1 << 11),
+ BPF_RES_SPIN_LOCK = (1 << 12),
+ BPF_TASK_WORK = (1 << 13),
+};
+
+enum bpf_cgroup_storage_type {
+ BPF_CGROUP_STORAGE_SHARED,
+ BPF_CGROUP_STORAGE_PERCPU,
+ __BPF_CGROUP_STORAGE_MAX
+#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+};
+
+#ifdef CONFIG_CGROUP_BPF
+# define for_each_cgroup_storage_type(stype) \
+ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+#else
+# define for_each_cgroup_storage_type(stype) for (; false; )
+#endif /* CONFIG_CGROUP_BPF */
+
+typedef void (*btf_dtor_kfunc_t)(void *);
+
+struct btf_field_kptr {
+ struct btf *btf;
+ struct module *module;
+ /* dtor used if btf_is_kernel(btf), otherwise the type is
+ * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
+ */
+ btf_dtor_kfunc_t dtor;
+ u32 btf_id;
+};
+
+struct btf_field_graph_root {
+ struct btf *btf;
+ u32 value_btf_id;
+ u32 node_offset;
+ struct btf_record *value_rec;
+};
+
+struct btf_field {
+ u32 offset;
+ u32 size;
+ enum btf_field_type type;
+ union {
+ struct btf_field_kptr kptr;
+ struct btf_field_graph_root graph_root;
+ };
+};
+
+struct btf_record {
+ u32 cnt;
+ u32 field_mask;
+ int spin_lock_off;
+ int res_spin_lock_off;
+ int timer_off;
+ int wq_off;
+ int refcount_off;
+ int task_work_off;
+ struct btf_field fields[];
+};
+
+/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
+struct bpf_rb_node_kern {
+ struct rb_node rb_node;
+ void *owner;
+} __attribute__((aligned(8)));
+
+/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
+struct bpf_list_node_kern {
+ struct list_head list_head;
+ void *owner;
+} __attribute__((aligned(8)));
+
+/* 'Ownership' of a program-containing map is claimed by the first program
+ * that is going to use this map or by the first program whose FD is
+ * stored in the map, to make sure that all callers and callees have the
+ * same prog type, JITed flag and xdp_has_frags flag.
+ */
+struct bpf_map_owner {
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
+ u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
+ const struct btf_type *attach_func_proto;
+ enum bpf_attach_type expected_attach_type;
};
struct bpf_map {
- atomic_t refcnt;
+ u8 sha[SHA256_DIGEST_SIZE];
+ const struct bpf_map_ops *ops;
+ struct bpf_map *inner_map_meta;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
+ u64 map_extra; /* any per-map-type extra fields */
u32 map_flags;
- u32 pages;
u32 id;
- struct user_struct *user;
- const struct bpf_map_ops *ops;
- struct work_struct work;
- atomic_t usercnt;
- struct bpf_map *inner_map_meta;
+ struct btf_record *record;
+ int numa_node;
+ u32 btf_key_type_id;
+ u32 btf_value_type_id;
+ u32 btf_vmlinux_value_type_id;
+ struct btf *btf;
+#ifdef CONFIG_MEMCG
+ struct obj_cgroup *objcg;
+#endif
+ char name[BPF_OBJ_NAME_LEN];
+ struct mutex freeze_mutex;
+ atomic64_t refcnt;
+ atomic64_t usercnt;
+ /* rcu is used before freeing and work is only used during freeing */
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
+ atomic64_t writecnt;
+ spinlock_t owner_lock;
+ struct bpf_map_owner *owner;
+ bool bypass_spec_v1;
+ bool frozen; /* write-once; write-protected by freeze_mutex */
+ bool free_after_mult_rcu_gp;
+ bool free_after_rcu_gp;
+ atomic64_t sleepable_refcnt;
+ s64 __percpu *elem_count;
+ u64 cookie; /* write-once */
+ char *excl_prog_sha;
+};
+
+static inline const char *btf_field_type_name(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return "bpf_spin_lock";
+ case BPF_RES_SPIN_LOCK:
+ return "bpf_res_spin_lock";
+ case BPF_TIMER:
+ return "bpf_timer";
+ case BPF_WORKQUEUE:
+ return "bpf_wq";
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ return "kptr";
+ case BPF_KPTR_PERCPU:
+ return "percpu_kptr";
+ case BPF_UPTR:
+ return "uptr";
+ case BPF_LIST_HEAD:
+ return "bpf_list_head";
+ case BPF_LIST_NODE:
+ return "bpf_list_node";
+ case BPF_RB_ROOT:
+ return "bpf_rb_root";
+ case BPF_RB_NODE:
+ return "bpf_rb_node";
+ case BPF_REFCOUNT:
+ return "bpf_refcount";
+ case BPF_TASK_WORK:
+ return "bpf_task_work";
+ default:
+ WARN_ON_ONCE(1);
+ return "unknown";
+ }
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_KERNEL)
+#define BPF_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
+#else
+#define BPF_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+#endif
+
+static inline u32 btf_field_type_size(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return sizeof(struct bpf_spin_lock);
+ case BPF_RES_SPIN_LOCK:
+ return sizeof(struct bpf_res_spin_lock);
+ case BPF_TIMER:
+ return sizeof(struct bpf_timer);
+ case BPF_WORKQUEUE:
+ return sizeof(struct bpf_wq);
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
+ return sizeof(u64);
+ case BPF_LIST_HEAD:
+ return sizeof(struct bpf_list_head);
+ case BPF_LIST_NODE:
+ return sizeof(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return sizeof(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return sizeof(struct bpf_rb_node);
+ case BPF_REFCOUNT:
+ return sizeof(struct bpf_refcount);
+ case BPF_TASK_WORK:
+ return sizeof(struct bpf_task_work);
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static inline u32 btf_field_type_align(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return __alignof__(struct bpf_spin_lock);
+ case BPF_RES_SPIN_LOCK:
+ return __alignof__(struct bpf_res_spin_lock);
+ case BPF_TIMER:
+ return __alignof__(struct bpf_timer);
+ case BPF_WORKQUEUE:
+ return __alignof__(struct bpf_wq);
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
+ return __alignof__(u64);
+ case BPF_LIST_HEAD:
+ return __alignof__(struct bpf_list_head);
+ case BPF_LIST_NODE:
+ return __alignof__(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return __alignof__(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return __alignof__(struct bpf_rb_node);
+ case BPF_REFCOUNT:
+ return __alignof__(struct bpf_refcount);
+ case BPF_TASK_WORK:
+ return __alignof__(struct bpf_task_work);
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
+{
+ memset(addr, 0, field->size);
+
+ switch (field->type) {
+ case BPF_REFCOUNT:
+ refcount_set((refcount_t *)addr, 1);
+ break;
+ case BPF_RB_NODE:
+ RB_CLEAR_NODE((struct rb_node *)addr);
+ break;
+ case BPF_LIST_HEAD:
+ case BPF_LIST_NODE:
+ INIT_LIST_HEAD((struct list_head *)addr);
+ break;
+ case BPF_RB_ROOT:
+ /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
+ case BPF_SPIN_LOCK:
+ case BPF_RES_SPIN_LOCK:
+ case BPF_TIMER:
+ case BPF_WORKQUEUE:
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
+ case BPF_TASK_WORK:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
+
+static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
+{
+ if (IS_ERR_OR_NULL(rec))
+ return false;
+ return rec->field_mask & type;
+}
+
+static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(rec))
+ return;
+ for (i = 0; i < rec->cnt; i++)
+ bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
+}
+
+/* 'dst' must be a temporary buffer and should not point to memory that is being
+ * used in parallel by a bpf program or bpf syscall, otherwise the access from
+ * the bpf program or bpf syscall may be corrupted by the reinitialization,
+ * leading to weird problems. Even if 'dst' is newly allocated from the bpf
+ * memory allocator, it is still possible for 'dst' to be used in parallel by a
+ * bpf program or bpf syscall.
+ */
+static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
+{
+ bpf_obj_init(map->record, dst);
+}
+
+/* memcpy that is used with 8-byte aligned pointers and a size that is a
+ * multiple of 8, forced to use 'long' read/writes to try to atomically
+ * copy long counters. Best-effort only. No barriers here, since it _will_
+ * race with concurrent updates from BPF programs. Called from the bpf
+ * syscall and mostly used with size 8 or 16 bytes, so ask the compiler to
+ * inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+ const long *lsrc = src;
+ long *ldst = dst;
+
+ size /= sizeof(long);
+ while (size--)
+ data_race(*ldst++ = *lsrc++);
+}
+
+/* copy everything but the special btf_record fields (bpf_spin_lock, bpf_timer, kptrs, etc.) */
+static inline void bpf_obj_memcpy(struct btf_record *rec,
+ void *dst, void *src, u32 size,
+ bool long_memcpy)
+{
+ u32 curr_off = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rec)) {
+ if (long_memcpy)
+ bpf_long_memcpy(dst, src, round_up(size, 8));
+ else
+ memcpy(dst, src, size);
+ return;
+ }
+
+ for (i = 0; i < rec->cnt; i++) {
+ u32 next_off = rec->fields[i].offset;
+ u32 sz = next_off - curr_off;
+
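+		/* copy the gap preceding this special field, then advance
+		 * curr_off past the field itself so it is skipped
+		 */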
+ memcpy(dst + curr_off, src + curr_off, sz);
+ curr_off += rec->fields[i].size + sz;
+ }
+ memcpy(dst + curr_off, src + curr_off, size - curr_off);
+}
+
+static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
+{
+ bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
+}
+
+static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
+{
+ bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
+}
+
+static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
+{
+ unsigned long *src_uptr, *dst_uptr;
+ const struct btf_field *field;
+ int i;
+
+ if (!btf_record_has_field(rec, BPF_UPTR))
+ return;
+
+ for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
+ if (field->type != BPF_UPTR)
+ continue;
+
+ src_uptr = src + field->offset;
+ dst_uptr = dst + field->offset;
+ swap(*src_uptr, *dst_uptr);
+ }
+}
+
+static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
+{
+ u32 curr_off = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rec)) {
+ memset(dst, 0, size);
+ return;
+ }
+
+ for (i = 0; i < rec->cnt; i++) {
+ u32 next_off = rec->fields[i].offset;
+ u32 sz = next_off - curr_off;
+
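+		/* zero the gap preceding this special field, then advance
+		 * curr_off past the field itself so it is skipped
+		 */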
+ memset(dst + curr_off, 0, sz);
+ curr_off += rec->fields[i].size + sz;
+ }
+ memset(dst + curr_off, 0, size - curr_off);
+}
+
+static inline void zero_map_value(struct bpf_map *map, void *dst)
+{
+ bpf_obj_memzero(map->record, dst, map->value_size);
+}
+
+void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
+ bool lock_src);
+void bpf_timer_cancel_and_free(void *timer);
+void bpf_wq_cancel_and_free(void *timer);
+void bpf_task_work_cancel_and_free(void *timer);
+void bpf_list_head_free(const struct btf_field *field, void *list_head,
+ struct bpf_spin_lock *spin_lock);
+void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
+ struct bpf_spin_lock *spin_lock);
+u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena);
+u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena);
+int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
+
+struct bpf_offload_dev;
+struct bpf_offloaded_map;
+
+struct bpf_map_dev_ops {
+ int (*map_get_next_key)(struct bpf_offloaded_map *map,
+ void *key, void *next_key);
+ int (*map_lookup_elem)(struct bpf_offloaded_map *map,
+ void *key, void *value);
+ int (*map_update_elem)(struct bpf_offloaded_map *map,
+ void *key, void *value, u64 flags);
+ int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
+};
+
+struct bpf_offloaded_map {
+ struct bpf_map map;
+ struct net_device *netdev;
+ const struct bpf_map_dev_ops *dev_ops;
+ void *dev_priv;
+ struct list_head offloads;
};
+static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
+{
+ return container_of(map, struct bpf_offloaded_map, map);
+}
+
+static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
+{
+ return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
+}
+
+static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
+{
+ return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
+ map->ops->map_seq_show_elem;
+}
+
+int map_check_no_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
+bool bpf_map_meta_equal(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
+static inline bool bpf_map_has_internal_structs(struct bpf_map *map)
+{
+ return btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE | BPF_TASK_WORK);
+}
+
+void bpf_map_free_internal_structs(struct bpf_map *map, void *obj);
+
+int bpf_dynptr_from_file_sleepable(struct file *file, u32 flags,
+ struct bpf_dynptr *ptr__uninit);
+
+extern const struct bpf_map_ops bpf_map_offload_ops;
+
+/* bpf_type_flag contains a set of flags that are applicable to the values of
+ * arg_type, ret_type and reg_type. For example, a pointer value may be null,
+ * or a memory region may be read-only. We classify types into two categories: base types
+ * and extended types. Extended types are base types combined with a type flag.
+ *
+ * Currently there are no more than 32 base types in arg_type, ret_type and
+ * reg_types.
+ */
+#define BPF_BASE_TYPE_BITS 8
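+
+/* Illustrative sketch: consumers split a type into its base type and its
+ * flags roughly as follows (the verifier provides equivalent helpers,
+ * e.g. base_type()/type_flag(), built on a mask of the low bits):
+ *
+ *	mask  = GENMASK(BPF_BASE_TYPE_BITS - 1, 0);
+ *	base  = type & mask;
+ *	flags = type & ~mask;
+ */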
+
+enum bpf_type_flag {
+ /* PTR may be NULL. */
+ PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is read-only. When applied to bpf_arg, it indicates the arg is
+ * compatible with both mutable and immutable memory.
+ */
+ MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
+
+ /* MEM points to BPF ring buffer reservation. */
+ MEM_RINGBUF = BIT(2 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is in user address space. */
+ MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
+ * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
+ * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
+ * or bpf_this_cpu_ptr(), which will return the pointer corresponding
+ * to the specified cpu.
+ */
+ MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS),
+
+ /* Indicates that the argument will be released. */
+ OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS),
+
+ /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
+ * unreferenced and referenced kptrs loaded from a map value using a load
+ * instruction, so that they can only be dereferenced but cannot escape
+ * the BPF program into the kernel (i.e. cannot be passed as arguments
+ * to kfuncs or bpf helpers).
+ */
+ PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
+
+ /* MEM can be uninitialized. */
+ MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to memory local to the bpf program. */
+ DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to a kernel-produced ringbuf record. */
+ DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
+
+ /* Size is known at compile time. */
+ MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is of an allocated object of type in program BTF. This is used to
+ * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
+ */
+ MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS),
+
+ /* PTR was passed from the kernel in a trusted context, and may be
+ * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
+ * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
+ * PTR_UNTRUSTED refers to a kptr that was read directly from a map
+ * without invoking bpf_kptr_xchg(). What we really need to know is
+ * whether a pointer is safe to pass to a kfunc or BPF helper function.
+ * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
+ * helpers, they do not cover all possible instances of unsafe
+ * pointers. For example, a pointer that was obtained from walking a
+ * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
+ * fact that it may be NULL, invalid, etc. This is due to backwards
+ * compatibility requirements, as this was the behavior that was first
+ * introduced when kptrs were added. The behavior is now considered
+ * deprecated, and PTR_UNTRUSTED will eventually be removed.
+ *
+ * PTR_TRUSTED, on the other hand, is a pointer that the kernel
+ * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
+ * For example, pointers passed to tracepoint arguments are considered
+ * PTR_TRUSTED, as are pointers that are passed to struct_ops
+ * callbacks. As alluded to above, pointers that are obtained from
+ * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
+ * struct task_struct *task is PTR_TRUSTED, then accessing
+ * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
+ * in a BPF register. Similarly, pointers passed to certain program
+ * types such as kretprobes are not guaranteed to be valid, as they may
+ * for example contain an object that was recently freed.
+ */
+ PTR_TRUSTED = BIT(12 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
+ MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS),
+
+ /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
+ * Currently only valid for linked-list and rbtree nodes. If the nodes
+ * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
+ */
+ NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to sk_buff */
+ DYNPTR_TYPE_SKB = BIT(15 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to xdp_buff */
+ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+
+ /* Memory must be aligned on some architectures, used in combination with
+ * MEM_FIXED_SIZE.
+ */
+ MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is being written to, often combined with MEM_UNINIT. Absence
+ * of MEM_WRITE means that MEM is only being read. MEM_WRITE without
+ * MEM_UNINIT means that the memory must already be initialized, since
+ * it is also read.
+ */
+ MEM_WRITE = BIT(18 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to skb_metadata_end()-skb_metadata_len() */
+ DYNPTR_TYPE_SKB_META = BIT(19 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to file */
+ DYNPTR_TYPE_FILE = BIT(20 + BPF_BASE_TYPE_BITS),
+
+ __BPF_TYPE_FLAG_MAX,
+ __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
+};
+
+#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
+ | DYNPTR_TYPE_XDP | DYNPTR_TYPE_SKB_META | DYNPTR_TYPE_FILE)
+
+/* Max number of base types. */
+#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
+
+/* Max number of all types. */
+#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
+
/* function argument constraints */
enum bpf_arg_type {
ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -66,28 +822,83 @@ enum bpf_arg_type {
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
- /* the following constraints used to prototype bpf_memcmp() and other
- * functions that access data on eBPF program stack
+ /* Used to prototype bpf_memcmp() and other functions that access data
+ * on eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
- ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
- * helper function must fill all bytes or clear
- * them in error case.
- */
+ ARG_PTR_TO_ARENA,
ARG_CONST_SIZE, /* number of bytes accessed from memory */
ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
+ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
+ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
+ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
+ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
+ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
+ ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
+ ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
+ ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
+ ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
+ ARG_PTR_TO_STACK, /* pointer to stack */
+ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
+ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
+ ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */
+ ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
+ __BPF_ARG_TYPE_MAX,
+
+ /* Extended arg_types. */
+ ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
+ ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
+ ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
+ ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
+ ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
+ ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+ /* Pointer to memory does not need to be initialized, since helper function
+ * fills all bytes or clears them in error case.
+ */
+ ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM,
+ /* Pointer to valid memory of size known at compile time. */
+ ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* type of values returned from helper functions */
enum bpf_return_type {
RET_INTEGER, /* function returns integer */
RET_VOID, /* function doesn't return anything */
- RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
+ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
+ RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
+ RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
+ RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
+ RET_PTR_TO_MEM, /* returns a pointer to memory */
+ RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
+ RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
+ __BPF_RET_TYPE_MAX,
+
+ /* Extended ret_types. */
+ RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
+ RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
+ RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
+ RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
+ RET_PTR_TO_RINGBUF_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
+ RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MEM,
+ RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
+ RET_PTR_TO_BTF_ID_TRUSTED = PTR_TRUSTED | RET_PTR_TO_BTF_ID,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
* to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
@@ -97,12 +908,44 @@ struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
bool pkt_access;
+ bool might_sleep;
+ /* set to true if helper follows contract for llvm
+ * attribute bpf_fastcall:
+ * - void functions do not scratch r0
+ * - functions taking N arguments scratch only registers r1-rN
+ */
+ bool allow_fastcall;
enum bpf_return_type ret_type;
- enum bpf_arg_type arg1_type;
- enum bpf_arg_type arg2_type;
- enum bpf_arg_type arg3_type;
- enum bpf_arg_type arg4_type;
- enum bpf_arg_type arg5_type;
+ union {
+ struct {
+ enum bpf_arg_type arg1_type;
+ enum bpf_arg_type arg2_type;
+ enum bpf_arg_type arg3_type;
+ enum bpf_arg_type arg4_type;
+ enum bpf_arg_type arg5_type;
+ };
+ enum bpf_arg_type arg_type[5];
+ };
+ union {
+ struct {
+ u32 *arg1_btf_id;
+ u32 *arg2_btf_id;
+ u32 *arg3_btf_id;
+ u32 *arg4_btf_id;
+ u32 *arg5_btf_id;
+ };
+ u32 *arg_btf_id[5];
+ struct {
+ size_t arg1_size;
+ size_t arg2_size;
+ size_t arg3_size;
+ size_t arg4_size;
+ size_t arg5_size;
+ };
+ size_t arg_size[5];
+ };
+ int *ret_btf_id; /* return value btf_id */
+ bool (*allowed)(const struct bpf_prog *prog);
};
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -117,45 +960,85 @@ enum bpf_access_type {
};
/* types of values stored in eBPF registers */
+/* Pointer types represent:
+ * pointer
+ * pointer + imm
+ * pointer + (u16) var
+ * pointer + (u16) var + imm
+ * if (range > 0) then [ptr, ptr + range - off) is safe to access
+ * if (id > 0) means that some 'var' was added
+ * if (off > 0) means that 'imm' was added
+ */
enum bpf_reg_type {
NOT_INIT = 0, /* nothing was written into register */
- UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
+ SCALAR_VALUE, /* reg doesn't contain a valid pointer */
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
- FRAME_PTR, /* reg == frame_pointer */
- PTR_TO_STACK, /* reg == frame_pointer + imm */
- CONST_IMM, /* constant integer value */
-
- /* PTR_TO_PACKET represents:
- * skb->data
- * skb->data + imm
- * skb->data + (u16) var
- * skb->data + (u16) var + imm
- * if (range > 0) then [ptr, ptr + range - off) is safe to access
- * if (id > 0) means that some 'var' was added
- * if (off > 0) menas that 'imm' was added
- */
- PTR_TO_PACKET,
+ PTR_TO_MAP_KEY, /* reg points to a map element key */
+ PTR_TO_STACK, /* reg == frame_pointer + offset */
+ PTR_TO_PACKET_META, /* skb->data - meta_len */
+ PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
+ PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
+ PTR_TO_SOCKET, /* reg points to struct bpf_sock */
+ PTR_TO_SOCK_COMMON, /* reg points to sock_common */
+ PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
+ PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
+ PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
+ /* PTR_TO_BTF_ID points to a kernel struct that does not need
+ * to be null checked by the BPF program. This does not imply the
+ * pointer is _not_ null and in practice this can easily be a null
+ * pointer when reading pointer chains. The assumption is program
+ * context will handle null pointer dereference typically via fault
+ * handling. The verifier must keep this in mind and can make no
+ * assumptions about null or non-null when doing branch analysis.
+ * Further, when passed into helpers the helpers can not, without
+ * additional context, assume the value is non-null.
+ */
+ PTR_TO_BTF_ID,
+ PTR_TO_MEM, /* reg points to valid memory region */
+ PTR_TO_ARENA,
+ PTR_TO_BUF, /* reg points to a read/write buffer */
+ PTR_TO_FUNC, /* reg points to a bpf program function */
+ PTR_TO_INSN, /* reg points to a bpf program instruction */
+ CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */
+ __BPF_REG_TYPE_MAX,
- /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
- * elem value. We only allow this if we can statically verify that
- * access from this register are going to fall within the size of the
- * map element.
+ /* Extended reg_types. */
+ PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
+ PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
+ PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
+ PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
+ /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
+ * been checked for null. Used primarily to inform the verifier
+ * an explicit null check is required for this struct.
*/
- PTR_TO_MAP_VALUE_ADJ,
-};
+ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
-struct bpf_prog;
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
+};
+static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* The information passed from prog-specific *_is_valid_access
* back to the verifier.
*/
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
- int ctx_field_size;
+ bool is_ldsx;
+ union {
+ int ctx_field_size;
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ u32 ref_obj_id;
+ };
+ };
+ struct bpf_verifier_log *log; /* for verbose logs */
+ bool is_retval; /* is accessing function return value? */
};
static inline void
@@ -164,61 +1047,1149 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
+static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
+static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+{
+ return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
+}
+
+/* Given a BPF_ATOMIC instruction @atomic_insn, return true if it is an
+ * atomic load or store, and false if it is a read-modify-write instruction.
+ */
+static inline bool
+bpf_atomic_is_load_store(const struct bpf_insn *atomic_insn)
+{
+ switch (atomic_insn->imm) {
+ case BPF_LOAD_ACQ:
+ case BPF_STORE_REL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct bpf_prog_ops {
+ int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+};
+
+struct bpf_reg_state;
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
- const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
+ const struct bpf_func_proto *
+ (*get_func_proto)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
+ int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
+ s16 ctx_stack_off);
+ int (*gen_ld_abs)(const struct bpf_insn *orig,
+ struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
- int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
+ int (*btf_struct_access)(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size);
+};
+
+struct bpf_prog_offload_ops {
+ /* verifier basic callbacks */
+ int (*insn_hook)(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+ int (*finalize)(struct bpf_verifier_env *env);
+ /* verifier optimization callbacks (called after .finalize) */
+ int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+ int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
+ /* program management callbacks */
+ int (*prepare)(struct bpf_prog *prog);
+ int (*translate)(struct bpf_prog *prog);
+ void (*destroy)(struct bpf_prog *prog);
+};
+
+struct bpf_prog_offload {
+ struct bpf_prog *prog;
+ struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
+ void *dev_priv;
+ struct list_head offloads;
+ bool dev_state;
+ bool opt_failed;
+ void *jited_image;
+ u32 jited_len;
+};
+
+/* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+#define MAX_BPF_FUNC_ARGS 12
+
+/* The maximum number of arguments passed through registers
+ * a single function may have.
+ */
+#define MAX_BPF_FUNC_REG_ARGS 5
+
+/* The argument is a structure or a union. */
+#define BTF_FMODEL_STRUCT_ARG BIT(0)
+
+/* The argument is signed. */
+#define BTF_FMODEL_SIGNED_ARG BIT(1)
+
+struct btf_func_model {
+ u8 ret_size;
+ u8 ret_flags;
+ u8 nr_args;
+ u8 arg_size[MAX_BPF_FUNC_ARGS];
+ u8 arg_flags[MAX_BPF_FUNC_ARGS];
+};
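+
+/* Illustrative sketch: for a hypothetical attach target
+ *
+ *	int foo(struct bar *b, s32 x);
+ *
+ * one would expect ret_size = 4, nr_args = 2, arg_size = { 8, 4 }, and
+ * arg_flags[1] carrying BTF_FMODEL_SIGNED_ARG for the signed scalar.
+ */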
+
+/* Restore arguments before returning from trampoline to let original function
+ * continue executing. This flag is used for fentry progs when there are no
+ * fexit progs.
+ */
+#define BPF_TRAMP_F_RESTORE_REGS BIT(0)
+/* Call original function after fentry progs, but before fexit progs.
+ * Makes sense for fentry/fexit, normal calls and indirect calls.
+ */
+#define BPF_TRAMP_F_CALL_ORIG BIT(1)
+/* Skip current frame and return to parent. Makes sense for fentry/fexit
+ * programs only. Should not be used with normal calls and indirect calls.
+ */
+#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
+/* Store IP address of the caller on the trampoline stack,
+ * so it's available for trampoline's programs.
+ */
+#define BPF_TRAMP_F_IP_ARG BIT(3)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
+
+/* Get original function from stack instead of from provided direct address.
+ * Makes sense for trampolines with fexit or fmod_ret programs.
+ */
+#define BPF_TRAMP_F_ORIG_STACK BIT(5)
+
+/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
+ * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
+ */
+#define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+
+/* Indicate that current trampoline is in a tail call context. Then, it has to
+ * cache and restore tail_call_cnt to avoid infinite tail call loop.
+ */
+#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
+
+/*
+ * Indicate the trampoline should be suitable to receive indirect calls;
+ * without this indirectly calling the generated code can result in #UD/#CP,
+ * depending on the CFI options.
+ *
+ * Used by bpf_struct_ops.
+ *
+ * Incompatible with FENTRY usage, overloads @func_addr argument.
+ */
+#define BPF_TRAMP_F_INDIRECT BIT(8)
+
+/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
+ * bytes on x86.
+ */
+enum {
+#if defined(__s390x__)
+ BPF_MAX_TRAMP_LINKS = 27,
+#else
+ BPF_MAX_TRAMP_LINKS = 38,
+#endif
+};
+
+struct bpf_tramp_links {
+ struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
+ int nr_links;
+};
+
+struct bpf_tramp_run_ctx;
+
+/* Different use cases for BPF trampoline:
+ * 1. replace nop at the function entry (kprobe equivalent)
+ * flags = BPF_TRAMP_F_RESTORE_REGS
+ * fentry = a set of programs to run before returning from trampoline
+ *
+ * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
+ * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
+ * orig_call = fentry_ip + MCOUNT_INSN_SIZE
+ * fentry = a set of programs to run before calling original function
+ * fexit = a set of programs to run after original function
+ *
+ * 3. replace direct call instruction anywhere in the function body
+ * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
+ * With flags = 0
+ * fentry = a set of programs to run before returning from trampoline
+ * With flags = BPF_TRAMP_F_CALL_ORIG
+ * orig_call = original callback addr or direct function addr
+ * fentry = a set of programs to run before calling original function
+ * fexit = a set of programs to run after original function
+ */
+struct bpf_tramp_image;
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
+ const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks,
+ void *func_addr);
+void *arch_alloc_bpf_trampoline(unsigned int size);
+void arch_free_bpf_trampoline(void *image, unsigned int size);
+int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr);
+
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
+void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_JMP
+static inline bool bpf_trampoline_use_jmp(u64 flags)
+{
+ return flags & BPF_TRAMP_F_CALL_ORIG && !(flags & BPF_TRAMP_F_SKIP_FRAME);
+}
+#else
+static inline bool bpf_trampoline_use_jmp(u64 flags)
+{
+ return false;
+}
+#endif
+
+struct bpf_ksym {
+ unsigned long start;
+ unsigned long end;
+ char name[KSYM_NAME_LEN];
+ struct list_head lnode;
+ struct latch_tree_node tnode;
+ bool prog;
+};
+
+enum bpf_tramp_prog_type {
+ BPF_TRAMP_FENTRY,
+ BPF_TRAMP_FEXIT,
+ BPF_TRAMP_MODIFY_RETURN,
+ BPF_TRAMP_MAX,
+ BPF_TRAMP_REPLACE, /* more than MAX */
+};
+
+struct bpf_tramp_image {
+ void *image;
+ int size;
+ struct bpf_ksym ksym;
+ struct percpu_ref pcref;
+ void *ip_after_call;
+ void *ip_epilogue;
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
+};
+
+struct bpf_trampoline {
+ /* hlist for trampoline_table */
+ struct hlist_node hlist;
+ struct ftrace_ops *fops;
+ /* serializes access to fields of this trampoline */
+ struct mutex mutex;
+ refcount_t refcnt;
+ u32 flags;
+ u64 key;
+ struct {
+ struct btf_func_model model;
+ void *addr;
+ bool ftrace_managed;
+ } func;
+ /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
+ * program by replacing one of its functions. func.addr is the address
+ * of the function it replaced.
+ */
+ struct bpf_prog *extension_prog;
+ /* list of BPF programs using this trampoline */
+ struct hlist_head progs_hlist[BPF_TRAMP_MAX];
+ /* Number of attached programs. A counter per kind. */
+ int progs_cnt[BPF_TRAMP_MAX];
+ /* Executable image of trampoline */
+ struct bpf_tramp_image *cur_image;
+};
+
+struct bpf_attach_target_info {
+ struct btf_func_model fmodel;
+ long tgt_addr;
+ struct module *tgt_mod;
+ const char *tgt_name;
+ const struct btf_type *tgt_type;
+};
+
+#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
+
+struct bpf_dispatcher_prog {
+ struct bpf_prog *prog;
+ refcount_t users;
+};
+
+struct bpf_dispatcher {
+ /* dispatcher mutex */
+ struct mutex mutex;
+ void *func;
+ struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
+ int num_progs;
+ void *image;
+ void *rw_image;
+ u32 image_off;
+ struct bpf_ksym ksym;
+#ifdef CONFIG_HAVE_STATIC_CALL
+ struct static_call_key *sc_key;
+ void *sc_tramp;
+#endif
+};
+
+#ifndef __bpfcall
+#define __bpfcall __nocfi
+#endif
+
+static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
+ const void *ctx,
+ const struct bpf_insn *insnsi,
+ bpf_func_t bpf_func)
+{
+ return bpf_func(ctx, insnsi);
+}
+
+/* the implementation of the opaque uapi struct bpf_dynptr */
+struct bpf_dynptr_kern {
+ void *data;
+ /* Size represents the number of usable bytes of dynptr data.
+ * If for example the offset is at 4 for a local dynptr whose data is
+ * of type u64, the number of usable bytes is 4.
+ *
+ * The upper 8 bits are reserved and laid out as follows:
+ * Bits 0 - 23 = size
+ * Bits 24 - 30 = dynptr type
+ * Bit 31 = whether dynptr is read-only
+ */
+ u32 size;
+ u32 offset;
+} __aligned(8);
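+
+/* Illustrative decoding of the packed 'size' field above, derived purely
+ * from the bit layout documented in the comment (a sketch; the real
+ * accessors live in the dynptr implementation):
+ *
+ *	usable_size = ptr->size & ((1U << 24) - 1);
+ *	type        = (ptr->size >> 24) & 0x7f;
+ *	rdonly      = ptr->size >> 31;
+ */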
+
+enum bpf_dynptr_type {
+ BPF_DYNPTR_TYPE_INVALID,
+ /* Points to memory that is local to the bpf program */
+ BPF_DYNPTR_TYPE_LOCAL,
+ /* Underlying data is a ringbuf record */
+ BPF_DYNPTR_TYPE_RINGBUF,
+ /* Underlying data is a sk_buff */
+ BPF_DYNPTR_TYPE_SKB,
+ /* Underlying data is a xdp_buff */
+ BPF_DYNPTR_TYPE_XDP,
+ /* Points to skb_metadata_end()-skb_metadata_len() */
+ BPF_DYNPTR_TYPE_SKB_META,
+ /* Underlying data is a file */
+ BPF_DYNPTR_TYPE_FILE,
+};
+
+int bpf_dynptr_check_size(u64 size);
+u64 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
+const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u64 len);
+void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u64 len);
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
+int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset,
+ void *src, u64 len, u64 flags);
+void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset,
+ void *buffer__opt, u64 buffer__szk);
+
+static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u64 offset, u64 len)
+{
+ u64 size = __bpf_dynptr_size(ptr);
+
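+	/* written as 'offset > size - len' to avoid u64 overflow of offset + len */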
+ if (len > size || offset > size - len)
+ return -E2BIG;
+
+ return 0;
+}
+
+#ifdef CONFIG_BPF_JIT
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog);
+struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info);
+void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
+
+/*
+ * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
+ * indirection with a direct call to the bpf program. If the architecture does
+ * not have STATIC_CALL, avoid a double-indirection.
+ */
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __BPF_DISPATCHER_SC_INIT(_name) \
+ .sc_key = &STATIC_CALL_KEY(_name), \
+ .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
+
+#define __BPF_DISPATCHER_SC(name) \
+ DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
+
+#define __BPF_DISPATCHER_CALL(name) \
+ static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
+
+#define __BPF_DISPATCHER_UPDATE(_d, _new) \
+ __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
+
+#else
+#define __BPF_DISPATCHER_SC_INIT(name)
+#define __BPF_DISPATCHER_SC(name)
+#define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi)
+#define __BPF_DISPATCHER_UPDATE(_d, _new)
+#endif
+
+#define BPF_DISPATCHER_INIT(_name) { \
+ .mutex = __MUTEX_INITIALIZER(_name.mutex), \
+ .func = &_name##_func, \
+ .progs = {}, \
+ .num_progs = 0, \
+ .image = NULL, \
+ .image_off = 0, \
+ .ksym = { \
+ .name = #_name, \
+ .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
+ }, \
+ __BPF_DISPATCHER_SC_INIT(_name##_call) \
+}
+
+#define DEFINE_BPF_DISPATCHER(name) \
+ __BPF_DISPATCHER_SC(name); \
+ noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \
+ const void *ctx, \
+ const struct bpf_insn *insnsi, \
+ bpf_func_t bpf_func) \
+ { \
+ return __BPF_DISPATCHER_CALL(name); \
+ } \
+ EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
+ struct bpf_dispatcher bpf_dispatcher_##name = \
+ BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
+
+#define DECLARE_BPF_DISPATCHER(name) \
+ unsigned int bpf_dispatcher_##name##_func( \
+ const void *ctx, \
+ const struct bpf_insn *insnsi, \
+ bpf_func_t bpf_func); \
+ extern struct bpf_dispatcher bpf_dispatcher_##name;
+
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
+#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
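+
+/* Illustrative use (a sketch patterned after the XDP dispatcher; names
+ * outside this header are assumptions):
+ *
+ *	DEFINE_BPF_DISPATCHER(xdp)
+ *	...
+ *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp),
+ *				   prev_prog, prog);
+ *
+ * Program invocation then goes through BPF_DISPATCHER_FUNC(xdp) instead
+ * of an indirect call to prog->bpf_func.
+ */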
+void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
+ struct bpf_prog *to);
+/* Called only from JIT-enabled code, so there's no need for stubs. */
+void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym);
+void bpf_image_ksym_add(struct bpf_ksym *ksym);
+void bpf_image_ksym_del(struct bpf_ksym *ksym);
+void bpf_ksym_add(struct bpf_ksym *ksym);
+void bpf_ksym_del(struct bpf_ksym *ksym);
+int bpf_jit_charge_modmem(u32 size);
+void bpf_jit_uncharge_modmem(u32 size);
+bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
+#else
+static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog)
+{
+ return -ENOTSUPP;
+}
+static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog)
+{
+ return -ENOTSUPP;
+}
+static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info)
+{
+ return NULL;
+}
+static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
+#define DEFINE_BPF_DISPATCHER(name)
+#define DECLARE_BPF_DISPATCHER(name)
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
+#define BPF_DISPATCHER_PTR(name) NULL
+static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
+ struct bpf_prog *from,
+ struct bpf_prog *to) {}
+static inline bool is_bpf_image_address(unsigned long address)
+{
+ return false;
+}
+static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
+{
+ return false;
+}
+#endif
+
+struct bpf_func_info_aux {
+ u16 linkage;
+ bool unreliable;
+ bool called : 1;
+ bool verified : 1;
+};
+
+enum bpf_jit_poke_reason {
+ BPF_POKE_REASON_TAIL_CALL,
+};
+
+/* Descriptor of pokes pointing /into/ the JITed image. */
+struct bpf_jit_poke_descriptor {
+ void *tailcall_target;
+ void *tailcall_bypass;
+ void *bypass_addr;
+ void *aux;
+ union {
+ struct {
+ struct bpf_map *map;
+ u32 key;
+ } tail_call;
+ };
+ bool tailcall_target_stable;
+ u8 adj_off;
+ u16 reason;
+ u32 insn_idx;
+};
+
+/* reg_type info for ctx arguments */
+struct bpf_ctx_arg_aux {
+ u32 offset;
+ enum bpf_reg_type reg_type;
+ struct btf *btf;
+ u32 btf_id;
+ u32 ref_obj_id;
+ bool refcounted;
+};
+
+struct btf_mod_pair {
+ struct btf *btf;
+ struct module *module;
+};
+
+struct bpf_kfunc_desc_tab;
+
+enum bpf_stream_id {
+ BPF_STDOUT = 1,
+ BPF_STDERR = 2,
+};
+
+struct bpf_stream_elem {
+ struct llist_node node;
+ int total_len;
+ int consumed_len;
+ char str[];
+};
+
+enum {
+ /* 100k bytes */
+ BPF_STREAM_MAX_CAPACITY = 100000ULL,
+};
+
+struct bpf_stream {
+ atomic_t capacity;
+ struct llist_head log; /* list of in-flight stream elements in LIFO order */
+
+ struct mutex lock; /* lock protecting backlog_{head,tail} */
+ struct llist_node *backlog_head; /* list of in-flight stream elements in FIFO order */
+ struct llist_node *backlog_tail; /* tail of the list above */
+};
+
+struct bpf_stream_stage {
+ struct llist_head log;
+ int len;
};
struct bpf_prog_aux {
- atomic_t refcnt;
+ atomic64_t refcnt;
u32 used_map_cnt;
+ u32 used_btf_cnt;
u32 max_ctx_offset;
+ u32 max_pkt_offset;
+ u32 max_tp_access;
u32 stack_depth;
u32 id;
- struct latch_tree_node ksym_tnode;
- struct list_head ksym_lnode;
- const struct bpf_verifier_ops *ops;
+ u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
+ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
+ u32 attach_btf_id; /* in-kernel BTF type id to attach to */
+ u32 attach_st_ops_member_off;
+ u32 ctx_arg_info_size;
+ u32 max_rdonly_access;
+ u32 max_rdwr_access;
+ u32 subprog_start;
+ struct btf *attach_btf;
+ struct bpf_ctx_arg_aux *ctx_arg_info;
+ void __percpu *priv_stack_ptr;
+ struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
+ struct bpf_prog *dst_prog;
+ struct bpf_trampoline *dst_trampoline;
+ enum bpf_prog_type saved_dst_prog_type;
+ enum bpf_attach_type saved_dst_attach_type;
+ bool verifier_zext; /* Zero extensions has been inserted by verifier. */
+ bool dev_bound; /* Program is bound to the netdev. */
+ bool offload_requested; /* Program is bound and offloaded to the netdev. */
+ bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+ bool attach_tracing_prog; /* true if tracing another tracing program */
+ bool func_proto_unreliable;
+ bool tail_call_reachable;
+ bool xdp_has_frags;
+ bool exception_cb;
+ bool exception_boundary;
+ bool is_extended; /* true if extended by freplace program */
+ bool jits_use_priv_stack;
+ bool priv_stack_requested;
+ bool changes_pkt_data;
+ bool might_sleep;
+ bool kprobe_write_ctx;
+ u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
+ struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
+ struct bpf_arena *arena;
+ void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */
+ /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
+ const struct btf_type *attach_func_proto;
+ /* function name for valid attach_btf_id */
+ const char *attach_func_name;
+ struct bpf_prog **func;
+ struct bpf_prog_aux *main_prog_aux;
+ void *jit_data; /* JIT specific data. arch dependent */
+ struct bpf_jit_poke_descriptor *poke_tab;
+ struct bpf_kfunc_desc_tab *kfunc_tab;
+ struct bpf_kfunc_btf_tab *kfunc_btf_tab;
+ u32 size_poke_tab;
+#ifdef CONFIG_FINEIBT
+ struct bpf_ksym ksym_prefix;
+#endif
+ struct bpf_ksym ksym;
+ const struct bpf_prog_ops *ops;
+ const struct bpf_struct_ops *st_ops;
struct bpf_map **used_maps;
+ struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
+ struct btf_mod_pair *used_btfs;
struct bpf_prog *prog;
struct user_struct *user;
+ u64 load_time; /* ns since boottime */
+ u32 verified_insns;
+ int cgroup_atype; /* enum cgroup_bpf_attach_type */
+ struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ char name[BPF_OBJ_NAME_LEN];
+ u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
+ struct bpf_token *token;
+ struct bpf_prog_offload *offload;
+ struct btf *btf;
+ struct bpf_func_info *func_info;
+ struct bpf_func_info_aux *func_info_aux;
+ /* bpf_line_info loaded from userspace. linfo->insn_off
+ * has the xlated insn offset.
+ * Both the main and sub prog share the same linfo.
+ * The subprog can access its first linfo by
+ * using the linfo_idx.
+ */
+ struct bpf_line_info *linfo;
+ /* jited_linfo is the jited addr of the linfo. It has a
+ * one to one mapping to linfo:
+ * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
+ * Both the main and sub prog share the same jited_linfo.
+ * The subprog can access its first jited_linfo by
+ * using the linfo_idx.
+ */
+ void **jited_linfo;
+ u32 func_info_cnt;
+ u32 nr_linfo;
+ /* subprog can use linfo_idx to access its first linfo and
+ * jited_linfo.
+ * main prog always has linfo_idx == 0
+ */
+ u32 linfo_idx;
+ struct module *mod;
+ u32 num_exentries;
+ struct exception_table_entry *extable;
union {
struct work_struct work;
struct rcu_head rcu;
};
+ struct bpf_stream stream[2];
+};
+
+struct bpf_prog {
+ u16 pages; /* Number of allocated pages */
+ u16 jited:1, /* Is our filter JIT'ed? */
+ jit_requested:1,/* archs need to JIT the prog */
+ gpl_compatible:1, /* Is filter GPL compatible? */
+ cb_access:1, /* Is control block accessed? */
+ dst_needed:1, /* Do we need dst entry? */
+ blinding_requested:1, /* needs constant blinding */
+ blinded:1, /* Was blinded */
+ is_func:1, /* program is a bpf function */
+ kprobe_override:1, /* Do we override a kprobe? */
+ has_callchain_buf:1, /* callchain buffer allocated? */
+ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
+ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
+ call_get_func_ip:1, /* Do we call get_func_ip() */
+ tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
+ sleepable:1; /* BPF program is sleepable */
+ enum bpf_prog_type type; /* Type of BPF program */
+ enum bpf_attach_type expected_attach_type; /* For some prog types */
+ u32 len; /* Number of filter blocks */
+ u32 jited_len; /* Size of jited insns in bytes */
+ union {
+ u8 digest[SHA256_DIGEST_SIZE];
+ u8 tag[BPF_TAG_SIZE];
+ };
+ struct bpf_prog_stats __percpu *stats;
+ int __percpu *active;
+ unsigned int (*bpf_func)(const void *ctx,
+ const struct bpf_insn *insn);
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ /* Instructions for interpreter */
+ union {
+ DECLARE_FLEX_ARRAY(struct sock_filter, insns);
+ DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
+ };
+};
+
+struct bpf_array_aux {
+ /* Programs with direct jumps into programs part of this array. */
+ struct list_head poke_progs;
+ struct bpf_map *map;
+ struct mutex poke_mutex;
+ struct work_struct work;
+};
+
+struct bpf_link {
+ atomic64_t refcnt;
+ u32 id;
+ enum bpf_link_type type;
+ const struct bpf_link_ops *ops;
+ struct bpf_prog *prog;
+
+ u32 flags;
+ enum bpf_attach_type attach_type;
+
+ /* rcu is used before freeing; work can be used to schedule that
+ * RCU-based freeing beforehand, so the two never overlap
+ */
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
+ /* whether the BPF link itself has "sleepable" semantics, which can differ
+ * from the underlying BPF program's "sleepable" semantics, as the BPF
+ * link's semantics are determined by the target attach hook
+ */
+ bool sleepable;
+};
+
+struct bpf_link_ops {
+ void (*release)(struct bpf_link *link);
+ /* deallocate link resources callback, called without RCU grace period
+ * waiting
+ */
+ void (*dealloc)(struct bpf_link *link);
+ /* deallocate link resources callback, called after RCU grace period;
+ * if either the underlying BPF program is sleepable or BPF link's
+ * target hook is sleepable, we'll go through tasks trace RCU GP and
+ * then "classic" RCU GP; this need for chaining tasks trace and
+ * classic RCU GPs is designated by setting the bpf_link->sleepable flag
+ */
+ void (*dealloc_deferred)(struct bpf_link *link);
+ int (*detach)(struct bpf_link *link);
+ int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog);
+ void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
+ int (*fill_link_info)(const struct bpf_link *link,
+ struct bpf_link_info *info);
+ int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
+ struct bpf_map *old_map);
+ __poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
+};
+
+struct bpf_tramp_link {
+ struct bpf_link link;
+ struct hlist_node tramp_hlist;
+ u64 cookie;
};
+struct bpf_shim_tramp_link {
+ struct bpf_tramp_link link;
+ struct bpf_trampoline *trampoline;
+};
+
+struct bpf_tracing_link {
+ struct bpf_tramp_link link;
+ struct bpf_trampoline *trampoline;
+ struct bpf_prog *tgt_prog;
+};
+
+struct bpf_raw_tp_link {
+ struct bpf_link link;
+ struct bpf_raw_event_map *btp;
+ u64 cookie;
+};
+
+struct bpf_link_primer {
+ struct bpf_link *link;
+ struct file *file;
+ int fd;
+ u32 id;
+};
+
+struct bpf_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+
+ /* BPF token-related delegation options */
+ u64 delegate_cmds;
+ u64 delegate_maps;
+ u64 delegate_progs;
+ u64 delegate_attachs;
+};
+
+struct bpf_token {
+ struct work_struct work;
+ atomic64_t refcnt;
+ struct user_namespace *userns;
+ u64 allowed_cmds;
+ u64 allowed_maps;
+ u64 allowed_progs;
+ u64 allowed_attachs;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
+};
+
+struct bpf_struct_ops_value;
+struct btf_member;
+
+#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+/**
+ * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
+ * define a BPF_MAP_TYPE_STRUCT_OPS map type composed
+ * of BPF_PROG_TYPE_STRUCT_OPS progs.
+ * @verifier_ops: A structure of callbacks that are invoked by the verifier
+ * when determining whether the struct_ops progs in the
+ * struct_ops map are valid.
+ * @init: A callback that is invoked a single time, and before any other
+ * callback, to initialize the structure. A nonzero return value means
+ * the subsystem could not be initialized.
+ * @check_member: When defined, a callback invoked by the verifier to allow
+ * the subsystem to determine if an entry in the struct_ops map
+ * is valid. A nonzero return value means that the map is
+ * invalid and should be rejected by the verifier.
+ * @init_member: A callback that is invoked for each member of the struct_ops
+ * map to allow the subsystem to initialize the member. A nonzero
+ * value means the member could not be initialized. This callback
+ * is exclusive with the @type, @type_id, @value_type, and
+ * @value_id fields.
+ * @reg: A callback that is invoked when the struct_ops map has been
+ * initialized and is being attached to. Zero means the struct_ops map
+ * has been successfully registered and is live. A nonzero return value
+ * means the struct_ops map could not be registered.
+ * @unreg: A callback that is invoked when the struct_ops map should be
+ * unregistered.
+ * @update: A callback that is invoked when the live struct_ops map is being
+ * updated to contain new values. This callback is only invoked when
+ * the struct_ops map is loaded with BPF_F_LINK. If not defined,
+ * it is assumed that the struct_ops map cannot be updated.
+ * @validate: A callback that is invoked after all of the members have been
+ * initialized. This callback should perform static checks on the
+ * map, meaning that it should either fail or succeed
+ * deterministically. A struct_ops map that has been validated may
+ * not necessarily succeed in being registered if the call to @reg
+ * fails. For example, a valid struct_ops map may be loaded, but
+ * then fail to be registered due to there being another active
+ * struct_ops map on the system in the subsystem already. For this
+ * reason, if this callback is not defined, the check is skipped as
+ * the struct_ops map will have final verification performed in
+ * @reg.
+ * @cfi_stubs: Pointer to a structure of stub functions for CFI. These stubs
+ * provide the correct Control Flow Integrity hashes for the
+ * trampolines generated by BPF struct_ops.
+ * @owner: The module that owns this struct_ops. Used for module reference
+ * counting to ensure the module providing the struct_ops cannot be
+ * unloaded while in use.
+ * @name: The name of the struct bpf_struct_ops object.
+ * @func_models: Function model for each member, used when generating trampolines.
+ */
+struct bpf_struct_ops {
+ const struct bpf_verifier_ops *verifier_ops;
+ int (*init)(struct btf *btf);
+ int (*check_member)(const struct btf_type *t,
+ const struct btf_member *member,
+ const struct bpf_prog *prog);
+ int (*init_member)(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata);
+ int (*reg)(void *kdata, struct bpf_link *link);
+ void (*unreg)(void *kdata, struct bpf_link *link);
+ int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
+ int (*validate)(void *kdata);
+ void *cfi_stubs;
+ struct module *owner;
+ const char *name;
+ struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+};
+
+/* Every member of a struct_ops type has an instance even a member is not
+ * an operator (function pointer). The "info" field will be assigned to
+ * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the
+ * argument information required by the verifier to verify the program.
+ *
+ * btf_ctx_access() will look up prog->aux->ctx_arg_info to find the
+ * corresponding entry for a given argument.
+ */
+struct bpf_struct_ops_arg_info {
+ struct bpf_ctx_arg_aux *info;
+ u32 cnt;
+};
+
+struct bpf_struct_ops_desc {
+ struct bpf_struct_ops *st_ops;
+
+ const struct btf_type *type;
+ const struct btf_type *value_type;
+ u32 type_id;
+ u32 value_id;
+
+ /* Collection of argument information for each member */
+ struct bpf_struct_ops_arg_info *arg_info;
+};
+
+enum bpf_struct_ops_state {
+ BPF_STRUCT_OPS_STATE_INIT,
+ BPF_STRUCT_OPS_STATE_INUSE,
+ BPF_STRUCT_OPS_STATE_TOBEFREE,
+ BPF_STRUCT_OPS_STATE_READY,
+};
+
+struct bpf_struct_ops_common_value {
+ refcount_t refcnt;
+ enum bpf_struct_ops_state state;
+};
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+/* This macro helps developers register a struct_ops type and generate its
+ * type information correctly. Always use it rather than calling
+ * __register_bpf_struct_ops() directly.
+ */
+#define register_bpf_struct_ops(st_ops, type) \
+ ({ \
+ struct bpf_struct_ops_##type { \
+ struct bpf_struct_ops_common_value common; \
+ struct type data ____cacheline_aligned_in_smp; \
+ }; \
+ BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \
+ __register_bpf_struct_ops(st_ops); \
+ })
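+
+/* A minimal registration sketch; the "my_ops" subsystem, its callbacks and
+ * their names are hypothetical, shown only to illustrate how the macro ties
+ * a kernel struct type to its struct_ops description:
+ *
+ *	struct my_ops {
+ *		int (*do_thing)(int arg);
+ *	};
+ *
+ *	static struct bpf_struct_ops bpf_my_ops = {
+ *		.verifier_ops	= &my_ops_verifier_ops,
+ *		.reg		= my_ops_reg,
+ *		.unreg		= my_ops_unreg,
+ *		.cfi_stubs	= &my_ops_cfi_stubs,
+ *		.name		= "my_ops",
+ *		.owner		= THIS_MODULE,
+ *	};
+ *
+ *	err = register_bpf_struct_ops(&bpf_my_ops, my_ops);
+ */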
+#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
+bool bpf_struct_ops_get(const void *kdata);
+void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff);
+int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+ struct bpf_tramp_link *link,
+ const struct btf_func_model *model,
+ void *stub_func,
+ void **image, u32 *image_off,
+ bool allow_alloc);
+void bpf_struct_ops_image_free(void *image);
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ return bpf_struct_ops_get(data);
+ else
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ bpf_struct_ops_put(data);
+ else
+ module_put(owner);
+}
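+
+/* Sketch of the intended pairing (hypothetical caller): a subsystem holding
+ * a kdata pointer pins it before use and unpins it afterwards, which works
+ * for both module-backed and bpf-backed struct_ops:
+ *
+ *	if (!bpf_try_module_get(ops, owner))
+ *		return -ENODEV;
+ *	ret = ops->do_thing(arg);
+ *	bpf_module_put(ops, owner);
+ */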
+int bpf_struct_ops_link_create(union bpf_attr *attr);
+u32 bpf_struct_ops_id(const void *kdata);
+
+#ifdef CONFIG_NET
+/* Defined here to avoid the need for a forward declaration */
+struct bpf_dummy_ops_state {
+ int val;
+};
+
+struct bpf_dummy_ops {
+ int (*test_1)(struct bpf_dummy_ops_state *cb);
+ int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
+ char a3, unsigned long a4);
+ int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
+};
+
+int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+#endif
+int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ struct btf *btf,
+ struct bpf_verifier_log *log);
+void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc);
+#else
+#define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; })
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ module_put(owner);
+}
+static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
+{
+ return -ENOTSUPP;
+}
+static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
+ void *key,
+ void *value)
+{
+ return -EINVAL;
+}
+static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+}
+
+static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
+{
+}
+
+#endif
+
+int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
+ const struct bpf_ctx_arg_aux *info, u32 cnt);
+
+#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
+int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype,
+ enum bpf_attach_type attach_type);
+void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
+#else
+static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype,
+ enum bpf_attach_type attach_type)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
+{
+}
+#endif
+
struct bpf_array {
struct bpf_map map;
u32 elem_size;
- /* 'ownership' of prog_array is claimed by the first program that
- * is going to use this map or by the first program which FD is stored
- * in the map to make sure that all callers and callees have the same
- * prog_type and JITed flag
- */
- enum bpf_prog_type owner_prog_type;
- bool owner_jited;
+ u32 index_mask;
+ struct bpf_array_aux *aux;
union {
- char value[0] __aligned(8);
- void *ptrs[0] __aligned(8);
- void __percpu *pptrs[0] __aligned(8);
+ DECLARE_FLEX_ARRAY(char, value) __aligned(8);
+ DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
+ DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
};
};
-#define MAX_TAIL_CALL_CNT 32
+/*
+ * The bpf_array_get_next_key() function may be used for all array-like
+ * maps, i.e., maps with u32 keys with range [0, ..., max_entries)
+ */
+int bpf_array_get_next_key(struct bpf_map *map, void *key, void *next_key);
+
+#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
+#define MAX_TAIL_CALL_CNT 33
+
+/* Maximum number of loops for bpf_loop and bpf_iter_num.
+ * It's enum to expose it (and thus make it discoverable) through BTF.
+ */
+enum {
+ BPF_MAX_LOOPS = 8 * 1024 * 1024,
+ BPF_MAX_TIMED_LOOPS = 0xffff,
+};
+
+#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
+ BPF_F_RDONLY_PROG | \
+ BPF_F_WRONLY | \
+ BPF_F_WRONLY_PROG)
+
+#define BPF_MAP_CAN_READ BIT(0)
+#define BPF_MAP_CAN_WRITE BIT(1)
+
+/* Maximum number of user-producer ring buffer samples that can be drained in
+ * a call to bpf_user_ringbuf_drain().
+ */
+#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
+
+static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
+{
+ u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+
+ /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
+ * not possible.
+ */
+ if (access_flags & BPF_F_RDONLY_PROG)
+ return BPF_MAP_CAN_READ;
+ else if (access_flags & BPF_F_WRONLY_PROG)
+ return BPF_MAP_CAN_WRITE;
+ else
+ return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
+}
+
+static inline bool bpf_map_flags_access_ok(u32 access_flags)
+{
+ return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
+ (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+}
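+
+/* A sketch of how a map implementation might use the two helpers above
+ * (hypothetical code, not taken from an in-tree map): reject a nonsensical
+ * flag combination at allocation time, then consult the derived
+ * capabilities when checking program-side access:
+ *
+ *	if (!bpf_map_flags_access_ok(attr->map_flags))
+ *		return ERR_PTR(-EINVAL);
+ *	...
+ *	if (!(bpf_map_flags_to_cap(map) & BPF_MAP_CAN_WRITE))
+ *		return -EACCES;
+ */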
+
+static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
+{
+ return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
+}
+
+static inline void bpf_map_owner_free(struct bpf_map *map)
+{
+ kfree(map->owner);
+}
struct bpf_event_entry {
struct perf_event *event;
@@ -227,61 +2198,568 @@ struct bpf_event_entry {
struct rcu_head rcu;
};
-u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+static inline bool map_type_contains_progs(struct bpf_map *map)
+{
+ return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
+ map->map_type == BPF_MAP_TYPE_DEVMAP ||
+ map->map_type == BPF_MAP_TYPE_CPUMAP;
+}
-bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
+
+const struct bpf_func_proto *bpf_get_perf_event_read_value_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
+typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
+ const struct bpf_insn *src,
+ struct bpf_insn *dst,
+ struct bpf_prog *prog,
+ u32 *target_size);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
-int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
-int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
+/* An array of programs to be executed under rcu read lock.
+ *
+ * Typical usage:
+ * ret = bpf_prog_run_array(rcu_dereference(bpf_prog_array), ctx, bpf_prog_run);
+ *
+ * The structure returned by bpf_prog_array_alloc() should be populated
+ * with program pointers and the last pointer must be NULL.
+ * The user has to hold a refcnt on each program and make sure the program
+ * is removed from the array before bpf_prog_put() is called on it.
+ * The 'struct bpf_prog_array *' should only be replaced with xchg()
+ * since other cpus are walking the array of pointers in parallel.
+ */
+struct bpf_prog_array_item {
+ struct bpf_prog *prog;
+ union {
+ struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ u64 bpf_cookie;
+ };
+};
+
+struct bpf_prog_array {
+ struct rcu_head rcu;
+ struct bpf_prog_array_item items[];
+};
+
+struct bpf_empty_prog_array {
+ struct bpf_prog_array hdr;
+ struct bpf_prog *null_prog;
+};
+
+/* To avoid allocating an empty bpf_prog_array for cgroups that
+ * don't have a bpf program attached, use one global 'bpf_empty_prog_array'.
+ * It will not be modified by the caller of bpf_prog_array_alloc()
+ * (since the caller requested prog_cnt == 0);
+ * that pointer should be 'freed' by bpf_prog_array_free().
+ */
+extern struct bpf_empty_prog_array bpf_empty_prog_array;
+
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+void bpf_prog_array_free(struct bpf_prog_array *progs);
+/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
+void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
+int bpf_prog_array_length(struct bpf_prog_array *progs);
+bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
+int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
+ __u32 __user *prog_ids, u32 cnt);
+
+void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
+ struct bpf_prog *old_prog);
+int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
+int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
+ struct bpf_prog *prog);
+int bpf_prog_array_copy_info(struct bpf_prog_array *array,
+ u32 *prog_ids, u32 request_cnt,
+ u32 *prog_cnt);
+int bpf_prog_array_copy(struct bpf_prog_array *old_array,
+ struct bpf_prog *exclude_prog,
+ struct bpf_prog *include_prog,
+ u64 bpf_cookie,
+ struct bpf_prog_array **new_array);
+
+struct bpf_run_ctx {};
+
+struct bpf_cg_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ const struct bpf_prog_array_item *prog_item;
+ int retval;
+};
+
+struct bpf_trace_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+ bool is_uprobe;
+};
+
+struct bpf_tramp_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+ struct bpf_run_ctx *saved_run_ctx;
+};
+
+static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
+{
+ struct bpf_run_ctx *old_ctx = NULL;
+
+#ifdef CONFIG_BPF_SYSCALL
+ old_ctx = current->bpf_ctx;
+ current->bpf_ctx = new_ctx;
+#endif
+ return old_ctx;
+}
+
+static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ current->bpf_ctx = old_ctx;
+#endif
+}
+
+/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
+#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
+/* BPF program asks to set CN on the packet. */
+#define BPF_RET_SET_CN (1 << 0)
+
+typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
+
+static __always_inline u32
+bpf_prog_run_array(const struct bpf_prog_array *array,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
+
+ if (unlikely(!array))
+ return ret;
+
+ run_ctx.is_uprobe = false;
+
+ migrate_disable();
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ migrate_enable();
+ return ret;
+}
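+
+/* A minimal caller sketch mirroring the "Typical usage" note above; the
+ * hook structure holding the rcu-protected array is hypothetical:
+ *
+ *	rcu_read_lock();
+ *	ret = bpf_prog_run_array(rcu_dereference(hook->effective_array),
+ *				 ctx, bpf_prog_run);
+ *	rcu_read_unlock();
+ *
+ * The result is the bitwise AND of all program return values, and 1 when
+ * the array is NULL or empty.
+ */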
+
+/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
+ *
+ * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
+ * overall. As a result, the array must be freed with
+ * bpf_prog_array_free_sleepable() so that it is only reclaimed after a
+ * tasks_trace rcu grace period.
+ *
+ * When a non-sleepable program is inside the array, we take the rcu read
+ * section and disable preemption for that program alone, so it can access
+ * rcu-protected dynamically sized maps.
+ */
+static __always_inline u32
+bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ might_fault();
+ RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
+
+ if (unlikely(!array))
+ return ret;
+
+ migrate_disable();
+
+ run_ctx.is_uprobe = true;
+
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ if (!prog->sleepable)
+ rcu_read_lock();
+
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+
+ if (!prog->sleepable)
+ rcu_read_unlock();
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ migrate_enable();
+ return ret;
+}
+
+bool bpf_jit_bypass_spec_v1(void);
+bool bpf_jit_bypass_spec_v4(void);
+
+#define bpf_rcu_lock_held() \
+ (rcu_read_lock_held() || rcu_read_lock_trace_held() || rcu_read_lock_bh_held())
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
+extern struct mutex bpf_stats_enabled_mutex;
-#define BPF_PROG_TYPE(_id, _ops) \
- extern const struct bpf_verifier_ops _ops;
+/*
+ * Block execution of BPF programs attached to instrumentation (perf,
+ * kprobes, tracepoints) to prevent deadlocks on map operations as any of
+ * these events can happen inside a region which holds a map bucket lock
+ * and can deadlock on it.
+ */
+static inline void bpf_disable_instrumentation(void)
+{
+ migrate_disable();
+ this_cpu_inc(bpf_prog_active);
+}
+
+static inline void bpf_enable_instrumentation(void)
+{
+ this_cpu_dec(bpf_prog_active);
+ migrate_enable();
+}
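+
+/* Usage sketch (assumed caller, e.g. a syscall-side update path): bracket
+ * the map operation so instrumentation programs cannot recurse into it:
+ *
+ *	bpf_disable_instrumentation();
+ *	err = map->ops->map_update_elem(map, key, value, flags);
+ *	bpf_enable_instrumentation();
+ */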
+
+extern const struct super_operations bpf_super_ops;
+extern const struct file_operations bpf_map_fops;
+extern const struct file_operations bpf_prog_fops;
+extern const struct file_operations bpf_iter_fops;
+extern const struct file_operations bpf_token_fops;
+
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
+ extern const struct bpf_prog_ops _name ## _prog_ops; \
+ extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
extern const struct bpf_map_ops _ops;
+#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
+#undef BPF_LINK_TYPE
+
+extern const struct bpf_prog_ops bpf_offload_prog_ops;
+extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
+extern const struct bpf_verifier_ops xdp_analyzer_ops;
struct bpf_prog *bpf_prog_get(u32 ufd);
-struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
-struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
+struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
+ bool attach_drv);
+void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
-struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
+void bpf_prog_inc(struct bpf_prog *prog);
+struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-int __bpf_prog_charge(struct user_struct *user, u32 pages);
-void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
+void bpf_prog_free_id(struct bpf_prog *prog);
+void bpf_map_free_id(struct bpf_map *map);
+
+struct btf_field *btf_record_find(const struct btf_record *rec,
+ u32 offset, u32 field_mask);
+void btf_record_free(struct btf_record *rec);
+void bpf_map_free_record(struct bpf_map *map);
+struct btf_record *btf_record_dup(const struct btf_record *rec);
+bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
+void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
+void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
+void bpf_obj_free_task_work(const struct btf_record *rec, void *obj);
+void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
+
+struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
-struct bpf_map *__bpf_map_get(struct fd f);
-struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
+
+/*
+ * The __bpf_map_get() and __btf_get_by_fd() functions parse a file
+ * descriptor and return a corresponding map or btf object.
+ * Their names are double underscored to emphasize the fact that they
+ * do not increase refcnt. To also increase refcnt use corresponding
+ * bpf_map_get() and btf_get_by_fd() functions.
+ */
+
+static inline struct bpf_map *__bpf_map_get(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &bpf_map_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
+
+static inline struct btf *__btf_get_by_fd(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &btf_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
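+
+/* Sketch of the pattern described above (hypothetical caller): parse the
+ * fd without touching the refcnt, then take a real reference only once the
+ * map is known to be wanted:
+ *
+ *	struct fd f = fdget(ufd);
+ *	struct bpf_map *map = __bpf_map_get(f);
+ *
+ *	if (!IS_ERR(map))
+ *		bpf_map_inc(map);
+ *	fdput(f);
+ *	return map;
+ */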
+
+void bpf_map_inc(struct bpf_map *map);
+void bpf_map_inc_with_uref(struct bpf_map *map);
+struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
+struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
-int bpf_map_precharge_memlock(u32 pages);
-void *bpf_map_area_alloc(size_t size);
+void *bpf_map_area_alloc(u64 size, int numa_node);
+void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
+bool bpf_map_write_active(const struct bpf_map *map);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
+int generic_map_lookup_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_delete_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
+struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
+
+int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
+ unsigned long nr_pages, struct page **page_array);
+#ifdef CONFIG_MEMCG
+void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node);
+void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node);
+void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+ gfp_t flags);
+void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
+ size_t align, gfp_t flags);
+#else
+/*
+ * These specialized allocators have to be macros for their allocations to be
+ * accounted separately (to have separate alloc_tag).
+ */
+#define bpf_map_kmalloc_node(_map, _size, _flags, _node) \
+ kmalloc_node(_size, _flags, _node)
+#define bpf_map_kmalloc_nolock(_map, _size, _flags, _node) \
+ kmalloc_nolock(_size, _flags, _node)
+#define bpf_map_kzalloc(_map, _size, _flags) \
+ kzalloc(_size, _flags)
+#define bpf_map_kvcalloc(_map, _n, _size, _flags) \
+ kvcalloc(_n, _size, _flags)
+#define bpf_map_alloc_percpu(_map, _size, _align, _flags) \
+ __alloc_percpu_gfp(_size, _align, _flags)
+#endif
+
+static inline int
+bpf_map_init_elem_count(struct bpf_map *map)
+{
+ size_t size = sizeof(*map->elem_count), align = size;
+ gfp_t flags = GFP_USER | __GFP_NOWARN;
+
+ map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
+ if (!map->elem_count)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void
+bpf_map_free_elem_count(struct bpf_map *map)
+{
+ free_percpu(map->elem_count);
+}
+
+static inline void bpf_map_inc_elem_count(struct bpf_map *map)
+{
+ this_cpu_inc(*map->elem_count);
+}
+
+static inline void bpf_map_dec_elem_count(struct bpf_map *map)
+{
+ this_cpu_dec(*map->elem_count);
+}
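+
+/* Sketch (hypothetical map code): a map that tracks its element count
+ * allocates the per-cpu counter at creation time, adjusts it on the update
+ * and delete paths, and frees it from its .map_free callback:
+ *
+ *	if (bpf_map_init_elem_count(&smap->map))
+ *		goto free_map;
+ *	...
+ *	bpf_map_inc_elem_count(&smap->map);
+ *	...
+ *	bpf_map_free_elem_count(&smap->map);
+ */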
extern int sysctl_unprivileged_bpf_disabled;
-int bpf_map_new_fd(struct bpf_map *map);
+bool bpf_token_capable(const struct bpf_token *token, int cap);
+
+static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
+{
+ return bpf_token_capable(token, CAP_PERFMON);
+}
+
+static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
+{
+ return bpf_token_capable(token, CAP_PERFMON);
+}
+
+static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
+{
+ return bpf_jit_bypass_spec_v1() ||
+ cpu_mitigations_off() ||
+ bpf_token_capable(token, CAP_PERFMON);
+}
+
+static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
+{
+ return bpf_jit_bypass_spec_v4() ||
+ cpu_mitigations_off() ||
+ bpf_token_capable(token, CAP_PERFMON);
+}
+
+int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
-int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
-int bpf_obj_get_user(const char __user *pathname);
+void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type);
+void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type, bool sleepable);
+int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
+int bpf_link_settle(struct bpf_link_primer *primer);
+void bpf_link_cleanup(struct bpf_link_primer *primer);
+void bpf_link_inc(struct bpf_link *link);
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
+void bpf_link_put(struct bpf_link *link);
+int bpf_link_new_fd(struct bpf_link *link);
+struct bpf_link *bpf_link_get_from_fd(u32 ufd);
+struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
+
+void bpf_token_inc(struct bpf_token *token);
+void bpf_token_put(struct bpf_token *token);
+int bpf_token_create(union bpf_attr *attr);
+struct bpf_token *bpf_token_get_from_fd(u32 ufd);
+int bpf_token_get_info_by_fd(struct bpf_token *token,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+
+bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
+bool bpf_token_allow_prog_type(const struct bpf_token *token,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type attach_type);
+
+int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
+int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
+struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
+ umode_t mode);
+
+#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
+#define DEFINE_BPF_ITER_FUNC(target, args...) \
+ extern int bpf_iter_ ## target(args); \
+ int __init bpf_iter_ ## target(args) { return 0; }
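+
+/* Example shape of a target definition (the "foo" target and its argument
+ * list are illustrative only):
+ *
+ *	DEFINE_BPF_ITER_FUNC(foo, struct bpf_iter_meta *meta, struct foo *foo)
+ *
+ * The emitted __init stub only exists so that BTF describes the context
+ * arguments seen by programs attaching to the "foo" target.
+ */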
+
+/*
+ * The task types of BPF task iterators.
+ *
+ * A BPF task iterator can be parameterized to visit only a subset of
+ * tasks.
+ *
+ * BPF_TASK_ITER_ALL (default)
+ * Iterate over resources of every task.
+ *
+ * BPF_TASK_ITER_TID
+ * Iterate over resources of a task/tid.
+ *
+ * BPF_TASK_ITER_TGID
+ * Iterate over resources of every task of a process / task group.
+ */
+enum bpf_iter_task_type {
+ BPF_TASK_ITER_ALL = 0,
+ BPF_TASK_ITER_TID,
+ BPF_TASK_ITER_TGID,
+};
+
+struct bpf_iter_aux_info {
+ /* for map_elem iter */
+ struct bpf_map *map;
+
+ /* for cgroup iter */
+ struct {
+ struct cgroup *start; /* starting cgroup */
+ enum bpf_cgroup_iter_order order;
+ } cgroup;
+ struct {
+ enum bpf_iter_task_type type;
+ u32 pid;
+ } task;
+};
+
+typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
+ union bpf_iter_link_info *linfo,
+ struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
+typedef const struct bpf_func_proto *
+(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
+
+enum bpf_iter_feature {
+ BPF_ITER_RESCHED = BIT(0),
+};
+
+#define BPF_ITER_CTX_ARG_MAX 2
+struct bpf_iter_reg {
+ const char *target;
+ bpf_iter_attach_target_t attach_target;
+ bpf_iter_detach_target_t detach_target;
+ bpf_iter_show_fdinfo_t show_fdinfo;
+ bpf_iter_fill_link_info_t fill_link_info;
+ bpf_iter_get_func_proto_t get_func_proto;
+ u32 ctx_arg_info_size;
+ u32 feature;
+ struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
+ const struct bpf_iter_seq_info *seq_info;
+};
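+
+/* Registration sketch for a hypothetical "foo" target, showing how the
+ * fields above are typically filled (all names are assumptions):
+ *
+ *	static const struct bpf_iter_reg foo_reg_info = {
+ *		.target			= "foo",
+ *		.attach_target		= foo_iter_attach,
+ *		.detach_target		= foo_iter_detach,
+ *		.ctx_arg_info_size	= 1,
+ *		.ctx_arg_info		= {
+ *			{ offsetof(struct bpf_iter__foo, foo),
+ *			  PTR_TO_BTF_ID_OR_NULL },
+ *		},
+ *		.seq_info		= &foo_seq_info,
+ *	};
+ *
+ *	return bpf_iter_reg_target(&foo_reg_info);
+ */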
+
+struct bpf_iter_meta {
+ __bpf_md_ptr(struct seq_file *, seq);
+ u64 session_id;
+ u64 seq_num;
+};
+
+struct bpf_iter__bpf_map_elem {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct bpf_map *, map);
+ __bpf_md_ptr(void *, key);
+ __bpf_md_ptr(void *, value);
+};
+
+int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
+void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
+int bpf_iter_prog_supported(struct bpf_prog *prog);
+const struct bpf_func_proto *
+bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
+int bpf_iter_new_fd(struct bpf_link *link);
+bool bpf_link_is_iter(struct bpf_link *link);
+struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
+int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
+void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
+
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
@@ -290,49 +2768,200 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
u64 flags);
-int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value, bool delete);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
-void bpf_fd_array_map_clear(struct bpf_map *map);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
-/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
- * forced to use 'long' read/writes to try to atomically copy long counters.
- * Best-effort only. No barriers here, since it _will_ race with concurrent
- * updates from BPF programs. Called from bpf syscall and mostly used with
- * size 8 or 16 bytes, so ask compiler to inline it.
- */
-static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+int bpf_get_file_flag(int flags);
+int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
+ size_t actual_size);
+
+/* verify correctness of eBPF program */
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
+#endif
+
+struct btf *bpf_get_btf_vmlinux(void);
+
+/* Map specifics */
+struct xdp_frame;
+struct sk_buff;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
+
+void __dev_flush(struct list_head *flush_list);
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+ const struct bpf_prog *xdp_prog);
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress);
+
+void __cpu_map_flush(struct list_head *flush_list);
+int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb);
+
+/* Return the map's NUMA node as specified by userspace */
+static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
- const long *lsrc = src;
- long *ldst = dst;
+ return (attr->map_flags & BPF_F_NUMA_NODE) ?
+ attr->numa_node : NUMA_NO_NODE;
+}
- size /= sizeof(long);
- while (size--)
- *ldst++ = *lsrc++;
+struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+int array_map_alloc_check(union bpf_attr *attr);
+
+int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_nf(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info);
+
+static inline bool bpf_tracing_ctx_access(int off, int size,
+ enum bpf_access_type type)
+{
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return true;
}
-/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
-#else
+static inline bool bpf_tracing_btf_ctx_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (!bpf_tracing_ctx_access(off, size, type))
+ return false;
+ return btf_ctx_access(off, size, type, prog, info);
+}
+
+int btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size, enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
+bool btf_struct_ids_match(struct bpf_verifier_log *log,
+ const struct btf *btf, u32 id, int off,
+ const struct btf *need_btf, u32 need_type_id,
+ bool strict);
+
+int btf_distill_func_proto(struct bpf_verifier_log *log,
+ struct btf *btf,
+ const struct btf_type *func_proto,
+ const char *func_name,
+ struct btf_func_model *m);
+
+struct bpf_reg_state;
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
+int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
+ struct btf *btf, const struct btf_type *t);
+const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key);
+int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key, int last_id);
+
+struct bpf_prog *bpf_prog_by_id(u32 id);
+struct bpf_link *bpf_link_by_id(u32 id);
+
+const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
+void bpf_task_storage_free(struct task_struct *task);
+void bpf_cgrp_storage_free(struct cgroup *cgroup);
+bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
+const struct btf_func_model *
+bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
+ const struct bpf_insn *insn);
+int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
+ u16 btf_fd_idx, u8 **func_addr);
+
+struct bpf_core_ctx {
+ struct bpf_verifier_log *log;
+ const struct btf *btf;
+};
+
+bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ const char *field_name, u32 btf_id, const char *suffix);
+
+bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
+ const struct btf *reg_btf, u32 reg_id,
+ const struct btf *arg_btf, u32 arg_id);
+
+int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
+ int relo_idx, void *insn);
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return !sysctl_unprivileged_bpf_disabled;
+}
+
+/* Not every bpf prog type has a bpf_ctx.
+ * For the prog types that do initialize the bpf_ctx, this function can
+ * be used to decide whether a kernel function is being called by a bpf
+ * program.
+ */
+static inline bool has_current_bpf_ctx(void)
+{
+ return !!current->bpf_ctx;
+}
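+
+/* Sketch (hypothetical function names): code shared between bpf helpers
+ * and regular kernel callers can branch on the calling context:
+ *
+ *	if (has_current_bpf_ctx())
+ *		return do_nonsleeping_variant();
+ *	return do_sleeping_variant();
+ */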
+
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
+
+void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size);
+void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
+void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+void bpf_prog_report_arena_violation(bool write, unsigned long addr, unsigned long fault_ip);
+
+#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
- enum bpf_prog_type type)
+static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
+ enum bpf_prog_type type,
+ bool attach_drv)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
- int i)
+
+static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
- return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
@@ -343,40 +2972,872 @@ static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
-static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
+static inline void bpf_prog_inc(struct bpf_prog *prog)
+{
+}
+
+static inline struct bpf_prog *__must_check
+bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
+static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops,
+ struct bpf_prog *prog, enum bpf_attach_type attach_type)
+{
+}
+
+static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type, bool sleepable)
+{
+}
+
+static inline int bpf_link_prime(struct bpf_link *link,
+ struct bpf_link_primer *primer)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_link_settle(struct bpf_link_primer *primer)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
+{
+}
+
+static inline void bpf_link_inc(struct bpf_link *link)
+{
+}
+
+static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+{
+ return NULL;
+}
+
+static inline void bpf_link_put(struct bpf_link *link)
+{
+}
+
+static inline int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
+}
+
+static inline void bpf_token_inc(struct bpf_token *token)
+{
+}
+
+static inline void bpf_token_put(struct bpf_token *token)
+{
+}
+
+static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int bpf_token_get_info_by_fd(struct bpf_token *token,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void __dev_flush(struct list_head *flush_list)
+{
+}
+
+struct xdp_frame;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
+
+static inline
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
+
+static inline
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
+
+static inline
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress)
{
return 0;
}
-static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+ struct sk_buff *skb,
+ const struct bpf_prog *xdp_prog)
+{
+ return 0;
+}
+
+static inline
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress)
+{
+ return 0;
+}
+
+static inline void __cpu_map_flush(struct list_head *flush_list)
+{
+}
+
+static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
+ struct xdp_frame *xdpf,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
+
+static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb)
{
+ return -EOPNOTSUPP;
+}
+
+static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
+ enum bpf_prog_type type)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline void bpf_map_put(struct bpf_map *map)
+{
+}
+
+static inline struct bpf_prog *bpf_prog_by_id(u32 id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline int btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size, enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag,
+ const char **field_name)
+{
+ return -EACCES;
+}
+
+static inline const struct bpf_func_proto *
+bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
+static inline void bpf_task_storage_free(struct task_struct *task)
+{
+}
+
+static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
+{
+ return false;
+}
+
+static inline const struct btf_func_model *
+bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
+ const struct bpf_insn *insn)
+{
+ return NULL;
+}
+
+static inline int
+bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
+ u16 btf_fd_idx, u8 **func_addr)
+{
+ return -ENOTSUPP;
+}
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return false;
+}
+
+static inline bool has_current_bpf_ctx(void)
+{
+ return false;
+}
+
+static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+}
+
+static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
+{
+}
+
+static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size)
+{
+}
+
+static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
+{
+}
+
+static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
+{
+}
+
+static inline void bpf_prog_report_arena_violation(bool write, unsigned long addr,
+ unsigned long fault_ip)
+{
+}
+#endif /* CONFIG_BPF_SYSCALL */
+
+static __always_inline int
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
+{
+ int ret = -EFAULT;
+
+ if (IS_ENABLED(CONFIG_BPF_EVENTS))
+ ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+ return ret;
+}
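+
+/* Usage sketch (hypothetical tracing-side caller); dst is zeroed on
+ * failure, so it can be consumed either way:
+ *
+ *	u64 val;
+ *
+ *	if (bpf_probe_read_kernel_common(&val, sizeof(val), unsafe_ptr) < 0)
+ *		... val reads as 0 here ...
+ */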
+
+void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len);
+
+static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+ enum bpf_prog_type type)
+{
+ return bpf_prog_get_type_dev(ufd, type, false);
+}
+
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+ struct bpf_map **used_maps, u32 len);
+
+bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
+
+int bpf_prog_offload_compile(struct bpf_prog *prog);
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+ struct bpf_prog *prog);
+
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
+int bpf_map_offload_update_elem(struct bpf_map *map,
+ void *key, void *value, u64 flags);
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
+int bpf_map_offload_get_next_key(struct bpf_map *map,
+ void *key, void *next_key);
+
+bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
+
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
+
+void unpriv_ebpf_notify(int new_state);
+
+#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
+int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux);
+void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
+void bpf_dev_bound_netdev_unregister(struct net_device *dev);
+
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+{
+ return aux->dev_bound;
+}
+
+static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
+{
+ return aux->offload_requested;
+}
+
+bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
+{
+ return unlikely(map->ops == &bpf_map_offload_ops);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
+void bpf_map_offload_map_free(struct bpf_map *map);
+u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
+int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+
+int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
+int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog);
+
+void sock_map_unhash(struct sock *sk);
+void sock_map_destroy(struct sock *sk);
+void sock_map_close(struct sock *sk, long timeout);
+#else
+static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
+ u32 func_id)
+{
+ return NULL;
+}
+
+static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
+ union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
+{
+}
+
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
+{
+ return false;
+}
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
+{
+ return false;
+}
+
+static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_map_offload_map_free(struct bpf_map *map)
+{
+}
+
+static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
+{
+ return 0;
+}
+
+static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline int sock_map_get_from_fd(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int sock_map_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
+ u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
+#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+
+static __always_inline void
+bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
+{
+ const struct bpf_prog_array_item *item;
+ struct bpf_prog *prog;
+
+ if (unlikely(!array))
+ return;
+
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ bpf_prog_inc_misses_counter(prog);
+ item++;
+ }
+}
+
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags);
+#else
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
+{
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
+ void *key, void *value)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
+ void *key, void *value,
+ u64 map_flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+
+#if defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL)
+
+struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags);
+struct bpf_key *bpf_lookup_system_key(u64 id);
+void bpf_key_put(struct bpf_key *bkey);
+int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+ struct bpf_dynptr *sig_p,
+ struct bpf_key *trusted_keyring);
+
+#else
+static inline struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags)
+{
+ return NULL;
+}
+
+static inline struct bpf_key *bpf_lookup_system_key(u64 id)
+{
+ return NULL;
+}
+
+static inline void bpf_key_put(struct bpf_key *bkey)
+{
+}
+
+static inline int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+ struct bpf_dynptr *sig_p,
+ struct bpf_key *trusted_keyring)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL) */
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_map_push_elem_proto;
+extern const struct bpf_func_proto bpf_map_pop_elem_proto;
+extern const struct bpf_func_proto bpf_map_peek_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
-extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
-extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
+extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_stack_sleepable_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto;
+extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
+extern const struct bpf_func_proto bpf_get_stack_proto_pe;
+extern const struct bpf_func_proto bpf_sock_map_update_proto;
+extern const struct bpf_func_proto bpf_sock_hash_update_proto;
+extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
+extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
+extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
+extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
+extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
+extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
+extern const struct bpf_func_proto bpf_spin_lock_proto;
+extern const struct bpf_func_proto bpf_spin_unlock_proto;
+extern const struct bpf_func_proto bpf_get_local_storage_proto;
+extern const struct bpf_func_proto bpf_strtol_proto;
+extern const struct bpf_func_proto bpf_strtoul_proto;
+extern const struct bpf_func_proto bpf_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_jiffies64_proto;
+extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
+extern const struct bpf_func_proto bpf_event_output_data_proto;
+extern const struct bpf_func_proto bpf_ringbuf_output_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
+extern const struct bpf_func_proto bpf_ringbuf_query_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_proto;
+extern const struct bpf_func_proto bpf_snprintf_btf_proto;
+extern const struct bpf_func_proto bpf_snprintf_proto;
+extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
+extern const struct bpf_func_proto bpf_sock_from_file_proto;
+extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_proto;
+extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
+extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
+extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_find_vma_proto;
+extern const struct bpf_func_proto bpf_loop_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
+extern const struct bpf_func_proto bpf_set_retval_proto;
+extern const struct bpf_func_proto bpf_get_retval_proto;
+extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
+extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
+extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;
+
+const struct bpf_func_proto *tracing_prog_func_proto(
+ enum bpf_func_id func_id, const struct bpf_prog *prog);
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
+#if defined(CONFIG_NET)
+bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr);
+#else
+static inline bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+static inline bool bpf_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_INET
+struct sk_reuseport_kern {
+ struct sk_buff *skb;
+ struct sock *sk;
+ struct sock *selected_sk;
+ struct sock *migrating_sk;
+ void *data_end;
+ u32 hash;
+ u32 reuseport_id;
+ bool bind_inany;
+};
+bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+
+u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+
+bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+
+u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+#else
+static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+
+static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+
+static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+#endif /* CONFIG_INET */
+
+enum bpf_text_poke_type {
+ BPF_MOD_NOP,
+ BPF_MOD_CALL,
+ BPF_MOD_JUMP,
+};
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+ enum bpf_text_poke_type new_t, void *old_addr,
+ void *new_addr);
+
+void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+ struct bpf_prog *new, struct bpf_prog *old);
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+int bpf_arch_text_invalidate(void *dst, size_t len);
+
+struct btf_id_set;
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+
+#define MAX_BPRINTF_VARARGS 12
+#define MAX_BPRINTF_BUF 1024
+
+/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
+ * arguments representation.
+ */
+#define MAX_BPRINTF_BIN_ARGS 512
+
+struct bpf_bprintf_buffers {
+ char bin_args[MAX_BPRINTF_BIN_ARGS];
+ char buf[MAX_BPRINTF_BUF];
+};
+
+struct bpf_bprintf_data {
+ u32 *bin_args;
+ char *buf;
+ bool get_bin_args;
+ bool get_buf;
+};
+
+int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
+ u32 num_args, struct bpf_bprintf_data *data);
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
+int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs);
+void bpf_put_buffers(void);
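The prepare/cleanup pair above is how printf-style helpers borrow the per-CPU
bprintf buffers. A minimal caller sketch, assuming a valid fmt/raw_args pair
and that data.bin_args is then fed to a bstr_printf()-style consumer (the
names mirror the prototypes above, not a specific call site):

	struct bpf_bprintf_data data = { .get_bin_args = true };
	int err;

	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
	if (err < 0)
		return err;
	/* data.bin_args now holds the decoded varargs */
	bpf_bprintf_cleanup(&data);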
+
+void bpf_prog_stream_init(struct bpf_prog *prog);
+void bpf_prog_stream_free(struct bpf_prog *prog);
+int bpf_prog_stream_read(struct bpf_prog *prog, enum bpf_stream_id stream_id, void __user *buf, int len);
+void bpf_stream_stage_init(struct bpf_stream_stage *ss);
+void bpf_stream_stage_free(struct bpf_stream_stage *ss);
+__printf(2, 3)
+int bpf_stream_stage_printk(struct bpf_stream_stage *ss, const char *fmt, ...);
+int bpf_stream_stage_commit(struct bpf_stream_stage *ss, struct bpf_prog *prog,
+ enum bpf_stream_id stream_id);
+int bpf_stream_stage_dump_stack(struct bpf_stream_stage *ss);
+
+#define bpf_stream_printk(ss, ...) bpf_stream_stage_printk(&ss, __VA_ARGS__)
+#define bpf_stream_dump_stack(ss) bpf_stream_stage_dump_stack(&ss)
+
+#define bpf_stream_stage(ss, prog, stream_id, expr) \
+ ({ \
+ bpf_stream_stage_init(&ss); \
+ (expr); \
+ bpf_stream_stage_commit(&ss, prog, stream_id); \
+ bpf_stream_stage_free(&ss); \
+ })
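As a usage sketch of the staging macros above (err is a hypothetical value to
report; BPF_STDOUT is assumed to be a valid enum bpf_stream_id):

	struct bpf_stream_stage ss;

	bpf_stream_stage(ss, prog, BPF_STDOUT, ({
		bpf_stream_printk(ss, "error %d\n", err);
		bpf_stream_dump_stack(ss);
	}));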
+
+#ifdef CONFIG_BPF_LSM
+void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
+void bpf_cgroup_atype_put(int cgroup_atype);
+#else
+static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
+static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
+#endif /* CONFIG_BPF_LSM */
+
+struct key;
+
+#ifdef CONFIG_KEYS
+struct bpf_key {
+ struct key *key;
+ bool has_ref;
+};
+#endif /* CONFIG_KEYS */
+
+static inline bool type_is_alloc(u32 type)
+{
+ return type & MEM_ALLOC;
+}
+
+static inline gfp_t bpf_memcg_flags(gfp_t flags)
+{
+ if (memcg_bpf_enabled())
+ return flags | __GFP_ACCOUNT;
+ return flags;
+}
+
+static inline bool bpf_is_subprog(const struct bpf_prog *prog)
+{
+ return prog->aux->func_idx != 0;
+}
+
+int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
+ const char **linep, int *nump);
+struct bpf_prog *bpf_prog_find_from_stack(void);
+
+int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog);
+int bpf_insn_array_ready(struct bpf_map *map);
+void bpf_insn_array_release(struct bpf_map *map);
+void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len);
+void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
+
+#ifdef CONFIG_BPF_SYSCALL
+void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
+#else
+static inline void
+bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
+{
+}
+#endif
+
+static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
+{
+ if (flags & ~allowed_flags)
+ return -EINVAL;
+
+ if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
+ return -EINVAL;
+
+ return 0;
+}
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_crypto.h b/include/linux/bpf_crypto.h
new file mode 100644
index 000000000000..a41e71d4e2d9
--- /dev/null
+++ b/include/linux/bpf_crypto.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_CRYPTO_H
+#define _BPF_CRYPTO_H
+
+struct bpf_crypto_type {
+ void *(*alloc_tfm)(const char *algo);
+ void (*free_tfm)(void *tfm);
+ int (*has_algo)(const char *algo);
+ int (*setkey)(void *tfm, const u8 *key, unsigned int keylen);
+ int (*setauthsize)(void *tfm, unsigned int authsize);
+ int (*encrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
+ int (*decrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
+ unsigned int (*ivsize)(void *tfm);
+ unsigned int (*statesize)(void *tfm);
+ u32 (*get_flags)(void *tfm);
+ struct module *owner;
+ char name[14];
+};
+
+int bpf_crypto_register_type(const struct bpf_crypto_type *type);
+int bpf_crypto_unregister_type(const struct bpf_crypto_type *type);
+
+#endif /* _BPF_CRYPTO_H */
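A hedged sketch of how a crypto backend might fill in and register this ops
table; the example_* callbacks are hypothetical stand-ins, not the kernel's
actual cipher glue:

	static void *example_alloc_tfm(const char *algo)
	{
		/* allocate and return a transform handle for 'algo',
		 * or an ERR_PTR() on failure
		 */
		return ERR_PTR(-ENOTSUPP);
	}

	static const struct bpf_crypto_type example_crypto_type = {
		.alloc_tfm	= example_alloc_tfm,
		/* .free_tfm, .setkey, .encrypt, ... wired up likewise */
		.owner		= THIS_MODULE,
		.name		= "example",
	};

	static int __init example_crypto_init(void)
	{
		return bpf_crypto_register_type(&example_crypto_type);
	}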
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h
new file mode 100644
index 000000000000..9d9ff755ec29
--- /dev/null
+++ b/include/linux/bpf_lirc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_LIRC_H
+#define _BPF_LIRC_H
+
+#include <uapi/linux/bpf.h>
+
+#ifdef CONFIG_BPF_LIRC_MODE2
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int lirc_prog_detach(const union bpf_attr *attr);
+int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+#else
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int lirc_prog_detach(const union bpf_attr *attr)
+{
+ return -EINVAL;
+}
+
+static inline int lirc_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* _BPF_LIRC_H */
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
new file mode 100644
index 000000000000..66432248cd81
--- /dev/null
+++ b/include/linux/bpf_local_storage.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Facebook
+ * Copyright 2020 Google LLC.
+ */
+
+#ifndef _BPF_LOCAL_STORAGE_H
+#define _BPF_LOCAL_STORAGE_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/bpf_mem_alloc.h>
+#include <uapi/linux/btf.h>
+
+#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
+
+struct bpf_local_storage_map_bucket {
+ struct hlist_head list;
+ raw_spinlock_t lock;
+};
+
+/* The map is not the primary owner of a bpf_local_storage_elem.
+ * Instead, the container object (e.g. sk->sk_bpf_storage) is.
+ *
+ * The map (bpf_local_storage_map) serves two purposes:
+ * 1. Define the size of the "local storage". It is
+ * the map's value_size.
+ *
+ * 2. Maintain a list to keep track of all elems such
+ * that they can be cleaned up during the map destruction.
+ *
+ * When a bpf local storage is being looked up for a
+ * particular object, the "bpf_map" pointer is actually used
+ * as the "key" to search in the list of elem in
+ * the respective bpf_local_storage owned by the object.
+ *
+ * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
+ * as the searching key.
+ */
+struct bpf_local_storage_map {
+ struct bpf_map map;
+ /* Lookup elem does not require accessing the map.
+ *
+ * Updating/Deleting requires a bucket lock to
+ * link/unlink the elem from the map. Multiple
+ * buckets are used to reduce lock contention.
+ */
+ struct bpf_local_storage_map_bucket *buckets;
+ u32 bucket_log;
+ u16 elem_size;
+ u16 cache_idx;
+ bool use_kmalloc_nolock;
+};
+
+struct bpf_local_storage_data {
+ /* smap is used as the searching key when looking up
+ * from the object's bpf_local_storage.
+ *
+ * Put it in the same cacheline as the data to minimize
+ * the number of cachelines accessed during the cache hit case.
+ */
+ struct bpf_local_storage_map __rcu *smap;
+ u8 data[] __aligned(8);
+};
+
+/* Linked to bpf_local_storage and bpf_local_storage_map */
+struct bpf_local_storage_elem {
+ struct hlist_node map_node; /* Linked to bpf_local_storage_map */
+ struct hlist_node snode; /* Linked to bpf_local_storage */
+ struct bpf_local_storage __rcu *local_storage;
+ union {
+ struct rcu_head rcu;
+ struct hlist_node free_node; /* used to postpone
+ * bpf_selem_free
+ * after raw_spin_unlock
+ */
+ };
+ /* 8 bytes hole */
+ /* The data is stored in another cacheline to minimize
+ * the number of cacheline accesses during a cache hit.
+ */
+ struct bpf_local_storage_data sdata ____cacheline_aligned;
+};
+
+struct bpf_local_storage {
+ struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
+ struct bpf_local_storage_map __rcu *smap;
+ struct hlist_head list; /* List of bpf_local_storage_elem */
+ void *owner; /* The object that owns the above "list" of
+ * bpf_local_storage_elem.
+ */
+ struct rcu_head rcu;
+ raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+ bool use_kmalloc_nolock;
+};
+
+/* U16_MAX is much more than enough for sk local storage
+ * considering a tcp_sock is ~2k.
+ */
+#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE \
+ min_t(u32, \
+ (KMALLOC_MAX_SIZE - MAX_BPF_STACK - \
+ sizeof(struct bpf_local_storage_elem)), \
+ (U16_MAX - sizeof(struct bpf_local_storage_elem)))
+
+#define SELEM(_SDATA) \
+ container_of((_SDATA), struct bpf_local_storage_elem, sdata)
+#define SDATA(_SELEM) (&(_SELEM)->sdata)
+
+struct bpf_local_storage_cache {
+ spinlock_t idx_lock;
+ u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
+};
+
+#define DEFINE_BPF_STORAGE_CACHE(name) \
+static struct bpf_local_storage_cache name = { \
+ .idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock), \
+}
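The cache macro above is how each owner-specific storage flavor declares its
per-map-type cache. A sketch of the pattern (mirroring, e.g., task storage;
the map_alloc callback shown is illustrative):

	DEFINE_BPF_STORAGE_CACHE(example_cache);

	static struct bpf_map *example_storage_map_alloc(union bpf_attr *attr)
	{
		return bpf_local_storage_map_alloc(attr, &example_cache, true);
	}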
+
+/* Helper functions for bpf_local_storage */
+int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
+
+struct bpf_map *
+bpf_local_storage_map_alloc(union bpf_attr *attr,
+ struct bpf_local_storage_cache *cache,
+ bool use_kmalloc_nolock);
+
+void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+/* If cacheit_lockit is false, this lookup function is lockless */
+static inline struct bpf_local_storage_data *
+bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ bool cacheit_lockit)
+{
+ struct bpf_local_storage_data *sdata;
+ struct bpf_local_storage_elem *selem;
+
+ /* Fast path (cache hit) */
+ sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
+ bpf_rcu_lock_held());
+ if (sdata && rcu_access_pointer(sdata->smap) == smap)
+ return sdata;
+
+ /* Slow path (cache miss) */
+ hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
+ rcu_read_lock_trace_held())
+ if (rcu_access_pointer(SDATA(selem)->smap) == smap)
+ break;
+
+ if (!selem)
+ return NULL;
+ if (cacheit_lockit)
+ __bpf_local_storage_insert_cache(local_storage, smap, selem);
+ return SDATA(selem);
+}
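Callers run this lookup under RCU protection; passing cacheit_lockit = true
additionally promotes the found elem into the owner's cache slot. A hedged
caller sketch (do_something() is a hypothetical consumer):

	struct bpf_local_storage_data *sdata;

	rcu_read_lock();
	sdata = bpf_local_storage_lookup(local_storage, smap, true);
	if (sdata)
		do_something(sdata->data);
	rcu_read_unlock();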
+
+void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
+
+void bpf_local_storage_map_free(struct bpf_map *map,
+ struct bpf_local_storage_cache *cache,
+ int __percpu *busy_counter);
+
+int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
+void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem);
+
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
+
+void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+
+struct bpf_local_storage_elem *
+bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
+ bool swap_uptrs, gfp_t gfp_flags);
+
+void bpf_selem_free(struct bpf_local_storage_elem *selem,
+ bool reuse_now);
+
+int
+bpf_local_storage_alloc(void *owner,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *first_selem,
+ gfp_t gfp_flags);
+
+struct bpf_local_storage_data *
+bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);
+
+u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
+
+#endif /* _BPF_LOCAL_STORAGE_H */
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
new file mode 100644
index 000000000000..643809cc78c3
--- /dev/null
+++ b/include/linux/bpf_lsm.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+
+#ifndef _LINUX_BPF_LSM_H
+#define _LINUX_BPF_LSM_H
+
+#include <linux/sched.h>
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/lsm_hooks.h>
+
+#ifdef CONFIG_BPF_LSM
+
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ RET bpf_lsm_##NAME(__VA_ARGS__);
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
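For instance, an entry such as LSM_HOOK(int, 0, file_open, struct file *file)
in lsm_hook_defs.h expands here to a declaration of the BPF-attachable hook:

	int bpf_lsm_file_open(struct file *file);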
+
+struct bpf_storage_blob {
+ struct bpf_local_storage __rcu *storage;
+};
+
+extern struct lsm_blob_sizes bpf_lsm_blob_sizes;
+
+int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
+ const struct bpf_prog *prog);
+
+bool bpf_lsm_is_sleepable_hook(u32 btf_id);
+bool bpf_lsm_is_trusted(const struct bpf_prog *prog);
+
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ if (unlikely(!inode->i_security))
+ return NULL;
+
+ return inode->i_security + bpf_lsm_blob_sizes.lbs_inode;
+}
+
+extern const struct bpf_func_proto bpf_inode_storage_get_proto;
+extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
+void bpf_inode_storage_free(struct inode *inode);
+
+void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range);
+int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags);
+int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str);
+bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog);
+
+#else /* !CONFIG_BPF_LSM */
+
+static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
+{
+ return false;
+}
+
+static inline bool bpf_lsm_is_trusted(const struct bpf_prog *prog)
+{
+ return false;
+}
+
+static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
+ const struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline void bpf_inode_storage_free(struct inode *inode)
+{
+}
+
+static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
+ bpf_func_t *bpf_func)
+{
+}
+
+static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str)
+{
+ return -EOPNOTSUPP;
+}
+static inline bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog)
+{
+ return false;
+}
+#endif /* CONFIG_BPF_LSM */
+
+#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
new file mode 100644
index 000000000000..e45162ef59bb
--- /dev/null
+++ b/include/linux/bpf_mem_alloc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_MEM_ALLOC_H
+#define _BPF_MEM_ALLOC_H
+#include <linux/compiler_types.h>
+#include <linux/workqueue.h>
+
+struct bpf_mem_cache;
+struct bpf_mem_caches;
+
+struct bpf_mem_alloc {
+ struct bpf_mem_caches __percpu *caches;
+ struct bpf_mem_cache __percpu *cache;
+ struct obj_cgroup *objcg;
+ bool percpu;
+ struct work_struct work;
+};
+
+/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
+ * Alloc and free are done with bpf_mem_cache_{alloc,free}().
+ *
+ * 'size = 0' is for a bpf_mem_alloc that manages a set of fixed-size
+ * caches covering many object sizes. Alloc and free are done with
+ * bpf_mem_{alloc,free}() and the size of the returned object is given
+ * by the size argument of bpf_mem_alloc().
+ * If percpu is true, an error is returned in order to avoid large
+ * memory consumption; the below bpf_mem_alloc_percpu_unit_init()
+ * should then be used for on-demand per-cpu allocation of each size.
+ */
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+/* Initialize a non-fixed-size percpu memory allocator */
+int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg);
+/* The percpu allocation with a specific unit size. */
+int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
+void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
+
+/* Check the allocation size for kmalloc equivalent allocator */
+int bpf_mem_alloc_check_size(bool percpu, size_t size);
+
+/* kmalloc/kfree equivalent: */
+void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
+void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
+
+/* kmem_cache_alloc/free equivalent: */
+void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
+void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_cache_raw_free(void *ptr);
+void *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags);
+
+#endif /* _BPF_MEM_ALLOC_H */
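To make the two modes above concrete, a usage sketch (process context
assumed, error handling abbreviated):

	struct bpf_mem_alloc ma;
	void *obj;

	/* Fixed-size mode: every object is 64 bytes. */
	if (!bpf_mem_alloc_init(&ma, 64, false)) {
		obj = bpf_mem_cache_alloc(&ma);
		if (obj)
			bpf_mem_cache_free(&ma, obj);
		bpf_mem_alloc_destroy(&ma);
	}

	/* Any-size (kmalloc-equivalent) mode: size = 0 at init. */
	if (!bpf_mem_alloc_init(&ma, 0, false)) {
		obj = bpf_mem_alloc(&ma, 192);
		if (obj)
			bpf_mem_free(&ma, obj);
		bpf_mem_alloc_destroy(&ma);
	}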
diff --git a/include/linux/bpf_mprog.h b/include/linux/bpf_mprog.h
new file mode 100644
index 000000000000..929225f7b095
--- /dev/null
+++ b/include/linux/bpf_mprog.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Isovalent */
+#ifndef __BPF_MPROG_H
+#define __BPF_MPROG_H
+
+#include <linux/bpf.h>
+
+/* bpf_mprog framework:
+ *
+ * bpf_mprog is a generic layer for multi-program attachment. In-kernel users
+ * of the bpf_mprog don't need to care about the dependency resolution
+ * internals; they can just consume it with a few API calls. Currently available
+ * dependency directives are BPF_F_{BEFORE,AFTER} which enable insertion of
+ * a BPF program or BPF link relative to an existing BPF program or BPF link
+ * inside the multi-program array as well as prepend and append behavior if
+ * no relative object was specified, see corresponding selftests for concrete
+ * examples (e.g. tc_links and tc_opts test cases of test_progs).
+ *
+ * Usage of bpf_mprog_{attach,detach,query}() core APIs with pseudo code:
+ *
+ * Attach case:
+ *
+ * struct bpf_mprog_entry *entry, *entry_new;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_attach(entry, &entry_new, [...]);
+ * if (!ret) {
+ * if (entry != entry_new) {
+ * // swap @entry to @entry_new at attach location
+ * // ensure there are no inflight users of @entry:
+ * synchronize_rcu();
+ * }
+ * bpf_mprog_commit(entry);
+ * } else {
+ * // error path, bail out, propagate @ret
+ * }
+ * // bpf_mprog user-side unlock
+ *
+ * Detach case:
+ *
+ * struct bpf_mprog_entry *entry, *entry_new;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_detach(entry, &entry_new, [...]);
+ * if (!ret) {
+ * // everything marked (*) is optional and depends on the use-case:
+ * // whether bpf_mprog_bundle should be freed or not
+ * if (!bpf_mprog_total(entry_new)) (*)
+ * entry_new = NULL (*)
+ * // swap @entry to @entry_new at attach location
+ * // ensure there are no inflight users of @entry:
+ * synchronize_rcu();
+ * bpf_mprog_commit(entry);
+ * if (!entry_new) (*)
+ * // free bpf_mprog_bundle (*)
+ * } else {
+ * // error path, bail out, propagate @ret
+ * }
+ * // bpf_mprog user-side unlock
+ *
+ * Query case:
+ *
+ * struct bpf_mprog_entry *entry;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_query(attr, uattr, entry);
+ * // bpf_mprog user-side unlock
+ *
+ * Data/fast path:
+ *
+ * struct bpf_mprog_entry *entry;
+ * struct bpf_mprog_fp *fp;
+ * struct bpf_prog *prog;
+ * int ret = [...];
+ *
+ * rcu_read_lock();
+ * // fetch active @entry from attach location
+ * [...]
+ * bpf_mprog_foreach_prog(entry, fp, prog) {
+ * ret = bpf_prog_run(prog, [...]);
+ * // process @ret from program
+ * }
+ * [...]
+ * rcu_read_unlock();
+ *
+ * bpf_mprog locking considerations:
+ *
+ * bpf_mprog_{attach,detach,query}() must be protected by an external lock
+ * (like RTNL in case of tcx).
+ *
+ * bpf_mprog_entry pointer can be an __rcu annotated pointer (in case of tcx
+ * the netdevice has tcx_ingress and tcx_egress __rcu pointer) which gets
+ * updated via rcu_assign_pointer() pointing to the active bpf_mprog_entry of
+ * the bpf_mprog_bundle.
+ *
+ * Fast path accesses the active bpf_mprog_entry within RCU critical section
+ * (in case of tcx it runs in NAPI which provides RCU protection there,
+ * other users might need explicit rcu_read_lock()). The bpf_mprog_commit()
+ * assumes that for the old bpf_mprog_entry there are no inflight users
+ * anymore.
+ *
+ * The READ_ONCE()/WRITE_ONCE() pairing for bpf_mprog_fp's prog access is for
+ * the replacement case where we don't swap the bpf_mprog_entry.
+ */
+
+#define bpf_mprog_foreach_tuple(entry, fp, cp, t) \
+ for (fp = &entry->fp_items[0], cp = &entry->parent->cp_items[0];\
+ ({ \
+ t.prog = READ_ONCE(fp->prog); \
+ t.link = cp->link; \
+ t.prog; \
+ }); \
+ fp++, cp++)
+
+#define bpf_mprog_foreach_prog(entry, fp, p) \
+ for (fp = &entry->fp_items[0]; \
+ (p = READ_ONCE(fp->prog)); \
+ fp++)
+
+#define BPF_MPROG_MAX 64
+
+struct bpf_mprog_fp {
+ struct bpf_prog *prog;
+};
+
+struct bpf_mprog_cp {
+ struct bpf_link *link;
+};
+
+struct bpf_mprog_entry {
+ struct bpf_mprog_fp fp_items[BPF_MPROG_MAX];
+ struct bpf_mprog_bundle *parent;
+};
+
+struct bpf_mprog_bundle {
+ struct bpf_mprog_entry a;
+ struct bpf_mprog_entry b;
+ struct bpf_mprog_cp cp_items[BPF_MPROG_MAX];
+ struct bpf_prog *ref;
+ atomic64_t revision;
+ u32 count;
+};
+
+struct bpf_tuple {
+ struct bpf_prog *prog;
+ struct bpf_link *link;
+};
+
+static inline struct bpf_mprog_entry *
+bpf_mprog_peer(const struct bpf_mprog_entry *entry)
+{
+ if (entry == &entry->parent->a)
+ return &entry->parent->b;
+ else
+ return &entry->parent->a;
+}
+
+static inline void bpf_mprog_bundle_init(struct bpf_mprog_bundle *bundle)
+{
+ BUILD_BUG_ON(sizeof(bundle->a.fp_items[0]) > sizeof(u64));
+ BUILD_BUG_ON(ARRAY_SIZE(bundle->a.fp_items) !=
+ ARRAY_SIZE(bundle->cp_items));
+
+ memset(bundle, 0, sizeof(*bundle));
+ atomic64_set(&bundle->revision, 1);
+ bundle->a.parent = bundle;
+ bundle->b.parent = bundle;
+}
+
+static inline void bpf_mprog_inc(struct bpf_mprog_entry *entry)
+{
+ entry->parent->count++;
+}
+
+static inline void bpf_mprog_dec(struct bpf_mprog_entry *entry)
+{
+ entry->parent->count--;
+}
+
+static inline int bpf_mprog_max(void)
+{
+ return ARRAY_SIZE(((struct bpf_mprog_entry *)NULL)->fp_items) - 1;
+}
+
+static inline int bpf_mprog_total(struct bpf_mprog_entry *entry)
+{
+ int total = entry->parent->count;
+
+ WARN_ON_ONCE(total > bpf_mprog_max());
+ return total;
+}
+
+static inline bool bpf_mprog_exists(struct bpf_mprog_entry *entry,
+ struct bpf_prog *prog)
+{
+ const struct bpf_mprog_fp *fp;
+ const struct bpf_prog *tmp;
+
+ bpf_mprog_foreach_prog(entry, fp, tmp) {
+ if (tmp == prog)
+ return true;
+ }
+ return false;
+}
+
+static inline void bpf_mprog_mark_for_release(struct bpf_mprog_entry *entry,
+ struct bpf_tuple *tuple)
+{
+ WARN_ON_ONCE(entry->parent->ref);
+ if (!tuple->link)
+ entry->parent->ref = tuple->prog;
+}
+
+static inline void bpf_mprog_complete_release(struct bpf_mprog_entry *entry)
+{
+ /* In the non-link case prog deletions can only drop the reference
+ * to the prog after the bpf_mprog_entry got swapped and the
+ * bpf_mprog ensured that there are no inflight users anymore.
+ *
+ * Paired with bpf_mprog_mark_for_release().
+ */
+ if (entry->parent->ref) {
+ bpf_prog_put(entry->parent->ref);
+ entry->parent->ref = NULL;
+ }
+}
+
+static inline void bpf_mprog_revision_new(struct bpf_mprog_entry *entry)
+{
+ atomic64_inc(&entry->parent->revision);
+}
+
+static inline void bpf_mprog_commit(struct bpf_mprog_entry *entry)
+{
+ bpf_mprog_complete_release(entry);
+ bpf_mprog_revision_new(entry);
+}
+
+static inline u64 bpf_mprog_revision(struct bpf_mprog_entry *entry)
+{
+ return atomic64_read(&entry->parent->revision);
+}
+
+static inline void bpf_mprog_entry_copy(struct bpf_mprog_entry *dst,
+ struct bpf_mprog_entry *src)
+{
+ memcpy(dst->fp_items, src->fp_items, sizeof(src->fp_items));
+}
+
+static inline void bpf_mprog_entry_clear(struct bpf_mprog_entry *dst)
+{
+ memset(dst->fp_items, 0, sizeof(dst->fp_items));
+}
+
+static inline void bpf_mprog_clear_all(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new)
+{
+ struct bpf_mprog_entry *peer;
+
+ peer = bpf_mprog_peer(entry);
+ bpf_mprog_entry_clear(peer);
+ peer->parent->count = 0;
+ *entry_new = peer;
+}
+
+static inline void bpf_mprog_entry_grow(struct bpf_mprog_entry *entry, int idx)
+{
+ int total = bpf_mprog_total(entry);
+
+ memmove(entry->fp_items + idx + 1,
+ entry->fp_items + idx,
+ (total - idx) * sizeof(struct bpf_mprog_fp));
+
+ memmove(entry->parent->cp_items + idx + 1,
+ entry->parent->cp_items + idx,
+ (total - idx) * sizeof(struct bpf_mprog_cp));
+}
+
+static inline void bpf_mprog_entry_shrink(struct bpf_mprog_entry *entry, int idx)
+{
+ /* Total array size is needed in this case to ensure the NULL
+ * entry is copied at the end.
+ */
+ int total = ARRAY_SIZE(entry->fp_items);
+
+ memmove(entry->fp_items + idx,
+ entry->fp_items + idx + 1,
+ (total - idx - 1) * sizeof(struct bpf_mprog_fp));
+
+ memmove(entry->parent->cp_items + idx,
+ entry->parent->cp_items + idx + 1,
+ (total - idx - 1) * sizeof(struct bpf_mprog_cp));
+}
+
+static inline void bpf_mprog_read(struct bpf_mprog_entry *entry, u32 idx,
+ struct bpf_mprog_fp **fp,
+ struct bpf_mprog_cp **cp)
+{
+ *fp = &entry->fp_items[idx];
+ *cp = &entry->parent->cp_items[idx];
+}
+
+static inline void bpf_mprog_write(struct bpf_mprog_fp *fp,
+ struct bpf_mprog_cp *cp,
+ struct bpf_tuple *tuple)
+{
+ WRITE_ONCE(fp->prog, tuple->prog);
+ cp->link = tuple->link;
+}
+
+int bpf_mprog_attach(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new,
+ struct bpf_prog *prog_new, struct bpf_link *link,
+ struct bpf_prog *prog_old,
+ u32 flags, u32 id_or_fd, u64 revision);
+
+int bpf_mprog_detach(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new,
+ struct bpf_prog *prog, struct bpf_link *link,
+ u32 flags, u32 id_or_fd, u64 revision);
+
+int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
+ struct bpf_mprog_entry *entry);
+
+static inline bool bpf_mprog_supported(enum bpf_prog_type type)
+{
+ switch (type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif /* __BPF_MPROG_H */
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
index b22efbdd2eb4..ddf896abcfb6 100644
--- a/include/linux/bpf_trace.h
+++ b/include/linux/bpf_trace.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BPF_TRACE_H__
#define __LINUX_BPF_TRACE_H__
-#include <trace/events/bpf.h>
#include <trace/events/xdp.h>
#endif /* __LINUX_BPF_TRACE_H__ */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 3d137c33d664..b13de31e163f 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -1,21 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* internal file - do not include directly */
#ifdef CONFIG_NET
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp,
+ struct xdp_md, struct xdp_buff)
+#ifdef CONFIG_CGROUP_BPF
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock,
+ struct bpf_sock, struct sock)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr,
+ struct bpf_sock_addr, struct bpf_sock_addr_kern)
+#endif
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops,
+ struct bpf_sock_ops, struct bpf_sock_ops_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg,
+ struct sk_msg_md, struct sk_msg)
+BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector,
+ struct __sk_buff, struct bpf_flow_dissector)
#endif
#ifdef CONFIG_BPF_EVENTS
-BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe,
+ bpf_user_pt_regs_t, struct pt_regs)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint,
+ __u64, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event,
+ struct bpf_perf_event_data, struct bpf_perf_event_data_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint,
+ struct bpf_raw_tracepoint_args, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable,
+ struct bpf_raw_tracepoint_args, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing,
+ void *, void *)
+#endif
+#ifdef CONFIG_CGROUP_BPF
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev,
+ struct bpf_cgroup_dev_ctx, struct bpf_cgroup_dev_ctx)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl,
+ struct bpf_sysctl, struct bpf_sysctl_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt,
+ struct bpf_sockopt, struct bpf_sockopt_kern)
+#endif
+#ifdef CONFIG_BPF_LIRC_MODE2
+BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
+ __u32, u32)
+#endif
+#ifdef CONFIG_INET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
+ struct sk_reuseport_md, struct sk_reuseport_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup,
+ struct bpf_sk_lookup, struct bpf_sk_lookup_kern)
+#endif
+#if defined(CONFIG_BPF_JIT)
+BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
+ void *, void *)
+BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension,
+ void *, void *)
+#ifdef CONFIG_BPF_LSM
+BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
+ void *, void *)
+#endif /* CONFIG_BPF_LSM */
+#endif
+BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall,
+ void *, void *)
+#ifdef CONFIG_NETFILTER_BPF_LINK
+BPF_PROG_TYPE(BPF_PROG_TYPE_NETFILTER, netfilter,
+ struct bpf_nf_ctx, struct bpf_nf_ctx)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
@@ -24,6 +90,11 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
#ifdef CONFIG_CGROUPS
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGRP_STORAGE, cgrp_storage_map_ops)
+#endif
+#ifdef CONFIG_CGROUP_BPF
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
@@ -31,7 +102,56 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
#ifdef CONFIG_PERF_EVENTS
-BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
+#ifdef CONFIG_BPF_LSM
+BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
+#ifdef CONFIG_NET
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
+#if defined(CONFIG_XDP_SOCKETS)
+BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
+#endif
+#ifdef CONFIG_INET
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
+#endif
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
+#if defined(CONFIG_BPF_JIT)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARENA, arena_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_INSN_ARRAY, insn_array_map_ops)
+
+BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
+#ifdef CONFIG_CGROUP_BPF
+BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
+#endif
+BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
+#ifdef CONFIG_NET
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit)
+BPF_LINK_TYPE(BPF_LINK_TYPE_SOCKMAP, sockmap)
+#endif
+#ifdef CONFIG_PERF_EVENTS
+BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
+#endif
+BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
+BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
+BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi)
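These entries are X-macros: a consumer defines BPF_PROG_TYPE, BPF_MAP_TYPE
and BPF_LINK_TYPE to the expansion it needs, includes this header, then
undefines them. A sketch of the common table-building pattern (simplified
from how kernel/bpf/syscall.c consumes the header):

	#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
		[_id] = &_name##_prog_ops,
	#define BPF_MAP_TYPE(_id, _ops)
	#define BPF_LINK_TYPE(_id, _name)
	static const struct bpf_prog_ops * const bpf_prog_types[] = {
	#include <linux/bpf_types.h>
	};
	#undef BPF_PROG_TYPE
	#undef BPF_MAP_TYPE
	#undef BPF_LINK_TYPE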
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 8e5d31f6faef..130bcbd66f60 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -1,113 +1,1090 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1
#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
+#include <linux/tnum.h>
- /* Just some arbitrary values so we can safely do math without overflowing and
- * are obviously wrong for any sort of memory access.
- */
-#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
-#define BPF_REGISTER_MIN_RANGE -1
+/* Maximum variable offset umax_value permitted when resolving memory accesses.
+ * In practice this is far bigger than any realistic pointer offset; this limit
+ * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
+ */
+#define BPF_MAX_VAR_OFF (1 << 29)
+/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
+ * that converting umax_value to int cannot overflow.
+ */
+#define BPF_MAX_VAR_SIZ (1 << 29)
+/* size of tmp_str_buf in bpf_verifier.
+ * we need at least 306 bytes to fit full stack mask representation
+ * (in the "-8,-16,...,-512" form)
+ */
+#define TMP_STR_BUF_LEN 320
+/* Patch buffer size */
+#define INSN_BUF_SIZE 32
+
+#define ITER_PREFIX "bpf_iter_"
+
+enum bpf_iter_state {
+ BPF_ITER_STATE_INVALID, /* for non-first slot */
+ BPF_ITER_STATE_ACTIVE,
+ BPF_ITER_STATE_DRAINED,
+};
struct bpf_reg_state {
+ /* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
+ /*
+ * Fixed part of pointer offset, pointer types only.
+ * Or constant delta between "linked" scalars with the same ID.
+ */
+ s32 off;
union {
- /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
- s64 imm;
-
- /* valid when type == PTR_TO_PACKET* */
- struct {
- u16 off;
- u16 range;
- };
+ /* valid when type == PTR_TO_PACKET */
+ int range;
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
- struct bpf_map *map_ptr;
+ struct {
+ struct bpf_map *map_ptr;
+ /* To distinguish map lookups from outer map
+ * the map_uid is non-zero for registers
+ * pointing to inner maps.
+ */
+ u32 map_uid;
+ };
+
+ /* for PTR_TO_BTF_ID */
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ };
+
+ struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ u32 mem_size;
+ u32 dynptr_id; /* for dynptr slices */
+ };
+
+ /* For dynptr stack slots */
+ struct {
+ enum bpf_dynptr_type type;
+ /* A dynptr is 16 bytes so it takes up 2 stack slots.
+ * We need to track which slot is the first slot
+ * to protect against cases where the user may try to
+ * pass in an address starting at the second slot of the
+ * dynptr.
+ */
+ bool first_slot;
+ } dynptr;
+
+ /* For bpf_iter stack slots */
+ struct {
+ /* BTF container and BTF type ID describing
+ * struct bpf_iter_<type> of an iterator state
+ */
+ struct btf *btf;
+ u32 btf_id;
+ /* packing following two fields to fit iter state into 16 bytes */
+ enum bpf_iter_state state:2;
+ int depth:30;
+ } iter;
+
+ /* For irq stack slots */
+ struct {
+ enum {
+ IRQ_NATIVE_KFUNC,
+ IRQ_LOCK_KFUNC,
+ } kfunc_class;
+ } irq;
+
+ /* Max size from any of the above. */
+ struct {
+ unsigned long raw1;
+ unsigned long raw2;
+ } raw;
+
+ u32 subprogno; /* for PTR_TO_FUNC */
};
- u32 id;
+ /* For scalar types (SCALAR_VALUE), this represents our knowledge of
+ * the actual value.
+ * For pointer types, this represents the variable part of the offset
+ * from the pointed-to object, and is shared with all bpf_reg_states
+ * with the same id as us.
+ */
+ struct tnum var_off;
/* Used to determine if any memory access using this register will
- * result in a bad access. These two fields must be last.
- * See states_equal()
+ * result in a bad access.
+ * These refer to the same value as var_off, not necessarily the actual
+ * contents of the register.
+ */
+ s64 smin_value; /* minimum possible (s64)value */
+ s64 smax_value; /* maximum possible (s64)value */
+ u64 umin_value; /* minimum possible (u64)value */
+ u64 umax_value; /* maximum possible (u64)value */
+ s32 s32_min_value; /* minimum possible (s32)value */
+ s32 s32_max_value; /* maximum possible (s32)value */
+ u32 u32_min_value; /* minimum possible (u32)value */
+ u32 u32_max_value; /* maximum possible (u32)value */
+ /* For PTR_TO_PACKET, used to find other pointers with the same variable
+ * offset, so they can share range knowledge.
+ * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
+ * came from, when one is tested for != NULL.
+ * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
+ * for the purpose of tracking that it's freed.
+ * For PTR_TO_SOCKET this is used to share which pointers retain the
+ * same reference to the socket, to determine proper reference freeing.
+ * For stack slots that are dynptrs, this is used to track references to
+ * the dynptr to determine proper reference freeing.
+ * Similarly to dynptrs, we use ID to track "belonging" of a reference
+ * to a specific instance of bpf_iter.
*/
- s64 min_value;
- u64 max_value;
- u32 min_align;
- u32 aux_off;
- u32 aux_off_align;
- bool value_from_signed;
+ /*
+ * Upper bit of ID is used to remember relationship between "linked"
+ * registers. Example:
+ * r1 = r2; both will have r1->id == r2->id == N
+ * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
+ */
+#define BPF_ADD_CONST (1U << 31)
+ u32 id;
+ /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
+ * from a pointer-cast helper, bpf_sk_fullsock() and
+ * bpf_tcp_sock().
+ *
+ * Consider the following where "sk" is a reference counted
+ * pointer returned from "sk = bpf_sk_lookup_tcp();":
+ *
+ * 1: sk = bpf_sk_lookup_tcp();
+ * 2: if (!sk) { return 0; }
+ * 3: fullsock = bpf_sk_fullsock(sk);
+ * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
+ * 5: tp = bpf_tcp_sock(fullsock);
+ * 6: if (!tp) { bpf_sk_release(sk); return 0; }
+ * 7: bpf_sk_release(sk);
+ * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
+ *
+ * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
+ * "tp" ptr should be invalidated also. In order to do that,
+ * the reg holding "fullsock" and "sk" need to remember
+ * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
+ * such that the verifier can reset all regs which have
+ * ref_obj_id matching the sk_reg->id.
+ *
+ * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
+ * sk_reg->id is kept for NULL-marking purposes only.
+ * After NULL-marking is done, sk_reg->id can be reset to 0.
+ *
+ * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
+ * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
+ *
+ * After "tp = bpf_tcp_sock(fullsock);" at line 5,
+ * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
+ * which is the same as sk_reg->ref_obj_id.
+ *
+ * From the verifier perspective, if sk, fullsock and tp
+ * are not NULL, they are the same ptr with different
+ * reg->type. In particular, bpf_sk_release(tp) is also
+ * allowed and has the same effect as bpf_sk_release(sk).
+ */
+ u32 ref_obj_id;
+ /* Inside the callee two registers can be both PTR_TO_STACK like
+ * R1=fp-8 and R2=fp-8, but one of them points to this function stack
+ * while the other points to the caller's stack. To differentiate them 'frameno'
+ * is used which is an index in bpf_verifier_state->frame[] array
+ * pointing to bpf_func_state.
+ */
+ u32 frameno;
+ /* Tracks subreg definition. The stored value is the insn_idx of the
+ * writing insn. This is safe because subreg_def is used before any insn
+ * patching which only happens after main verification finished.
+ */
+ s32 subreg_def;
+ /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
+ bool precise;
};
enum bpf_stack_slot_type {
STACK_INVALID, /* nothing was stored in this stack slot */
STACK_SPILL, /* register spilled into stack */
- STACK_MISC /* BPF program wrote some data into this slot */
+ STACK_MISC, /* BPF program wrote some data into this slot */
+ STACK_ZERO, /* BPF program wrote constant zero */
+ /* A dynptr is stored in this stack slot. The type of dynptr
+ * is stored in bpf_stack_state->spilled_ptr.dynptr.type
+ */
+ STACK_DYNPTR,
+ STACK_ITER,
+ STACK_IRQ_FLAG,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
+ (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
+ (1 << BPF_REG_5))
+
+#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
+#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
+
+struct bpf_stack_state {
+ struct bpf_reg_state spilled_ptr;
+ u8 slot_type[BPF_REG_SIZE];
+};
+
+struct bpf_reference_state {
+ /* Each reference object has a type. The types are distinct bit flags
+ * so that the lock variants can be grouped under REF_TYPE_LOCK_MASK.
+ */
+ enum ref_state_type {
+ REF_TYPE_PTR = (1 << 1),
+ REF_TYPE_IRQ = (1 << 2),
+ REF_TYPE_LOCK = (1 << 3),
+ REF_TYPE_RES_LOCK = (1 << 4),
+ REF_TYPE_RES_LOCK_IRQ = (1 << 5),
+ REF_TYPE_LOCK_MASK = REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
+ } type;
+ /* Track each reference created with a unique id, even if the same
+ * instruction creates the reference multiple times (eg, via CALL).
+ */
+ int id;
+ /* Instruction where the allocation of this reference occurred. This
+ * is used purely to inform the user of a reference leak.
+ */
+ int insn_idx;
+ /* Use to keep track of the source object of a lock, to ensure
+ * it matches on unlock.
+ */
+ void *ptr;
+};
+
+struct bpf_retval_range {
+ s32 minval;
+ s32 maxval;
+};
+
/* state of the program:
* type of all registers and stack info
*/
-struct bpf_verifier_state {
+struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
- u8 stack_slot_type[MAX_BPF_STACK];
- struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+ /* index of call instruction that called into this func */
+ int callsite;
+ /* stack frame number of this function state from pov of
+ * enclosing bpf_verifier_state.
+ * 0 = main function, 1 = first callee.
+ */
+ u32 frameno;
+ /* subprog number == index within subprog_info
+ * zero == main subprog
+ */
+ u32 subprogno;
+ /* Every bpf_timer_start will increment async_entry_cnt.
+ * It's used to distinguish:
+ * void foo(void) { for(;;); }
+ * void foo(void) { bpf_timer_set_callback(,foo); }
+ */
+ u32 async_entry_cnt;
+ struct bpf_retval_range callback_ret_range;
+ bool in_callback_fn;
+ bool in_async_callback_fn;
+ bool in_exception_callback_fn;
+ /* For callback calling functions that limit number of possible
+ * callback executions (e.g. bpf_loop) keeps track of current
+ * simulated iteration number.
+ * Value in frame N refers to number of times callback with frame
+ * N+1 was simulated, e.g. for the following call:
+ *
+ * bpf_loop(..., fn, ...); | suppose current frame is N
+ * | fn would be simulated in frame N+1
+ * | number of simulations is tracked in frame N
+ */
+ u32 callback_depth;
+
+ /* The following fields should be last. See copy_func_state() */
+ /* The state of the stack. Each element of the array describes BPF_REG_SIZE
+ * (i.e. 8) bytes worth of stack memory.
+ * stack[0] represents bytes [*(r10-8)..*(r10-1)]
+ * stack[1] represents bytes [*(r10-16)..*(r10-9)]
+ * ...
+ * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
+ */
+ struct bpf_stack_state *stack;
+ /* Size of the current stack, in bytes. The stack state is tracked above, in
+ * `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
+ */
+ int allocated_stack;
};
+#define MAX_CALL_FRAMES 8
+
+/* instruction history flags, used in bpf_jmp_history_entry.flags field */
+enum {
+ /* instruction references stack slot through PTR_TO_STACK register;
+ * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
+ * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
+ * 8 bytes per slot, so slot index (spi) is [0, 63])
+ */
+ INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
+
+ INSN_F_SPI_MASK = 0x3f, /* 6 bits */
+ INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
+
+ INSN_F_STACK_ACCESS = BIT(9),
+
+ INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
+ INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
+ /* total 12 bits are used now. */
+};
+
+static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
+static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
+
+struct bpf_jmp_history_entry {
+ u32 idx;
+ /* insn idx can't be bigger than 1 million */
+ u32 prev_idx : 20;
+ /* special INSN_F_xxx flags */
+ u32 flags : 12;
+ /* additional registers that need precision tracking when this
+ * jump is backtracked, vector of six 10-bit records
+ */
+ u64 linked_regs;
+};
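Given the packing described above, a minimal decode sketch for a stack-access
record (hist is a hypothetical struct bpf_jmp_history_entry pointer):

	u32 frameno = hist->flags & INSN_F_FRAMENO_MASK;
	u32 spi = (hist->flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
	bool stack_access = hist->flags & INSN_F_STACK_ACCESS;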
+
+/* Maximum number of register states that can exist at once */
+#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
+struct bpf_verifier_state {
+ /* call stack tracking */
+ struct bpf_func_state *frame[MAX_CALL_FRAMES];
+ struct bpf_verifier_state *parent;
+ /* Acquired reference states */
+ struct bpf_reference_state *refs;
+ /*
+ * 'branches' field is the number of branches left to explore:
+ * 0 - all possible paths from this state reached bpf_exit or
+ * were safely pruned
+ * 1 - at least one path is being explored.
+ * This state hasn't reached bpf_exit
+ * 2 - at least two paths are being explored.
+ * This state is an immediate parent of two children.
+ * One is fallthrough branch with branches==1 and another
+ * state is pushed into stack (to be explored later) also with
+ * branches==1. The parent of this state has branches==1.
+ * The verifier state tree connected via 'parent' pointer looks like:
+ * 1
+ * 1
+ * 2 -> 1 (first 'if' pushed into stack)
+ * 1
+ * 2 -> 1 (second 'if' pushed into stack)
+ * 1
+ * 1
+ * 1 bpf_exit.
+ *
+ * Once do_check() reaches bpf_exit, it calls update_branch_counts()
+ * and the verifier state tree will look:
+ * 1
+ * 1
+ * 2 -> 1 (first 'if' pushed into stack)
+ * 1
+ * 1 -> 1 (second 'if' pushed into stack)
+ * 0
+ * 0
+ * 0 bpf_exit.
+ * After pop_stack() the do_check() will resume at second 'if'.
+ *
+ * If is_state_visited() sees a state with branches > 0 it means
+ * there is a loop. If such state is exactly equal to the current state
+ * it's an infinite loop. Note states_equal() checks for states
+ * equivalency, so two states being 'states_equal' does not mean
+ * infinite loop. The exact comparison is provided by
+ * states_maybe_looping() function. It's a stronger pre-check and
+ * much faster than states_equal().
+ *
+ * This algorithm may not find all possible infinite loops or
+ * loop iteration count may be too high.
+ * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
+ */
+ u32 branches;
+ u32 insn_idx;
+ u32 curframe;
+
+ u32 acquired_refs;
+ u32 active_locks;
+ u32 active_preempt_locks;
+ u32 active_irq_id;
+ u32 active_lock_id;
+ void *active_lock_ptr;
+ u32 active_rcu_locks;
+
+ bool speculative;
+ bool in_sleepable;
+ bool cleaned;
+
+ /* first and last insn idx of this verifier state */
+ u32 first_insn_idx;
+ u32 last_insn_idx;
+ /* if this state is a backedge state then equal_state
+ * records cached state to which this state is equal.
+ */
+ struct bpf_verifier_state *equal_state;
+ /* jmp history recorded from first to last.
+ * backtracking is using it to go from last to first.
+ * For most states jmp_history_cnt is [0-3].
+ * For loops can go up to ~40.
+ */
+ struct bpf_jmp_history_entry *jmp_history;
+ u32 jmp_history_cnt;
+ u32 dfs_depth;
+ u32 callback_unroll_depth;
+ u32 may_goto_depth;
+};
+
+#define bpf_get_spilled_reg(slot, frame, mask) \
+ (((slot < frame->allocated_stack / BPF_REG_SIZE) && \
+ ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
+ ? &frame->stack[slot].spilled_ptr : NULL)
+
+/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
+#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \
+ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \
+ iter < frame->allocated_stack / BPF_REG_SIZE; \
+ iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
+
+#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
+ ({ \
+ struct bpf_verifier_state *___vstate = __vst; \
+ int ___i, ___j; \
+ for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \
+ struct bpf_reg_state *___regs; \
+ __state = ___vstate->frame[___i]; \
+ ___regs = __state->regs; \
+ for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \
+ __reg = &___regs[___j]; \
+ (void)(__expr); \
+ } \
+ bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
+ if (!__reg) \
+ continue; \
+ (void)(__expr); \
+ } \
+ } \
+ })
+
+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+ bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
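A hedged sketch of how this iterator is typically used, visiting every
register and spilled slot across all frames (mark_reg_invalid() stands in
for whatever per-register action the caller needs):

	struct bpf_func_state *state;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
		if (reg->ref_obj_id == ref_obj_id)
			mark_reg_invalid(env, reg);
	}));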
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
- struct bpf_verifier_state_list *next;
+ struct list_head node;
+ u32 miss_cnt;
+ u32 hit_cnt:31;
+ u32 in_free_list:1;
+};
+
+struct bpf_loop_inline_state {
+ unsigned int initialized:1; /* set to true upon first entry */
+ unsigned int fit_for_inline:1; /* true if callback function is the same
+ * at each call and flags are always zero
+ */
+ u32 callback_subprogno; /* valid when fit_for_inline is true */
+};
+
+/* pointer and state for maps */
+struct bpf_map_ptr_state {
+ struct bpf_map *map_ptr;
+ bool poison;
+ bool unpriv;
+};
+
+/* Possible states for alu_state member. */
+#define BPF_ALU_SANITIZE_SRC (1U << 0)
+#define BPF_ALU_SANITIZE_DST (1U << 1)
+#define BPF_ALU_NEG_VALUE (1U << 2)
+#define BPF_ALU_NON_POINTER (1U << 3)
+#define BPF_ALU_IMMEDIATE (1U << 4)
+#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
+ BPF_ALU_SANITIZE_DST)
+
+/*
+ * An array of BPF instructions.
+ * Primary usage: return value of bpf_insn_successors.
+ */
+struct bpf_iarray {
+ int cnt;
+ u32 items[];
};
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
- struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ struct bpf_map_ptr_state map_ptr_state;
+ s32 call_imm; /* saved imm field of call insn */
+ u32 alu_limit; /* limit for add/sub register with pointer */
+ struct {
+ u32 map_index; /* index into used_maps[] */
+ u32 map_off; /* offset from value base address */
+ };
+ struct {
+ enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
+ union {
+ struct {
+ struct btf *btf;
+ u32 btf_id; /* btf_id for struct typed var */
+ };
+ u32 mem_size; /* mem_size for non-struct typed var */
+ };
+ } btf_var;
+ /* if instruction is a call to bpf_loop this field tracks
+ * the state of the relevant registers to make decision about inlining
+ */
+ struct bpf_loop_inline_state loop_inline_state;
};
+ union {
+ /* remember the size of type passed to bpf_obj_new to rewrite R1 */
+ u64 obj_new_size;
+ /* remember the offset of node field within type to rewrite */
+ u64 insert_off;
+ };
+ struct bpf_iarray *jt; /* jump table for gotox or bpf_tailcall call instruction */
+ struct btf_struct_meta *kptr_struct_meta;
+ u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int converted_op_size; /* the valid value width after perceived conversion */
+ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+ bool nospec; /* do not execute this instruction speculatively */
+ bool nospec_result; /* result is unsafe under speculation, nospec must follow */
+ bool zext_dst; /* this insn zero extends dst reg */
+ bool needs_zext; /* alu op needs to clear upper bits */
+ bool non_sleepable; /* helper/kfunc may be called from non-sleepable context */
+ bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
+ bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
+ u8 alu_state; /* used in combination with alu_limit */
+ /* true if STX or LDX instruction is a part of a spill/fill
+ * pattern for a bpf_fastcall call.
+ */
+ u8 fastcall_pattern:1;
+ /* for CALL instructions, a number of spill/fill pairs in the
+ * bpf_fastcall pattern.
+ */
+ u8 fastcall_spills_num:3;
+ u8 arg_prog:4;
+
+ /* below fields are initialized once */
+ unsigned int orig_idx; /* original instruction index */
+ bool jmp_point;
+ bool prune_point;
+ /* ensure we check state equivalence and save a state checkpoint at
+ * this instruction, regardless of any heuristics
+ */
+ bool force_checkpoint;
+ /* true if instruction is a call to a helper function that
+ * accepts callback function as a parameter.
+ */
+ bool calls_callback;
+ /*
+ * CFG strongly connected component this instruction belongs to,
+ * zero if it is a singleton SCC.
+ */
+ u32 scc;
+ /* registers alive before this instruction. */
+ u16 live_regs_before;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */
+
+#define BPF_VERIFIER_TMP_LOG_SIZE 1024
+
+struct bpf_verifier_log {
+ /* Logical start and end positions of a "log window" of the verifier log.
+ * start_pos == 0 means we haven't truncated anything.
+ * Once truncation starts to happen, start_pos + len_total == end_pos,
+ * except during log reset situations, in which (end_pos - start_pos)
+ * might get smaller than len_total (see bpf_vlog_reset()).
+ * Generally, (end_pos - start_pos) gives the number of useful bytes
+ * in the user log buffer.
+ */
+ u64 start_pos;
+ u64 end_pos;
+ char __user *ubuf;
+ u32 level;
+ u32 len_total;
+ u32 len_max;
+ char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
+};
+
+#define BPF_LOG_LEVEL1 1
+#define BPF_LOG_LEVEL2 2
+#define BPF_LOG_STATS 4
+#define BPF_LOG_FIXED 8
+#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
+#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
+#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
+#define BPF_LOG_MIN_ALIGNMENT 8U
+#define BPF_LOG_ALIGNMENT 40U
+
+static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
+{
+ return log && log->level;
+}
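/* Illustrative sketch, not part of the patch: the log-window arithmetic the
 * comment in bpf_verifier_log describes. Before truncation start_pos stays 0
 * and the result simply equals end_pos; once rotation begins the result is
 * capped at len_total.
 */
static inline u64 bpf_vlog_used_bytes_example(const struct bpf_verifier_log *log)
{
	/* number of useful bytes currently held in the user buffer */
	return log->end_pos - log->start_pos;
}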
+
+#define BPF_MAX_SUBPROGS 256
+
+struct bpf_subprog_arg_info {
+ enum bpf_arg_type arg_type;
+ union {
+ u32 mem_size;
+ u32 btf_id;
+ };
+};
+
+enum priv_stack_mode {
+ PRIV_STACK_UNKNOWN,
+ NO_PRIV_STACK,
+ PRIV_STACK_ADAPTIVE,
+};
+
+struct bpf_subprog_info {
+ /* 'start' has to be the first field otherwise find_subprog() won't work */
+ u32 start; /* insn idx of function entry point */
+ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
+ u32 postorder_start; /* The idx to the env->cfg.insn_postorder */
+ u32 exit_idx; /* Index of one of the BPF_EXIT instructions in this subprogram */
+ u16 stack_depth; /* max. stack depth used by this function */
+ u16 stack_extra;
+ /* offsets in range [stack_depth .. fastcall_stack_off)
+ * are used for bpf_fastcall spills and fills.
+ */
+ s16 fastcall_stack_off;
+ bool has_tail_call: 1;
+ bool tail_call_reachable: 1;
+ bool has_ld_abs: 1;
+ bool is_cb: 1;
+ bool is_async_cb: 1;
+ bool is_exception_cb: 1;
+ bool args_cached: 1;
+ /* true if bpf_fastcall stack region is used by functions that can't be inlined */
+ bool keep_fastcall_stack: 1;
+ bool changes_pkt_data: 1;
+ bool might_sleep: 1;
+ u8 arg_cnt:3;
+
+ enum priv_stack_mode priv_stack_mode;
+ struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
+};
struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
- int (*insn_hook)(struct bpf_verifier_env *env,
- int insn_idx, int prev_insn_idx);
+
+struct backtrack_state {
+ struct bpf_verifier_env *env;
+ u32 frame;
+ u32 reg_masks[MAX_CALL_FRAMES];
+ u64 stack_masks[MAX_CALL_FRAMES];
+};
+
+struct bpf_id_pair {
+ u32 old;
+ u32 cur;
+};
+
+struct bpf_idmap {
+ u32 tmp_id_gen;
+ struct bpf_id_pair map[BPF_ID_MAP_SIZE];
+};
+
+struct bpf_idset {
+ u32 count;
+ u32 ids[BPF_ID_MAP_SIZE];
};
+/* see verifier.c:compute_scc_callchain() */
+struct bpf_scc_callchain {
+ /* call sites from bpf_verifier_state->frame[*]->callsite leading to this SCC */
+ u32 callsites[MAX_CALL_FRAMES - 1];
+ /* last frame in a chain is identified by SCC id */
+ u32 scc;
+};
+
+/* verifier state waiting for propagate_backedges() */
+struct bpf_scc_backedge {
+ struct bpf_scc_backedge *next;
+ struct bpf_verifier_state state;
+};
+
+struct bpf_scc_visit {
+ struct bpf_scc_callchain callchain;
+ /* first state in current verification path that entered SCC
+ * identified by the callchain
+ */
+ struct bpf_verifier_state *entry_state;
+ struct bpf_scc_backedge *backedges; /* list of backedges */
+ u32 num_backedges;
+};
+
+/* An array of bpf_scc_visit structs sharing the same bpf_scc_callchain->scc
+ * but having different bpf_scc_callchain->callsites.
+ */
+struct bpf_scc_info {
+ u32 num_visits;
+ struct bpf_scc_visit visits[];
+};
+
+struct bpf_liveness;
+
/* single container for all structs
* one verifier_env per bpf_check() call
*/
struct bpf_verifier_env {
+ u32 insn_idx;
+ u32 prev_insn_idx;
struct bpf_prog *prog; /* eBPF program being verified */
+ const struct bpf_verifier_ops *ops;
+ struct module *attach_btf_mod; /* The owner module of prog->aux->attach_btf */
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
int stack_size; /* number of states to be processed */
bool strict_alignment; /* perform strict pointer alignment checks */
- struct bpf_verifier_state cur_state; /* current verifier state */
- struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
- const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
- void *analyzer_priv; /* pointer to external analyzer's private data */
+ bool test_state_freq; /* test verifier with different pruning frequency */
+ bool test_reg_invariants; /* fail verification on register invariants violations */
+ struct bpf_verifier_state *cur_state; /* current verifier state */
+ /* Search pruning optimization, array of list_heads for
+ * lists of struct bpf_verifier_state_list.
+ */
+ struct list_head *explored_states;
+ struct list_head free_list; /* list of struct bpf_verifier_state_list */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
+ struct bpf_map *insn_array_maps[MAX_USED_MAPS]; /* array of INSN_ARRAY map's to be relocated */
u32 used_map_cnt; /* number of used maps */
+ u32 used_btf_cnt; /* number of used BTF objects */
+ u32 insn_array_map_cnt; /* number of used maps of type BPF_MAP_TYPE_INSN_ARRAY */
u32 id_gen; /* used to generate unique reg IDs */
+ u32 hidden_subprog_cnt; /* number of hidden subprogs */
+ int exception_callback_subprog;
+ bool explore_alu_limits;
bool allow_ptr_leaks;
+ /* Allow access to uninitialized stack memory. Writes with fixed offset are
+ * always allowed, so this refers to reads (with fixed or variable offset),
+ * to writes with variable offset and to indirect (helper) accesses.
+ */
+ bool allow_uninit_stack;
+ bool bpf_capable;
+ bool bypass_spec_v1;
+ bool bypass_spec_v4;
bool seen_direct_write;
- bool varlen_map_value_access;
+ bool seen_exception;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+ const struct bpf_line_info *prev_linfo;
+ struct bpf_verifier_log log;
+ struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
+ union {
+ struct bpf_idmap idmap_scratch;
+ struct bpf_idset idset_scratch;
+ };
+ struct {
+ int *insn_state;
+ int *insn_stack;
+ /*
+ * vector of instruction indexes sorted in post-order, grouped by subprogram,
+ * see bpf_subprog_info->postorder_start.
+ */
+ int *insn_postorder;
+ int cur_stack;
+ /* current position in the insn_postorder vector */
+ int cur_postorder;
+ } cfg;
+ struct backtrack_state bt;
+ struct bpf_jmp_history_entry *cur_hist_ent;
+ u32 pass_cnt; /* number of times do_check() was called */
+ u32 subprog_cnt;
+ /* number of instructions analyzed by the verifier */
+ u32 prev_insn_processed, insn_processed;
+ /* number of jmps, calls, exits analyzed so far */
+ u32 prev_jmps_processed, jmps_processed;
+ /* total verification time */
+ u64 verification_time;
+ /* maximum number of verifier states kept in 'branching' instructions */
+ u32 max_states_per_insn;
+ /* total number of allocated verifier states */
+ u32 total_states;
+ /* some states are freed during program analysis.
+ * this is the peak number of states. this number dominates kernel
+ * memory consumption during verification
+ */
+ u32 peak_states;
+ /* longest register parentage chain walked for liveness marking */
+ u32 longest_mark_read_walk;
+ u32 free_list_size;
+ u32 explored_states_size;
+ u32 num_backedges;
+ bpfptr_t fd_array;
+
+ /* bit mask to keep track of whether a register has been accessed
+ * since the last time the function state was printed
+ */
+ u32 scratched_regs;
+ /* Same as scratched_regs but for stack slots */
+ u64 scratched_stack_slots;
+ u64 prev_log_pos, prev_insn_print_pos;
+ /* buffer used to temporarily hold constants as scalar registers */
+ struct bpf_reg_state fake_reg[2];
+ /* buffer used to generate temporary string representations,
+ * e.g., in reg_type_str() to generate reg_type string
+ */
+ char tmp_str_buf[TMP_STR_BUF_LEN];
+ struct bpf_insn insn_buf[INSN_BUF_SIZE];
+ struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
+ struct bpf_scc_callchain callchain_buf;
+ struct bpf_liveness *liveness;
+ /* array of pointers to bpf_scc_info indexed by SCC id */
+ struct bpf_scc_info **scc_info;
+ u32 scc_cnt;
+ struct bpf_iarray *succ;
+ struct bpf_iarray *gotox_tmp_buf;
};
-int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
- void *priv);
+static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->prog->aux->func_info_aux[subprog];
+}
+
+static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->subprog_info[subprog];
+}
+
+__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
+ const char *fmt, va_list args);
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+ const char *fmt, ...);
+__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+ const char *fmt, ...);
+int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
+ char __user *log_buf, u32 log_size);
+void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
+int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
+
+__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
+ u32 insn_off,
+ const char *prefix_fmt, ...);
+
+#define verifier_bug_if(cond, env, fmt, args...) \
+ ({ \
+ bool __cond = (cond); \
+ if (unlikely(__cond)) \
+ verifier_bug(env, fmt " (" #cond ")", ##args); \
+ (__cond); \
+ })
+#define verifier_bug(env, fmt, args...) \
+ ({ \
+ BPF_WARN_ONCE(1, "verifier bug: " fmt "\n", ##args); \
+ bpf_log(&env->log, "verifier bug: " fmt "\n", ##args); \
+ })
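/* Illustrative sketch, not part of the patch: typical use of the macros
 * above. verifier_bug_if() evaluates to its condition, so a caller can both
 * report and bail out in one statement; 'idx' is a hypothetical local.
 */
static inline int verifier_bug_example(struct bpf_verifier_env *env, int idx)
{
	if (verifier_bug_if(idx < 0, env, "negative insn index %d", idx))
		return -EFAULT;
	return 0;
}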
+
+static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *cur = env->cur_state;
+
+ return cur->frame[cur->curframe];
+}
+
+static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
+{
+ return cur_func(env)->regs;
+}
+
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
+void
+bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+void
+bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
+
+/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
+static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
+ struct btf *btf, u32 btf_id)
+{
+ if (tgt_prog)
+ return ((u64)tgt_prog->aux->id << 32) | btf_id;
+ else
+ return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
+}
+
+/* unpack the IDs from the key as constructed above */
+static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
+{
+ if (obj_id)
+ *obj_id = key >> 32;
+ if (btf_id)
+ *btf_id = key & 0x7FFFFFFF;
+}
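/* Illustrative sketch, not part of the patch: the key round-trips through
 * the two helpers above. Without a tgt_prog, bit 31 of the low word is set,
 * which is why bpf_trampoline_unpack_key() masks btf_id with 0x7FFFFFFF.
 */
static inline void trampoline_key_example(struct btf *btf)
{
	u64 key = bpf_trampoline_compute_key(NULL, btf, 42);
	u32 obj_id, btf_id;

	bpf_trampoline_unpack_key(key, &obj_id, &btf_id);
	/* obj_id == btf_obj_id(btf), btf_id == 42 */
}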
+
+int bpf_check_attach_target(struct bpf_verifier_log *log,
+ const struct bpf_prog *prog,
+ const struct bpf_prog *tgt_prog,
+ u32 btf_id,
+ struct bpf_attach_target_info *tgt_info);
+void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
+
+int mark_chain_precision(struct bpf_verifier_env *env, int regno);
+
+#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
+
+/* extract base type from bpf_{arg, return, reg}_type. */
+static inline u32 base_type(u32 type)
+{
+ return type & BPF_BASE_TYPE_MASK;
+}
+
+/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
+static inline u32 type_flag(u32 type)
+{
+ return type & ~BPF_BASE_TYPE_MASK;
+}
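/* Illustrative sketch, not part of the patch: decomposing an extended type
 * with the two helpers above. A register typed PTR_TO_MEM | PTR_MAYBE_NULL
 * splits cleanly into its base and flag parts.
 */
static inline bool example_is_nullable_mem(u32 t)
{
	/* for t == (PTR_TO_MEM | PTR_MAYBE_NULL) both checks hold */
	return base_type(t) == PTR_TO_MEM && (type_flag(t) & PTR_MAYBE_NULL);
}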
+
+/* only use after check_attach_btf_id() */
+static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
+{
+ return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
+ prog->aux->saved_dst_prog_type : prog->type;
+}
+
+static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+{
+ switch (resolve_prog_type(prog)) {
+ case BPF_PROG_TYPE_TRACING:
+ return prog->expected_attach_type != BPF_TRACE_ITER;
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ return prog->aux->jits_use_priv_stack;
+ case BPF_PROG_TYPE_LSM:
+ case BPF_PROG_TYPE_SYSCALL:
+ return false;
+ default:
+ return true;
+ }
+}
+
+#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)
+
+static inline bool bpf_type_has_unsafe_modifiers(u32 type)
+{
+ return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
+}
+
+static inline bool type_is_ptr_alloc_obj(u32 type)
+{
+ return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
+}
+
+static inline bool type_is_non_owning_ref(u32 type)
+{
+ return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
+}
+
+static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
+{
+ type = base_type(type);
+ return type == PTR_TO_PACKET ||
+ type == PTR_TO_PACKET_META;
+}
+
+static inline bool type_is_sk_pointer(enum bpf_reg_type type)
+{
+ return type == PTR_TO_SOCKET ||
+ type == PTR_TO_SOCK_COMMON ||
+ type == PTR_TO_TCP_SOCK ||
+ type == PTR_TO_XDP_SOCK;
+}
+
+static inline bool type_may_be_null(u32 type)
+{
+ return type & PTR_MAYBE_NULL;
+}
+
+static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
+{
+ env->scratched_regs |= 1U << regno;
+}
+
+static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
+{
+ env->scratched_stack_slots |= 1ULL << spi;
+}
+
+static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
+{
+ return (env->scratched_regs >> regno) & 1;
+}
+
+static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
+{
+ return (env->scratched_stack_slots >> regno) & 1;
+}
+
+static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
+{
+ return env->scratched_regs || env->scratched_stack_slots;
+}
+
+static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = 0U;
+ env->scratched_stack_slots = 0ULL;
+}
+
+/* Used for printing the entire verifier state. */
+static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = ~0U;
+ env->scratched_stack_slots = ~0ULL;
+}
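/* Illustrative sketch, not part of the patch: the scratch bookkeeping round
 * trip. Marking R3 and stack slot 5 makes the state "scratched" until
 * mark_verifier_state_clean() resets both masks.
 */
static inline void scratch_marks_example(struct bpf_verifier_env *env)
{
	mark_reg_scratched(env, BPF_REG_3);
	mark_stack_slot_scratched(env, 5);
	/* reg_scratched(env, BPF_REG_3) and verifier_state_scratched(env)
	 * both return true at this point
	 */
	mark_verifier_state_clean(env);
}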
+
+static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
+{
+#ifdef __BIG_ENDIAN
+ off -= spill_size - fill_size;
+#endif
+
+ return !(off % BPF_REG_SIZE);
+}
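/* Illustrative sketch, not part of the patch: worked example for the helper
 * above, with BPF_REG_SIZE == 8. For an 8-byte spill in the slot at offset
 * -8, the low 4 bytes sit at -8 on little-endian but at -4 on big-endian;
 * the adjustment maps the big-endian case back onto the slot boundary:
 *
 *	bpf_stack_narrow_access_ok(-8, 4, 8);	// true on little-endian
 *	bpf_stack_narrow_access_ok(-4, 4, 8);	// true on big-endian
 */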
+
+static inline bool insn_is_gotox(struct bpf_insn *insn)
+{
+ return BPF_CLASS(insn->code) == BPF_JMP &&
+ BPF_OP(insn->code) == BPF_JA &&
+ BPF_SRC(insn->code) == BPF_X;
+}
+
+const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
+const char *dynptr_type_str(enum bpf_dynptr_type type);
+const char *iter_type_str(const struct btf *btf, u32 btf_id);
+const char *iter_state_str(enum bpf_iter_state state);
+
+void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno);
+
+struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
+int bpf_jmp_offset(struct bpf_insn *insn);
+struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx);
+void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
+bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);
+
+int bpf_stack_liveness_init(struct bpf_verifier_env *env);
+void bpf_stack_liveness_free(struct bpf_verifier_env *env);
+int bpf_update_live_stack(struct bpf_verifier_env *env);
+int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frameno, u32 insn_idx, u64 mask);
+void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frameno, u64 mask);
+int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx);
+int bpf_commit_stack_write_marks(struct bpf_verifier_env *env);
+int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
+bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
+void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env);
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
new file mode 100644
index 000000000000..f6e0795db484
--- /dev/null
+++ b/include/linux/bpfptr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* A pointer that can point to either kernel or userspace memory. */
+#ifndef _LINUX_BPFPTR_H
+#define _LINUX_BPFPTR_H
+
+#include <linux/mm.h>
+#include <linux/sockptr.h>
+
+typedef sockptr_t bpfptr_t;
+
+static inline bool bpfptr_is_kernel(bpfptr_t bpfptr)
+{
+ return bpfptr.is_kernel;
+}
+
+static inline bpfptr_t KERNEL_BPFPTR(void *p)
+{
+ return (bpfptr_t) { .kernel = p, .is_kernel = true };
+}
+
+static inline bpfptr_t USER_BPFPTR(void __user *p)
+{
+ return (bpfptr_t) { .user = p };
+}
+
+static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
+{
+ if (is_kernel)
+ return KERNEL_BPFPTR((void*) (uintptr_t) addr);
+ else
+ return USER_BPFPTR(u64_to_user_ptr(addr));
+}
+
+static inline bool bpfptr_is_null(bpfptr_t bpfptr)
+{
+ if (bpfptr_is_kernel(bpfptr))
+ return !bpfptr.kernel;
+ return !bpfptr.user;
+}
+
+static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
+{
+ if (bpfptr_is_kernel(*bpfptr))
+ bpfptr->kernel += val;
+ else
+ bpfptr->user += val;
+}
+
+static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
+ size_t offset, size_t size)
+{
+ if (!bpfptr_is_kernel(src))
+ return copy_from_user(dst, src.user + offset, size);
+ return copy_from_kernel_nofault(dst, src.kernel + offset, size);
+}
+
+static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
+{
+ return copy_from_bpfptr_offset(dst, src, 0, size);
+}
+
+static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
+ const void *src, size_t size)
+{
+ return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
+}
+
+static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len)
+{
+ void *p = kvmalloc_node_align_noprof(len, 1, GFP_USER | __GFP_NOWARN, NUMA_NO_NODE);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_bpfptr(p, src, len)) {
+ kvfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ return p;
+}
+#define kvmemdup_bpfptr(...) alloc_hooks(kvmemdup_bpfptr_noprof(__VA_ARGS__))
+
+static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
+{
+ if (bpfptr_is_kernel(src))
+ return strncpy_from_kernel_nofault(dst, src.kernel, count);
+ return strncpy_from_user(dst, src.user, count);
+}
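/* Illustrative sketch, not part of the patch: how a syscall path might
 * consume a bpfptr_t. 'uattr_addr' and 'from_kernel' are hypothetical
 * parameters; make_bpfptr() picks the kernel or user flavor and the copy
 * helpers dispatch accordingly.
 */
static inline int bpfptr_usage_example(u64 uattr_addr, bool from_kernel)
{
	bpfptr_t uattr = make_bpfptr(uattr_addr, from_kernel);
	u32 first_word;

	if (bpfptr_is_null(uattr))
		return -EINVAL;
	/* reads from .kernel or .user depending on how uattr was built */
	if (copy_from_bpfptr(&first_word, uattr, sizeof(first_word)))
		return -EFAULT;
	return 0;
}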
+
+#endif /* _LINUX_BPFPTR_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index abcda9b458ab..115a964f3006 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BRCMPHY_H
#define _LINUX_BRCMPHY_H
@@ -10,10 +11,15 @@
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
+#define PHY_ID_BCM5221 0x004061e0
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
+#define PHY_ID_BCM5395 0x0143bcf0
+#define PHY_ID_BCM53125 0x03625f20
+#define PHY_ID_BCM53128 0x03625e10
#define PHY_ID_BCM54810 0x03625d00
+#define PHY_ID_BCM54811 0x03625cc0
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
#define PHY_ID_BCM5421 0x002060e0
@@ -22,9 +28,15 @@
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM54612E 0x03625e60
#define PHY_ID_BCM54616S 0x03625d10
+#define PHY_ID_BCM54140 0xae025009
#define PHY_ID_BCM57780 0x03625d90
+#define PHY_ID_BCM89610 0x03625cd0
+#define PHY_ID_BCM72113 0x35905310
+#define PHY_ID_BCM72116 0x35905350
+#define PHY_ID_BCM72165 0x35905340
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
#define PHY_ID_BCM7268 0xae025090
#define PHY_ID_BCM7271 0xae0253b0
@@ -33,6 +45,7 @@
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7346 0x600d8650
#define PHY_ID_BCM7362 0x600d84b0
+#define PHY_ID_BCM74165 0x359052c0
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7435 0x600d8750
@@ -40,8 +53,10 @@
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
+#define PHY_ID_BCM7712 0x35905330
#define PHY_ID_BCM_CYGNUS 0xae025200
+#define PHY_ID_BCM_OMEGA 0xae025100
#define PHY_BCM_OUI_MASK 0xfffffc00
#define PHY_BCM_OUI_1 0x00206000
@@ -51,18 +66,12 @@
#define PHY_BCM_OUI_5 0x03625e00
#define PHY_BCM_OUI_6 0xae025000
-#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
-#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
-#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
-#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
-#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100
-#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200
-#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400
-#define PHY_BRCM_STD_IBND_DISABLE 0x00000800
-#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000
-#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
-#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
-#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
+#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000001
+#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000002
+#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004
+#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008
+#define PHY_BRCM_EN_MASTER_MODE 0x00000010
+#define PHY_BRCM_IDDQ_SUSPEND 0x00000020
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
@@ -73,14 +82,18 @@
#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
+#define MII_BCM54XX_ECR_FIFOE 0x0001 /* FIFO elasticity */
#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_TOP 0x0d00 /* TOP_MISC expansion register select */
#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
+#define MII_BCM54XX_EXP_SEL_WOL 0x0e00 /* Wake-on-LAN expansion select register */
#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */
#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
@@ -106,15 +119,26 @@
#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
+#define MII_BCM54XX_RDB_ADDR 0x1e
+#define MII_BCM54XX_RDB_DATA 0x1f
+
+/* legacy access control via rdb/expansion register */
+#define BCM54XX_RDB_REG0087 0x0087
+#define BCM54XX_EXP_REG7E (MII_BCM54XX_EXP_SEL_ER + 0x7E)
+#define BCM54XX_ACCESS_MODE_LEGACY_EN BIT(15)
+
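/* Illustrative sketch, not part of the patch: composing a shadow-register
 * write from the encode helpers above. MII_BCM54XX_SHD (the 0x1c shadow
 * register) and its MII_BCM54XX_SHD_WRITE strobe bit live elsewhere in this
 * header and are assumed here:
 *
 *	u16 val = MII_BCM54XX_SHD_WRITE |
 *		  MII_BCM54XX_SHD_VAL(BCM54XX_SHD_LEDS1) |
 *		  MII_BCM54XX_SHD_DATA(BCM54XX_SHD_LEDS1_LED1(BCM_LED_SRC_OFF));
 *	phy_write(phydev, MII_BCM54XX_SHD, val);
 */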
/*
* AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
*/
#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00
#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
+#define MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN 0x4000
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RSVD 0x0060
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
@@ -140,7 +164,30 @@
#define BCM_LED_SRC_OPENSHORT 0xb
#define BCM_LED_SRC_OFF 0xe /* Tied high */
#define BCM_LED_SRC_ON 0xf /* Tied low */
+#define BCM_LED_SRC_MASK GENMASK(3, 0)
+/*
+ * Broadcom Multicolor LED configurations (expansion register 4)
+ */
+#define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04)
+#define BCM_LED_MULTICOLOR_IN_PHASE BIT(8)
+#define BCM_LED_MULTICOLOR_LINK_ACT 0x0
+#define BCM_LED_MULTICOLOR_SPEED 0x1
+#define BCM_LED_MULTICOLOR_ACT_FLASH 0x2
+#define BCM_LED_MULTICOLOR_FDX 0x3
+#define BCM_LED_MULTICOLOR_OFF 0x4
+#define BCM_LED_MULTICOLOR_ON 0x5
+#define BCM_LED_MULTICOLOR_ALT 0x6
+#define BCM_LED_MULTICOLOR_FLASH 0x7
+#define BCM_LED_MULTICOLOR_LINK 0x8
+#define BCM_LED_MULTICOLOR_ACT 0x9
+#define BCM_LED_MULTICOLOR_PROGRAM 0xa
+
+/*
+ * Broadcom Synchronous Ethernet Controls (expansion register 0x0E)
+ */
+#define BCM_EXP_SYNC_ETHERNET (MII_BCM54XX_EXP_SEL_ER + 0x0E)
+#define BCM_EXP_SYNC_ETHERNET_MII_LITE BIT(11)
/*
* BCM5482: Shadow registers
@@ -160,6 +207,7 @@
#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
+#define BCM54XX_SHD_SCR3_RXCTXC_DIS 0x0100
/* 01010: Auto Power-Down */
#define BCM54XX_SHD_APD 0x0a
@@ -168,18 +216,29 @@
#define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */
#define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */
-#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
+#define BCM54XX_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
/* LED3 / ~LINKSPD[2] selector */
-#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
+#define BCM54XX_SHD_LEDS_SHIFT(led) (4 * (led))
+#define BCM54XX_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
/* LED1 / ~LINKSPD[1] selector */
-#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_LEDS2 0x0e /* 01110: LED Selector 2 */
#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
-#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
-#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
+/* 10011: SerDes 100-FX Control Register */
+#define BCM54616S_SHD_100FX_CTRL 0x13
+#define BCM54616S_100FX_MODE BIT(0) /* 100-FX SerDes Enable */
+
+/* 11111: Mode Control Register */
+#define BCM54XX_SHD_MODE 0x1f
+#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */
+#define BCM54XX_SHD_INTF_SEL_RGMII 0x02
+#define BCM54XX_SHD_INTF_SEL_SGMII 0x04
+#define BCM54XX_SHD_INTF_SEL_GBIC 0x06
+#define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */
/*
* EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
@@ -192,6 +251,7 @@
#define MII_BCM54XX_EXP_EXP08 0x0F08
#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
+#define MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE 0x0100
#define MII_BCM54XX_EXP_EXP75 0x0f75
#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
@@ -200,6 +260,15 @@
#define MII_BCM54XX_EXP_EXP97 0x0f97
#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
+/* Top-MISC expansion registers */
+#define BCM54XX_TOP_MISC_IDDQ_CTRL (MII_BCM54XX_EXP_SEL_TOP + 0x06)
+#define BCM54XX_TOP_MISC_IDDQ_LP (1 << 0)
+#define BCM54XX_TOP_MISC_IDDQ_SD (1 << 2)
+#define BCM54XX_TOP_MISC_IDDQ_SR (1 << 3)
+
+#define BCM54XX_TOP_MISC_LED_CTL (MII_BCM54XX_EXP_SEL_TOP + 0x0C)
+#define BCM54XX_LED4_SEL_INTR BIT(1)
+
/*
* BCM5482: Secondary SerDes registers
*/
@@ -209,12 +278,163 @@
#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
+/* BroadR-Reach LRE Registers. */
+#define MII_BCM54XX_LRECR 0x00 /* LRE Control Register */
+#define MII_BCM54XX_LRESR 0x01 /* LRE Status Register */
+#define MII_BCM54XX_LREPHYSID1 0x02 /* LRE PHYS ID 1 */
+#define MII_BCM54XX_LREPHYSID2 0x03 /* LRE PHYS ID 2 */
+#define MII_BCM54XX_LREANAA 0x04 /* LDS Auto-Negotiation Advertised Ability */
+#define MII_BCM54XX_LREANAC 0x05 /* LDS Auto-Negotiation Advertised Control */
+#define MII_BCM54XX_LREANPT 0x06 /* LDS Ability Next Page Transmit */
+#define MII_BCM54XX_LRELPA 0x07 /* LDS Link Partner Ability */
+#define MII_BCM54XX_LRELPNPM 0x08 /* LDS Link Partner Next Page Message */
+#define MII_BCM54XX_LRELPNPC 0x09 /* LDS Link Partner Next Page Control */
+#define MII_BCM54XX_LRELDSE 0x0a /* LDS Expansion Register */
+#define MII_BCM54XX_LREES 0x0f /* LRE Extended Status */
+
+/* LRE control register. */
+#define LRECR_RESET 0x8000 /* Reset to default state */
+#define LRECR_LOOPBACK 0x4000 /* Internal Loopback */
+#define LRECR_LDSRES 0x2000 /* Restart LDS Process */
+#define LRECR_LDSEN 0x1000 /* LDS Enable */
+#define LRECR_PDOWN 0x0800 /* Enable low power state */
+#define LRECR_ISOLATE 0x0400 /* Isolate data paths from MII */
+#define LRECR_SPEED100 0x0200 /* Select 100 Mbps */
+#define LRECR_SPEED10 0x0000 /* Select 10 Mbps */
+#define LRECR_4PAIRS 0x0020 /* Select 4 Pairs */
+#define LRECR_2PAIRS 0x0010 /* Select 2 Pairs */
+#define LRECR_1PAIR 0x0000 /* Select 1 Pair */
+#define LRECR_MASTER 0x0008 /* Force Master when LDS disabled */
+#define LRECR_SLAVE 0x0000 /* Force Slave when LDS disabled */
+
+/* LRE status register. */
+#define LRESR_100_1PAIR 0x2000 /* Can do 100Mbps 1 Pair */
+#define LRESR_100_4PAIR 0x1000 /* Can do 100Mbps 4 Pairs */
+#define LRESR_100_2PAIR 0x0800 /* Can do 100Mbps 2 Pairs */
+#define LRESR_10_2PAIR 0x0400 /* Can do 10Mbps 2 Pairs */
+#define LRESR_10_1PAIR 0x0200 /* Can do 10Mbps 1 Pair */
+#define LRESR_ESTATEN 0x0100 /* Extended Status in R15 */
+#define LRESR_RESV 0x0080 /* Unused... */
+#define LRESR_MFPS 0x0040 /* Can suppress Management Frames Preamble */
+#define LRESR_LDSCOMPLETE 0x0020 /* LDS Auto-negotiation complete */
+#define LRESR_8023 0x0010 /* Has IEEE 802.3 Support */
+#define LRESR_LDSABILITY 0x0008 /* LDS auto-negotiation capable */
+#define LRESR_LSTATUS 0x0004 /* Link status */
+#define LRESR_JCD 0x0002 /* Jabber detected */
+#define LRESR_ERCAP 0x0001 /* Ext-reg capability */
+
+/* LDS Auto-Negotiation Advertised Ability. */
+#define LREANAA_PAUSE_ASYM 0x8000 /* Can pause asymmetrically */
+#define LREANAA_PAUSE 0x4000 /* Can pause */
+#define LREANAA_100_1PAIR 0x0020 /* Can do 100Mbps 1 Pair */
+#define LREANAA_100_4PAIR 0x0010 /* Can do 100Mbps 4 Pair */
+#define LREANAA_100_2PAIR 0x0008 /* Can do 100Mbps 2 Pair */
+#define LREANAA_10_2PAIR 0x0004 /* Can do 10Mbps 2 Pair */
+#define LREANAA_10_1PAIR 0x0002 /* Can do 10Mbps 1 Pair */
+
+#define LRE_ADVERTISE_FULL (LREANAA_100_1PAIR | LREANAA_100_4PAIR | \
+ LREANAA_100_2PAIR | LREANAA_10_2PAIR | \
+ LREANAA_10_1PAIR)
+
+#define LRE_ADVERTISE_ALL LRE_ADVERTISE_FULL
+
+/* LDS Link Partner Ability. */
+#define LRELPA_PAUSE_ASYM 0x8000 /* Supports asymmetric pause */
+#define LRELPA_PAUSE 0x4000 /* Supports pause capability */
+#define LRELPA_100_1PAIR 0x0020 /* 100Mbps 1 Pair capable */
+#define LRELPA_100_4PAIR 0x0010 /* 100Mbps 4 Pair capable */
+#define LRELPA_100_2PAIR 0x0008 /* 100Mbps 2 Pair capable */
+#define LRELPA_10_2PAIR 0x0004 /* 10Mbps 2 Pair capable */
+#define LRELPA_10_1PAIR 0x0002 /* 10Mbps 1 Pair capable */
+
+/* LDS Expansion register. */
+#define LDSE_DOWNGRADE 0x8000 /* Can do LDS Speed Downgrade */
+#define LDSE_MASTER 0x4000 /* Master / Slave */
+#define LDSE_PAIRS_MASK 0x3000 /* Pair Count Mask */
+#define LDSE_PAIRS_SHIFT 12
+#define LDSE_4PAIRS (2 << LDSE_PAIRS_SHIFT) /* 4 Pairs Connection */
+#define LDSE_2PAIRS (1 << LDSE_PAIRS_SHIFT) /* 2 Pairs Connection */
+#define LDSE_1PAIR (0 << LDSE_PAIRS_SHIFT) /* 1 Pair Connection */
+#define LDSE_CABLEN_MASK 0x0FFF /* Cable Length Mask */
+
/* BCM54810 Registers */
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90)
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
+/* BCM54811 Registers */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL (MII_BCM54XX_EXP_SEL_ER + 0x9A)
+/* Access Control Override Enable */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_EN BIT(15)
+/* Access Control Override Value */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_OVERRIDE_VAL BIT(14)
+/* Access Control Value */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_VAL BIT(13)
+
+/* BCM54612E Registers */
+#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
+#define BCM54612E_LED4_CLK125OUT_EN (1 << 1)
+
+
+/* Wake-on-LAN registers */
+#define BCM54XX_WOL_MAIN_CTL (MII_BCM54XX_EXP_SEL_WOL + 0x80)
+#define BCM54XX_WOL_EN BIT(0)
+#define BCM54XX_WOL_MODE_SINGLE_MPD 0
+#define BCM54XX_WOL_MODE_SINGLE_MPDSEC 1
+#define BCM54XX_WOL_MODE_DUAL 2
+#define BCM54XX_WOL_MODE_SHIFT 1
+#define BCM54XX_WOL_MODE_MASK 0x3
+#define BCM54XX_WOL_MP_MSB_FF_EN BIT(3)
+#define BCM54XX_WOL_SECKEY_OPT_4B 0
+#define BCM54XX_WOL_SECKEY_OPT_6B 1
+#define BCM54XX_WOL_SECKEY_OPT_8B 2
+#define BCM54XX_WOL_SECKEY_OPT_SHIFT 4
+#define BCM54XX_WOL_SECKEY_OPT_MASK 0x3
+#define BCM54XX_WOL_L2_TYPE_CHK BIT(6)
+#define BCM54XX_WOL_L4IPV4UDP_CHK BIT(7)
+#define BCM54XX_WOL_L4IPV6UDP_CHK BIT(8)
+#define BCM54XX_WOL_UDPPORT_CHK BIT(9)
+#define BCM54XX_WOL_CRC_CHK BIT(10)
+#define BCM54XX_WOL_SECKEY_MODE BIT(11)
+#define BCM54XX_WOL_RST BIT(12)
+#define BCM54XX_WOL_DIR_PKT_EN BIT(13)
+#define BCM54XX_WOL_MASK_MODE_DA_FF 0
+#define BCM54XX_WOL_MASK_MODE_DA_MPD 1
+#define BCM54XX_WOL_MASK_MODE_DA_ONLY 2
+#define BCM54XX_WOL_MASK_MODE_MPD 3
+#define BCM54XX_WOL_MASK_MODE_SHIFT 14
+#define BCM54XX_WOL_MASK_MODE_MASK 0x3
+
+#define BCM54XX_WOL_INNER_PROTO (MII_BCM54XX_EXP_SEL_WOL + 0x81)
+#define BCM54XX_WOL_OUTER_PROTO (MII_BCM54XX_EXP_SEL_WOL + 0x82)
+#define BCM54XX_WOL_OUTER_PROTO2 (MII_BCM54XX_EXP_SEL_WOL + 0x83)
+
+#define BCM54XX_WOL_MPD_DATA1(x) (MII_BCM54XX_EXP_SEL_WOL + 0x84 + (x))
+#define BCM54XX_WOL_MPD_DATA2(x) (MII_BCM54XX_EXP_SEL_WOL + 0x87 + (x))
+#define BCM54XX_WOL_SEC_KEY_8B (MII_BCM54XX_EXP_SEL_WOL + 0x8A)
+#define BCM54XX_WOL_MASK(x) (MII_BCM54XX_EXP_SEL_WOL + 0x8B + (x))
+#define BCM54XX_SEC_KEY_STORE(x) (MII_BCM54XX_EXP_SEL_WOL + 0x8E)
+#define BCM54XX_WOL_SHARED_CNT (MII_BCM54XX_EXP_SEL_WOL + 0x92)
+
+#define BCM54XX_WOL_INT_MASK (MII_BCM54XX_EXP_SEL_WOL + 0x93)
+#define BCM54XX_WOL_PKT1 BIT(0)
+#define BCM54XX_WOL_PKT2 BIT(1)
+#define BCM54XX_WOL_DIR BIT(2)
+#define BCM54XX_WOL_ALL_INTRS (BCM54XX_WOL_PKT1 | \
+ BCM54XX_WOL_PKT2 | \
+ BCM54XX_WOL_DIR)
+
+#define BCM54XX_WOL_INT_STATUS (MII_BCM54XX_EXP_SEL_WOL + 0x94)
+
+/* BCM5221 Registers */
+#define BCM5221_AEGSR 0x1C
+#define BCM5221_AEGSR_MDIX_STATUS BIT(13)
+#define BCM5221_AEGSR_MDIX_MAN_SWAP BIT(12)
+#define BCM5221_AEGSR_MDIX_DIS BIT(11)
+
+#define BCM5221_SHDW_AM4_EN_CLK_LPM BIT(2)
+#define BCM5221_SHDW_AM4_FORCE_LPM BIT(1)
/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
@@ -237,6 +457,7 @@
#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_STANDBY 0x0008 /* Standby enable */
#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
@@ -247,6 +468,8 @@
#define LPI_FEATURE_EN 0x8000
#define LPI_FEATURE_EN_DIG1000X 0x4000
+#define BRCM_CL45VEN_EEE_LPI_CNT 0x803f
+
/* Core register definitions*/
#define MII_BRCM_CORE_BASE12 0x12
#define MII_BRCM_CORE_BASE13 0x13
@@ -255,4 +478,51 @@
#define MII_BRCM_CORE_EXPB0 0xB0
#define MII_BRCM_CORE_EXPB1 0xB1
+/* Enhanced Cable Diagnostics */
+#define BCM54XX_RDB_ECD_CTRL 0x2a0
+#define BCM54XX_EXP_ECD_CTRL (MII_BCM54XX_EXP_SEL_ER + 0xc0)
+
+#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT3 1 /* CAT3 or worse */
+#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT5 0 /* CAT5 or better */
+#define BCM54XX_ECD_CTRL_CABLE_TYPE_MASK BIT(0) /* cable type */
+#define BCM54XX_ECD_CTRL_INVALID BIT(3) /* invalid result */
+#define BCM54XX_ECD_CTRL_UNIT_CM 0 /* centimeters */
+#define BCM54XX_ECD_CTRL_UNIT_M 1 /* meters */
+#define BCM54XX_ECD_CTRL_UNIT_MASK BIT(10) /* cable length unit */
+#define BCM54XX_ECD_CTRL_IN_PROGRESS BIT(11) /* test in progress */
+#define BCM54XX_ECD_CTRL_BREAK_LINK BIT(12) /* disconnect link
+ * during test
+ */
+#define BCM54XX_ECD_CTRL_CROSS_SHORT_DIS BIT(13) /* disable inter-pair
+ * short check
+ */
+#define BCM54XX_ECD_CTRL_RUN BIT(15) /* run immediate */
+
+#define BCM54XX_RDB_ECD_FAULT_TYPE 0x2a1
+#define BCM54XX_EXP_ECD_FAULT_TYPE (MII_BCM54XX_EXP_SEL_ER + 0xc1)
+#define BCM54XX_ECD_FAULT_TYPE_INVALID 0x0
+#define BCM54XX_ECD_FAULT_TYPE_OK 0x1
+#define BCM54XX_ECD_FAULT_TYPE_OPEN 0x2
+#define BCM54XX_ECD_FAULT_TYPE_SAME_SHORT 0x3 /* short same pair */
+#define BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT 0x4 /* short different pairs */
+#define BCM54XX_ECD_FAULT_TYPE_BUSY 0x9
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK GENMASK(3, 0)
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK GENMASK(7, 4)
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK GENMASK(11, 8)
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK GENMASK(15, 12)
+#define BCM54XX_ECD_PAIR_A_LENGTH_RESULTS 0x2a2
+#define BCM54XX_ECD_PAIR_B_LENGTH_RESULTS 0x2a3
+#define BCM54XX_ECD_PAIR_C_LENGTH_RESULTS 0x2a4
+#define BCM54XX_ECD_PAIR_D_LENGTH_RESULTS 0x2a5
+
+#define BCM54XX_RDB_ECD_PAIR_A_LENGTH_RESULTS 0x2a2
+#define BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc2)
+#define BCM54XX_RDB_ECD_PAIR_B_LENGTH_RESULTS 0x2a3
+#define BCM54XX_EXP_ECD_PAIR_B_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc3)
+#define BCM54XX_RDB_ECD_PAIR_C_LENGTH_RESULTS 0x2a4
+#define BCM54XX_EXP_ECD_PAIR_C_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc4)
+#define BCM54XX_RDB_ECD_PAIR_D_LENGTH_RESULTS 0x2a5
+#define BCM54XX_EXP_ECD_PAIR_D_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc5)
+#define BCM54XX_ECD_LENGTH_RESULTS_INVALID 0xffff
+
#endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
index 90b1aa867224..e66b711d091e 100644
--- a/include/linux/bsearch.h
+++ b/include/linux/bsearch.h
@@ -1,9 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BSEARCH_H
#define _LINUX_BSEARCH_H
#include <linux/types.h>
-void *bsearch(const void *key, const void *base, size_t num, size_t size,
- int (*cmp)(const void *key, const void *elt));
+static __always_inline
+void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
+{
+ const char *pivot;
+ int result;
+
+ while (num > 0) {
+ pivot = base + (num >> 1) * size;
+ result = cmp(key, pivot);
+
+ if (result == 0)
+ return (void *)pivot;
+
+ if (result > 0) {
+ base = pivot + size;
+ num--;
+ }
+ num >>= 1;
+ }
+
+ return NULL;
+}
+
+extern void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp);
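/* Illustrative sketch, not part of the patch: using the inline variant with
 * a cmp_func_t. The comparator contract matches userspace bsearch(3):
 * negative, zero, or positive as the key orders before, equal to, or after
 * the element.
 */
static int cmp_u32_example(const void *key, const void *elt)
{
	const u32 *k = key, *e = elt;

	return *k < *e ? -1 : *k > *e ? 1 : 0;
}

/* With u32 sorted[] = { 1, 4, 9, 16 } and key = 9,
 * __inline_bsearch(&key, sorted, 4, sizeof(u32), cmp_u32_example)
 * returns &sorted[2].
 */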
#endif /* _LINUX_BSEARCH_H */
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 637a20cfb237..14fa93268630 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -1,36 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* BSG helper library
*
* Copyright (C) 2008 James Smart, Emulex Corporation
* Copyright (C) 2011 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _BLK_BSG_
#define _BLK_BSG_
#include <linux/blkdev.h>
-#include <scsi/scsi_request.h>
+struct bsg_job;
struct request;
struct device;
struct scatterlist;
struct request_queue;
+typedef int (bsg_job_fn) (struct bsg_job *);
+typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *);
+
struct bsg_buffer {
unsigned int payload_len;
int sg_cnt;
@@ -38,12 +27,12 @@ struct bsg_buffer {
};
struct bsg_job {
- struct scsi_request sreq;
struct device *dev;
- struct request *req;
struct kref kref;
+ unsigned int timeout;
+
/* Transport/driver specific request/reply structs */
void *request;
void *reply;
@@ -63,13 +52,22 @@ struct bsg_job {
struct bsg_buffer request_payload;
struct bsg_buffer reply_payload;
+ int result;
+ unsigned int reply_payload_rcv_len;
+
+ /* BIDI support */
+ struct request *bidi_rq;
+ struct bio *bidi_bio;
+
void *dd_data; /* Used for driver-specific storage */
};
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
-struct request_queue *bsg_setup_queue(struct device *dev, char *name,
- bsg_job_fn *job_fn, int dd_job_size);
+struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
+ struct queue_limits *lim, bsg_job_fn *job_fn,
+ bsg_timeout_fn *timeout, int dd_job_size);
+void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
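/* Illustrative sketch, not part of the patch: what the new bsg_setup_queue()
 * signature implies for callers. 'my_bsg_job_fn', 'my_bsg_timeout' and
 * 'my_dd_size' are hypothetical; passing a NULL queue_limits keeps defaults.
 *
 *	struct request_queue *q;
 *
 *	q = bsg_setup_queue(dev, dev_name(dev), NULL, my_bsg_job_fn,
 *			    my_bsg_timeout, my_dd_size);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	...
 *	bsg_remove_queue(q);	// teardown, replacing the old unregister path
 */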
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index 7173f6e9d2dd..ee2df73edf83 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -1,33 +1,19 @@
-#ifndef BSG_H
-#define BSG_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BSG_H
+#define _LINUX_BSG_H
#include <uapi/linux/bsg.h>
+struct bsg_device;
+struct device;
+struct request_queue;
-#if defined(CONFIG_BLK_DEV_BSG)
-struct bsg_class_device {
- struct device *class_dev;
- struct device *parent;
- int minor;
- struct request_queue *queue;
- struct kref ref;
- void (*release)(struct device *);
-};
+typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
+ bool open_for_write, unsigned int timeout);
-extern int bsg_register_queue(struct request_queue *q,
- struct device *parent, const char *name,
- void (*release)(struct device *));
-extern void bsg_unregister_queue(struct request_queue *);
-#else
-static inline int bsg_register_queue(struct request_queue *q,
- struct device *parent, const char *name,
- void (*release)(struct device *))
-{
- return 0;
-}
-static inline void bsg_unregister_queue(struct request_queue *q)
-{
-}
-#endif
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ bsg_sg_io_fn *sg_io_fn);
+void bsg_unregister_queue(struct bsg_device *bcd);
-#endif
+#endif /* _LINUX_BSG_H */
diff --git a/include/linux/btf.h b/include/linux/btf.h
new file mode 100644
index 000000000000..f06976ffb63f
--- /dev/null
+++ b/include/linux/btf.h
@@ -0,0 +1,686 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Facebook */
+
+#ifndef _LINUX_BTF_H
+#define _LINUX_BTF_H 1
+
+#include <linux/types.h>
+#include <linux/bpfptr.h>
+#include <linux/bsearch.h>
+#include <linux/btf_ids.h>
+#include <uapi/linux/btf.h>
+#include <uapi/linux/bpf.h>
+
+#define BTF_TYPE_EMIT(type) ((void)(type *)0)
+#define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val)
+
+/* These need to be macros, as the expressions are used in assembler input */
+#define KF_ACQUIRE (1 << 0) /* kfunc is an acquire function */
+#define KF_RELEASE (1 << 1) /* kfunc is a release function */
+#define KF_RET_NULL (1 << 2) /* kfunc returns a pointer that may be NULL */
+/* Trusted arguments are those which are guaranteed to be valid when passed to
+ * the kfunc. It is used to enforce that pointers obtained from either acquire
+ * kfuncs, or from the main kernel on a tracepoint or struct_ops callback
+ * invocation, remain unmodified when being passed to helpers taking trusted
+ * args.
+ *
+ * Consider, for example, the following new task tracepoint:
+ *
+ * SEC("tp_btf/task_newtask")
+ * int BPF_PROG(new_task_tp, struct task_struct *task, u64 clone_flags)
+ * {
+ * ...
+ * }
+ *
+ * And the following kfunc:
+ *
+ * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+ *
+ * All invocations to the kfunc must pass the unmodified, unwalked task:
+ *
+ * bpf_task_acquire(task); // Allowed
+ * bpf_task_acquire(task->last_wakee); // Rejected, walked task
+ *
+ * Programs may also pass referenced tasks directly to the kfunc:
+ *
+ * struct task_struct *acquired;
+ *
+ * acquired = bpf_task_acquire(task); // Allowed, same as above
+ * bpf_task_acquire(acquired); // Allowed
+ * bpf_task_acquire(task); // Allowed
+ * bpf_task_acquire(acquired->last_wakee); // Rejected, walked task
+ *
+ * Programs may _not_, however, pass a task from an arbitrary fentry/fexit, or
+ * kprobe/kretprobe to the kfunc, as BPF cannot guarantee that all of these
+ * pointers are safe. For example, the following BPF program
+ * would be rejected:
+ *
+ * SEC("kretprobe/free_task")
+ * int BPF_PROG(free_task_probe, struct task_struct *tsk)
+ * {
+ * struct task_struct *acquired;
+ *
+ * acquired = bpf_task_acquire(tsk); // Rejected, not a trusted pointer
+ * bpf_task_release(acquired);
+ *
+ * return 0;
+ * }
+ */
+#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */
+#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
+#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
+#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */
+/* only one of KF_ITER_{NEW,NEXT,DESTROY} could be specified per kfunc */
+#define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */
+#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
+#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
+#define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by an RCU CS when invoked */
+#define KF_FASTCALL (1 << 12) /* kfunc supports bpf_fastcall protocol */
+#define KF_ARENA_RET (1 << 13) /* kfunc returns an arena pointer */
+#define KF_ARENA_ARG1 (1 << 14) /* kfunc takes an arena pointer as its first argument */
+#define KF_ARENA_ARG2 (1 << 15) /* kfunc takes an arena pointer as its second argument */
+
+/*
+ * Tag marking a kernel function as a kfunc. This is meant to minimize the
+ * amount of copy-paste that kfunc authors have to include for correctness so
+ * as to avoid issues such as the compiler inlining or eliding either a static
+ * kfunc, or a global kfunc in an LTO build.
+ */
+#define __bpf_kfunc __used __retain __noclone noinline
+
+#define __bpf_kfunc_start_defs() \
+ __diag_push(); \
+ __diag_ignore_all("-Wmissing-declarations", \
+ "Global kfuncs as their definitions will be in BTF");\
+ __diag_ignore_all("-Wmissing-prototypes", \
+ "Global kfuncs as their definitions will be in BTF")
+
+#define __bpf_kfunc_end_defs() __diag_pop()
+#define __bpf_hook_start() __bpf_kfunc_start_defs()
+#define __bpf_hook_end() __bpf_kfunc_end_defs()
+
+/*
+ * Return the name of the passed struct if it exists, or halt the build if,
+ * for example, the structure gets renamed. In this way, developers have to
+ * revisit the code using that structure name, and update it accordingly.
+ */
+#define stringify_struct(x) \
+ ({ BUILD_BUG_ON(sizeof(struct x) < 0); \
+ __stringify(x); })
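/* Illustrative sketch, not part of the patch: stringify_struct(task_struct)
 * yields "task_struct" while the type exists; if the struct were ever
 * renamed, the BUILD_BUG_ON() breaks the build and forces every user of the
 * string to be revisited.
 */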
+
+struct btf;
+struct btf_member;
+struct btf_type;
+union bpf_attr;
+struct btf_show;
+struct btf_id_set;
+struct bpf_prog;
+
+typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *prog, u32 kfunc_id);
+
+struct btf_kfunc_id_set {
+ struct module *owner;
+ struct btf_id_set8 *set;
+ btf_kfunc_filter_t filter;
+};
+
+struct btf_id_dtor_kfunc {
+ u32 btf_id;
+ u32 kfunc_btf_id;
+};
+
+struct btf_struct_meta {
+ u32 btf_id;
+ struct btf_record *record;
+};
+
+struct btf_struct_metas {
+ u32 cnt;
+ struct btf_struct_meta types[];
+};
+
+extern const struct file_operations btf_fops;
+
+const char *btf_get_name(const struct btf *btf);
+void btf_get(struct btf *btf);
+void btf_put(struct btf *btf);
+const struct btf_header *btf_header(const struct btf *btf);
+int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
+struct btf *btf_get_by_fd(int fd);
+int btf_get_info_by_fd(const struct btf *btf,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+/* Figure out the size of a type_id. If type_id is a modifier
+ * (e.g. const), it will be resolved to find out the type with size.
+ *
+ * For example:
+ * In describing "const void *", type_id is "const" and "const"
+ * refers to "void *". The return type will be "void *".
+ *
+ * If type_id is a simple "int", then return type will be "int".
+ *
+ * @btf: struct btf object
+ * @type_id: Find out the size of type_id. The type_id of the return
+ * type is set to *type_id.
+ * @ret_size: It can be NULL. If not NULL, the size of the return
+ * type is set to *ret_size.
+ * Return: The btf_type (resolved to another type with size info if needed).
+ * NULL is returned if type_id itself does not have size info
+ * (e.g. void) or it cannot be resolved to another type that
+ * has size info.
+ * *type_id and *ret_size will not be changed in the
+ * NULL return case.
+ */
+const struct btf_type *btf_type_id_size(const struct btf *btf,
+ u32 *type_id,
+ u32 *ret_size);
+
+/*
+ * Options to control show behaviour.
+ * - BTF_SHOW_COMPACT: no formatting around type information
+ * - BTF_SHOW_NONAME: no struct/union member names/types
+ * - BTF_SHOW_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_SHOW_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ * - BTF_SHOW_UNSAFE: skip use of bpf_probe_read() to safely read
+ * data before displaying it.
+ */
+#define BTF_SHOW_COMPACT BTF_F_COMPACT
+#define BTF_SHOW_NONAME BTF_F_NONAME
+#define BTF_SHOW_PTR_RAW BTF_F_PTR_RAW
+#define BTF_SHOW_ZERO BTF_F_ZERO
+#define BTF_SHOW_UNSAFE (1ULL << 4)
+
+void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
+ struct seq_file *m);
+int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, void *obj,
+ struct seq_file *m, u64 flags);
+
+/*
+ * Copy len bytes of string representation of obj of BTF type_id into buf.
+ *
+ * @btf: struct btf object
+ * @type_id: type id of type obj points to
+ * @obj: pointer to typed data
+ * @buf: buffer to write to
+ * @len: maximum length to write to buf
+ * @flags: show options (see above)
+ *
+ * Return: length that would have been/was copied as per snprintf, or
+ * negative error.
+ */
+int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
+ char *buf, int len, u64 flags);
+
+int btf_get_fd_by_id(u32 id);
+u32 btf_obj_id(const struct btf *btf);
+bool btf_is_kernel(const struct btf *btf);
+bool btf_is_module(const struct btf *btf);
+bool btf_is_vmlinux(const struct btf *btf);
+struct module *btf_try_get_module(const struct btf *btf);
+u32 btf_nr_types(const struct btf *btf);
+struct btf *btf_base_btf(const struct btf *btf);
+bool btf_type_is_i32(const struct btf_type *t);
+bool btf_type_is_i64(const struct btf_type *t);
+bool btf_type_is_primitive(const struct btf_type *t);
+bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
+ const struct btf_member *m,
+ u32 expected_offset, u32 expected_size);
+struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
+ u32 field_mask, u32 value_size);
+int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec);
+bool btf_type_is_void(const struct btf_type *t);
+s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p);
+const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *
+btf_resolve_size(const struct btf *btf, const struct btf_type *type,
+ u32 *type_size);
+const char *btf_type_str(const struct btf_type *t);
+
+#define for_each_member(i, struct_type, member) \
+ for (i = 0, member = btf_type_member(struct_type); \
+ i < btf_type_vlen(struct_type); \
+ i++, member++)
+
+#define for_each_vsi(i, datasec_type, member) \
+ for (i = 0, member = btf_type_var_secinfo(datasec_type); \
+ i < btf_type_vlen(datasec_type); \
+ i++, member++)
+
+static inline bool btf_type_is_ptr(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
+}
+
+static inline bool btf_type_is_int(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
+}
+
+static inline bool btf_type_is_small_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && t->size <= sizeof(u64);
+}
+
+static inline u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_signed_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && (btf_int_encoding(t) & BTF_INT_SIGNED);
+}
+
+static inline bool btf_type_is_enum(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_is_any_enum(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM ||
+ BTF_INFO_KIND(t->info) == BTF_KIND_ENUM64;
+}
+
+static inline bool btf_kind_core_compat(const struct btf_type *t1,
+ const struct btf_type *t2)
+{
+ return BTF_INFO_KIND(t1->info) == BTF_INFO_KIND(t2->info) ||
+ (btf_is_any_enum(t1) && btf_is_any_enum(t2));
+}
+
+static inline bool str_is_empty(const char *s)
+{
+ return !s || !s[0];
+}
+
+static inline u16 btf_kind(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info);
+}
+
+static inline bool btf_is_enum(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_is_enum64(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM64;
+}
+
+static inline u64 btf_enum64_value(const struct btf_enum64 *e)
+{
+ return ((u64)e->val_hi32 << 32) | e->val_lo32;
+}
+
+static inline bool btf_is_composite(const struct btf_type *t)
+{
+ u16 kind = btf_kind(t);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool btf_is_array(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ARRAY;
+}
+
+static inline bool btf_is_int(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_INT;
+}
+
+static inline bool btf_is_ptr(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_PTR;
+}
+
+static inline u8 btf_int_offset(const struct btf_type *t)
+{
+ return BTF_INT_OFFSET(*(u32 *)(t + 1));
+}
+
+static inline __u8 btf_int_bits(const struct btf_type *t)
+{
+ return BTF_INT_BITS(*(__u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_scalar(const struct btf_type *t)
+{
+ return btf_type_is_int(t) || btf_type_is_enum(t);
+}
+
+static inline bool btf_type_is_fwd(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+}
+
+static inline bool btf_type_is_typedef(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
+}
+
+static inline bool btf_type_is_volatile(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_VOLATILE;
+}
+
+static inline bool btf_type_is_func(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
+}
+
+static inline bool btf_type_is_func_proto(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
+}
+
+static inline bool btf_type_is_var(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
+}
+
+static inline bool btf_type_is_type_tag(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG;
+}
+
+/* union is only a special case of struct:
+ * all its offsetof(member) == 0
+ */
+static inline bool btf_type_is_struct(const struct btf_type *t)
+{
+ u8 kind = BTF_INFO_KIND(t->info);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool __btf_type_is_struct(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
+}
+
+static inline bool btf_type_is_array(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
+}
+
+static inline u16 btf_type_vlen(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline u16 btf_vlen(const struct btf_type *t)
+{
+ return btf_type_vlen(t);
+}
+
+static inline u16 btf_func_linkage(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline bool btf_type_kflag(const struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info);
+}
+
+static inline u32 __btf_member_bit_offset(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
+ : member->offset;
+}
+
+static inline u32 __btf_member_bitfield_size(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
+ : 0;
+}
+
+static inline struct btf_member *btf_members(const struct btf_type *t)
+{
+ return (struct btf_member *)(t + 1);
+}
+
+static inline u32 btf_member_bit_offset(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bit_offset(t, m);
+}
+
+static inline u32 btf_member_bitfield_size(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bitfield_size(t, m);
+}
+
+static inline const struct btf_member *btf_type_member(const struct btf_type *t)
+{
+ return (const struct btf_member *)(t + 1);
+}
+
+static inline struct btf_array *btf_array(const struct btf_type *t)
+{
+ return (struct btf_array *)(t + 1);
+}
+
+static inline struct btf_enum *btf_enum(const struct btf_type *t)
+{
+ return (struct btf_enum *)(t + 1);
+}
+
+static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
+{
+ return (struct btf_enum64 *)(t + 1);
+}
+
+static inline const struct btf_var_secinfo *btf_type_var_secinfo(
+ const struct btf_type *t)
+{
+ return (const struct btf_var_secinfo *)(t + 1);
+}
+
+static inline struct btf_param *btf_params(const struct btf_type *t)
+{
+ return (struct btf_param *)(t + 1);
+}
+
+static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
+{
+ return (struct btf_decl_tag *)(t + 1);
+}
+
+static inline int btf_id_cmp_func(const void *a, const void *b)
+{
+ const int *pa = a, *pb = b;
+
+ return *pa - *pb;
+}
+
+static inline bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
+{
+ return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
+}
+
+static inline void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id)
+{
+ return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
+}
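
/*
 * Usage sketch for the bsearch-based helpers above. The set name and
 * wrapper are hypothetical; real sets are emitted by the
 * BTF_SET_START()/BTF_SET_END() macros in linux/btf_ids.h and sorted
 * by resolve_btfids at link time, which is what makes bsearch() valid.
 */
extern struct btf_id_set example_allowlist;

static bool example_id_allowed(u32 btf_id)
{
	/* ids[] is sorted ascending, so membership is a binary search */
	return btf_id_set_contains(&example_allowlist, btf_id);
}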
+
+bool btf_param_match_suffix(const struct btf *btf,
+ const struct btf_param *arg,
+ const char *suffix);
+int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
+ u32 arg_no);
+u32 btf_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto, int off);
+
+struct bpf_verifier_log;
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+struct bpf_struct_ops;
+int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find_value(struct btf *btf, u32 value_id);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id);
+#else
+static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id)
+{
+ return NULL;
+}
+#endif
+
+enum btf_field_iter_kind {
+ BTF_FIELD_ITER_IDS,
+ BTF_FIELD_ITER_STRS,
+};
+
+struct btf_field_desc {
+ /* once-per-type offsets */
+ int t_off_cnt, t_offs[2];
+ /* member struct size, or zero, if no members */
+ int m_sz;
+ /* repeated per-member offsets */
+ int m_off_cnt, m_offs[1];
+};
+
+struct btf_field_iter {
+ struct btf_field_desc desc;
+ void *p;
+ int m_idx;
+ int off_idx;
+ int vlen;
+};
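
/*
 * A minimal sketch (names hypothetical, CONFIG_BPF_SYSCALL assumed) of
 * driving the field iterator declared below: btf_field_iter_init()
 * aims it at every type-ID slot of @t, and btf_field_iter_next()
 * yields each slot so the caller can rewrite it, e.g. during BTF
 * relocation.
 */
static int example_shift_ids(struct btf_type *t, u32 diff)
{
	struct btf_field_iter it;
	__u32 *id;
	int err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;

	while ((id = btf_field_iter_next(&it))) {
		if (*id)	/* leave "void" (ID 0) references alone */
			*id += diff;
	}
	return 0;
}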
+
+#ifdef CONFIG_BPF_SYSCALL
+const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
+int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **map_ids);
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind);
+__u32 *btf_field_iter_next(struct btf_field_iter *it);
+
+const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+const char *btf_str_by_offset(const struct btf *btf, u32 offset);
+struct btf *btf_parse_vmlinux(void);
+struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
+u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
+ const struct bpf_prog *prog);
+u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
+ const struct bpf_prog *prog);
+int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s);
+int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset);
+s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
+int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
+ struct module *owner);
+struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
+bool btf_is_projection_of(const char *pname, const char *tname);
+bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg);
+int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
+bool btf_types_are_same(const struct btf *btf1, u32 id1,
+ const struct btf *btf2, u32 id2);
+int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx);
+
+static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
+{
+ if (!btf_type_is_ptr(t))
+ return false;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ return btf_type_is_struct(t);
+}
+#else
+static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
+ u32 type_id)
+{
+ return NULL;
+}
+
+static inline void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
+{
+}
+
+static inline int btf_relocate(struct btf *btf, const struct btf *base_btf,
+ __u32 **map_ids)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline __u32 *btf_field_iter_next(struct btf_field_iter *it)
+{
+ return NULL;
+}
+
+static inline const char *btf_name_by_offset(const struct btf *btf,
+ u32 offset)
+{
+ return NULL;
+}
+static inline u32 *btf_kfunc_id_set_contains(const struct btf *btf,
+ u32 kfunc_btf_id,
+ const struct bpf_prog *prog)
+{
+ return NULL;
+}
+static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s)
+{
+ return 0;
+}
+static inline s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
+{
+ return -ENOENT;
+}
+static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors,
+ u32 add_cnt, struct module *owner)
+{
+ return 0;
+}
+static inline struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
+{
+ return NULL;
+}
+static inline bool
+btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg)
+{
+ return false;
+}
+static inline int get_kern_ctx_btf_id(struct bpf_verifier_log *log,
+ enum bpf_prog_type prog_type)
+{
+ return -EINVAL;
+}
+static inline bool btf_types_are_same(const struct btf *btf1, u32 id1,
+ const struct btf *btf2, u32 id2)
+{
+ return false;
+}
+static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
new file mode 100644
index 000000000000..139bdececdcf
--- /dev/null
+++ b/include/linux/btf_ids.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_BTF_IDS_H
+#define _LINUX_BTF_IDS_H
+
+#include <linux/types.h> /* for u32 */
+
+struct btf_id_set {
+ u32 cnt;
+ u32 ids[];
+};
+
+/* This flag implies BTF_SET8 holds kfunc(s) */
+#define BTF_SET8_KFUNCS (1 << 0)
+
+struct btf_id_set8 {
+ u32 cnt;
+ u32 flags;
+ struct {
+ u32 id;
+ u32 flags;
+ } pairs[];
+};
+
+#ifdef CONFIG_DEBUG_INFO_BTF
+
+#include <linux/compiler.h> /* for __PASTE */
+#include <linux/compiler_attributes.h> /* for __maybe_unused */
+#include <linux/stringify.h>
+
+/*
+ * The following macros help to define lists of BTF IDs placed
+ * in the .BTF_ids section. They are initially filled with zeros
+ * (during compilation) and resolved later, during the linking
+ * phase, by the resolve_btfids tool.
+ *
+ * Any change in list layout must be reflected in resolve_btfids
+ * tool logic.
+ */
+
+#define BTF_IDS_SECTION ".BTF_ids"
+
+#define ____BTF_ID(symbol, word) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".local " #symbol " ; \n" \
+".type " #symbol ", STT_OBJECT; \n" \
+".size " #symbol ", 4; \n" \
+#symbol ": \n" \
+".zero 4 \n" \
+word \
+".popsection; \n");
+
+#define __BTF_ID(symbol, word) \
+ ____BTF_ID(symbol, word)
+
+#define __ID(prefix) \
+ __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
+
+/*
+ * The BTF_ID macro defines a unique symbol for each ID, pointing
+ * to 4 zero bytes.
+ */
+#define BTF_ID(prefix, name) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), "")
+
+#define ____BTF_ID_FLAGS(prefix, name, flags) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), ".long " #flags "\n")
+#define __BTF_ID_FLAGS(prefix, name, flags, ...) \
+ ____BTF_ID_FLAGS(prefix, name, flags)
+#define BTF_ID_FLAGS(prefix, name, ...) \
+ __BTF_ID_FLAGS(prefix, name, ##__VA_ARGS__, 0)
+
+/*
+ * The BTF_ID_LIST macro defines a pure (unsorted) list
+ * of BTF IDs, with the following layout:
+ *
+ * BTF_ID_LIST(list1)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ *
+ * list1:
+ * __BTF_ID__type1__name1__1:
+ * .zero 4
+ * __BTF_ID__type2__name2__2:
+ * .zero 4
+ *
+ */
+#define __BTF_ID_LIST(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " " #name "; \n" \
+#name ":; \n" \
+".popsection; \n");
+
+#define BTF_ID_LIST(name) \
+__BTF_ID_LIST(name, local) \
+extern u32 name[];
+
+#define BTF_ID_LIST_GLOBAL(name, n) \
+__BTF_ID_LIST(name, globl)
+
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST(name) \
+ BTF_ID(prefix, typename)
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST_GLOBAL(name, 1) \
+ BTF_ID(prefix, typename)
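
/*
 * Usage sketch (list name hypothetical): declare two IDs that
 * resolve_btfids fills in at link time. Afterwards,
 * example_btf_ids[0] holds the ID of struct task_struct and
 * example_btf_ids[1] the ID of vfs_truncate, in declaration order.
 */
BTF_ID_LIST(example_btf_ids)
BTF_ID(struct, task_struct)
BTF_ID(func, vfs_truncate)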
+
+/*
+ * The BTF_ID_UNUSED macro defines 4 zero bytes.
+ * It's used when we want to define an 'unused' entry
+ * in a BTF_ID_LIST, like:
+ *
+ * BTF_ID_LIST(bpf_skb_output_btf_ids)
+ * BTF_ID(struct, sk_buff)
+ * BTF_ID_UNUSED
+ * BTF_ID(struct, task_struct)
+ */
+
+#define BTF_ID_UNUSED \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+/*
+ * The BTF_SET_START/END macros pair defines sorted list of
+ * BTF IDs plus its members count, with following layout:
+ *
+ * BTF_SET_START(list)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ * BTF_SET_END(list)
+ *
+ * __BTF_ID__set__list:
+ * .zero 4
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * __BTF_ID__type2__name2__4:
+ * .zero 4
+ *
+ */
+#define __BTF_SET_START(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set__" #name "; \n" \
+"__BTF_ID__set__" #name ":; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define BTF_SET_START(name) \
+__BTF_ID_LIST(name, local) \
+__BTF_SET_START(name, local)
+
+#define BTF_SET_START_GLOBAL(name) \
+__BTF_ID_LIST(name, globl) \
+__BTF_SET_START(name, globl)
+
+#define BTF_SET_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set name;
+
+/*
+ * The BTF_SET8_START/END macro pair defines a sorted list of
+ * BTF IDs and their flags plus its member count, with the
+ * following layout:
+ *
+ * BTF_SET8_START(list)
+ * BTF_ID_FLAGS(type1, name1, flags)
+ * BTF_ID_FLAGS(type2, name2, flags)
+ * BTF_SET8_END(list)
+ *
+ * __BTF_ID__set8__list:
+ * .zero 8
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * .word (1 << 0) | (1 << 2)
+ * __BTF_ID__type2__name2__5:
+ * .zero 4
+ * .word (1 << 3) | (1 << 1) | (1 << 2)
+ *
+ */
+#define __BTF_SET8_START(name, scope, flags) \
+__BTF_ID_LIST(name, local) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set8__" #name "; \n" \
+"__BTF_ID__set8__" #name ":; \n" \
+".zero 4 \n" \
+".long " __stringify(flags) "\n" \
+".popsection; \n");
+
+#define BTF_SET8_START(name) \
+__BTF_SET8_START(name, local, 0)
+
+#define BTF_SET8_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set8__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set8 name;
+
+#define BTF_KFUNCS_START(name) \
+__BTF_SET8_START(name, local, BTF_SET8_KFUNCS)
+
+#define BTF_KFUNCS_END(name) \
+BTF_SET8_END(name)
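
/*
 * Sketch of the common registration pattern built on these macros.
 * The set name and kfunc are hypothetical; struct btf_kfunc_id_set,
 * the KF_* flags and register_btf_kfunc_id_set() are assumed from
 * linux/btf.h.
 */
BTF_KFUNCS_START(example_kfunc_ids)
BTF_ID_FLAGS(func, bpf_example_kfunc, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(example_kfunc_ids)

static const struct btf_kfunc_id_set example_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &example_kfunc_ids,
};

/* typically registered from an init path:
 *	register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &example_kfunc_set);
 */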
+
+#else
+
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[64];
+#define BTF_ID(prefix, name)
+#define BTF_ID_FLAGS(prefix, name, ...)
+#define BTF_ID_UNUSED
+#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
+#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_END(name)
+#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 };
+#define BTF_SET8_END(name)
+#define BTF_KFUNCS_START(name) static struct btf_id_set8 __maybe_unused name = { .flags = BTF_SET8_KFUNCS };
+#define BTF_KFUNCS_END(name)
+
+#endif /* CONFIG_DEBUG_INFO_BTF */
+
+#ifdef CONFIG_NET
+/* Define a list of socket types which can be the argument for
+ * skc_to_*_sock() helpers. All these socket types should have
+ * sock_common as the first member of their memory layout.
+ */
+#define BTF_SOCK_TYPE_xxx \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)
+
+enum {
+#define BTF_SOCK_TYPE(name, str) name,
+BTF_SOCK_TYPE_xxx
+#undef BTF_SOCK_TYPE
+MAX_BTF_SOCK_TYPE,
+};
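
/*
 * The X-macro above expands to consecutive enum constants
 * (BTF_SOCK_TYPE_INET = 0, BTF_SOCK_TYPE_INET_CONN = 1, ...);
 * btf_sock_ids[] below is indexed by these constants to map each
 * socket kind to its resolved BTF type ID.
 */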
+
+extern u32 btf_sock_ids[];
+#endif
+
+#define BTF_TRACING_TYPE_xxx \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
+
+enum {
+#define BTF_TRACING_TYPE(name, type) name,
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
+MAX_BTF_TRACING_TYPE,
+};
+
+extern u32 btf_tracing_ids[];
+extern u32 bpf_cgroup_btf_id[];
+extern u32 bpf_local_storage_map_btf_id[];
+extern u32 btf_bpf_map_id[];
+extern u32 bpf_kmem_cache_btf_id[];
+
+#endif
diff --git a/include/linux/btree-128.h b/include/linux/btree-128.h
index 0b3414c4c928..22c09f5c3c39 100644
--- a/include/linux/btree-128.h
+++ b/include/linux/btree-128.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern struct btree_geo btree_geo128;
struct btree_head128 { struct btree_head h; };
diff --git a/include/linux/btree-type.h b/include/linux/btree-type.h
index 9a1147ef8563..fb34a52c788b 100644
--- a/include/linux/btree-type.h
+++ b/include/linux/btree-type.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define __BTREE_TP(pfx, type, sfx) pfx ## type ## sfx
#define _BTREE_TP(pfx, type, sfx) __BTREE_TP(pfx, type, sfx)
#define BTREE_TP(pfx) _BTREE_TP(pfx, BTREE_TYPE_SUFFIX,)
diff --git a/include/linux/btree.h b/include/linux/btree.h
index 65b5bb058324..243ee544397a 100644
--- a/include/linux/btree.h
+++ b/include/linux/btree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTREE_H
#define BTREE_H
@@ -9,7 +10,7 @@
*
* A B+Tree is a data structure for looking up arbitrary (currently allowing
* unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure
- * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not
+ * is described at https://en.wikipedia.org/wiki/B-tree; we currently do not
* use binary search to find the key on lookups.
*
* Each B+Tree consists of a head, that contains bookkeeping information and
diff --git a/include/linux/btrfs.h b/include/linux/btrfs.h
index 22d799147db2..9a37a45ec801 100644
--- a/include/linux/btrfs.h
+++ b/include/linux/btrfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BTRFS_H
#define _LINUX_BTRFS_H
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c8dae555eccf..b16b88bfbc3e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/buffer_head.h
*
@@ -8,22 +9,18 @@
#define _LINUX_BUFFER_HEAD_H
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>
-#ifdef CONFIG_BLOCK
-
enum bh_state_bits {
BH_Uptodate, /* Contains valid data */
BH_Dirty, /* Is dirty */
BH_Lock, /* Is locked */
BH_Req, /* Has been submitted for I/O */
- BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
- * IO completion of other buffers in the page
- */
BH_Mapped, /* Has a disk mapping */
BH_New, /* Disk mapping was newly created by get_block */
@@ -37,6 +34,7 @@ enum bh_state_bits {
BH_Meta, /* Buffer contains metadata */
BH_Prio, /* Buffer should be submitted with REQ_PRIO */
BH_Defer_Completion, /* Defer AIO completion to workqueue */
+ BH_Migrate, /* Buffer is being migrated (norefs) */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
@@ -56,13 +54,16 @@ typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
* filesystem and block layers. Nowadays the basic I/O unit
* is the bio, and buffer_heads are used for extracting block
* mappings (via a get_block_t call), for tracking state within
- * a page (via a page_mapping) and for wrapping bio submission
+ * a folio (via a folio_mapping) and for wrapping bio submission
* for backward compatibility reasons (e.g. submit_bh).
*/
struct buffer_head {
unsigned long b_state; /* buffer state bitmap (see above) */
struct buffer_head *b_this_page;/* circular list of page's buffers */
- struct page *b_page; /* the page this bh is mapped to */
+ union {
+ struct page *b_page; /* the page this bh is mapped to */
+ struct folio *b_folio; /* the folio this bh is mapped to */
+ };
sector_t b_blocknr; /* start block number */
size_t b_size; /* size of mapping */
@@ -75,16 +76,22 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
+ spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
+ * serialise IO completion of other
+ * buffers in the page */
};
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
+ * To avoid resetting buffer flags that are already set, because that
+ * causes a costly cache line transition, check the flag first.
*/
#define BUFFER_FNS(bit, name) \
static __always_inline void set_buffer_##name(struct buffer_head *bh) \
{ \
- set_bit(BH_##bit, &(bh)->b_state); \
+ if (!test_bit(BH_##bit, &(bh)->b_state)) \
+ set_bit(BH_##bit, &(bh)->b_state); \
} \
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{ \
@@ -113,7 +120,6 @@ static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
* of the form "mark_buffer_foo()". These are higher-level functions which
* do something in addition to setting a b_state bit.
*/
-BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
@@ -131,7 +137,45 @@ BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
-#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+ /*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with IO errors or whatever that
+ * might clear the bit) has to come from other state (eg BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
+ /*
+ * make it consistent with folio_mark_uptodate
+ * pairs with smp_load_acquire in buffer_uptodate
+ */
+ smp_mb__before_atomic();
+ set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+ clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+ /*
+ * make it consistent with folio_test_uptodate
+ * pairs with smp_mb__before_atomic in set_buffer_uptodate
+ */
+ return test_bit_acquire(BH_Uptodate, &bh->b_state);
+}
+
+static inline unsigned long bh_offset(const struct buffer_head *bh)
+{
+ return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+}
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
@@ -139,9 +183,9 @@ BUFFER_FNS(Defer_Completion, defer_completion)
BUG_ON(!PagePrivate(page)); \
((struct buffer_head *)page_private(page)); \
})
-#define page_has_buffers(page) PagePrivate(page)
+#define folio_buffers(folio) folio_get_private(folio)
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback);
/*
@@ -150,25 +194,23 @@ void buffer_check_dirty_writeback(struct page *page,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
-void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
-void set_bh_page(struct buffer_head *bh,
- struct page *page, unsigned long offset);
-int try_to_free_buffers(struct page *);
-struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- int retry);
-void create_empty_buffers(struct page *, unsigned long,
- unsigned long b_state);
+void folio_set_bh(struct buffer_head *bh, struct folio *folio,
+ unsigned long offset);
+struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
+ gfp_t gfp);
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size);
+struct buffer_head *create_empty_buffers(struct folio *folio,
+ unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
-int inode_has_buffers(struct inode *);
-void invalidate_inode_buffers(struct inode *);
-int remove_inode_buffers(struct inode *inode);
-int sync_mapping_buffers(struct address_space *mapping);
+int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
+ bool datasync);
+int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
+ bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
@@ -181,101 +223,74 @@ void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
-struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp);
+struct buffer_head *__find_get_block_nonatomic(struct block_device *bdev,
+ sector_t block, unsigned size);
+struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
-void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
-int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
-void write_dirty_buffer(struct buffer_head *bh, int op_flags);
-int submit_bh(int, int, struct buffer_head *);
+int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
-int bh_submit_read(struct buffer_head *bh);
-loff_t page_cache_seek_hole_data(struct inode *inode, loff_t offset,
- loff_t length, int whence);
-
-extern int buffer_heads_over_limit;
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags, bool force_lock);
/*
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
-void block_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
-int __block_write_full_page(struct inode *inode, struct page *page,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler);
-int block_read_full_page(struct page*, get_block_t*);
-int block_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count);
+void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+ void *get_block);
+int __block_write_full_folio(struct inode *inode, struct folio *folio,
+ get_block_t *get_block, struct writeback_control *wbc);
+int block_read_full_folio(struct folio *, get_block_t *);
+bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- unsigned flags, struct page **pagep, get_block_t *get_block);
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ struct folio **foliop, get_block_t *get_block);
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block);
-int block_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-int generic_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
-int cont_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, unsigned, struct page **, void **,
+int block_write_end(loff_t pos, unsigned len, unsigned copied, struct folio *);
+int generic_write_end(const struct kiocb *, struct address_space *,
+ loff_t, unsigned len, unsigned copied,
+ struct folio *, void *);
+void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
+int cont_write_begin(const struct kiocb *, struct address_space *, loff_t,
+ unsigned, struct folio **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
-int block_commit_write(struct page *page, unsigned from, unsigned to);
+void block_commit_write(struct folio *folio, size_t from, size_t to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
-/* Convert errno to return value from ->page_mkwrite() call */
-static inline int block_page_mkwrite_return(int err)
-{
- if (err == 0)
- return VM_FAULT_LOCKED;
- if (err == -EFAULT || err == -EAGAIN)
- return VM_FAULT_NOPAGE;
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- /* -ENOSPC, -EDQUOT, -EIO ... */
- return VM_FAULT_SIGBUS;
-}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
- struct page **, void **, get_block_t*);
-int nobh_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
-void buffer_init(void);
+#ifdef CONFIG_MIGRATION
+extern int buffer_migrate_folio(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+extern int buffer_migrate_folio_norefs(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+#else
+#define buffer_migrate_folio NULL
+#define buffer_migrate_folio_norefs NULL
+#endif
/*
* inline definitions
*/
-static inline void attach_page_buffers(struct page *page,
- struct buffer_head *head)
-{
- get_page(page);
- SetPagePrivate(page);
- set_page_private(page, (unsigned long)head);
-}
-
static inline void get_bh(struct buffer_head *bh)
{
atomic_inc(&bh->b_count);
@@ -287,12 +302,38 @@ static inline void put_bh(struct buffer_head *bh)
atomic_dec(&bh->b_count);
}
+/**
+ * brelse - Release a buffer.
+ * @bh: The buffer to release.
+ *
+ * Decrement a buffer_head's reference count. If @bh is NULL, this
+ * function is a no-op.
+ *
+ * If all buffers on a folio have zero reference count, are clean
+ * and unlocked, and if the folio is unlocked and not under writeback,
+ * then try_to_free_buffers() may strip the buffers from the folio in
+ * preparation for freeing it (sometimes, rarely, buffers are removed
+ * from a folio but it ends up not being freed, and buffers may later
+ * be reattached).
+ *
+ * Context: Any context.
+ */
static inline void brelse(struct buffer_head *bh)
{
if (bh)
__brelse(bh);
}
+/**
+ * bforget - Discard any dirty data in a buffer.
+ * @bh: The buffer to forget.
+ *
+ * Call this function instead of brelse() if the data written to a buffer
+ * no longer needs to be written back. It will clear the buffer's dirty
+ * flag so writeback of this buffer will be skipped.
+ *
+ * Context: Any context.
+ */
static inline void bforget(struct buffer_head *bh)
{
if (bh)
@@ -317,17 +358,38 @@ sb_breadahead(struct super_block *sb, sector_t block)
__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
-static inline struct buffer_head *
-sb_getblk(struct super_block *sb, sector_t block)
+static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
+ sector_t block, unsigned size)
+{
+ gfp_t gfp;
+
+ gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
+ gfp |= __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
+}
+
+static inline struct buffer_head *__getblk(struct block_device *bdev,
+ sector_t block, unsigned size)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
+ gfp_t gfp;
+
+ gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
+ gfp |= __GFP_MOVABLE | __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
}
+static inline struct buffer_head *sb_getblk(struct super_block *sb,
+ sector_t block)
+{
+ return __getblk(sb->s_bdev, block, sb->s_blocksize);
+}
-static inline struct buffer_head *
-sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
+ sector_t block, gfp_t gfp)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+ return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}
static inline struct buffer_head *
@@ -336,6 +398,12 @@ sb_find_get_block(struct super_block *sb, sector_t block)
return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
+static inline struct buffer_head *
+sb_find_get_block_nonatomic(struct super_block *sb, sector_t block)
+{
+ return __find_get_block_nonatomic(sb->s_bdev, block, sb->s_blocksize);
+}
+
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
@@ -364,46 +432,110 @@ static inline void lock_buffer(struct buffer_head *bh)
__lock_buffer(bh);
}
-static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
- sector_t block,
- unsigned size)
+static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
- return __getblk_gfp(bdev, block, size, 0);
+ if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
+ if (!buffer_uptodate(bh))
+ __bh_read(bh, op_flags, false);
+ else
+ unlock_buffer(bh);
+ }
}
-static inline struct buffer_head *__getblk(struct block_device *bdev,
- sector_t block,
- unsigned size)
+static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!bh_uptodate_or_lock(bh))
+ __bh_read(bh, op_flags, false);
+}
+
+/* Returns 1 if the buffer was already uptodate, 0 on success, and -EIO on error. */
+static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (bh_uptodate_or_lock(bh))
+ return 1;
+ return __bh_read(bh, op_flags, true);
+}
+
+static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
- return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
+ __bh_read_batch(nr, bhs, 0, true);
+}
+
+static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags)
+{
+ __bh_read_batch(nr, bhs, op_flags, false);
}
/**
- * __bread() - reads a specified block and returns the bh
- * @bdev: the block_device to read from
- * @block: number of block
- * @size: size (in bytes) to read
+ * __bread() - Read a block.
+ * @bdev: The block device to read from.
+ * @block: Block number in units of block size.
+ * @size: The block size of this device in bytes.
*
- * Reads a specified block, and returns buffer head that contains it.
- * The page cache is allocated from movable area so that it can be migrated.
- * It returns NULL if the block was unreadable.
+ * Read a specified block, and return the buffer head that refers
+ * to it. The memory is allocated from the movable area so that it can
+ * be migrated. The returned buffer head has its refcount increased.
+ * The caller should call brelse() when it has finished with the buffer.
+ *
+ * Context: May sleep waiting for I/O.
+ * Return: NULL if the block was unreadable.
*/
-static inline struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, unsigned size)
+static inline struct buffer_head *__bread(struct block_device *bdev,
+ sector_t block, unsigned size)
{
return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
-extern int __set_page_dirty_buffers(struct page *page);
+/**
+ * get_nth_bh - Get a reference on the n'th buffer after this one.
+ * @bh: The buffer to start counting from.
+ * @count: How many buffers to skip.
+ *
+ * This is primarily useful for finding the nth buffer in a folio; in
+ * that case you pass the head buffer and the byte offset in the folio
+ * divided by the block size. It can be used for other purposes, but
+ * it will wrap at the end of the folio rather than returning NULL or
+ * proceeding to the next folio for you.
+ *
+ * Return: The requested buffer with an elevated refcount.
+ */
+static inline __must_check
+struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
+{
+ while (count--)
+ bh = bh->b_this_page;
+ get_bh(bh);
+ return bh;
+}
+
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
+
+#ifdef CONFIG_BUFFER_HEAD
+
+void buffer_init(void);
+bool try_to_free_buffers(struct folio *folio);
+int inode_has_buffers(struct inode *inode);
+void invalidate_inode_buffers(struct inode *inode);
+int remove_inode_buffers(struct inode *inode);
+int sync_mapping_buffers(struct address_space *mapping);
+void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
+extern int buffer_heads_over_limit;
-#else /* CONFIG_BLOCK */
+#else /* CONFIG_BUFFER_HEAD */
static inline void buffer_init(void) {}
-static inline int try_to_free_buffers(struct page *page) { return 1; }
+static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus(void) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
+#define buffer_heads_over_limit 0
-#endif /* CONFIG_BLOCK */
+#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 5d5554c874fd..17a4933c611b 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BUG_H
#define _LINUX_BUG_H
@@ -35,32 +36,68 @@ static inline int is_warning_bug(const struct bug_entry *bug)
return bug->flags & BUGFLAG_WARNING;
}
+void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line);
+
struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
+enum bug_trap_type report_bug_entry(struct bug_entry *bug, struct pt_regs *regs);
/* These are defined by the architecture */
int is_valid_bugaddr(unsigned long addr);
+void generic_bug_clear_once(void);
+
#else /* !CONFIG_GENERIC_BUG */
+static inline void *find_bug(unsigned long bugaddr)
+{
+ return NULL;
+}
+
static inline enum bug_trap_type report_bug(unsigned long bug_addr,
struct pt_regs *regs)
{
return BUG_TRAP_TYPE_BUG;
}
+struct bug_entry;
+
+static inline enum bug_trap_type
+report_bug_entry(struct bug_entry *bug, struct pt_regs *regs)
+{
+ return BUG_TRAP_TYPE_BUG;
+}
+
+static inline void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line)
+{
+ *file = NULL;
+ *line = 0;
+}
+
+static inline void generic_bug_clear_once(void) {}
+
#endif /* CONFIG_GENERIC_BUG */
+#ifdef CONFIG_PRINTK
+void mem_dump_obj(void *object);
+#else
+static inline void mem_dump_obj(void *object) {}
+#endif
+
/*
+ * Detected data corruption should stop operation on the affected
+ * structures. The return value must be checked and sanely acted on by
+ * the caller.
*/
static inline __must_check bool check_data_corruption(bool v) { return v; }
-#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \
+#define CHECK_DATA_CORRUPTION(condition, addr, fmt, ...) \
check_data_corruption(({ \
bool corruption = unlikely(condition); \
if (corruption) { \
+ if (addr) \
+ mem_dump_obj(addr); \
if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
pr_err(fmt, ##__VA_ARGS__); \
BUG(); \
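
/*
 * Usage sketch (hypothetical list walker): report the corruption, dump
 * the containing object, and bail out rather than follow a bad pointer.
 */
static bool example_list_valid(struct list_head *node)
{
	if (CHECK_DATA_CORRUPTION(node->next == NULL, node,
				  "list corruption: next is NULL\n"))
		return false;
	return true;
}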
diff --git a/include/linux/build-salt.h b/include/linux/build-salt.h
new file mode 100644
index 000000000000..bb007bd05e7a
--- /dev/null
+++ b/include/linux/build-salt.h
@@ -0,0 +1,20 @@
+#ifndef __BUILD_SALT_H
+#define __BUILD_SALT_H
+
+#include <linux/elfnote.h>
+
+#define LINUX_ELFNOTE_BUILD_SALT 0x100
+
+#ifdef __ASSEMBLER__
+
+#define BUILD_SALT \
+ ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT)
+
+#else
+
+#define BUILD_SALT \
+ ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT)
+
+#endif
+
+#endif /* __BUILD_SALT_H */
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index b7d22d60008a..2cfbb4c65c78 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -1,18 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BUILD_BUG_H
#define _LINUX_BUILD_BUG_H
#include <linux/compiler.h>
-#ifdef __CHECKER__
-#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
-#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
-#define BUILD_BUG_ON_ZERO(e) (0)
-#define BUILD_BUG_ON_NULL(e) ((void *)0)
-#define BUILD_BUG_ON_INVALID(e) (0)
-#define BUILD_BUG_ON_MSG(cond, msg) (0)
-#define BUILD_BUG_ON(condition) (0)
-#define BUILD_BUG() (0)
-#else /* __CHECKER__ */
+/*
+ * Force a compilation error if condition is true, but also produce a
+ * result (of value 0 and type int), so the expression can be used
+ * e.g. in a structure initializer (or where-ever else comma expressions
+ * aren't permitted).
+ *
+ * Take an error message as an optional second argument. If omitted,
+ * default to the stringification of the tested expression.
+ */
+#define BUILD_BUG_ON_ZERO(e, ...) \
+ __BUILD_BUG_ON_ZERO_MSG(e, ##__VA_ARGS__, #e " is true")
/* Force a compilation error if a constant expression is not a power of 2 */
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
@@ -21,15 +23,6 @@
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
/*
- * Force a compilation error if condition is true, but also produce a
- * result (of value 0 and type size_t), so the expression can be used
- * e.g. in a structure initializer (or where-ever else comma expressions
- * aren't permitted).
- */
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))
-#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:(-!!(e)); }))
-
-/*
* BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
* expression but avoids the generation of any code, even if that expression
* has side-effects.
@@ -52,23 +45,9 @@
* If you have some code which relies on certain constants being equal, or
* some other compile-time-evaluated condition, you should use BUILD_BUG_ON to
* detect if someone changes it.
- *
- * The implementation uses gcc's reluctance to create a negative array, but gcc
- * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to
- * inline functions). Luckily, in 4.3 they added the "error" function
- * attribute just for this type of case. Thus, we use a negative sized array
- * (should always create an error on gcc versions older than 4.4) and then call
- * an undefined function with the error attribute (should always create an
- * error on gcc 4.3 and later). If for some reason, neither creates a
- * compile-time error, we'll still have a link-time error, which is harder to
- * track down.
*/
-#ifndef __OPTIMIZE__
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#else
#define BUILD_BUG_ON(condition) \
BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
-#endif
/**
* BUILD_BUG - break compile if used.
@@ -79,6 +58,32 @@
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
-#endif /* __CHECKER__ */
+/**
+ * static_assert - check integer constant expression at build time
+ *
+ * static_assert() is a wrapper for the C11 _Static_assert, with a
+ * little macro magic to make the message optional (defaulting to the
+ * stringification of the tested expression).
+ *
+ * Contrary to BUILD_BUG_ON(), static_assert() can be used at global
+ * scope, but requires the expression to be an integer constant
+ * expression (i.e., it is not enough that __builtin_constant_p() is
+ * true for expr).
+ *
+ * Also note that BUILD_BUG_ON() fails the build if the condition is
+ * true, while static_assert() fails the build if the expression is
+ * false.
+ */
+#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
+#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+
+
+/*
+ * Compile time check that field has an expected offset
+ */
+#define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \
+ BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \
+ "Offset of " #field " in " #type " has changed.")
+
#endif /* _LINUX_BUILD_BUG_H */
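
/*
 * Quick illustration (struct name hypothetical): static_assert() works
 * at file scope on integer constant expressions, while
 * ASSERT_STRUCT_OFFSET() expands to BUILD_BUG_ON_MSG() and so belongs
 * in function scope.
 */
struct example_hdr {
	u32 magic;
	u32 len;
};

static_assert(sizeof(struct example_hdr) == 8, "example_hdr must stay 8 bytes");

static inline void example_check_layout(void)
{
	ASSERT_STRUCT_OFFSET(struct example_hdr, len, 4);
}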
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
new file mode 100644
index 000000000000..831c1b4b626c
--- /dev/null
+++ b/include/linux/buildid.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILDID_H
+#define _LINUX_BUILDID_H
+
+#include <linux/types.h>
+
+#define BUILD_ID_SIZE_MAX 20
+
+struct vm_area_struct;
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
+extern unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX];
+void init_vmlinux_build_id(void);
+#else
+static inline void init_vmlinux_build_id(void) { }
+#endif
+
+struct freader {
+ void *buf;
+ u32 buf_sz;
+ int err;
+ union {
+ struct {
+ struct file *file;
+ struct folio *folio;
+ void *addr;
+ loff_t folio_off;
+ bool may_fault;
+ };
+ struct {
+ const char *data;
+ u64 data_sz;
+ };
+ };
+};
+
+void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
+ struct file *file, bool may_fault);
+void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz);
+const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz);
+void freader_cleanup(struct freader *r);
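
/*
 * A minimal sketch (function name hypothetical) of the freader API
 * above, reading from an in-memory image: freader_fetch() returns a
 * pointer to @sz bytes at the given offset, or NULL with the cause
 * left in r.err.
 */
static int example_peek_magic(const char *image, u64 image_sz)
{
	struct freader r;
	const void *p;

	freader_init_from_mem(&r, image, image_sz);
	p = freader_fetch(&r, 0, 4);	/* e.g. the ELF magic bytes */
	freader_cleanup(&r);

	return p ? 0 : r.err;
}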
+
+#endif
diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h
new file mode 100644
index 000000000000..eaa7a3f54450
--- /dev/null
+++ b/include/linux/bus/stm32_firewall_device.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef STM32_FIREWALL_DEVICE_H
+#define STM32_FIREWALL_DEVICE_H
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define STM32_FIREWALL_MAX_EXTRA_ARGS 5
+
+/* Opaque reference to stm32_firewall_controller */
+struct stm32_firewall_controller;
+
+/**
+ * struct stm32_firewall - Information on a device's firewall. Each device can have more than one
+ * firewall.
+ *
+ * @firewall_ctrl: Pointer referencing a firewall controller of the device. It is
+ * opaque so a device cannot manipulate the controller's ops or access
+ * the controller's data
+ * @extra_args: Extra arguments that are implementation dependent
+ * @entry: Name of the firewall entry
+ * @extra_args_size: Number of extra arguments
+ * @firewall_id: Firewall ID associated with the device for this firewall controller
+ */
+struct stm32_firewall {
+ struct stm32_firewall_controller *firewall_ctrl;
+ u32 extra_args[STM32_FIREWALL_MAX_EXTRA_ARGS];
+ const char *entry;
+ size_t extra_args_size;
+ u32 firewall_id;
+};
+
+#if IS_ENABLED(CONFIG_STM32_FIREWALL)
+/**
+ * stm32_firewall_get_firewall - Get the firewall(s) associated with a given device.
+ * The firewall controller reference is always the first argument
+ * of each of the access-controller property entries.
+ * The firewall ID is always the second argument of each of the
+ * access-controller property entries.
+ * If there's no argument linked to the phandle, then the firewall ID
+ * field is set to U32_MAX, which is an invalid ID.
+ *
+ * @np: Device node to parse
+ * @firewall: Array of firewall references
+ * @nb_firewall: Number of firewall references to get. Must be at least 1.
+ *
+ * Returns 0 on success, -ENODEV if there's no match with a firewall controller, or an appropriate
+ * errno code if an error occurred.
+ */
+int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+ unsigned int nb_firewall);
+
+/**
+ * stm32_firewall_grant_access - Request firewall access rights and grant access.
+ *
+ * @firewall: Firewall reference containing the ID to check against its firewall
+ * controller
+ *
+ * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if @firewall is NULL, or
+ * an appropriate errno code if an error occurred
+ */
+int stm32_firewall_grant_access(struct stm32_firewall *firewall);
+
+/**
+ * stm32_firewall_release_access - Release access granted from a call to
+ * stm32_firewall_grant_access().
+ *
+ * @firewall: Firewall reference containing the ID to check against its firewall
+ * controller
+ */
+void stm32_firewall_release_access(struct stm32_firewall *firewall);
+
+/**
+ * stm32_firewall_grant_access_by_id - Request firewall access rights of a given device
+ * based on a specific firewall ID
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall: Firewall reference containing the firewall controller
+ * @subsystem_id: Firewall ID of the subsystem resource
+ *
+ * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if @firewall is NULL, or
+ * an appropriate errno code if an error occurred
+ */
+int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+/**
+ * stm32_firewall_release_access_by_id - Release access granted from a call to
+ * stm32_firewall_grant_access_by_id().
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall: Firewall reference containing the firewall controller
+ * @subsystem_id: Firewall ID of the subsystem resource
+ */
+void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+#else /* CONFIG_STM32_FIREWALL */
+
+static inline int stm32_firewall_get_firewall(struct device_node *np,
+ struct stm32_firewall *firewall,
+ unsigned int nb_firewall)
+{
+ return -ENODEV;
+}
+
+static inline int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+{
+ return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access(struct stm32_firewall *firewall)
+{
+}
+
+static inline int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall,
+ u32 subsystem_id)
+{
+ return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall,
+ u32 subsystem_id)
+{
+}
+
+#endif /* CONFIG_STM32_FIREWALL */
+#endif /* STM32_FIREWALL_DEVICE_H */
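
/*
 * A probe-time sketch (driver name hypothetical) of the API above:
 * fetch the first firewall reference for the device node, then take
 * access before touching the peripheral and release it when done.
 */
static int example_probe(struct platform_device *pdev)
{
	struct stm32_firewall fw;
	int err;

	err = stm32_firewall_get_firewall(pdev->dev.of_node, &fw, 1);
	if (err)
		return err;

	err = stm32_firewall_grant_access(&fw);
	if (err)
		return err;

	/* ... program the peripheral ... */

	stm32_firewall_release_access(&fw);
	return 0;
}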
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ec8a4d7af6bd..3fc0efa0825b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -1,31 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* bvec iterator
*
* Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
*/
-#ifndef __LINUX_BVEC_ITER_H
-#define __LINUX_BVEC_ITER_H
+#ifndef __LINUX_BVEC_H
+#define __LINUX_BVEC_H
-#include <linux/kernel.h>
+#include <linux/highmem.h>
#include <linux/bug.h>
#include <linux/errno.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
-/*
- * was unsigned short, but we might as well be ready for > 64kB I/O pages
+struct page;
+
+/**
+ * struct bio_vec - a contiguous range of physical memory addresses
+ * @bv_page: First page associated with the address range.
+ * @bv_len: Number of bytes in the address range.
+ * @bv_offset: Start of the address range relative to the start of @bv_page.
+ *
+ * All pages within a bio_vec starting from @bv_page are contiguous and
+ * can simply be iterated (see bvec_advance()).
*/
struct bio_vec {
struct page *bv_page;
@@ -33,6 +31,49 @@ struct bio_vec {
unsigned int bv_offset;
};
+/**
+ * bvec_set_page - initialize a bvec based off a struct page
+ * @bv: bvec to initialize
+ * @page: page the bvec should point to
+ * @len: length of the bvec
+ * @offset: offset into the page
+ */
+static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ bv->bv_page = page;
+ bv->bv_len = len;
+ bv->bv_offset = offset;
+}
+
+/**
+ * bvec_set_folio - initialize a bvec based off a struct folio
+ * @bv: bvec to initialize
+ * @folio: folio the bvec should point to
+ * @len: length of the bvec
+ * @offset: offset into the folio
+ */
+static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio,
+ size_t len, size_t offset)
+{
+ unsigned long nr = offset / PAGE_SIZE;
+
+ WARN_ON_ONCE(len > UINT_MAX);
+ bvec_set_page(bv, folio_page(folio, nr), len, offset % PAGE_SIZE);
+}
+
+/**
+ * bvec_set_virt - initialize a bvec based on a virtual address
+ * @bv: bvec to initialize
+ * @vaddr: virtual address to set the bvec to
+ * @len: length of the bvec
+ */
+static inline void bvec_set_virt(struct bio_vec *bv, void *vaddr,
+ unsigned int len)
+{
+ bvec_set_page(bv, virt_to_page(vaddr), len, offset_in_page(vaddr));
+}
+
struct bvec_iter {
sector_t bi_sector; /* device address in 512 byte
sectors */
@@ -40,10 +81,14 @@ struct bvec_iter {
unsigned int bi_idx; /* current index into bvl_vec */
- unsigned int bi_done; /* number of bytes completed */
-
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
+} __packed __aligned(4);
+
+struct bvec_iter_all {
+ struct bio_vec bv;
+ int idx;
+ unsigned done;
};
/*
@@ -52,16 +97,39 @@ struct bvec_iter {
*/
#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
-#define bvec_iter_page(bvec, iter) \
+/* multi-page (mp_bvec) helpers */
+#define mp_bvec_iter_page(bvec, iter) \
(__bvec_iter_bvec((bvec), (iter))->bv_page)
-#define bvec_iter_len(bvec, iter) \
+#define mp_bvec_iter_len(bvec, iter) \
min((iter).bi_size, \
__bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
-#define bvec_iter_offset(bvec, iter) \
+#define mp_bvec_iter_offset(bvec, iter) \
(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+#define mp_bvec_iter_page_idx(bvec, iter) \
+ (mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
+
+#define mp_bvec_iter_bvec(bvec, iter) \
+((struct bio_vec) { \
+ .bv_page = mp_bvec_iter_page((bvec), (iter)), \
+ .bv_len = mp_bvec_iter_len((bvec), (iter)), \
+ .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \
+})
+
+/* For building single-page bvec in flight */
+#define bvec_iter_offset(bvec, iter) \
+ (mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE)
+
+#define bvec_iter_len(bvec, iter) \
+ min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \
+ PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
+
+#define bvec_iter_page(bvec, iter) \
+ (mp_bvec_iter_page((bvec), (iter)) + \
+ mp_bvec_iter_page_idx((bvec), (iter)))
+
#define bvec_iter_bvec(bvec, iter) \
((struct bio_vec) { \
.bv_page = bvec_iter_page((bvec), (iter)), \
@@ -72,57 +140,159 @@ struct bvec_iter {
static inline bool bvec_iter_advance(const struct bio_vec *bv,
struct bvec_iter *iter, unsigned bytes)
{
+ unsigned int idx = iter->bi_idx;
+
if (WARN_ONCE(bytes > iter->bi_size,
"Attempted to advance past end of bvec iter\n")) {
iter->bi_size = 0;
return false;
}
- while (bytes) {
- unsigned iter_len = bvec_iter_len(bv, *iter);
- unsigned len = min(bytes, iter_len);
-
- bytes -= len;
- iter->bi_size -= len;
- iter->bi_bvec_done += len;
- iter->bi_done += len;
+ iter->bi_size -= bytes;
+ bytes += iter->bi_bvec_done;
- if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
- iter->bi_bvec_done = 0;
- iter->bi_idx++;
- }
+ while (bytes && bytes >= bv[idx].bv_len) {
+ bytes -= bv[idx].bv_len;
+ idx++;
}
+
+ iter->bi_idx = idx;
+ iter->bi_bvec_done = bytes;
return true;
}
-static inline bool bvec_iter_rewind(const struct bio_vec *bv,
- struct bvec_iter *iter,
- unsigned int bytes)
+/*
+ * A simpler version of bvec_iter_advance(): @bytes must not span
+ * multiple bvec entries, i.e. bytes <= bv[iter->bi_idx].bv_len.
+ */
+static inline void bvec_iter_advance_single(const struct bio_vec *bv,
+ struct bvec_iter *iter, unsigned int bytes)
{
- while (bytes) {
- unsigned len = min(bytes, iter->bi_bvec_done);
-
- if (iter->bi_bvec_done == 0) {
- if (WARN_ONCE(iter->bi_idx == 0,
- "Attempted to rewind iter beyond "
- "bvec's boundaries\n")) {
- return false;
- }
- iter->bi_idx--;
- iter->bi_bvec_done = __bvec_iter_bvec(bv, *iter)->bv_len;
- continue;
- }
- bytes -= len;
- iter->bi_size += len;
- iter->bi_bvec_done -= len;
+ unsigned int done = iter->bi_bvec_done + bytes;
+
+ if (done == bv[iter->bi_idx].bv_len) {
+ done = 0;
+ iter->bi_idx++;
}
- return true;
+ iter->bi_bvec_done = done;
+ iter->bi_size -= bytes;
}
#define for_each_bvec(bvl, bio_vec, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+
+#define for_each_mp_bvec(bvl, bio_vec, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = mp_bvec_iter_bvec((bio_vec), (iter))), 1); \
+ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+
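A minimal sketch of consuming a bvec table in single-page segments with for_each_bvec(); the table `bvecs` and the total byte count `nr_bytes` are assumed to be provided by the caller:

	struct bvec_iter start = { .bi_size = nr_bytes };	/* bi_idx/bi_bvec_done start at 0 */
	struct bvec_iter iter;
	struct bio_vec bv;

	for_each_bvec(bv, bvecs, iter, start)
		pr_debug("page %p offset %u len %u\n",
			 bv.bv_page, bv.bv_offset, bv.bv_len);

Each iteration yields a bvec clamped to one page; for_each_mp_bvec() would instead yield the full multi-page entries.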
+/* for iterating one bio from start to end */
+#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
+{ \
+ .bi_sector = 0, \
+ .bi_size = UINT_MAX, \
+ .bi_idx = 0, \
+ .bi_bvec_done = 0, \
+}
+
+static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
+{
+ iter_all->done = 0;
+ iter_all->idx = 0;
+
+ return &iter_all->bv;
+}
+
+static inline void bvec_advance(const struct bio_vec *bvec,
+ struct bvec_iter_all *iter_all)
+{
+ struct bio_vec *bv = &iter_all->bv;
+
+ if (iter_all->done) {
+ bv->bv_page++;
+ bv->bv_offset = 0;
+ } else {
+ bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);
+ bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
+ }
+ bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
+ bvec->bv_len - iter_all->done);
+ iter_all->done += bv->bv_len;
+
+ if (iter_all->done == bvec->bv_len) {
+ iter_all->idx++;
+ iter_all->done = 0;
+ }
+}
+
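bvec_init_iter_all() and bvec_advance() together split one (possibly multi-page) bvec into per-page segments. A hedged sketch, assuming `mp` is a struct bio_vec that may span several contiguous pages:

	struct bvec_iter_all iter_all;
	struct bio_vec *bv = bvec_init_iter_all(&iter_all);

	do {
		bvec_advance(&mp, &iter_all);	/* fills *bv with one page's worth */
		/* ... use bv->bv_page, bv->bv_offset, bv->bv_len ... */
	} while (iter_all.done);	/* done wraps back to 0 after the last page */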
+/**
+ * bvec_kmap_local - map a bvec into the kernel virtual address space
+ * @bvec: bvec to map
+ *
+ * Must be called on single-page bvecs only. Call kunmap_local on the returned
+ * address to unmap.
+ */
+static inline void *bvec_kmap_local(struct bio_vec *bvec)
+{
+ return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
+}
+
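The usual pattern pairs bvec_kmap_local() with kunmap_local(), for example to checksum a single-page bvec; crc32_le() from <linux/crc32.h> is only an assumed example consumer here, and `crc` is assumed to be declared by the caller:

	void *p = bvec_kmap_local(&bv);

	crc = crc32_le(crc, p, bv.bv_len);
	kunmap_local(p);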
+/**
+ * memcpy_from_bvec - copy data from a bvec
+ * @to: destination buffer
+ * @bvec: bvec to copy from
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
+{
+ memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * memcpy_to_bvec - copy data to a bvec
+ * @bvec: bvec to copy to
+ * @from: source buffer
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
+{
+ memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
+}
+
+/**
+ * memzero_bvec - zero all data in a bvec
+ * @bvec: bvec to zero
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memzero_bvec(struct bio_vec *bvec)
+{
+ memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * bvec_virt - return the virtual address for a bvec
+ * @bvec: bvec to return the virtual address for
+ *
+ * Note: the caller must ensure that @bvec->bv_page is not a highmem page.
+ */
+static inline void *bvec_virt(struct bio_vec *bvec)
+{
+ WARN_ON_ONCE(PageHighMem(bvec->bv_page));
+ return page_address(bvec->bv_page) + bvec->bv_offset;
+}
+
+/**
+ * bvec_phys - return the physical address for a bvec
+ * @bvec: bvec to return the physical address for
+ */
+static inline phys_addr_t bvec_phys(const struct bio_vec *bvec)
+{
+ return page_to_phys(bvec->bv_page) + bvec->bv_offset;
+}
-#endif /* __LINUX_BVEC_ITER_H */
+#endif /* __LINUX_BVEC_H */
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 392041475c72..d64a524d3cfb 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -1,7 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H
#define _LINUX_BYTEORDER_BIG_ENDIAN_H
#include <uapi/linux/byteorder/big_endian.h>
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#warning inconsistent configuration, needs CONFIG_CPU_BIG_ENDIAN
+#endif
+
#include <linux/byteorder/generic.h>
#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 89f67c1c3160..55a44199de87 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BYTEORDER_GENERIC_H
#define _LINUX_BYTEORDER_GENERIC_H
@@ -155,6 +156,55 @@ static inline void le64_add_cpu(__le64 *var, u64 val)
*var = cpu_to_le64(le64_to_cpu(*var) + val);
}
+/* XXX: this stuff can be optimized */
+static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __le32_to_cpus(buf);
+ buf++;
+ }
+}
+
+static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __cpu_to_le32s(buf);
+ buf++;
+ }
+}
+
+static inline void le64_to_cpu_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ __le64_to_cpus(buf);
+ buf++;
+ }
+}
+
+static inline void cpu_to_le64_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ __cpu_to_le64s(buf);
+ buf++;
+ }
+}
+
+static inline void memcpy_from_le32(u32 *dst, const __le32 *src, size_t words)
+{
+ size_t i;
+
+ for (i = 0; i < words; i++)
+ dst[i] = le32_to_cpu(src[i]);
+}
+
+static inline void memcpy_to_le32(__le32 *dst, const u32 *src, size_t words)
+{
+ size_t i;
+
+ for (i = 0; i < words; i++)
+ dst[i] = cpu_to_le32(src[i]);
+}
+
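For example, importing a little-endian on-disk header into CPU byte order could use the in-place variant; `disk_buf` and the four-word layout are hypothetical:

	u32 hdr[4];

	memcpy(hdr, disk_buf, sizeof(hdr));		/* raw little-endian words */
	le32_to_cpu_array(hdr, ARRAY_SIZE(hdr));	/* now in CPU order */

memcpy_from_le32()/memcpy_to_le32() do the same conversion while copying, which also keeps the __le32/u32 distinction visible to sparse.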
static inline void be16_add_cpu(__be16 *var, u16 val)
{
*var = cpu_to_be16(be16_to_cpu(*var) + val);
@@ -170,4 +220,20 @@ static inline void be64_add_cpu(__be64 *var, u64 val)
*var = cpu_to_be64(be64_to_cpu(*var) + val);
}
+static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++)
+ dst[i] = cpu_to_be32(src[i]);
+}
+
+static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++)
+ dst[i] = be32_to_cpu(src[i]);
+}
+
#endif /* _LINUX_BYTEORDER_GENERIC_H */
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 08057377aa23..1ec650ff76cb 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -1,7 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H
#include <uapi/linux/byteorder/little_endian.h>
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#warning inconsistent configuration, CONFIG_CPU_BIG_ENDIAN is set
+#endif
+
#include <linux/byteorder/generic.h>
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 4efabcb51347..4e93bc63c27a 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -1,16 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Silicon Labs C2 port Linux support
*
* Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation
*/
-#include <linux/kmemcheck.h>
-
#define C2PORT_NAME_LEN 32
struct device;
@@ -22,10 +17,8 @@ struct device;
/* Main struct */
struct c2port_ops;
struct c2port_device {
- kmemcheck_bitfield_begin(flags);
unsigned int access:1;
unsigned int flash_access:1;
- kmemcheck_bitfield_end(flags);
int id;
char name[C2PORT_NAME_LEN];
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 1be04f8c563a..e69768f50d53 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -1,21 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H
#include <uapi/linux/kernel.h>
+#include <vdso/cache.h>
#include <asm/cache.h>
#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif
-#ifndef SMP_CACHE_BYTES
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
+/**
+ * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
+ * @x: value to align
+ *
+ * On some architectures, the L2 ("SMP") cacheline size is bigger than the L1
+ * cacheline size, and sometimes this needs to be accounted for.
+ *
+ * Return: aligned value.
+ */
+#ifndef SMP_CACHE_ALIGN
+#define SMP_CACHE_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES)
+#endif
+
+/*
+ * ``__aligned_largest`` aligns a field to the value most optimal for the
+ * target architecture to perform memory operations. Get the actual value
+ * to be able to use it anywhere else.
+ */
+#ifndef __LARGEST_ALIGN
+#define __LARGEST_ALIGN sizeof(struct { long x; } __aligned_largest)
+#endif
+
+#ifndef LARGEST_ALIGN
+#define LARGEST_ALIGN(x) ALIGN(x, __LARGEST_ALIGN)
#endif
/*
* __read_mostly is used to keep rarely changing variables out of frequently
- * updated cachelines. If an architecture doesn't support it, ignore the
- * hint.
+ * updated cachelines. Its use should be reserved for data that is used
+ * frequently in hot paths. Performance traces can help decide when to use
+ * this. You want __read_mostly data to be tightly packed, so that in the
+ * best case multiple frequently read variables for a hot path will be next
+ * to each other in order to reduce the number of cachelines needed to
+ * execute a critical path. We should be mindful and selective of its use.
+ * i.e. if you're going to use it, please supply a *good* justification in
+ * your commit log.
*/
#ifndef __read_mostly
#define __read_mostly
@@ -27,11 +57,7 @@
* but may get written to during init, so can't live in .rodata (via "const").
*/
#ifndef __ro_after_init
-#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
-#endif
-
-#ifndef ____cacheline_aligned
-#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+#define __ro_after_init __section(".data..ro_after_init")
#endif
#ifndef ____cacheline_aligned_in_smp
@@ -78,4 +104,81 @@
#define cache_line_size() L1_CACHE_BYTES
#endif
+#ifndef __cacheline_group_begin
+#define __cacheline_group_begin(GROUP) \
+ __u8 __cacheline_group_begin__##GROUP[0]
+#endif
+
+#ifndef __cacheline_group_end
+#define __cacheline_group_end(GROUP) \
+ __u8 __cacheline_group_end__##GROUP[0]
+#endif
+
+/**
+ * __cacheline_group_begin_aligned - declare an aligned group start
+ * @GROUP: name of the group
+ * @...: optional group alignment
+ *
+ * The following block inside a struct:
+ *
+ * __cacheline_group_begin_aligned(grp);
+ * field a;
+ * field b;
+ * __cacheline_group_end_aligned(grp);
+ *
+ * will always be aligned to either the specified alignment or
+ * ``SMP_CACHE_BYTES``.
+ */
+#define __cacheline_group_begin_aligned(GROUP, ...) \
+ __cacheline_group_begin(GROUP) \
+ __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
+/**
+ * __cacheline_group_end_aligned - declare an aligned group end
+ * @GROUP: name of the group
+ * @...: optional alignment (same as was in __cacheline_group_begin_aligned())
+ *
+ * Note that the end marker is aligned to sizeof(long) to allow more precise
+ * size assertion. It also declares padding at the end to prevent the next
+ * field from falling into this cacheline.
+ */
+#define __cacheline_group_end_aligned(GROUP, ...) \
+ __cacheline_group_end(GROUP) __aligned(sizeof(long)); \
+ struct { } __cacheline_group_pad__##GROUP \
+ __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
+#ifndef CACHELINE_ASSERT_GROUP_MEMBER
+#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
+ BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
+ offsetofend(TYPE, MEMBER) <= \
+ offsetof(TYPE, __cacheline_group_end__##GROUP)))
+#endif
+
+#ifndef CACHELINE_ASSERT_GROUP_SIZE
+#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
+ BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
+ SIZE)
+#endif
+
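A minimal sketch of the group machinery, using a hypothetical structure; note that the assertions expand to BUILD_BUG_ON() and therefore must live in function scope:

	struct foo_stats {
		__cacheline_group_begin_aligned(rx_hot);
		u64 rx_packets;
		u64 rx_bytes;
		__cacheline_group_end_aligned(rx_hot);
		u64 tx_packets;	/* lands on the next cacheline */
	};

	static void foo_assert_layout(void)	/* e.g. called from init */
	{
		CACHELINE_ASSERT_GROUP_MEMBER(struct foo_stats, rx_hot, rx_bytes);
		CACHELINE_ASSERT_GROUP_SIZE(struct foo_stats, rx_hot, SMP_CACHE_BYTES);
	}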
+/*
+ * Helper to add padding within a struct to ensure data fall into separate
+ * cachelines.
+ */
+#if defined(CONFIG_SMP)
+struct cacheline_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define CACHELINE_PADDING(name) struct cacheline_padding name
+#else
+#define CACHELINE_PADDING(name)
+#endif
+
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_HAS_DMA_MINALIGN
+#else
+#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
+#endif
+
#endif /* __LINUX_CACHE_H */
diff --git a/include/linux/cache_coherency.h b/include/linux/cache_coherency.h
new file mode 100644
index 000000000000..cc81c5733e31
--- /dev/null
+++ b/include/linux/cache_coherency.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cache coherency maintenance operation device drivers
+ *
+ * Copyright Huawei 2025
+ */
+#ifndef _LINUX_CACHE_COHERENCY_H_
+#define _LINUX_CACHE_COHERENCY_H_
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+struct cc_inval_params {
+ phys_addr_t addr;
+ size_t size;
+};
+
+struct cache_coherency_ops_inst;
+
+struct cache_coherency_ops {
+ int (*wbinv)(struct cache_coherency_ops_inst *cci,
+ struct cc_inval_params *invp);
+ int (*done)(struct cache_coherency_ops_inst *cci);
+};
+
+struct cache_coherency_ops_inst {
+ struct kref kref;
+ struct list_head node;
+ const struct cache_coherency_ops *ops;
+};
+
+int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci);
+void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci);
+
+struct cache_coherency_ops_inst *
+_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops,
+ size_t size);
+/**
+ * cache_coherency_ops_instance_alloc - Allocate cache coherency ops instance
+ * @ops: Cache maintenance operations
+ * @drv_struct: structure that contains the struct cache_coherency_ops_inst
+ * @member: Name of the struct cache_coherency_ops_inst member in @drv_struct.
+ *
+ * This allocates a driver specific structure and initializes the
+ * cache_coherency_ops_inst embedded in the drv_struct. Upon success the
+ * pointer must be freed via cache_coherency_ops_instance_put().
+ *
+ * Returns a &drv_struct * on success, %NULL on error.
+ */
+#define cache_coherency_ops_instance_alloc(ops, drv_struct, member) \
+ ({ \
+ static_assert(__same_type(struct cache_coherency_ops_inst, \
+ ((drv_struct *)NULL)->member)); \
+ static_assert(offsetof(drv_struct, member) == 0); \
+ (drv_struct *)_cache_coherency_ops_instance_alloc(ops, \
+ sizeof(drv_struct)); \
+ })
+void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci);
+
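A sketch of the intended usage, with a hypothetical driver structure and callbacks; the static_asserts in the macro force the ops instance to be the first member of the driver structure:

	struct foo_cc {
		struct cache_coherency_ops_inst cci;	/* must be first */
		void __iomem *regs;
	};

	static const struct cache_coherency_ops foo_cc_ops = {
		.wbinv = foo_cc_wbinv,	/* assumed driver callbacks */
		.done = foo_cc_done,
	};

	struct foo_cc *foo = cache_coherency_ops_instance_alloc(&foo_cc_ops,
								struct foo_cc, cci);

On teardown the reference is dropped with cache_coherency_ops_instance_put(&foo->cci).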
+#endif
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
new file mode 100644
index 000000000000..55f297b2c23f
--- /dev/null
+++ b/include/linux/cacheflush.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+
+#include <asm/cacheflush.h>
+
+struct folio;
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef flush_dcache_folio
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define flush_dcache_folio flush_dcache_folio
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+
+#ifndef flush_icache_pages
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+ struct page *page, unsigned int nr)
+{
+}
+#endif
+
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+
+#endif /* _LINUX_CACHEFLUSH_H */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 6a524bf6a06d..c8f4f0a0b874 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CACHEINFO_H
#define _LINUX_CACHEINFO_H
#include <linux/bitops.h>
-#include <linux/cpumask.h>
+#include <linux/cpuhplock.h>
+#include <linux/cpumask_types.h>
#include <linux/smp.h>
struct device_node;
@@ -16,6 +18,8 @@ enum cache_type {
CACHE_TYPE_UNIFIED = BIT(2),
};
+extern unsigned int coherency_max_size;
+
/**
* struct cacheinfo - represent a cache leaf node
* @id: This cache's id. It is unique among caches with the same (type, level).
@@ -33,9 +37,8 @@ enum cache_type {
* @shared_cpu_map: logical cpumask representing all the cpus sharing
* this cache node
* @attributes: bitfield representing various cache attributes
- * @of_node: if devicetree is used, this represents either the cpu node in
- * case there's no explicit cache node or the cache node itself in the
- * device tree
+ * @fw_token: Unique value used to determine if different cacheinfo
+ * structures represent a single hardware cache instance.
* @disable_sysfs: indicates whether this node is visible to the user via
* sysfs or not
* @priv: pointer to any private data structure specific to particular
@@ -64,41 +67,102 @@ struct cacheinfo {
#define CACHE_ALLOCATE_POLICY_MASK \
(CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
#define CACHE_ID BIT(4)
-
- struct device_node *of_node;
+ void *fw_token;
bool disable_sysfs;
void *priv;
};
struct cpu_cacheinfo {
struct cacheinfo *info_list;
+ unsigned int per_cpu_data_slice_size;
unsigned int num_levels;
unsigned int num_leaves;
bool cpu_map_populated;
+ bool early_ci_levels;
};
-/*
- * Helpers to make sure "func" is executed on the cpu whose cache
- * attributes are being detected
- */
-#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
-static inline void _##func(void *ret) \
-{ \
- int cpu = smp_processor_id(); \
- *(int *)ret = __##func(cpu); \
-} \
- \
-int func(unsigned int cpu) \
-{ \
- int ret; \
- smp_call_function_single(cpu, _##func, &ret, true); \
- return ret; \
-}
-
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
+int early_cache_level(unsigned int cpu);
int init_cache_level(unsigned int cpu);
+int init_of_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
+int cache_setup_acpi(unsigned int cpu);
+bool last_level_cache_is_valid(unsigned int cpu);
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
+int fetch_cache_info(unsigned int cpu);
+int detect_cache_attributes(unsigned int cpu);
+#ifndef CONFIG_ACPI_PPTT
+/*
+ * acpi_get_cache_info() is only called on ACPI-enabled
+ * platforms using the PPTT for topology. This means that if
+ * the platform supports other firmware configuration methods,
+ * we need to stub out the call when ACPI is disabled.
+ * ACPI-enabled platforms not using PPTT won't be making calls
+ * to this function, so we need not worry about them.
+ */
+static inline
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels)
+{
+ return -ENOENT;
+}
+#else
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels);
+#endif
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
+/*
+ * Get the cacheinfo structure for the cache associated with @cpu at
+ * level @level.
+ * cpuhp lock must be held.
+ */
+static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level)
+{
+ struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+ int i;
+
+ lockdep_assert_cpus_held();
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == level) {
+ if (ci->info_list[i].attributes & CACHE_ID)
+ return &ci->info_list[i];
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Get the id of the cache associated with @cpu at level @level.
+ * cpuhp lock must be held.
+ */
+static inline int get_cpu_cacheinfo_id(int cpu, int level)
+{
+ struct cacheinfo *ci = get_cpu_cacheinfo_level(cpu, level);
+
+ return ci ? ci->id : -1;
+}
+
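For instance, resctrl-style code grouping CPUs by their shared L3 instance could do the following under the cpuhp lock (level 3 is just an example here):

	int l3_id;

	cpus_read_lock();
	l3_id = get_cpu_cacheinfo_id(cpu, 3);	/* -1 without an L3 or CACHE_ID */
	cpus_read_unlock();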
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
+#define use_arch_cache_info() (true)
+#else
+#define use_arch_cache_info() (false)
+#endif
+
+#ifndef CONFIG_ARCH_HAS_CPU_CACHE_ALIASING
+#define cpu_dcache_is_aliasing() false
+#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing()
+#else
+#include <asm/cachetype.h>
+
+#ifndef cpu_icache_is_aliasing
+#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing()
+#endif
+
+#endif
+
#endif /* _LINUX_CACHEINFO_H */
diff --git a/include/linux/call_once.h b/include/linux/call_once.h
new file mode 100644
index 000000000000..13cd6469e7e5
--- /dev/null
+++ b/include/linux/call_once.h
@@ -0,0 +1,66 @@
+#ifndef _LINUX_CALL_ONCE_H
+#define _LINUX_CALL_ONCE_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#define ONCE_NOT_STARTED 0
+#define ONCE_RUNNING 1
+#define ONCE_COMPLETED 2
+
+struct once {
+ atomic_t state;
+ struct mutex lock;
+};
+
+static inline void __once_init(struct once *once, const char *name,
+ struct lock_class_key *key)
+{
+ atomic_set(&once->state, ONCE_NOT_STARTED);
+ __mutex_init(&once->lock, name, key);
+}
+
+#define once_init(once) \
+do { \
+ static struct lock_class_key __key; \
+ __once_init((once), #once, &__key); \
+} while (0)
+
+/*
+ * call_once - Ensure a function has been called exactly once
+ *
+ * @once: Tracking struct
+ * @cb: Function to be called
+ *
+ * If @once has never completed successfully before, call @cb and, if
+ * it returns a zero or positive value, mark @once as completed. Return
+ * the value returned by @cb.
+ *
+ * If @once has completed successfully before, return 0.
+ *
+ * The call to @cb is implicitly surrounded by a mutex, though for
+ * efficiency the function avoids taking it after the first call.
+ */
+static inline int call_once(struct once *once, int (*cb)(struct once *))
+{
+ int r, state;
+
+ /* Pairs with atomic_set_release() below. */
+ if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+ return 0;
+
+ guard(mutex)(&once->lock);
+ state = atomic_read(&once->state);
+ if (unlikely(state != ONCE_NOT_STARTED))
+ return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;
+
+ atomic_set(&once->state, ONCE_RUNNING);
+ r = cb(once);
+ if (r < 0)
+ atomic_set(&once->state, ONCE_NOT_STARTED);
+ else
+ atomic_set_release(&once->state, ONCE_COMPLETED);
+ return r;
+}
+
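A minimal usage sketch with a hypothetical setup callback:

	static struct once foo_once;

	static int foo_setup(struct once *once)
	{
		return foo_alloc_tables();	/* assumed; returns <0 on failure */
	}

	/* at init time */
	once_init(&foo_once);

	/* on any path that needs the tables; cheap after the first success */
	ret = call_once(&foo_once, foo_setup);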
+#endif /* _LINUX_CALL_ONCE_H */
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
new file mode 100644
index 000000000000..726d909e87ce
--- /dev/null
+++ b/include/linux/can/bittiming.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
+ */
+
+#ifndef _CAN_BITTIMING_H
+#define _CAN_BITTIMING_H
+
+#include <linux/netdevice.h>
+#include <linux/can/netlink.h>
+
+#define CAN_SYNC_SEG 1
+
+#define CAN_BITRATE_UNSET 0
+#define CAN_BITRATE_UNKNOWN (-1U)
+
+#define CAN_CTRLMODE_FD_TDC_MASK \
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
+#define CAN_CTRLMODE_XL_TDC_MASK \
+ (CAN_CTRLMODE_XL_TDC_AUTO | CAN_CTRLMODE_XL_TDC_MANUAL)
+#define CAN_CTRLMODE_TDC_AUTO_MASK \
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_XL_TDC_AUTO)
+#define CAN_CTRLMODE_TDC_MANUAL_MASK \
+ (CAN_CTRLMODE_TDC_MANUAL | CAN_CTRLMODE_XL_TDC_MANUAL)
+
+/*
+ * struct can_tdc - CAN FD Transmission Delay Compensation parameters
+ *
+ * At high bit rates, the propagation delay from the TX pin to the RX
+ * pin of the transceiver causes measurement errors: the sample point
+ * on the RX pin might occur on the previous bit.
+ *
+ * To solve this issue, ISO 11898-1 introduces in section 11.3.3
+ * "Transmitter delay compensation" a SSP (Secondary Sample Point)
+ * equal to the distance from the start of the bit time on the TX pin
+ * to the actual measurement on the RX pin.
+ *
+ * This structure contains the parameters to calculate that SSP.
+ *
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------- TDCO ------->|
+ * |<----------- Secondary Sample Point ---------->|
+ *
+ * To increase precision, unlike the other bittiming parameters
+ * which are measured in time quanta, the TDC parameters are measured
+ * in clock periods (also referred to as "minimum time quantum" in ISO
+ * 11898-1).
+ *
+ * @tdcv: Transmitter Delay Compensation Value. The time needed for
+ * the signal to propagate, i.e. the distance, in clock periods,
+ * from the start of the bit on the TX pin to when it is received
+ * on the RX pin. @tdcv depends on the controller modes:
+ *
+ * CAN_CTRLMODE_TDC_AUTO is set: The transceiver dynamically
+ * measures @tdcv for each transmitted CAN FD frame and the
+ * value provided here should be ignored.
+ *
+ * CAN_CTRLMODE_TDC_MANUAL is set: use the fixed provided @tdcv
+ * value.
+ *
+ * N.B. CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are
+ * mutually exclusive. Only one can be set at a time. If both
+ * CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are unset,
+ * TDC is disabled and all the values of this structure should be
+ * ignored.
+ *
+ * @tdco: Transmitter Delay Compensation Offset. Offset value, in
+ * clock periods, defining the distance between the start of the
+ * bit reception on the RX pin of the transceiver and the SSP
+ * position such that SSP = @tdcv + @tdco.
+ *
+ * @tdcf: Transmitter Delay Compensation Filter window. Defines the
+ * minimum value for the SSP position in clock periods. If the
+ * SSP position is less than @tdcf, then no delay compensation
+ * occurs and the normal sampling point is used instead. The
+ * feature is enabled if and only if @tdcv is set to zero
+ * (automatic mode) and @tdcf is configured to a value greater
+ * than @tdco.
+ */
+struct can_tdc {
+ u32 tdcv;
+ u32 tdco;
+ u32 tdcf;
+};
+
+/* The transceiver decoding margin corresponds to t_Decode in ISO 11898-2 */
+#define CAN_PWM_DECODE_NS 5
+/* Maximum PWM symbol duration. Corresponds to t_SymbolNom_MAX - t_Decode */
+#define CAN_PWM_NS_MAX (205 - CAN_PWM_DECODE_NS)
+
+/*
+ * struct can_tdc_const - CAN hardware-dependent constant for
+ * Transmission Delay Compensation
+ *
+ * @tdcv_min: Transmitter Delay Compensation Value minimum value. If
+ * the controller does not support manual mode for tdcv
+ * (c.f. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ * @tdcv_max: Transmitter Delay Compensation Value maximum value. If
+ * the controller does not support manual mode for tdcv
+ * (c.f. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ *
+ * @tdco_min: Transmitter Delay Compensation Offset minimum value.
+ * @tdco_max: Transmitter Delay Compensation Offset maximum value.
+ * Should not be zero. If the controller does not support TDC,
+ * then the pointer to this structure should be NULL.
+ *
+ * @tdcf_min: Transmitter Delay Compensation Filter window minimum
+ * value. If @tdcf_max is zero, this value is ignored.
+ * @tdcf_max: Transmitter Delay Compensation Filter window maximum
+ * value. Should be set to zero if the controller does not
+ * support this feature.
+ */
+struct can_tdc_const {
+ u32 tdcv_min;
+ u32 tdcv_max;
+ u32 tdco_min;
+ u32 tdco_max;
+ u32 tdcf_min;
+ u32 tdcf_max;
+};
+
+/*
+ * struct can_pwm - CAN Pulse-Width Modulation (PWM) parameters
+ *
+ * @pwms: pulse width modulation short phase
+ * @pwml: pulse width modulation long phase
+ * @pwmo: pulse width modulation offset
+ */
+struct can_pwm {
+ u32 pwms;
+ u32 pwml;
+ u32 pwmo;
+};
+
+/*
+ * struct can_pwm_const - CAN hardware-dependent constants for Pulse-Width
+ * Modulation (PWM)
+ *
+ * @pwms_min: PWM short phase minimum value. Must be at least 1.
+ * @pwms_max: PWM short phase maximum value
+ * @pwml_min: PWM long phase minimum value. Must be at least 1.
+ * @pwml_max: PWM long phase maximum value
+ * @pwmo_min: PWM offset phase minimum value
+ * @pwmo_max: PWM offset phase maximum value
+ */
+struct can_pwm_const {
+ u32 pwms_min;
+ u32 pwms_max;
+ u32 pwml_min;
+ u32 pwml_max;
+ u32 pwmo_min;
+ u32 pwmo_max;
+};
+
+struct data_bittiming_params {
+ const struct can_bittiming_const *data_bittiming_const;
+ struct can_bittiming data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ const struct can_pwm_const *pwm_const;
+ union {
+ struct can_tdc tdc;
+ struct can_pwm pwm;
+ };
+ const u32 *data_bitrate_const;
+ unsigned int data_bitrate_const_cnt;
+ int (*do_set_data_bittiming)(struct net_device *dev);
+ int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
+};
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported);
+
+int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack);
+#else /* !CONFIG_CAN_CALC_BITTIMING */
+static inline int
+can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG(extack, "bit-timing calculation not available\n");
+ return -EINVAL;
+}
+
+static inline void
+can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
+{
+}
+
+static inline int
+can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG(extack,
+ "bit-timing calculation not available: manually provide PWML and PWMS\n");
+ return -EINVAL;
+}
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+void can_sjw_set_default(struct can_bittiming *bt);
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack);
+
+int can_validate_pwm_bittiming(const struct net_device *dev,
+ const struct can_pwm *pwm,
+ struct netlink_ext_ack *extack);
+
+/*
+ * can_get_relative_tdco() - TDCO relative to the sample point
+ *
+ * struct can_tdc::tdco represents the absolute offset from TDCV. Some
+ * controllers use instead an offset relative to the Sample Point (SP)
+ * such that:
+ *
+ * SSP = TDCV + absolute TDCO
+ * = TDCV + SP + relative TDCO
+ *
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------------------------>| absolute TDCO
+ * |<--- Sample Point --->|
+ * | |<->| relative TDCO
+ * |<------------- Secondary Sample Point ------------>|
+ */
+static inline s32 can_get_relative_tdco(const struct data_bittiming_params *dbt_params)
+{
+ const struct can_bittiming *dbt = &dbt_params->data_bittiming;
+ s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ return (s32)dbt_params->tdc.tdco - sample_point_in_tc;
+}
+
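As a worked instance of the formula above: with assumed data-phase settings of brp = 1, prop_seg = 2 and phase_seg1 = 5, the sample point falls at (1 + 2 + 5) * 1 = 8 time quanta, so an absolute tdco of 14 corresponds to a relative TDCO of 14 - 8 = 6.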
+/*
+ * can_bit_time() - Duration of one bit
+ *
+ * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
+ * additional information.
+ *
+ * Return: the number of time quanta in one bit.
+ */
+static inline unsigned int can_bit_time(const struct can_bittiming *bt)
+{
+ return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
+}
+
+/* Duration of one bit in minimum time quantum */
+static inline unsigned int can_bit_time_tqmin(const struct can_bittiming *bt)
+{
+ return can_bit_time(bt) * bt->brp;
+}
+
+/* Convert a duration from a number of minimum time quanta to nanoseconds */
+static inline u32 can_tqmin_to_ns(u32 tqmin, u32 clock_freq)
+{
+ return DIV_U64_ROUND_CLOSEST(mul_u32_u32(tqmin, NSEC_PER_SEC),
+ clock_freq);
+}
+
+#endif /* !_CAN_BITTIMING_H */
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
new file mode 100644
index 000000000000..8afa92d15a66
--- /dev/null
+++ b/include/linux/can/can-ml.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * Copyright (c) 2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#ifndef CAN_ML_H
+#define CAN_ML_H
+
+#include <linux/can.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+
+#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
+#define CAN_EFF_RCV_HASH_BITS 10
+#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
+
+enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
+
+struct can_dev_rcv_lists {
+ struct hlist_head rx[RX_MAX];
+ struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
+ struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
+ int entries;
+};
+
+struct can_ml_priv {
+ struct can_dev_rcv_lists dev_rcv_lists;
+#ifdef CAN_J1939
+ struct j1939_priv *j1939_priv;
+#endif
+};
+
+static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
+{
+ return netdev_get_ml_priv(dev, ML_PRIV_CAN);
+}
+
+static inline void can_set_ml_priv(struct net_device *dev,
+ struct can_ml_priv *ml_priv)
+{
+ netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
+}
+
+#endif /* CAN_ML_H */
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index c9a17bb1221c..5fb8d0e3f9c1 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* linux/can/core.h
*
- * Protoypes and definitions for CAN protocol modules using the PF_CAN core
+ * Prototypes and definitions for CAN protocol modules using the PF_CAN core
*
* Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
* Urs Thuermann <urs.thuermann@volkswagen.de>
@@ -17,13 +18,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20170425"
-
-/* increment this number each time you change some user-space interface */
-#define CAN_ABI_VERSION "9"
-
-#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION
-
#define DNAME(dev) ((dev) ? (dev)->name : "any")
/**
@@ -40,6 +34,14 @@ struct can_proto {
struct proto *prot;
};
+/* CAN_REQUIRED_SIZE
+ * macro to find the minimum size of a struct
+ * that includes a requested member
+ */
+#define CAN_REQUIRED_SIZE(struct_type, member) \
+ (offsetof(typeof(struct_type), member) + \
+ sizeof(((typeof(struct_type) *)(NULL))->member))
+
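For example, a protocol's setsockopt() handler could accept an older, shorter layout of a user-supplied structure as long as it still covers the member it needs; struct foo_options and its flags member are hypothetical:

	if (optlen < CAN_REQUIRED_SIZE(struct foo_options, flags))
		return -EINVAL;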
/* function prototypes for the CAN networklayer core (af_can.c) */
extern int can_proto_register(const struct can_proto *cp);
@@ -56,6 +58,6 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
void *data);
extern int can_send(struct sk_buff *skb, int loop);
-extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+void can_sock_destruct(struct sock *sk);
#endif /* !_CAN_CORE_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 141b05aade81..f6416a56e95d 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/can/dev.h
*
@@ -14,9 +15,12 @@
#define _CAN_DEV_H
#include <linux/can.h>
+#include <linux/can/bittiming.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
+#include <linux/can/length.h>
#include <linux/can/netlink.h>
+#include <linux/can/skb.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
/*
@@ -28,6 +32,12 @@ enum can_mode {
CAN_MODE_SLEEP
};
+enum can_termination_gpio {
+ CAN_TERMINATION_GPIO_DISABLED = 0,
+ CAN_TERMINATION_GPIO_ENABLED,
+ CAN_TERMINATION_GPIO_MAX,
+};
+
/*
* CAN common private data
*/
@@ -35,121 +45,93 @@ struct can_priv {
struct net_device *dev;
struct can_device_stats can_stats;
- struct can_bittiming bittiming, data_bittiming;
- const struct can_bittiming_const *bittiming_const,
- *data_bittiming_const;
- const u16 *termination_const;
- unsigned int termination_const_cnt;
- u16 termination;
- const u32 *bitrate_const;
+ const struct can_bittiming_const *bittiming_const;
+ struct can_bittiming bittiming;
+ struct data_bittiming_params fd, xl;
unsigned int bitrate_const_cnt;
- const u32 *data_bitrate_const;
- unsigned int data_bitrate_const_cnt;
+ const u32 *bitrate_const;
+ u32 bitrate_max;
struct can_clock clock;
+ unsigned int termination_const_cnt;
+ const u16 *termination_const;
+ u16 termination;
+ struct gpio_desc *termination_gpio;
+ u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX];
+
+ unsigned int echo_skb_max;
+ struct sk_buff **echo_skb;
+
enum can_state state;
/* CAN controller features - see include/uapi/linux/can/netlink.h */
u32 ctrlmode; /* current options setting */
u32 ctrlmode_supported; /* options that can be modified by netlink */
- u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
struct delayed_work restart_work;
int (*do_set_bittiming)(struct net_device *dev);
- int (*do_set_data_bittiming)(struct net_device *dev);
int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
int (*do_set_termination)(struct net_device *dev, u16 term);
int (*do_get_state)(const struct net_device *dev,
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
struct can_berr_counter *bec);
-
- unsigned int echo_skb_max;
- struct sk_buff **echo_skb;
-
-#ifdef CONFIG_CAN_LEDS
- struct led_trigger *tx_led_trig;
- char tx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rx_led_trig;
- char rx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rxtx_led_trig;
- char rxtx_led_trig_name[CAN_LED_NAME_SZ];
-#endif
};
-/*
- * get_can_dlc(value) - helper macro to cast a given data length code (dlc)
- * to __u8 and ensure the dlc value to be max. 8 bytes.
- *
- * To be used in the CAN netdriver receive path to ensure conformance with
- * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
- */
-#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
-#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
-
-/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline bool can_dropped_invalid_skb(struct net_device *dev,
- struct sk_buff *skb)
+static inline bool can_fd_tdc_is_enabled(const struct can_priv *priv)
{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
-
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
- goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
- goto inval_skb;
- } else
- goto inval_skb;
-
- return false;
-
-inval_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return true;
+ return !!(priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
}
-static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+static inline bool can_xl_tdc_is_enabled(const struct can_priv *priv)
{
- /* the CAN specific type of skb is identified by its data length */
- return skb->len == CANFD_MTU;
+ return !!(priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK);
}
-/* helper to define static CAN controller features at device creation time */
-static inline void can_set_static_ctrlmode(struct net_device *dev,
- u32 static_mode)
+static inline u32 can_get_static_ctrlmode(struct can_priv *priv)
{
- struct can_priv *priv = netdev_priv(dev);
-
- /* alloc_candev() succeeded => netdev_priv() is valid at this point */
- priv->ctrlmode = static_mode;
- priv->ctrlmode_static = static_mode;
-
- /* override MTU which was set by default in can_setup()? */
- if (static_mode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
+ return priv->ctrlmode & ~priv->ctrlmode_supported;
}
-/* get data length from can_dlc with sanitized can_dlc */
-u8 can_dlc2len(u8 can_dlc);
+static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
+{
+ return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
+}
-/* map the sanitized data length to an appropriate data length code */
-u8 can_len2dlc(u8 len);
+void can_setup(struct net_device *dev);
-struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs);
+#define alloc_candev(sizeof_priv, echo_skb_max) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)
+#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count)
void free_candev(struct net_device *dev);
/* a candev safe wrapper around netdev_priv */
+#if IS_ENABLED(CONFIG_CAN_NETLINK)
struct can_priv *safe_candev_priv(struct net_device *dev);
+#else
+static inline struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ return NULL;
+}
+#endif
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
-int can_change_mtu(struct net_device *dev, int new_mtu);
+void can_set_default_mtu(struct net_device *dev);
+int __must_check can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode);
+int can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg);
+int can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
int register_candev(struct net_device *dev);
void unregister_candev(struct net_device *dev);
@@ -157,18 +139,69 @@ void unregister_candev(struct net_device *dev);
int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
+const char *can_get_state_str(const enum can_state state);
+const char *can_get_ctrlmode_str(u32 ctrlmode);
+
+static inline bool can_dev_in_xl_only_mode(struct can_priv *priv)
+{
+ const u32 mixed_mode = CAN_CTRLMODE_FD | CAN_CTRLMODE_XL;
+
+ /* When CAN XL is enabled but FD is disabled we are running in
+ * the so-called 'CANXL-only mode' where the error signalling is
+ * disabled. This helper function determines the required value
+ * to disable error signalling in the CAN XL controller.
+ * The so-called CC/FD/XL 'mixed mode' requires error signalling.
+ */
+ return ((priv->ctrlmode & mixed_mode) == CAN_CTRLMODE_XL);
+}
+
+/* drop skb if it does not contain a valid CAN frame for sending */
+static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ u32 silent_mode = priv->ctrlmode & (CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_RESTRICTED);
+
+ if (silent_mode) {
+ netdev_info_once(dev, "interface in %s mode, dropping skb\n",
+ can_get_ctrlmode_str(silent_mode));
+ goto invalid_skb;
+ }
+
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) {
+ netdev_info_once(dev, "CAN FD is disabled, dropping skb\n");
+ goto invalid_skb;
+ }
+
+ if (can_dev_in_xl_only_mode(priv) && !can_is_canxl_skb(skb)) {
+ netdev_info_once(dev,
+ "Error signaling is disabled, dropping skb\n");
+ goto invalid_skb;
+ }
+
+ return can_dropped_invalid_skb(dev, skb);
+
+invalid_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+
+void can_state_get_by_berr_counter(const struct net_device *dev,
+ const struct can_berr_counter *bec,
+ enum can_state *tx_state,
+ enum can_state *rx_state);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx);
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
-void can_free_echo_skb(struct net_device *dev, unsigned int idx);
+#ifdef CONFIG_OF
+void of_can_transceiver(struct net_device *dev);
+#else
+static inline void of_can_transceiver(struct net_device *dev) { }
+#endif
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd);
-struct sk_buff *alloc_can_err_skb(struct net_device *dev,
- struct can_frame **cf);
+extern struct rtnl_link_ops can_link_ops;
+int can_netlink_register(void);
+void can_netlink_unregister(void);
#endif /* !_CAN_DEV_H */
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
index 46dceef2cfa6..d3788a3d0942 100644
--- a/include/linux/can/dev/peak_canfd.h
+++ b/include/linux/can/dev/peak_canfd.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* CAN driver for PEAK System micro-CAN based adapters
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PUCAN_H
#define PUCAN_H
@@ -197,7 +189,7 @@ struct __packed pucan_rx_msg {
u8 client;
__le16 flags;
__le32 can_id;
- u8 d[0];
+ u8 d[];
};
/* uCAN error types */
@@ -274,7 +266,7 @@ struct __packed pucan_tx_msg {
u8 client;
__le16 flags;
__le32 can_id;
- u8 d[0];
+ u8 d[];
};
/* build the cmd opcode_channel field with respect to the correct endianness */
@@ -290,7 +282,7 @@ static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg)
}
/* return the dlc value from any received message channel_dlc field */
-static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
+static inline u8 pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
{
return msg->channel_dlc >> 4;
}
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
deleted file mode 100644
index 2746f7c2f87d..000000000000
--- a/include/linux/can/led.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _CAN_LED_H
-#define _CAN_LED_H
-
-#include <linux/if.h>
-#include <linux/leds.h>
-#include <linux/netdevice.h>
-
-enum can_led_event {
- CAN_LED_EVENT_OPEN,
- CAN_LED_EVENT_STOP,
- CAN_LED_EVENT_TX,
- CAN_LED_EVENT_RX,
-};
-
-#ifdef CONFIG_CAN_LEDS
-
-/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
- * suffix and null terminator
- */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
-
-void can_led_event(struct net_device *netdev, enum can_led_event event);
-void devm_can_led_init(struct net_device *netdev);
-int __init can_led_notifier_init(void);
-void __exit can_led_notifier_exit(void);
-
-#else
-
-static inline void can_led_event(struct net_device *netdev,
- enum can_led_event event)
-{
-}
-static inline void devm_can_led_init(struct net_device *netdev)
-{
-}
-static inline int can_led_notifier_init(void)
-{
- return 0;
-}
-static inline void can_led_notifier_exit(void)
-{
-}
-
-#endif
-
-#endif /* !_CAN_LED_H */
diff --git a/include/linux/can/length.h b/include/linux/can/length.h
new file mode 100644
index 000000000000..abc978b38f79
--- /dev/null
+++ b/include/linux/can/length.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (C) 2020, 2023 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#ifndef _CAN_LENGTH_H
+#define _CAN_LENGTH_H
+
+#include <linux/bits.h>
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/math.h>
+
+/*
+ * Size of a Classical CAN Standard Frame header in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Remote Transmission Request (RTR) 1
+ * Control field:
+ * IDentifier Extension bit (IDE) 1
+ * FD Format indicator (FDF) 1
+ * Data Length Code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CAN_FRAME_HEADER_SFF_BITS 19
+
+/*
+ * Size of a Classical CAN Extended Frame header in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Substitute Remote Request (SRR) 1
+ * IDentifier Extension bit (IDE) 1
+ * ID extension 18
+ * Remote Transmission Request (RTR) 1
+ * Control field:
+ * FD Format indicator (FDF) 1
+ * Reserved bit (r0) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CAN_FRAME_HEADER_EFF_BITS 39
+
+/*
+ * Size of a CAN-FD Standard Frame in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Remote Request Substitution (RRS) 1
+ * Control field:
+ * IDentifier Extension bit (IDE) 1
+ * FD Format indicator (FDF) 1
+ * Reserved bit (res) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CANFD_FRAME_HEADER_SFF_BITS 22
+
+/*
+ * Size of a CAN-FD Extended Frame in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Substitute Remote Request (SRR) 1
+ * IDentifier Extension bit (IDE) 1
+ * ID extension 18
+ * Remote Request Substitution (RRS) 1
+ * Control field:
+ * FD Format indicator (FDF) 1
+ * Reserved bit (res) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CANFD_FRAME_HEADER_EFF_BITS 41
+
+/*
+ * Size of a CAN CRC Field in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * CRC sequence (CRC15) 15
+ * CRC Delimiter 1
+ *
+ * ignoring bitstuffing
+ */
+#define CAN_FRAME_CRC_FIELD_BITS 16
+
+/*
+ * Size of a CAN-FD CRC17 Field in bits (length: 0..16)
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Stuff Count 4
+ * CRC Sequence (CRC17) 17
+ * CRC Delimiter 1
+ * Fixed stuff bits 6
+ */
+#define CANFD_FRAME_CRC17_FIELD_BITS 28
+
+/*
+ * Size of a CAN-FD CRC21 Field in bits (length: 20..64)
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Stuff Count 4
+ * CRC sequence (CRC21) 21
+ * CRC Delimiter 1
+ * Fixed stuff bits 7
+ */
+#define CANFD_FRAME_CRC21_FIELD_BITS 33
+
+/*
+ * Size of a CAN(-FD) Frame footer in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * ACK slot 1
+ * ACK delimiter 1
+ * End Of Frame (EOF) 7
+ *
+ * including all fields following the CRC field
+ */
+#define CAN_FRAME_FOOTER_BITS 9
+
+/*
+ * First part of the Inter Frame Space
+ * (a.k.a. IMF - intermission field)
+ */
+#define CAN_INTERMISSION_BITS 3
+
+/**
+ * can_bitstuffing_len() - Calculate the maximum length with bitstuffing
+ * @destuffed_len: length of a destuffed bit stream
+ *
+ * The worst bit stuffing case is a sequence in which dominant and
+ * recessive bits alternate every four bits:
+ *
+ * Destuffed: 1 1111 0000 1111 0000 1111
+ * Stuffed: 1 1111o 0000i 1111o 0000i 1111o
+ *
+ * Nomenclature
+ *
+ * - "0": dominant bit
+ * - "o": dominant stuff bit
+ * - "1": recessive bit
+ * - "i": recessive stuff bit
+ *
+ * Aside from the first bit, one stuff bit is added every four bits.
+ *
+ * Return: length of the stuffed bit stream in the worst case scenario.
+ */
+#define can_bitstuffing_len(destuffed_len) \
+ (destuffed_len + (destuffed_len - 1) / 4)
+
+#define __can_bitstuffing_len(bitstuffing, destuffed_len) \
+ (bitstuffing ? can_bitstuffing_len(destuffed_len) : \
+ destuffed_len)
+
+#define __can_cc_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ __can_bitstuffing_len(bitstuffing, \
+ (is_eff ? CAN_FRAME_HEADER_EFF_BITS : \
+ CAN_FRAME_HEADER_SFF_BITS) + \
+ (data_len) * BITS_PER_BYTE + \
+ CAN_FRAME_CRC_FIELD_BITS) + \
+ CAN_FRAME_FOOTER_BITS + \
+ (intermission ? CAN_INTERMISSION_BITS : 0) \
+)
+
+#define __can_fd_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ __can_bitstuffing_len(bitstuffing, \
+ (is_eff ? CANFD_FRAME_HEADER_EFF_BITS : \
+ CANFD_FRAME_HEADER_SFF_BITS) + \
+ (data_len) * BITS_PER_BYTE) + \
+ ((data_len) <= 16 ? \
+ CANFD_FRAME_CRC17_FIELD_BITS : \
+ CANFD_FRAME_CRC21_FIELD_BITS) + \
+ CAN_FRAME_FOOTER_BITS + \
+ (intermission ? CAN_INTERMISSION_BITS : 0) \
+)
+
+/**
+ * can_frame_bits() - Calculate the number of bits on the wire in a
+ * CAN frame
+ * @is_fd: true: CAN-FD frame; false: Classical CAN frame.
+ * @is_eff: true: Extended frame; false: Standard frame.
+ * @bitstuffing: true: calculate the bitstuffing worst case; false:
+ * calculate the bitstuffing best case (no dynamic
+ * bitstuffing). CAN-FD's fixed stuff bits are always included.
+ * @intermission: if and only if true, include the inter frame space
+ * assuming no bus idle (i.e. only the intermission). Strictly
+ * speaking, the inter frame space is not part of the
+ * frame. However, it is needed when calculating the delay
+ * between the Start Of Frame of two consecutive frames.
+ * @data_len: length of the data field in bytes. Corresponds to
+ * can(fd)_frame->len. Should be zero for remote frames. No
+ * sanitization is done on @data_len and it shall have no side
+ * effects.
+ *
+ * Return: the numbers of bits on the wire of a CAN frame.
+ */
+#define can_frame_bits(is_fd, is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ is_fd ? __can_fd_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) : \
+ __can_cc_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+)
+
+/*
+ * Number of bytes in a CAN frame
+ * (rounded up, including intermission)
+ */
+#define can_frame_bytes(is_fd, is_eff, bitstuffing, data_len) \
+ DIV_ROUND_UP(can_frame_bits(is_fd, is_eff, bitstuffing, \
+ true, data_len), \
+ BITS_PER_BYTE)
+
+/*
+ * Maximum size of a Classical CAN frame
+ * (rounded up, ignoring bitstuffing but including intermission)
+ */
+#define CAN_FRAME_LEN_MAX can_frame_bytes(false, true, false, CAN_MAX_DLEN)
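+
+/*
+ * Worked example, assuming CAN_FRAME_HEADER_EFF_BITS is 39 and
+ * CAN_FRAME_CRC_FIELD_BITS is 16 (both defined earlier in this
+ * header): a Classical CAN frame with an extended ID and 8 data
+ * bytes spans 39 + 64 + 16 + 9 + 3 = 131 bits, so CAN_FRAME_LEN_MAX
+ * is DIV_ROUND_UP(131, 8) = 17 bytes.
+ */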
+
+/*
+ * Maximum size of a CAN-FD frame
+ * (rounded up, ignoring dynamic bitstuffing but including intermission)
+ */
+#define CANFD_FRAME_LEN_MAX can_frame_bytes(true, true, false, CANFD_MAX_DLEN)
+
+/*
+ * can_cc_dlc2len(dlc) - convert a given data length code (dlc) of a
+ * Classical CAN frame into a valid data length of max. 8 bytes.
+ *
+ * To be used in the CAN netdriver receive path to ensure conformance with
+ * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ */
+#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN))
+
+/* helper to get the data length code (DLC) for Classical CAN raw DLC access */
+static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode)
+{
+ /* return len8_dlc as dlc value only if all conditions apply */
+ if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
+ (cf->len == CAN_MAX_DLEN) &&
+ (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC))
+ return cf->len8_dlc;
+
+ /* return the payload length as dlc value */
+ return cf->len;
+}
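+
+/*
+ * Example: with CAN_CTRLMODE_CC_LEN8_DLC enabled, a frame with
+ * len == 8 and len8_dlc == 12 yields a raw DLC of 12; without the
+ * flag the payload length (8) is returned instead.
+ */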
+
+/* helper to set len and len8_dlc value for Classical CAN raw DLC access */
+static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc,
+ const u32 ctrlmode)
+{
+ /* the caller already ensured that dlc is a value from 0 .. 15 */
+ if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN)
+ cf->len8_dlc = dlc;
+
+ /* limit the payload length 'len' to CAN_MAX_DLEN */
+ cf->len = can_cc_dlc2len(dlc);
+}
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc);
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len);
+
+/* calculate the CAN Frame length in bytes of a given skb */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb);
+
+/* map the data length to an appropriate data link layer length */
+static inline u8 canfd_sanitize_len(u8 len)
+{
+ return can_fd_dlc2len(can_fd_len2dlc(len));
+}
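+
+/*
+ * Example: canfd_sanitize_len(13) maps to 16, the next valid CAN-FD
+ * data length (valid lengths: 0..8, 12, 16, 20, 24, 32, 48, 64).
+ */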
+
+#endif /* !_CAN_LENGTH_H */
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
index 78b2d44f04cf..9587d6882906 100644
--- a/include/linux/can/platform/cc770.h
+++ b/include/linux/can/platform/cc770.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CAN_PLATFORM_CC770_H
#define _CAN_PLATFORM_CC770_H
diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h
new file mode 100644
index 000000000000..1b536fb999de
--- /dev/null
+++ b/include/linux/can/platform/flexcan.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org>
+ */
+
+#ifndef _CAN_PLATFORM_FLEXCAN_H
+#define _CAN_PLATFORM_FLEXCAN_H
+
+struct flexcan_platform_data {
+ u32 clock_frequency;
+ u8 clk_src;
+};
+
+#endif /* _CAN_PLATFORM_FLEXCAN_H */
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
deleted file mode 100644
index d44fcae274ff..000000000000
--- a/include/linux/can/platform/mcp251x.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef _CAN_PLATFORM_MCP251X_H
-#define _CAN_PLATFORM_MCP251X_H
-
-/*
- *
- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
- *
- */
-
-#include <linux/spi/spi.h>
-
-/*
- * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
- * @oscillator_frequency: - oscillator frequency in Hz
- */
-
-struct mcp251x_platform_data {
- unsigned long oscillator_frequency;
-};
-
-#endif /* !_CAN_PLATFORM_MCP251X_H */
diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h
deleted file mode 100644
index 0f4a2f3df504..000000000000
--- a/include/linux/can/platform/rcar_can.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _CAN_PLATFORM_RCAR_CAN_H_
-#define _CAN_PLATFORM_RCAR_CAN_H_
-
-#include <linux/types.h>
-
-/* Clock Select Register settings */
-enum CLKR {
- CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */
- CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */
- CLKR_CLKEXT = 3 /* Externally input clock */
-};
-
-struct rcar_can_platform_data {
- enum CLKR clock_select; /* Clock source select */
-};
-
-#endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
index 93570b61ec6c..6a869682c120 100644
--- a/include/linux/can/platform/sja1000.h
+++ b/include/linux/can/platform/sja1000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CAN_PLATFORM_SJA1000_H
#define _CAN_PLATFORM_SJA1000_H
@@ -13,7 +14,7 @@
#define OCR_MODE_TEST 0x01
#define OCR_MODE_NORMAL 0x02
#define OCR_MODE_CLOCK 0x03
-#define OCR_MODE_MASK 0x07
+#define OCR_MODE_MASK 0x03
#define OCR_TX0_INVERT 0x04
#define OCR_TX0_PULLDOWN 0x08
#define OCR_TX0_PULLUP 0x10
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index cb31683bbe15..d29bb4521947 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/can/rx-offload.h
*
* Copyright (c) 2014 David Jander, Protonic Holland
- * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (c) 2014-2017, 2023 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
*/
#ifndef _CAN_RX_OFFLOAD_H
@@ -23,10 +15,12 @@
struct can_rx_offload {
struct net_device *dev;
- unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf,
- u32 *timestamp, unsigned int mb);
+ struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload,
+ unsigned int mb, u32 *timestamp,
+ bool drop);
struct sk_buff_head skb_queue;
+ struct sk_buff_head skb_irq_queue;
u32 skb_queue_len_max;
unsigned int mb_first;
@@ -37,20 +31,32 @@ struct can_rx_offload {
bool inc;
};
-int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload);
-int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
-int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
+int can_rx_offload_add_timestamp(struct net_device *dev,
+ struct can_rx_offload *offload);
+int can_rx_offload_add_fifo(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight);
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight);
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
+ u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
-void can_rx_offload_reset(struct can_rx_offload *offload);
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr);
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ struct sk_buff *skb);
+unsigned int can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
+ unsigned int idx,
+ unsigned int *frame_len_ptr);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload);
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
-static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
-{
- napi_schedule(&offload->napi);
-}
-
static inline void can_rx_offload_disable(struct can_rx_offload *offload)
{
napi_disable(&offload->napi);
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 51bb6532785c..1abc25a8d144 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* linux/can/skb.h
*
@@ -15,6 +16,27 @@
#include <linux/can.h>
#include <net/sock.h>
+void can_flush_echo_skb(struct net_device *dev);
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr,
+ unsigned int *frame_len_ptr);
+unsigned int __must_check can_get_echo_skb(struct net_device *dev,
+ unsigned int idx,
+ unsigned int *frame_len_ptr);
+void can_free_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr);
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len);
+struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf);
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb);
+
/*
* The struct can_skb_priv is used to transport additional information along
* with the stored struct can(fd)_frame that can not be contained in existing
@@ -28,12 +50,14 @@
* struct can_skb_priv - private additional data inside CAN sk_buffs
* @ifindex: ifindex of the first interface the CAN frame appeared on
* @skbcnt: atomic counter to have an unique id together with skb pointer
+ * @frame_len: length of CAN frame in data link layer
* @cf: align to the following CAN frame at skb->data
*/
struct can_skb_priv {
int ifindex;
int skbcnt;
- struct can_frame cf[0];
+ unsigned int frame_len;
+ struct can_frame cf[];
};
static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
@@ -48,8 +72,12 @@ static inline void can_skb_reserve(struct sk_buff *skb)
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
- if (sk) {
- sock_hold(sk);
+ /* If the socket has already been closed by user space, the
+ * refcount may already be 0 (and the socket will be freed
+ * after the last TX skb has been freed). So only increase
+ * socket refcount if the refcount is > 0.
+ */
+ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
skb->destructor = sock_efree;
skb->sk = sk;
}
@@ -60,21 +88,78 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
*/
static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
- if (skb_shared(skb)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (likely(nskb)) {
- can_skb_set_owner(nskb, skb->sk);
- consume_skb(skb);
- return nskb;
- } else {
- kfree_skb(skb);
- return NULL;
- }
+ struct sk_buff *nskb;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ kfree_skb(skb);
+ return NULL;
}
- /* we can assume to have an unshared skb with proper owner */
- return skb;
+ can_skb_set_owner(nskb, skb->sk);
+ consume_skb(skb);
+ return nskb;
+}
+
+static inline bool can_is_can_skb(const struct sk_buff *skb)
+{
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CAN_MTU && cf->len <= CAN_MAX_DLEN);
+}
+
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CANFD_MTU && cfd->len <= CANFD_MAX_DLEN);
+}
+
+static inline bool can_is_canxl_skb(const struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ if (skb->len < CANXL_HDR_SIZE + CANXL_MIN_DLEN || skb->len > CANXL_MTU)
+ return false;
+
+ /* this also checks valid CAN XL data length boundaries */
+ if (skb->len != CANXL_HDR_SIZE + cxl->len)
+ return false;
+
+ return cxl->flags & CANXL_XLF;
+}
+
+/* get length element value from can[|fd|xl]_frame structure */
+static inline unsigned int can_skb_get_len_val(struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (can_is_canxl_skb(skb))
+ return cxl->len;
+
+ return cfd->len;
+}
+
+/* get needed data length inside CAN frame for all frame types (RTR aware) */
+static inline unsigned int can_skb_get_data_len(struct sk_buff *skb)
+{
+ unsigned int len = can_skb_get_len_val(skb);
+ const struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* RTR frames have an actual length of zero */
+ if (can_is_can_skb(skb) && cf->can_id & CAN_RTR_FLAG)
+ return 0;
+
+ return len;
}
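+
+/*
+ * Example: for a Classical CAN remote frame (CAN_RTR_FLAG set),
+ * can_skb_get_len_val() returns the requested DLC while
+ * can_skb_get_data_len() returns 0, since RTR frames carry no payload.
+ */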
#endif /* !_CAN_SKB_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 6ffb67e10c06..1fb08922552c 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This is <linux/capability.h>
*
@@ -13,20 +14,19 @@
#define _LINUX_CAPABILITY_H
#include <uapi/linux/capability.h>
-
+#include <linux/uidgid.h>
+#include <linux/bits.h>
#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
-#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
extern int file_caps_enabled;
-typedef struct kernel_cap_struct {
- __u32 cap[_KERNEL_CAPABILITY_U32S];
-} kernel_cap_t;
+typedef struct { u64 val; } kernel_cap_t;
-/* exact same as vfs_cap_data but in cpu endian and always filled completely */
+/* same as vfs_ns_cap_data but in cpu endian and always filled completely */
struct cpu_vfs_cap_data {
__u32 magic_etc;
+ kuid_t rootid;
kernel_cap_t permitted;
kernel_cap_t inheritable;
};
@@ -34,22 +34,12 @@ struct cpu_vfs_cap_data {
#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
-
struct file;
struct inode;
struct dentry;
struct task_struct;
struct user_namespace;
-
-extern const kernel_cap_t __cap_empty_set;
-extern const kernel_cap_t __cap_init_eff_set;
-
-/*
- * Internal kernel functions only
- */
-
-#define CAP_FOR_EACH_U32(__capi) \
- for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
+struct mnt_idmap;
/*
* CAP_FS_MASK and CAP_NFSD_MASKS:
@@ -64,94 +54,58 @@ extern const kernel_cap_t __cap_init_eff_set;
* 2. The security.* and trusted.* xattrs are fs-related MAC permissions
*/
-# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
- | CAP_TO_MASK(CAP_MKNOD) \
- | CAP_TO_MASK(CAP_DAC_OVERRIDE) \
- | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
- | CAP_TO_MASK(CAP_FOWNER) \
- | CAP_TO_MASK(CAP_FSETID))
-
-# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE))
-
-#if _KERNEL_CAPABILITY_U32S != 2
-# error Fix up hand-coded capability macro initializers
-#else /* HAND-CODED capability initializers */
-
-#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
-#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
-
-# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
-# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
-# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
- CAP_FS_MASK_B1 } })
-# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_SYS_RESOURCE), \
- CAP_FS_MASK_B1 } })
-
-#endif /* _KERNEL_CAPABILITY_U32S != 2 */
-
-# define cap_clear(c) do { (c) = __cap_empty_set; } while (0)
-
-#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag))
-#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag))
-#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag))
-
-#define CAP_BOP_ALL(c, a, b, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \
- } \
-} while (0)
-
-#define CAP_UOP_ALL(c, a, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = OP a.cap[__capi]; \
- } \
-} while (0)
+# define CAP_FS_MASK (BIT_ULL(CAP_CHOWN) \
+ | BIT_ULL(CAP_MKNOD) \
+ | BIT_ULL(CAP_DAC_OVERRIDE) \
+ | BIT_ULL(CAP_DAC_READ_SEARCH) \
+ | BIT_ULL(CAP_FOWNER) \
+ | BIT_ULL(CAP_FSETID) \
+ | BIT_ULL(CAP_MAC_OVERRIDE))
+#define CAP_VALID_MASK (BIT_ULL(CAP_LAST_CAP+1)-1)
+
+# define CAP_EMPTY_SET ((kernel_cap_t) { 0 })
+# define CAP_FULL_SET ((kernel_cap_t) { CAP_VALID_MASK })
+# define CAP_FS_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_LINUX_IMMUTABLE) })
+# define CAP_NFSD_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_SYS_RESOURCE) })
+
+# define cap_clear(c) do { (c).val = 0; } while (0)
+
+#define cap_raise(c, flag) ((c).val |= BIT_ULL(flag))
+#define cap_lower(c, flag) ((c).val &= ~BIT_ULL(flag))
+#define cap_raised(c, flag) (((c).val & BIT_ULL(flag)) != 0)
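+
+/*
+ * Example: cap_raised(current_cred()->cap_effective, CAP_NET_ADMIN)
+ * is a single 64-bit bit test.
+ */
+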
static inline kernel_cap_t cap_combine(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, |);
- return dest;
+ return (kernel_cap_t) { a.val | b.val };
}
static inline kernel_cap_t cap_intersect(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, &);
- return dest;
+ return (kernel_cap_t) { a.val & b.val };
}
static inline kernel_cap_t cap_drop(const kernel_cap_t a,
const kernel_cap_t drop)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, drop, &~);
- return dest;
+	return (kernel_cap_t) { a.val & ~drop.val };
}
-static inline kernel_cap_t cap_invert(const kernel_cap_t c)
+static inline bool cap_isclear(const kernel_cap_t a)
{
- kernel_cap_t dest;
- CAP_UOP_ALL(dest, c, ~);
- return dest;
+ return !a.val;
}
-static inline bool cap_isclear(const kernel_cap_t a)
+static inline bool cap_isidentical(const kernel_cap_t a, const kernel_cap_t b)
{
- unsigned __capi;
- CAP_FOR_EACH_U32(__capi) {
- if (a.cap[__capi] != 0)
- return false;
- }
- return true;
+ return a.val == b.val;
}
/*
@@ -163,43 +111,34 @@ static inline bool cap_isclear(const kernel_cap_t a)
*/
static inline bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
- kernel_cap_t dest;
- dest = cap_drop(a, set);
- return cap_isclear(dest);
+ return !(a.val & ~set.val);
}
/* Used to decide between falling back on the old suser() or fsuser(). */
static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_FS_SET);
}
static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_fs_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_FS_SET));
}
static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_NFSD_SET);
}
static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_nfsd_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_NFSD_SET));
}
#ifdef CONFIG_MULTIUSER
-extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool has_capability_noaudit(struct task_struct *t, int cap);
@@ -208,11 +147,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
+extern bool ns_capable_setid(struct user_namespace *ns, int cap);
#else
-static inline bool has_capability(struct task_struct *t, int cap)
-{
- return true;
-}
static inline bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap)
{
@@ -239,13 +175,40 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
{
return true;
}
+static inline bool ns_capable_setid(struct user_namespace *ns, int cap)
+{
+ return true;
+}
#endif /* CONFIG_MULTIUSER */
-extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
-extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+bool privileged_wrt_inode_uidgid(struct user_namespace *ns,
+ struct mnt_idmap *idmap,
+ const struct inode *inode);
+bool capable_wrt_inode_uidgid(struct mnt_idmap *idmap,
+ const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
+static inline bool perfmon_capable(void)
+{
+ return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN);
+}
+
+static inline bool bpf_capable(void)
+{
+ return capable(CAP_BPF) || capable(CAP_SYS_ADMIN);
+}
+
+static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns)
+{
+ return ns_capable(ns, CAP_CHECKPOINT_RESTORE) ||
+ ns_capable(ns, CAP_SYS_ADMIN);
+}
/* audit system wants to get cap info from files as well */
-extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
+ const struct dentry *dentry,
+ struct cpu_vfs_cap_data *cpu_caps);
+
+int cap_convert_nscap(struct mnt_idmap *idmap, struct dentry *dentry,
+ const void **ivalue, size_t size);
#endif /* !_LINUX_CAPABILITY_H */
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
index 8cc10411bab2..405657a9a0d5 100644
--- a/include/linux/cb710.h
+++ b/include/linux/cb710.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* cb710/cb710.h
*
* Copyright by Michał Mirosław, 2008-2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef LINUX_CB710_DRIVER_H
#define LINUX_CB710_DRIVER_H
@@ -39,7 +36,7 @@ struct cb710_chip {
unsigned slot_mask;
unsigned slots;
spinlock_t irq_lock;
- struct cb710_slot slot[0];
+ struct cb710_slot slot[];
};
/* NOTE: cb710_chip.slots is modified only during device init/exit and
@@ -129,10 +126,6 @@ void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
* cb710/sgbuf2.h
*
* Copyright by Michał Mirosław, 2008-2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef LINUX_CB710_SG_H
#define LINUX_CB710_SG_H
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
new file mode 100644
index 000000000000..559353ad64ac
--- /dev/null
+++ b/include/linux/cc_platform.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#ifndef _LINUX_CC_PLATFORM_H
+#define _LINUX_CC_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+/**
+ * enum cc_attr - Confidential computing attributes
+ *
+ * These attributes represent confidential computing features that are
+ * currently active.
+ */
+enum cc_attr {
+ /**
+ * @CC_ATTR_MEM_ENCRYPT: Memory encryption is active
+ *
+ * The platform/OS is running with active memory encryption. This
+ * includes running either as a bare-metal system or a hypervisor
+ * and actively using memory encryption or as a guest/virtual machine
+ * and actively using memory encryption.
+ *
+ * Examples include SME, SEV and SEV-ES.
+ */
+ CC_ATTR_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_HOST_MEM_ENCRYPT: Host memory encryption is active
+ *
+ * The platform/OS is running as a bare-metal system or a hypervisor
+ * and actively using memory encryption.
+ *
+ * Examples include SME.
+ */
+ CC_ATTR_HOST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_MEM_ENCRYPT: Guest memory encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption.
+ *
+ * Examples include SEV and SEV-ES.
+ */
+ CC_ATTR_GUEST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_STATE_ENCRYPT: Guest state encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption and register state encryption.
+ *
+ * Examples include SEV-ES.
+ */
+ CC_ATTR_GUEST_STATE_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_UNROLL_STRING_IO: String I/O is implemented with
+ * IN/OUT instructions
+ *
+ * The platform/OS is running as a guest/virtual machine and uses
+ * IN/OUT instructions in place of string I/O.
+ *
+ * Examples include TDX guest & SEV.
+ */
+ CC_ATTR_GUEST_UNROLL_STRING_IO,
+
+ /**
+ * @CC_ATTR_GUEST_SEV_SNP: Guest SNP is active.
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using AMD SEV-SNP features.
+ */
+ CC_ATTR_GUEST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_GUEST_SNP_SECURE_TSC: SNP Secure TSC is active.
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using AMD SEV-SNP Secure TSC feature.
+ */
+ CC_ATTR_GUEST_SNP_SECURE_TSC,
+
+ /**
+ * @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host.
+ *
+ * The host kernel is running with the necessary features
+ * enabled to run SEV-SNP guests.
+ */
+ CC_ATTR_HOST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_SNP_SECURE_AVIC: Secure AVIC mode is active.
+ *
+ * The host kernel is running with the necessary features enabled
+ * to run SEV-SNP guests with full Secure AVIC capabilities.
+ */
+ CC_ATTR_SNP_SECURE_AVIC,
+};
+
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+
+/**
+ * cc_platform_has() - Checks if the specified cc_attr attribute is active
+ * @attr: Confidential computing attribute to check
+ *
+ * The cc_platform_has() function will return an indicator as to whether the
+ * specified Confidential Computing attribute is currently active.
+ *
+ * Context: Any context
+ * Return:
+ * * TRUE - Specified Confidential Computing attribute is active
+ * * FALSE - Specified Confidential Computing attribute is not active
+ */
+bool cc_platform_has(enum cc_attr attr);
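+
+/**
+ * cc_platform_set() - Mark the specified cc_attr attribute as active
+ * @attr: Confidential computing attribute to set
+ */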
+void cc_platform_set(enum cc_attr attr);
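+
+/**
+ * cc_platform_clear() - Mark the specified cc_attr attribute as inactive
+ * @attr: Confidential computing attribute to clear
+ */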
+void cc_platform_clear(enum cc_attr attr);
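+
+/*
+ * Usage sketch (hypothetical helper name):
+ *
+ *	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+ *		setup_guest_shared_memory();
+ */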
+
+#else /* !CONFIG_ARCH_HAS_CC_PLATFORM */
+
+static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+static inline void cc_platform_set(enum cc_attr attr) { }
+static inline void cc_platform_clear(enum cc_attr attr) { }
+
+#endif /* CONFIG_ARCH_HAS_CC_PLATFORM */
+
+#endif /* _LINUX_CC_PLATFORM_H */
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
index 84b6e2d0f44d..1d5229200a71 100644
--- a/include/linux/cciss_ioctl.h
+++ b/include/linux/cciss_ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CCISS_IOCTLH
#define CCISS_IOCTLH
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 3285c944194a..868924dec5a1 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -1,14 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __CCP_H__
@@ -18,14 +15,13 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <crypto/aes.h>
-#include <crypto/sha.h>
-
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
struct ccp_device;
struct ccp_cmd;
-#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
- defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
+#if defined(CONFIG_CRYPTO_DEV_SP_CCP)
/**
* ccp_present - check if a CCP device is present
@@ -71,7 +67,7 @@ unsigned int ccp_version(void);
*/
int ccp_enqueue_cmd(struct ccp_cmd *cmd);
-#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
+#else /* CONFIG_CRYPTO_DEV_SP_CCP is not enabled */
static inline int ccp_present(void)
{
@@ -88,7 +84,7 @@ static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
return -ENODEV;
}
-#endif /* CONFIG_CRYPTO_DEV_CCP_DD */
+#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
/***** AES engine *****/
@@ -175,6 +171,8 @@ struct ccp_aes_engine {
enum ccp_aes_mode mode;
enum ccp_aes_action action;
+ u32 authsize;
+
struct scatterlist *key;
u32 key_len; /* In bytes */
@@ -231,6 +229,7 @@ enum ccp_xts_aes_unit_size {
* AES operation the new IV overwrites the old IV.
*/
struct ccp_xts_aes_engine {
+ enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index cb28eb21e3ca..0e8cd6293deb 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CDEV_H
#define _LINUX_CDEV_H
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 6e8f209a6dff..b907e6c2307d 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* -- <linux/cdrom.h>
* General header file for linux CD-ROM drivers
@@ -12,6 +13,8 @@
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/list.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_common.h>
#include <uapi/linux/cdrom.h>
struct packet_command
@@ -20,7 +23,7 @@ struct packet_command
unsigned char *buffer;
unsigned int buflen;
int stat;
- struct request_sense *sense;
+ struct scsi_sense_hdr *sshdr;
unsigned char data_direction;
int quiet;
int timeout;
@@ -59,9 +62,9 @@ struct cdrom_device_info {
__u8 last_sense;
__u8 media_written; /* dirty flag, DVD+RW bookkeeping */
unsigned short mmc3_profile; /* current MMC3 profile */
- int for_data;
- int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
+ bool opened_for_data;
+ __s64 last_media_change_ms;
};
struct cdrom_device_ops {
@@ -71,11 +74,9 @@ struct cdrom_device_ops {
int (*drive_status) (struct cdrom_device_info *, int);
unsigned int (*check_events) (struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
- int (*media_changed) (struct cdrom_device_info *, int);
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);
- int (*select_speed) (struct cdrom_device_info *, int);
- int (*select_disc) (struct cdrom_device_info *, int);
+ int (*select_speed) (struct cdrom_device_info *, unsigned long);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn) (struct cdrom_device_info *,
@@ -85,24 +86,29 @@ struct cdrom_device_ops {
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
-/* driver specifications */
- const int capability; /* capability flags */
/* handle uniform packets for scsi type devices (scsi,atapi) */
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
+ int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nframes, u8 *last_sense);
+/* driver specifications */
+ const int capability; /* capability flags */
};
+int cdrom_multisession(struct cdrom_device_info *cdi,
+ struct cdrom_multisession *info);
+int cdrom_read_tocentry(struct cdrom_device_info *cdi,
+ struct cdrom_tocentry *entry);
+
/* the general block_device operations structure: */
-extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode);
-extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
-extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg);
+int cdrom_open(struct cdrom_device_info *cdi, blk_mode_t mode);
+void cdrom_release(struct cdrom_device_info *cdi);
+int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ unsigned int cmd, unsigned long arg);
extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing);
-extern int cdrom_media_changed(struct cdrom_device_info *);
-extern int register_cdrom(struct cdrom_device_info *cdi);
+extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi);
extern void unregister_cdrom(struct cdrom_device_info *cdi);
typedef struct {
diff --git a/include/linux/cdx/bitfield.h b/include/linux/cdx/bitfield.h
new file mode 100644
index 000000000000..567f8ec47582
--- /dev/null
+++ b/include/linux/cdx/bitfield.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_BITFIELD_H
+#define CDX_BITFIELD_H
+
+#include <linux/bitfield.h>
+
+/* Lowest bit numbers and widths */
+#define CDX_DWORD_LBN 0
+#define CDX_DWORD_WIDTH 32
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define CDX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+struct cdx_dword {
+ __le32 cdx_u32;
+};
+
+/* Value expanders for printk */
+#define CDX_DWORD_VAL(dword) \
+ ((unsigned int)le32_to_cpu((dword).cdx_u32))
+
+/*
+ * Extract the value of the named bit field from the 32-bit
+ * little-endian element
+ */
+#define CDX_DWORD_FIELD(dword, field) \
+ (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
+ le32_to_cpu((dword).cdx_u32)))
+
+/*
+ * Create the 32-bit contribution of the named bit field from
+ * the given value
+ */
+#define CDX_INSERT_FIELD(field, value) \
+ (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
+ CDX_LOW_BIT(field)), value))
+
+/*
+ * Combine the 32-bit contributions of several named bit fields
+ * into a single dword value
+ */
+#define CDX_INSERT_FIELDS(field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7) \
+ (CDX_INSERT_FIELD(field1, (value1)) | \
+ CDX_INSERT_FIELD(field2, (value2)) | \
+ CDX_INSERT_FIELD(field3, (value3)) | \
+ CDX_INSERT_FIELD(field4, (value4)) | \
+ CDX_INSERT_FIELD(field5, (value5)) | \
+ CDX_INSERT_FIELD(field6, (value6)) | \
+ CDX_INSERT_FIELD(field7, (value7)))
+
+#define CDX_POPULATE_DWORD(dword, ...) \
+ (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
+
+/* Populate a dword field with various numbers of arguments */
+#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
+#define CDX_POPULATE_DWORD_6(dword, ...) \
+ CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_5(dword, ...) \
+ CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_4(dword, ...) \
+ CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_3(dword, ...) \
+ CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_2(dword, ...) \
+ CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_1(dword, ...) \
+ CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_SET_DWORD(dword) \
+ CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
+
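+/*
+ * Usage sketch with hypothetical field macros MY_FIELD_LBN 4 and
+ * MY_FIELD_WIDTH 8:
+ *
+ *	struct cdx_dword d;
+ *
+ *	CDX_POPULATE_DWORD_1(d, MY_FIELD, 0x5a);
+ *	// d.cdx_u32 == cpu_to_le32(0x5a << 4)
+ *	// CDX_DWORD_FIELD(d, MY_FIELD) == 0x5a
+ */
+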
+#endif /* CDX_BITFIELD_H */
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
new file mode 100644
index 000000000000..b1ba97f6c9ad
--- /dev/null
+++ b/include/linux/cdx/cdx_bus.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * CDX bus public interface
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef _CDX_BUS_H_
+#define _CDX_BUS_H_
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/msi.h>
+
+#define MAX_CDX_DEV_RESOURCES 4
+#define CDX_CONTROLLER_ID_SHIFT 4
+#define CDX_BUS_NUM_MASK 0xF
+
+/* Forward declaration for CDX controller */
+struct cdx_controller;
+
+enum {
+ CDX_DEV_MSI_CONF,
+ CDX_DEV_BUS_MASTER_CONF,
+ CDX_DEV_RESET_CONF,
+ CDX_DEV_MSI_ENABLE,
+};
+
+struct cdx_msi_config {
+ u64 addr;
+ u32 data;
+ u16 msi_index;
+};
+
+struct cdx_device_config {
+ u8 type;
+ union {
+ struct cdx_msi_config msi;
+ bool bus_master_enable;
+ bool msi_enable;
+ };
+};
+
+typedef int (*cdx_bus_enable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
+typedef int (*cdx_bus_disable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
+typedef int (*cdx_scan_cb)(struct cdx_controller *cdx);
+
+typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_device_config *dev_config);
+
+/**
+ * CDX_DEVICE - macro used to describe a specific CDX device
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ *
+ * This macro is used to create a struct cdx_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
+ */
+#define CDX_DEVICE(vend, dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID
+
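+/*
+ * Example (hypothetical vendor/device IDs):
+ *
+ *	static const struct cdx_device_id my_id_table[] = {
+ *		{ CDX_DEVICE(0x10ee, 0x8084) },
+ *		{ },
+ *	};
+ */
+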
+/**
+ * CDX_DEVICE_DRIVER_OVERRIDE - macro used to describe a CDX device with
+ * override_only flags.
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ * @driver_override: the 32 bit CDX Device override_only
+ *
+ * This macro is used to create a struct cdx_device_id that matches only a
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
+ */
+#define CDX_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
+ .vendor = (vend), .device = (dev), .subvendor = CDX_ANY_ID,\
+ .subdevice = CDX_ANY_ID, .override_only = (driver_override)
+
+/**
+ * struct cdx_ops - Callbacks supported by CDX controller.
+ * @bus_enable: enable bus on the controller
+ * @bus_disable: disable bus on the controller
+ * @scan: scan the devices on the controller
+ * @dev_configure: configuration like reset, master_enable,
+ * msi_config etc for a CDX device
+ */
+struct cdx_ops {
+ cdx_bus_enable_cb bus_enable;
+ cdx_bus_disable_cb bus_disable;
+ cdx_scan_cb scan;
+ cdx_dev_configure_cb dev_configure;
+};
+
+/**
+ * struct cdx_controller: CDX controller object
+ * @dev: Linux device associated with the CDX controller.
+ * @priv: private data
+ * @msi_domain: MSI domain
+ * @id: Controller ID
+ * @controller_registered: controller registered with bus
+ * @ops: CDX controller ops
+ */
+struct cdx_controller {
+ struct device *dev;
+ void *priv;
+ struct irq_domain *msi_domain;
+ u32 id;
+ bool controller_registered;
+ struct cdx_ops *ops;
+};
+
+/**
+ * struct cdx_device - CDX device object
+ * @dev: Linux driver model device object
+ * @cdx: CDX controller associated with the device
+ * @vendor: Vendor ID for CDX device
+ * @device: Device ID for CDX device
+ * @subsystem_vendor: Subsystem Vendor ID for CDX device
+ * @subsystem_device: Subsystem Device ID for CDX device
+ * @class: Class for the CDX device
+ * @revision: Revision of the CDX device
+ * @bus_num: Bus number for this CDX device
+ * @dev_num: Device number for this device
+ * @res: array of MMIO region entries
+ * @res_attr: resource binary attribute
+ * @debugfs_dir: debugfs directory for this device
+ * @res_count: number of valid MMIO regions
+ * @dma_mask: Default DMA mask
+ * @flags: CDX device flags
+ * @req_id: Requestor ID associated with CDX device
+ * @is_bus: Is this a bus device
+ * @enabled: Is this bus enabled
+ * @msi_dev_id: MSI Device ID associated with CDX device
+ * @num_msi: Number of MSI's supported by the device
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
+ * @irqchip_lock: lock to synchronize irq/msi configuration
+ * @msi_write_pending: MSI write pending for this device
+ */
+struct cdx_device {
+ struct device dev;
+ struct cdx_controller *cdx;
+ u16 vendor;
+ u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem_device;
+ u32 class;
+ u8 revision;
+ u8 bus_num;
+ u8 dev_num;
+ struct resource res[MAX_CDX_DEV_RESOURCES];
+ struct bin_attribute *res_attr[MAX_CDX_DEV_RESOURCES];
+ struct dentry *debugfs_dir;
+ u8 res_count;
+ u64 dma_mask;
+ u16 flags;
+ u32 req_id;
+ bool is_bus;
+ bool enabled;
+ u32 msi_dev_id;
+ u32 num_msi;
+ const char *driver_override;
+ struct mutex irqchip_lock;
+ bool msi_write_pending;
+};
+
+#define to_cdx_device(_dev) \
+ container_of(_dev, struct cdx_device, dev)
+
+#define cdx_resource_start(dev, num) ((dev)->res[(num)].start)
+#define cdx_resource_end(dev, num) ((dev)->res[(num)].end)
+#define cdx_resource_flags(dev, num) ((dev)->res[(num)].flags)
+#define cdx_resource_len(dev, num) \
+ ((cdx_resource_start((dev), (num)) == 0 && \
+ cdx_resource_end((dev), (num)) == \
+ cdx_resource_start((dev), (num))) ? 0 : \
+ (cdx_resource_end((dev), (num)) - \
+ cdx_resource_start((dev), (num)) + 1))
+/**
+ * struct cdx_driver - CDX device driver
+ * @driver: Generic device driver
+ * @match_id_table: table of supported device matching Ids
+ * @probe: Function called when a device is added
+ * @remove: Function called when a device is removed
+ * @shutdown: Function called at shutdown time to quiesce the device
+ * @reset_prepare: Function called before the device is reset to notify driver
+ * @reset_done: Function called after reset is complete to notify driver
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * For most device drivers, no need to care about this flag
+ * as long as all DMAs are handled through the kernel DMA API.
+ * For some special ones, for example VFIO drivers, they know
+ * how to manage the DMA themselves and set this flag so that
+ * the IOMMU layer will allow them to setup and manage their
+ * own I/O address space.
+ */
+struct cdx_driver {
+ struct device_driver driver;
+ const struct cdx_device_id *match_id_table;
+ int (*probe)(struct cdx_device *dev);
+ int (*remove)(struct cdx_device *dev);
+ void (*shutdown)(struct cdx_device *dev);
+ void (*reset_prepare)(struct cdx_device *dev);
+ void (*reset_done)(struct cdx_device *dev);
+ bool driver_managed_dma;
+};
+
+#define to_cdx_driver(_drv) \
+ container_of_const(_drv, struct cdx_driver, driver)
+
+/* Macro to avoid include chaining to get THIS_MODULE */
+#define cdx_driver_register(drv) \
+ __cdx_driver_register(drv, THIS_MODULE)
+
+/**
+ * __cdx_driver_register - registers a CDX device driver
+ * @cdx_driver: CDX driver to register
+ * @owner: module owner
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int __must_check __cdx_driver_register(struct cdx_driver *cdx_driver,
+ struct module *owner);
+
+/**
+ * cdx_driver_unregister - unregisters a device driver from the
+ * CDX bus.
+ * @cdx_driver: CDX driver to unregister
+ */
+void cdx_driver_unregister(struct cdx_driver *cdx_driver);
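+
+/*
+ * Registration sketch (hypothetical driver and callbacks):
+ *
+ *	static struct cdx_driver my_cdx_driver = {
+ *		.driver = { .name = "my_cdx" },
+ *		.match_id_table = my_id_table,
+ *		.probe = my_probe,
+ *	};
+ *
+ *	module_driver(my_cdx_driver, cdx_driver_register,
+ *		      cdx_driver_unregister);
+ */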
+
+extern const struct bus_type cdx_bus_type;
+
+/**
+ * cdx_dev_reset - Reset CDX device
+ * @dev: device pointer
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_dev_reset(struct device *dev);
+
+/**
+ * cdx_set_master - enables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to enable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_set_master(struct cdx_device *cdx_dev);
+
+/**
+ * cdx_clear_master - disables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to disable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_clear_master(struct cdx_device *cdx_dev);
+
+#ifdef CONFIG_GENERIC_MSI_IRQ
+/**
+ * cdx_enable_msi - Enable MSI for the CDX device.
+ * @cdx_dev: device pointer
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_enable_msi(struct cdx_device *cdx_dev);
+
+/**
+ * cdx_disable_msi - Disable MSI for the CDX device.
+ * @cdx_dev: device pointer
+ */
+void cdx_disable_msi(struct cdx_device *cdx_dev);
+
+#else /* CONFIG_GENERIC_MSI_IRQ */
+
+static inline int cdx_enable_msi(struct cdx_device *cdx_dev)
+{
+ return -ENODEV;
+}
+
+static inline void cdx_disable_msi(struct cdx_device *cdx_dev)
+{
+}
+
+#endif /* CONFIG_GENERIC_MSI_IRQ */
+
+#endif /* _CDX_BUS_H_ */
diff --git a/include/linux/cdx/edac_cdx_pcol.h b/include/linux/cdx/edac_cdx_pcol.h
new file mode 100644
index 000000000000..749db33bb482
--- /dev/null
+++ b/include/linux/cdx/edac_cdx_pcol.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Driver for AMD network controllers and boards
+ *
+ * Copyright (C) 2021, Xilinx, Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef MC_CDX_PCOL_H
+#define MC_CDX_PCOL_H
+#include <linux/cdx/mcdi.h>
+
+#define MC_CMD_EDAC_GET_DDR_CONFIG_OUT_WORD_LENGTH_LEN 4
+/* Number of registers for the DDR controller */
+#define MC_CMD_GET_DDR_CONFIG_OFST 4
+#define MC_CMD_GET_DDR_CONFIG_LEN 4
+
+/***********************************/
+/* MC_CMD_EDAC_GET_DDR_CONFIG
+ * Provides detailed configuration for the DDR controller of the given index.
+ */
+#define MC_CMD_EDAC_GET_DDR_CONFIG 0x3
+
+/* MC_CMD_EDAC_GET_DDR_CONFIG_IN msgrequest */
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_OFST 0
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_LEN 4
+
+#endif /* MC_CDX_PCOL_H */
diff --git a/include/linux/cdx/mcdi.h b/include/linux/cdx/mcdi.h
new file mode 100644
index 000000000000..74075305cba4
--- /dev/null
+++ b/include/linux/cdx/mcdi.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDI_H
+#define CDX_MCDI_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include <linux/cdx/bitfield.h>
+
+/**
+ * enum cdx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum cdx_mcdi_mode {
+ MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
+};
+
+#define MCDI_RPC_TIMEOUT (10 * HZ)
+#define MCDI_RPC_LONG_TIMEOUT	(60 * HZ)
+#define MCDI_RPC_POST_RST_TIME (10 * HZ)
+
+/**
+ * enum cdx_mcdi_cmd_state - State for an individual MCDI command
+ * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
+ * @MCDI_STATE_RETRY: Command was submitted and the MC rejected it for lack of
+ *	resources, as the MC has too many outstanding commands. The command
+ *	will be retried once another command returns.
+ * @MCDI_STATE_RUNNING: Command was accepted and is running.
+ * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
+ * the command.
+ * @MCDI_STATE_FINISHED: Processing of this command has completed.
+ */
+enum cdx_mcdi_cmd_state {
+ MCDI_STATE_QUEUED,
+ MCDI_STATE_RETRY,
+ MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_CANCELLED,
+ MCDI_STATE_FINISHED,
+};
+
+/**
+ * struct cdx_mcdi - CDX MCDI Firmware interface, to interact
+ * with CDX controller.
+ * @mcdi: MCDI interface
+ * @mcdi_ops: MCDI operations
+ * @r5_rproc: R5 remoteproc device handle
+ * @rpdev: RPMsg device
+ * @ept: RPMsg endpoint
+ * @work: Post probe work
+ */
+struct cdx_mcdi {
+ /* MCDI interface */
+ struct cdx_mcdi_data *mcdi;
+ const struct cdx_mcdi_ops *mcdi_ops;
+
+ struct rproc *r5_rproc;
+ struct rpmsg_device *rpdev;
+ struct rpmsg_endpoint *ept;
+ struct work_struct work;
+};
+
+struct cdx_mcdi_ops {
+ void (*mcdi_request)(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len);
+ unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
+};
+
+typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
+ unsigned long cookie, int rc,
+ struct cdx_dword *outbuf,
+ size_t outlen_actual);
+
+/**
+ * struct cdx_mcdi_cmd - An outstanding MCDI command
+ * @ref: Reference count. There will be one reference if the command is
+ * in the mcdi_iface cmd_list, another if it's on a cleanup list,
+ * and a third if it's queued in the work queue.
+ * @list: The data for this entry in mcdi->cmd_list
+ * @cleanup_list: The data for this entry in a cleanup list
+ * @work: The work item for this command, queued in mcdi->workqueue
+ * @mcdi: The mcdi_iface for this command
+ * @state: The state of this command
+ * @inlen: inbuf length
+ * @inbuf: Input buffer
+ * @quiet: Whether to silence errors
+ * @reboot_seen: Whether a reboot has been seen during this command,
+ * to prevent duplicates
+ * @seq: Sequence number
+ * @started: Jiffies this command was started at
+ * @cookie: Context for completion function
+ * @completer: Completion function
+ * @handle: Command handle
+ * @cmd: Command number
+ * @rc: Return code
+ * @outlen: Length of output buffer
+ * @outbuf: Output buffer
+ */
+struct cdx_mcdi_cmd {
+ struct kref ref;
+ struct list_head list;
+ struct list_head cleanup_list;
+ struct work_struct work;
+ struct cdx_mcdi_iface *mcdi;
+ enum cdx_mcdi_cmd_state state;
+ size_t inlen;
+ const struct cdx_dword *inbuf;
+ bool quiet;
+ bool reboot_seen;
+ u8 seq;
+ unsigned long started;
+ unsigned long cookie;
+ cdx_mcdi_async_completer *completer;
+ unsigned int handle;
+ unsigned int cmd;
+ int rc;
+ size_t outlen;
+ struct cdx_dword *outbuf;
+ /* followed by inbuf data if necessary */
+};
+
+/**
+ * struct cdx_mcdi_iface - MCDI protocol context
+ * @cdx: The associated CDX MCDI instance
+ * @iface_lock: Serialise access to this structure
+ * @outstanding_cleanups: Count of cleanups
+ * @cmd_list: List of outstanding and running commands
+ * @workqueue: Workqueue used for delayed processing
+ * @cmd_complete_wq: Waitqueue for command completion
+ * @db_held_by: Command the MC doorbell is in use by
+ * @seq_held_by: Command each sequence number is in use by
+ * @prev_handle: The last used command handle
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event
+ * @prev_seq: The last used sequence number
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ */
+struct cdx_mcdi_iface {
+ struct cdx_mcdi *cdx;
+ /* Serialise access */
+ struct mutex iface_lock;
+ unsigned int outstanding_cleanups;
+ struct list_head cmd_list;
+ struct workqueue_struct *workqueue;
+ wait_queue_head_t cmd_complete_wq;
+ struct cdx_mcdi_cmd *db_held_by;
+ struct cdx_mcdi_cmd *seq_held_by[16];
+ unsigned int prev_handle;
+ enum cdx_mcdi_mode mode;
+ u8 prev_seq;
+ bool new_epoch;
+};
+
+/**
+ * struct cdx_mcdi_data - extra state for CDX devices that implement MCDI
+ * @iface: Interface/protocol state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct cdx_mcdi_data {
+ struct cdx_mcdi_iface iface;
+ u32 fn_flags;
+};
+
+void cdx_mcdi_finish(struct cdx_mcdi *cdx);
+int cdx_mcdi_init(struct cdx_mcdi *cdx);
+void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
+int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
+ (_ofst))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
+#define MCDI_DWORD(_buf, _field) \
+ CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
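+
+/*
+ * Usage sketch, reusing the EDAC command layout from
+ * <linux/cdx/edac_cdx_pcol.h> (outbuf, outlen and rc declared by
+ * the caller):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_DDR_CONFIG_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX, 0);
+ *	rc = cdx_mcdi_rpc(cdx, MC_CMD_EDAC_GET_DDR_CONFIG, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ */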
+#endif /* CDX_MCDI_H */
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index a6747789fe5c..6b138fa97db8 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_AUTH_H
#define _FS_CEPH_AUTH_H
@@ -31,8 +32,6 @@ struct ceph_auth_handshake {
};
struct ceph_auth_client_ops {
- const char *name;
-
/*
* true if we are authenticated and can connect to
* services.
@@ -51,8 +50,10 @@ struct ceph_auth_client_ops {
* another request.
*/
int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
- int (*handle_reply)(struct ceph_auth_client *ac, int result,
- void *buf, void *end);
+ int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
+ void *buf, void *end, u8 *session_key,
+ int *session_key_len, u8 *con_secret,
+ int *con_secret_len);
/*
* Create authorizer for connecting to a service, and verify
@@ -63,8 +64,15 @@ struct ceph_auth_client_ops {
/* ensure that an existing authorizer is up to date */
int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth);
+ int (*add_authorizer_challenge)(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *challenge_buf,
+ int challenge_buf_len);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+ struct ceph_authorizer *a,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
@@ -90,11 +98,17 @@ struct ceph_auth_client {
const struct ceph_crypto_key *key; /* our secret key */
unsigned want_keys; /* which services we want */
+ int preferred_mode; /* CEPH_CON_MODE_* */
+ int fallback_mode; /* ditto */
+
struct mutex mutex;
};
-extern struct ceph_auth_client *ceph_auth_init(const char *name,
- const struct ceph_crypto_key *key);
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
+struct ceph_auth_client *ceph_auth_init(const char *name,
+ const struct ceph_crypto_key *key,
+ const int *con_modes);
extern void ceph_auth_destroy(struct ceph_auth_client *ac);
extern void ceph_auth_reset(struct ceph_auth_client *ac);
@@ -108,17 +122,22 @@ int ceph_auth_entity_name_encode(const char *name, void **p, void *end);
extern int ceph_build_auth(struct ceph_auth_client *ac,
void *msg_buf, size_t msg_len);
-
extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
-extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
- int peer_type,
- struct ceph_auth_handshake *auth);
+
+int __ceph_auth_get_authorizer(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ int peer_type, bool force_new,
+ int *proto, int *pref_mode, int *fallb_mode);
void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
-extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
- int peer_type,
- struct ceph_auth_handshake *a);
-extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *challenge_buf,
+ int challenge_buf_len);
+int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
int peer_type);
@@ -138,4 +157,34 @@ int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
return auth->check_message_signature(auth, msg);
return 0;
}
+
+int ceph_auth_get_request(struct ceph_auth_client *ac, void *buf, int buf_len);
+int ceph_auth_handle_reply_more(struct ceph_auth_client *ac, void *reply,
+ int reply_len, void *buf, int buf_len);
+int ceph_auth_handle_reply_done(struct ceph_auth_client *ac,
+ u64 global_id, void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+bool ceph_auth_handle_bad_method(struct ceph_auth_client *ac,
+ int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
+int ceph_auth_get_authorizer(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ int peer_type, void *buf, int *buf_len);
+int ceph_auth_handle_svc_reply_more(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ void *reply, int reply_len,
+ void *buf, int *buf_len);
+int ceph_auth_handle_svc_reply_done(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+bool ceph_auth_handle_bad_authorizer(struct ceph_auth_client *ac,
+ int peer_type, int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
#endif
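
The declarations above (ceph_auth_get_request(), ceph_auth_handle_reply_more(),
ceph_auth_handle_reply_done(), ceph_auth_handle_bad_method()) form the client
side of the msgr2 authentication loop: build a request, keep exchanging frames
while the server asks for more, then pull the session key and connection
secret out of the final reply. A minimal sketch of a caller driving that loop;
the transport steps, buffer sizes, and the placeholder global_id are
illustrative assumptions, not the kernel's actual messenger code:

#include <linux/ceph/auth.h>

static int sketch_msgr2_auth(struct ceph_auth_client *ac,
			     void *buf, int buf_len,
			     void *reply, int reply_len)
{
	u8 session_key[64], con_secret[64];
	int skey_len = sizeof(session_key);
	int csec_len = sizeof(con_secret);
	int ret;

	/* first frame of the exchange */
	ret = ceph_auth_get_request(ac, buf, buf_len);
	if (ret < 0)
		return ret;
	/* ... send buf, receive reply ... */

	/* server asked for another round: build the next frame
	 * (repeated as many times as the server requires) */
	ret = ceph_auth_handle_reply_more(ac, reply, reply_len,
					  buf, buf_len);
	if (ret < 0)
		return ret;
	/* ... send buf, receive the final reply ... */

	/* the final reply carries the keys for the secure session;
	 * global_id really comes from the AUTH_DONE frame, 0 here is
	 * only a placeholder */
	return ceph_auth_handle_reply_done(ac, 0, reply, reply_len,
					   session_key, &skey_len,
					   con_secret, &csec_len);
}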
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 07ca15e76100..11cdc7c60480 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __FS_CEPH_BUFFER_H
#define __FS_CEPH_BUFFER_H
@@ -29,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
static inline void ceph_buffer_put(struct ceph_buffer *b)
{
- kref_put(&b->kref, ceph_buffer_release);
+ if (b)
+ kref_put(&b->kref, ceph_buffer_release);
}
extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
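
With the NULL check folded into ceph_buffer_put(), the helper gets kfree()-like
semantics: error and teardown paths can drop an optional buffer without testing
it first. An illustrative caller:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ceph/buffer.h>

static void sketch_buffer_use(size_t len)
{
	struct ceph_buffer *b = ceph_buffer_new(len, GFP_NOFS);

	if (b)
		memset(b->vec.iov_base, 0, b->vec.iov_len);
	ceph_buffer_put(b);	/* now a no-op when b == NULL */
}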
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index 51c5bd64bd00..5f904591fa5f 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_DEBUG_H
#define _FS_CEPH_DEBUG_H
@@ -18,12 +19,21 @@
pr_debug("%.*s %12.12s:%-4d : " fmt, \
8 - (int)sizeof(KBUILD_MODNAME), " ", \
kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug("%.*s %12.12s:%-4d : [%pU %llu] " fmt, \
+ 8 - (int)sizeof(KBUILD_MODNAME), " ", \
+ kbasename(__FILE__), __LINE__, \
+ &client->fsid, client->monc.auth->global_id, \
+ ##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
-# define dout(fmt, ...) do { \
- if (0) \
- printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
- } while (0)
+# define dout(fmt, ...) \
+ no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ no_printk(KERN_DEBUG "[%pU %llu] " fmt, \
+ &client->fsid, \
+ client->monc.auth->global_id, \
+ ##__VA_ARGS__)
# endif
#else
@@ -32,7 +42,32 @@
* or, just wrap pr_debug
*/
# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug(" [%pU %llu] %s: " fmt, &client->fsid, \
+ client->monc.auth->global_id, __func__, ##__VA_ARGS__)
#endif
+#define pr_notice_client(client, fmt, ...) \
+ pr_notice("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_info_client(client, fmt, ...) \
+ pr_info("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_client(client, fmt, ...) \
+ pr_warn("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_once_client(client, fmt, ...) \
+ pr_warn_once("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_client(client, fmt, ...) \
+ pr_err("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_ratelimited_client(client, fmt, ...) \
+ pr_warn_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_ratelimited_client(client, fmt, ...) \
+ pr_err_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+
#endif
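
Every client-aware macro above prefixes the message with "[<fsid> <global_id>]",
so output from several ceph_client instances sharing one module can be told
apart; note that they dereference client->monc.auth, so the mon_client must
already be initialized. Illustrative call sites:

#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/libceph.h>

static void sketch_log(struct ceph_client *client, int mds, u64 tid)
{
	/* compiled out (or dynamic-debug gated) like dout() */
	doutc(client, "mds%d session opened\n", mds);
	/* always-on, still tagged with [fsid global_id] */
	pr_warn_client(client, "mds%d reconnect denied\n", mds);
	pr_err_ratelimited_client(client, "bad reply on tid %llu\n", tid);
}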
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 040dd105c3e7..3a47acd9cc14 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CEPH_FEATURES
#define __CEPH_FEATURES
@@ -7,17 +8,18 @@
* feature. Base case is 1 (first use).
*/
#define CEPH_FEATURE_INCARNATION_1 (0ull)
-#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL
+#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // SERVER_JEWEL
+#define CEPH_FEATURE_INCARNATION_3 ((1ull<<57)|(1ull<<28)) // SERVER_MIMIC
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
- static const uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
- static const uint64_t CEPH_FEATUREMASK_##name = \
+ static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t __maybe_unused CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/* this bit is ignored but still advertised by release *when* */
#define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \
- static const uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
- static const uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
+ static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/*
@@ -57,7 +59,7 @@
* because 10.2.z (jewel) did not care if its peers advertised this
* feature bit.
*
- * - In the second phase we stop advertising the the bit and call it
+ * - In the second phase we stop advertising the bit and call it
* RETIRED. This can normally be done in the *next* major release
* following the one in which we marked the feature DEPRECATED. In
* the above example, for 12.0.z (luminous) we can say:
@@ -74,7 +76,7 @@
DEFINE_CEPH_FEATURE( 0, 1, UID)
DEFINE_CEPH_FEATURE( 1, 1, NOSRCADDR)
DEFINE_CEPH_FEATURE_RETIRED( 2, 1, MONCLOCKCHECK, JEWEL, LUMINOUS)
-
+DEFINE_CEPH_FEATURE( 2, 3, SERVER_NAUTILUS)
DEFINE_CEPH_FEATURE( 3, 1, FLOCK)
DEFINE_CEPH_FEATURE( 4, 1, SUBSCRIBE2)
DEFINE_CEPH_FEATURE( 5, 1, MONNAMES)
@@ -113,7 +115,7 @@ DEFINE_CEPH_FEATURE(25, 1, CRUSH_TUNABLES2)
DEFINE_CEPH_FEATURE(26, 1, CREATEPOOLID)
DEFINE_CEPH_FEATURE(27, 1, REPLY_CREATE_INODE)
DEFINE_CEPH_FEATURE_RETIRED(28, 1, OSD_HBMSGS, HAMMER, JEWEL)
-DEFINE_CEPH_FEATURE(28, 2, SERVER_M)
+DEFINE_CEPH_FEATURE(28, 2, SERVER_MIMIC)
DEFINE_CEPH_FEATURE(29, 1, MDSENC)
DEFINE_CEPH_FEATURE(30, 1, OSDHASHPSPOOL)
DEFINE_CEPH_FEATURE(31, 1, MON_SINGLE_PAXOS) // deprecate me
@@ -164,9 +166,9 @@ DEFINE_CEPH_FEATURE(58, 1, FS_FILE_LAYOUT_V2) // overlap
DEFINE_CEPH_FEATURE(59, 1, FS_BTIME)
DEFINE_CEPH_FEATURE(59, 1, FS_CHANGE_ATTR) // overlap
DEFINE_CEPH_FEATURE(59, 1, MSG_ADDR2) // overlap
-DEFINE_CEPH_FEATURE(60, 1, BLKIN_TRACING) // *do not share this bit*
+DEFINE_CEPH_FEATURE(60, 1, OSD_RECOVERY_DELETES) // *do not share this bit*
+DEFINE_CEPH_FEATURE(61, 1, CEPHX_V2) // *do not share this bit*
-DEFINE_CEPH_FEATURE(61, 1, RESERVED2) // unused, but slow down!
DEFINE_CEPH_FEATURE(62, 1, RESERVED) // do not use; used as a sentinel
DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facing
@@ -176,13 +178,16 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_SERVER_NAUTILUS | \
CEPH_FEATURE_FLOCK | \
CEPH_FEATURE_SUBSCRIBE2 | \
+ CEPH_FEATURE_MONNAMES | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_DIRLAYOUTHASH | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
+ CEPH_FEATURE_MONENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
CEPH_FEATURE_SERVER_LUMINOUS | \
CEPH_FEATURE_RESEND_ON_SPLIT | \
@@ -192,6 +197,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_SERVER_MIMIC | \
CEPH_FEATURE_MDSENC | \
CEPH_FEATURE_OSDHASHPSPOOL | \
CEPH_FEATURE_OSD_CACHEPOOL | \
@@ -203,19 +209,16 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_OSD_POOLRESEND | \
+ CEPH_FEATURE_MDS_QUOTA | \
CEPH_FEATURE_CRUSH_V4 | \
CEPH_FEATURE_NEW_OSDOP_ENCODING | \
CEPH_FEATURE_SERVER_JEWEL | \
CEPH_FEATURE_MON_STATEFUL_SUB | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
- CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
-
-#define CEPH_FEATURES_REQUIRED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR | \
- CEPH_FEATURE_SUBSCRIBE2 | \
- CEPH_FEATURE_RECONNECT_SEQ | \
- CEPH_FEATURE_PGID64 | \
- CEPH_FEATURE_PGPOOL3 | \
- CEPH_FEATURE_OSDENC)
+ CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
+ CEPH_FEATURE_MSG_ADDR2 | \
+ CEPH_FEATURE_CEPHX_V2)
+
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif
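
Feature bit 2 is being reused here: MONCLOCKCHECK was retired and
SERVER_NAUTILUS now occupies the bit at incarnation 3, so a correct peer check
must use the CEPH_FEATUREMASK_* form, which additionally requires the
incarnation bits (SERVER_JEWEL and SERVER_MIMIC) to be set. A sketch, with
peer_features assumed to come from the connection handshake:

static bool peer_is_nautilus(u64 peer_features)
{
	return (peer_features & CEPH_FEATUREMASK_SERVER_NAUTILUS) ==
	       CEPH_FEATUREMASK_SERVER_NAUTILUS;
}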
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
index 146507df8650..97bab0adc58a 100644
--- a/include/linux/ceph/ceph_frag.h
+++ b/include/linux/ceph/ceph_frag.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FS_CEPH_FRAG_H
#define FS_CEPH_FRAG_H
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index edf5b04b918a..c7f2c63b3bc3 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ceph_fs.h - Ceph constants and data types to share between kernel and
* user space.
@@ -27,8 +28,8 @@
#define CEPH_INO_ROOT 1
-#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
-#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */
+#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
+#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
@@ -92,8 +93,19 @@ struct ceph_dir_layout {
#define CEPH_AUTH_NONE 0x1
#define CEPH_AUTH_CEPHX 0x2
+#define CEPH_AUTH_MODE_NONE 0
+#define CEPH_AUTH_MODE_AUTHORIZER 1
+#define CEPH_AUTH_MODE_MON 10
+
+/* msgr2 protocol modes */
+#define CEPH_CON_MODE_UNKNOWN 0x0
+#define CEPH_CON_MODE_CRC 0x1
+#define CEPH_CON_MODE_SECURE 0x2
+
#define CEPH_AUTH_UID_DEFAULT ((__u64) -1)
+const char *ceph_auth_proto_name(int proto);
+const char *ceph_con_mode_name(int mode);
/*********************************************
* message layer
@@ -129,10 +141,12 @@ struct ceph_dir_layout {
#define CEPH_MSG_CLIENT_REQUEST 24
#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
#define CEPH_MSG_CLIENT_REPLY 26
+#define CEPH_MSG_CLIENT_METRICS 29
#define CEPH_MSG_CLIENT_CAPS 0x310
#define CEPH_MSG_CLIENT_LEASE 0x311
#define CEPH_MSG_CLIENT_SNAP 0x312
#define CEPH_MSG_CLIENT_CAPRELEASE 0x313
+#define CEPH_MSG_CLIENT_QUOTA 0x314
/* pool ops */
#define CEPH_MSG_POOLOP_REPLY 48
@@ -167,6 +181,8 @@ struct ceph_mon_request_header {
struct ceph_mon_statfs {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
+ __u8 contains_data_pool;
+ __le64 data_pool;
} __attribute__ ((packed));
struct ceph_statfs {
@@ -283,8 +299,11 @@ enum {
CEPH_SESSION_FLUSHMSG_ACK,
CEPH_SESSION_FORCE_RO,
CEPH_SESSION_REJECT,
+ CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};
+#define CEPH_SESSION_BLOCKLISTED (1 << 0) /* session blocklisted */
+
extern const char *ceph_session_op_name(int op);
struct ceph_mds_session_head {
@@ -309,6 +328,7 @@ enum {
CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
CEPH_MDS_OP_LOOKUPINO = 0x00104,
CEPH_MDS_OP_LOOKUPNAME = 0x00105,
+ CEPH_MDS_OP_GETVXATTR = 0x00106,
CEPH_MDS_OP_SETXATTR = 0x01105,
CEPH_MDS_OP_RMXATTR = 0x01106,
@@ -337,16 +357,26 @@ enum {
CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
-extern const char *ceph_mds_op_name(int op);
+#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE || \
+ op == CEPH_MDS_OP_MKNOD || \
+ op == CEPH_MDS_OP_MKDIR || \
+ op == CEPH_MDS_OP_SYMLINK)
+extern const char *ceph_mds_op_name(int op);
-#define CEPH_SETATTR_MODE 1
-#define CEPH_SETATTR_UID 2
-#define CEPH_SETATTR_GID 4
-#define CEPH_SETATTR_MTIME 8
-#define CEPH_SETATTR_ATIME 16
-#define CEPH_SETATTR_SIZE 32
-#define CEPH_SETATTR_CTIME 64
+#define CEPH_SETATTR_MODE (1 << 0)
+#define CEPH_SETATTR_UID (1 << 1)
+#define CEPH_SETATTR_GID (1 << 2)
+#define CEPH_SETATTR_MTIME (1 << 3)
+#define CEPH_SETATTR_ATIME (1 << 4)
+#define CEPH_SETATTR_SIZE (1 << 5)
+#define CEPH_SETATTR_CTIME (1 << 6)
+#define CEPH_SETATTR_MTIME_NOW (1 << 7)
+#define CEPH_SETATTR_ATIME_NOW (1 << 8)
+#define CEPH_SETATTR_BTIME (1 << 9)
+#define CEPH_SETATTR_KILL_SGUID (1 << 10)
+#define CEPH_SETATTR_FSCRYPT_AUTH (1 << 11)
+#define CEPH_SETATTR_FSCRYPT_FILE (1 << 12)
/*
* Ceph setxattr request flags.
@@ -413,12 +443,13 @@ union ceph_mds_request_args {
__le32 stripe_unit; /* layout for newly created file */
__le32 stripe_count; /* ... */
__le32 object_size;
- __le32 file_replication;
- __le32 mask; /* CEPH_CAP_* */
- __le32 old_size;
+ __le32 pool;
+ __le32 mask; /* CEPH_CAP_* */
+ __le64 old_size;
} __attribute__ ((packed)) open;
struct {
__le32 flags;
+ __le32 osdmap_epoch; /* used for setting file/dir layouts */
} __attribute__ ((packed)) setxattr;
struct {
struct ceph_file_layout_legacy layout;
@@ -432,12 +463,33 @@ union ceph_mds_request_args {
__le64 length; /* num bytes to lock from start */
__u8 wait; /* will caller wait for lock to become available? */
} __attribute__ ((packed)) filelock_change;
+ struct {
+ __le32 mask; /* CEPH_CAP_* */
+ __le64 snapid;
+ __le64 parent;
+ __le32 hash;
+ } __attribute__ ((packed)) lookupino;
} __attribute__ ((packed));
-#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
-#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
+union ceph_mds_request_args_ext {
+ union ceph_mds_request_args old;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ struct ceph_timespec btime;
+ } __attribute__ ((packed)) setattr_ext;
+};
-struct ceph_mds_request_head {
+#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
+#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
+#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */
+
+struct ceph_mds_request_head_legacy {
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
@@ -450,6 +502,28 @@ struct ceph_mds_request_head {
union ceph_mds_request_args args;
} __attribute__ ((packed));
+#define CEPH_MDS_REQUEST_HEAD_VERSION 3
+
+struct ceph_mds_request_head {
+ __le16 version; /* struct version */
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* legacy count retry and fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args_ext args;
+
+ __le32 ext_num_retry; /* new count retry attempts */
+ __le32 ext_num_fwd; /* new count fwd attempts */
+
+ __le32 struct_len; /* to store size of struct ceph_mds_request_head */
+ __le32 owner_uid, owner_gid; /* used for OPs which create inodes */
+} __attribute__ ((packed));
+
/* cap/lease release record */
struct ceph_mds_request_release {
__le64 ino, cap_id; /* ino and unique cap id */
@@ -520,6 +594,9 @@ struct ceph_mds_reply_lease {
__le32 seq;
} __attribute__ ((packed));
+#define CEPH_LEASE_VALID (1 | 2) /* old and new bit values */
+#define CEPH_LEASE_PRIMARY_LINK 4 /* primary linkage */
+
struct ceph_mds_reply_dirfrag {
__le32 frag; /* fragment */
__le32 auth; /* auth mds, if this is a delegation point */
@@ -554,6 +631,7 @@ struct ceph_filelock {
#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */
#define CEPH_FILE_MODE_LAZY 4 /* lazy io */
#define CEPH_FILE_MODE_BITS 4
+#define CEPH_FILE_MODE_MASK ((1 << CEPH_FILE_MODE_BITS) - 1)
int ceph_flags_to_mode(int flags);
@@ -624,6 +702,7 @@ int ceph_flags_to_mode(int flags);
CEPH_CAP_XATTR_SHARED)
#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
CEPH_CAP_FILE_RD)
+#define CEPH_STAT_RSTAT CEPH_CAP_FILE_WREXTEND
#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
CEPH_CAP_LINK_SHARED | \
@@ -644,10 +723,19 @@ int ceph_flags_to_mode(int flags);
#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \
CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \
CEPH_CAP_PIN)
+#define CEPH_CAP_ALL_FILE (CEPH_CAP_PIN | CEPH_CAP_ANY_SHARED | \
+ CEPH_CAP_AUTH_EXCL | CEPH_CAP_XATTR_EXCL | \
+ CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)
#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
CEPH_LOCK_IXATTR)
+/* cap masks async dir operations */
+#define CEPH_CAP_DIR_CREATE CEPH_CAP_FILE_CACHE
+#define CEPH_CAP_DIR_UNLINK CEPH_CAP_FILE_RD
+#define CEPH_CAP_ANY_DIR_OPS (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | \
+ CEPH_CAP_FILE_WREXTEND | CEPH_CAP_FILE_LAZYIO)
+
int ceph_caps_for_mode(int mode);
enum {
@@ -669,7 +757,9 @@ enum {
extern const char *ceph_cap_op_name(int op);
/* flags field in client cap messages (version >= 10) */
-#define CEPH_CLIENT_CAPS_SYNC (0x1)
+#define CEPH_CLIENT_CAPS_SYNC (1<<0)
+#define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1)
+#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2)
/*
* caps message, used for capability callbacks, acks, requests, etc.
@@ -694,7 +784,7 @@ struct ceph_mds_caps {
__le32 xattr_len;
__le64 xattr_version;
- /* filelock */
+ /* a union of non-export and export bodies. */
__le64 size, max_size, truncate_size;
__le32 truncate_seq;
struct ceph_timespec mtime, atime, ctime;
@@ -704,7 +794,7 @@ struct ceph_mds_caps {
struct ceph_mds_cap_peer {
__le64 cap_id;
- __le32 seq;
+ __le32 issue_seq;
__le32 mseq;
__le32 mds;
__u8 flags;
@@ -718,7 +808,7 @@ struct ceph_mds_cap_release {
struct ceph_mds_cap_item {
__le64 ino;
__le64 cap_id;
- __le32 migrate_seq, seq;
+ __le32 migrate_seq, issue_seq;
} __attribute__ ((packed));
#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */
@@ -802,4 +892,20 @@ struct ceph_mds_snap_realm {
} __attribute__ ((packed));
/* followed by my snap list, then prior parent snap list */
+/*
+ * quotas
+ */
+struct ceph_mds_quota {
+ __le64 ino; /* ino */
+ struct ceph_timespec rctime;
+ __le64 rbytes; /* dir stats */
+ __le64 rfiles;
+ __le64 rsubdirs;
+ __u8 struct_v; /* compat */
+ __u8 struct_compat;
+ __le32 struct_len;
+ __le64 max_bytes; /* quota max. bytes */
+ __le64 max_files; /* quota max. files */
+} __attribute__ ((packed));
+
#endif
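
The CEPH_SETATTR_* values are now written as (1 << n) shifts, which reads more
naturally as new bits (MTIME_NOW, BTIME, the fscrypt pair) are appended. A
sketch of composing a mask from VFS iattr bits; the ATTR_MTIME_SET handling is
one plausible mapping for illustration, not a copy of fs/ceph:

#include <linux/fs.h>

static u32 sketch_setattr_mask(unsigned int ia_valid)
{
	u32 mask = 0;

	if (ia_valid & ATTR_MODE)
		mask |= CEPH_SETATTR_MODE;
	if (ia_valid & ATTR_UID)
		mask |= CEPH_SETATTR_UID;
	if (ia_valid & ATTR_SIZE)
		mask |= CEPH_SETATTR_SIZE;
	if (ia_valid & ATTR_MTIME)
		mask |= (ia_valid & ATTR_MTIME_SET) ?
			CEPH_SETATTR_MTIME : CEPH_SETATTR_MTIME_NOW;
	return mask;
}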
diff --git a/include/linux/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h
index d099c3f90236..fda474c7a5d6 100644
--- a/include/linux/ceph/ceph_hash.h
+++ b/include/linux/ceph/ceph_hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FS_CEPH_HASH_H
#define FS_CEPH_HASH_H
diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h
index 0594d3bba774..17bc7584d1fe 100644
--- a/include/linux/ceph/cls_lock_client.h
+++ b/include/linux/ceph/cls_lock_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CEPH_CLS_LOCK_CLIENT_H
#define _LINUX_CEPH_CLS_LOCK_CLIENT_H
@@ -51,4 +52,7 @@ int ceph_cls_lock_info(struct ceph_osd_client *osdc,
char *lock_name, u8 *type, char **tag,
struct ceph_locker **lockers, u32 *num_lockers);
+int ceph_cls_assert_locked(struct ceph_osd_request *req, int which,
+ char *lock_name, u8 type, char *cookie, char *tag);
+
#endif
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
index 29cf897cc5cd..8b3a1a7a953a 100644
--- a/include/linux/ceph/debugfs.h
+++ b/include/linux/ceph/debugfs.h
@@ -1,26 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_DEBUGFS_H
#define _FS_CEPH_DEBUGFS_H
-#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/types.h>
-#define CEPH_DEFINE_SHOW_FUNC(name) \
-static int name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, name, inode->i_private); \
-} \
- \
-static const struct file_operations name##_fops = { \
- .open = name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-};
-
/* debugfs.c */
-extern int ceph_debugfs_init(void);
+extern void ceph_debugfs_init(void);
extern void ceph_debugfs_cleanup(void);
-extern int ceph_debugfs_client_init(struct ceph_client *client);
+extern void ceph_debugfs_client_init(struct ceph_client *client);
extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
#endif
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 14af9b70d301..8fc1aed64113 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CEPH_DECODE_H
#define __CEPH_DECODE_H
@@ -5,7 +6,7 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ceph/types.h>
@@ -193,16 +194,22 @@ ceph_decode_skip_n(p, end, sizeof(u8), bad)
} while (0)
/*
- * struct ceph_timespec <-> struct timespec
+ * struct ceph_timespec <-> struct timespec64
*/
-static inline void ceph_decode_timespec(struct timespec *ts,
- const struct ceph_timespec *tv)
+static inline void ceph_decode_timespec64(struct timespec64 *ts,
+ const struct ceph_timespec *tv)
{
- ts->tv_sec = (__kernel_time_t)le32_to_cpu(tv->tv_sec);
+ /*
+ * This will still overflow in year 2106. We could extend
+ * the protocol to steal two more bits from tv_nsec to
+ * add three more 136 year epochs after that the way ext4
+ * does if necessary.
+ */
+ ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec);
ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec);
}
-static inline void ceph_encode_timespec(struct ceph_timespec *tv,
- const struct timespec *ts)
+static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
+ const struct timespec64 *ts)
{
tv->tv_sec = cpu_to_le32((u32)ts->tv_sec);
tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
@@ -211,18 +218,35 @@ static inline void ceph_encode_timespec(struct ceph_timespec *tv,
/*
* sockaddr_storage <-> ceph_sockaddr
*/
-static inline void ceph_encode_addr(struct ceph_entity_addr *a)
+#define CEPH_ENTITY_ADDR_TYPE_NONE 0
+#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1)
+#define CEPH_ENTITY_ADDR_TYPE_MSGR2 __cpu_to_le32(2)
+#define CEPH_ENTITY_ADDR_TYPE_ANY __cpu_to_le32(3)
+
+static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a)
{
__be16 ss_family = htons(a->in_addr.ss_family);
a->in_addr.ss_family = *(__u16 *)&ss_family;
+
+ /* Banner addresses require TYPE_NONE */
+ a->type = CEPH_ENTITY_ADDR_TYPE_NONE;
}
-static inline void ceph_decode_addr(struct ceph_entity_addr *a)
+static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a)
{
__be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
a->in_addr.ss_family = ntohs(ss_family);
WARN_ON(a->in_addr.ss_family == 512);
+ a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
}
+extern int ceph_decode_entity_addr(void **p, void *end,
+ struct ceph_entity_addr *addr);
+int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ struct ceph_entity_addr *addr);
+
+int ceph_entity_addr_encoding_len(const struct ceph_entity_addr *addr);
+void ceph_encode_entity_addr(void **p, const struct ceph_entity_addr *addr);
+
/*
* encoders
*/
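
The renamed helpers make the 32-bit wire format explicit: seconds are truncated
on encode and widened as unsigned on decode, so a round trip is lossless for
any tv_sec representable in 32 bits (i.e. until 2106, as the comment notes). A
small sketch:

static void sketch_timespec_roundtrip(void)
{
	struct timespec64 in = { .tv_sec = 1700000000, .tv_nsec = 42 };
	struct timespec64 out;
	struct ceph_timespec cts;

	ceph_encode_timespec64(&cts, &in);
	ceph_decode_timespec64(&out, &cts);
	/* here out.tv_sec == in.tv_sec && out.tv_nsec == in.tv_nsec */
}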
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 8a79587e1317..63e0e2aa1ce9 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -1,9 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_LIBCEPH_H
#define _FS_CEPH_LIBCEPH_H
#include <linux/ceph/ceph_debug.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/backing-dev.h>
#include <linux/completion.h>
#include <linux/exportfs.h>
@@ -30,10 +31,11 @@
#define CEPH_OPT_FSID (1<<0)
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
-#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
-#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
-#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
-#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
+#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes (msgr1) */
+#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */
+#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */
+#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_RXBOUNCE (1<<7) /* double-buffer read data */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
@@ -50,9 +52,11 @@ struct ceph_options {
unsigned long osd_idle_ttl; /* jiffies */
unsigned long osd_keepalive_timeout; /* jiffies */
unsigned long osd_request_timeout; /* jiffies */
+ u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */
+ int con_modes[2]; /* CEPH_CON_MODE_* */
/*
- * any type that can't be simply compared or doesn't need need
+ * any type that can't be simply compared or doesn't need
* to be compared should go beyond this point,
* ceph_compare_options() should be updated accordingly
*/
@@ -62,6 +66,7 @@ struct ceph_options {
int num_mon;
char *name;
struct ceph_crypto_key *key;
+ struct rb_root crush_locs;
};
/*
@@ -71,6 +76,7 @@ struct ceph_options {
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */
+#define CEPH_READ_FROM_REPLICA_DEFAULT 0 /* read from primary */
#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000)
#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000)
@@ -78,31 +84,20 @@ struct ceph_options {
#define CEPH_MONC_HUNT_BACKOFF 2
#define CEPH_MONC_HUNT_MAX_MULT 10
+#define CEPH_MSG_MAX_CONTROL_LEN (16*1024*1024)
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
-#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
-
-#define CEPH_AUTH_NAME_DEFAULT "guest"
/*
- * Delay telling the MDS we no longer want caps, in case we reopen
- * the file. Delay a minimum amount of time, even if we send a cap
- * message for some other reason. Otherwise, take the opportunity to
- * update the mds to avoid sending another message later.
+ * The largest possible rbd data object is 32M.
+ * The largest possible rbd object map object is 64M.
+ *
+ * There is no limit on the size of cephfs objects, but it has to obey
+ * rsize and wsize mount options anyway.
*/
-#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
-#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
-
-#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4)
-
-/* mount state */
-enum {
- CEPH_MOUNT_MOUNTING,
- CEPH_MOUNT_MOUNTED,
- CEPH_MOUNT_UNMOUNTING,
- CEPH_MOUNT_UNMOUNTED,
- CEPH_MOUNT_SHUTDOWN,
-};
+#define CEPH_MSG_MAX_DATA_LEN (64*1024*1024)
+
+#define CEPH_AUTH_NAME_DEFAULT "guest"
static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
@@ -148,6 +143,10 @@ struct ceph_client {
#define from_msgr(ms) container_of(ms, struct ceph_client, msgr)
+static inline bool ceph_msgr2(struct ceph_client *client)
+{
+ return client->options->con_modes[0] != CEPH_CON_MODE_UNKNOWN;
+}
/*
* snapshots
@@ -189,7 +188,7 @@ static inline int calc_pages_for(u64 off, u64 len)
#define RB_CMP3WAY(a, b) ((a) < (b) ? -1 : (a) > (b))
#define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \
-static void insert_##name(struct rb_root *root, type *t) \
+static bool __insert_##name(struct rb_root *root, type *t) \
{ \
struct rb_node **n = &root->rb_node; \
struct rb_node *parent = NULL; \
@@ -207,11 +206,17 @@ static void insert_##name(struct rb_root *root, type *t) \
else if (cmp > 0) \
n = &(*n)->rb_right; \
else \
- BUG(); \
+ return false; \
} \
\
rb_link_node(&t->nodefld, parent, n); \
rb_insert_color(&t->nodefld, root); \
+ return true; \
+} \
+static void __maybe_unused insert_##name(struct rb_root *root, type *t) \
+{ \
+ if (!__insert_##name(root, t)) \
+ BUG(); \
} \
static void erase_##name(struct rb_root *root, type *t) \
{ \
@@ -269,22 +274,30 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_cap_snap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
+extern struct kmem_cache *ceph_dir_file_cachep;
+extern struct kmem_cache *ceph_mds_request_cachep;
+extern mempool_t *ceph_wb_pagevec_pool;
/* ceph_common.c */
extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
-extern void *ceph_kvmalloc(size_t size, gfp_t flags);
-
-extern struct ceph_options *ceph_parse_options(char *options,
- const char *dev_name, const char *dev_name_end,
- int (*parse_extra_token)(char *c, void *private),
- void *private);
-int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
+extern int ceph_parse_fsid(const char *str, struct ceph_fsid *fsid);
+
+struct fs_parameter;
+struct fc_log;
+struct ceph_options *ceph_alloc_options(void);
+int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
+ struct fc_log *l, char delim);
+int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
+ struct fc_log *l);
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
+ bool show_all);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
@@ -292,25 +305,17 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private);
struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
-extern int __ceph_open_session(struct ceph_client *client,
- unsigned long started);
+extern void ceph_reset_client_addr(struct ceph_client *client);
+extern int __ceph_open_session(struct ceph_client *client);
extern int ceph_open_session(struct ceph_client *client);
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+ unsigned long timeout);
/* pagevec.c */
extern void ceph_release_page_vector(struct page **pages, int num_pages);
-
-extern struct page **ceph_get_direct_page_vector(const void __user *data,
- int num_pages,
- bool write_page);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
bool dirty);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
-extern int ceph_copy_user_to_page_vector(struct page **pages,
- const void __user *data,
- loff_t off, size_t len);
-extern void ceph_copy_to_page_vector(struct page **pages,
- const void *data,
- loff_t off, size_t len);
extern void ceph_copy_from_page_vector(struct page **pages,
void *data,
loff_t off, size_t len);
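
Splitting the generated insert helper into __insert_##name() (returns false on
a duplicate key) and a BUG()ing insert_##name() wrapper lets callers that may
legitimately see duplicates handle them gracefully. A sketch of instantiating
the helpers for a hypothetical type, assuming the simple-key DEFINE_RB_FUNCS
wrapper built on the macros above:

struct foo {
	u64 id;
	struct rb_node node;
};

DEFINE_RB_FUNCS(foo, struct foo, id, node)

static int sketch_add_foo(struct rb_root *root, struct foo *f)
{
	if (!__insert_foo(root, f))
		return -EEXIST;	/* duplicate id; caller keeps ownership */
	return 0;
}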
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
deleted file mode 100644
index d5f783f3226a..000000000000
--- a/include/linux/ceph/mdsmap.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef _FS_CEPH_MDSMAP_H
-#define _FS_CEPH_MDSMAP_H
-
-#include <linux/bug.h>
-#include <linux/ceph/types.h>
-
-/*
- * mds map - describe servers in the mds cluster.
- *
- * we limit fields to those the client actually cares about
- */
-struct ceph_mds_info {
- u64 global_id;
- struct ceph_entity_addr addr;
- s32 state;
- int num_export_targets;
- bool laggy;
- u32 *export_targets;
-};
-
-struct ceph_mdsmap {
- u32 m_epoch, m_client_epoch, m_last_failure;
- u32 m_root;
- u32 m_session_timeout; /* seconds */
- u32 m_session_autoclose; /* seconds */
- u64 m_max_file_size;
- u32 m_max_mds; /* size of m_addr, m_state arrays */
- int m_num_mds;
- struct ceph_mds_info *m_info;
-
- /* which object pools file data can be stored in */
- int m_num_data_pg_pools;
- u64 *m_data_pg_pools;
- u64 m_cas_pg_pool;
-
- bool m_enabled;
- bool m_damaged;
- int m_num_laggy;
-};
-
-static inline struct ceph_entity_addr *
-ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
-{
- if (w >= m->m_num_mds)
- return NULL;
- return &m->m_info[w].addr;
-}
-
-static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
-{
- BUG_ON(w < 0);
- if (w >= m->m_num_mds)
- return CEPH_MDS_STATE_DNE;
- return m->m_info[w].state;
-}
-
-static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
-{
- if (w >= 0 && w < m->m_num_mds)
- return m->m_info[w].laggy;
- return false;
-}
-
-extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
-extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
-extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
-extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
-
-#endif
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index fbd94d9fa5dd..6aa4c6478c9f 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -1,7 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __FS_CEPH_MESSENGER_H
#define __FS_CEPH_MESSENGER_H
+#include <crypto/sha2.h>
#include <linux/bvec.h>
+#include <linux/crypto.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/net.h>
@@ -15,6 +18,7 @@
struct ceph_msg;
struct ceph_connection;
+struct ceph_msg_data_cursor;
/*
* Ceph defines these callbacks for handling connection events.
@@ -30,6 +34,9 @@ struct ceph_connection_operations {
struct ceph_auth_handshake *(*get_authorizer) (
struct ceph_connection *con,
int *proto, int force_new);
+ int (*add_authorizer_challenge)(struct ceph_connection *con,
+ void *challenge_buf,
+ int challenge_buf_len);
int (*verify_authorizer_reply) (struct ceph_connection *con);
int (*invalidate_authorizer)(struct ceph_connection *con);
@@ -48,9 +55,50 @@ struct ceph_connection_operations {
int (*sign_message) (struct ceph_msg *msg);
int (*check_message_signature) (struct ceph_msg *msg);
+
+ /* msgr2 authentication exchange */
+ int (*get_auth_request)(struct ceph_connection *con,
+ void *buf, int *buf_len,
+ void **authorizer, int *authorizer_len);
+ int (*handle_auth_reply_more)(struct ceph_connection *con,
+ void *reply, int reply_len,
+ void *buf, int *buf_len,
+ void **authorizer, int *authorizer_len);
+ int (*handle_auth_done)(struct ceph_connection *con,
+ u64 global_id, void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+ int (*handle_auth_bad_method)(struct ceph_connection *con,
+ int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
+ /**
+ * sparse_read: read sparse data
+ * @con: connection we're reading from
+ * @cursor: data cursor for reading extents
+ * @buf: optional buffer to read into
+ *
+ * This should be called more than once, each time setting up to
+ * receive an extent into the current cursor position, and zeroing
+ * the holes between them.
+ *
+ * Returns amount of data to be read (in bytes), 0 if reading is
+ * complete, or -errno if there was an error.
+ *
+ * If @buf is set on a >0 return, then the data should be read into
+ * the provided buffer. Otherwise, it should be read into the cursor.
+ *
+ * The sparse read operation is expected to initialize the cursor
+ * with a length covering up to the end of the last extent.
+ */
+ int (*sparse_read)(struct ceph_connection *con,
+ struct ceph_msg_data_cursor *cursor,
+ char **buf);
+
};
-/* use format string %s%d */
+/* use format string %s%lld */
#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num)
struct ceph_messenger {
@@ -75,57 +123,125 @@ enum ceph_msg_data_type {
#ifdef CONFIG_BLOCK
CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
+ CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
+ CEPH_MSG_DATA_ITER, /* data source/destination is an iov_iter */
};
-static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
-{
- switch (type) {
- case CEPH_MSG_DATA_NONE:
- case CEPH_MSG_DATA_PAGES:
- case CEPH_MSG_DATA_PAGELIST:
#ifdef CONFIG_BLOCK
- case CEPH_MSG_DATA_BIO:
+
+struct ceph_bio_iter {
+ struct bio *bio;
+ struct bvec_iter iter;
+};
+
+#define __ceph_bio_iter_advance_step(it, n, STEP) do { \
+ unsigned int __n = (n), __cur_n; \
+ \
+ while (__n) { \
+ BUG_ON(!(it)->iter.bi_size); \
+ __cur_n = min((it)->iter.bi_size, __n); \
+ (void)(STEP); \
+ bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \
+ if (!(it)->iter.bi_size && (it)->bio->bi_next) { \
+ dout("__ceph_bio_iter_advance_step next bio\n"); \
+ (it)->bio = (it)->bio->bi_next; \
+ (it)->iter = (it)->bio->bi_iter; \
+ } \
+ __n -= __cur_n; \
+ } \
+} while (0)
+
+/*
+ * Advance @it by @n bytes.
+ */
+#define ceph_bio_iter_advance(it, n) \
+ __ceph_bio_iter_advance_step(it, n, 0)
+
+/*
+ * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec.
+ */
+#define ceph_bio_iter_advance_step(it, n, BVEC_STEP) \
+ __ceph_bio_iter_advance_step(it, n, ({ \
+ struct bio_vec bv; \
+ struct bvec_iter __cur_iter; \
+ \
+ __cur_iter = (it)->iter; \
+ __cur_iter.bi_size = __cur_n; \
+ __bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \
+ (void)(BVEC_STEP); \
+ }))
+
#endif /* CONFIG_BLOCK */
- return true;
- default:
- return false;
- }
-}
+
+struct ceph_bvec_iter {
+ struct bio_vec *bvecs;
+ struct bvec_iter iter;
+};
+
+#define __ceph_bvec_iter_advance_step(it, n, STEP) do { \
+ BUG_ON((n) > (it)->iter.bi_size); \
+ (void)(STEP); \
+ bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \
+} while (0)
+
+/*
+ * Advance @it by @n bytes.
+ */
+#define ceph_bvec_iter_advance(it, n) \
+ __ceph_bvec_iter_advance_step(it, n, 0)
+
+/*
+ * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec.
+ */
+#define ceph_bvec_iter_advance_step(it, n, BVEC_STEP) \
+ __ceph_bvec_iter_advance_step(it, n, ({ \
+ struct bio_vec bv; \
+ struct bvec_iter __cur_iter; \
+ \
+ __cur_iter = (it)->iter; \
+ __cur_iter.bi_size = (n); \
+ for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter) \
+ (void)(BVEC_STEP); \
+ }))
+
+#define ceph_bvec_iter_shorten(it, n) do { \
+ BUG_ON((n) > (it)->iter.bi_size); \
+ (it)->iter.bi_size = (n); \
+} while (0)
struct ceph_msg_data {
- struct list_head links; /* ceph_msg->data */
enum ceph_msg_data_type type;
union {
#ifdef CONFIG_BLOCK
struct {
- struct bio *bio;
- size_t bio_length;
+ struct ceph_bio_iter bio_pos;
+ u32 bio_length;
};
#endif /* CONFIG_BLOCK */
+ struct ceph_bvec_iter bvec_pos;
struct {
- struct page **pages; /* NOT OWNER. */
+ struct page **pages;
size_t length; /* total # bytes */
unsigned int alignment; /* first page */
+ bool own_pages;
};
struct ceph_pagelist *pagelist;
+ struct iov_iter iter;
};
};
struct ceph_msg_data_cursor {
size_t total_resid; /* across all data items */
- struct list_head *data_head; /* = &ceph_msg->data */
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
- bool last_piece; /* current is last piece */
+ int sr_resid; /* residual sparse_read len */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
- struct { /* bio */
- struct bio *bio; /* bio from list */
- struct bvec_iter bvec_iter;
- };
+ struct ceph_bio_iter bio_iter;
#endif /* CONFIG_BLOCK */
+ struct bvec_iter bvec_iter;
struct { /* pages */
unsigned int page_offset; /* offset in page */
unsigned short page_index; /* index in array */
@@ -135,6 +251,10 @@ struct ceph_msg_data_cursor {
struct page *page; /* page from list */
size_t offset; /* bytes from list */
};
+ struct {
+ struct iov_iter iov_iter;
+ unsigned int lastlen;
+ };
};
};
@@ -153,7 +273,9 @@ struct ceph_msg {
struct ceph_buffer *middle;
size_t data_length;
- struct list_head data;
+ struct ceph_msg_data *data;
+ int num_data_items;
+ int max_data_items;
struct ceph_msg_data_cursor cursor;
struct ceph_connection *con;
@@ -162,15 +284,183 @@ struct ceph_msg {
struct kref kref;
bool more_to_follow;
bool needs_out_seq;
+ u64 sparse_read_total;
int front_alloc_len;
- unsigned long ack_stamp; /* tx: when we were acked */
struct ceph_msgpool *pool;
};
+/*
+ * connection states
+ */
+#define CEPH_CON_S_CLOSED 1
+#define CEPH_CON_S_PREOPEN 2
+#define CEPH_CON_S_V1_BANNER 3
+#define CEPH_CON_S_V1_CONNECT_MSG 4
+#define CEPH_CON_S_V2_BANNER_PREFIX 5
+#define CEPH_CON_S_V2_BANNER_PAYLOAD 6
+#define CEPH_CON_S_V2_HELLO 7
+#define CEPH_CON_S_V2_AUTH 8
+#define CEPH_CON_S_V2_AUTH_SIGNATURE 9
+#define CEPH_CON_S_V2_SESSION_CONNECT 10
+#define CEPH_CON_S_V2_SESSION_RECONNECT 11
+#define CEPH_CON_S_OPEN 12
+#define CEPH_CON_S_STANDBY 13
+
+/*
+ * ceph_connection flag bits
+ */
+#define CEPH_CON_F_LOSSYTX 0 /* we can close channel or drop
+ messages on errors */
+#define CEPH_CON_F_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
+#define CEPH_CON_F_WRITE_PENDING 2 /* we have data ready to send */
+#define CEPH_CON_F_SOCK_CLOSED 3 /* socket state changed to closed */
+#define CEPH_CON_F_BACKOFF 4 /* need to retry queuing delayed
+ work */
+
/* ceph connection fault delay defaults, for exponential backoff */
-#define BASE_DELAY_INTERVAL (HZ/2)
-#define MAX_DELAY_INTERVAL (5 * 60 * HZ)
+#define BASE_DELAY_INTERVAL (HZ / 4)
+#define MAX_DELAY_INTERVAL (15 * HZ)
+
+struct ceph_connection_v1_info {
+ struct kvec out_kvec[8], /* sending header/footer data */
+ *out_kvec_cur;
+ int out_kvec_left; /* kvec's left in out_kvec */
+ int out_skip; /* skip this many bytes */
+ int out_kvec_bytes; /* total bytes left */
+ bool out_more; /* there is more data after the kvecs */
+ bool out_msg_done;
+
+ struct ceph_auth_handshake *auth;
+ int auth_retry; /* true if we need a newer authorizer */
+
+ /* connection negotiation temps */
+ u8 in_banner[CEPH_BANNER_MAX_LEN];
+ struct ceph_entity_addr actual_peer_addr;
+ struct ceph_entity_addr peer_addr_for_me;
+ struct ceph_msg_connect out_connect;
+ struct ceph_msg_connect_reply in_reply;
+
+ int in_base_pos; /* bytes read */
+
+ /* sparse reads */
+ struct kvec in_sr_kvec; /* current location to receive into */
+ u64 in_sr_len; /* amount of data in this extent */
+
+ /* message in temps */
+ u8 in_tag; /* protocol control byte */
+ struct ceph_msg_header in_hdr;
+ __le64 in_temp_ack; /* for reading an ack */
+
+ /* message out temps */
+ struct ceph_msg_header out_hdr;
+ __le64 out_temp_ack; /* for writing an ack */
+ struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+ stamp */
+
+ u32 connect_seq; /* identify the most recent connection
+ attempt for this session */
+ u32 peer_global_seq; /* peer's global seq for this connection */
+};
+
+#define CEPH_CRC_LEN 4
+#define CEPH_GCM_KEY_LEN 16
+#define CEPH_GCM_IV_LEN sizeof(struct ceph_gcm_nonce)
+#define CEPH_GCM_BLOCK_LEN 16
+#define CEPH_GCM_TAG_LEN 16
+
+#define CEPH_PREAMBLE_LEN 32
+#define CEPH_PREAMBLE_INLINE_LEN 48
+#define CEPH_PREAMBLE_PLAIN_LEN CEPH_PREAMBLE_LEN
+#define CEPH_PREAMBLE_SECURE_LEN (CEPH_PREAMBLE_LEN + \
+ CEPH_PREAMBLE_INLINE_LEN + \
+ CEPH_GCM_TAG_LEN)
+#define CEPH_EPILOGUE_PLAIN_LEN (1 + 3 * CEPH_CRC_LEN)
+#define CEPH_EPILOGUE_SECURE_LEN (CEPH_GCM_BLOCK_LEN + CEPH_GCM_TAG_LEN)
+
+#define CEPH_FRAME_MAX_SEGMENT_COUNT 4
+
+struct ceph_frame_desc {
+ int fd_tag; /* FRAME_TAG_* */
+ int fd_seg_cnt;
+ int fd_lens[CEPH_FRAME_MAX_SEGMENT_COUNT]; /* logical */
+ int fd_aligns[CEPH_FRAME_MAX_SEGMENT_COUNT];
+};
+
+struct ceph_gcm_nonce {
+ __le32 fixed;
+ __le64 counter __packed;
+};
+
+struct ceph_connection_v2_info {
+ struct iov_iter in_iter;
+ struct kvec in_kvecs[5]; /* recvmsg */
+ struct bio_vec in_bvec; /* recvmsg (in_cursor) */
+ int in_kvec_cnt;
+ int in_state; /* IN_S_* */
+
+ struct iov_iter out_iter;
+ struct kvec out_kvecs[8]; /* sendmsg */
+ struct bio_vec out_bvec; /* sendpage (out_cursor, out_zero),
+ sendmsg (out_enc_pages) */
+ int out_kvec_cnt;
+ int out_state; /* OUT_S_* */
+
+ int out_zero; /* # of zero bytes to send */
+ bool out_iter_sendpage; /* use sendpage if possible */
+
+ struct ceph_frame_desc in_desc;
+ struct ceph_msg_data_cursor in_cursor;
+ struct ceph_msg_data_cursor out_cursor;
+
+ struct hmac_sha256_key hmac_key; /* post-auth signature */
+ bool hmac_key_set;
+ struct crypto_aead *gcm_tfm; /* on-wire encryption */
+ struct aead_request *gcm_req;
+ struct crypto_wait gcm_wait;
+ struct ceph_gcm_nonce in_gcm_nonce;
+ struct ceph_gcm_nonce out_gcm_nonce;
+
+ struct page **in_enc_pages;
+ int in_enc_page_cnt;
+ int in_enc_resid;
+ int in_enc_i;
+ struct page **out_enc_pages;
+ int out_enc_page_cnt;
+ int out_enc_resid;
+ int out_enc_i;
+
+ int con_mode; /* CEPH_CON_MODE_* */
+
+ void *conn_bufs[16];
+ int conn_buf_cnt;
+ int data_len_remain;
+
+ struct kvec in_sign_kvecs[8];
+ struct kvec out_sign_kvecs[8];
+ int in_sign_kvec_cnt;
+ int out_sign_kvec_cnt;
+
+ u64 client_cookie;
+ u64 server_cookie;
+ u64 global_seq;
+ u64 connect_seq;
+ u64 peer_global_seq;
+
+ u8 in_buf[CEPH_PREAMBLE_SECURE_LEN];
+ u8 out_buf[CEPH_PREAMBLE_SECURE_LEN];
+ struct {
+ u8 late_status; /* FRAME_LATE_STATUS_* */
+ union {
+ struct {
+ u32 front_crc;
+ u32 middle_crc;
+ u32 data_crc;
+ } __packed;
+ u8 pad[CEPH_GCM_BLOCK_LEN - 1];
+ };
+ } out_epil;
+};
/*
* A single connection with another host.
@@ -186,25 +476,16 @@ struct ceph_connection {
struct ceph_messenger *msgr;
+ int state; /* CEPH_CON_S_* */
atomic_t sock_state;
struct socket *sock;
- struct ceph_entity_addr peer_addr; /* peer address */
- struct ceph_entity_addr peer_addr_for_me;
- unsigned long flags;
- unsigned long state;
+ unsigned long flags; /* CEPH_CON_F_* */
const char *error_msg; /* error message, if any */
struct ceph_entity_name peer_name; /* peer name */
-
+ struct ceph_entity_addr peer_addr; /* peer address */
u64 peer_features;
- u32 connect_seq; /* identify the most recent connection
- attempt for this connection, client */
- u32 peer_global_seq; /* peer's global seq for this connection */
-
- int auth_retry; /* true if we need a newer authorizer */
- void *auth_reply_buf; /* where to put the authorizer reply */
- int auth_reply_buf_len;
struct mutex mutex;
@@ -215,49 +496,86 @@ struct ceph_connection {
u64 in_seq, in_seq_acked; /* last message received, acked */
- /* connection negotiation temps */
- char in_banner[CEPH_BANNER_MAX_LEN];
- struct ceph_msg_connect out_connect;
- struct ceph_msg_connect_reply in_reply;
- struct ceph_entity_addr actual_peer_addr;
-
- /* message out temps */
- struct ceph_msg_header out_hdr;
+ struct ceph_msg *in_msg;
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
- bool out_msg_done;
- struct kvec out_kvec[8], /* sending header/footer data */
- *out_kvec_cur;
- int out_kvec_left; /* kvec's left in out_kvec */
- int out_skip; /* skip this many bytes */
- int out_kvec_bytes; /* total bytes left */
- int out_more; /* there is more data after the kvecs */
- __le64 out_temp_ack; /* for writing an ack */
- struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
- stamp */
-
- /* message in temps */
- struct ceph_msg_header in_hdr;
- struct ceph_msg *in_msg;
+ struct page *bounce_page;
u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
- char in_tag; /* protocol control byte */
- int in_base_pos; /* bytes read */
- __le64 in_temp_ack; /* for reading an ack */
-
- struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
+ struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
+
+ union {
+ struct ceph_connection_v1_info v1;
+ struct ceph_connection_v2_info v2;
+ };
};
+extern struct page *ceph_zero_page;
+
+void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag);
+void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag);
+bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag);
+bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
+ unsigned long con_flag);
+bool ceph_con_flag_test_and_set(struct ceph_connection *con,
+ unsigned long con_flag);
+
+void ceph_encode_my_addr(struct ceph_messenger *msgr);
+
+int ceph_tcp_connect(struct ceph_connection *con);
+int ceph_con_close_socket(struct ceph_connection *con);
+void ceph_con_reset_session(struct ceph_connection *con);
+
+u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt);
+void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq);
+void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq);
+
+void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
+ struct ceph_msg *msg, size_t length);
+struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
+ size_t *page_offset, size_t *length);
+void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
+
+u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
+ unsigned int length);
+
+bool ceph_addr_is_blank(const struct ceph_entity_addr *addr);
+int ceph_addr_port(const struct ceph_entity_addr *addr);
+void ceph_addr_set_port(struct ceph_entity_addr *addr, int p);
+
+void ceph_con_process_message(struct ceph_connection *con);
+int ceph_con_in_msg_alloc(struct ceph_connection *con,
+ struct ceph_msg_header *hdr, int *skip);
+struct ceph_msg *ceph_con_get_out_msg(struct ceph_connection *con);
+
+/* messenger_v1.c */
+int ceph_con_v1_try_read(struct ceph_connection *con);
+int ceph_con_v1_try_write(struct ceph_connection *con);
+void ceph_con_v1_revoke(struct ceph_connection *con, struct ceph_msg *msg);
+void ceph_con_v1_revoke_incoming(struct ceph_connection *con);
+bool ceph_con_v1_opened(struct ceph_connection *con);
+void ceph_con_v1_reset_session(struct ceph_connection *con);
+void ceph_con_v1_reset_protocol(struct ceph_connection *con);
+
+/* messenger_v2.c */
+int ceph_con_v2_try_read(struct ceph_connection *con);
+int ceph_con_v2_try_write(struct ceph_connection *con);
+void ceph_con_v2_revoke(struct ceph_connection *con, struct ceph_msg *msg);
+void ceph_con_v2_revoke_incoming(struct ceph_connection *con);
+bool ceph_con_v2_opened(struct ceph_connection *con);
+void ceph_con_v2_reset_session(struct ceph_connection *con);
+void ceph_con_v2_reset_protocol(struct ceph_connection *con);
+
+
+extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr);
-extern const char *ceph_pr_addr(const struct sockaddr_storage *ss);
extern int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
- int max_count, int *count);
-
+ int max_count, int *count, char delim);
extern int ceph_msgr_init(void);
extern void ceph_msgr_exit(void);
@@ -266,6 +584,7 @@ extern void ceph_msgr_flush(void);
extern void ceph_messenger_init(struct ceph_messenger *msgr,
struct ceph_entity_addr *myaddr);
extern void ceph_messenger_fini(struct ceph_messenger *msgr);
+extern void ceph_messenger_reset_nonce(struct ceph_messenger *msgr);
extern void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
@@ -284,15 +603,21 @@ extern void ceph_con_keepalive(struct ceph_connection *con);
extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
unsigned long interval);
-extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
- size_t length, size_t alignment);
+void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
+ size_t length, size_t alignment, bool own_pages);
extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
-extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
- size_t length);
+void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
+ u32 length);
#endif /* CONFIG_BLOCK */
+void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
+ struct ceph_bvec_iter *bvec_pos);
+void ceph_msg_data_add_iter(struct ceph_msg *msg,
+ struct iov_iter *iter);
+struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
+ gfp_t flags, bool can_fail);
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
bool can_fail);
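
The ceph_bvec_iter helpers take a per-segment step expression, which is how the
messenger walks message data while checksumming or copying. A sketch of CRCing
the next len bytes behind a cursor; lowmem pages are assumed so page_address()
is valid, whereas the real messenger code kmaps as needed:

#include <linux/crc32c.h>
#include <linux/ceph/messenger.h>

static u32 sketch_crc_bvecs(struct ceph_bvec_iter *it, unsigned int len)
{
	u32 crc = ~0;

	ceph_bvec_iter_advance_step(it, len, ({
		crc = crc32c(crc, page_address(bv.bv_page) + bv.bv_offset,
			     bv.bv_len);
	}));
	return crc;
}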
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index d5a3ecea578d..7a9a40163c0f 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_MON_CLIENT_H
#define _FS_CEPH_MON_CLIENT_H
@@ -18,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[0];
+ struct ceph_entity_inst mon_inst[] __counted_by(num_mon);
};
struct ceph_mon_client;
@@ -103,12 +104,12 @@ struct ceph_mon_client {
#endif
};
-extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
extern int ceph_monmap_contains(struct ceph_monmap *m,
struct ceph_entity_addr *addr);
extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
extern void ceph_monc_stop(struct ceph_mon_client *monc);
+extern void ceph_monc_reopen_session(struct ceph_mon_client *monc);
enum {
CEPH_SUB_MONMAP = 0,
@@ -133,15 +134,15 @@ void ceph_monc_renew_subs(struct ceph_mon_client *monc);
extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
unsigned long timeout);
-extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
- struct ceph_statfs *buf);
+int ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool,
+ struct ceph_statfs *buf);
int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
u64 *newest);
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
ceph_monc_callback_t cb, u64 private_data);
-int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
+int ceph_monc_blocklist_add(struct ceph_mon_client *monc,
struct ceph_entity_addr *client_addr);
extern int ceph_monc_open_session(struct ceph_mon_client *monc);
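
The extra data_pool argument to ceph_monc_do_statfs() supports the new
contains_data_pool/data_pool fields in struct ceph_mon_statfs, enabling
per-pool df output. A sketch; the pool-selection condition is illustrative,
and CEPH_NOPOOL is the "no specific pool" sentinel from ceph_fs.h:

static int sketch_statfs(struct ceph_mon_client *monc, u64 data_pool,
			 bool single_pool, struct ceph_statfs *st)
{
	return ceph_monc_do_statfs(monc,
				   single_pool ? data_pool : CEPH_NOPOOL,
				   st);
}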
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h
index ddd0d48d0384..729cdf700eae 100644
--- a/include/linux/ceph/msgpool.h
+++ b/include/linux/ceph/msgpool.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_MSGPOOL
#define _FS_CEPH_MSGPOOL
@@ -12,14 +13,15 @@ struct ceph_msgpool {
mempool_t *pool;
int type; /* preallocated message type */
int front_len; /* preallocated payload size */
+ int max_data_items;
};
-extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
- int front_len, int size, bool blocking,
- const char *name);
+int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
+ int front_len, int max_data_items, int size,
+ const char *name);
extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
-extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
- int front_len);
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
+ int max_data_items);
extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
#endif
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 0fe2656ac415..3989dcb94d3d 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_MSGR_H
#define CEPH_MSGR_H
@@ -8,24 +9,45 @@
#define CEPH_MON_PORT 6789 /* default monitor port */
/*
- * client-side processes will try to bind to ports in this
- * range, simply for the benefit of tools like nmap or wireshark
- * that would like to identify the protocol.
- */
-#define CEPH_PORT_FIRST 6789
-#define CEPH_PORT_START 6800 /* non-monitors start here */
-#define CEPH_PORT_LAST 6900
-
-/*
* tcp connection banner. include a protocol version. and adjust
* whenever the wire protocol changes. try to keep this string length
* constant.
*/
#define CEPH_BANNER "ceph v027"
+#define CEPH_BANNER_LEN 9
#define CEPH_BANNER_MAX_LEN 30
/*
+ * messenger V2 connection banner prefix.
+ * The full banner string should have the form: "ceph v2\n<le16>"
+ * the 2 bytes are the length of the remaining banner.
+ */
+#define CEPH_BANNER_V2 "ceph v2\n"
+#define CEPH_BANNER_V2_LEN 8
+#define CEPH_BANNER_V2_PREFIX_LEN (CEPH_BANNER_V2_LEN + sizeof(__le16))
+
+/*
+ * messenger V2 features
+ */
+#define CEPH_MSGR2_INCARNATION_1 (0ull)
+
+#define DEFINE_MSGR2_FEATURE(bit, incarnation, name) \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATUREMASK_##name = \
+ (1ULL << bit | CEPH_MSGR2_INCARNATION_##incarnation);
+
+#define HAVE_MSGR2_FEATURE(x, name) \
+ (((x) & (CEPH_MSGR2_FEATUREMASK_##name)) == (CEPH_MSGR2_FEATUREMASK_##name))
+
+DEFINE_MSGR2_FEATURE( 0, 1, REVISION_1) // msgr2.1
+
+#define CEPH_MSGR2_SUPPORTED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1)
+
+#define CEPH_MSGR2_REQUIRED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1)
+
+
+/*
* Rollover-safe type and comparator for 32-bit sequence numbers.
* Comparator returns -1, 0, or 1.
*/
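The DEFINE_MSGR2_FEATURE()/HAVE_MSGR2_FEATURE() pair above folds a feature bit and the incarnation it belongs to into one mask, so a single comparison checks both. A minimal standalone sketch of that pattern, with the CEPH_ prefix dropped and otherwise illustrative names:

#include <stdint.h>
#include <stdio.h>

#define MSGR2_INCARNATION_1 (0ull)

/* Mirror of the header's macro: the mask carries the bit plus incarnation. */
#define DEFINE_MSGR2_FEATURE(bit, incarnation, name)                   \
	static const uint64_t MSGR2_FEATURE_##name = (1ULL << (bit));  \
	static const uint64_t MSGR2_FEATUREMASK_##name =               \
		(1ULL << (bit) | MSGR2_INCARNATION_##incarnation);

#define HAVE_MSGR2_FEATURE(x, name) \
	(((x) & MSGR2_FEATUREMASK_##name) == MSGR2_FEATUREMASK_##name)

DEFINE_MSGR2_FEATURE(0, 1, REVISION_1) /* msgr2.1 */

int main(void)
{
	uint64_t peer_features = MSGR2_FEATURE_REVISION_1;

	/* True: the peer advertises every bit in the REVISION_1 mask. */
	printf("revision 1: %d\n", HAVE_MSGR2_FEATURE(peer_features, REVISION_1));

	/* False as soon as any required mask bit is missing. */
	printf("revision 1: %d\n", HAVE_MSGR2_FEATURE(0, REVISION_1));
	return 0;
}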
@@ -60,11 +82,18 @@ extern const char *ceph_entity_type_name(int type);
* entity_addr -- network address
*/
struct ceph_entity_addr {
- __le32 type;
+ __le32 type; /* CEPH_ENTITY_ADDR_TYPE_* */
__le32 nonce; /* unique id for process (e.g. pid) */
struct sockaddr_storage in_addr;
} __attribute__ ((packed));
+static inline bool ceph_addr_equal_no_type(const struct ceph_entity_addr *lhs,
+ const struct ceph_entity_addr *rhs)
+{
+ return !memcmp(&lhs->in_addr, &rhs->in_addr, sizeof(lhs->in_addr)) &&
+ lhs->nonce == rhs->nonce;
+}
+
struct ceph_entity_inst {
struct ceph_entity_name name;
struct ceph_entity_addr addr;
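ceph_addr_equal_no_type() deliberately skips the type field, so a legacy and a msgr2 address for the same entity compare equal. A self-contained sketch of the comparison using a simplified stand-in for the kernel struct; note the memset() calls, since memcmp() over sockaddr_storage also sees padding bytes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Simplified stand-in for struct ceph_entity_addr. */
struct entity_addr {
	uint32_t type;                   /* ignored by the comparison */
	uint32_t nonce;                  /* unique id for the process */
	struct sockaddr_storage in_addr;
};

static bool addr_equal_no_type(const struct entity_addr *lhs,
			       const struct entity_addr *rhs)
{
	return !memcmp(&lhs->in_addr, &rhs->in_addr, sizeof(lhs->in_addr)) &&
	       lhs->nonce == rhs->nonce;
}

int main(void)
{
	struct entity_addr a, b;

	/* Zero everything first so the padding bytes compare equal too. */
	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));
	a.type = 1;              /* e.g. a legacy address */
	b.type = 2;              /* e.g. a msgr2 address */
	a.nonce = b.nonce = 42;

	printf("equal: %d\n", addr_equal_no_type(&a, &b)); /* prints 1 */
	return 0;
}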
@@ -90,7 +119,7 @@ struct ceph_entity_inst {
#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
-
+#define CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER 16 /* cephx v2 doing server challenge */
/*
* connection negotiation
@@ -159,6 +188,24 @@ struct ceph_msg_header {
__le32 crc; /* header crc32c */
} __attribute__ ((packed));
+struct ceph_msg_header2 {
+ __le64 seq; /* message seq# for this session */
+ __le64 tid; /* transaction id */
+ __le16 type; /* message type */
+ __le16 priority; /* priority. higher value == higher priority */
+ __le16 version; /* version of message encoding */
+
+ __le32 data_pre_padding_len;
+ __le16 data_off; /* sender: include full offset;
+ receiver: mask against ~PAGE_MASK */
+
+ __le64 ack_seq;
+ __u8 flags;
+ /* oldest code we think can decode this. unknown if zero. */
+ __le16 compat_version;
+ __le16 reserved;
+} __attribute__ ((packed));
+
#define CEPH_MSG_PRIO_LOW 64
#define CEPH_MSG_PRIO_DEFAULT 127
#define CEPH_MSG_PRIO_HIGH 196
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index adf670ecaf94..50b14a5661c7 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_OSD_CLIENT_H
#define _FS_CEPH_OSD_CLIENT_H
@@ -7,6 +8,7 @@
#include <linux/mempool.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
+#include <linux/ktime.h>
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
@@ -27,14 +29,63 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
#define CEPH_HOMELESS_OSD -1
-/* a given osd we're communicating with */
+/*
+ * A single extent in a SPARSE_READ reply.
+ *
+ * Note that these come from the OSD as little-endian values. On BE arches,
+ * we convert them in-place after receipt.
+ */
+struct ceph_sparse_extent {
+ u64 off;
+ u64 len;
+} __packed;
+
+/* Sparse read state machine state values */
+enum ceph_sparse_read_state {
+ CEPH_SPARSE_READ_HDR = 0,
+ CEPH_SPARSE_READ_EXTENTS,
+ CEPH_SPARSE_READ_DATA_LEN,
+ CEPH_SPARSE_READ_DATA_PRE,
+ CEPH_SPARSE_READ_DATA,
+};
+
+/*
+ * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of
+ * 64-bit offset/length pairs, and then all of the actual file data
+ * concatenated after it (sans holes).
+ *
+ * Unfortunately, we don't know how long the extent array is until we've
+ * started reading the data section of the reply. The caller should send down
+ * a destination buffer for the array, but we'll alloc one if it's too small
+ * or if the caller doesn't.
+ */
+struct ceph_sparse_read {
+ enum ceph_sparse_read_state sr_state; /* state machine state */
+ u64 sr_req_off; /* orig request offset */
+ u64 sr_req_len; /* orig request length */
+ u64 sr_pos; /* current pos in buffer */
+ int sr_index; /* current extent index */
+ u32 sr_datalen; /* length of actual data */
+ u32 sr_count; /* extent count in reply */
+ int sr_ext_len; /* length of extent array */
+ struct ceph_sparse_extent *sr_extent; /* extent array */
+};
+
+/*
+ * A given osd we're communicating with.
+ *
+ * Note that the o_requests tree can be searched while holding the "lock" mutex
+ * or the "o_requests_lock" spinlock. Insertion or removal requires both!
+ */
struct ceph_osd {
refcount_t o_ref;
+ int o_sparse_op_idx;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
struct rb_node o_node;
struct ceph_connection o_con;
+ spinlock_t o_requests_lock;
struct rb_root o_requests;
struct rb_root o_linger_requests;
struct rb_root o_backoff_mappings;
@@ -44,6 +95,7 @@ struct ceph_osd {
unsigned long lru_ttl;
struct list_head o_keepalive_item;
struct mutex lock;
+ struct ceph_sparse_read o_sparse_read;
};
#define CEPH_OSD_SLAB_OPS 2
@@ -56,6 +108,8 @@ enum ceph_osd_data_type {
#ifdef CONFIG_BLOCK
CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
+ CEPH_OSD_DATA_TYPE_BVECS,
+ CEPH_OSD_DATA_TYPE_ITER,
};
struct ceph_osd_data {
@@ -71,10 +125,15 @@ struct ceph_osd_data {
struct ceph_pagelist *pagelist;
#ifdef CONFIG_BLOCK
struct {
- struct bio *bio; /* list of bios */
- size_t bio_length; /* total in list */
+ struct ceph_bio_iter bio_pos;
+ u32 bio_length;
};
#endif /* CONFIG_BLOCK */
+ struct {
+ struct ceph_bvec_iter bvec_pos;
+ u32 num_bvecs;
+ };
+ struct iov_iter iter;
};
};
@@ -91,6 +150,8 @@ struct ceph_osd_req_op {
u64 offset, length;
u64 truncate_size;
u32 truncate_seq;
+ int sparse_ext_cnt;
+ struct ceph_sparse_extent *sparse_ext;
struct ceph_osd_data osd_data;
} extent;
struct {
@@ -129,7 +190,18 @@ struct ceph_osd_req_op {
struct {
u64 expected_object_size;
u64 expected_write_size;
+ u32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
} alloc_hint;
+ struct {
+ u64 snapid;
+ u64 src_version;
+ u8 flags;
+ u32 src_fadvise_flags;
+ struct ceph_osd_data osd_data;
+ } copy_from;
+ struct {
+ u64 ver;
+ } assert_ver;
};
};
@@ -151,6 +223,7 @@ struct ceph_osd_request_target {
bool recovery_deletes;
unsigned int flags; /* CEPH_OSD_FLAG_* */
+ bool used_replica;
bool paused;
u32 epoch;
@@ -164,6 +237,7 @@ struct ceph_osd_request {
u64 r_tid; /* unique for this client */
struct rb_node r_node;
struct rb_node r_mc_node; /* map check */
+ struct work_struct r_complete_work;
struct ceph_osd *r_osd;
struct ceph_osd_request_target r_t;
@@ -182,28 +256,30 @@ struct ceph_osd_request {
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
+ bool r_linger; /* don't resend on failure */
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
- struct list_head r_unsafe_item;
struct inode *r_inode; /* for use by callbacks */
+ struct list_head r_private_item; /* ditto */
void *r_priv; /* ditto */
/* set by submitter */
u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */
struct ceph_snap_context *r_snapc; /* for writes */
- struct timespec r_mtime; /* ditto */
+ struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
- bool r_linger; /* don't resend on failure */
- bool r_abort_on_full; /* return ENOSPC when full */
/* internal */
+ u64 r_version; /* data version sent in reply */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
+ ktime_t r_start_latency; /* ktime_t */
+ ktime_t r_end_latency; /* ktime_t */
int r_attempts;
u32 r_map_dne_bound;
- struct ceph_osd_req_op r_ops[];
+ struct ceph_osd_req_op r_ops[] __counted_by(r_num_ops);
};
struct ceph_request_redirect {
@@ -247,7 +323,7 @@ struct ceph_osd_linger_request {
struct ceph_osd_request_target t;
u32 map_dne_bound;
- struct timespec mtime;
+ struct timespec64 mtime;
struct kref kref;
struct mutex lock;
@@ -269,6 +345,9 @@ struct ceph_osd_linger_request {
rados_watcherrcb_t errcb;
void *data;
+ struct ceph_pagelist *request_pl;
+ struct page **notify_id_pages;
+
struct page ***preply_pages;
size_t *preply_len;
};
@@ -341,6 +420,7 @@ struct ceph_osd_client {
struct rb_root linger_map_checks;
atomic_t num_requests;
atomic_t num_homeless;
+ int abort_err;
struct delayed_work timeout_work;
struct delayed_work osds_timeout_work;
#ifdef CONFIG_DEBUG_FS
@@ -353,6 +433,7 @@ struct ceph_osd_client {
struct ceph_msgpool msgpool_op_reply;
struct workqueue_struct *notify_wq;
+ struct workqueue_struct *completion_wq;
};
static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
@@ -366,14 +447,23 @@ extern void ceph_osdc_cleanup(void);
extern int ceph_osdc_init(struct ceph_osd_client *osdc,
struct ceph_client *client);
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
+extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc);
-extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
- struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
-
-extern void osd_req_op_init(struct ceph_osd_request *osd_req,
+void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err);
+void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc);
+
+#define osd_req_op_data(oreq, whch, typ, fld) \
+({ \
+ struct ceph_osd_request *__oreq = (oreq); \
+ unsigned int __whch = (whch); \
+ BUG_ON(__whch >= __oreq->r_num_ops); \
+ &__oreq->r_ops[__whch].typ.fld; \
+})
+
+struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
@@ -400,38 +490,54 @@ extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
- unsigned int which,
- struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
-extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *,
- unsigned int which,
- struct bio *bio, size_t bio_length);
+void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct ceph_bio_iter *bio_pos,
+ u32 bio_length);
#endif /* CONFIG_BLOCK */
+void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct bio_vec *bvecs, u32 num_bvecs,
+ u32 bytes);
+void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct ceph_bvec_iter *bvec_pos);
+void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
+ unsigned int which, struct iov_iter *iter);
-extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
- unsigned int which,
- struct ceph_pagelist *pagelist);
extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
unsigned int which,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
+void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct bio_vec *bvecs, u32 num_bvecs,
+ u32 bytes);
extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
unsigned int which,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode,
- const char *class, const char *method);
+int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
+ const char *class, const char *method);
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode);
extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
u64 expected_object_size,
- u64 expected_write_size);
+ u64 expected_write_size,
+ u32 flags);
+extern int osd_req_op_copy_from_init(struct ceph_osd_request *req,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ u32 dst_fadvise_flags,
+ u32 truncate_seq, u64 truncate_size,
+ u8 copy_from_flags);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
@@ -450,12 +556,28 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
+int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
+
+/*
+ * How big an extent array should we preallocate for a sparse read? This is
+ * just a starting value. If we get more than this back from the OSD, the
+ * receiver will reallocate.
+ */
+#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16
+
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
+{
+ if (!cnt)
+ cnt = CEPH_SPARSE_EXT_ARRAY_INITIAL;
+
+ return __ceph_alloc_sparse_ext_map(op, cnt);
+}
+
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
-extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req,
- bool nofail);
+void ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req);
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
@@ -470,24 +592,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
const char *class, const char *method,
unsigned int flags,
struct page *req_page, size_t req_len,
- struct page *resp_page, size_t *resp_len);
-
-extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
- struct ceph_vino vino,
- struct ceph_file_layout *layout,
- u64 off, u64 *plen,
- u32 truncate_seq, u64 truncate_size,
- struct page **pages, int nr_pages,
- int page_align);
-
-extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
- struct ceph_vino vino,
- struct ceph_file_layout *layout,
- struct ceph_snap_context *sc,
- u64 off, u64 len,
- u32 truncate_seq, u64 truncate_size,
- struct timespec *mtime,
- struct page **pages, int nr_pages);
+ struct page **resp_pages, size_t *resp_len);
/* watch/notify */
struct ceph_osd_linger_request *
@@ -506,21 +611,33 @@ int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
u64 notify_id,
u64 cookie,
void *payload,
- size_t payload_len);
+ u32 payload_len);
int ceph_osdc_notify(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
void *payload,
- size_t payload_len,
+ u32 payload_len,
u32 timeout,
struct page ***preply_pages,
size_t *preply_len);
-int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
- struct ceph_osd_linger_request *lreq);
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
struct ceph_watch_item **watchers,
u32 *num_watchers);
-#endif
+/* Find offset into the buffer of the end of the extent map */
+static inline u64 ceph_sparse_ext_map_end(struct ceph_osd_req_op *op)
+{
+ struct ceph_sparse_extent *ext;
+
+ /* No extents? No data */
+ if (op->extent.sparse_ext_cnt == 0)
+ return 0;
+
+ ext = &op->extent.sparse_ext[op->extent.sparse_ext_cnt - 1];
+
+ return ext->off + ext->len - op->extent.offset;
+}
+
+#endif
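ceph_sparse_ext_map_end() above turns the last returned extent into an offset relative to the start of the request buffer. The same arithmetic as a standalone program, with plain types standing in for the kernel structs:

#include <stdint.h>
#include <stdio.h>

struct sparse_extent {
	uint64_t off;
	uint64_t len;
};

/* End of the mapped data, relative to the start of the request. */
static uint64_t sparse_ext_map_end(const struct sparse_extent *ext,
				   int cnt, uint64_t req_off)
{
	if (cnt == 0)
		return 0; /* no extents, no data */

	return ext[cnt - 1].off + ext[cnt - 1].len - req_off;
}

int main(void)
{
	/* Read requested at offset 4096; the OSD returned two extents. */
	struct sparse_extent ext[] = {
		{ .off = 4096, .len =  512 },
		{ .off = 8192, .len = 1024 },
	};

	/* (8192 + 1024) - 4096 = 5120 bytes into the buffer. */
	printf("map end: %llu\n",
	       (unsigned long long)sparse_ext_map_end(ext, 2, 4096));
	return 0;
}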
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index af3444a5bfdd..5553019c3f07 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -1,10 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_OSDMAP_H
#define _FS_CEPH_OSDMAP_H
#include <linux/rbtree.h>
#include <linux/ceph/types.h>
#include <linux/ceph/decode.h>
-#include <linux/ceph/ceph_fs.h>
#include <linux/crush/crush.h>
/*
@@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
together */
#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */
+#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota,
+ will set FULL too */
+#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */
struct ceph_pg_pool_info {
struct rb_node node;
@@ -110,17 +113,16 @@ struct ceph_object_id {
int name_len;
};
+#define __CEPH_OID_INITIALIZER(oid) { .name = (oid).inline_name }
+
+#define CEPH_DEFINE_OID_ONSTACK(oid) \
+ struct ceph_object_id oid = __CEPH_OID_INITIALIZER(oid)
+
static inline void ceph_oid_init(struct ceph_object_id *oid)
{
- oid->name = oid->inline_name;
- oid->name_len = 0;
+ *oid = (struct ceph_object_id) __CEPH_OID_INITIALIZER(*oid);
}
-#define CEPH_OID_INIT_ONSTACK(oid) \
- ({ ceph_oid_init(&oid); oid; })
-#define CEPH_DEFINE_OID_ONSTACK(oid) \
- struct ceph_object_id oid = CEPH_OID_INIT_ONSTACK(oid)
-
static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
{
return oid->name == oid->inline_name && !oid->name_len;
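__CEPH_OID_INITIALIZER() leans on the fact that a variable is already in scope inside its own initializer, so the embedded inline_name buffer can seed the name pointer at definition time. The idiom reduced to a runnable sketch, with the struct shrunk for illustration:

#include <stdio.h>

/* Shrunken stand-in for struct ceph_object_id. */
struct object_id {
	char *name;
	char inline_name[32];
	int name_len;
};

#define OID_INITIALIZER(oid) { .name = (oid).inline_name }
#define DEFINE_OID_ONSTACK(oid) struct object_id oid = OID_INITIALIZER(oid)

static void oid_init(struct object_id *oid)
{
	/* The compound literal zeroes name_len and repoints name. */
	*oid = (struct object_id)OID_INITIALIZER(*oid);
}

int main(void)
{
	DEFINE_OID_ONSTACK(oid);

	printf("empty: %d\n", oid.name == oid.inline_name && !oid.name_len);
	oid_init(&oid); /* idempotent for an on-stack oid */
	printf("empty: %d\n", oid.name == oid.inline_name && !oid.name_len);
	return 0;
}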
@@ -135,6 +137,17 @@ int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
const char *fmt, ...);
void ceph_oid_destroy(struct ceph_object_id *oid);
+struct workspace_manager {
+ struct list_head idle_ws;
+ spinlock_t ws_lock;
+ /* Number of free workspaces */
+ int free_ws;
+ /* Total number of allocated workspaces */
+ atomic_t total_ws;
+ /* Waiters for a free workspace */
+ wait_queue_head_t ws_wait;
+};
+
struct ceph_pg_mapping {
struct rb_node node;
struct ceph_pg pgid;
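The workspace_manager replaces the old single crush_workspace plus crush_workspace_mutex with a pool of idle workspaces and a waitqueue, so concurrent CRUSH computations stop serializing on one buffer. A rough pthreads analogue of the get/put discipline, with a fixed pool, invented names, and no dynamic growth:

#include <pthread.h>
#include <stdio.h>

struct workspace {
	struct workspace *next;
	/* ... scratch memory for one CRUSH computation ... */
};

struct workspace_manager {
	struct workspace *idle;   /* free list */
	int free_ws;              /* number of free workspaces */
	pthread_mutex_t ws_lock;
	pthread_cond_t ws_wait;   /* waiters for a free workspace */
};

static struct workspace *ws_get(struct workspace_manager *m)
{
	struct workspace *ws;

	pthread_mutex_lock(&m->ws_lock);
	while (!m->idle)          /* all busy: sleep until one is put back */
		pthread_cond_wait(&m->ws_wait, &m->ws_lock);
	ws = m->idle;
	m->idle = ws->next;
	m->free_ws--;
	pthread_mutex_unlock(&m->ws_lock);
	return ws;
}

static void ws_put(struct workspace_manager *m, struct workspace *ws)
{
	pthread_mutex_lock(&m->ws_lock);
	ws->next = m->idle;
	m->idle = ws;
	m->free_ws++;
	pthread_cond_signal(&m->ws_wait);
	pthread_mutex_unlock(&m->ws_lock);
}

int main(void)
{
	struct workspace one = { .next = NULL };
	struct workspace_manager m = {
		.idle = &one, .free_ws = 1,
		.ws_lock = PTHREAD_MUTEX_INITIALIZER,
		.ws_wait = PTHREAD_COND_INITIALIZER,
	};
	struct workspace *ws = ws_get(&m);

	/* ... run a mapping with ws ... */
	ws_put(&m, ws);
	printf("free: %d\n", m.free_ws);
	return 0;
}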
@@ -182,8 +195,7 @@ struct ceph_osdmap {
* the list of osds that store+replicate them. */
struct crush_map *crush;
- struct mutex crush_workspace_mutex;
- void *crush_workspace;
+ struct workspace_manager crush_wsm;
};
static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
@@ -239,8 +251,8 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
}
struct ceph_osdmap *ceph_osdmap_alloc(void);
-extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
-struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2);
+struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
struct ceph_osdmap *map);
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
@@ -279,15 +291,10 @@ bool ceph_osds_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
bool any_change);
-/* calculate mapping of a file extent to an object */
-extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
- u64 off, u64 len,
- u64 *bno, u64 *oxoff, u64 *oxlen);
-
-int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
- const struct ceph_object_id *oid,
- const struct ceph_object_locator *oloc,
- struct ceph_pg *raw_pgid);
+void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
+ const struct ceph_object_id *oid,
+ const struct ceph_object_locator *oloc,
+ struct ceph_pg *raw_pgid);
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
const struct ceph_object_id *oid,
const struct ceph_object_locator *oloc,
@@ -305,10 +312,28 @@ bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
const struct ceph_pg *raw_pgid);
+struct crush_loc {
+ char *cl_type_name;
+ char *cl_name;
+};
+
+struct crush_loc_node {
+ struct rb_node cl_node;
+ struct crush_loc cl_loc; /* pointers into cl_data */
+ char cl_data[];
+};
+
+int ceph_parse_crush_location(char *crush_location, struct rb_root *locs);
+int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2);
+void ceph_clear_crush_locs(struct rb_root *locs);
+
+int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
+ struct rb_root *locs);
+
extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
u64 id);
-
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
#endif
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 75a7db21457d..879bec0863aa 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __FS_CEPH_PAGELIST_H
#define __FS_CEPH_PAGELIST_H
@@ -16,22 +17,7 @@ struct ceph_pagelist {
refcount_t refcnt;
};
-struct ceph_pagelist_cursor {
- struct ceph_pagelist *pl; /* pagelist, for error checking */
- struct list_head *page_lru; /* page in list */
- size_t room; /* room remaining to reset to */
-};
-
-static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
-{
- INIT_LIST_HEAD(&pl->head);
- pl->mapped_tail = NULL;
- pl->length = 0;
- pl->room = 0;
- INIT_LIST_HEAD(&pl->free_list);
- pl->num_pages_free = 0;
- refcount_set(&pl->refcnt, 1);
-}
+struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags);
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
@@ -41,12 +27,6 @@ extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space);
extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl);
-extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
- struct ceph_pagelist_cursor *c);
-
-extern int ceph_pagelist_truncate(struct ceph_pagelist *pl,
- struct ceph_pagelist_cursor *c);
-
static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
{
__le64 ev = cpu_to_le64(v);
@@ -67,7 +47,7 @@ static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v)
return ceph_pagelist_append(pl, &v, 1);
}
static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl,
- char *s, size_t len)
+ char *s, u32 len)
{
int ret = ceph_pagelist_encode_32(pl, len);
if (ret)
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index b8281feda9c7..73c3efbec36c 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_RADOS_H
#define CEPH_RADOS_H
@@ -142,8 +143,10 @@ extern const char *ceph_osd_state_name(int s);
/*
* osd map flag bits
*/
-#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
-#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
+#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC),
+ not set since ~luminous */
+#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC),
+ not set since ~luminous */
#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
@@ -230,7 +233,6 @@ extern const char *ceph_osd_state_name(int s);
\
/* fancy write */ \
f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \
- f(STARTSYNC, __CEPH_OSD_OP(WR, DATA, 7), "startsync") \
f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \
f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \
\
@@ -256,6 +258,7 @@ extern const char *ceph_osd_state_name(int s);
\
/* tiering */ \
f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \
+ f(COPY_FROM2, __CEPH_OSD_OP(WR, DATA, 45), "copy-from2") \
f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \
f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \
f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \
@@ -410,10 +413,18 @@ enum {
enum {
CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */
+ CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */
+ CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in
+ the near future */
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed
+ in the near future */
+ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only
+ once by this client */
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
-#define EBLACKLISTED ESHUTDOWN /* blacklisted */
+#define EBLOCKLISTED ESHUTDOWN /* blocklisted */
/* xattr comparison */
enum {
@@ -432,6 +443,16 @@ enum {
};
enum {
+ CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */
+ CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */
+ CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */
+ CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to
+ * cloneid */
+ CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */
+ CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ = 32, /* send truncate_{seq,size} */
+};
+
+enum {
CEPH_OSD_WATCH_OP_UNWATCH = 0,
CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1,
/* note: use only ODD ids to prevent pre-giant code from
@@ -444,6 +465,19 @@ enum {
const char *ceph_osd_watch_op_name(int o);
enum {
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE = 1,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE = 2,
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ = 4,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ = 8,
+ CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY = 16,
+ CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE = 32,
+ CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED = 64,
+ CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED = 128,
+ CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE = 256,
+ CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE = 512,
+};
+
+enum {
CEPH_OSD_BACKOFF_OP_BLOCK = 1,
CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2,
CEPH_OSD_BACKOFF_OP_UNBLOCK = 3,
@@ -490,13 +524,29 @@ struct ceph_osd_op {
__le64 cookie;
} __attribute__ ((packed)) notify;
struct {
+ __le64 unused;
+ __le64 ver;
+ } __attribute__ ((packed)) assert_ver;
+ struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
struct {
__le64 expected_object_size;
__le64 expected_write_size;
+ __le32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
} __attribute__ ((packed)) alloc_hint;
+ struct {
+ __le64 snapid;
+ __le64 src_version;
+ __u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */
+ /*
+ * CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags
+ * for src object, flags for dest object are in
+ * ceph_osd_op::flags.
+ */
+ __le32 src_fadvise_flags;
+ } __attribute__ ((packed)) copy_from;
};
__le32 payload_len;
} __attribute__ ((packed));
diff --git a/include/linux/ceph/string_table.h b/include/linux/ceph/string_table.h
index 1b02c96daf75..a4a9962d1e14 100644
--- a/include/linux/ceph/string_table.h
+++ b/include/linux/ceph/string_table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_STRING_TABLE_H
#define _FS_CEPH_STRING_TABLE_H
diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h
new file mode 100644
index 000000000000..3486636c0e6e
--- /dev/null
+++ b/include/linux/ceph/striper.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CEPH_STRIPER_H
+#define _LINUX_CEPH_STRIPER_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct ceph_file_layout;
+
+void ceph_calc_file_object_mapping(struct ceph_file_layout *l,
+ u64 off, u64 len,
+ u64 *objno, u64 *objoff, u32 *xlen);
+
+struct ceph_object_extent {
+ struct list_head oe_item;
+ u64 oe_objno;
+ u64 oe_off;
+ u64 oe_len;
+};
+
+static inline void ceph_object_extent_init(struct ceph_object_extent *ex)
+{
+ INIT_LIST_HEAD(&ex->oe_item);
+}
+
+/*
+ * Called for each mapped stripe unit.
+ *
+ * @bytes: number of bytes mapped, i.e. the minimum of the full length
+ * requested (file extent length) or the remainder of the stripe
+ * unit within an object
+ */
+typedef void (*ceph_object_extent_fn_t)(struct ceph_object_extent *ex,
+ u32 bytes, void *arg);
+
+int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len,
+ struct list_head *object_extents,
+ struct ceph_object_extent *alloc_fn(void *arg),
+ void *alloc_arg,
+ ceph_object_extent_fn_t action_fn,
+ void *action_arg);
+int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len,
+ struct list_head *object_extents,
+ ceph_object_extent_fn_t action_fn,
+ void *action_arg);
+
+struct ceph_file_extent {
+ u64 fe_off;
+ u64 fe_len;
+};
+
+static inline u64 ceph_file_extents_bytes(struct ceph_file_extent *file_extents,
+ u32 num_file_extents)
+{
+ u64 bytes = 0;
+ u32 i;
+
+ for (i = 0; i < num_file_extents; i++)
+ bytes += file_extents[i].fe_len;
+
+ return bytes;
+}
+
+int ceph_extent_to_file(struct ceph_file_layout *l,
+ u64 objno, u64 objoff, u64 objlen,
+ struct ceph_file_extent **file_extents,
+ u32 *num_file_extents);
+
+u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size);
+
+#endif
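ceph_calc_file_object_mapping() maps a file extent onto RADOS objects with RAID-0-style arithmetic over (stripe_unit, stripe_count, object_size). A standalone sketch of that mapping for one offset, assuming the standard Ceph striping formula; the layout values and names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: 64K stripe unit, 4 stripes, 4M objects. */
struct layout {
	uint32_t stripe_unit;
	uint32_t stripe_count;
	uint32_t object_size; /* multiple of stripe_unit */
};

static void file_to_object(const struct layout *l, uint64_t off, uint64_t len,
			   uint64_t *objno, uint64_t *objoff, uint32_t *xlen)
{
	uint32_t sus_per_object = l->object_size / l->stripe_unit;
	uint64_t blockno = off / l->stripe_unit;   /* which stripe unit */
	uint32_t blockoff = off % l->stripe_unit;  /* offset within it */
	uint64_t stripeno = blockno / l->stripe_count;
	uint64_t stripepos = blockno % l->stripe_count;
	uint64_t objsetno = stripeno / sus_per_object;

	*objno = objsetno * l->stripe_count + stripepos;
	*objoff = (stripeno % sus_per_object) * l->stripe_unit + blockoff;
	/* only map up to the end of the current stripe unit */
	*xlen = len < l->stripe_unit - blockoff ? len
						: l->stripe_unit - blockoff;
}

int main(void)
{
	struct layout l = { 65536, 4, 4 * 1024 * 1024 };
	uint64_t objno, objoff;
	uint32_t xlen;

	file_to_object(&l, 300000, 100000, &objno, &objoff, &xlen);
	printf("objno=%llu objoff=%llu xlen=%u\n",
	       (unsigned long long)objno, (unsigned long long)objoff, xlen);
	return 0;
}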
diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h
index d3ff1cf2d27e..bd3d532902d7 100644
--- a/include/linux/ceph/types.h
+++ b/include/linux/ceph/types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_TYPES_H
#define _FS_CEPH_TYPES_H
@@ -23,6 +24,7 @@ struct ceph_vino {
/* context for the caps reservation mechanism */
struct ceph_cap_reservation {
int count;
+ int used;
};
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index b454dfce60d9..83e6613d12ae 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: cfag12864b.h
* Version: 0.1.0
* Description: cfag12864b LCD driver header
- * License: GPLv2
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-12
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _CFAG12864B_H_
@@ -42,13 +28,6 @@
extern unsigned char * cfag12864b_buffer;
/*
- * Get the refresh rate of the LCD
- *
- * Returns the refresh rate (hertz).
- */
-extern unsigned int cfag12864b_getrate(void);
-
-/*
* Enable refreshing
*
* Returns 0 if successful (no one was using it),
@@ -64,16 +43,6 @@ extern unsigned char cfag12864b_enable(void);
extern void cfag12864b_disable(void);
/*
- * Is enabled refreshing? (is anyone using the module?)
- *
- * Returns 0 if refreshing is not enabled (anyone is using it),
- * or != 0 if refreshing is enabled (someone is using it).
- *
- * Useful for buffer read-only modules.
- */
-extern unsigned char cfag12864b_isenabled(void);
-
-/*
* Is the module inited?
*/
extern unsigned char cfag12864b_isinited(void);
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
new file mode 100644
index 000000000000..1fd22ea6eba4
--- /dev/null
+++ b/include/linux/cfi.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef _LINUX_CFI_H
+#define _LINUX_CFI_H
+
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <asm/cfi.h>
+
+#ifdef CONFIG_CFI
+extern bool cfi_warn;
+
+enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
+ unsigned long *target, u32 type);
+
+static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return report_cfi_failure(regs, addr, NULL, 0);
+}
+
+#ifndef cfi_get_offset
+/*
+ * Returns the CFI prefix offset. By default, the compiler emits only
+ * a 4-byte CFI type hash before the function. If an architecture
+ * uses -fpatchable-function-entry=N,M where M>0 to change the prefix
+ * offset, they must override this function.
+ */
+static inline int cfi_get_offset(void)
+{
+ return 4;
+}
+#endif
+
+#ifndef cfi_get_func_hash
+static inline u32 cfi_get_func_hash(void *func)
+{
+ u32 hash;
+
+ if (get_kernel_nofault(hash, func - cfi_get_offset()))
+ return 0;
+
+ return hash;
+}
+#endif
+
+/* CFI type hashes for BPF function types */
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+
+#else /* CONFIG_CFI */
+
+static inline int cfi_get_offset(void) { return 0; }
+static inline u32 cfi_get_func_hash(void *func) { return 0; }
+
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+
+#endif /* CONFIG_CFI */
+
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+bool is_cfi_trap(unsigned long addr);
+#else
+static inline bool is_cfi_trap(unsigned long addr) { return false; }
+#endif
+
+#ifdef CONFIG_MODULES
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+void module_cfi_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod);
+#else
+static inline void module_cfi_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod) {}
+#endif /* CONFIG_ARCH_USES_CFI_TRAPS */
+#endif /* CONFIG_MODULES */
+
+#ifndef CFI_NOSEAL
+#define CFI_NOSEAL(x)
+#endif
+
+#endif /* _LINUX_CFI_H */
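cfi_get_func_hash() reads the 4-byte KCFI type hash the compiler emits immediately before a function's entry point, at the distance reported by cfi_get_offset(). A userspace simulation of the lookup, where a byte buffer stands in for kernel text and memcpy() for get_kernel_nofault():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CFI_OFFSET 4 /* default prefix offset, as in cfi_get_offset() */

/* Read the type hash stored CFI_OFFSET bytes before the entry point. */
static uint32_t get_func_hash(const unsigned char *entry)
{
	uint32_t hash;

	memcpy(&hash, entry - CFI_OFFSET, sizeof(hash));
	return hash;
}

int main(void)
{
	/* Fake "text": a 4-byte hash, then the function body would follow. */
	unsigned char text[8] = { 0 };
	const unsigned char *entry = &text[CFI_OFFSET];
	uint32_t type_hash = 0x12345678;

	memcpy(text, &type_hash, sizeof(type_hash));

	/* An indirect call is allowed only when the hashes match. */
	printf("hash ok: %d\n", get_func_hash(entry) == type_hash);
	return 0;
}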
diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h
new file mode 100644
index 000000000000..a86af9bc8bdc
--- /dev/null
+++ b/include/linux/cfi_types.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) type definitions.
+ */
+#ifndef _LINUX_CFI_TYPES_H
+#define _LINUX_CFI_TYPES_H
+
+#ifdef __ASSEMBLY__
+#include <linux/linkage.h>
+
+#ifdef CONFIG_CFI
+/*
+ * Use the __kcfi_typeid_<function> type identifier symbol to
+ * annotate indirectly called assembly functions. The compiler emits
+ * these symbols for all address-taken function declarations in C
+ * code.
+ */
+#ifndef __CFI_TYPE
+#define __CFI_TYPE(name) \
+ .4byte __kcfi_typeid_##name
+#endif
+
+#define SYM_TYPED_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ __CFI_TYPE(name) ASM_NL \
+ name:
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_TYPED_ENTRY(name, linkage, align)
+
+#else /* CONFIG_CFI */
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_START(name, linkage, align)
+
+#endif /* CONFIG_CFI */
+
+#ifndef SYM_TYPED_FUNC_START
+#define SYM_TYPED_FUNC_START(name) \
+ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_CFI
+#define DEFINE_CFI_TYPE(name, func) \
+ /* \
+ * Force a reference to the function so the compiler generates \
+ * __kcfi_typeid_<func>. \
+ */ \
+ __ADDRESSABLE(func); \
+ /* u32 name __ro_after_init = __kcfi_typeid_<func> */ \
+ extern u32 name; \
+ asm ( \
+ " .pushsection .data..ro_after_init,\"aw\",\%progbits \n" \
+ " .type " #name ",\%object \n" \
+ " .globl " #name " \n" \
+ " .p2align 2, 0x0 \n" \
+ #name ": \n" \
+ " .4byte __kcfi_typeid_" #func " \n" \
+ " .size " #name ", 4 \n" \
+ " .popsection \n" \
+ );
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_CFI_TYPES_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 09f4c7df1478..b760a3c470a5 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/cgroup-defs.h - basic definitions for cgroup
*
@@ -16,8 +17,10 @@
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
+#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
-#include <linux/bpf-cgroup.h>
+#include <linux/bpf-cgroup-defs.h>
+#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS
@@ -29,6 +32,7 @@ struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
+struct poll_table_struct;
#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
@@ -61,6 +65,12 @@ enum {
* specified at mount time and thus is implemented here.
*/
CGRP_CPUSET_CLONE_CHILDREN,
+
+ /* Control group has to be frozen. */
+ CGRP_FREEZE,
+
+ /* Cgroup is frozen. */
+ CGRP_FROZEN,
};
/* cgroup_root->flags */
@@ -74,6 +84,50 @@ enum {
* aren't writeable from inside the namespace.
*/
CGRP_ROOT_NS_DELEGATE = (1 << 3),
+
+ /*
+ * Reduce latencies on dynamic cgroup modifications such as task
+ * migrations and controller on/offs by disabling percpu operation on
+ * cgroup_threadgroup_rwsem. This makes hot path operations such as
+ * forks and exits into the slow path and more expensive.
+ *
+ * Alleviate the contention between fork, exec, exit operations and
+ * writing to cgroup.procs by taking a per threadgroup rwsem instead of
+ * the global cgroup_threadgroup_rwsem. Fork and other operations
+ * from threads in different thread groups no longer contend with
+ * writing to cgroup.procs.
+ *
+ * The static usage pattern of creating a cgroup, enabling controllers,
+ * and then seeding it with CLONE_INTO_CGROUP doesn't require write
+ * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
+ * favordynmod.
+ */
+ CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),
+
+ /*
+ * Enable cpuset controller in v1 cgroup to use v2 behavior.
+ */
+ CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),
+
+ /*
+ * Enable legacy local memory.events.
+ */
+ CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),
+
+ /*
+ * Enable recursive subtree protection
+ */
+ CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
+
+ /*
+ * Enable hugetlb accounting for the memory controller.
+ */
+ CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
+
+ /*
+ * Enable legacy local pids.events.
+ */
+ CGRP_ROOT_PIDS_LOCAL_EVENTS = (1 << 20),
};
/* cftype->flags */
@@ -84,10 +138,23 @@ enum {
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
+ CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
+ __CFTYPE_ADDED = (1 << 18),
+};
+
+enum cgroup_attach_lock_mode {
+ /* Default */
+ CGRP_ATTACH_LOCK_GLOBAL,
+
+ /* When pid=0 && threadgroup=false, see comments in cgroup_procs_write_start */
+ CGRP_ATTACH_LOCK_NONE,
+
+ /* When favordynmods is on, see comments above CGRP_ROOT_FAVOR_DYNMODS */
+ CGRP_ATTACH_LOCK_PER_THREADGROUP,
};
/*
@@ -98,6 +165,8 @@ enum {
struct cgroup_file {
/* do not access any fields from outside cgroup core */
struct kernfs_node *kn;
+ unsigned long notified_at;
+ struct timer_list notify_timer;
};
/*
@@ -117,7 +186,28 @@ struct cgroup_subsys_state {
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
- /* siblings list anchored at the parent's ->children */
+ /*
+ * Depending on the context, this field is initialized
+ * via css_rstat_init() at different places:
+ *
+ * when css is associated with cgroup::self
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init()
+ * when css->cgroup is not the root cgroup
+ * performed in cgroup_create()
+ * when css is associated with a subsystem
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init_subsys() in the non-early path
+ * when css->cgroup is not the root cgroup
+ * performed in css_create()
+ */
+ struct css_rstat_cpu __percpu *rstat_cpu;
+
+ /*
+ * siblings list anchored at the parent's ->children
+ *
+ * linkage is protected by cgroup_mutex or RCU
+ */
struct list_head sibling;
struct list_head children;
@@ -144,14 +234,32 @@ struct cgroup_subsys_state {
atomic_t online_cnt;
/* percpu_ref killing and RCU release */
- struct rcu_head rcu_head;
struct work_struct destroy_work;
+ struct rcu_work destroy_rwork;
/*
* PI: the parent css. Placed here for cache proximity to following
* fields of the containing structure.
*/
struct cgroup_subsys_state *parent;
+
+ /*
+ * Keep track of total numbers of visible descendant CSSes.
+ * The total number of dying CSSes is tracked in
+ * css->cgroup->nr_dying_subsys[ssid].
+ * Protected by cgroup_mutex.
+ */
+ int nr_descendants;
+
+ /*
+ * A singly-linked list of css structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * css_rstat_flush().
+ *
+ * Protected by rstat_base_lock when css is cgroup::self.
+ * Protected by css->ss->rstat_ss_lock otherwise.
+ */
+ struct cgroup_subsys_state *rstat_flush_next;
};
/*
@@ -172,6 +280,14 @@ struct css_set {
/* reference count */
refcount_t refcount;
+ /*
+ * For a domain cgroup, the following points to self. If threaded,
+ * to the matching cset of the nearest domain ancestor. The
+ * dom_cset provides access to the domain cgroup and its csses to
+ * which domain level resource consumptions should be charged.
+ */
+ struct css_set *dom_cset;
+
/* the default cgroup associated with this css_set */
struct cgroup *dfl_cgrp;
@@ -182,17 +298,18 @@ struct css_set {
* Lists running through all tasks using this cgroup group.
* mg_tasks lists tasks which belong to this cset but are in the
* process of being migrated out or in. Protected by
- * css_set_rwsem, but, during migration, once tasks are moved to
+ * css_set_lock, but, during migration, once tasks are moved to
* mg_tasks, it can be read safely while holding cgroup_mutex.
*/
struct list_head tasks;
struct list_head mg_tasks;
+ struct list_head dying_tasks;
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
/*
- * On the default hierarhcy, ->subsys[ssid] may point to a css
+ * On the default hierarchy, ->subsys[ssid] may point to a css
* attached to an ancestor instead of the cgroup this css_set is
* associated with. The following node is anchored at
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
@@ -200,6 +317,10 @@ struct css_set {
*/
struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
+ /* all threaded csets whose ->dom_cset points to this cset */
+ struct list_head threaded_csets;
+ struct list_head threaded_csets_node;
+
/*
* List running through all cgroup groups in the same hash
* slot. Protected by css_set_lock
@@ -216,7 +337,8 @@ struct css_set {
* List of csets participating in the on-going migration either as
* source or destination. Protected by cgroup_mutex.
*/
- struct list_head mg_preload_node;
+ struct list_head mg_src_preload_node;
+ struct list_head mg_dst_preload_node;
struct list_head mg_node;
/*
@@ -237,6 +359,116 @@ struct css_set {
struct rcu_head rcu_head;
};
+struct cgroup_base_stat {
+ struct task_cputime cputime;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 forceidle_sum;
+#endif
+ u64 ntime;
+};
+
+/*
+ * rstat - cgroup scalable recursive statistics. Accounting is done
+ * per-cpu in css_rstat_cpu which is then lazily propagated up the
+ * hierarchy on reads.
+ *
+ * When a stat gets updated, the css_rstat_cpu and its ancestors are
+ * linked into the updated tree. On the following read, propagation only
+ * considers and consumes the updated tree. This makes reading O(the
+ * number of descendants which have been active since last read) instead of
+ * O(the total number of descendants).
+ *
+ * This is important because there can be a lot of (draining) cgroups which
+ * aren't active and stat may be read frequently. The combination can
+ * become very expensive. By propagating selectively, increasing reading
+ * frequency decreases the cost of each read.
+ *
+ * This struct hosts both the fields which implement the above -
+ * updated_children and updated_next.
+ */
+struct css_rstat_cpu {
+ /*
+ * Child cgroups with stat updates on this cpu since the last read
+ * are linked on the parent's ->updated_children through
+ * ->updated_next. updated_children is terminated by its container css.
+ */
+ struct cgroup_subsys_state *updated_children;
+ struct cgroup_subsys_state *updated_next; /* NULL if not on the list */
+
+ struct llist_node lnode; /* lockless list for update */
+ struct cgroup_subsys_state *owner; /* back pointer */
+};
+
+/*
+ * This struct hosts the fields which track basic resource statistics on
+ * top of it - bsync, bstat and last_bstat.
+ */
+struct cgroup_rstat_base_cpu {
+ /*
+ * ->bsync protects ->bstat. These are the only fields which get
+ * updated in the hot path.
+ */
+ struct u64_stats_sync bsync;
+ struct cgroup_base_stat bstat;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the global counters.
+ */
+ struct cgroup_base_stat last_bstat;
+
+ /*
+ * This field is used to record the cumulative per-cpu time of
+ * the cgroup and its descendants. Currently it can be read via
+ * eBPF/drgn etc, and we are still trying to determine how to
+ * expose it in the cgroupfs interface.
+ */
+ struct cgroup_base_stat subtree_bstat;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the per-cpu subtree_bstat.
+ */
+ struct cgroup_base_stat last_subtree_bstat;
+};
+
+struct cgroup_freezer_state {
+ /* Should the cgroup and its descendants be frozen. */
+ bool freeze;
+
+ /* Should the cgroup actually be frozen? */
+ bool e_freeze;
+
+ /* Fields below are protected by css_set_lock */
+
+ /* Number of frozen descendant cgroups */
+ int nr_frozen_descendants;
+
+ /*
+ * Number of tasks, which are counted as frozen:
+ * frozen, SIGSTOPped, and PTRACEd.
+ */
+ int nr_frozen_tasks;
+
+ /* Freeze time data consistency protection */
+ seqcount_spinlock_t freeze_seq;
+
+ /*
+ * Most recent time the cgroup was requested to freeze.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 freeze_start_nsec;
+
+ /*
+ * Total duration the cgroup has spent freezing.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 frozen_nsec;
+};
+
struct cgroup {
/* self css with NULL ->ss, points back to this cgroup */
struct cgroup_subsys_state self;
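The updated_children/updated_next scheme described above maintains a per-cpu "updated tree": writers link themselves and any unlinked ancestors toward the root, and readers consume only that tree, so a flush costs O(recently active descendants). A toy single-threaded model of the idea, with no per-cpu state or locking, and a linked flag where the kernel uses a self-terminated list:

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	struct node *updated_children; /* children with pending updates */
	struct node *updated_next;     /* link in parent's updated list */
	int linked;                    /* kernel: list terminated by self */
	long delta;                    /* pending (unflushed) contribution */
	long total;                    /* flushed value, subtree-inclusive */
};

/* Writer side: record a change, link unlinked ancestors toward the root. */
static void node_update(struct node *n, long v)
{
	n->delta += v;
	for (; n->parent; n = n->parent) {
		if (n->linked)
			break; /* this node and its ancestors are queued */
		n->linked = 1;
		n->updated_next = n->parent->updated_children;
		n->parent->updated_children = n;
	}
}

/* Reader side: consume only the updated tree, folding deltas upward. */
static void node_flush(struct node *n)
{
	while (n->updated_children) {
		struct node *c = n->updated_children;

		n->updated_children = c->updated_next;
		c->updated_next = NULL;
		c->linked = 0;
		node_flush(c);          /* pull the child's subtree first */
		n->delta += c->delta;   /* then propagate one level up */
		c->total += c->delta;
		c->delta = 0;
	}
}

int main(void)
{
	struct node root = { .name = "root" };
	struct node a   = { .name = "a",   .parent = &root };
	struct node b   = { .name = "b",   .parent = &root }; /* stays idle */
	struct node aa  = { .name = "a/a", .parent = &a };

	(void)b;              /* never updated, so never visited by flush */
	node_update(&aa, 5);  /* links a/a under a, and a under root */
	node_flush(&root);
	root.total += root.delta;
	root.delta = 0;

	printf("a/a=%ld a=%ld root=%ld\n", aa.total, a.total, root.total);
	return 0;
}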
@@ -244,39 +476,62 @@ struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
/*
- * idr allocated in-hierarchy ID.
- *
- * ID 0 is not used, the ID of the root cgroup is always 1, and a
- * new cgroup will be assigned with a smallest available ID.
- *
- * Allocating/Removing ID must be protected by cgroup_mutex.
- */
- int id;
-
- /*
* The depth this cgroup is at. The root is at depth zero and each
* step down the hierarchy increments the level. This along with
- * ancestor_ids[] can determine whether a given cgroup is a
+ * ancestors[] can determine whether a given cgroup is a
* descendant of another without traversing the hierarchy.
*/
int level;
+ /* Maximum allowed descent tree depth */
+ int max_depth;
+
+ /*
+ * Keep track of total numbers of visible and dying descent cgroups.
+ * Dying cgroups are cgroups which were deleted by a user,
+ * but are still existing because someone else is holding a reference.
+ * max_descendants is a maximum allowed number of descent cgroups.
+ *
+ * nr_descendants and nr_dying_descendants are protected
+ * by cgroup_mutex and css_set_lock. It's fine to read them holding
+ * any of cgroup_mutex and css_set_lock; for writing both locks
+ * should be held.
+ */
+ int nr_descendants;
+ int nr_dying_descendants;
+ int max_descendants;
+
/*
* Each non-empty css_set associated with this cgroup contributes
- * one to populated_cnt. All children with non-zero popuplated_cnt
- * of their own contribute one. The count is zero iff there's no
- * task in this cgroup or its subtree.
+ * one to nr_populated_csets. The counter is zero iff this cgroup
+ * doesn't have any tasks.
+ *
+ * All children which have non-zero nr_populated_csets and/or
+ * nr_populated_children of their own contribute one to either
+ * nr_populated_domain_children or nr_populated_threaded_children
+ * depending on their type. Each counter is zero iff all cgroups
+ * of the type in the subtree proper don't have any tasks.
*/
- int populated_cnt;
+ int nr_populated_csets;
+ int nr_populated_domain_children;
+ int nr_populated_threaded_children;
+
+ int nr_threaded_children; /* # of live threaded child cgroups */
+
+ /* sequence number for cgroup.kill, serialized by css_set_lock. */
+ unsigned int kill_seq;
struct kernfs_node *kn; /* cgroup kernfs entry */
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
struct cgroup_file events_file; /* handle for "cgroup.events" */
+ /* handles for "{cpu,memory,io,irq}.pressure" */
+ struct cgroup_file psi_files[NR_PSI_RESOURCES];
+
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
- * "cgroup.subtree_control" while ->child_ss_mask is the effective
+ * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
* one which may have more subsystems enabled. Controller knobs
* are made available iff it's enabled in ->subtree_control.
*/
@@ -288,6 +543,12 @@ struct cgroup {
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
+ /*
+ * Keep track of total number of dying CSSes at and below this cgroup.
+ * Protected by cgroup_mutex.
+ */
+ int nr_dying_subsys[CGROUP_SUBSYS_COUNT];
+
struct cgroup_root *root;
/*
@@ -306,6 +567,39 @@ struct cgroup {
struct list_head e_csets[CGROUP_SUBSYS_COUNT];
/*
+ * If !threaded, self. If threaded, it points to the nearest
+ * domain ancestor. Inside a threaded subtree, cgroups are exempt
+ * from process granularity and no-internal-task constraint.
+ * Domain level resource consumptions which aren't tied to a
+ * specific task are charged to the dom_cgrp.
+ */
+ struct cgroup *dom_cgrp;
+ struct cgroup *old_dom_cgrp; /* used while enabling threaded */
+
+ /*
+ * Depending on the context, this field is initialized via
+ * css_rstat_init() at different places:
+ *
+ * when cgroup is the root cgroup
+ * performed in cgroup_setup_root()
+ * otherwise
+ * performed in cgroup_create()
+ */
+ struct cgroup_rstat_base_cpu __percpu *rstat_base_cpu;
+
+ /*
+ * Add padding to keep the read mostly rstat per-cpu pointer on a
+ * different cacheline than the following *bstat fields which can have
+ * frequent updates.
+ */
+ CACHELINE_PADDING(_pad_);
+
+ /* cgroup basic resource statistics */
+ struct cgroup_base_stat last_bstat;
+ struct cgroup_base_stat bstat;
+ struct prev_cputime prev_cputime; /* for printing out cputime */
+
+ /*
* list of pidlists, up to two for each namespace (one for procs, one
* for tasks); created on demand.
*/
@@ -318,11 +612,21 @@ struct cgroup {
/* used to schedule release agent */
struct work_struct release_agent_work;
+ /* used to track pressure stalls */
+ struct psi_group *psi;
+
/* used to store eBPF programs */
struct cgroup_bpf bpf;
- /* ids of the ancestors at each level including self */
- int ancestor_ids[];
+ /* Used to store internal freezer state */
+ struct cgroup_freezer_state freezer;
+
+#ifdef CONFIG_BPF_SYSCALL
+ struct bpf_local_storage __rcu *bpf_cgrp_storage;
+#endif
+
+ /* All ancestors including self */
+ struct cgroup *ancestors[];
};
/*
@@ -339,24 +643,26 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
- /* The root cgroup. Root is destroyed on its release. */
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+ struct rcu_head rcu; /* Must be near the top */
+
+ /*
+ * The root cgroup. The containing cgroup_root will be destroyed on its
+ * release. cgrp->ancestors[0] overflows into the following field,
+ * so cgrp_ancestor_storage must immediately follow.
+ */
struct cgroup cgrp;
- /* for cgrp->ancestor_ids[0] */
- int cgrp_ancestor_id_storage;
+ /* must follow cgrp for cgrp->ancestors[0], see above */
+ struct cgroup *cgrp_ancestor_storage;
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
/* Hierarchy-specific flags */
unsigned int flags;
- /* IDs for cgroups in this hierarchy */
- struct idr cgroup_idr;
-
/* The path to use for release notifications. */
char release_agent_path[PATH_MAX];
@@ -373,9 +679,8 @@ struct cgroup_root {
*/
struct cftype {
/*
- * By convention, the name should begin with the name of the
- * subsystem, followed by a period. Zero length string indicates
- * end of cftype array.
+ * Name of the subsystem is prepended in cgroup_file_name().
+ * Zero length string indicates end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
unsigned long private;
@@ -448,14 +753,15 @@ struct cftype {
ssize_t (*write)(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ __poll_t (*poll)(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
+
struct lock_class_key lockdep_key;
-#endif
};
/*
* Control Group subsystem type.
- * See Documentation/cgroups/cgroups.txt for details
+ * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
@@ -464,16 +770,22 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_killed)(struct cgroup_subsys_state *css);
+ void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
+ int (*css_extra_stat_show)(struct seq_file *seq,
+ struct cgroup_subsys_state *css);
+ int (*css_local_stat_show)(struct seq_file *seq,
+ struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
- void (*post_attach)(void);
- int (*can_fork)(struct task_struct *task);
- void (*cancel_fork)(struct task_struct *task);
+ int (*can_fork)(struct task_struct *task,
+ struct css_set *cset);
+ void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
void (*fork)(struct task_struct *task);
void (*exit)(struct task_struct *task);
- void (*free)(struct task_struct *task);
+ void (*release)(struct task_struct *task);
void (*bind)(struct cgroup_subsys_state *root_css);
bool early_init:1;
@@ -492,21 +804,18 @@ struct cgroup_subsys {
bool implicit_on_dfl:1;
/*
- * If %false, this subsystem is properly hierarchical -
- * configuration, resource accounting and restriction on a parent
- * cgroup cover those of its children. If %true, hierarchy support
- * is broken in some ways - some subsystems ignore hierarchy
- * completely while others are only implemented half-way.
+ * If %true, the controller supports threaded mode on the default
+ * hierarchy. In a threaded subtree, both the process-granularity and
+ * no-internal-process constraints are ignored and threaded
+ * controllers should be able to handle that.
*
- * It's now disallowed to create nested cgroups if the subsystem is
- * broken and cgroup core will emit a warning message on such
- * cases. Eventually, all subsystems will be made properly
- * hierarchical and this will go away.
+ * Note that as an implicit controller is automatically enabled on
+ * all cgroups on the default hierarchy, it should also be
+ * threaded. implicit && !threaded is not supported.
*/
- bool broken_hierarchy:1;
- bool warned_broken_hierarchy:1;
+ bool threaded:1;
- /* the following two fields are initialized automtically during boot */
+ /* the following two fields are initialized automatically during boot */
int id;
const char *name;
@@ -540,20 +849,32 @@ struct cgroup_subsys {
* specifies the mask of subsystems that this one depends on.
*/
unsigned int depends_on;
+
+ spinlock_t rstat_ss_lock;
+ struct llist_head __percpu *lhead; /* lockless update list head */
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+extern bool cgroup_enable_per_threadgroup_rwsem;
+
+struct cgroup_of_peak {
+ unsigned long value;
+ struct list_head list;
+};
/**
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
*
* Allows cgroup operations to synchronize against threadgroup changes
- * using a percpu_rw_semaphore.
+ * using a global percpu_rw_semaphore and a per threadgroup rw_semaphore when
+ * favordynmods is on. See the comment above CGRP_ROOT_FAVOR_DYNMODS definition.
*/
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
percpu_down_read(&cgroup_threadgroup_rwsem);
+ if (cgroup_enable_per_threadgroup_rwsem)
+ down_read(&tsk->signal->cgroup_threadgroup_rwsem);
}
/**
@@ -564,6 +885,8 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
*/
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
+ if (cgroup_enable_per_threadgroup_rwsem)
+ up_read(&tsk->signal->cgroup_threadgroup_rwsem);
percpu_up_read(&cgroup_threadgroup_rwsem);
}
@@ -586,104 +909,53 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
- * On legacy hierarchies, net_prio and net_cls controllers directly set
- * attributes on each sock which can then be tested by the network layer.
- * On the default hierarchy, each sock is associated with the cgroup it was
- * created in and the networking layer can match the cgroup directly.
- *
- * To avoid carrying all three cgroup related fields separately in sock,
- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
- * On boot, sock_cgroup_data records the cgroup that the sock was created
- * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overriden to carry prioidx and/or
- * classid. The two modes are distinguished by whether the lowest bit is
- * set. Clear bit indicates cgroup pointer while set bit prioidx and
- * classid.
- *
- * While userland may start using net_prio or net_cls at any time, once
- * either is used, cgroup2 matching no longer works. There is no reason to
- * mix the two and this is in line with how legacy and v2 compatibility is
- * handled. On mode switch, cgroup references which are already being
- * pointed to by socks may be leaked. While this can be remedied by adding
- * synchronization around sock_cgroup_data, given that the number of leaked
- * cgroups is bound and highly unlikely to be high, this seems to be the
- * better trade-off.
+ * On legacy hierarchies, net_prio and net_cls controllers directly
+ * set attributes on each sock which can then be tested by the network
+ * layer. On the default hierarchy, each sock is associated with the
+ * cgroup it was created in and the networking layer can match the
+ * cgroup directly.
*/
struct sock_cgroup_data {
- union {
-#ifdef __LITTLE_ENDIAN
- struct {
- u8 is_data;
- u8 padding;
- u16 prioidx;
- u32 classid;
- } __packed;
-#else
- struct {
- u32 classid;
- u16 prioidx;
- u8 padding;
- u8 is_data;
- } __packed;
+ struct cgroup *cgroup; /* v2 */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ u32 classid; /* v1 */
+#endif
+#ifdef CONFIG_CGROUP_NET_PRIO
+ u16 prioidx; /* v1 */
#endif
- u64 val;
- };
};
-/*
- * There's a theoretical window where the following accessors race with
- * updaters and return part of the previous pointer as the prioidx or
- * classid. Such races are short-lived and the result isn't critical.
- */
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
- /* fallback to 1 which is always the ID of the root cgroup */
- return (skcd->is_data & 1) ? skcd->prioidx : 1;
+#ifdef CONFIG_CGROUP_NET_PRIO
+ return READ_ONCE(skcd->prioidx);
+#else
+ return 1;
+#endif
}
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+#ifdef CONFIG_CGROUP_NET_CLASSID
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
- /* fallback to 0 which is the unconfigured default classid */
- return (skcd->is_data & 1) ? skcd->classid : 0;
+ return READ_ONCE(skcd->classid);
}
+#endif
-/*
- * If invoked concurrently, the updaters may clobber each other. The
- * caller is responsible for synchronization.
- */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.prioidx = prioidx;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_PRIO
+ WRITE_ONCE(skcd->prioidx, prioidx);
+#endif
}
+#ifdef CONFIG_CGROUP_NET_CLASSID
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_classid(&skcd_buf) == classid)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.classid = classid;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+ WRITE_ONCE(skcd->classid, classid);
}
+#endif
#else /* CONFIG_SOCK_CGROUP_DATA */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 710a005c6b7a..bc892e3b37ee 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
@@ -9,8 +10,8 @@
*/
#include <linux/sched.h>
-#include <linux/cpumask.h>
#include <linux/nodemask.h>
+#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
@@ -18,17 +19,20 @@
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
+#include <linux/notifier.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
+#include <linux/kernel_stat.h>
#include <linux/cgroup-defs.h>
+#include <linux/cgroup_namespace.h>
-#ifdef CONFIG_CGROUPS
+struct kernel_clone_args;
/*
- * All weight knobs on the default hierarhcy should use the following min,
+ * All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
* MIN and MAX and allows 100x to be expressed in both directions.
*/
@@ -36,24 +40,44 @@
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
+#ifdef CONFIG_CGROUPS
+
+enum css_task_iter_flags {
+ CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
+ CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
+ CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */
+};
+
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
+ unsigned int flags;
struct list_head *cset_pos;
struct list_head *cset_head;
+ struct list_head *tcset_pos;
+ struct list_head *tcset_head;
+
struct list_head *task_pos;
- struct list_head *tasks_head;
- struct list_head *mg_tasks_head;
+ struct list_head *cur_tasks_head;
struct css_set *cur_cset;
+ struct css_set *cur_dcset;
struct task_struct *cur_task;
struct list_head iters_node; /* css_set->task_iters */
};
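For reference, the iteration protocol these fields back is the usual start/next/end triple; a sketch assuming the caller already holds a reference on @css (the flags argument is added to css_task_iter_start() later in this diff):

    struct css_task_iter it;
    struct task_struct *task;

    css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
    while ((task = css_task_iter_next(&it)))
            pr_info("leader pid %d\n", task_pid_nr(task));
    css_task_iter_end(&it);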
+enum cgroup_lifetime_events {
+ CGROUP_LIFETIME_ONLINE,
+ CGROUP_LIFETIME_OFFLINE,
+};
+
+extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
+extern spinlock_t css_set_lock;
+extern struct blocking_notifier_head cgroup_lifetime_notifier;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -81,6 +105,8 @@ extern struct css_set init_css_set;
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
@@ -88,30 +114,39 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
+struct cgroup *cgroup_v1v2_get_from_fd(int fd);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
+void cgroup_file_show(struct cgroup_file *cfile, bool show);
-int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
void cgroup_fork(struct task_struct *p);
-extern int cgroup_can_fork(struct task_struct *p);
-extern void cgroup_cancel_fork(struct task_struct *p);
-extern void cgroup_post_fork(struct task_struct *p);
-void cgroup_exit(struct task_struct *p);
-void cgroup_free(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
+extern void cgroup_cancel_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
+extern void cgroup_post_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
+void cgroup_task_exit(struct task_struct *p);
+void cgroup_task_dead(struct task_struct *p);
+void cgroup_task_release(struct task_struct *p);
+void cgroup_task_free(struct task_struct *p);
int cgroup_init_early(void);
int cgroup_init(void);
+int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
+
/*
* Iteration helpers and macros.
*/
@@ -129,7 +164,7 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
-void css_task_iter_start(struct cgroup_subsys_state *css,
+void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
@@ -283,64 +318,22 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/
-/**
- * css_get - obtain a reference on the specified css
- * @css: target css
- *
- * The caller must already have a reference.
- */
-static inline void css_get(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get(&css->refcnt);
-}
-
-/**
- * css_get_many - obtain references on the specified css
- * @css: target css
- * @n: number of references to get
- *
- * The caller must already have a reference.
- */
-static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get_many(&css->refcnt, n);
-}
+#ifdef CONFIG_DEBUG_CGROUP_REF
+void css_get(struct cgroup_subsys_state *css);
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
+bool css_tryget(struct cgroup_subsys_state *css);
+bool css_tryget_online(struct cgroup_subsys_state *css);
+void css_put(struct cgroup_subsys_state *css);
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
+#else
+#define CGROUP_REF_FN_ATTRS static inline
+#define CGROUP_REF_EXPORT(fn)
+#include <linux/cgroup_refcnt.h>
+#endif
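The other half of this construct lives in kernel/cgroup/cgroup.c, which is expected to re-include the header with the two macros redefined so the helpers become out-of-line, exported symbols under CONFIG_DEBUG_CGROUP_REF; roughly (the exact attributes are an assumption):

    #define CGROUP_REF_FN_ATTRS     noinline
    #define CGROUP_REF_EXPORT(fn)   EXPORT_SYMBOL_GPL(fn);
    #include <linux/cgroup_refcnt.h>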
-/**
- * css_tryget - try to obtain a reference on the specified css
- * @css: target css
- *
- * Obtain a reference on @css unless it already has reached zero and is
- * being released. This function doesn't care whether @css is on or
- * offline. The caller naturally needs to ensure that @css is accessible
- * but doesn't have to be holding a reference on it - IOW, RCU protected
- * access is good enough for this function. Returns %true if a reference
- * count was successfully obtained; %false otherwise.
- */
-static inline bool css_tryget(struct cgroup_subsys_state *css)
+static inline u64 cgroup_id(const struct cgroup *cgrp)
{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget(&css->refcnt);
- return true;
-}
-
-/**
- * css_tryget_online - try to obtain a reference on the specified css if online
- * @css: target css
- *
- * Obtain a reference on @css if it's online. The caller naturally needs
- * to ensure that @css is accessible but doesn't have to be holding a
- * reference on it - IOW, RCU protected access is good enough for this
- * function. Returns %true if a reference count was successfully obtained;
- * %false otherwise.
- */
-static inline bool css_tryget_online(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget_live(&css->refcnt);
- return true;
+ return cgrp->kn->id;
}
/**
@@ -360,32 +353,33 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
*/
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
- return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+ return css->flags & CSS_DYING;
}
-/**
- * css_put - put a css reference
- * @css: target css
- *
- * Put a reference obtained via css_get() and css_tryget_online().
- */
-static inline void css_put(struct cgroup_subsys_state *css)
+static inline bool css_is_online(struct cgroup_subsys_state *css)
{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put(&css->refcnt);
+ return css->flags & CSS_ONLINE;
}
-/**
- * css_put_many - put css references
- * @css: target css
- * @n: number of references to put
- *
- * Put references obtained via css_get() and css_tryget_online().
- */
-static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+static inline bool css_is_self(struct cgroup_subsys_state *css)
+{
+ if (css == &css->cgroup->self) {
+ /* cgroup::self should not have subsystem association */
+ WARN_ON(css->ss != NULL);
+ return true;
+ }
+
+ return false;
+}
+
+static inline void cgroup_get(struct cgroup *cgrp)
+{
+ css_get(&cgrp->self);
+}
+
+static inline bool cgroup_tryget(struct cgroup *cgrp)
{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put_many(&css->refcnt, n);
+ return css_tryget(&cgrp->self);
}
static inline void cgroup_put(struct cgroup *cgrp)
@@ -393,6 +387,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
css_put(&cgrp->self);
}
+extern struct mutex cgroup_mutex;
+
+static inline void cgroup_lock(void)
+{
+ mutex_lock(&cgroup_mutex);
+}
+
+static inline void cgroup_unlock(void)
+{
+ mutex_unlock(&cgroup_mutex);
+}
+
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -407,10 +413,9 @@ static inline void cgroup_put(struct cgroup *cgrp)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern struct mutex cgroup_mutex;
-extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
+ rcu_read_lock_sched_held() || \
lockdep_is_held(&cgroup_mutex) || \
lockdep_is_held(&css_set_lock) || \
((task)->flags & PF_EXITING) || (__c))
@@ -462,7 +467,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
*
* Find the css for the (@task, @subsys_id) combination, increment a
* reference on and return it. This function is guaranteed to return a
- * valid css.
+ * valid css. The returned css may already have been offlined.
*/
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
@@ -472,7 +477,13 @@ task_get_css(struct task_struct *task, int subsys_id)
rcu_read_lock();
while (true) {
css = task_css(task, subsys_id);
- if (likely(css_tryget_online(css)))
+ /*
+ * Can't use css_tryget_online() here. A task which has
+ * PF_EXITING set may stay associated with an offline css.
+ * If such task calls this function, css_tryget_online()
+ * will keep failing.
+ */
+ if (likely(css_tryget(css)))
break;
cpu_relax();
}
@@ -500,6 +511,20 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
return task_css(task, subsys_id)->cgroup;
}
+static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
+{
+ return task_css_set(task)->dfl_cgrp;
+}
+
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+ struct cgroup_subsys_state *parent_css = cgrp->self.parent;
+
+ if (parent_css)
+ return container_of(parent_css, struct cgroup, self);
+ return NULL;
+}
+
/**
* cgroup_is_descendant - test ancestry
* @cgrp: the cgroup to be tested
@@ -514,7 +539,26 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
- return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
+ return cgrp->ancestors[ancestor->level] == ancestor;
+}
+
+/**
+ * cgroup_ancestor - find ancestor of cgroup
+ * @cgrp: cgroup to find ancestor of
+ * @ancestor_level: level of ancestor to find starting from root
+ *
+ * Find ancestor of cgroup at specified level starting from root if it exists
+ * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
+ * @ancestor_level.
+ *
+ * This function is safe to call as long as @cgrp is accessible.
+ */
+static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
+ int ancestor_level)
+{
+ if (ancestor_level < 0 || ancestor_level > cgrp->level)
+ return NULL;
+ return cgrp->ancestors[ancestor_level];
}
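A short sketch combining the two helpers (variable names assumed):

    struct cgroup *cgrp = task_dfl_cgroup(task);
    struct cgroup *top = cgroup_ancestor(cgrp, 1);  /* NULL if cgrp is the root */

    if (top)
            pr_debug("under top-level cgroup %llu\n", cgroup_id(top));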
/**
@@ -537,13 +581,14 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
- return cgrp->populated_cnt;
+ return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
+ cgrp->nr_populated_threaded_children;
}
/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
- return cgrp->kn->ino;
+ return kernfs_ino(cgrp->kn);
}
/* cft/css accessors for cftype->write() operation */
@@ -590,6 +635,8 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
+bool cgroup_psi_enabled(void);
+
static inline void cgroup_init_kthreadd(void)
{
/*
@@ -609,120 +656,187 @@ static inline void cgroup_kthread_ready(void)
current->no_cgroup_migration = 0;
}
+void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
+struct cgroup *__cgroup_get_from_id(u64 id);
+struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
struct cgroup;
+static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
+static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline void cgroup_lock(void) {}
+static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry) { return -EINVAL; }
static inline void cgroup_fork(struct task_struct *p) {}
-static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
-static inline void cgroup_cancel_fork(struct task_struct *p) {}
-static inline void cgroup_post_fork(struct task_struct *p) {}
-static inline void cgroup_exit(struct task_struct *p) {}
-static inline void cgroup_free(struct task_struct *p) {}
+static inline int cgroup_can_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) { return 0; }
+static inline void cgroup_cancel_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) {}
+static inline void cgroup_post_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) {}
+static inline void cgroup_task_exit(struct task_struct *p) {}
+static inline void cgroup_task_dead(struct task_struct *p) {}
+static inline void cgroup_task_release(struct task_struct *p) {}
+static inline void cgroup_task_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+ return NULL;
+}
+
+static inline bool cgroup_psi_enabled(void)
+{
+ return false;
+}
+
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
{
return true;
}
+
+static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
+{}
#endif /* !CONFIG_CGROUPS */
+#ifdef CONFIG_CGROUPS
+/*
+ * cgroup scalable recursive statistics.
+ */
+void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
+void css_rstat_flush(struct cgroup_subsys_state *css);
+
+/*
+ * Basic resource stats.
+ */
+#ifdef CONFIG_CGROUP_CPUACCT
+void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+static inline void cpuacct_account_field(struct task_struct *tsk, int index,
+ u64 val) {}
+#endif
+
+void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
+void __cgroup_account_cputime_field(struct cgroup *cgrp,
+ enum cpu_usage_stat index, u64 delta_exec);
+
+static inline void cgroup_account_cputime(struct task_struct *task,
+ u64 delta_exec)
+{
+ struct cgroup *cgrp;
+
+ cpuacct_charge(task, delta_exec);
+
+ cgrp = task_dfl_cgroup(task);
+ if (cgroup_parent(cgrp))
+ __cgroup_account_cputime(cgrp, delta_exec);
+}
+
+static inline void cgroup_account_cputime_field(struct task_struct *task,
+ enum cpu_usage_stat index,
+ u64 delta_exec)
+{
+ struct cgroup *cgrp;
+
+ cpuacct_account_field(task, index, delta_exec);
+
+ cgrp = task_dfl_cgroup(task);
+ if (cgroup_parent(cgrp))
+ __cgroup_account_cputime_field(cgrp, index, delta_exec);
+}
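As a usage sketch, the scheduler's accounting paths are expected to feed both helpers; the wrapper below is hypothetical (update_curr() and the cputime code are the real callers):

    static void account_delta_sketch(struct task_struct *curr, u64 delta_exec)
    {
            /* charges cpuacct (v1) and the cgroup2 basic cpu stats in one go */
            cgroup_account_cputime(curr, delta_exec);
            cgroup_account_cputime_field(curr, CPUTIME_SYSTEM, delta_exec);
    }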
+
+#else /* CONFIG_CGROUPS */
+
+static inline void cgroup_account_cputime(struct task_struct *task,
+ u64 delta_exec) {}
+static inline void cgroup_account_cputime_field(struct task_struct *task,
+ enum cpu_usage_stat index,
+ u64 delta_exec) {}
+
+#endif /* CONFIG_CGROUPS */
+
/*
* sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
* definition in cgroup-defs.h.
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-extern spinlock_t cgroup_sk_update_lock;
-#endif
-
-void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
- unsigned long v;
-
- /*
- * @skcd->val is 64bit but the following is safe on 32bit too as we
- * just need the lower ulong to be written and read atomically.
- */
- v = READ_ONCE(skcd->val);
-
- if (v & 1)
- return &cgrp_dfl_root.cgrp;
-
- return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
-#else
- return (struct cgroup *)(unsigned long)skcd->val;
-#endif
+ return skcd->cgroup;
}
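With the overloading gone, cgroup2 socket matching reduces to a plain descendant test; a sketch (helper name hypothetical):

    static bool sock_under_cgroup_sketch(struct sock *sk, struct cgroup *ancestor)
    {
            return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
                                        ancestor);
    }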
#else /* CONFIG_SOCK_CGROUP_DATA */
static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_SOCK_CGROUP_DATA */
-struct cgroup_namespace {
- refcount_t count;
- struct ns_common ns;
- struct user_namespace *user_ns;
- struct ucounts *ucounts;
- struct css_set *root_cset;
-};
-
-extern struct cgroup_namespace init_cgroup_ns;
-
#ifdef CONFIG_CGROUPS
-void free_cgroup_ns(struct cgroup_namespace *ns);
+void cgroup_enter_frozen(void);
+void cgroup_leave_frozen(bool always_leave);
+void cgroup_update_frozen(struct cgroup *cgrp);
+void cgroup_freeze(struct cgroup *cgrp, bool freeze);
+void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
+ struct cgroup *dst);
-struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
- struct user_namespace *user_ns,
- struct cgroup_namespace *old_ns);
-
-int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
- struct cgroup_namespace *ns);
+static inline bool cgroup_task_frozen(struct task_struct *task)
+{
+ return task->frozen;
+}
#else /* !CONFIG_CGROUPS */
-static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
-static inline struct cgroup_namespace *
-copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
- struct cgroup_namespace *old_ns)
+static inline void cgroup_enter_frozen(void) { }
+static inline void cgroup_leave_frozen(bool always_leave) { }
+static inline bool cgroup_task_frozen(struct task_struct *task)
{
- return old_ns;
+ return false;
}
#endif /* !CONFIG_CGROUPS */
-static inline void get_cgroup_ns(struct cgroup_namespace *ns)
+#ifdef CONFIG_CGROUP_BPF
+static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
- if (ns)
- refcount_inc(&ns->count);
+ percpu_ref_get(&cgrp->bpf.refcnt);
}
-static inline void put_cgroup_ns(struct cgroup_namespace *ns)
+static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
- if (ns && refcount_dec_and_test(&ns->count))
- free_cgroup_ns(ns);
+ percpu_ref_put(&cgrp->bpf.refcnt);
}
+#else /* CONFIG_CGROUP_BPF */
+
+static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
+
+#endif /* CONFIG_CGROUP_BPF */
+
+struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+
+struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_api.h b/include/linux/cgroup_api.h
new file mode 100644
index 000000000000..d0cfe8025111
--- /dev/null
+++ b/include/linux/cgroup_api.h
@@ -0,0 +1 @@
+#include <linux/cgroup.h>
diff --git a/include/linux/cgroup_dmem.h b/include/linux/cgroup_dmem.h
new file mode 100644
index 000000000000..dd4869f1d736
--- /dev/null
+++ b/include/linux/cgroup_dmem.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _CGROUP_DMEM_H
+#define _CGROUP_DMEM_H
+
+#include <linux/types.h>
+#include <linux/llist.h>
+
+struct dmem_cgroup_pool_state;
+
+/* Opaque definition of a cgroup region, used internally */
+struct dmem_cgroup_region;
+
+#if IS_ENABLED(CONFIG_CGROUP_DMEM)
+struct dmem_cgroup_region *dmem_cgroup_register_region(u64 size, const char *name_fmt, ...) __printf(2,3);
+void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region);
+int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size,
+ struct dmem_cgroup_pool_state **ret_pool,
+ struct dmem_cgroup_pool_state **ret_limit_pool);
+void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size);
+bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool,
+ struct dmem_cgroup_pool_state *test_pool,
+ bool ignore_low, bool *ret_hit_low);
+
+void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool);
+#else
+static inline __printf(2,3) struct dmem_cgroup_region *
+dmem_cgroup_register_region(u64 size, const char *name_fmt, ...)
+{
+ return NULL;
+}
+
+static inline void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region)
+{ }
+
+static inline int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size,
+ struct dmem_cgroup_pool_state **ret_pool,
+ struct dmem_cgroup_pool_state **ret_limit_pool)
+{
+ *ret_pool = NULL;
+
+ if (ret_limit_pool)
+ *ret_limit_pool = NULL;
+
+ return 0;
+}
+
+static inline void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size)
+{ }
+
+static inline
+bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool,
+ struct dmem_cgroup_pool_state *test_pool,
+ bool ignore_low, bool *ret_hit_low)
+{
+ return true;
+}
+
+static inline void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool)
+{ }
+
+#endif
+#endif /* _CGROUP_DMEM_H */
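A sketch of the intended calling convention for a driver-owned region (names, sizes and error handling are assumptions, not taken from an in-tree user):

    struct dmem_cgroup_region *region;
    struct dmem_cgroup_pool_state *pool;
    int ret;

    region = dmem_cgroup_register_region(vram_size, "drm/%s", dev_name);
    if (IS_ERR(region))
            return PTR_ERR(region);

    ret = dmem_cgroup_try_charge(region, alloc_size, &pool, NULL);
    if (ret)
            return ret;
    /* ... back the allocation ... */
    dmem_cgroup_uncharge(pool, alloc_size);     /* on free */
    dmem_cgroup_pool_state_put(pool);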
diff --git a/include/linux/cgroup_namespace.h b/include/linux/cgroup_namespace.h
new file mode 100644
index 000000000000..78a8418558a4
--- /dev/null
+++ b/include/linux/cgroup_namespace.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CGROUP_NAMESPACE_H
+#define _LINUX_CGROUP_NAMESPACE_H
+
+#include <linux/ns_common.h>
+
+struct cgroup_namespace {
+ struct ns_common ns;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct css_set *root_cset;
+};
+
+extern struct cgroup_namespace init_cgroup_ns;
+
+#ifdef CONFIG_CGROUPS
+
+static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct cgroup_namespace, ns);
+}
+
+void free_cgroup_ns(struct cgroup_namespace *ns);
+
+struct cgroup_namespace *copy_cgroup_ns(u64 flags,
+ struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns);
+
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ struct cgroup_namespace *ns);
+
+static inline void get_cgroup_ns(struct cgroup_namespace *ns)
+{
+ ns_ref_inc(ns);
+}
+
+static inline void put_cgroup_ns(struct cgroup_namespace *ns)
+{
+ if (ns_ref_put(ns))
+ free_cgroup_ns(ns);
+}
+
+#else /* !CONFIG_CGROUPS */
+
+static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline struct cgroup_namespace *
+copy_cgroup_ns(u64 flags, struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns)
+{
+ return old_ns;
+}
+
+static inline void get_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline void put_cgroup_ns(struct cgroup_namespace *ns) { }
+
+#endif /* !CONFIG_CGROUPS */
+
+#endif /* _LINUX_CGROUP_NAMESPACE_H */
diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h
index e94290b29e99..80edae03c313 100644
--- a/include/linux/cgroup_rdma.h
+++ b/include/linux/cgroup_rdma.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
- *
- * This file is subject to the terms and conditions of version 2 of the GNU
- * General Public License. See the file COPYING in the main directory of the
- * Linux distribution for more details.
*/
#ifndef _CGROUP_RDMA_H
@@ -39,7 +36,7 @@ struct rdmacg_device {
* APIs for RDMA/IB stack to publish when a device wants to
* participate in resource accounting
*/
-int rdmacg_register_device(struct rdmacg_device *device);
+void rdmacg_register_device(struct rdmacg_device *device);
void rdmacg_unregister_device(struct rdmacg_device *device);
/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */
diff --git a/include/linux/cgroup_refcnt.h b/include/linux/cgroup_refcnt.h
new file mode 100644
index 000000000000..2eea0a69ecfc
--- /dev/null
+++ b/include/linux/cgroup_refcnt.h
@@ -0,0 +1,96 @@
+/**
+ * css_get - obtain a reference on the specified css
+ * @css: target css
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_get)
+
+/**
+ * css_get_many - obtain references on the specified css
+ * @css: target css
+ * @n: number of references to get
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_get_many)
+
+/**
+ * css_tryget - try to obtain a reference on the specified css
+ * @css: target css
+ *
+ * Obtain a reference on @css unless it already has reached zero and is
+ * being released. This function doesn't care whether @css is on or
+ * offline. The caller naturally needs to ensure that @css is accessible
+ * but doesn't have to be holding a reference on it - IOW, RCU protected
+ * access is good enough for this function. Returns %true if a reference
+ * count was successfully obtained; %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget)
+
+/**
+ * css_tryget_online - try to obtain a reference on the specified css if online
+ * @css: target css
+ *
+ * Obtain a reference on @css if it's online. The caller naturally needs
+ * to ensure that @css is accessible but doesn't have to be holding a
+ * reference on it - IOW, RCU protected access is good enough for this
+ * function. Returns %true if a reference count was successfully obtained;
+ * %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget_online(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget_live(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget_online)
+
+/**
+ * css_put - put a css reference
+ * @css: target css
+ *
+ * Put a reference obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_put)
+
+/**
+ * css_put_many - put css references
+ * @css: target css
+ * @n: number of references to put
+ *
+ * Put references obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_put_many)
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index d0e597c44585..3fd0bcbf3080 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* List of cgroup subsystems.
*
@@ -60,6 +61,14 @@ SUBSYS(pids)
SUBSYS(rdma)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_MISC)
+SUBSYS(misc)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_DMEM)
+SUBSYS(dmem)
+#endif
+
/*
* The following subsystems are not supported on the default hierarchy.
*/
diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h
index 90f2471dc6f2..b3233e8202f9 100644
--- a/include/linux/circ_buf.h
+++ b/include/linux/circ_buf.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * See Documentation/circular-buffers.txt for more information.
+ * See Documentation/core-api/circular-buffers.rst for more information.
*/
#ifndef _LINUX_CIRC_BUF_H
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
deleted file mode 100644
index bbb3712dd892..000000000000
--- a/include/linux/cleancache.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#ifndef _LINUX_CLEANCACHE_H
-#define _LINUX_CLEANCACHE_H
-
-#include <linux/fs.h>
-#include <linux/exportfs.h>
-#include <linux/mm.h>
-
-#define CLEANCACHE_NO_POOL -1
-#define CLEANCACHE_NO_BACKEND -2
-#define CLEANCACHE_NO_BACKEND_SHARED -3
-
-#define CLEANCACHE_KEY_MAX 6
-
-/*
- * cleancache requires every file with a page in cleancache to have a
- * unique key unless/until the file is removed/truncated. For some
- * filesystems, the inode number is unique, but for "modern" filesystems
- * an exportable filehandle is required (see exportfs.h)
- */
-struct cleancache_filekey {
- union {
- ino_t ino;
- __u32 fh[CLEANCACHE_KEY_MAX];
- u32 key[CLEANCACHE_KEY_MAX];
- } u;
-};
-
-struct cleancache_ops {
- int (*init_fs)(size_t);
- int (*init_shared_fs)(uuid_t *uuid, size_t);
- int (*get_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*put_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
- void (*invalidate_inode)(int, struct cleancache_filekey);
- void (*invalidate_fs)(int);
-};
-
-extern int cleancache_register_ops(const struct cleancache_ops *ops);
-extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(struct super_block *);
-extern int __cleancache_get_page(struct page *);
-extern void __cleancache_put_page(struct page *);
-extern void __cleancache_invalidate_page(struct address_space *, struct page *);
-extern void __cleancache_invalidate_inode(struct address_space *);
-extern void __cleancache_invalidate_fs(struct super_block *);
-
-#ifdef CONFIG_CLEANCACHE
-#define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
-{
- return mapping->host->i_sb->cleancache_poolid >= 0;
-}
-static inline bool cleancache_fs_enabled(struct page *page)
-{
- return cleancache_fs_enabled_mapping(page->mapping);
-}
-#else
-#define cleancache_enabled (0)
-#define cleancache_fs_enabled(_page) (0)
-#define cleancache_fs_enabled_mapping(_page) (0)
-#endif
-
-/*
- * The shim layer provided by these inline functions allows the compiler
- * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
- * is disabled, to a single global variable check if CONFIG_CLEANCACHE
- * is enabled but no cleancache "backend" has dynamically enabled it,
- * and, for the most frequent cleancache ops, to a single global variable
- * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
- * and a cleancache backend has dynamically enabled cleancache, but the
- * filesystem referenced by that cleancache op has not enabled cleancache.
- * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
- * no measurable performance impact.
- */
-
-static inline void cleancache_init_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_fs(sb);
-}
-
-static inline void cleancache_init_shared_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_shared_fs(sb);
-}
-
-static inline int cleancache_get_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- return __cleancache_get_page(page);
- return -1;
-}
-
-static inline void cleancache_put_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- __cleancache_put_page(page);
-}
-
-static inline void cleancache_invalidate_page(struct address_space *mapping,
- struct page *page)
-{
- /* careful... page->mapping is NULL sometimes when this is called */
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_page(mapping, page);
-}
-
-static inline void cleancache_invalidate_inode(struct address_space *mapping)
-{
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_inode(mapping);
-}
-
-static inline void cleancache_invalidate_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_invalidate_fs(sb);
-}
-
-#endif /* _LINUX_CLEANCACHE_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
new file mode 100644
index 000000000000..8d41b917c77d
--- /dev/null
+++ b/include/linux/cleanup.h
@@ -0,0 +1,534 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLEANUP_H
+#define _LINUX_CLEANUP_H
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/args.h>
+
+/**
+ * DOC: scope-based cleanup helpers
+ *
+ * The "goto error" pattern is notorious for introducing subtle resource
+ * leaks. It is tedious and error prone to add new resource acquisition
+ * constraints into code paths that already have several unwind
+ * conditions. The "cleanup" helpers enable the compiler to help with
+ * this tedium and can aid in maintaining LIFO (last in first out)
+ * unwind ordering to avoid unintentional leaks.
+ *
+ * As drivers make up the majority of the kernel code base, here is an
+ * example of using these helpers to clean up PCI drivers. The targets of
+ * the cleanups are occasions where a goto is used to unwind a device
+ * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
+ * before returning.
+ *
+ * The DEFINE_FREE() macro can arrange for PCI device references to be
+ * dropped when the associated variable goes out of scope::
+ *
+ * DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ * ...
+ * struct pci_dev *dev __free(pci_dev_put) =
+ * pci_get_slot(parent, PCI_DEVFN(0, 0));
+ *
+ * The above will automatically call pci_dev_put() if @dev is non-NULL
+ * when @dev goes out of scope (automatic variable scope). If a function
+ * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
+ * freeing it) on success, it can do::
+ *
+ * return no_free_ptr(dev);
+ *
+ * ...or::
+ *
+ * return_ptr(dev);
+ *
+ * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
+ * dropped when the scope where guard() is invoked ends::
+ *
+ * DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+ * ...
+ * guard(pci_dev)(dev);
+ *
+ * The lifetime of the lock obtained by the guard() helper follows the
+ * scope of automatic variable declaration. Take the following example::
+ *
+ * func(...)
+ * {
+ * if (...) {
+ * ...
+ * guard(pci_dev)(dev); // pci_dev_lock() invoked here
+ * ...
+ * } // <- implied pci_dev_unlock() triggered here
+ * }
+ *
+ * Observe the lock is held for the remainder of the "if ()" block not
+ * the remainder of "func()".
+ *
+ * The ACQUIRE() macro can be used in all places that guard() can be
+ * used and additionally support conditional locks::
+ *
+ * DEFINE_GUARD_COND(pci_dev, _try, pci_dev_trylock(_T))
+ * ...
+ * ACQUIRE(pci_dev_try, lock)(dev);
+ * rc = ACQUIRE_ERR(pci_dev_try, &lock);
+ * if (rc)
+ * return rc;
+ * // @lock is held
+ *
+ * Now, when a function uses both __free() and guard()/ACQUIRE(), or
+ * multiple instances of __free(), the LIFO order of variable definition
+ * order matters. GCC documentation says:
+ *
+ * "When multiple variables in the same scope have cleanup attributes,
+ * at exit from the scope their associated cleanup functions are run in
+ * reverse order of definition (last defined, first cleanup)."
+ *
+ * When the unwind order matters it requires that variables be defined
+ * mid-function scope rather than at the top of the file. Take the
+ * following example and notice the bug highlighted by "!!"::
+ *
+ * LIST_HEAD(list);
+ * DEFINE_MUTEX(lock);
+ *
+ * struct object {
+ * struct list_head node;
+ * };
+ *
+ * static struct object *alloc_add(void)
+ * {
+ * struct object *obj;
+ *
+ * lockdep_assert_held(&lock);
+ * obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ * if (obj) {
+ * INIT_LIST_HEAD(&obj->node);
+ * list_add(&obj->node, &list);
+ * }
+ * return obj;
+ * }
+ *
+ * static void remove_free(struct object *obj)
+ * {
+ * lockdep_assert_held(&lock);
+ * list_del(&obj->node);
+ * kfree(obj);
+ * }
+ *
+ * DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
+ * static int init(void)
+ * {
+ * struct object *obj __free(remove_free) = NULL;
+ * int err;
+ *
+ * guard(mutex)(&lock);
+ * obj = alloc_add();
+ *
+ * if (!obj)
+ * return -ENOMEM;
+ *
+ * err = other_init(obj);
+ * if (err)
+ * return err; // remove_free() called without the lock!!
+ *
+ * no_free_ptr(obj);
+ * return 0;
+ * }
+ *
+ * That bug is fixed by changing init() to call guard() and define +
+ * initialize @obj in this order::
+ *
+ * guard(mutex)(&lock);
+ * struct object *obj __free(remove_free) = alloc_add();
+ *
+ * Given that the "__free(...) = NULL" pattern for variables defined at
+ * the top of the function poses this potential interdependency problem
+ * the recommendation is to always define and assign variables in one
+ * statement and not group variable definitions at the top of the
+ * function when __free() is used.
+ *
+ * Lastly, given that the benefit of cleanup helpers is removal of
+ * "goto", and that the "goto" statement can jump between scopes, the
+ * expectation is that usage of "goto" and cleanup helpers is never
+ * mixed in the same function. I.e. for a given routine, convert all
+ * resources that need a "goto" cleanup to scope-based cleanup, or
+ * convert none of them.
+ */
+
+/*
+ * DEFINE_FREE(name, type, free):
+ * simple helper macro that defines the required wrapper for a __free()
+ * based cleanup function. @free is an expression using '_T' to access the
+ * variable. @free should typically include a NULL test before calling a
+ * function, see the example below.
+ *
+ * __free(name):
+ * variable attribute to add a scoped based cleanup to the variable.
+ *
+ * no_free_ptr(var):
+ * like a non-atomic xchg(var, NULL), such that the cleanup function will
+ * be inhibited -- provided it sanely deals with a NULL value.
+ *
+ * NOTE: this has __must_check semantics so that it is harder to accidentally
+ * leak the resource.
+ *
+ * return_ptr(p):
+ * returns p while inhibiting the __free().
+ *
+ * Ex.
+ *
+ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+ *
+ * void *alloc_obj(...)
+ * {
+ * struct obj *p __free(kfree) = kmalloc(...);
+ * if (!p)
+ * return NULL;
+ *
+ * if (!init_obj(p))
+ * return NULL;
+ *
+ * return_ptr(p);
+ * }
+ *
+ * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
+ * kfree() is fine to be called with a NULL value. This is on purpose. This way
+ * the compiler sees the end of our alloc_obj() function as:
+ *
+ * tmp = p;
+ * p = NULL;
+ * if (p)
+ * kfree(p);
+ * return tmp;
+ *
+ * And through the magic of value-propagation and dead-code-elimination, it
+ * eliminates the actual cleanup call and compiles into:
+ *
+ * return p;
+ *
+ * Without the NULL test it turns into a mess and the compiler can't help us.
+ */
+
+#define DEFINE_FREE(_name, _type, _free) \
+ static __always_inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
+
+#define __free(_name) __cleanup(__free_##_name)
+
+#define __get_and_null(p, nullvalue) \
+ ({ \
+ auto __ptr = &(p); \
+ auto __val = *__ptr; \
+ *__ptr = nullvalue; \
+ __val; \
+ })
+
+static __always_inline __must_check
+const volatile void * __must_check_fn(const volatile void *val)
+{ return val; }
+
+#define no_free_ptr(p) \
+ ((typeof(p)) __must_check_fn((__force const volatile void *)__get_and_null(p, NULL)))
+
+#define return_ptr(p) return no_free_ptr(p)
+
+/*
+ * Only for situations where an allocation is handed in to another function
+ * and consumed by that function on success.
+ *
+ * struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ * setup(f);
+ * if (some_condition)
+ * return -EINVAL;
+ * ....
+ * ret = bar(f);
+ * if (!ret)
+ * retain_and_null_ptr(f);
+ * return ret;
+ *
+ * After retain_and_null_ptr(f) the variable f is NULL and cannot be
+ * dereferenced anymore.
+ */
+#define retain_and_null_ptr(p) ((void)__get_and_null(p, NULL))
+
+/*
+ * DEFINE_CLASS(name, type, exit, init, init_args...):
+ * helper to define the destructor and constructor for a type.
+ * @exit is an expression using '_T' -- similar to FREE above.
+ * @init is an expression in @init_args resulting in @type
+ *
+ * EXTEND_CLASS(name, ext, init, init_args...):
+ * extends class @name to @name@ext with the new constructor
+ *
+ * CLASS(name, var)(args...):
+ * declare the variable @var as an instance of the named class
+ *
+ * CLASS_INIT(name, var, init_expr):
+ * declare the variable @var as an instance of the named class with
+ * custom initialization expression.
+ *
+ * Ex.
+ *
+ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
+ *
+ * CLASS(fdget, f)(fd);
+ * if (fd_empty(f))
+ * return -EBADF;
+ *
+ * // use 'f' without concern
+ */
+
+#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
+typedef _type class_##_name##_t; \
+static __always_inline void class_##_name##_destructor(_type *p) \
+{ _type _T = *p; _exit; } \
+static __always_inline _type class_##_name##_constructor(_init_args) \
+{ _type t = _init; return t; }
+
+#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+typedef class_##_name##_t class_##_name##ext##_t; \
+static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
+{ class_##_name##_destructor(p); } \
+static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+{ class_##_name##_t t = _init; return t; }
+
+#define CLASS(_name, var) \
+ class_##_name##_t var __cleanup(class_##_name##_destructor) = \
+ class_##_name##_constructor
+
+#define CLASS_INIT(_name, _var, _init_expr) \
+ class_##_name##_t _var __cleanup(class_##_name##_destructor) = (_init_expr)
+
+#define __scoped_class(_name, var, _label, args...) \
+ for (CLASS(_name, var)(args); ; ({ goto _label; })) \
+ if (0) { \
+_label: \
+ break; \
+ } else
+
+#define scoped_class(_name, var, args...) \
+ __scoped_class(_name, var, __UNIQUE_ID(label), args)
+
+/*
+ * DEFINE_GUARD(name, type, lock, unlock):
+ * trivial wrapper around DEFINE_CLASS() above specifically
+ * for locks.
+ *
+ * DEFINE_GUARD_COND(name, ext, condlock)
+ * wrapper around EXTEND_CLASS above to add conditional lock
+ * variants to a base class, eg. mutex_trylock() or
+ * mutex_lock_interruptible().
+ *
+ * guard(name):
+ * an anonymous instance of the (guard) class, not recommended for
+ * conditional locks.
+ *
+ * scoped_guard (name, args...) { }:
+ * similar to CLASS(name, scope)(args), except the variable (with the
+ * explicit name 'scope') is declared in a for-loop such that its scope is
+ * bound to the next (compound) statement.
+ *
+ * for conditional locks the loop body is skipped when the lock is not
+ * acquired.
+ *
+ * scoped_cond_guard (name, fail, args...) { }:
+ * similar to scoped_guard(), except the @fail statement is executed
+ * when the lock acquire fails.
+ *
+ * Only for conditional locks.
+ *
+ * ACQUIRE(name, var):
+ * a named instance of the (guard) class, suitable for conditional
+ * locks when paired with ACQUIRE_ERR().
+ *
+ * ACQUIRE_ERR(name, &var):
+ * a helper that is effectively a PTR_ERR() conversion of the guard
+ * pointer. Returns 0 when the lock was acquired and a negative
+ * error code otherwise.
+ */
+
+#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
+static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
+
+#define DEFINE_CLASS_IS_UNCONDITIONAL(_name) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { return (void *)1; }
+
+#define __GUARD_IS_ERR(_ptr) \
+ ({ \
+ unsigned long _rc = (__force unsigned long)(_ptr); \
+ unlikely((_rc - 1) >= -MAX_ERRNO - 1); \
+ })
+
+#define __DEFINE_GUARD_LOCK_PTR(_name, _exp) \
+ static __always_inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { \
+ void *_ptr = (void *)(__force unsigned long)*(_exp); \
+ if (IS_ERR(_ptr)) { \
+ _ptr = NULL; \
+ } \
+ return _ptr; \
+ } \
+ static __always_inline int class_##_name##_lock_err(class_##_name##_t *_T) \
+ { \
+ long _rc = (__force unsigned long)*(_exp); \
+ if (!_rc) { \
+ _rc = -EBUSY; \
+ } \
+ if (!IS_ERR_VALUE(_rc)) { \
+ _rc = 0; \
+ } \
+ return _rc; \
+ }
+
+#define DEFINE_CLASS_IS_GUARD(_name) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+ __DEFINE_GUARD_LOCK_PTR(_name, _T)
+
+#define DEFINE_CLASS_IS_COND_GUARD(_name) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, true); \
+ __DEFINE_GUARD_LOCK_PTR(_name, _T)
+
+#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+ DEFINE_CLASS(_name, _type, if (!__GUARD_IS_ERR(_T)) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ DEFINE_CLASS_IS_GUARD(_name)
+
+#define DEFINE_GUARD_COND_4(_name, _ext, _lock, _cond) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
+ EXTEND_CLASS(_name, _ext, \
+ ({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \
+ class_##_name##_t _T) \
+ static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); } \
+ static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+ { return class_##_name##_lock_err(_T); }
+
+/*
+ * Default binary condition; success on 'true'.
+ */
+#define DEFINE_GUARD_COND_3(_name, _ext, _lock) \
+ DEFINE_GUARD_COND_4(_name, _ext, _lock, _RET)
+
+#define DEFINE_GUARD_COND(X...) CONCATENATE(DEFINE_GUARD_COND_, COUNT_ARGS(X))(X)
+
+#define guard(_name) \
+ CLASS(_name, __UNIQUE_ID(guard))
+
+#define __guard_ptr(_name) class_##_name##_lock_ptr
+#define __guard_err(_name) class_##_name##_lock_err
+#define __is_cond_ptr(_name) class_##_name##_is_conditional
+
+#define ACQUIRE(_name, _var) CLASS(_name, _var)
+#define ACQUIRE_ERR(_name, _var) __guard_err(_name)(_var)
+
+/*
+ * Helper macro for scoped_guard().
+ *
+ * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
+ * compiler would be sure that for the unconditional locks the body of the
+ * loop (caller-provided code glued to the else clause) could not be skipped.
+ * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too
+ * hard to deduce (even if could be proven true for unconditional locks).
+ */
+#define __scoped_guard(_name, _label, args...) \
+ for (CLASS(_name, scope)(args); \
+ __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \
+ ({ goto _label; })) \
+ if (0) { \
+_label: \
+ break; \
+ } else
+
+#define scoped_guard(_name, args...) \
+ __scoped_guard(_name, __UNIQUE_ID(label), args)
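A short usage sketch with the stock mutex guard (struct and field names hypothetical); note that a return inside the scope still runs the unlock:

    static int bump_sketch(struct my_obj *o)
    {
            scoped_guard(mutex, &o->lock) {
                    if (!o->ready)
                            return -EAGAIN; /* mutex_unlock() still runs */
                    o->count++;
            }
            return 0;
    }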
+
+#define __scoped_cond_guard(_name, _fail, _label, args...) \
+ for (CLASS(_name, scope)(args); true; ({ goto _label; })) \
+ if (!__guard_ptr(_name)(&scope)) { \
+ BUILD_BUG_ON(!__is_cond_ptr(_name)); \
+ _fail; \
+_label: \
+ break; \
+ } else
+
+#define scoped_cond_guard(_name, _fail, args...) \
+ __scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
+
+/*
+ * Additional helper macros for generating lock guards with types, either for
+ * locks that don't have a native type (eg. RCU, preempt) or those that need a
+ * 'fat' pointer (eg. spin_lock_irqsave).
+ *
+ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
+ *
+ * will result in the following type:
+ *
+ * typedef struct {
+ * type *lock; // 'type := void' for the _0 variant
+ * __VA_ARGS__;
+ * } class_##name##_t;
+ *
+ * As above, both _lock and _unlock are statements, except this time '_T' will
+ * be a pointer to the above struct.
+ */
+
+#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
+typedef struct { \
+ _type *lock; \
+ __VA_ARGS__; \
+} class_##_name##_t; \
+ \
+static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
+{ \
+ if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
+} \
+ \
+__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
+
+#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
+static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
+{ \
+ class_##_name##_t _t = { .lock = l }, *_T = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
+static __always_inline class_##_name##_t class_##_name##_constructor(void) \
+{ \
+ class_##_name##_t _t = { .lock = (void*)1 }, \
+ *_T __maybe_unused = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
+
+#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_0(_name, _lock)
+
+#define DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _cond) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
+ EXTEND_CLASS(_name, _ext, \
+ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+ int _RET = (_lock); \
+ if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
+ _t; }), \
+ typeof_member(class_##_name##_t, lock) l) \
+ static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); } \
+ static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+ { return class_##_name##_lock_err(_T); }
+
+#define DEFINE_LOCK_GUARD_1_COND_3(_name, _ext, _lock) \
+ DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _RET)
+
+#define DEFINE_LOCK_GUARD_1_COND(X...) CONCATENATE(DEFINE_LOCK_GUARD_1_COND_, COUNT_ARGS(X))(X)
+
+#endif /* _LINUX_CLEANUP_H */
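As a concrete instance of the 'fat pointer' case, spinlock.h defines an irqsave guard along these lines (reproduced as an illustration, not verbatim):

    DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
                        spin_lock_irqsave(_T->lock, _T->flags),
                        spin_unlock_irqrestore(_T->lock, _T->flags),
                        unsigned long flags)

    /* usage: IRQs stay disabled until the end of the scope */
    guard(spinlock_irqsave)(&dev->lock);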
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index c59c62571e4f..630705a47129 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1,32 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * linux/include/linux/clk-provider.h
- *
* Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
-#include <linux/io.h>
#include <linux/of.h>
-
-#ifdef CONFIG_COMMON_CLK
+#include <linux/of_clk.h>
/*
* flags used across common struct clk. these flags should only affect the
* top-level framework. custom flags for dealing with hardware specifics
* belong in struct clk_foo
+ *
+ * Please update clk_flags[] in drivers/clk/clk.c when making changes here!
*/
#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */
#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
/* unused */
-#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
+ /* unused */
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
@@ -35,6 +30,8 @@
#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
/* parents need enable during gate/ungate, set rate and re-parent */
#define CLK_OPS_PARENT_ENABLE BIT(12)
+/* duty cycle call may be forwarded to the parent clock */
+#define CLK_DUTY_CYCLE_PARENT BIT(13)
struct clk;
struct clk_hw;
@@ -45,6 +42,9 @@ struct dentry;
* struct clk_rate_request - Structure encoding the clk constraints that
* a clock user might require.
*
+ * Should be initialized by calling clk_hw_init_rate_request().
+ *
+ * @core: Pointer to the struct clk_core affected by this request
* @rate: Requested clock rate. This field will be adjusted by
* clock drivers according to hardware capabilities.
* @min_rate: Minimum rate imposed by clk users.
@@ -56,6 +56,7 @@ struct dentry;
*
*/
struct clk_rate_request {
+ struct clk_core *core;
unsigned long rate;
unsigned long min_rate;
unsigned long max_rate;
@@ -63,6 +64,26 @@ struct clk_rate_request {
struct clk_hw *best_parent_hw;
};
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate);
+void clk_hw_forward_rate_request(const struct clk_hw *core,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate);
+
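A sketch of how a pass-through .determine_rate op might use these new helpers to interrogate its parent; the foo_ names are hypothetical, not part of this patch:

static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	struct clk_rate_request parent_req;
	int ret;

	/* copy rate constraints from req, adjusted for the parent */
	clk_hw_forward_rate_request(hw, req, parent, &parent_req, req->rate);

	ret = __clk_determine_rate(parent, &parent_req);
	if (ret)
		return ret;

	req->rate = parent_req.rate;
	return 0;
}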
+/**
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+ *
+ * @num: Numerator of the duty cycle ratio
+ * @den: Denominator of the duty cycle ratio
+ */
+struct clk_duty {
+ unsigned int num;
+ unsigned int den;
+};
+
/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
@@ -103,10 +124,16 @@ struct clk_rate_request {
* Called with enable_lock held. This function must not
* sleep.
*
- * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
+ * @save_context: Save the context of the clock in preparation for poweroff.
+ *
+ * @restore_context: Restore the context of the clock after a restoration
+ * of power.
+ *
+ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
- * ensure that the prepare_mutex is held across this call.
- * Returns the calculated rate. Optional, but recommended - if
+ * ensure that the prepare_mutex is held across this call. If the
+ * driver cannot figure out a rate for this clock, it must return
+ * 0. Returns the calculated rate. Optional, but recommended - if
* this op is not set then clock rate will be initialized to 0.
*
* @round_rate: Given a target rate as input, returns the closest rate actually
@@ -166,10 +193,25 @@ struct clk_rate_request {
* by the second argument. Valid values for degrees are
* 0-359. Return 0 on success, otherwise -EERROR.
*
+ * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio
+ * of a clock. The returned denominator cannot be 0 and must be
+ * greater than or equal to the numerator.
+ *
+ * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by
+ * the numerator (2nd argument) and denominator (3rd argument).
+ * The arguments must form a valid ratio (denominator > 0
+ * and >= numerator). Return 0 on success, otherwise -EERROR.
+ *
* @init: Perform platform-specific initialization magic.
- * This is not not used by any of the basic clock types.
- * Please consider other ways of solving initialization problems
- * before using this callback, as its use is discouraged.
+ * This is not used by any of the basic clock types.
+ * This callback exists for HW which needs to perform some
+ * initialisation magic for CCF to get an accurate view of the
+ * clock. It may also be used when dynamic resource allocation is
+ * required. It shall not be used to deal with clock parameters,
+ * such as rate or parents.
+ * Returns 0 on success, -EERROR otherwise.
+ *
+ * @terminate: Free any resources allocated by init.
*
* @debug_init: Set up type-specific debugfs entries for this clock. This
* is called once, after the debugfs directory entry for this
@@ -198,6 +240,8 @@ struct clk_ops {
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
void (*disable_unused)(struct clk_hw *hw);
+ int (*save_context)(struct clk_hw *hw);
+ void (*restore_context)(struct clk_hw *hw);
unsigned long (*recalc_rate)(struct clk_hw *hw,
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
@@ -215,8 +259,27 @@ struct clk_ops {
unsigned long parent_accuracy);
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
- void (*init)(struct clk_hw *hw);
- int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+ int (*get_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
+ int (*set_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
+ int (*init)(struct clk_hw *hw);
+ void (*terminate)(struct clk_hw *hw);
+ void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+};
+
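A sketch of a get_duty_cycle implementation to go with the new ops; the foo_clk structure and 16-bit high/total register layout are hypothetical, not part of this patch:

struct foo_clk {
	struct clk_hw hw;
	void __iomem *reg;
};
#define to_foo_clk(_hw) container_of(_hw, struct foo_clk, hw)

static int foo_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct foo_clk *foo = to_foo_clk(hw);
	u32 val = readl(foo->reg);

	/* den must be non-zero and >= num, per the clk_ops contract */
	duty->num = val & 0xffff;	/* high-level ticks */
	duty->den = val >> 16;		/* total period ticks */

	return 0;
}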
+/**
+ * struct clk_parent_data - clk parent information
+ * @hw: parent clk_hw pointer (used for clk providers with internal clks)
+ * @fw_name: parent name local to provider registering clk
+ * @name: globally unique parent name (used as a fallback)
+ * @index: parent index local to provider registering clk (if @fw_name absent)
+ */
+struct clk_parent_data {
+ const struct clk_hw *hw;
+ const char *fw_name;
+ const char *name;
+ int index;
};
/**
@@ -226,13 +289,20 @@ struct clk_ops {
* @name: clock name
* @ops: operations this clock supports
* @parent_names: array of string names for all possible parents
+ * @parent_data: array of parent data for all possible parents (when some
+ * parents are external to the clk controller)
+ * @parent_hws: array of pointers to all possible parents (when all parents
+ * are internal to the clk controller)
* @num_parents: number of possible parents
* @flags: framework-level hints and quirks
*/
struct clk_init_data {
const char *name;
const struct clk_ops *ops;
+ /* Only one of the following three should be assigned */
const char * const *parent_names;
+ const struct clk_parent_data *parent_data;
+ const struct clk_hw **parent_hws;
u8 num_parents;
unsigned long flags;
};
@@ -250,7 +320,8 @@ struct clk_init_data {
* into the clk API
*
* @init: pointer to struct clk_init_data that contains the init data shared
- * with the common clock framework.
+ * with the common clock framework. This pointer will be set to NULL once
+ * a clk_register() variant is called on this clk_hw pointer.
*/
struct clk_hw {
struct clk_core *core;
@@ -271,30 +342,160 @@ struct clk_hw {
* struct clk_fixed_rate - fixed-rate clock
* @hw: handle between common and hardware-specific interfaces
* @fixed_rate: constant frequency of clock
+ * @fixed_accuracy: constant accuracy of clock in ppb (parts per billion)
+ * @flags: hardware specific flags
+ *
+ * Flags:
+ * * CLK_FIXED_RATE_PARENT_ACCURACY - Use the accuracy of the parent clk
+ * instead of what's set in @fixed_accuracy.
*/
struct clk_fixed_rate {
struct clk_hw hw;
unsigned long fixed_rate;
unsigned long fixed_accuracy;
- u8 flags;
+ unsigned long flags;
};
-#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
extern const struct clk_ops clk_fixed_rate_ops;
+struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy,
+ unsigned long clk_fixed_flags, bool devm);
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
-struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- unsigned long fixed_rate);
-struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned long fixed_rate, unsigned long fixed_accuracy);
+/**
+ * clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (fixed_rate), 0, 0, false)
+
+/**
+ * devm_clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (fixed_rate), 0, 0, true)
+/**
+ * devm_clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ 0, true)
+/**
+ * clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (fixed_rate), 0, 0, false)
+/**
+ * clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ 0, false)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name, \
+ flags, fixed_rate, \
+ fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), \
+ NULL, NULL, (flags), (fixed_rate), \
+ (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \
+ parent_hw, flags, fixed_rate, fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (fixed_rate), \
+ (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_data(dev, name, \
+ parent_data, flags, fixed_rate, fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), NULL, (flags), \
+ (fixed_rate), (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_parent_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_accuracy(dev, name, parent_data, \
+ flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ CLK_FIXED_RATE_PARENT_ACCURACY, false)
+
void clk_unregister_fixed_rate(struct clk *clk);
-struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned long fixed_rate, unsigned long fixed_accuracy);
void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
void of_fixed_clk_setup(struct device_node *np);
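For illustration, registering a board-level 32.768 kHz oscillator with the devres variant added above; the function and clock names are hypothetical:

static int foo_probe_osc(struct device *dev)
{
	struct clk_hw *hw;

	/* root clock: no parent, no flags, fixed 32768 Hz */
	hw = devm_clk_hw_register_fixed_rate(dev, "osc32k", NULL, 0, 32768);

	return PTR_ERR_OR_ZERO(hw);
}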
@@ -318,6 +519,9 @@ void of_fixed_clk_setup(struct device_node *np);
* of this register, and mask of gate bits are in higher 16-bit of this
* register. While setting the gate bits, higher 16-bit should also be
* updated to indicate changing gate bits.
+ * CLK_GATE_BIG_ENDIAN - by default little endian register accesses are used for
+ * the gate register. Setting this flag makes the register accesses big
+ * endian.
*/
struct clk_gate {
struct clk_hw hw;
@@ -331,18 +535,133 @@ struct clk_gate {
#define CLK_GATE_SET_TO_DISABLE BIT(0)
#define CLK_GATE_HIWORD_MASK BIT(1)
+#define CLK_GATE_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_gate_ops;
-struct clk *clk_register_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
+struct clk_hw *__clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
+struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+/**
+ * clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx, \
+ clk_gate_flags, lock) \
+ __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_hw - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, reg, \
+ bit_idx, clk_gate_flags, lock) \
+ __clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_data - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg, \
+ bit_idx, clk_gate_flags, lock) \
+ __clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \
+ (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx,\
+ clk_gate_flags, lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate_parent_hw - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, \
+ reg, bit_idx, clk_gate_flags, \
+ lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate_parent_data - register a gate clock with the
+ * clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate_parent_data(dev, name, parent_data, flags, \
+ reg, bit_idx, clk_gate_flags, \
+ lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+
void clk_unregister_gate(struct clk *clk);
void clk_hw_unregister_gate(struct clk_hw *hw);
+int clk_gate_is_enabled(struct clk_hw *hw);
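A sketch of a basic gate registration using the macro above, assuming a hypothetical ioremapped register block and shared spinlock:

static struct clk_hw *foo_register_uart_gate(struct device *dev,
					     void __iomem *base,
					     spinlock_t *lock)
{
	/* gate bit 3 of the register at offset 0x10 */
	return clk_hw_register_gate(dev, "uart0_gate", "uart0_div", 0,
				    base + 0x10, 3, 0, lock);
}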
struct clk_div_table {
unsigned int val;
@@ -362,7 +681,7 @@ struct clk_div_table {
* Clock with an adjustable divider affecting its output frequency. Implements
* .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
* register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
* the raw value read from the register, with the value of zero considered
@@ -385,17 +704,23 @@ struct clk_div_table {
* CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
* except when the value read from the register is zero, the divisor is
* 2^width of the field.
+ * CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used
+ * for the divider register. Setting this flag makes the register accesses
+ * big endian.
+ * CLK_DIVIDER_EVEN_INTEGERS - clock divisor is 2, 4, 6, 8, 10, etc.
+ * Formula is 2 * (value read from hardware + 1).
*/
struct clk_divider {
struct clk_hw hw;
void __iomem *reg;
u8 shift;
u8 width;
- u8 flags;
+ u16 flags;
const struct clk_div_table *table;
spinlock_t *lock;
};
+#define clk_div_mask(width) ((1 << (width)) - 1)
#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
#define CLK_DIVIDER_ONE_BASED BIT(0)
@@ -405,39 +730,245 @@ struct clk_divider {
#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
#define CLK_DIVIDER_READ_ONLY BIT(5)
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
+#define CLK_DIVIDER_BIG_ENDIAN BIT(7)
+#define CLK_DIVIDER_EVEN_INTEGERS BIT(8)
extern const struct clk_ops clk_divider_ops;
extern const struct clk_ops clk_divider_ro_ops;
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val, const struct clk_div_table *table,
- unsigned long flags);
+ unsigned long flags, unsigned long width);
long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table,
u8 width, unsigned long flags);
+long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+ unsigned long rate, unsigned long *prate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val);
+int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags);
+int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val);
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags);
-struct clk *clk_register_divider(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
+struct clk_hw *__clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
+ unsigned long clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, spinlock_t *lock);
+ unsigned long clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider_table(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock);
+ unsigned long clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_register_divider(dev, name, parent_name, flags, reg, shift, width, \
+ clk_divider_flags, lock) \
+ clk_register_divider_table((dev), (name), (parent_name), (flags), \
+ (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \
+ width, clk_divider_flags, lock) \
+ __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_hw - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, reg, \
+ shift, width, clk_divider_flags, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_data - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_data(dev, name, parent_data, flags, \
+ reg, shift, width, \
+ clk_divider_flags, lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ (width), (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_table - register a table based divider clock with
+ * the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table(dev, name, parent_name, flags, reg, \
+ shift, width, clk_divider_flags, table, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_hw - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_hw(dev, name, parent_hw, flags, \
+ reg, shift, width, \
+ clk_divider_flags, table, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_data - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_data(dev, name, parent_data, \
+ flags, reg, shift, width, \
+ clk_divider_flags, table, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ (width), (clk_divider_flags), (table), \
+ (lock))
+/**
+ * devm_clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \
+ width, clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * devm_clk_hw_register_divider_parent_hw - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, \
+ reg, shift, width, \
+ clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), NULL, \
+ (parent_hw), NULL, (flags), (reg), \
+ (shift), (width), (clk_divider_flags), \
+ NULL, (lock))
+/**
+ * devm_clk_hw_register_divider_table - register a table based divider clock
+ * with the clock framework (devres variant)
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_table(dev, name, parent_name, flags, \
+ reg, shift, width, \
+ clk_divider_flags, table, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), \
+ NULL, NULL, (flags), (reg), (shift), \
+ (width), (clk_divider_flags), (table), \
+ (lock))
+
void clk_unregister_divider(struct clk *clk);
void clk_hw_unregister_divider(struct clk_hw *hw);
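A sketch of a table-based divider where only a few ratios are valid; register offsets and names are hypothetical. The table is terminated by a zero .div entry:

static const struct clk_div_table ahb_div_table[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 2 },
	{ .val = 2, .div = 4 },
	{ /* sentinel */ }
};

static struct clk_hw *foo_register_ahb_div(struct device *dev,
					   void __iomem *base,
					   spinlock_t *lock)
{
	/* 2-bit field at shift 0 of the register at offset 0x4 */
	return clk_hw_register_divider_table(dev, "ahb", "pll_periph", 0,
					     base + 0x4, 0, 2, 0,
					     ahb_div_table, lock);
}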
@@ -446,8 +977,9 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
*
* @hw: handle between common and hardware-specific interfaces
* @reg: register controlling multiplexer
+ * @table: array of register values corresponding to the parent index
* @shift: shift to multiplexer bit field
- * @width: width of mutliplexer bit field
+ * @mask: mask of multiplexer bit field
* @flags: hardware-specific flags
* @lock: register lock
*
@@ -461,13 +993,18 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
* register, and mask of mux bits are in higher 16-bit of this register.
* While setting the mux bits, higher 16-bit should also be updated to
* indicate changing mux bits.
+ * CLK_MUX_READ_ONLY - The mux registers can't be written, only read in the
+ * .get_parent clk_op.
* CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
* frequency.
+ * CLK_MUX_BIG_ENDIAN - By default little endian register accesses are used for
+ * the mux register. Setting this flag makes the register accesses big
+ * endian.
*/
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
- u32 *table;
+ const u32 *table;
u32 mask;
u8 shift;
u8 flags;
@@ -481,31 +1018,97 @@ struct clk_mux {
#define CLK_MUX_HIWORD_MASK BIT(2)
#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */
#define CLK_MUX_ROUND_CLOSEST BIT(4)
+#define CLK_MUX_BIG_ENDIAN BIT(5)
extern const struct clk_ops clk_mux_ops;
extern const struct clk_ops clk_mux_ro_ops;
-struct clk *clk_register_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock);
-
+struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
+ const char *name, u8 num_parents,
+ const char * const *parent_names,
+ const struct clk_hw **parent_hws,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
+ const char *name, u8 num_parents,
+ const char * const *parent_names,
+ const struct clk_hw **parent_hws,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
+
+#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ clk_register_mux_table((dev), (name), (parent_names), (num_parents), \
+ (flags), (reg), (shift), BIT((width)) - 1, \
+ (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_table(dev, name, parent_names, num_parents, \
+ flags, reg, shift, mask, clk_mux_flags, \
+ table, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
+#define clk_hw_register_mux_table_parent_data(dev, name, parent_data, \
+ num_parents, flags, reg, shift, mask, \
+ clk_mux_flags, table, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ NULL, NULL, (parent_data), (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
+#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, (clk_mux_flags), \
+ NULL, (lock))
+#define clk_hw_register_mux_hws(dev, name, parent_hws, num_parents, flags, \
+ reg, shift, width, clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ (parent_hws), NULL, (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_parent_data(dev, name, parent_data, num_parents, \
+ flags, reg, shift, width, \
+ clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
+#define devm_clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, (clk_mux_flags), \
+ NULL, (lock))
+#define devm_clk_hw_register_mux_parent_hws(dev, name, parent_hws, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ (parent_hws), NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, \
+ (clk_mux_flags), NULL, (lock))
+#define devm_clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ NULL, (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
+
+int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
+ unsigned int val);
+unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index);
void clk_unregister_mux(struct clk *clk);
void clk_hw_unregister_mux(struct clk_hw *hw);
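A sketch of a 2-bit mux selecting among three hypothetical parents, using the width-based convenience macro above:

static const char * const sel_parents[] = { "osc24m", "pll1", "pll2" };

static struct clk_hw *foo_register_periph_sel(struct device *dev,
					      void __iomem *base,
					      spinlock_t *lock)
{
	/* 2-bit select field at shift 4 of the register at offset 0x8 */
	return clk_hw_register_mux(dev, "periph_sel", sel_parents,
				   ARRAY_SIZE(sel_parents), 0,
				   base + 0x8, 4, 2, 0, lock);
}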
@@ -518,18 +1121,28 @@ void of_fixed_factor_clk_setup(struct device_node *node);
* @hw: handle between common and hardware-specific interfaces
* @mult: multiplier
* @div: divider
+ * @acc: fixed accuracy in ppb
+ * @flags: behavior modifying flags
*
* Clock with a fixed multiplier and divider. The output frequency is the
* parent clock rate divided by div and multiplied by mult.
- * Implements .recalc_rate, .set_rate and .round_rate
+ * Implements .recalc_rate, .set_rate, .round_rate and .recalc_accuracy
+ *
+ * Flags:
+ * * CLK_FIXED_FACTOR_FIXED_ACCURACY - Use the value in @acc instead of the
+ * parent clk accuracy.
*/
struct clk_fixed_factor {
struct clk_hw hw;
unsigned int mult;
unsigned int div;
+ unsigned long acc;
+ unsigned int flags;
};
+#define CLK_FIXED_FACTOR_FIXED_ACCURACY BIT(0)
+
#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
extern const struct clk_ops clk_fixed_factor_ops;
@@ -540,8 +1153,38 @@ void clk_unregister_fixed_factor(struct clk *clk);
struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
+struct clk_hw *clk_hw_register_fixed_factor_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div);
+struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div,
+ unsigned long acc);
+struct clk_hw *clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div);
void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
+struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div,
+ unsigned long acc);
+struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div);
+
+struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
+struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
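For illustration (names hypothetical), a fixed /2 post-divider hanging off a PLL output, i.e. mult = 1 and div = 2:

static struct clk_hw *foo_register_pll_div2(struct device *dev)
{
	return clk_hw_register_fixed_factor(dev, "pll_div2", "pll", 0, 1, 2);
}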
/**
* struct clk_fractional_divider - adjustable fractional divider clock
*
@@ -551,26 +1194,46 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
* @mwidth: width of the numerator bit field
* @nshift: shift to the denominator bit field
* @nwidth: width of the denominator bit field
+ * @approximation: clk driver's callback for calculating the m/n values
+ * that best approximate the requested rate
* @lock: register lock
*
* Clock with adjustable fractional divider affecting its output frequency.
+ *
+ * @flags:
+ * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+ * are the values read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+ * is set then the numerator and denominator are both the value read
+ * plus one.
+ * CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are
+ * used for the divider register. Setting this flag makes the register
+ * accesses big endian.
+ * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might
+ * be saturated, leaving the caller quite far from a good enough
+ * approximation. By setting this flag the caller may instead request
+ * that the rate be shifted left by a few bits when the requested rate
+ * is too small to satisfy the desired denominator range. It assumes
+ * that a power-of-two capable prescaler exists on the caller's side.
*/
struct clk_fractional_divider {
struct clk_hw hw;
void __iomem *reg;
u8 mshift;
u8 mwidth;
- u32 mmask;
u8 nshift;
u8 nwidth;
- u32 nmask;
u8 flags;
+ void (*approximation)(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate,
+ unsigned long *m, unsigned long *n);
spinlock_t *lock;
};
#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
-extern const struct clk_ops clk_fractional_divider_ops;
+#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
+#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1)
+#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2)
+
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
@@ -593,7 +1256,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
* Clock with an adjustable multiplier affecting its output frequency.
* Implements .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
* from the register, with 0 being a valid value effectively
* zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
@@ -601,6 +1264,9 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
* leaving the parent rate unmodified.
* CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be
* rounded to the closest integer instead of the down one.
+ * CLK_MULTIPLIER_BIG_ENDIAN - By default little endian register accesses are
+ * used for the multiplier register. Setting this flag makes the register
+ * accesses big endian.
*/
struct clk_multiplier {
struct clk_hw hw;
@@ -613,8 +1279,9 @@ struct clk_multiplier {
#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
-#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
+#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
+#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_multiplier_ops;
@@ -650,6 +1317,12 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
+struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
+ const struct clk_parent_data *parent_data, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags);
void clk_unregister_composite(struct clk *clk);
struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
@@ -657,88 +1330,87 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
-void clk_hw_unregister_composite(struct clk_hw *hw);
-
-/***
- * struct clk_gpio_gate - gpio gated clock
- *
- * @hw: handle between common and hardware-specific interfaces
- * @gpiod: gpio descriptor
- *
- * Clock with a gpio control for enabling and disabling the parent clock.
- * Implements .enable, .disable and .is_enabled
- */
-
-struct clk_gpio {
- struct clk_hw hw;
- struct gpio_desc *gpiod;
-};
-
-#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
-
-extern const struct clk_ops clk_gpio_gate_ops;
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned gpio, bool active_low,
+struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
+ const char *name,
+ const struct clk_parent_data *parent_data, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned gpio, bool active_low,
+struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev,
+ const char *name, const struct clk_parent_data *parent_data,
+ int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
-void clk_hw_unregister_gpio_gate(struct clk_hw *hw);
-
-/**
- * struct clk_gpio_mux - gpio controlled clock multiplexer
- *
- * @hw: see struct clk_gpio
- * @gpiod: gpio descriptor to select the parent of this clock multiplexer
- *
- * Clock with a gpio control for selecting the parent clock.
- * Implements .get_parent, .set_parent and .determine_rate
- */
-
-extern const struct clk_ops clk_gpio_mux_ops;
-struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags);
-void clk_hw_unregister_gpio_mux(struct clk_hw *hw);
+void clk_hw_unregister_composite(struct clk_hw *hw);
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes. It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API. In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
struct clk *clk_register(struct device *dev, struct clk_hw *hw);
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw);
int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw);
int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw);
+int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw);
void clk_unregister(struct clk *clk);
-void devm_clk_unregister(struct device *dev, struct clk *clk);
void clk_hw_unregister(struct clk_hw *hw);
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_dev() - get device from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device from
+ *
+ * This is a helper to get the struct device associated with a hardware
+ * clock. Some clock controllers, such as those registered with
+ * CLK_OF_DECLARE(), may not have provided a device pointer when
+ * registering the clock.
+ *
+ * Return: the struct device associated with the clock, or NULL if there
+ * is none.
+ */
+struct device *clk_hw_get_dev(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_of_node() - get device_node from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device_node from
+ *
+ * This is a helper to get the struct device_node associated with a
+ * hardware clock.
+ *
+ * Return: the struct device_node associated with the clock, or NULL
+ * if there is none.
+ */
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw);
+#ifdef CONFIG_COMMON_CLK
struct clk_hw *__clk_get_hw(struct clk *clk);
+#else
+static inline struct clk_hw *__clk_get_hw(struct clk *clk)
+{
+ return (struct clk_hw *)clk;
+}
+#endif
+
+struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id);
+struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
+ const char *con_id);
+
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
unsigned int index);
+int clk_hw_get_parent_index(struct clk_hw *hw);
+int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
-unsigned long __clk_get_flags(struct clk *clk);
unsigned long clk_hw_get_flags(const struct clk_hw *hw);
+#define clk_hw_can_set_rate_parent(hw) \
+ (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
+
bool clk_hw_is_prepared(const struct clk_hw *hw);
bool clk_hw_is_enabled(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
@@ -748,7 +1420,14 @@ int __clk_mux_determine_rate(struct clk_hw *hw,
int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
int __clk_mux_determine_rate_closest(struct clk_hw *hw,
struct clk_rate_request *req);
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long flags);
+int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
+ struct clk_rate_request *req);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
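A sketch of a mux .determine_rate implementation that simply defers to the framework helper, picking the parent giving the closest achievable rate; the foo_ name is hypothetical:

static int foo_mux_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	return __clk_mux_determine_rate_closest(hw, req);
}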
@@ -767,15 +1446,22 @@ static inline long divider_round_rate(struct clk_hw *hw, unsigned long rate,
rate, prate, table, width, flags);
}
+static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate,
+ const struct clk_div_table *table,
+ u8 width, unsigned long flags,
+ unsigned int val)
+{
+ return divider_ro_round_rate_parent(hw, clk_hw_get_parent(hw),
+ rate, prate, table, width, flags,
+ val);
+}
+
/*
* FIXME clock api without lock protection
*/
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
-struct of_device_id;
-
-typedef void (*of_clk_init_cb_t)(struct device_node *);
-
struct clk_onecell_data {
struct clk **clks;
unsigned int clk_num;
@@ -783,12 +1469,16 @@ struct clk_onecell_data {
struct clk_hw_onecell_data {
unsigned int num;
- struct clk_hw *hws[];
+ struct clk_hw *hws[] __counted_by(num);
};
-extern struct of_device_id __clk_of_table;
-
-#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
+#define CLK_OF_DECLARE(name, compat, fn) \
+ static void __init __##name##_of_clk_init_declare(struct device_node *np) \
+ { \
+ fn(np); \
+ fwnode_dev_initialized(of_fwnode_handle(np), true); \
+ } \
+ OF_DECLARE_1(clk, name, compat, __##name##_of_clk_init_declare)
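A sketch of early, device-less provider registration via CLK_OF_DECLARE; the compatible string and callback are hypothetical:

static void __init foo_osc_init(struct device_node *np)
{
	of_fixed_clk_setup(np);
}
CLK_OF_DECLARE(foo_osc, "vendor,foo-osc", foo_osc_init);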
/*
* Use this macro when you have a driver that requires two initialization
@@ -802,6 +1492,133 @@ extern struct of_device_id __clk_of_table;
} \
OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver)
+#define CLK_HW_INIT(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = (const char *[]) { _parent }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_HW(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_hws = (const struct clk_hw*[]) { _parent }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+/*
+ * This macro is intended for drivers to be able to share the otherwise
+ * individual struct clk_hw[] compound literals created by the compiler
+ * when using CLK_HW_INIT_HW. It does NOT support multiple parents.
+ */
+#define CLK_HW_INIT_HWS(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_hws = _parent, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_FW_NAME(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = (const struct clk_parent_data[]) { \
+ { .fw_name = _parent }, \
+ }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_PARENTS_HW(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_hws = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_PARENTS_DATA(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = NULL, \
+ .num_parents = 0, \
+ .ops = _ops, \
+ })
+
+#define CLK_FIXED_FACTOR(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+#define CLK_FIXED_FACTOR_HW(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT_HW(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+/*
+ * This macro allows the driver to reuse the _parent array for multiple
+ * fixed factor clk declarations.
+ */
+#define CLK_FIXED_FACTOR_HWS(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT_HWS(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+#define CLK_FIXED_FACTOR_FW_NAME(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT_FW_NAME(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
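These declaration helpers let simple clk trees be described statically. For example (names hypothetical), a /4 fixed-factor clock declared at file scope, recalling that the argument order is div before mult:

static CLK_FIXED_FACTOR(pll_div4, "pll-div4", "pll", 4, 1, 0);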
#ifdef CONFIG_OF
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *args,
@@ -811,7 +1628,12 @@ int of_clk_add_hw_provider(struct device_node *np,
struct clk_hw *(*get)(struct of_phandle_args *clkspec,
void *data),
void *data);
+int devm_of_clk_add_hw_provider(struct device *dev,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data);
void of_clk_del_provider(struct device_node *np);
+
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
@@ -819,13 +1641,10 @@ struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec,
void *data);
-unsigned int of_clk_get_parent_count(struct device_node *np);
int of_clk_parent_fill(struct device_node *np, const char **parents,
unsigned int size);
-const char *of_clk_get_parent_name(struct device_node *np, int index);
int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags);
-void of_clk_init(const struct of_device_id *matches);
#else /* !CONFIG_OF */
@@ -843,7 +1662,15 @@ static inline int of_clk_add_hw_provider(struct device_node *np,
{
return 0;
}
+static inline int devm_of_clk_add_hw_provider(struct device *dev,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data)
+{
+ return 0;
+}
static inline void of_clk_del_provider(struct device_node *np) {}
+
static inline struct clk *of_clk_src_simple_get(
struct of_phandle_args *clkspec, void *data)
{
@@ -864,63 +1691,18 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
return ERR_PTR(-ENOENT);
}
-static inline unsigned int of_clk_get_parent_count(struct device_node *np)
-{
- return 0;
-}
static inline int of_clk_parent_fill(struct device_node *np,
const char **parents, unsigned int size)
{
return 0;
}
-static inline const char *of_clk_get_parent_name(struct device_node *np,
- int index)
-{
- return NULL;
-}
static inline int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags)
{
return 0;
}
-static inline void of_clk_init(const struct of_device_id *matches) {}
#endif /* CONFIG_OF */
-/*
- * wrap access to peripherals in accessor routines
- * for improved portability across platforms
- */
-
-#if IS_ENABLED(CONFIG_PPC)
-
-static inline u32 clk_readl(u32 __iomem *reg)
-{
- return ioread32be(reg);
-}
-
-static inline void clk_writel(u32 val, u32 __iomem *reg)
-{
- iowrite32be(val, reg);
-}
-
-#else /* platform dependent I/O accessors */
-
-static inline u32 clk_readl(u32 __iomem *reg)
-{
- return readl(reg);
-}
-
-static inline void clk_writel(u32 val, u32 __iomem *reg)
-{
- writel(val, reg);
-}
-
-#endif /* platform dependent I/O accessors */
-
-#ifdef CONFIG_DEBUG_FS
-struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
- void *data, const struct file_operations *fops);
-#endif
+void clk_gate_restore_context(struct clk_hw *hw);
-#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 12c96d94d1fa..b607482ca77e 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/clk.h
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H
@@ -95,7 +92,7 @@ struct clk_bulk_data {
#ifdef CONFIG_COMMON_CLK
/**
- * clk_notifier_register: register a clock rate-change notifier callback
+ * clk_notifier_register - register a clock rate-change notifier callback
* @clk: clock whose rate we are interested in
* @nb: notifier block with callback function pointer
*
@@ -106,13 +103,24 @@ struct clk_bulk_data {
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
/**
- * clk_notifier_unregister: unregister a clock rate-change notifier callback
+ * clk_notifier_unregister - unregister a clock rate-change notifier callback
* @clk: clock whose rate we are no longer interested in
* @nb: notifier block which will be unregistered
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
/**
+ * devm_clk_notifier_register - register a managed rate-change notifier callback
+ * @dev: device for clock "consumer"
+ * @clk: clock whose rate we are interested in
+ * @nb: notifier block with callback function pointer
+ *
+ * Returns 0 on success, a negative errno otherwise
+ */
+int devm_clk_notifier_register(struct device *dev, struct clk *clk,
+ struct notifier_block *nb);
+
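As a non-authoritative sketch, a consumer could pair this with a callback like the one below; foo_clk_notify and the supply-voltage comment are assumptions:

        static int foo_clk_notify(struct notifier_block *nb,
                                  unsigned long event, void *data)
        {
                struct clk_notifier_data *ndata = data;

                if (event == PRE_RATE_CHANGE && ndata->new_rate > ndata->old_rate) {
                        /* e.g. raise the supply voltage before switching */
                }

                return NOTIFY_OK;
        }

        static struct notifier_block foo_nb = {
                .notifier_call = foo_clk_notify,
        };

        /* in probe(); unregistered automatically when the device unbinds */
        ret = devm_clk_notifier_register(dev, clk, &foo_nb);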
+/**
* clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
* for a clock source.
* @clk: clock source
@@ -142,6 +150,27 @@ int clk_set_phase(struct clk *clk, int degrees);
int clk_get_phase(struct clk *clk);
/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
+ * success, a negative errno otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio multiplied by the scale provided, otherwise
+ * returns a negative errno.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
+
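A short sketch of the intended round trip, assuming the provider of @clk implements duty-cycle control:

        int ret, pct;

        /* request a 1/3 (about 33%) duty cycle */
        ret = clk_set_duty_cycle(clk, 1, 3);
        if (ret)
                return ret;

        /* read it back as an integer percentage, here ~33 */
        pct = clk_get_scaled_duty_cycle(clk, 100);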
+/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
* @q: clk compared against p
@@ -154,6 +183,51 @@ int clk_get_phase(struct clk *clk);
*/
bool clk_is_match(const struct clk *p, const struct clk *q);
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer from executing, even indirectly,
+ * an operation which could alter the rate of the provider or cause glitches.
+ *
+ * If exclusivity is claimed more than once on a clock, even by the same driver,
+ * the rate effectively gets locked as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get
+ * @dev: device the exclusivity is bound to
+ * @clk: clock source
+ *
+ * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler
+ * on @dev to call clk_rate_exclusive_put().
+ *
+ * Must not be called from within atomic context.
+ */
+int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows a driver to release the exclusivity it previously got
+ * from clk_rate_exclusive_get().
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
+
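Taken together, the exclusivity calls are meant to bracket a rate-sensitive section; a minimal sketch (the 200 MHz figure is arbitrary):

        ret = clk_rate_exclusive_get(clk);
        if (ret)
                return ret;

        ret = clk_set_rate(clk, 200000000);
        /* ... rate-critical work; no other consumer can change it ... */

        clk_rate_exclusive_put(clk);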
#else
static inline int clk_notifier_register(struct clk *clk,
@@ -168,6 +242,13 @@ static inline int clk_notifier_unregister(struct clk *clk,
return -ENOTSUPP;
}
+static inline int devm_clk_notifier_register(struct device *dev,
+ struct clk *clk,
+ struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
static inline long clk_get_accuracy(struct clk *clk)
{
return -ENOTSUPP;
@@ -183,13 +264,38 @@ static inline long clk_get_phase(struct clk *clk)
return -ENOTSUPP;
}
+static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
+ unsigned int den)
+{
+ return -ENOTSUPP;
+}
+
+static inline int clk_get_scaled_duty_cycle(struct clk *clk,
+ unsigned int scale)
+{
+ return 0;
+}
+
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
return p == q;
}
+static inline int clk_rate_exclusive_get(struct clk *clk)
+{
+ return 0;
+}
+
+static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_rate_exclusive_put(struct clk *clk) {}
+
#endif
+#ifdef CONFIG_HAVE_CLK_PREPARE
/**
* clk_prepare - prepare a clock source
* @clk: clock source
@@ -198,10 +304,26 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
*
* Must not be called from within atomic context.
*/
-#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
const struct clk_bulk_data *clks);
+
+/**
+ * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
+ * @clk: clock source
+ *
+ * Returns true if clk_prepare() implicitly enables the clock, effectively
+ * making clk_enable()/clk_disable() no-ops, false otherwise.
+ *
+ * This is of interest mainly to the power management code where actually
+ * disabling the clock also requires unpreparing it to have any material
+ * effect.
+ *
+ * Regardless of the value returned here, the caller must always invoke
+ * clk_enable() or clk_prepare_enable() and counterparts for usage counts
+ * to be right.
+ */
+bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
@@ -209,11 +331,17 @@ static inline int clk_prepare(struct clk *clk)
return 0;
}
-static inline int clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks)
+static inline int __must_check
+clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
might_sleep();
return 0;
}
+
+static inline bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return false;
+}
#endif
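A hedged sketch of the power-management use described above; the enable count is always balanced, and the helper only decides whether an extra unprepare is needed to really gate the clock:

        clk_disable(clk);               /* keep usage counts right */
        if (clk_is_enabled_when_prepared(clk))
                clk_unprepare(clk);     /* only this actually stops it */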
/**
@@ -233,7 +361,8 @@ static inline void clk_unprepare(struct clk *clk)
{
might_sleep();
}
-static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks)
+static inline void clk_bulk_unprepare(int num_clks,
+ const struct clk_bulk_data *clks)
{
might_sleep();
}
@@ -279,8 +408,40 @@ struct clk *clk_get(struct device *dev, const char *id);
*/
int __must_check clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
+/**
+ * clk_bulk_get_all - lookup and obtain all available references to clock
+ * producers.
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * This helper function allows drivers to get all clk consumers in one
+ * operation. If any of the clks cannot be acquired, then any clks
+ * that were obtained will be freed before returning to the caller.
+ *
+ * Returns a positive value for the number of clocks obtained while the
+ * clock references are stored in the clk_bulk_data table in the @clks field.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_bulk_get_all should not be called from within interrupt context.
+ */
+int __must_check clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks);
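A minimal consumer sketch for a driver that simply wants every clock listed for its device; error handling is kept to the essentials:

        struct clk_bulk_data *clks;
        int num, ret;

        num = clk_bulk_get_all(dev, &clks);
        if (num < 0)
                return num;

        ret = clk_bulk_prepare_enable(num, clks);
        if (ret) {
                clk_bulk_put_all(num, clks);
                return ret;
        }

        /* ... on teardown: ... */
        clk_bulk_disable_unprepare(num, clks);
        clk_bulk_put_all(num, clks);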
/**
+ * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Behaves the same as clk_bulk_get() except where there is no clock producer.
+ * In that case, instead of returning -ENOENT, the function returns 0, and
+ * sets the clk field to NULL for any clock whose producer could not be found.
+ */
+int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
+/**
* devm_clk_bulk_get - managed get multiple clk consumers
* @dev: device for clock "consumer"
* @num_clks: the number of clk_bulk_data
@@ -294,21 +455,79 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
*/
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
+/**
+ * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Behaves the same as devm_clk_bulk_get() except where there is no clock
+ * producer. In this case, instead of returning -ENOENT, the function returns
+ * NULL for the given clk. It is assumed all clocks in clk_bulk_data are optional.
+ *
+ * Returns 0 if all clocks specified in clk_bulk_data table are obtained
+ * successfully or for any clk there was no clk provider available, otherwise
+ * returns valid IS_ERR() condition containing errno.
+ * The implementation uses @dev and @clk_bulk_data.id to determine the
+ * clock consumer, and thereby the clock producer.
+ * The clock returned is stored in each @clk_bulk_data.clk field.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * devm_clk_bulk_get_optional should not be called from within interrupt context.
+ */
+int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
+/**
+ * devm_clk_bulk_get_all - managed get multiple clk consumers
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Returns a positive value for the number of clocks obtained while the
+ * clock references are stored in the clk_bulk_data table in the @clks field.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * This helper function allows drivers to get several clk
+ * consumers in one operation with management; the clks will
+ * automatically be freed when the device is unbound.
+ */
+int __must_check devm_clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks);
+
+/**
+ * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed)
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Returns a positive value for the number of clocks obtained while the
+ * clock references are stored in the clk_bulk_data table in the @clks field.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * This helper function allows drivers to get all clocks of the
+ * consumer and enables them in one operation with management.
+ * The clks will automatically be disabled and freed when the device
+ * is unbound.
+ */
+int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
+ struct clk_bulk_data **clks);
/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Returns a struct clk corresponding to the clock producer, or
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
* valid IS_ERR() condition containing errno. The implementation
* uses @dev and @id to determine the clock consumer, and thereby
* the clock producer. (IOW, @id may be identical strings, but
* clk_get may return different clock producers depending on @dev.)
*
- * Drivers must assume that the clock source is not enabled.
- *
- * devm_clk_get should not be called from within interrupt context.
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
*
* The clock will automatically be freed when the device is unbound
* from the bus.
@@ -316,6 +535,140 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
+ * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared. Drivers must however assume
+ * that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the device
+ * is unbound from the bus.
+ */
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
+
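A probe-function sketch showing the boilerplate these helpers remove; foo_probe and the "bus" consumer ID are invented for the example:

        static int foo_probe(struct platform_device *pdev)
        {
                struct clk *clk;

                clk = devm_clk_get_enabled(&pdev->dev, "bus");
                if (IS_ERR(clk))
                        return PTR_ERR(clk);

                /* no clk_disable_unprepare()/clk_put() needed anywhere */
                return 0;
        }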
+/**
+ * devm_clk_get_optional - lookup and obtain a managed reference to an optional
+ * clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get().
+ *
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_prepared().
+ *
+ * The returned clk (if valid) is prepared. Drivers must however
+ * assume that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the
+ * device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_optional_enabled - devm_clk_get_optional() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
+ * clk_set_rate() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ * @rate: new clock rate
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
+ *
+ * The returned clk (if valid) is prepared, enabled, and its rate has been set.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
+ const char *id,
+ unsigned long rate);
+
+/**
* devm_get_clk_from_child - lookup and obtain a managed reference to a
* clock producer from child node.
* @dev: device for clock "consumer"
@@ -423,6 +776,19 @@ void clk_put(struct clk *clk);
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
/**
+ * clk_bulk_put_all - "free" all the clock sources
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Note: drivers must ensure that all clk_bulk_enable calls made on this
+ * clock source are balanced by clk_bulk_disable calls prior to calling
+ * this function.
+ *
+ * clk_bulk_put_all should not be called from within interrupt context.
+ */
+void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
+
+/**
* devm_clk_put - "free" a managed clock source
* @dev: device used to acquire the clock
* @clk: clock source acquired with devm_clk_get()
@@ -468,11 +834,31 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
* @clk: clock source
* @rate: desired clock rate in Hz
*
+ * Updating the rate starts at the top-most affected clock and then
+ * walks the tree down to the bottom-most clock that needs updating.
+ *
* Returns success (0) or negative errno.
*/
int clk_set_rate(struct clk *clk, unsigned long rate);
/**
+ * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
+ * clock source
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * This helper function allows drivers to atomically set the rate of a producer
+ * and claim exclusivity over the rate control of the producer.
+ *
+ * It is essentially a combination of clk_set_rate() and
+ * clk_rate_exclusive_get(). The caller must balance this call with a call to
+ * clk_rate_exclusive_put().
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
+
+/**
* clk_has_parent - check if a clock is a possible parent for another
* @clk: clock source
* @parent: parent clock source
@@ -482,7 +868,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate);
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent);
+bool clk_has_parent(const struct clk *clk, const struct clk *parent);
/**
* clk_set_rate_range - set a rate range for a clock source
@@ -547,6 +933,23 @@ struct clk *clk_get_parent(struct clk *clk);
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
+/**
+ * clk_save_context - save clock context for poweroff
+ *
+ * Saves the context of the clock registers for power states in which the
+ * contents of the registers will be lost. Occurs deep within the suspend
+ * code so locking is not necessary.
+ */
+int clk_save_context(void);
+
+/**
+ * clk_restore_context - restore clock context after poweroff
+ *
+ * This occurs with all clocks enabled. Occurs deep within the resume code
+ * so locking is not necessary.
+ */
+void clk_restore_context(void);
+
#else /* !CONFIG_HAVE_CLK */
static inline struct clk *clk_get(struct device *dev, const char *id)
@@ -554,8 +957,20 @@ static inline struct clk *clk_get(struct device *dev, const char *id)
return NULL;
}
-static inline int clk_bulk_get(struct device *dev, int num_clks,
- struct clk_bulk_data *clks)
+static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
+static inline int __must_check clk_bulk_get_optional(struct device *dev,
+ int num_clks, struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
+static inline int __must_check clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
{
return 0;
}
@@ -565,8 +980,64 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
-static inline int devm_clk_bulk_get(struct device *dev, int num_clks,
- struct clk_bulk_data *clks)
+static inline struct clk *devm_clk_get_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *
+devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id,
+ unsigned long rate)
+{
+ return NULL;
+}
+
+static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
+static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
+ int num_clks, struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
+static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+ return 0;
+}
+
+static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
+ struct clk_bulk_data **clks)
{
return 0;
}
@@ -581,6 +1052,8 @@ static inline void clk_put(struct clk *clk) {}
static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
+static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
+
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
static inline int clk_enable(struct clk *clk)
@@ -588,7 +1061,8 @@ static inline int clk_enable(struct clk *clk)
return 0;
}
-static inline int clk_bulk_enable(int num_clks, struct clk_bulk_data *clks)
+static inline int __must_check clk_bulk_enable(int num_clks,
+ const struct clk_bulk_data *clks)
{
return 0;
}
@@ -597,7 +1071,7 @@ static inline void clk_disable(struct clk *clk) {}
static inline void clk_bulk_disable(int num_clks,
- struct clk_bulk_data *clks) {}
+ const struct clk_bulk_data *clks) {}
static inline unsigned long clk_get_rate(struct clk *clk)
{
@@ -609,6 +1083,11 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate)
return 0;
}
+static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
return 0;
@@ -619,6 +1098,22 @@ static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
return true;
}
+static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
+ unsigned long max)
+{
+ return 0;
+}
+
+static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
return 0;
@@ -633,6 +1128,14 @@ static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
return NULL;
}
+
+static inline int clk_save_context(void)
+{
+ return 0;
+}
+
+static inline void clk_restore_context(void) {}
+
#endif
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
@@ -657,8 +1160,8 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-static inline int clk_bulk_prepare_enable(int num_clks,
- struct clk_bulk_data *clks)
+static inline int __must_check
+clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
int ret;
@@ -673,12 +1176,42 @@ static inline int clk_bulk_prepare_enable(int num_clks,
}
static inline void clk_bulk_disable_unprepare(int num_clks,
- struct clk_bulk_data *clks)
+ const struct clk_bulk_data *clks)
{
clk_bulk_disable(num_clks, clks);
clk_bulk_unprepare(num_clks, clks);
}
+/**
+ * clk_drop_range - Reset any range set on that clock
+ * @clk: clock source
+ *
+ * Returns success (0) or negative errno.
+ */
+static inline int clk_drop_range(struct clk *clk)
+{
+ return clk_set_rate_range(clk, 0, ULONG_MAX);
+}
+
+/**
+ * clk_get_optional - lookup and obtain a reference to an optional clock
+ * producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Behaves the same as clk_get() except where there is no clock producer. In
+ * this case, instead of returning -ENOENT, the function returns NULL.
+ */
+static inline struct clk *clk_get_optional(struct device *dev, const char *id)
+{
+ struct clk *clk = clk_get(dev, id);
+
+ if (clk == ERR_PTR(-ENOENT))
+ return NULL;
+
+ return clk;
+}
+
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
diff --git a/include/linux/clk/analogbits-wrpll-cln28hpc.h b/include/linux/clk/analogbits-wrpll-cln28hpc.h
new file mode 100644
index 000000000000..03279097e138
--- /dev/null
+++ b/include/linux/clk/analogbits-wrpll-cln28hpc.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H
+#define __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H
+
+#include <linux/types.h>
+
+/* DIVQ_VALUES: number of valid DIVQ values */
+#define DIVQ_VALUES 6
+
+/*
+ * Bit definitions for struct wrpll_cfg.flags
+ *
+ * WRPLL_FLAGS_BYPASS_FLAG: if set, the PLL is either in bypass, or should be
+ * programmed to enter bypass
+ * WRPLL_FLAGS_RESET_FLAG: if set, the PLL is in reset
+ * WRPLL_FLAGS_INT_FEEDBACK_FLAG: if set, the PLL is configured for internal
+ * feedback mode
+ * WRPLL_FLAGS_EXT_FEEDBACK_FLAG: if set, the PLL is configured for external
+ * feedback mode (not yet supported by this driver)
+ */
+#define WRPLL_FLAGS_BYPASS_SHIFT 0
+#define WRPLL_FLAGS_BYPASS_MASK BIT(WRPLL_FLAGS_BYPASS_SHIFT)
+#define WRPLL_FLAGS_RESET_SHIFT 1
+#define WRPLL_FLAGS_RESET_MASK BIT(WRPLL_FLAGS_RESET_SHIFT)
+#define WRPLL_FLAGS_INT_FEEDBACK_SHIFT 2
+#define WRPLL_FLAGS_INT_FEEDBACK_MASK BIT(WRPLL_FLAGS_INT_FEEDBACK_SHIFT)
+#define WRPLL_FLAGS_EXT_FEEDBACK_SHIFT 3
+#define WRPLL_FLAGS_EXT_FEEDBACK_MASK BIT(WRPLL_FLAGS_EXT_FEEDBACK_SHIFT)
+
+/**
+ * struct wrpll_cfg - WRPLL configuration values
+ * @divr: reference divider value (6 bits), as presented to the PLL signals
+ * @divf: feedback divider value (9 bits), as presented to the PLL signals
+ * @divq: output divider value (3 bits), as presented to the PLL signals
+ * @flags: PLL configuration flags. See above for more information
+ * @range: PLL loop filter range. See below for more information
+ * @output_rate_cache: cached output rates, swept across DIVQ
+ * @parent_rate: PLL refclk rate for which values are valid
+ * @max_r: maximum possible R divider value, given @parent_rate
+ * @init_r: initial R divider value to start the search from
+ *
+ * @divr, @divf, @divq, @range represent what the PLL expects to see
+ * on its input signals. Thus @divr and @divf are the actual divisors
+ * minus one. @divq is a power-of-two divider; for example, 1 =
+ * divide-by-2 and 6 = divide-by-64. 0 is an invalid @divq value.
+ *
+ * When initially passing a struct wrpll_cfg record, the
+ * record should be zero-initialized with the exception of the @flags
+ * field. The only flag bits that need to be set are either
+ * WRPLL_FLAGS_INT_FEEDBACK or WRPLL_FLAGS_EXT_FEEDBACK.
+ */
+struct wrpll_cfg {
+ u8 divr;
+ u8 divq;
+ u8 range;
+ u8 flags;
+ u16 divf;
+/* private: */
+ u32 output_rate_cache[DIVQ_VALUES];
+ unsigned long parent_rate;
+ u8 max_r;
+ u8 init_r;
+};
+
+int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
+ unsigned long parent_rate);
+
+unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c);
+
+unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c,
+ unsigned long parent_rate);
+
+#endif /* __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H */
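A usage sketch under the rules documented above (zero-initialize everything except the feedback flag); the 33.33 MHz reference and 1 GHz target are illustrative only:

        struct wrpll_cfg c = {
                .flags = WRPLL_FLAGS_INT_FEEDBACK_MASK,
        };
        int err;

        err = wrpll_configure_for_rate(&c, 1000000000, 33330000);
        if (err)
                return err;

        /* program c.divr, c.divf, c.divq and c.range into the PLL, then
         * wait up to wrpll_calc_max_lock_us(&c) microseconds for lock */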
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 17f413bbbedf..d60ce9708ea2 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/clk/at91_pmc.h
*
@@ -6,16 +7,16 @@
*
* Power Management Controller (PMC) - System peripherals registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef AT91_PMC_H
#define AT91_PMC_H
+#include <linux/bits.h>
+
+#define AT91_PMC_V1 (1) /* PMC version 1 */
+#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */
+
#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
@@ -34,21 +35,40 @@
#define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */
#define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */
+#define AT91_PMC_PLL_CTRL0 0x0C /* PLL Control Register 0 [for SAM9X60] */
+#define AT91_PMC_PLL_CTRL0_ENPLL (1 << 28) /* Enable PLL */
+#define AT91_PMC_PLL_CTRL0_ENPLLCK (1 << 29) /* Enable PLL clock for PMC */
+#define AT91_PMC_PLL_CTRL0_ENLOCK (1 << 31) /* Enable PLL lock */
+
+#define AT91_PMC_PLL_CTRL1 0x10 /* PLL Control Register 1 [for SAM9X60] */
+
#define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */
#define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */
#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
+#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */
+#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */
+#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */
+
#define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */
#define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */
#define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */
#define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */
#define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */
+#define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */
+#define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */
+#define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */
+#define AT91_PMC_PLL_UPDT_ID_MSK (0xf) /* PLL ID mask */
+#define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */
+
#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */
#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */
#define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */
+#define AT91_PMC_WAITMODE (1 << 2) /* Wait Mode Command */
#define AT91_PMC_MOSCRCEN (1 << 3) /* Main On-Chip RC Oscillator Enable [some SAM9] */
#define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */
+#define AT91_PMC_KEY_MASK (0xff << 16)
#define AT91_PMC_KEY (0x37 << 16) /* MOR Writing Key */
#define AT91_PMC_MOSCSEL (1 << 24) /* Main Oscillator Selection [some SAM9] */
#define AT91_PMC_CFDEN (1 << 25) /* Clock Failure Detector Enable [some SAM9] */
@@ -58,6 +78,10 @@
#define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */
#define AT91_CKGR_PLLAR 0x28 /* PLL A Register */
+
+#define AT91_PMC_RATIO 0x2c /* Processor clock ratio register [SAMA7G5 only] */
+#define AT91_PMC_RATIO_RATIO (0xf) /* CPU clock ratio. */
+
#define AT91_CKGR_PLLBR 0x2c /* PLL B Register */
#define AT91_PMC_DIV (0xff << 0) /* Divider */
#define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */
@@ -72,6 +96,8 @@
#define AT91_PMC_USBDIV_4 (2 << 28)
#define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */
+#define AT91_PMC_CPU_CKR 0x28 /* CPU Clock Register */
+
#define AT91_PMC_MCKR 0x30 /* Master Clock Register */
#define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */
#define AT91_PMC_CSS_SLOW (0 << 0)
@@ -115,6 +141,34 @@
#define AT91_PMC_PLLADIV2_ON (1 << 12)
#define AT91_PMC_H32MXDIV BIT(24)
+#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */
+#define AT91_PMC_MCR_V2_ID_MSK (0xF)
+#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK)
+#define AT91_PMC_MCR_V2_CMD (1 << 7)
+#define AT91_PMC_MCR_V2_DIV (7 << 8)
+#define AT91_PMC_MCR_V2_DIV1 (0 << 8)
+#define AT91_PMC_MCR_V2_DIV2 (1 << 8)
+#define AT91_PMC_MCR_V2_DIV4 (2 << 8)
+#define AT91_PMC_MCR_V2_DIV8 (3 << 8)
+#define AT91_PMC_MCR_V2_DIV16 (4 << 8)
+#define AT91_PMC_MCR_V2_DIV32 (5 << 8)
+#define AT91_PMC_MCR_V2_DIV64 (6 << 8)
+#define AT91_PMC_MCR_V2_DIV3 (7 << 8)
+#define AT91_PMC_MCR_V2_CSS (0x1F << 16)
+#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16)
+#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16)
+#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16)
+#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16)
+#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16)
+#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16)
+#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16)
+#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16)
+#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16)
+#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16)
+#define AT91_PMC_MCR_V2_EN (1 << 28)
+
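For illustration, a value for this register might be composed as below; the regmap handle and the choice of MCK1 sourced from SYSPLL divided by two are assumptions, not part of the patch:

        u32 mcr = AT91_PMC_MCR_V2_ID(1) |       /* select MCK1 */
                  AT91_PMC_MCR_V2_CMD |         /* write command */
                  AT91_PMC_MCR_V2_CSS_SYSPLL |  /* source: SYSPLL */
                  AT91_PMC_MCR_V2_DIV2 |        /* divide by 2 */
                  AT91_PMC_MCR_V2_EN;           /* enable the output */

        regmap_write(regmap, AT91_PMC_MCR_V2, mcr);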
+#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */
+
#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
#define AT91_PMC_USBS_PLLA (0 << 0)
@@ -153,8 +207,23 @@
#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */
#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */
#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */
+#define AT91_PMC_MCKXRDY (1 << 26) /* Master Clock x [x=1..4] Ready Status */
#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
+#define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */
+#define AT91_PMC_FSTT(n) BIT(n)
+#define AT91_PMC_RTTAL BIT(16)
+#define AT91_PMC_RTCAL BIT(17) /* RTC Alarm Enable */
+#define AT91_PMC_USBAL BIT(18) /* USB Resume Enable */
+#define AT91_PMC_SDMMC_CD BIT(19) /* SDMMC Card Detect Enable */
+#define AT91_PMC_LPM BIT(20) /* Low-power Mode */
+#define AT91_PMC_RXLP_MCE BIT(24) /* Backup UART Receive Enable */
+#define AT91_PMC_ACC_CE BIT(25) /* ACC Enable */
+
+#define AT91_PMC_FSPR 0x74 /* Fast Startup Polarity Reg */
+
+#define AT91_PMC_FS_INPUT_MASK 0x7ff
+
#define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */
#define AT91_PMC_PROT 0xe4 /* Write Protect Mode Register [some SAM9] */
@@ -166,23 +235,42 @@
#define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */
#define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */
+#define AT91_PMC_PLL_ISR0 0xEC /* PLL Interrupt Status Register 0 [SAM9X60 only] */
+
#define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only]*/
#define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Enable Register 1 */
#define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */
#define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */
#define AT91_PMC_PCR_PID_MASK 0x3f
-#define AT91_PMC_PCR_GCKCSS_OFFSET 8
-#define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET)
-#define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */
#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
-#define AT91_PMC_PCR_DIV_OFFSET 16
-#define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET)
-#define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */
-#define AT91_PMC_PCR_GCKDIV_OFFSET 20
-#define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET)
-#define AT91_PMC_PCR_GCKDIV(n) ((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */
+#define AT91_PMC_PCR_GCKDIV_MASK GENMASK(27, 20)
#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */
+#define AT91_PMC_AUDIO_PLL0 0x14c
+#define AT91_PMC_AUDIO_PLL_PLLEN (1 << 0)
+#define AT91_PMC_AUDIO_PLL_PADEN (1 << 1)
+#define AT91_PMC_AUDIO_PLL_PMCEN (1 << 2)
+#define AT91_PMC_AUDIO_PLL_RESETN (1 << 3)
+#define AT91_PMC_AUDIO_PLL_ND_OFFSET 8
+#define AT91_PMC_AUDIO_PLL_ND_MASK (0x7f << AT91_PMC_AUDIO_PLL_ND_OFFSET)
+#define AT91_PMC_AUDIO_PLL_ND(n) ((n) << AT91_PMC_AUDIO_PLL_ND_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPMC_OFFSET 16
+#define AT91_PMC_AUDIO_PLL_QDPMC_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPMC(n) ((n) << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+
+#define AT91_PMC_AUDIO_PLL1 0x150
+#define AT91_PMC_AUDIO_PLL_FRACR_MASK 0x3fffff
+#define AT91_PMC_AUDIO_PLL_QDPAD_OFFSET 24
+#define AT91_PMC_AUDIO_PLL_QDPAD_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET AT91_PMC_AUDIO_PLL_QDPAD_OFFSET
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK (0x3 << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET 26
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX 0x1f
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK (AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET)
+
#endif
diff --git a/include/linux/clk/bcm2835.h b/include/linux/clk/bcm2835.h
deleted file mode 100644
index aa937f6c17da..000000000000
--- a/include/linux/clk/bcm2835.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2010 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __LINUX_CLK_BCM2835_H_
-#define __LINUX_CLK_BCM2835_H_
-
-void __init bcm2835_init_clocks(void);
-
-#endif
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index e0c362363c38..eae9652c70cd 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -1,12 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <s.nawrocki@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#ifndef __CLK_CONF_H
+#define __CLK_CONF_H
+
#include <linux/types.h>
struct device_node;
@@ -20,3 +20,5 @@ static inline int of_clk_set_defaults(struct device_node *node,
return 0;
}
#endif
+
+#endif /* __CLK_CONF_H */
diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h
new file mode 100644
index 000000000000..787a81116b00
--- /dev/null
+++ b/include/linux/clk/davinci.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock drivers for TI DaVinci PLL and PSC controllers
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_CLK_DAVINCI_PLL_H___
+#define __LINUX_CLK_DAVINCI_PLL_H___
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* function for registering clocks in early boot */
+int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+
+#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */
diff --git a/include/linux/clk/imx.h b/include/linux/clk/imx.h
new file mode 100644
index 000000000000..75a0d9696552
--- /dev/null
+++ b/include/linux/clk/imx.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Freescale Semiconductor, Inc.
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_IMX_H
+#define __LINUX_CLK_IMX_H
+
+#include <linux/types.h>
+
+void imx6sl_set_wait_clk(bool enter);
+
+#endif
diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h
deleted file mode 100644
index 607321fa2c2b..000000000000
--- a/include/linux/clk/mmp.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __CLK_MMP_H
-#define __CLK_MMP_H
-
-#include <linux/types.h>
-
-extern void pxa168_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-extern void pxa910_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys,
- phys_addr_t apbcp_phys);
-extern void mmp2_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-
-#endif
diff --git a/include/linux/clk/mxs.h b/include/linux/clk/mxs.h
index 5138a90e018c..2674e607ffb1 100644
--- a/include/linux/clk/mxs.h
+++ b/include/linux/clk/mxs.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_MXS_H
diff --git a/include/linux/clk/pxa.h b/include/linux/clk/pxa.h
new file mode 100644
index 000000000000..736b8bb91bd7
--- /dev/null
+++ b/include/linux/clk/pxa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern int pxa25x_clocks_init(void __iomem *regs);
+extern int pxa27x_clocks_init(void __iomem *regs);
+extern int pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg);
+
+#ifdef CONFIG_PXA3xx
+extern unsigned pxa3xx_get_clk_frequency_khz(int);
+extern void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask);
+#else
+#define pxa3xx_get_clk_frequency_khz(x) (0)
+#define pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask) do { } while (0)
+#endif
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index 9ebf1f8243bb..69d8159deee3 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -1,20 +1,18 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright 2013 Ideas On Board SPRL
* Copyright 2013, 2014 Horms Solutions Ltd.
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Contact: Simon Horman <horms@verge.net.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_CLK_RENESAS_H_
#define __LINUX_CLK_RENESAS_H_
+#include <linux/clk-provider.h>
#include <linux/types.h>
+#include <linux/units.h>
struct device;
struct device_node;
@@ -36,4 +34,147 @@ void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev);
#define cpg_mssr_attach_dev NULL
#define cpg_mssr_detach_dev NULL
#endif
+
+/**
+ * struct rzv2h_pll_limits - PLL parameter constraints
+ *
+ * This structure defines the minimum and maximum allowed values for
+ * various parameters used to configure a PLL. These limits ensure
+ * the PLL operates within valid and stable ranges.
+ *
+ * @fout: Output frequency range (in MHz)
+ * @fout.min: Minimum allowed output frequency
+ * @fout.max: Maximum allowed output frequency
+ *
+ * @fvco: PLL oscillation frequency range (in MHz)
+ * @fvco.min: Minimum allowed VCO frequency
+ * @fvco.max: Maximum allowed VCO frequency
+ *
+ * @m: Main-divider range
+ * @m.min: Minimum main-divider value
+ * @m.max: Maximum main-divider value
+ *
+ * @p: Pre-divider range
+ * @p.min: Minimum pre-divider value
+ * @p.max: Maximum pre-divider value
+ *
+ * @s: Divider range
+ * @s.min: Minimum divider value
+ * @s.max: Maximum divider value
+ *
+ * @k: Delta-sigma modulator range (signed)
+ * @k.min: Minimum delta-sigma value
+ * @k.max: Maximum delta-sigma value
+ */
+struct rzv2h_pll_limits {
+ struct {
+ u32 min;
+ u32 max;
+ } fout;
+
+ struct {
+ u32 min;
+ u32 max;
+ } fvco;
+
+ struct {
+ u16 min;
+ u16 max;
+ } m;
+
+ struct {
+ u8 min;
+ u8 max;
+ } p;
+
+ struct {
+ u8 min;
+ u8 max;
+ } s;
+
+ struct {
+ s16 min;
+ s16 max;
+ } k;
+};
+
+/**
+ * struct rzv2h_pll_pars - PLL configuration parameters
+ *
+ * This structure contains the configuration parameters for the
+ * Phase-Locked Loop (PLL), used to achieve a specific output frequency.
+ *
+ * @m: Main divider value
+ * @p: Pre-divider value
+ * @s: Output divider value
+ * @k: Delta-sigma modulation value
+ * @freq_millihz: Calculated PLL output frequency in millihertz
+ * @error_millihz: Frequency error from target in millihertz (signed)
+ */
+struct rzv2h_pll_pars {
+ u16 m;
+ u8 p;
+ u8 s;
+ s16 k;
+ u64 freq_millihz;
+ s64 error_millihz;
+};
+
+/**
+ * struct rzv2h_pll_div_pars - PLL parameters with post-divider
+ *
+ * This structure is used for PLLs that include an additional post-divider
+ * stage after the main PLL block. It contains both the PLL configuration
+ * parameters and the resulting frequency/error values after the divider.
+ *
+ * @pll: Main PLL configuration parameters (see struct rzv2h_pll_pars)
+ *
+ * @div: Post-divider configuration and result
+ * @div.divider_value: Divider applied to the PLL output
+ * @div.freq_millihz: Output frequency after divider in millihertz
+ * @div.error_millihz: Frequency error from target in millihertz (signed)
+ */
+struct rzv2h_pll_div_pars {
+ struct rzv2h_pll_pars pll;
+ struct {
+ u8 divider_value;
+ u64 freq_millihz;
+ s64 error_millihz;
+ } div;
+};
+
+#define RZV2H_CPG_PLL_DSI_LIMITS(name) \
+ static const struct rzv2h_pll_limits (name) = { \
+ .fout = { .min = 25 * MEGA, .max = 375 * MEGA }, \
+ .fvco = { .min = 1600 * MEGA, .max = 3200 * MEGA }, \
+ .m = { .min = 64, .max = 533 }, \
+ .p = { .min = 1, .max = 4 }, \
+ .s = { .min = 0, .max = 6 }, \
+ .k = { .min = -32768, .max = 32767 }, \
+	}
+
+#ifdef CONFIG_CLK_RZV2H
+bool rzv2h_get_pll_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_pars *pars, u64 freq_millihz);
+
+bool rzv2h_get_pll_divs_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_div_pars *pars,
+ const u8 *table, u8 table_size, u64 freq_millihz);
+#else
+static inline bool rzv2h_get_pll_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_pars *pars,
+ u64 freq_millihz)
+{
+ return false;
+}
+
+static inline bool rzv2h_get_pll_divs_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_div_pars *pars,
+ const u8 *table, u8 table_size,
+ u64 freq_millihz)
+{
+ return false;
+}
+#endif
+
#endif
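A sketch of how a clock driver might combine the limits macro with the helper; the function name and the register-programming step are placeholders:

        RZV2H_CPG_PLL_DSI_LIMITS(dsi_limits);

        static int foo_dsi_pll_set_rate(u64 rate_millihz)
        {
                struct rzv2h_pll_pars pars;

                if (!rzv2h_get_pll_pars(&dsi_limits, &pars, rate_millihz))
                        return -EINVAL;

                /* program pars.m, pars.p, pars.s and pars.k here */
                return 0;
        }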
diff --git a/include/linux/clk/samsung.h b/include/linux/clk/samsung.h
new file mode 100644
index 000000000000..0cf7aac83439
--- /dev/null
+++ b/include/linux/clk/samsung.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+ */
+
+#ifndef __LINUX_CLK_SAMSUNG_H_
+#define __LINUX_CLK_SAMSUNG_H_
+
+#include <linux/compiler_types.h>
+
+struct device_node;
+
+#ifdef CONFIG_S3C64XX_COMMON_CLK
+void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
+ unsigned long xusbxti_f, bool s3c6400,
+ void __iomem *base);
+#else
+static inline void s3c64xx_clk_init(struct device_node *np,
+ unsigned long xtal_f,
+ unsigned long xusbxti_f,
+ bool s3c6400, void __iomem *base) { }
+#endif /* CONFIG_S3C64XX_COMMON_CLK */
+
+#endif /* __LINUX_CLK_SAMSUNG_H_ */
diff --git a/include/linux/clk/spear.h b/include/linux/clk/spear.h
new file mode 100644
index 000000000000..eaf95ca656f8
--- /dev/null
+++ b/include/linux/clk/spear.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 STMicroelectronics - All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_SPEAR_H
+#define __LINUX_CLK_SPEAR_H
+
+#ifdef CONFIG_ARCH_SPEAR3XX
+void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base);
+#else
+static inline void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base) {}
+#endif
+
+#ifdef CONFIG_ARCH_SPEAR6XX
+void __init spear6xx_clk_init(void __iomem *misc_base);
+#else
+static inline void __init spear6xx_clk_init(void __iomem *misc_base) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1310
+void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base);
+#else
+static inline void spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+void __init spear1340_clk_init(void __iomem *misc_base);
+#else
+static inline void spear1340_clk_init(void __iomem *misc_base) {}
+#endif
+
+#endif
diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h
new file mode 100644
index 000000000000..57c8ec44ab4e
--- /dev/null
+++ b/include/linux/clk/sunxi-ng.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved.
+ */
+
+#ifndef _LINUX_CLK_SUNXI_NG_H_
+#define _LINUX_CLK_SUNXI_NG_H_
+
+int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
+int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
+
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);
+
+#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index d23c9cf26993..3650e926e93f 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (c) 2012-2020, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __LINUX_CLK_TEGRA_H_
@@ -53,6 +42,7 @@ struct tegra_cpu_car_ops {
#endif
};
+#ifdef CONFIG_ARCH_TEGRA
extern struct tegra_cpu_car_ops *tegra_cpu_car_ops;
static inline void tegra_wait_cpu_in_reset(u32 cpu)
@@ -94,8 +84,29 @@ static inline void tegra_disable_cpu_clock(u32 cpu)
tegra_cpu_car_ops->disable_clock(cpu);
}
+#else
+static inline void tegra_wait_cpu_in_reset(u32 cpu)
+{
+}
-#ifdef CONFIG_PM_SLEEP
+static inline void tegra_put_cpu_in_reset(u32 cpu)
+{
+}
+
+static inline void tegra_cpu_out_of_reset(u32 cpu)
+{
+}
+
+static inline void tegra_enable_cpu_clock(u32 cpu)
+{
+}
+
+static inline void tegra_disable_cpu_clock(u32 cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_PM_SLEEP)
static inline bool tegra_cpu_rail_off_ready(void)
{
if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready))
@@ -119,14 +130,134 @@ static inline void tegra_cpu_clock_resume(void)
tegra_cpu_car_ops->resume();
}
+#else
+static inline bool tegra_cpu_rail_off_ready(void)
+{
+ return false;
+}
+
+static inline void tegra_cpu_clock_suspend(void)
+{
+}
+
+static inline void tegra_cpu_clock_resume(void)
+{
+}
#endif
-extern void tegra210_xusb_pll_hw_control_enable(void);
-extern void tegra210_xusb_pll_hw_sequence_start(void);
-extern void tegra210_sata_pll_hw_control_enable(void);
-extern void tegra210_sata_pll_hw_sequence_start(void);
-extern void tegra210_set_sata_pll_seq_sw(bool state);
-extern void tegra210_put_utmipll_in_iddq(void);
-extern void tegra210_put_utmipll_out_iddq(void);
+struct clk;
+struct tegra_emc;
+
+typedef long (tegra20_clk_emc_round_cb)(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg);
+typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
+typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
+
+struct tegra210_clk_emc_config {
+ unsigned long rate;
+ bool same_freq;
+ u32 value;
+
+ unsigned long parent_rate;
+ u8 parent;
+};
+
+struct tegra210_clk_emc_provider {
+ struct module *owner;
+ struct device *dev;
+
+ struct tegra210_clk_emc_config *configs;
+ unsigned int num_configs;
+
+ int (*set_rate)(struct device *dev,
+ const struct tegra210_clk_emc_config *config);
+};
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
+void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg);
+int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+#else
+static inline void
+tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg)
+{
+}
+
+static inline int
+tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_TEGRA124_CLK_EMC
+void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb);
+#else
+static inline void
+tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+int tegra210_plle_hw_sequence_start(void);
+bool tegra210_plle_hw_sequence_is_enabled(void);
+void tegra210_xusb_pll_hw_control_enable(void);
+void tegra210_xusb_pll_hw_sequence_start(void);
+void tegra210_sata_pll_hw_control_enable(void);
+void tegra210_sata_pll_hw_sequence_start(void);
+void tegra210_set_sata_pll_seq_sw(bool state);
+void tegra210_put_utmipll_in_iddq(void);
+void tegra210_put_utmipll_out_iddq(void);
+int tegra210_clk_handle_mbist_war(unsigned int id);
+void tegra210_clk_emc_dll_enable(bool flag);
+void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
+void tegra210_clk_emc_update_setting(u32 emc_src_value);
+
+int tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider);
+void tegra210_clk_emc_detach(struct clk *clk);
+#else
+static inline int tegra210_plle_hw_sequence_start(void)
+{
+ return 0;
+}
+
+static inline bool tegra210_plle_hw_sequence_is_enabled(void)
+{
+ return false;
+}
+
+static inline int tegra210_clk_handle_mbist_war(unsigned int id)
+{
+ return 0;
+}
+
+static inline int
+tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider)
+{
+ return 0;
+}
+
+static inline void tegra210_xusb_pll_hw_control_enable(void) {}
+static inline void tegra210_xusb_pll_hw_sequence_start(void) {}
+static inline void tegra210_sata_pll_hw_control_enable(void) {}
+static inline void tegra210_sata_pll_hw_sequence_start(void) {}
+static inline void tegra210_set_sata_pll_seq_sw(bool state) {}
+static inline void tegra210_put_utmipll_in_iddq(void) {}
+static inline void tegra210_put_utmipll_out_iddq(void) {}
+static inline void tegra210_clk_emc_dll_enable(bool flag) {}
+static inline void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value) {}
+static inline void tegra210_clk_emc_update_setting(u32 emc_src_value) {}
+static inline void tegra210_clk_emc_detach(struct clk *clk) {}
+#endif
#endif /* __LINUX_CLK_TEGRA_H_ */
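
To show how the new EMC provider interface above fits together: a memory controller driver fills in a struct tegra210_clk_emc_provider and attaches it to the EMC clock, after which rate changes are routed through its set_rate() callback. A hedged sketch, with invented my_emc_* names and a config table assumed to come from firmware:

#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/module.h>

static int my_emc_set_rate(struct device *dev,
			   const struct tegra210_clk_emc_config *config)
{
	/* Program the EMC timing registers for config->rate here. */
	return 0;
}

static struct tegra210_clk_emc_provider my_emc_provider = {
	.owner = THIS_MODULE,
	.set_rate = my_emc_set_rate,
};

static int my_emc_attach(struct device *dev, struct clk *emc_clk,
			 struct tegra210_clk_emc_config *configs,
			 unsigned int num_configs)
{
	my_emc_provider.dev = dev;
	my_emc_provider.configs = configs;
	my_emc_provider.num_configs = num_configs;

	return tegra210_clk_emc_attach(emc_clk, &my_emc_provider);
}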
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index d18da839b810..54a3fa370004 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI clock drivers support
*
* Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_CLK_TI_H__
#define __LINUX_CLK_TI_H__
@@ -21,11 +13,14 @@
/**
* struct clk_omap_reg - OMAP register declaration
* @offset: offset from the master IP module base address
+ * @bit: register bit offset
* @index: index of the master IP module
+ * @flags: register flags
*/
struct clk_omap_reg {
void __iomem *ptr;
u16 offset;
+ u8 bit;
u8 index;
u8 flags;
};
@@ -39,14 +34,14 @@ struct clk_omap_reg {
* @clk_ref: struct clk_hw pointer to the clock's reference clock input
* @control_reg: register containing the DPLL mode bitfield
* @enable_mask: mask of the DPLL mode bitfield in @control_reg
- * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
- * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
+ * @last_rounded_rate: cache of the last rate result of omap2_dpll_determine_rate()
+ * @last_rounded_m: cache of the last M result of omap2_dpll_determine_rate()
* @last_rounded_m4xen: cache of the last M4X result of
- * omap4_dpll_regm4xen_round_rate()
+ * omap4_dpll_regm4xen_determine_rate()
* @last_rounded_lpmode: cache of the last lpmode result of
* omap4_dpll_lpmode_recalc()
* @max_multiplier: maximum valid non-bypass multiplier value (actual)
- * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
+ * @last_rounded_n: cache of the last N result of omap2_dpll_determine_rate()
* @min_divider: minimum valid non-bypass divider value (actual)
* @max_divider: maximum valid non-bypass divider value (actual)
* @max_rate: maximum clock rate for the DPLL
@@ -63,6 +58,17 @@ struct clk_omap_reg {
* @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
* @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
* @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
+ * @ssc_deltam_reg: register containing the DPLL SSC frequency spreading
+ * @ssc_modfreq_reg: register containing the DPLL SSC modulation frequency
+ * @ssc_modfreq_mant_mask: mask of the mantissa component in @ssc_modfreq_reg
+ * @ssc_modfreq_exp_mask: mask of the exponent component in @ssc_modfreq_reg
+ * @ssc_enable_mask: mask of the DPLL SSC enable bit in @control_reg
+ * @ssc_downspread_mask: mask of the DPLL SSC low frequency only bit in
+ * @control_reg
+ * @ssc_modfreq: the DPLL SSC frequency modulation in kHz
+ * @ssc_deltam: the DPLL SSC frequency spreading in permille (10th of percent)
+ * @ssc_downspread: restrict the DPLL SSC to low frequency (downward)
+ * spreading only
* @flags: DPLL type/features (see below)
*
* Possible values for @flags:
@@ -110,6 +116,17 @@ struct dpll_data {
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
+ struct clk_omap_reg ssc_deltam_reg;
+ struct clk_omap_reg ssc_modfreq_reg;
+ u32 ssc_deltam_int_mask;
+ u32 ssc_deltam_frac_mask;
+ u32 ssc_modfreq_mant_mask;
+ u32 ssc_modfreq_exp_mask;
+ u32 ssc_enable_mask;
+ u32 ssc_downspread_mask;
+ u32 ssc_modfreq;
+ u32 ssc_deltam;
+ bool ssc_downspread;
u8 flags;
};
@@ -153,12 +170,14 @@ struct clk_hw_omap {
u8 fixed_div;
struct clk_omap_reg enable_reg;
u8 enable_bit;
- u8 flags;
+ unsigned long flags;
struct clk_omap_reg clksel_reg;
struct dpll_data *dpll_data;
const char *clkdm_name;
struct clockdomain *clkdm;
const struct clk_hw_omap_ops *ops;
+ u32 context;
+ int autoidle_count;
};
/*
@@ -203,6 +222,7 @@ enum {
TI_CLKM_PRM,
TI_CLKM_SCRM,
TI_CLKM_CTRL,
+ TI_CLKM_CTRL_AUX,
TI_CLKM_PLLSS,
CLK_MAX_MEMMAPS
};
@@ -211,6 +231,7 @@ enum {
* struct ti_clk_ll_ops - low-level ops for clocks
* @clk_readl: pointer to register read function
* @clk_writel: pointer to register write function
+ * @clk_rmw: pointer to register read-modify-write function
* @clkdm_clk_enable: pointer to clockdomain enable function
* @clkdm_clk_disable: pointer to clockdomain disable function
* @clkdm_lookup: pointer to clockdomain lookup function
@@ -226,6 +247,7 @@ enum {
struct ti_clk_ll_ops {
u32 (*clk_readl)(const struct clk_omap_reg *reg);
void (*clk_writel)(u32 val, const struct clk_omap_reg *reg);
+ void (*clk_rmw)(u32 val, u32 mask, const struct clk_omap_reg *reg);
int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
int (*clkdm_clk_disable)(struct clockdomain *clkdm,
struct clk *clk);
@@ -238,6 +260,7 @@ struct ti_clk_ll_ops {
#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
+bool omap2_clk_is_hw_omap(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
int omap2_clk_enable_autoidle_all(void);
int omap2_clk_allow_idle(struct clk *clk);
@@ -287,9 +310,17 @@ struct ti_clk_features {
#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
#define TI_CLK_ERRATA_I810 BIT(3)
+#define TI_CLK_CLKCTRL_COMPAT BIT(4)
+#define TI_CLK_DEVICE_TYPE_GP BIT(5)
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
+bool ti_clk_is_in_standby(struct clk *clk);
+int omap3_noncore_dpll_save_context(struct clk_hw *hw);
+void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
+
+int omap3_core_dpll_save_context(struct clk_hw *hw);
+void omap3_core_dpll_restore_context(struct clk_hw *hw);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
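
The clk_rmw hook added to struct ti_clk_ll_ops gives the framework a single read-modify-write primitive instead of open-coded read/write pairs. A plausible platform-side implementation, sketched in terms of the two existing accessors (my_* names are invented; locking and the clkdm hooks are omitted):

#include <linux/clk/ti.h>
#include <linux/io.h>

static u32 my_clk_readl(const struct clk_omap_reg *reg)
{
	return readl_relaxed(reg->ptr);
}

static void my_clk_writel(u32 val, const struct clk_omap_reg *reg)
{
	writel_relaxed(val, reg->ptr);
}

static void my_clk_rmw(u32 val, u32 mask, const struct clk_omap_reg *reg)
{
	u32 v = my_clk_readl(reg);

	v &= ~mask;
	v |= (val & mask);
	my_clk_writel(v, reg);
}

static struct ti_clk_ll_ops my_ll_ops = {
	.clk_readl	= my_clk_readl,
	.clk_writel	= my_clk_writel,
	.clk_rmw	= my_clk_rmw,
	/* clkdm hooks omitted for brevity */
};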
diff --git a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h
index 7a5633b71533..a198dd9255a4 100644
--- a/include/linux/clk/zynq.h
+++ b/include/linux/clk/zynq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2013 Xilinx Inc.
* Copyright (C) 2012 National Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_CLK_ZYNQ_H_
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 2eabc862abdb..45570bc21a43 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -1,18 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/clkdev.h
*
* Copyright (C) 2008 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Helper for the clk API to assist looking up a struct clk.
*/
#ifndef __CLKDEV_H
#define __CLKDEV_H
-#include <asm/clkdev.h>
+#include <linux/slab.h>
struct clk;
struct clk_hw;
@@ -33,11 +30,6 @@ struct clk_lookup {
.clk = c, \
}
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
@@ -52,9 +44,6 @@ int clk_add_alias(const char *, const char *, const char *, struct device *);
int clk_register_clkdev(struct clk *, const char *, const char *);
int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
-#ifdef CONFIG_COMMON_CLK
-int __clk_get(struct clk *clk);
-void __clk_put(struct clk *clk);
-#endif
-
+int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
+ const char *con_id, const char *dev_id);
#endif
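
The devm_clk_hw_register_clkdev() variant added above ties the lookup's lifetime to the device, replacing the removed clkdev_alloc()/clkdev_hw_alloc() pattern. A usage sketch; the fixed-rate registration is merely one way to obtain a clk_hw:

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_clk_probe(struct platform_device *pdev)
{
	struct clk_hw *hw;

	hw = devm_clk_hw_register_fixed_rate(&pdev->dev, "my-osc", NULL, 0,
					     24000000);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	/* The lookup is dropped automatically when the device is unbound. */
	return devm_clk_hw_register_clkdev(&pdev->dev, hw, "osc", NULL);
}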
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
deleted file mode 100644
index 4d1019d56f7f..000000000000
--- a/include/linux/clock_cooling.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * linux/include/linux/clock_cooling.h
- *
- * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
- *
- * Copyright (C) 2013 Texas Instruments Inc.
- * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * Highly based on cpu_cooling.c.
- * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
- * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#ifndef __CPU_COOLING_H__
-#define __CPU_COOLING_H__
-
-#include <linux/of.h>
-#include <linux/thermal.h>
-#include <linux/cpumask.h>
-
-#ifdef CONFIG_CLOCK_THERMAL
-/**
- * clock_cooling_register - function to create clock cooling device.
- * @dev: struct device pointer to the device used as clock cooling device.
- * @clock_name: string containing the clock used as cooling mechanism.
- */
-struct thermal_cooling_device *
-clock_cooling_register(struct device *dev, const char *clock_name);
-
-/**
- * clock_cooling_unregister - function to remove clock cooling device.
- * @cdev: thermal cooling device pointer.
- */
-void clock_cooling_unregister(struct thermal_cooling_device *cdev);
-
-unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
- unsigned long freq);
-#else /* !CONFIG_CLOCK_THERMAL */
-static inline struct thermal_cooling_device *
-clock_cooling_register(struct device *dev, const char *clock_name)
-{
- return NULL;
-}
-static inline
-void clock_cooling_unregister(struct thermal_cooling_device *cdev)
-{
-}
-static inline
-unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
- unsigned long freq)
-{
- return THERMAL_CSTATE_INVALID;
-}
-#endif /* CONFIG_CLOCK_THERMAL */
-
-#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index a116926598fd..b0df28ddd394 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* linux/include/linux/clockchips.h
*
* This file contains the structure definitions for clockchips.
@@ -11,7 +12,7 @@
#ifdef CONFIG_GENERIC_CLOCKEVENTS
# include <linux/clocksource.h>
-# include <linux/cpumask.h>
+# include <linux/cpumask_types.h>
# include <linux/ktime.h>
# include <linux/notifier.h>
@@ -210,7 +211,7 @@ extern int tick_receive_broadcast(void);
extern void tick_setup_hrtimer_broadcast(void);
extern int tick_check_broadcast_expired(void);
# else
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif
@@ -218,7 +219,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a78cb1848e65..65b7c41471c3 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* linux/include/linux/clocksource.h
*
* This file contains the structure definitions for clocksources.
@@ -16,24 +17,43 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/clocksource_ids.h>
#include <asm/div64.h>
#include <asm/io.h>
+struct clocksource_base;
struct clocksource;
struct module;
-#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
+#if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \
+ defined(CONFIG_GENERIC_GETTIMEOFDAY)
#include <asm/clocksource.h>
#endif
+#include <vdso/clocksource.h>
+
/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
* This is the structure used for system time.
*
- * @name: ptr to clocksource name
- * @list: list head for registration
- * @rating: rating value for selection (higher is better)
+ * @read: Returns a cycle value, passes clocksource as argument
+ * @mask: Bitmask for two's complement
+ * subtraction of non 64 bit counters
+ * @mult: Cycle to nanosecond multiplier
+ * @shift: Cycle to nanosecond divisor (power of two)
+ * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs)
+ * @maxadj: Maximum adjustment value to mult (~11%)
+ * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second.
+ * Zero says to use the default WATCHDOG_THRESHOLD.
+ * @archdata: Optional arch-specific data
+ * @max_cycles: Maximum safe cycle value which won't overflow on
+ * multiplication
+ * @max_raw_delta: Maximum safe delta value for negative motion detection
+ * @name: Pointer to clocksource name
+ * @list: List head for registration (internal)
+ * @freq_khz: Clocksource frequency in kHz.
+ * @rating: Rating value for selection (higher is better)
* To avoid rating inflation the following
* list should give you a guide as to how
* to assign your clocksource a rating
@@ -48,27 +68,29 @@ struct module;
* 400-499: Perfect
* The ideal clocksource. A must-use where
* available.
- * @read: returns a cycle value, passes clocksource as argument
- * @enable: optional function to enable the clocksource
- * @disable: optional function to disable the clocksource
- * @mask: bitmask for two's complement
- * subtraction of non 64 bit counters
- * @mult: cycle to nanosecond multiplier
- * @shift: cycle to nanosecond divisor (power of two)
- * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
- * @maxadj: maximum adjustment value to mult (~11%)
- * @max_cycles: maximum safe cycle value which won't overflow on multiplication
- * @flags: flags describing special properties
- * @archdata: arch-specific data
- * @suspend: suspend function for the clocksource, if necessary
- * @resume: resume function for the clocksource, if necessary
+ * @id: Defaults to CSID_GENERIC. The id value is captured
+ * in certain snapshot functions to allow callers to
+ * validate the clocksource from which the snapshot was
+ * taken.
+ * @flags: Flags describing special properties
+ * @base: Hardware abstraction for clock on which a clocksource
+ * is based
+ * @enable: Optional function to enable the clocksource
+ * @disable: Optional function to disable the clocksource
+ * @suspend: Optional suspend function for the clocksource
+ * @resume: Optional resume function for the clocksource
* @mark_unstable: Optional function to inform the clocksource driver that
* the watchdog marked the clocksource unstable
- * @owner: module reference, must be set by clocksource in modules
+ * @tick_stable: Optional function called periodically from the watchdog
+ * code to provide stable synchronization points
+ * @wd_list: List head to enqueue into the watchdog list (internal)
+ * @cs_last: Last clocksource value for clocksource watchdog
+ * @wd_last: Last watchdog value corresponding to @cs_last
+ * @owner: Module reference, must be set by clocksource in modules
*
 * Note: This struct is not used in hot paths of the timekeeping code
* because the timekeeper caches the hot path fields in its own data
- * structure, so no line cache alignment is required,
+ * structure, so no cache line alignment is required.
*
* The pointer to the clocksource itself is handed to the read
* callback. If you need extra information there you can wrap struct
@@ -77,35 +99,42 @@ struct module;
* structure.
*/
struct clocksource {
- u64 (*read)(struct clocksource *cs);
- u64 mask;
- u32 mult;
- u32 shift;
- u64 max_idle_ns;
- u32 maxadj;
+ u64 (*read)(struct clocksource *cs);
+ u64 mask;
+ u32 mult;
+ u32 shift;
+ u64 max_idle_ns;
+ u32 maxadj;
+ u32 uncertainty_margin;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
- u64 max_cycles;
- const char *name;
- struct list_head list;
- int rating;
- int (*enable)(struct clocksource *cs);
- void (*disable)(struct clocksource *cs);
- unsigned long flags;
- void (*suspend)(struct clocksource *cs);
- void (*resume)(struct clocksource *cs);
- void (*mark_unstable)(struct clocksource *cs);
- void (*tick_stable)(struct clocksource *cs);
+ u64 max_cycles;
+ u64 max_raw_delta;
+ const char *name;
+ struct list_head list;
+ u32 freq_khz;
+ int rating;
+ enum clocksource_ids id;
+ enum vdso_clock_mode vdso_clock_mode;
+ unsigned long flags;
+ struct clocksource_base *base;
+
+ int (*enable)(struct clocksource *cs);
+ void (*disable)(struct clocksource *cs);
+ void (*suspend)(struct clocksource *cs);
+ void (*resume)(struct clocksource *cs);
+ void (*mark_unstable)(struct clocksource *cs);
+ void (*tick_stable)(struct clocksource *cs);
/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
- struct list_head wd_list;
- u64 cs_last;
- u64 wd_last;
+ struct list_head wd_list;
+ u64 cs_last;
+ u64 wd_last;
#endif
- struct module *owner;
+ struct module *owner;
};
/*
@@ -119,7 +148,7 @@ struct clocksource {
#define CLOCK_SOURCE_UNSTABLE 0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
#define CLOCK_SOURCE_RESELECT 0x100
-
+#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
@@ -188,11 +217,13 @@ static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
extern int clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
-extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
@@ -237,6 +268,11 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz
__clocksource_update_freq_scale(cs, 1000, khz);
}
+#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT
+extern void clocksource_arch_init(struct clocksource *cs);
+#else
+static inline void clocksource_arch_init(struct clocksource *cs) { }
+#endif
extern int timekeeping_notify(struct clocksource *clock);
@@ -253,9 +289,6 @@ extern int clocksource_i8253_init(void);
#define TIMER_OF_DECLARE(name, compat, fn) \
OF_DECLARE_1_RET(timer, name, compat, fn)
-#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
- TIMER_OF_DECLARE(name, compat, fn)
-
#ifdef CONFIG_TIMER_PROBE
extern void timer_probe(void);
#else
@@ -265,4 +298,40 @@ static inline void timer_probe(void) {}
#define TIMER_ACPI_DECLARE(name, table_id, fn) \
ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
+static inline unsigned int clocksource_get_max_watchdog_retry(void)
+{
+ /*
+ * When the system is in the boot phase or under heavy workload, there
+ * can be random large latencies during the clocksource/watchdog
+ * read, so allow retries to filter the noise latency. As the
+ * latency's frequency and maximum value go up with the number of
+ * CPUs, scale the number of retries with the number of online
+ * CPUs.
+ */
+ return (ilog2(num_online_cpus()) / 2) + 1;
+}
+
+void clocksource_verify_percpu(struct clocksource *cs);
+
+/**
+ * struct clocksource_base - hardware abstraction for clock on which a clocksource
+ * is based
+ * @id: Defaults to CSID_GENERIC. Some conversion and snapshot
+ * functions require the current clocksource to be based on a
+ * clocksource_base with a particular ID; the id value lets
+ * callers validate the clock from which the snapshot was taken.
+ * @freq_khz: Nominal frequency of the base clock in kHz
+ * @offset: Offset between the base clock and the clocksource
+ * @numerator: Numerator of the clock ratio between base clock and the clocksource
+ * @denominator: Denominator of the clock ratio between base clock and the clocksource
+ */
+struct clocksource_base {
+ enum clocksource_ids id;
+ u32 freq_khz;
+ u64 offset;
+ u32 numerator;
+ u32 denominator;
+};
+
#endif /* _LINUX_CLOCKSOURCE_H */
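
Pulling the reworked structure together: a minimal driver-side clocksource now sets @id alongside the classic fields and lets clocksource_register_hz() derive @mult, @shift and @max_idle_ns. A sketch with invented MMIO details (my_timer_base is assumed to be mapped elsewhere):

#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *my_timer_base;	/* ioremap()ed by the driver */

static u64 my_timer_read(struct clocksource *cs)
{
	return (u64)readl_relaxed(my_timer_base);
}

static struct clocksource my_clocksource = {
	.name	= "my-timer",
	.rating	= 300,
	.id	= CSID_GENERIC,
	.read	= my_timer_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_timer_init(void)
{
	/* mult/shift and max_idle_ns are computed from the frequency. */
	return clocksource_register_hz(&my_clocksource, 24000000);
}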
diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h
new file mode 100644
index 000000000000..c4ef4ae2eded
--- /dev/null
+++ b/include/linux/clocksource_ids.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOCKSOURCE_IDS_H
+#define _LINUX_CLOCKSOURCE_IDS_H
+
+/* Enum to give clocksources a unique identifier */
+enum clocksource_ids {
+ CSID_GENERIC = 0,
+ CSID_ARM_ARCH_COUNTER,
+ CSID_S390_TOD,
+ CSID_X86_TSC_EARLY,
+ CSID_X86_TSC,
+ CSID_X86_KVM_CLK,
+ CSID_X86_ART,
+ CSID_MAX,
+};
+
+#endif
diff --git a/include/linux/closure.h b/include/linux/closure.h
new file mode 100644
index 000000000000..880fe85e35e9
--- /dev/null
+++ b/include/linux/closure.h
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOSURE_H
+#define _LINUX_CLOSURE_H
+
+#include <linux/llist.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/workqueue.h>
+
+/*
+ * Closure is perhaps the most overused and abused term in computer science, but
+ * since I've been unable to come up with anything better you're stuck with it
+ * again.
+ *
+ * What are closures?
+ *
+ * They embed a refcount. The basic idea is they count "things that are in
+ * progress" - in flight bios, some other thread that's doing something else -
+ * anything you might want to wait on.
+ *
+ * The refcount may be manipulated with closure_get() and closure_put().
+ * closure_put() is where many of the interesting things happen, when it causes
+ * the refcount to go to 0.
+ *
+ * Closures can be used to wait on things both synchronously and asynchronously,
+ * and synchronous and asynchronous use can be mixed without restriction. To
+ * wait synchronously, use closure_sync() - you will sleep until your closure's
+ * refcount hits 1.
+ *
+ * To wait asynchronously, use
+ * continue_at(cl, next_function, workqueue);
+ *
+ * passing it, as you might expect, the function to run when nothing is pending
+ * and the workqueue to run that function out of.
+ *
+ * continue_at() also, critically, requires a 'return' immediately following the
+ * location where this macro is referenced, to return to the calling function.
+ * There's good reason for this.
+ *
+ * To use closures asynchronously safely, they must always, while running, hold
+ * a refcount owned by the thread that is running them. Otherwise, suppose
+ * you submit some bios and wish to have a function run when they all complete:
+ *
+ * foo_endio(struct bio *bio)
+ * {
+ * closure_put(cl);
+ * }
+ *
+ * closure_init(cl);
+ *
+ * do_stuff();
+ * closure_get(cl);
+ * bio1->bi_endio = foo_endio;
+ * bio_submit(bio1);
+ *
+ * do_more_stuff();
+ * closure_get(cl);
+ * bio2->bi_endio = foo_endio;
+ * bio_submit(bio2);
+ *
+ * continue_at(cl, complete_some_read, system_wq);
+ *
+ * If closure's refcount started at 0, complete_some_read() could run before the
+ * second bio was submitted - which is almost always not what you want! More
+ * importantly, it wouldn't be possible to say whether the original thread or
+ * complete_some_read()'s thread owned the closure - and whatever state it was
+ * associated with!
+ *
+ * So, closure_init() initializes a closure's refcount to 1 - and when a
+ * closure_fn is run, the refcount will be reset to 1 first.
+ *
+ * Then, the rule is - if you got the refcount with closure_get(), release it
+ * with closure_put() (i.e, in a bio->bi_endio function). If you have a refcount
+ * on a closure because you called closure_init() or you were run out of a
+ * closure - _always_ use continue_at(). Doing so consistently will help
+ * eliminate an entire class of particularly pernicious races.
+ *
+ * Lastly, you might have a wait list dedicated to a specific event, and have no
+ * need for specifying the condition - you just want to wait until someone runs
+ * closure_wake_up() on the appropriate wait list. In that case, just use
+ * closure_wait(). It will return either true or false, depending on whether the
+ * closure was already on a wait list or not - a closure can only be on one wait
+ * list at a time.
+ *
+ * Parents:
+ *
+ * closure_init() takes two arguments - it takes the closure to initialize, and
+ * a (possibly null) parent.
+ *
+ * If parent is non null, the new closure will have a refcount for its lifetime;
+ * a closure is considered to be "finished" when its refcount hits 0 and the
+ * function to run is null. Hence
+ *
+ * continue_at(cl, NULL, NULL);
+ *
+ * returns up the (spaghetti) stack of closures, precisely like normal return
+ * returns up the C stack. continue_at() with non null fn is better thought of
+ * as doing a tail call.
+ *
+ * All this implies that a closure should typically be embedded in a particular
+ * struct (which its refcount will normally control the lifetime of), and that
+ * struct can very much be thought of as a stack frame.
+ */
+
+struct closure;
+struct closure_syncer;
+typedef void (closure_fn) (struct work_struct *);
+extern struct dentry *bcache_debug;
+
+struct closure_waitlist {
+ struct llist_head list;
+};
+
+enum closure_state {
+ /*
+ * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
+ * the thread that owns the closure, and cleared by the thread that's
+ * waking up the closure.
+ *
+ * The rest are for debugging and don't affect behaviour:
+ *
+ * CLOSURE_RUNNING: Set when a closure is running (i.e. by
+ * closure_init() and when closure_put() runs the next function), and
+ * must be cleared before remaining hits 0. Primarily to help guard
+ * against incorrect usage and accidentally transferring references.
+ * continue_at() and closure_return() clear it for you, if you're doing
+ * something unusual you can use closure_set_dead() which also helps
+ * annotate where references are being transferred.
+ */
+
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
+};
+
+#define CLOSURE_GUARD_MASK \
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
+
+#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
+#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
+
+struct closure {
+ union {
+ struct {
+ struct workqueue_struct *wq;
+ struct closure_syncer *s;
+ struct llist_node list;
+ closure_fn *fn;
+ };
+ struct work_struct work;
+ };
+
+ struct closure *parent;
+
+ atomic_t remaining;
+ bool closure_get_happened;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+#define CLOSURE_MAGIC_DEAD 0xc054dead
+#define CLOSURE_MAGIC_ALIVE 0xc054a11e
+#define CLOSURE_MAGIC_STACK 0xc05451cc
+
+ unsigned int magic;
+ struct list_head all;
+ unsigned long ip;
+ unsigned long waiting_on;
+#endif
+};
+
+void closure_sub(struct closure *cl, int v);
+void closure_put(struct closure *cl);
+void __closure_wake_up(struct closure_waitlist *list);
+bool closure_wait(struct closure_waitlist *list, struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+ return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+
+ if (cl->closure_get_happened)
+ __closure_sync(cl);
+}
+
+int __closure_sync_timeout(struct closure *cl, unsigned long timeout);
+
+static inline int closure_sync_timeout(struct closure *cl, unsigned long timeout)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+ return cl->closure_get_happened
+ ? __closure_sync_timeout(cl, timeout)
+ : 0;
+}
+
+#ifdef CONFIG_DEBUG_CLOSURES
+
+void closure_debug_create(struct closure *cl);
+void closure_debug_destroy(struct closure *cl);
+
+#else
+
+static inline void closure_debug_create(struct closure *cl) {}
+static inline void closure_debug_destroy(struct closure *cl) {}
+
+#endif
+
+static inline void closure_set_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _THIS_IP_;
+#endif
+}
+
+static inline void closure_set_ret_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _RET_IP_;
+#endif
+}
+
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->waiting_on = f;
+#endif
+}
+
+static inline void closure_set_stopped(struct closure *cl)
+{
+ atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+ struct workqueue_struct *wq)
+{
+ closure_set_ip(cl);
+ cl->fn = fn;
+ cl->wq = wq;
+}
+
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+ /*
+ * Changes made to closure, work_struct, or a couple of other structs
+ * may cause work.func to no longer point to the right location.
+ */
+ BUILD_BUG_ON(offsetof(struct closure, fn)
+ != offsetof(struct work_struct, func));
+
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(&cl->work);
+}
+
+/**
+ * closure_get - increment a closure's refcount
+ */
+static inline void closure_get(struct closure *cl)
+{
+ cl->closure_get_happened = true;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON((atomic_inc_return(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) <= 1);
+#else
+ atomic_inc(&cl->remaining);
+#endif
+}
+
+/**
+ * closure_get_not_zero - increment a closure's refcount unless it is zero
+ *
+ * Returns true if the refcount was incremented, false if it was already zero.
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+ unsigned old = atomic_read(&cl->remaining);
+ do {
+ if (!(old & CLOSURE_REMAINING_MASK))
+ return false;
+
+ } while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+ return true;
+}
+
+/**
+ * closure_init - Initialize a closure, setting the refcount to 1
+ * @cl: closure to initialize
+ * @parent: parent of the new closure. cl will take a refcount on it for its
+ * lifetime; may be NULL.
+ */
+static inline void closure_init(struct closure *cl, struct closure *parent)
+{
+ cl->fn = NULL;
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
+
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+ cl->closure_get_happened = false;
+
+ closure_debug_create(cl);
+ closure_set_ip(cl);
+}
+
+static inline void closure_init_stack(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->magic = CLOSURE_MAGIC_STACK;
+#endif
+}
+
+static inline void closure_init_stack_release(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->magic = CLOSURE_MAGIC_STACK;
+#endif
+}
+
+/**
+ * closure_wake_up - wake up all closures on a wait list,
+ * with memory barrier
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+ /* Memory barrier for the wait list */
+ smp_mb();
+ __closure_wake_up(list);
+}
+
+#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
+#define closure_type(name, type, member) \
+ struct closure *cl = container_of(ws, struct closure, work); \
+ type *name = container_of(cl, type, member)
+
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
+ */
+#define continue_at(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_sub(_cl, CLOSURE_RUNNING + 1); \
+} while (0)
+
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
+#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+
+void closure_return_sync(struct closure *cl);
+
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
+#define continue_at_nobarrier(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_queue(_cl); \
+} while (0)
+
+/**
+ * closure_return_with_destructor - finish execution of a closure,
+ * with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
+#define closure_return_with_destructor(_cl, _destructor) \
+do { \
+ set_closure_fn(_cl, _destructor, NULL); \
+ closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
+} while (0)
+
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
+static inline void closure_call(struct closure *cl, closure_fn fn,
+ struct workqueue_struct *wq,
+ struct closure *parent)
+{
+ closure_init(cl, parent);
+ continue_at_nobarrier(cl, fn, wq);
+}
+
+#define __closure_wait_event(waitlist, _cond) \
+do { \
+ struct closure cl; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) \
+ break; \
+ closure_sync(&cl); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+} while (0)
+
+#define closure_wait_event(waitlist, _cond) \
+do { \
+ if (!(_cond)) \
+ __closure_wait_event(waitlist, _cond); \
+} while (0)
+
+#define __closure_wait_event_timeout(waitlist, _cond, _until) \
+({ \
+ struct closure cl; \
+ long _t; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) { \
+ _t = max_t(long, 1L, _until - jiffies); \
+ break; \
+ } \
+ _t = max_t(long, 0L, _until - jiffies); \
+ if (!_t) \
+ break; \
+ closure_sync_timeout(&cl, _t); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+ _t; \
+})
+
+/*
+ * Returns 0 if timeout expired, remaining time in jiffies (at least 1) if
+ * condition became true
+ */
+#define closure_wait_event_timeout(waitlist, _cond, _timeout) \
+({ \
+ unsigned long _until = jiffies + _timeout; \
+ (_cond) \
+ ? max_t(long, 1L, _until - jiffies) \
+ : __closure_wait_event_timeout(waitlist, _cond, _until);\
+})
+
+#endif /* _LINUX_CLOSURE_H */
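
The endio example from the header comment, made concrete with the CLOSURE_CALLBACK()/closure_type() helpers. This is a sketch; the my_* names are illustrative and the bio plumbing is abbreviated:

#include <linux/bio.h>
#include <linux/closure.h>

struct my_io {
	struct closure cl;
	/* ... per-request state ... */
};

static void my_bio_endio(struct bio *bio)
{
	struct my_io *io = bio->bi_private;

	bio_put(bio);
	closure_put(&io->cl);	/* drop the ref taken before submission */
}

static CLOSURE_CALLBACK(my_io_done)
{
	closure_type(io, struct my_io, cl);

	/* Both bios have completed; finish and drop the parent ref. */
	closure_return(&io->cl);
}

static void my_io_submit(struct my_io *io, struct bio *bio1, struct bio *bio2)
{
	closure_init(&io->cl, NULL);

	closure_get(&io->cl);
	bio1->bi_private = io;
	bio1->bi_end_io = my_bio_endio;
	submit_bio(bio1);

	closure_get(&io->cl);
	bio2->bi_private = io;
	bio2->bi_end_io = my_bio_endio;
	submit_bio(bio2);

	/* Must be the last statement: we no longer own a ref on the closure. */
	continue_at(&io->cl, my_io_done, system_wq);
}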
diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h
deleted file mode 100644
index 88bee3a33090..000000000000
--- a/include/linux/cm4000_cs.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _CM4000_H_
-#define _CM4000_H_
-
-#include <uapi/linux/cm4000_cs.h>
-
-
-#define DEVICE_NAME "cmm"
-#define MODULE_NAME "cm4000_cs"
-
-#endif /* _CM4000_H_ */
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 3e8fbf5a5c73..62d9c1cf6326 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -1,20 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CMA_H__
#define __CMA_H__
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/numa.h>
-/*
- * There is always at least global CMA area and a few optional
- * areas configured in kernel .config.
- */
#ifdef CONFIG_CMA_AREAS
-#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+#define MAX_CMA_AREAS CONFIG_CMA_AREAS
+#endif
-#else
-#define MAX_CMA_AREAS (0)
+#define CMA_MAX_NAME 64
-#endif
+/*
+ * Since the buddy allocator -- especially pageblock merging and
+ * alloc_contig_range() -- can deal with only some pageblocks of a
+ * higher-order page being MIGRATE_CMA, we can use pageblock_nr_pages
+ * as the minimum CMA alignment.
+ */
+#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
+#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
struct cma;
@@ -23,17 +27,54 @@ extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);
-extern int __init cma_declare_contiguous(phys_addr_t base,
+extern int __init cma_declare_contiguous_nid(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, const char *name, struct cma **res_cma);
+ bool fixed, const char *name, struct cma **res_cma,
+ int nid);
+static inline int __init cma_declare_contiguous(phys_addr_t base,
+ phys_addr_t size, phys_addr_t limit,
+ phys_addr_t alignment, unsigned int order_per_bit,
+ bool fixed, const char *name, struct cma **res_cma)
+{
+ return cma_declare_contiguous_nid(base, size, limit, alignment,
+ order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
+}
+extern int __init cma_declare_contiguous_multi(phys_addr_t size,
+ phys_addr_t align, unsigned int order_per_bit,
+ const char *name, struct cma **res_cma, int nid);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
const char *name,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
- gfp_t gfp_mask);
-extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
+ bool no_warn);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
+extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
+extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
+
+extern void cma_reserve_pages_on_error(struct cma *cma);
+
+#ifdef CONFIG_CMA
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
+bool cma_free_folio(struct cma *cma, const struct folio *folio);
+bool cma_validate_zones(struct cma *cma);
+#else
+static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+ return false;
+}
+static inline bool cma_validate_zones(struct cma *cma)
+{
+ return false;
+}
+#endif
+
#endif
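
Combining the NUMA-aware declaration with the reworked cma_alloc() signature, a platform might reserve an area at early boot (while memblock is still the allocator) and hand out pages later. Sizes and names below are arbitrary:

#include <linux/cma.h>
#include <linux/sizes.h>

static struct cma *my_cma;

static int __init my_cma_reserve(void)
{
	/* base/limit of 0 mean "anywhere"; alignment per the new minimum. */
	return cma_declare_contiguous_nid(0, SZ_16M, 0,
					  CMA_MIN_ALIGNMENT_BYTES, 0, false,
					  "my-cma", &my_cma, NUMA_NO_NODE);
}

static struct page *my_cma_get(unsigned long count)
{
	/* Last parameter is now no_warn; false keeps failure warnings on. */
	return cma_alloc(my_cma, count, 0, false);
}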
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
deleted file mode 100644
index 2e6dce6e5c2a..000000000000
--- a/include/linux/cmdline-parser.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Parsing command line, get the partitions information.
- *
- * Written by Cai Zhiyong <caizhiyong@huawei.com>
- *
- */
-#ifndef CMDLINEPARSEH
-#define CMDLINEPARSEH
-
-#include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-/* partition flags */
-#define PF_RDONLY 0x01 /* Device is read only */
-#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
-
-struct cmdline_subpart {
- char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
- sector_t from;
- sector_t size;
- int flags;
- struct cmdline_subpart *next_subpart;
-};
-
-struct cmdline_parts {
- char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
- unsigned int nr_subparts;
- struct cmdline_subpart *subpart;
- struct cmdline_parts *next_parts;
-};
-
-void cmdline_parts_free(struct cmdline_parts **parts);
-
-int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
-
-struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
- const char *bdev);
-
-int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
- int slot,
- int (*add_part)(int, struct cmdline_subpart *, void *),
- void *param);
-
-#endif /* CMDLINEPARSEH */
diff --git a/include/linux/cmpxchg-emu.h b/include/linux/cmpxchg-emu.h
new file mode 100644
index 000000000000..998deec67740
--- /dev/null
+++ b/include/linux/cmpxchg-emu.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Emulated 1-byte and 2-byte cmpxchg operations for architectures
+ * lacking direct support for these sizes. These are implemented in terms
+ * of 4-byte cmpxchg operations.
+ *
+ * Copyright (C) 2024 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_CMPXCHG_EMU_H
+#define __LINUX_CMPXCHG_EMU_H
+
+uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new);
+
+#endif /* __LINUX_CMPXCHG_EMU_H */
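
For callers, the emulated form behaves like a regular cmpxchg() narrowed to one byte: it returns the previous value, so success is detected by comparing against the expected old value. A small illustrative wrapper:

#include <linux/cmpxchg-emu.h>
#include <linux/types.h>

static bool try_set_state(volatile u8 *state, u8 expect, u8 next)
{
	/* Returns the prior value of *state, exactly like cmpxchg(). */
	return cmpxchg_emu_u8(state, expect, next) == expect;
}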
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index aa629bce9033..064428479f2d 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Extend a 32-bit counter to 63 bits
*
* Author: Nicolas Pitre
* Created: December 3, 2006
* Copyright: MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
*/
#ifndef __LINUX_CNT32_TO_63_H__
diff --git a/include/linux/coda.h b/include/linux/coda.h
index d30209b9cef8..0ca0c83fdb1c 100644
--- a/include/linux/coda.h
+++ b/include/linux/coda.h
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
#ifndef _CODA_HEADER_
#define _CODA_HEADER_
-#if defined(__linux__)
typedef unsigned long long u_quad_t;
-#endif
+
#include <uapi/linux/coda.h>
#endif
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
deleted file mode 100644
index 31e4e1f1547c..000000000000
--- a/include/linux/coda_psdev.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef __CODA_PSDEV_H
-#define __CODA_PSDEV_H
-
-#include <linux/backing-dev.h>
-#include <linux/mutex.h>
-#include <uapi/linux/coda_psdev.h>
-
-struct kstatfs;
-
-/* communication pending/processing queues */
-struct venus_comm {
- u_long vc_seq;
- wait_queue_head_t vc_waitq; /* Venus wait queue */
- struct list_head vc_pending;
- struct list_head vc_processing;
- int vc_inuse;
- struct super_block *vc_sb;
- struct mutex vc_mutex;
-};
-
-
-static inline struct venus_comm *coda_vcp(struct super_block *sb)
-{
- return (struct venus_comm *)((sb)->s_fs_info);
-}
-
-/* upcalls */
-int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
-int venus_getattr(struct super_block *sb, struct CodaFid *fid,
- struct coda_vattr *attr);
-int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *);
-int venus_lookup(struct super_block *sb, struct CodaFid *fid,
- const char *name, int length, int *type,
- struct CodaFid *resfid);
-int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
- kuid_t uid);
-int venus_open(struct super_block *sb, struct CodaFid *fid, int flags,
- struct file **f);
-int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length,
- struct CodaFid *newfid, struct coda_vattr *attrs);
-int venus_create(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length, int excl, int mode,
- struct CodaFid *newfid, struct coda_vattr *attrs) ;
-int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length);
-int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length);
-int venus_readlink(struct super_block *sb, struct CodaFid *fid,
- char *buffer, int *length);
-int venus_rename(struct super_block *, struct CodaFid *new_fid,
- struct CodaFid *old_fid, size_t old_length,
- size_t new_length, const char *old_name,
- const char *new_name);
-int venus_link(struct super_block *sb, struct CodaFid *fid,
- struct CodaFid *dirfid, const char *name, int len );
-int venus_symlink(struct super_block *sb, struct CodaFid *fid,
- const char *name, int len, const char *symname, int symlen);
-int venus_access(struct super_block *sb, struct CodaFid *fid, int mask);
-int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
- unsigned int cmd, struct PioctlData *data);
-int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out);
-int venus_fsync(struct super_block *sb, struct CodaFid *fid);
-int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
-
-/*
- * Statistics
- */
-
-extern struct venus_comm coda_comms[];
-#endif
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
new file mode 100644
index 000000000000..8ea2a5f7c98a
--- /dev/null
+++ b/include/linux/codetag.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * code tagging framework
+ */
+#ifndef _LINUX_CODETAG_H
+#define _LINUX_CODETAG_H
+
+#include <linux/types.h>
+
+struct codetag_iterator;
+struct codetag_type;
+struct codetag_module;
+struct seq_buf;
+struct module;
+
+#define CODETAG_SECTION_START_PREFIX "__start_"
+#define CODETAG_SECTION_STOP_PREFIX "__stop_"
+
+/* codetag flags */
+#define CODETAG_FLAG_INACCURATE (1 << 0)
+
+/*
+ * An instance of this structure is created in a special ELF section at every
+ * code location being tagged. At runtime, the special section is treated as
+ * an array of these.
+ */
+struct codetag {
+ unsigned int flags;
+ unsigned int lineno;
+ const char *modname;
+ const char *function;
+ const char *filename;
+} __aligned(8);
+
+union codetag_ref {
+ struct codetag *ct;
+};
+
+struct codetag_type_desc {
+ const char *section;
+ size_t tag_size;
+ int (*module_load)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+ void (*module_unload)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+#ifdef CONFIG_MODULES
+ void (*module_replaced)(struct module *mod, struct module *new_mod);
+ bool (*needs_section_mem)(struct module *mod, unsigned long size);
+ void *(*alloc_section_mem)(struct module *mod, unsigned long size,
+ unsigned int prepend, unsigned long align);
+ void (*free_section_mem)(struct module *mod, bool used);
+#endif
+};
+
+struct codetag_iterator {
+ struct codetag_type *cttype;
+ struct codetag_module *cmod;
+ unsigned long mod_id;
+ struct codetag *ct;
+ unsigned long mod_seq;
+};
+
+#ifdef MODULE
+#define CT_MODULE_NAME KBUILD_MODNAME
+#else
+#define CT_MODULE_NAME NULL
+#endif
+
+#define CODE_TAG_INIT { \
+ .modname = CT_MODULE_NAME, \
+ .function = __func__, \
+ .filename = __FILE__, \
+ .lineno = __LINE__, \
+ .flags = 0, \
+}
+
+void codetag_lock_module_list(struct codetag_type *cttype, bool lock);
+bool codetag_trylock_module_list(struct codetag_type *cttype);
+struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype);
+struct codetag *codetag_next_ct(struct codetag_iterator *iter);
+
+void codetag_to_text(struct seq_buf *out, struct codetag *ct);
+
+struct codetag_type *
+codetag_register_type(const struct codetag_type_desc *desc);
+
+#if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES)
+
+bool codetag_needs_module_section(struct module *mod, const char *name,
+ unsigned long size);
+void *codetag_alloc_module_section(struct module *mod, const char *name,
+ unsigned long size, unsigned int prepend,
+ unsigned long align);
+void codetag_free_module_sections(struct module *mod);
+void codetag_module_replaced(struct module *mod, struct module *new_mod);
+int codetag_load_module(struct module *mod);
+void codetag_unload_module(struct module *mod);
+
+#else /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */
+
+static inline bool
+codetag_needs_module_section(struct module *mod, const char *name,
+ unsigned long size) { return false; }
+static inline void *
+codetag_alloc_module_section(struct module *mod, const char *name,
+ unsigned long size, unsigned int prepend,
+ unsigned long align) { return NULL; }
+static inline void codetag_free_module_sections(struct module *mod) {}
+static inline void codetag_module_replaced(struct module *mod, struct module *new_mod) {}
+static inline int codetag_load_module(struct module *mod) { return 0; }
+static inline void codetag_unload_module(struct module *mod) {}
+
+#endif /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */
+
+#endif /* _LINUX_CODETAG_H */
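
A consumer of this framework embeds struct codetag in its own per-callsite tag, emits instances into a named ELF section, and registers a matching codetag_type_desc. A rough sketch loosely modeled on how alloc_tag uses the API; the section and type names are invented:

#include <linux/atomic.h>
#include <linux/codetag.h>

struct my_tag {
	struct codetag ct;
	atomic64_t hits;
};

/* One instance per tagged callsite, all collected into section "my_tags". */
#define DEFINE_MY_TAG(name)					\
	static struct my_tag name __used			\
	__section("my_tags") = { .ct = CODE_TAG_INIT }

static const struct codetag_type_desc my_tag_desc = {
	.section  = "my_tags",
	.tag_size = sizeof(struct my_tag),
};

/*
 * At init: cttype = codetag_register_type(&my_tag_desc); then walk the
 * tags with codetag_get_ct_iter()/codetag_next_ct() under the module lock.
 */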
diff --git a/include/linux/comedi/comedi_8254.h b/include/linux/comedi/comedi_8254.h
new file mode 100644
index 000000000000..d527f04400df
--- /dev/null
+++ b/include/linux/comedi/comedi_8254.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_8254.h
+ * Generic 8254 timer/counter support
+ * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_8254_H
+#define _COMEDI_8254_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+struct comedi_device;
+struct comedi_insn;
+struct comedi_subdevice;
+
+/*
+ * Common oscillator base values in nanoseconds
+ */
+#define I8254_OSC_BASE_10MHZ 100
+#define I8254_OSC_BASE_5MHZ 200
+#define I8254_OSC_BASE_4MHZ 250
+#define I8254_OSC_BASE_2MHZ 500
+#define I8254_OSC_BASE_1MHZ 1000
+#define I8254_OSC_BASE_100KHZ 10000
+#define I8254_OSC_BASE_10KHZ 100000
+#define I8254_OSC_BASE_1KHZ 1000000
+
+/*
+ * I/O access size used to read/write registers
+ */
+#define I8254_IO8 1
+#define I8254_IO16 2
+#define I8254_IO32 4
+
+/*
+ * Register map for generic 8254 timer (I8254_IO8 with 0 regshift)
+ */
+#define I8254_COUNTER0_REG 0x00
+#define I8254_COUNTER1_REG 0x01
+#define I8254_COUNTER2_REG 0x02
+#define I8254_CTRL_REG 0x03
+#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
+#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x))
+#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4)
+#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5)
+#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
+#define I8254_CTRL_RW(x) (((x) & 0x3) << 4)
+#define I8254_CTRL_LATCH I8254_CTRL_RW(0)
+#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1)
+#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2)
+#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3)
+
+/* counter maps zero to 0x10000 */
+#define I8254_MAX_COUNT 0x10000
+
+struct comedi_8254;
+
+/**
+ * typedef comedi_8254_iocb_fn - call-back function type for 8254 register access
+ * @i8254: pointer to struct comedi_8254
+ * @dir: direction (0 = read, 1 = write)
+ * @reg: register number
+ * @val: value to write
+ *
+ * Return: Register value when reading, 0 when writing.
+ */
+typedef unsigned int comedi_8254_iocb_fn(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val);
+
+/**
+ * struct comedi_8254 - private data used by this module
+ * @iocb: I/O call-back function for register access
+ * @context: context for register access (e.g. a base address)
+ * @iosize: I/O size used to access the registers (b/w/l)
+ * @regshift: register gap shift
+ * @osc_base: cascaded oscillator speed in ns
+ * @divisor: divisor for single counter
+ * @divisor1: divisor loaded into first cascaded counter
+ * @divisor2: divisor loaded into second cascaded counter
+ * @next_div: next divisor for single counter
+ * @next_div1: next divisor to use for first cascaded counter
+ * @next_div2: next divisor to use for second cascaded counter
+ * @clock_src: current clock source for each counter (driver specific)
+ * @gate_src: current gate source for each counter (driver specific)
+ * @busy: flags used to indicate that a counter is "busy"
+ * @insn_config: driver specific (*insn_config) callback
+ */
+struct comedi_8254 {
+ comedi_8254_iocb_fn *iocb;
+ unsigned long context;
+ unsigned int iosize;
+ unsigned int regshift;
+ unsigned int osc_base;
+ unsigned int divisor;
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int next_div;
+ unsigned int next_div1;
+ unsigned int next_div2;
+ unsigned int clock_src[3];
+ unsigned int gate_src[3];
+ bool busy[3];
+
+ int (*insn_config)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+};
+
+unsigned int comedi_8254_status(struct comedi_8254 *i8254,
+ unsigned int counter);
+unsigned int comedi_8254_read(struct comedi_8254 *i8254, unsigned int counter);
+void comedi_8254_write(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int val);
+
+int comedi_8254_set_mode(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int mode);
+int comedi_8254_load(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int val, unsigned int mode);
+
+void comedi_8254_pacer_enable(struct comedi_8254 *i8254,
+ unsigned int counter1, unsigned int counter2,
+ bool enable);
+void comedi_8254_update_divisors(struct comedi_8254 *i8254);
+void comedi_8254_cascade_ns_to_timer(struct comedi_8254 *i8254,
+ unsigned int *nanosec, unsigned int flags);
+void comedi_8254_ns_to_timer(struct comedi_8254 *i8254,
+ unsigned int *nanosec, unsigned int flags);
+
+void comedi_8254_set_busy(struct comedi_8254 *i8254,
+ unsigned int counter, bool busy);
+
+void comedi_8254_subdevice_init(struct comedi_subdevice *s,
+ struct comedi_8254 *i8254);
+
+#ifdef CONFIG_HAS_IOPORT
+struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+#else
+static inline struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift)
+{
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
+struct comedi_8254 *comedi_8254_mm_alloc(void __iomem *mmio,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+
+#endif /* _COMEDI_8254_H */
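Illustration only, not part of this patch: driver code (which also includes
comedidev.h) typically allocates one of these as dev->pacer and drives it
with the cascade helpers declared above. A minimal sketch; the register
offset 0x08, the 10 MHz oscillator, and 8-bit register I/O are assumptions.

static int my_pacer_setup(struct comedi_device *dev, unsigned int *ns,
			  unsigned int flags)
{
	/* 8254 registers at dev->iobase + 0x08, 10 MHz clock, 8-bit I/O */
	dev->pacer = comedi_8254_io_alloc(dev->iobase + 0x08,
					  I8254_OSC_BASE_10MHZ,
					  I8254_IO8, 0);
	if (IS_ERR(dev->pacer))
		return PTR_ERR(dev->pacer);

	/* round *ns to the nearest achievable period, then commit the
	 * rounded divisors into divisor1/divisor2
	 */
	comedi_8254_cascade_ns_to_timer(dev->pacer, ns, flags);
	comedi_8254_update_divisors(dev->pacer);

	/* put counters 1 and 2 into rate-generator mode and load them */
	comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
	return 0;
}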
diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h
new file mode 100644
index 000000000000..d24a69da389b
--- /dev/null
+++ b/include/linux/comedi/comedi_8255.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_8255.h
+ * Generic 8255 digital I/O subdevice support
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_8255_H
+#define _COMEDI_8255_H
+
+#include <linux/errno.h>
+
+#define I8255_SIZE 0x04
+
+#define I8255_DATA_A_REG 0x00
+#define I8255_DATA_B_REG 0x01
+#define I8255_DATA_C_REG 0x02
+#define I8255_CTRL_REG 0x03
+#define I8255_CTRL_C_LO_IO BIT(0)
+#define I8255_CTRL_B_IO BIT(1)
+#define I8255_CTRL_B_MODE BIT(2)
+#define I8255_CTRL_C_HI_IO BIT(3)
+#define I8255_CTRL_A_IO BIT(4)
+#define I8255_CTRL_A_MODE(x) ((x) << 5)
+#define I8255_CTRL_CW BIT(7)
+
+struct comedi_device;
+struct comedi_subdevice;
+
+#ifdef CONFIG_HAS_IOPORT
+int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase);
+#else
+static inline int subdev_8255_io_init(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned long regbase)
+{
+ return -ENXIO;
+}
+#endif
+
+int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase);
+
+int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long context),
+ unsigned long context);
+
+unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
+
+#endif /* _COMEDI_8255_H */
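Illustration only, not part of this patch: an attach handler hands one
comedi_subdevice per 8255 chip to this module. A sketch, assuming a single
subdevice and a hypothetical 8255 register offset of 0x10 from dev->iobase.

static int my_attach_8255(struct comedi_device *dev)
{
	struct comedi_subdevice *s;
	int ret;

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	/* DIO subdevice backed by an 8255 at dev->iobase + 0x10 */
	s = &dev->subdevices[0];
	return subdev_8255_io_init(dev, s, 0x10);
}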
diff --git a/include/linux/comedi/comedi_isadma.h b/include/linux/comedi/comedi_isadma.h
new file mode 100644
index 000000000000..9d2b12db7e6e
--- /dev/null
+++ b/include/linux/comedi/comedi_isadma.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * COMEDI ISA DMA support functions
+ * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ */
+
+#ifndef _COMEDI_ISADMA_H
+#define _COMEDI_ISADMA_H
+
+#include <linux/types.h>
+
+struct comedi_device;
+struct device;
+
+/*
+ * These are used to avoid issues when <asm/dma.h> and the DMA_MODE_
+ * defines are not available.
+ */
+#define COMEDI_ISADMA_READ 0
+#define COMEDI_ISADMA_WRITE 1
+
+/**
+ * struct comedi_isadma_desc - cookie for ISA DMA
+ * @virt_addr: virtual address of buffer
+ * @hw_addr: hardware (bus) address of buffer
+ * @chan: DMA channel
+ * @maxsize: allocated size of buffer (in bytes)
+ * @size: transfer size (in bytes)
+ * @mode: DMA_MODE_READ or DMA_MODE_WRITE
+ */
+struct comedi_isadma_desc {
+ void *virt_addr;
+ dma_addr_t hw_addr;
+ unsigned int chan;
+ unsigned int maxsize;
+ unsigned int size;
+ char mode;
+};
+
+/**
+ * struct comedi_isadma - ISA DMA data
+ * @dev: device to allocate non-coherent memory for
+ * @desc: cookie for each DMA buffer
+ * @n_desc: the number of cookies
+ * @cur_dma: the current cookie in use
+ * @chan: the first DMA channel requested
+ * @chan2: the second DMA channel requested
+ */
+struct comedi_isadma {
+ struct device *dev;
+ struct comedi_isadma_desc *desc;
+ int n_desc;
+ int cur_dma;
+ unsigned int chan;
+ unsigned int chan2;
+};
+
+#if IS_ENABLED(CONFIG_ISA_DMA_API)
+
+void comedi_isadma_program(struct comedi_isadma_desc *desc);
+unsigned int comedi_isadma_disable(unsigned int dma_chan);
+unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
+ unsigned int size);
+unsigned int comedi_isadma_poll(struct comedi_isadma *dma);
+void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir);
+
+struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
+ int n_desc, unsigned int dma_chan1,
+ unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir);
+void comedi_isadma_free(struct comedi_isadma *dma);
+
+#else /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
+
+static inline void comedi_isadma_program(struct comedi_isadma_desc *desc)
+{
+}
+
+static inline unsigned int comedi_isadma_disable(unsigned int dma_chan)
+{
+ return 0;
+}
+
+static inline unsigned int
+comedi_isadma_disable_on_sample(unsigned int dma_chan, unsigned int size)
+{
+ return 0;
+}
+
+static inline unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
+{
+ return 0;
+}
+
+static inline void comedi_isadma_set_mode(struct comedi_isadma_desc *desc,
+ char dma_dir)
+{
+}
+
+static inline struct comedi_isadma *
+comedi_isadma_alloc(struct comedi_device *dev, int n_desc,
+ unsigned int dma_chan1, unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir)
+{
+ return NULL;
+}
+
+static inline void comedi_isadma_free(struct comedi_isadma *dma)
+{
+}
+
+#endif /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
+
+#endif /* _COMEDI_ISADMA_H */
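Illustration only, not part of this patch: a driver for an ISA card might
allocate a single-descriptor DMA handle and arm it for input as sketched
below; the channel number and PAGE_SIZE buffer are hypothetical choices.

static struct comedi_isadma *my_dma_setup(struct comedi_device *dev,
					  unsigned int dma_chan)
{
	struct comedi_isadma *dma;

	/* one descriptor, one channel, PAGE_SIZE buffer, input direction */
	dma = comedi_isadma_alloc(dev, 1, dma_chan, 0, PAGE_SIZE,
				  COMEDI_ISADMA_READ);
	if (!dma)
		return NULL;

	/* program the only descriptor for a full-buffer transfer */
	dma->desc[0].size = dma->desc[0].maxsize;
	comedi_isadma_program(&dma->desc[0]);
	return dma;
}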
diff --git a/include/linux/comedi/comedi_pci.h b/include/linux/comedi/comedi_pci.h
new file mode 100644
index 000000000000..2fb50663e3ed
--- /dev/null
+++ b/include/linux/comedi/comedi_pci.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_pci.h
+ * header file for Comedi PCI drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_PCI_H
+#define _COMEDI_PCI_H
+
+#include <linux/pci.h>
+#include <linux/comedi/comedidev.h>
+
+/*
+ * PCI Vendor IDs not in <linux/pci_ids.h>
+ */
+#define PCI_VENDOR_ID_KOLTER 0x1001
+#define PCI_VENDOR_ID_ICP 0x104c
+#define PCI_VENDOR_ID_DT 0x1116
+#define PCI_VENDOR_ID_IOTECH 0x1616
+#define PCI_VENDOR_ID_CONTEC 0x1221
+#define PCI_VENDOR_ID_RTD 0x1435
+#define PCI_VENDOR_ID_HUMUSOFT 0x186c
+
+struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev);
+
+int comedi_pci_enable(struct comedi_device *dev);
+void comedi_pci_disable(struct comedi_device *dev);
+void comedi_pci_detach(struct comedi_device *dev);
+
+int comedi_pci_auto_config(struct pci_dev *pcidev, struct comedi_driver *driver,
+ unsigned long context);
+void comedi_pci_auto_unconfig(struct pci_dev *pcidev);
+
+int comedi_pci_driver_register(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver);
+void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver);
+
+/**
+ * module_comedi_pci_driver() - Helper macro for registering a comedi PCI driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pci_driver: pci_driver struct
+ *
+ * Helper macro for comedi PCI drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit()
+ */
+#define module_comedi_pci_driver(__comedi_driver, __pci_driver) \
+ module_driver(__comedi_driver, comedi_pci_driver_register, \
+ comedi_pci_driver_unregister, &(__pci_driver))
+
+#endif /* _COMEDI_PCI_H */
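Illustration only, not part of this patch: the macro pairs a comedi_driver
with a pci_driver whose probe hands the device to comedi_pci_auto_config().
The module_comedi_pcmcia_driver() and module_comedi_usb_driver() macros in
the following headers follow the same shape. All my_* names and the device
ID are hypothetical; my_comedi_driver is assumed to be defined elsewhere.

static int my_pci_probe(struct pci_dev *pcidev,
			const struct pci_device_id *id)
{
	return comedi_pci_auto_config(pcidev, &my_comedi_driver,
				      id->driver_data);
}

static const struct pci_device_id my_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RTD, 0x7520) },	/* hypothetical ID */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, my_pci_table);

static struct pci_driver my_pci_driver = {
	.name		= "my_comedi_pci",
	.id_table	= my_pci_table,
	.probe		= my_pci_probe,
	.remove		= comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(my_comedi_driver, my_pci_driver);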
diff --git a/include/linux/comedi/comedi_pcmcia.h b/include/linux/comedi/comedi_pcmcia.h
new file mode 100644
index 000000000000..a33dfb65b869
--- /dev/null
+++ b/include/linux/comedi/comedi_pcmcia.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_pcmcia.h
+ * header file for Comedi PCMCIA drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_PCMCIA_H
+#define _COMEDI_PCMCIA_H
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <linux/comedi/comedidev.h>
+
+struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev);
+
+int comedi_pcmcia_enable(struct comedi_device *dev,
+ int (*conf_check)(struct pcmcia_device *p_dev,
+ void *priv_data));
+void comedi_pcmcia_disable(struct comedi_device *dev);
+
+int comedi_pcmcia_auto_config(struct pcmcia_device *link,
+ struct comedi_driver *driver);
+void comedi_pcmcia_auto_unconfig(struct pcmcia_device *link);
+
+int comedi_pcmcia_driver_register(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver);
+void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver);
+
+/**
+ * module_comedi_pcmcia_driver() - Helper macro for registering a comedi
+ * PCMCIA driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pcmcia_driver: pcmcia_driver struct
+ *
+ * Helper macro for comedi PCMCIA drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit()
+ */
+#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \
+ module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
+ comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
+
+#endif /* _COMEDI_PCMCIA_H */
diff --git a/include/linux/comedi/comedi_usb.h b/include/linux/comedi/comedi_usb.h
new file mode 100644
index 000000000000..5d17dd425bd2
--- /dev/null
+++ b/include/linux/comedi/comedi_usb.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* comedi_usb.h
+ * header file for USB Comedi drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_USB_H
+#define _COMEDI_USB_H
+
+#include <linux/usb.h>
+#include <linux/comedi/comedidev.h>
+
+struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev);
+struct usb_device *comedi_to_usb_dev(struct comedi_device *dev);
+
+int comedi_usb_auto_config(struct usb_interface *intf,
+ struct comedi_driver *driver, unsigned long context);
+void comedi_usb_auto_unconfig(struct usb_interface *intf);
+
+int comedi_usb_driver_register(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver);
+void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver);
+
+/**
+ * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
+ * @__comedi_driver: comedi_driver struct
+ * @__usb_driver: usb_driver struct
+ *
+ * Helper macro for comedi USB drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit()
+ */
+#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
+ module_driver(__comedi_driver, comedi_usb_driver_register, \
+ comedi_usb_driver_unregister, &(__usb_driver))
+
+#endif /* _COMEDI_USB_H */
diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h
new file mode 100644
index 000000000000..35fdc41845ce
--- /dev/null
+++ b/include/linux/comedi/comedidev.h
@@ -0,0 +1,1054 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedidev.h
+ * header file for kernel-only structures, variables, and constants
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDIDEV_H
+#define _COMEDIDEV_H
+
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/rwsem.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+#include <linux/comedi.h>
+
+#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
+ COMEDI_MINORVERSION, COMEDI_MICROVERSION)
+#define COMEDI_RELEASE VERSION
+
+#define COMEDI_NUM_BOARD_MINORS 0x30
+
+/**
+ * struct comedi_subdevice - Working data for a COMEDI subdevice
+ * @device: COMEDI device to which this subdevice belongs. (Initialized by
+ * comedi_alloc_subdevices().)
+ * @index: Index of this subdevice within device's array of subdevices.
+ * (Initialized by comedi_alloc_subdevices().)
+ * @type: Type of subdevice from &enum comedi_subdevice_type. (Initialized by
+ * the low-level driver.)
+ * @n_chan: Number of channels the subdevice supports. (Initialized by the
+ * low-level driver.)
+ * @subdev_flags: Various "SDF" flags indicating aspects of the subdevice to
+ * the COMEDI core and user application. (Initialized by the low-level
+ * driver.)
+ * @len_chanlist: Maximum length of a channel list if the subdevice supports
+ * asynchronous acquisition commands. (Optionally initialized by the
+ * low-level driver, or changed from 0 to 1 during post-configuration.)
+ * @private: Private data pointer which is either set by the low-level driver
+ * itself, or by a call to comedi_alloc_spriv() which allocates storage.
+ * In the latter case, the storage is automatically freed after the
+ * low-level driver's "detach" handler is called for the device.
+ * (Initialized by the low-level driver.)
+ * @async: Pointer to &struct comedi_async if the subdevice supports
+ * asynchronous acquisition commands. (Allocated and initialized during
+ * post-configuration if needed.)
+ * @lock: Pointer to a file object that performed a %COMEDI_LOCK ioctl on the
+ * subdevice. (Initially NULL.)
+ * @busy: Pointer to a file object that is performing an asynchronous
+ * acquisition command on the subdevice. (Initially NULL.)
+ * @runflags: Internal flags for use by COMEDI core, mostly indicating whether
+ * an asynchronous acquisition command is running.
+ * @spin_lock: Generic spin-lock for use by the COMEDI core and the low-level
+ * driver. (Initialized by comedi_alloc_subdevices().)
+ * @io_bits: Bit-mask indicating the channel directions for a DIO subdevice
+ * with no more than 32 channels. A '1' at a bit position indicates the
+ * corresponding channel is configured as an output. (Initialized by the
+ * low-level driver for a DIO subdevice. Forced to all-outputs during
+ * post-configuration for a digital output subdevice.)
+ * @maxdata: If non-zero, this is the maximum raw data value of each channel.
+ * If zero, the maximum data value is channel-specific. (Initialized by
+ * the low-level driver.)
+ * @maxdata_list: If the maximum data value is channel-specific, this points
+ * to an array of maximum data values indexed by channel index.
+ * (Initialized by the low-level driver.)
+ * @range_table: If non-NULL, this points to a COMEDI range table for the
+ * subdevice. If NULL, the range table is channel-specific. (Initialized
+ * by the low-level driver, will be set to an "invalid" range table during
+ * post-configuration if @range_table and @range_table_list are both
+ * NULL.)
+ * @range_table_list: If the COMEDI range table is channel-specific, this
+ * points to an array of pointers to COMEDI range tables indexed by
+ * channel number. (Initialized by the low-level driver.)
+ * @chanlist: Not used.
+ * @insn_read: Optional pointer to a handler for the %INSN_READ instruction.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_write: Optional pointer to a handler for the %INSN_WRITE instruction.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_bits: Optional pointer to a handler for the %INSN_BITS instruction
+ * for a digital input, digital output or digital input/output subdevice.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_config: Optional pointer to a handler for the %INSN_CONFIG
+ * instruction. (Initialized by the low-level driver, or set to a default
+ * handler during post-configuration.)
+ * @do_cmd: If the subdevice supports asynchronous acquisition commands, this
+ * points to a handler to set it up in hardware. (Initialized by the
+ * low-level driver.)
+ * @do_cmdtest: If the subdevice supports asynchronous acquisition commands,
+ * this points to a handler used to check and possibly tweak a prospective
+ * acquisition command without setting it up in hardware. (Initialized by
+ * the low-level driver.)
+ * @poll: If the subdevice supports asynchronous acquisition commands, this
+ * is an optional pointer to a handler for the %COMEDI_POLL ioctl which
+ * instructs the low-level driver to synchronize buffers. (Initialized by
+ * the low-level driver if needed.)
+ * @cancel: If the subdevice supports asynchronous acquisition commands, this
+ * points to a handler used to terminate a running command. (Initialized
+ * by the low-level driver.)
+ * @buf_change: If the subdevice supports asynchronous acquisition commands,
+ * this is an optional pointer to a handler that is called when the data
+ * buffer for handling asynchronous commands is allocated or reallocated.
+ * (Initialized by the low-level driver if needed.)
+ * @munge: If the subdevice supports asynchronous acquisition commands and
+ * uses DMA to transfer data from the hardware to the acquisition buffer,
+ * this points to a function used to "munge" the data values from the
+ * hardware into the format expected by COMEDI. (Initialized by the
+ * low-level driver if needed.)
+ * @async_dma_dir: If the subdevice supports asynchronous acquisition commands
+ * and uses DMA to transfer data from the hardware to the acquisition
+ * buffer, this sets the DMA direction for the buffer. (Initialized to
+ * %DMA_NONE by comedi_alloc_subdevices() and changed by the low-level
+ * driver if necessary.)
+ * @state: Handy bit-mask indicating the output states for a DIO or digital
+ * output subdevice with no more than 32 channels. (Initialized by the
+ * low-level driver.)
+ * @class_dev: If the subdevice supports asynchronous acquisition commands,
+ * this points to a sysfs comediX_subdY device where X is the minor device
+ * number of the COMEDI device and Y is the subdevice number. The minor
+ * device number for the sysfs device is allocated dynamically in the
+ * range 48 to 255. This is used to allow the COMEDI device to be opened
+ * with a different default read or write subdevice. (Allocated during
+ * post-configuration if needed.)
+ * @minor: If @class_dev is set, this is its dynamically allocated minor
+ * device number. (Set during post-configuration if necessary.)
+ * @readback: Optional pointer to memory allocated by
+ * comedi_alloc_subdev_readback() used to hold the values written to
+ * analog output channels so they can be read back. The storage is
+ * automatically freed after the low-level driver's "detach" handler is
+ * called for the device. (Initialized by the low-level driver.)
+ *
+ * This is the main control structure for a COMEDI subdevice. If the subdevice
+ * supports asynchronous acquisition commands, additional information is stored
+ * in the &struct comedi_async pointed to by @async.
+ *
+ * Most of the subdevice is initialized by the low-level driver's "attach" or
+ * "auto_attach" handlers but parts of it are initialized by
+ * comedi_alloc_subdevices(), and other parts are initialized during
+ * post-configuration on return from that handler.
+ *
+ * A low-level driver that sets @insn_bits for a digital input, digital output,
+ * or DIO subdevice may leave @insn_read and @insn_write uninitialized, in
+ * which case they will be set to a default handler during post-configuration
+ * that uses @insn_bits to emulate the %INSN_READ and %INSN_WRITE instructions.
+ */
+struct comedi_subdevice {
+ struct comedi_device *device;
+ int index;
+ int type;
+ int n_chan;
+ int subdev_flags;
+ int len_chanlist; /* maximum length of channel/gain list */
+
+ void *private;
+
+ struct comedi_async *async;
+
+ void *lock;
+ void *busy;
+ unsigned int runflags;
+ spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */
+
+ unsigned int io_bits;
+
+ unsigned int maxdata; /* if maxdata==0, use list */
+ const unsigned int *maxdata_list; /* list is channel specific */
+
+ const struct comedi_lrange *range_table;
+ const struct comedi_lrange *const *range_table_list;
+
+ unsigned int *chanlist; /* driver-owned chanlist (not used) */
+
+ int (*insn_read)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_write)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_bits)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_config)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
+
+ int (*do_cmd)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*do_cmdtest)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
+ int (*poll)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*cancel)(struct comedi_device *dev, struct comedi_subdevice *s);
+
+ /* called when the buffer changes */
+ int (*buf_change)(struct comedi_device *dev,
+ struct comedi_subdevice *s);
+
+ void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index);
+ enum dma_data_direction async_dma_dir;
+
+ unsigned int state;
+
+ struct device *class_dev;
+ int minor;
+
+ unsigned int *readback;
+};
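Illustration only, not part of this patch: a low-level driver fills most of
this structure in from its "attach" or "auto_attach" handler. A minimal
16-channel DIO subdevice might look like the sketch below; my_dio_insn_bits
is a hypothetical handler, and the %INSN_READ/%INSN_WRITE handlers are left
to the post-configuration defaults described above.

static int my_auto_attach(struct comedi_device *dev, unsigned long context)
{
	struct comedi_subdevice *s;
	int ret;

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	s = &dev->subdevices[0];
	s->type		= COMEDI_SUBD_DIO;
	s->subdev_flags	= SDF_READABLE | SDF_WRITABLE;
	s->n_chan	= 16;
	s->maxdata	= 1;
	s->range_table	= &range_digital;
	s->insn_bits	= my_dio_insn_bits;	/* hypothetical */
	return 0;
}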
+
+/**
+ * struct comedi_buf_page - Describe a page of a COMEDI buffer
+ * @virt_addr: Kernel address of page.
+ * @dma_addr: DMA address of page if in DMA coherent memory.
+ */
+struct comedi_buf_page {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct comedi_buf_map - Describe pages in a COMEDI buffer
+ * @dma_hw_dev: Low-level hardware &struct device pointer copied from the
+ * COMEDI device's hw_dev member.
+ * @page_list: Pointer to array of &struct comedi_buf_page, one for each
+ * page in the buffer.
+ * @n_pages: Number of pages in the buffer.
+ * @dma_dir: DMA direction used to allocate pages of DMA coherent memory,
+ * or %DMA_NONE if pages allocated from regular memory.
+ * @refcount: &struct kref reference counter used to free the buffer.
+ *
+ * A COMEDI data buffer is allocated as individual pages, either in
+ * conventional memory or DMA coherent memory, depending on the attached
+ * low-level hardware device.
+ *
+ * The buffer is normally freed when the COMEDI device is detached from the
+ * low-level driver (which may happen due to device removal), but if it happens
+ * to be mmapped at the time, the pages cannot be freed until the buffer has
+ * been munmapped. That is what the reference counter is for.
+ */
+struct comedi_buf_map {
+ struct device *dma_hw_dev;
+ struct comedi_buf_page *page_list;
+ unsigned int n_pages;
+ enum dma_data_direction dma_dir;
+ struct kref refcount;
+};
+
+/**
+ * struct comedi_async - Control data for asynchronous COMEDI commands
+ * @prealloc_bufsz: Buffer size (in bytes).
+ * @buf_map: Map of buffer pages.
+ * @max_bufsize: Maximum allowed buffer size (in bytes).
+ * @buf_write_count: "Write completed" count (in bytes, modulo 2**32).
+ * @buf_write_alloc_count: "Allocated for writing" count (in bytes,
+ * modulo 2**32).
+ * @buf_read_count: "Read completed" count (in bytes, modulo 2**32).
+ * @buf_read_alloc_count: "Allocated for reading" count (in bytes,
+ * modulo 2**32).
+ * @buf_write_ptr: Buffer position for writer.
+ * @buf_read_ptr: Buffer position for reader.
+ * @cur_chan: Current position in chanlist for scan (for those drivers that
+ * use it).
+ * @scans_done: The number of scans completed.
+ * @scan_progress: Amount received or sent for current scan (in bytes).
+ * @munge_chan: Current position in chanlist for "munging".
+ * @munge_count: "Munge" count (in bytes, modulo 2**32).
+ * @munge_ptr: Buffer position for "munging".
+ * @events: Bit-vector of events that have occurred.
+ * @cmd: Details of comedi command in progress.
+ * @wait_head: Task wait queue for file reader or writer.
+ * @run_complete: "run complete" completion event.
+ * @run_active: "run active" reference counter.
+ * @cb_mask: Bit-vector of events that should wake waiting tasks.
+ * @inttrig: Software trigger function for command, or NULL.
+ *
+ * Note about the ..._count and ..._ptr members:
+ *
+ * Think of the _Count values being integers of unlimited size, indexing
+ * into a buffer of infinite length (though only an advancing portion
+ * of the buffer of fixed length prealloc_bufsz is accessible at any
+ * time). Then:
+ *
+ * Buf_Read_Count <= Buf_Read_Alloc_Count <= Munge_Count <=
+ * Buf_Write_Count <= Buf_Write_Alloc_Count <=
+ * (Buf_Read_Count + prealloc_bufsz)
+ *
+ * (Those aren't the actual members, apart from prealloc_bufsz.) When the
+ * buffer is reset, those _Count values start at 0 and only increase in value,
+ * maintaining the above inequalities until the next time the buffer is
+ * reset. The buffer is divided into the following regions by the inequalities:
+ *
+ * [0, Buf_Read_Count):
+ * old region no longer accessible
+ *
+ * [Buf_Read_Count, Buf_Read_Alloc_Count):
+ * filled and munged region allocated for reading but not yet read
+ *
+ * [Buf_Read_Alloc_Count, Munge_Count):
+ * filled and munged region not yet allocated for reading
+ *
+ * [Munge_Count, Buf_Write_Count):
+ * filled region not yet munged
+ *
+ * [Buf_Write_Count, Buf_Write_Alloc_Count):
+ * unfilled region allocated for writing but not yet written
+ *
+ * [Buf_Write_Alloc_Count, Buf_Read_Count + prealloc_bufsz):
+ * unfilled region not yet allocated for writing
+ *
+ * [Buf_Read_Count + prealloc_bufsz, infinity):
+ * unfilled region not yet accessible
+ *
+ * Data needs to be written into the buffer before it can be read out,
+ * and may need to be converted (or "munged") between the two
+ * operations. Extra unfilled buffer space may need to be allocated for
+ * writing (advancing Buf_Write_Alloc_Count) before new data is written.
+ * After writing new data, the newly filled space needs to be released
+ * (advancing Buf_Write_Count). This also results in the new data being
+ * "munged" (advancing Munge_Count). Before data is read out of the
+ * buffer, extra space may need to be allocated for reading (advancing
+ * Buf_Read_Alloc_Count). After the data has been read out, the space
+ * needs to be released (advancing Buf_Read_Count).
+ *
+ * The actual members, buf_read_count, buf_read_alloc_count,
+ * munge_count, buf_write_count, and buf_write_alloc_count take the
+ * value of the corresponding capitalized _Count values modulo 2^32
+ * (UINT_MAX+1). Subtracting a "higher" _count value from a "lower"
+ * _count value gives the same answer as subtracting a "higher" _Count
+ * value from a "lower" _Count value because prealloc_bufsz < UINT_MAX+1.
+ * The modulo operation is done implicitly.
+ *
+ * The buf_read_ptr, munge_ptr, and buf_write_ptr members take the value
+ * of the corresponding capitalized _Count values modulo prealloc_bufsz.
+ * These correspond to byte indices in the physical buffer. The modulo
+ * operation is done by subtracting prealloc_bufsz when the value
+ * exceeds prealloc_bufsz (assuming prealloc_bufsz plus the increment is
+ * less than or equal to UINT_MAX).
+ */
+struct comedi_async {
+ unsigned int prealloc_bufsz;
+ struct comedi_buf_map *buf_map;
+ unsigned int max_bufsize;
+ unsigned int buf_write_count;
+ unsigned int buf_write_alloc_count;
+ unsigned int buf_read_count;
+ unsigned int buf_read_alloc_count;
+ unsigned int buf_write_ptr;
+ unsigned int buf_read_ptr;
+ unsigned int cur_chan;
+ unsigned int scans_done;
+ unsigned int scan_progress;
+ unsigned int munge_chan;
+ unsigned int munge_count;
+ unsigned int munge_ptr;
+ unsigned int events;
+ struct comedi_cmd cmd;
+ wait_queue_head_t wait_head;
+ struct completion run_complete;
+ refcount_t run_active;
+ unsigned int cb_mask;
+ int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned int x);
+};
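A short illustration, not part of this patch: because the true _Count
difference is bounded by prealloc_bufsz, plain unsigned subtraction of the
stored members stays correct across 2^32 wrap-around. This is exactly what
comedi_buf_n_bytes_ready(), later in this header, relies on.

/* e.g. buf_write_count == 0x00000010 (after wrapping) and
 * buf_read_count == 0xfffffff0: the subtraction yields 0x20, the true
 * number of unread bytes, even though buf_write_count is numerically
 * the smaller value.
 */
static unsigned int my_unread_bytes(const struct comedi_async *async)
{
	return async->buf_write_count - async->buf_read_count;
}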
+
+/**
+ * enum comedi_cb - &struct comedi_async callback "events"
+ * @COMEDI_CB_EOS: end-of-scan
+ * @COMEDI_CB_EOA: end-of-acquisition/output
+ * @COMEDI_CB_BLOCK: data has arrived, wakes up read() / write()
+ * @COMEDI_CB_EOBUF: DEPRECATED: end of buffer
+ * @COMEDI_CB_ERROR: card error during acquisition
+ * @COMEDI_CB_OVERFLOW: buffer overflow/underflow
+ * @COMEDI_CB_ERROR_MASK: events that indicate an error has occurred
+ * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command
+ */
+enum comedi_cb {
+ COMEDI_CB_EOS = BIT(0),
+ COMEDI_CB_EOA = BIT(1),
+ COMEDI_CB_BLOCK = BIT(2),
+ COMEDI_CB_EOBUF = BIT(3),
+ COMEDI_CB_ERROR = BIT(4),
+ COMEDI_CB_OVERFLOW = BIT(5),
+ /* masks */
+ COMEDI_CB_ERROR_MASK = (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW),
+ COMEDI_CB_CANCEL_MASK = (COMEDI_CB_EOA | COMEDI_CB_ERROR_MASK)
+};
+
+/**
+ * struct comedi_driver - COMEDI driver registration
+ * @driver_name: Name of driver.
+ * @module: Owning module.
+ * @attach: The optional "attach" handler for manually configured COMEDI
+ * devices.
+ * @detach: The "detach" handler for deconfiguring COMEDI devices.
+ * @auto_attach: The optional "auto_attach" handler for automatically
+ * configured COMEDI devices.
+ * @num_names: Optional number of "board names" supported.
+ * @board_name: Optional pointer to a pointer to a board name. The pointer
+ * to a board name is embedded in an element of a driver-defined array
+ * of static, read-only board type information.
+ * @offset: Optional size of each element of the driver-defined array of
+ * static, read-only board type information, i.e. the offset between each
+ * pointer to a board name.
+ *
+ * This is used with comedi_driver_register() and comedi_driver_unregister() to
+ * register and unregister a low-level COMEDI driver with the COMEDI core.
+ *
+ * If @num_names is non-zero, @board_name should be non-NULL, and @offset
+ * should be at least sizeof(*board_name). These are used by the handler for
+ * the %COMEDI_DEVCONFIG ioctl to match a hardware device and its driver by
+ * board name. If @num_names is zero, the %COMEDI_DEVCONFIG ioctl matches a
+ * hardware device and its driver by driver name. This is only useful if the
+ * @attach handler is set. If @num_names is non-zero, the driver's @attach
+ * handler will be called with the COMEDI device structure's board_ptr member
+ * pointing to the matched pointer to a board name within the driver's private
+ * array of static, read-only board type information.
+ *
+ * The @detach handler has two roles. If a COMEDI device was successfully
+ * configured by the @attach or @auto_attach handler, it is called when the
+ * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to
+ * unloading of the driver, or due to device removal). It is also called when
+ * the @attach or @auto_attach handler returns an error. Therefore, the
+ * @attach or @auto_attach handlers can defer clean-up on error until the
+ * @detach handler is called. If the @attach or @auto_attach handlers free
+ * any resources themselves, they must prevent the @detach handler from
+ * freeing the same resources. The @detach handler must not assume that all
+ * resources requested by the @attach or @auto_attach handler were
+ * successfully allocated.
+ */
+struct comedi_driver {
+ /* private: */
+ struct comedi_driver *next; /* Next in list of COMEDI drivers. */
+ /* public: */
+ const char *driver_name;
+ struct module *module;
+ int (*attach)(struct comedi_device *dev, struct comedi_devconfig *it);
+ void (*detach)(struct comedi_device *dev);
+ int (*auto_attach)(struct comedi_device *dev, unsigned long context);
+ unsigned int num_names;
+ const char *const *board_name;
+ int offset;
+};
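Illustration only, not part of this patch: a driver that matches by board
name points @board_name at the name member of the first element of its
private board-info array and sets @offset to the element size. All my_*
names are hypothetical; my_attach and my_detach are assumed to be defined
elsewhere.

struct my_boardinfo {
	const char *name;
	int ai_chans;
};

static const struct my_boardinfo my_boards[] = {
	{ .name = "my-board-a", .ai_chans = 8 },
	{ .name = "my-board-b", .ai_chans = 16 },
};

static struct comedi_driver my_driver = {
	.driver_name	= "my_driver",
	.module		= THIS_MODULE,
	.attach		= my_attach,	/* sees dev->board_ptr aimed into my_boards[] */
	.detach		= my_detach,
	.num_names	= ARRAY_SIZE(my_boards),
	.board_name	= &my_boards[0].name,
	.offset		= sizeof(struct my_boardinfo),
};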
+
+/**
+ * struct comedi_device - Working data for a COMEDI device
+ * @use_count: Number of open file objects.
+ * @driver: Low-level COMEDI driver attached to this COMEDI device.
+ * @pacer: Optional pointer to a dynamically allocated acquisition pacer
+ * control. It is freed automatically after the COMEDI device is
+ * detached from the low-level driver.
+ * @private: Optional pointer to private data allocated by the low-level
+ * driver. It is freed automatically after the COMEDI device is
+ * detached from the low-level driver.
+ * @class_dev: Sysfs comediX device.
+ * @minor: Minor device number of COMEDI char device (0-47).
+ * @detach_count: Counter incremented every time the COMEDI device is detached.
+ * Used for checking a previous attachment is still valid.
+ * @hw_dev: Optional pointer to the low-level hardware &struct device. It is
+ * required for automatically configured COMEDI devices and optional for
+ * COMEDI devices configured by the %COMEDI_DEVCONFIG ioctl, although
+ * the bus-specific COMEDI functions only work if it is set correctly.
+ * It is also passed to dma_alloc_coherent() for COMEDI subdevices that
+ * have their 'async_dma_dir' member set to something other than
+ * %DMA_NONE.
+ * @board_name: Pointer to a COMEDI board name or a COMEDI driver name. When
+ * the low-level driver's "attach" handler is called by the handler for
+ * the %COMEDI_DEVCONFIG ioctl, it either points to a matched board name
+ * string if the 'num_names' member of the &struct comedi_driver is
+ * non-zero, otherwise it points to the low-level driver name string.
+ * When the low-level driver's "auto_attach" handler is called for an
+ * automatically configured COMEDI device, it points to the low-level
+ * driver name string. The low-level driver is free to change it in its
+ * "attach" or "auto_attach" handler if it wishes.
+ * @board_ptr: Optional pointer to private, read-only board type information in
+ * the low-level driver. If the 'num_names' member of the &struct
+ * comedi_driver is non-zero, the handler for the %COMEDI_DEVCONFIG ioctl
+ * will point it to a pointer to a matched board name string within the
+ * driver's private array of static, read-only board type information when
+ * calling the driver's "attach" handler. The low-level driver is free to
+ * change it.
+ * @attached: Flag indicating that the COMEDI device is attached to a low-level
+ * driver.
+ * @ioenabled: Flag used to indicate that a PCI device has been enabled and
+ * its regions requested.
+ * @spinlock: Generic spin-lock for use by the low-level driver.
+ * @mutex: Generic mutex for use by the COMEDI core module.
+ * @attach_lock: &struct rw_semaphore used to guard against the COMEDI device
+ * being detached while an operation is in progress. The down_write()
+ * operation is only allowed while @mutex is held and is used when
+ * changing @attached and @detach_count and calling the low-level driver's
+ * "detach" handler. The down_read() operation is generally used without
+ * holding @mutex.
+ * @refcount: &struct kref reference counter for freeing COMEDI device.
+ * @n_subdevices: Number of COMEDI subdevices allocated by the low-level
+ * driver for this device.
+ * @subdevices: Dynamically allocated array of COMEDI subdevices.
+ * @mmio: Optional pointer to a remapped MMIO region set by the low-level
+ * driver.
+ * @iobase: Optional base of an I/O port region requested by the low-level
+ * driver.
+ * @iolen: Length of I/O port region requested at @iobase.
+ * @irq: Optional IRQ number requested by the low-level driver.
+ * @read_subdev: Optional pointer to a default COMEDI subdevice operated on by
+ * the read() file operation. Set by the low-level driver.
+ * @write_subdev: Optional pointer to a default COMEDI subdevice operated on by
+ * the write() file operation. Set by the low-level driver.
+ * @async_queue: Storage for fasync_helper().
+ * @open: Optional pointer to a function set by the low-level driver to be
+ * called when @use_count changes from 0 to 1.
+ * @close: Optional pointer to a function set by the low-level driver to be
+ * called when @use_count changes from 1 to 0.
+ * @insn_device_config: Optional pointer to a handler for all sub-instructions
+ * except %INSN_DEVICE_CONFIG_GET_ROUTES of the %INSN_DEVICE_CONFIG
+ * instruction. If this is not initialized by the low-level driver, a
+ * default handler will be set during post-configuration.
+ * @get_valid_routes: Optional pointer to a handler for the
+ * %INSN_DEVICE_CONFIG_GET_ROUTES sub-instruction of the
+ * %INSN_DEVICE_CONFIG instruction set. If this is not initialized by the
+ * low-level driver, a default handler that copies zero routes back to the
+ * user will be used.
+ *
+ * This is the main control data structure for a COMEDI device (as far as the
+ * COMEDI core is concerned). There are two groups of COMEDI devices -
+ * "legacy" devices that are configured by the handler for the
+ * %COMEDI_DEVCONFIG ioctl, and automatically configured devices resulting
+ * from a call to comedi_auto_config() as a result of a bus driver probe in
+ * a low-level COMEDI driver. The "legacy" COMEDI devices are allocated
+ * during module initialization if the "comedi_num_legacy_minors" module
+ * parameter is non-zero and use minor device numbers from 0 to
+ * comedi_num_legacy_minors minus one. The automatically configured COMEDI
+ * devices are allocated on demand and use minor device numbers from
+ * comedi_num_legacy_minors to 47.
+ */
+struct comedi_device {
+ int use_count;
+ struct comedi_driver *driver;
+ struct comedi_8254 *pacer;
+ void *private;
+
+ struct device *class_dev;
+ int minor;
+ unsigned int detach_count;
+ struct device *hw_dev;
+
+ const char *board_name;
+ const void *board_ptr;
+ unsigned int attached:1;
+ unsigned int ioenabled:1;
+ spinlock_t spinlock; /* generic spin-lock for low-level driver */
+ struct mutex mutex; /* generic mutex for COMEDI core */
+ struct rw_semaphore attach_lock;
+ struct kref refcount;
+
+ int n_subdevices;
+ struct comedi_subdevice *subdevices;
+
+ /* dumb */
+ void __iomem *mmio;
+ unsigned long iobase;
+ unsigned long iolen;
+ unsigned int irq;
+
+ struct comedi_subdevice *read_subdev;
+ struct comedi_subdevice *write_subdev;
+
+ struct fasync_struct *async_queue;
+
+ int (*open)(struct comedi_device *dev);
+ void (*close)(struct comedi_device *dev);
+ int (*insn_device_config)(struct comedi_device *dev,
+ struct comedi_insn *insn, unsigned int *data);
+ unsigned int (*get_valid_routes)(struct comedi_device *dev,
+ unsigned int n_pairs,
+ unsigned int *pair_data);
+};
+
+/*
+ * function prototypes
+ */
+
+void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
+
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
+int comedi_dev_put(struct comedi_device *dev);
+
+bool comedi_is_subdevice_running(struct comedi_subdevice *s);
+bool comedi_get_is_subdevice_running(struct comedi_subdevice *s);
+void comedi_put_is_subdevice_running(struct comedi_subdevice *s);
+
+void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size);
+void comedi_set_spriv_auto_free(struct comedi_subdevice *s);
+
+int comedi_check_chanlist(struct comedi_subdevice *s,
+ int n,
+ unsigned int *chanlist);
+
+/* range stuff */
+
+#define RANGE(a, b) {(a) * 1e6, (b) * 1e6, 0}
+#define RANGE_ext(a, b) {(a) * 1e6, (b) * 1e6, RF_EXTERNAL}
+#define RANGE_mA(a, b) {(a) * 1e6, (b) * 1e6, UNIT_mA}
+#define RANGE_unitless(a, b) {(a) * 1e6, (b) * 1e6, 0}
+#define BIP_RANGE(a) {-(a) * 1e6, (a) * 1e6, 0}
+#define UNI_RANGE(a) {0, (a) * 1e6, 0}
+
+extern const struct comedi_lrange range_bipolar10;
+extern const struct comedi_lrange range_bipolar5;
+extern const struct comedi_lrange range_bipolar2_5;
+extern const struct comedi_lrange range_unipolar10;
+extern const struct comedi_lrange range_unipolar5;
+extern const struct comedi_lrange range_unipolar2_5;
+extern const struct comedi_lrange range_0_20mA;
+extern const struct comedi_lrange range_4_20mA;
+extern const struct comedi_lrange range_0_32mA;
+extern const struct comedi_lrange range_unknown;
+
+#define range_digital range_unipolar5
+
+/**
+ * struct comedi_lrange - Describes a COMEDI range table
+ * @length: Number of entries in the range table.
+ * @range: Array of &struct comedi_krange, one for each range.
+ *
+ * Each element of @range[] describes the minimum and maximum physical range
+ * and the type of units. Typically, the type of unit is %UNIT_volt
+ * (i.e. volts) and the minimum and maximum are in millionths of a volt.
+ * There may also be a flag that indicates the minimum and maximum are merely
+ * scale factors for an unknown, external reference.
+ */
+struct comedi_lrange {
+ int length;
+ struct comedi_krange range[] __counted_by(length);
+};
+
+/**
+ * comedi_range_is_bipolar() - Test if subdevice range is bipolar
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is bipolar by checking whether its minimum value
+ * is negative.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is bipolar.
+ * %false if the range is unipolar.
+ */
+static inline bool comedi_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min < 0;
+}
+
+/**
+ * comedi_range_is_unipolar() - Test if subdevice range is unipolar
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is unipolar by checking whether its minimum value
+ * is at least 0.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is unipolar.
+ * %false if the range is bipolar.
+ */
+static inline bool comedi_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min >= 0;
+}
+
+/**
+ * comedi_range_is_external() - Test if subdevice range is external
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is externally referenced by checking whether its
+ * %RF_EXTERNAL flag is set.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is external.
+ * %false if the range is internal.
+ */
+static inline bool comedi_range_is_external(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return !!(s->range_table->range[range].flags & RF_EXTERNAL);
+}
+
+/**
+ * comedi_chan_range_is_bipolar() - Test if channel-specific range is bipolar
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is bipolar by checking whether its minimum value
+ * is negative.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is bipolar.
+ * %false if the range is unipolar.
+ */
+static inline bool comedi_chan_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min < 0;
+}
+
+/**
+ * comedi_chan_range_is_unipolar() - Test if channel-specific range is unipolar
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is unipolar by checking whether its minimum value
+ * is at least 0.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is unipolar.
+ * %false if the range is bipolar.
+ */
+static inline bool comedi_chan_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min >= 0;
+}
+
+/**
+ * comedi_chan_range_is_external() - Test if channel-specific range is external
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is externally referenced by checking whether its
+ * %RF_EXTERNAL flag is set.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is external.
+ * %false if the range is internal.
+ */
+static inline bool comedi_chan_range_is_external(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return !!(s->range_table_list[chan]->range[range].flags & RF_EXTERNAL);
+}
+
+/**
+ * comedi_offset_munge() - Convert between offset binary and 2's complement
+ * @s: COMEDI subdevice.
+ * @val: Value to be converted.
+ *
+ * Toggles the highest bit of a sample value to toggle between offset binary
+ * and 2's complement. Assumes that @s->maxdata is a power of 2 minus 1.
+ *
+ * Return: The converted value.
+ */
+static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s,
+ unsigned int val)
+{
+ return val ^ s->maxdata ^ (s->maxdata >> 1);
+}
+
+/**
+ * comedi_bytes_per_sample() - Determine subdevice sample size
+ * @s: COMEDI subdevice.
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the %SDF_LSAMPL subdevice flag is set or not.
+ *
+ * Return: The subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_per_sample(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? sizeof(int) : sizeof(short);
+}
+
+/**
+ * comedi_sample_shift() - Determine log2 of subdevice sample size
+ * @s: COMEDI subdevice.
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the %SDF_LSAMPL subdevice flag is set or not. The log2 of the
+ * sample size will be 2 or 1 and can be used as the right operand of a
+ * bit-shift operator to multiply or divide something by the sample size.
+ *
+ * Return: log2 of the subdevice sample size.
+ */
+static inline unsigned int comedi_sample_shift(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? 2 : 1;
+}
+
+/**
+ * comedi_bytes_to_samples() - Convert a number of bytes to a number of samples
+ * @s: COMEDI subdevice.
+ * @nbytes: Number of bytes
+ *
+ * Return: The number of bytes divided by the subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_to_samples(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ return nbytes >> comedi_sample_shift(s);
+}
+
+/**
+ * comedi_samples_to_bytes() - Convert a number of samples to a number of bytes
+ * @s: COMEDI subdevice.
+ * @nsamples: Number of samples.
+ *
+ * Return: The number of samples multiplied by the subdevice sample size.
+ * (Does not check for arithmetic overflow.)
+ */
+static inline unsigned int comedi_samples_to_bytes(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ return nsamples << comedi_sample_shift(s);
+}
+
+/**
+ * comedi_check_trigger_src() - Trivially validate a comedi_cmd trigger source
+ * @src: Pointer to the trigger source to validate.
+ * @flags: Bitmask of valid %TRIG_* for the trigger.
+ *
+ * This is used in "step 1" of the do_cmdtest functions of comedi drivers
+ * to validate the comedi_cmd triggers. Masking *@src against @flags
+ * allows the userspace comedilib to pass all the comedi_cmd
+ * triggers as %TRIG_ANY and get back a bitmask of the valid trigger sources.
+ *
+ * Return:
+ * 0 if trigger sources in *@src are all supported.
+ * -EINVAL if any trigger source in *@src is unsupported.
+ */
+static inline int comedi_check_trigger_src(unsigned int *src,
+ unsigned int flags)
+{
+ unsigned int orig_src = *src;
+
+ *src = orig_src & flags;
+ if (*src == TRIG_INVALID || *src != orig_src)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_is_unique() - Make sure a trigger source is unique
+ * @src: The trigger source to check.
+ *
+ * Return:
+ * 0 if no more than one trigger source is set.
+ * -EINVAL if more than one trigger source is set.
+ */
+static inline int comedi_check_trigger_is_unique(unsigned int src)
+{
+ /* this test is true if more than one _src bit is set */
+ if ((src & (src - 1)) != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_is() - Trivially validate a trigger argument
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The value the argument should be.
+ *
+ * Forces *@arg to be @val.
+ *
+ * Return:
+ * 0 if *@arg was already @val.
+ * -EINVAL if *@arg differed from @val.
+ */
+static inline int comedi_check_trigger_arg_is(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg != val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_min() - Trivially validate a trigger argument min
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The minimum value the argument should be.
+ *
+ * Forces *@arg to be at least @val, setting it to @val if necessary.
+ *
+ * Return:
+ * 0 if *@arg was already at least @val.
+ * -EINVAL if *@arg was less than @val.
+ */
+static inline int comedi_check_trigger_arg_min(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg < val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_max() - Trivially validate a trigger argument max
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The maximum value the argument should be.
+ *
+ * Forces *@arg to be no more than @val, setting it to @val if necessary.
+ *
+ * Return:
+ * 0 if *@arg was already no more than @val.
+ * -EINVAL if *@arg was greater than @val.
+ */
+static inline int comedi_check_trigger_arg_max(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg > val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
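Illustration only, not part of this patch: the helpers above are meant to
be OR-ed together in the usual "step 1" and "step 3" of a do_cmdtest
handler. The trigger sources and the 10 us minimum scan period below are
hypothetical hardware limits.

static int my_ai_cmdtest(struct comedi_device *dev,
			 struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	int err = 0;

	/* Step 1 : check if triggers are trivially valid */
	err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
	err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
	err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */
	err |= comedi_check_trigger_is_unique(cmd->stop_src);
	if (err)
		return 2;

	/* Step 3 : check if arguments are trivially valid */
	err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
	err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 10000);
	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
					   cmd->chanlist_len);
	if (err)
		return 3;

	return 0;
}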
+
+/*
+ * Must set dev->hw_dev if you wish to dma directly into comedi's buffer.
+ * Also useful for retrieving a previously configured hardware device of
+ * known bus type. Set automatically for auto-configured devices.
+ * Automatically set to NULL when detaching hardware device.
+ */
+int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev);
+
+/**
+ * comedi_buf_n_bytes_ready() - Determine amount of unread data in buffer
+ * @s: COMEDI subdevice.
+ *
+ * Determines the number of bytes of unread data in the asynchronous
+ * acquisition data buffer for a subdevice. The data in question might not
+ * have been fully "munged" yet.
+ *
+ * Return: The amount of unread data in bytes.
+ */
+static inline unsigned int comedi_buf_n_bytes_ready(struct comedi_subdevice *s)
+{
+ return s->async->buf_write_count - s->async->buf_read_count;
+}
+
+unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s, unsigned int n);
+unsigned int comedi_buf_write_free(struct comedi_subdevice *s, unsigned int n);
+
+unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s);
+unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s, unsigned int n);
+unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n);
+
+unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data, unsigned int nsamples);
+unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples);
+
+/* drivers.c - general comedi driver functions */
+
+#define COMEDI_TIMEOUT_MS 1000
+
+int comedi_timeout(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ int (*cb)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned long context),
+ unsigned long context);
+
+unsigned int comedi_handle_events(struct comedi_device *dev,
+ struct comedi_subdevice *s);
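Illustration only, not part of this patch: the canonical use of
comedi_buf_write_samples() and comedi_handle_events() is from a driver's
interrupt handler. A sketch; the FIFO register offset is hypothetical.

static irqreturn_t my_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	unsigned short sample;

	sample = inw(dev->iobase + 0x04);	/* hypothetical FIFO reg */
	comedi_buf_write_samples(s, &sample, 1);

	if (async->cmd.stop_src == TRIG_COUNT &&
	    async->scans_done >= async->cmd.stop_arg)
		async->events |= COMEDI_CB_EOA;

	comedi_handle_events(dev, s);
	return IRQ_HANDLED;
}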
+
+int comedi_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data,
+ unsigned int mask);
+unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
+ unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
+unsigned int comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans);
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples);
+void comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes);
+
+void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size);
+int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices);
+int comedi_alloc_subdev_readback(struct comedi_subdevice *s);
+
+int comedi_readback_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+
+int comedi_load_firmware(struct comedi_device *dev, struct device *hw_dev,
+ const char *name,
+ int (*cb)(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context),
+ unsigned long context);
+
+int __comedi_request_region(struct comedi_device *dev,
+ unsigned long start, unsigned long len);
+int comedi_request_region(struct comedi_device *dev,
+ unsigned long start, unsigned long len);
+void comedi_legacy_detach(struct comedi_device *dev);
+
+int comedi_auto_config(struct device *hardware_device,
+ struct comedi_driver *driver, unsigned long context);
+void comedi_auto_unconfig(struct device *hardware_device);
+
+int comedi_driver_register(struct comedi_driver *driver);
+void comedi_driver_unregister(struct comedi_driver *driver);
+
+/**
+ * module_comedi_driver() - Helper macro for registering a comedi driver
+ * @__comedi_driver: comedi_driver struct
+ *
+ * Helper macro for comedi drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only use
+ * this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_comedi_driver(__comedi_driver) \
+ module_driver(__comedi_driver, comedi_driver_register, \
+ comedi_driver_unregister)
+
+#endif /* _COMEDIDEV_H */
diff --git a/include/linux/comedi/comedilib.h b/include/linux/comedi/comedilib.h
new file mode 100644
index 000000000000..1f2b22b383cc
--- /dev/null
+++ b/include/linux/comedi/comedilib.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedilib.h
+ * Header file for kcomedilib
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _LINUX_COMEDILIB_H
+#define _LINUX_COMEDILIB_H
+
+struct comedi_device *comedi_open_from(const char *path, int from);
+
+/**
+ * comedi_open() - Open a COMEDI device from the kernel
+ * @path: Fake pathname of the form "/dev/comediN".
+ *
+ * Converts @path to a COMEDI device number and "opens" it if it exists
+ * and is attached to a low-level COMEDI driver.
+ *
+ * Return: A pointer to the COMEDI device on success.
+ * Return %NULL on failure.
+ */
+static inline struct comedi_device *comedi_open(const char *path)
+{
+ return comedi_open_from(path, -1);
+}
+
+int comedi_close_from(struct comedi_device *dev, int from);
+
+/**
+ * comedi_close() - Close a COMEDI device from the kernel
+ * @dev: COMEDI device.
+ *
+ * Closes a COMEDI device previously opened by comedi_open().
+ *
+ * Return: 0
+ */
+static inline int comedi_close(struct comedi_device *dev)
+{
+ return comedi_close_from(dev, -1);
+}
+
+int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int *io);
+int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int io);
+int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
+ unsigned int mask, unsigned int *bits,
+ unsigned int base_channel);
+int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
+ unsigned int subd);
+int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice);
+
+#endif
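
A sketch of the kcomedilib calls above used from kernel code; the device path,
the subdevice search and the channel number are illustrative assumptions:

	/* Illustrative only: open device 0, configure DIO channel 0 as an
	 * output on the first DIO subdevice, then close the device. */
	static int example_configure_dio(void)
	{
		struct comedi_device *dev;
		int subdev;

		dev = comedi_open("/dev/comedi0");
		if (!dev)
			return -ENODEV;

		subdev = comedi_find_subdevice_by_type(dev, COMEDI_SUBD_DIO, 0);
		if (subdev >= 0)
			comedi_dio_config(dev, subdev, 0, COMEDI_OUTPUT);

		return comedi_close(dev);
	}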
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 0d8415820fc3..173d9c07a895 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H
@@ -28,21 +29,18 @@ enum compact_result {
/* compaction didn't start as it was deferred due to past failures */
COMPACT_DEFERRED,
- /* compaction not active last round */
- COMPACT_INACTIVE = COMPACT_DEFERRED,
-
/* For more detailed tracepoint output - internal to compaction */
COMPACT_NO_SUITABLE_PAGE,
/* compaction should continue to another pageblock */
COMPACT_CONTINUE,
/*
- * The full zone was compacted scanned but wasn't successfull to compact
+ * The whole zone was scanned but it wasn't possible to compact
* suitable pages.
*/
COMPACT_COMPLETE,
/*
- * direct compaction has scanned part of the zone but wasn't successfull
+ * direct compaction has scanned part of the zone but wasn't able
* to compact suitable pages.
*/
COMPACT_PARTIAL_SKIPPED,
@@ -82,152 +80,61 @@ static inline unsigned long compact_gap(unsigned int order)
return 2UL << order;
}
+static inline int current_is_kcompactd(void)
+{
+ return current->flags & PF_KCOMPACTD;
+}
+
#ifdef CONFIG_COMPACTION
-extern int sysctl_compact_memory;
-extern int sysctl_compaction_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos);
-extern int sysctl_extfrag_threshold;
-extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos);
-extern int sysctl_compact_unevictable_allowed;
+extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, unsigned int alloc_flags,
- const struct alloc_context *ac, enum compact_priority prio);
+ const struct alloc_context *ac, enum compact_priority prio,
+ struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern enum compact_result compaction_suitable(struct zone *zone, int order,
- unsigned int alloc_flags, int classzone_idx);
+extern bool compaction_suitable(struct zone *zone, int order,
+ unsigned long watermark, int highest_zoneidx);
-extern void defer_compaction(struct zone *zone, int order);
-extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
-extern bool compaction_restarting(struct zone *zone, int order);
-
-/* Compaction has made some progress and retrying makes sense */
-static inline bool compaction_made_progress(enum compact_result result)
-{
- /*
- * Even though this might sound confusing this in fact tells us
- * that the compaction successfully isolated and migrated some
- * pageblocks.
- */
- if (result == COMPACT_SUCCESS)
- return true;
-
- return false;
-}
-
-/* Compaction has failed and it doesn't make much sense to keep retrying. */
-static inline bool compaction_failed(enum compact_result result)
-{
- /* All zones were scanned completely and still not result. */
- if (result == COMPACT_COMPLETE)
- return true;
-
- return false;
-}
-
-/*
- * Compaction has backed off for some reason. It might be throttling or
- * lock contention. Retrying is still worthwhile.
- */
-static inline bool compaction_withdrawn(enum compact_result result)
-{
- /*
- * Compaction backed off due to watermark checks for order-0
- * so the regular reclaim has to try harder and reclaim something.
- */
- if (result == COMPACT_SKIPPED)
- return true;
-
- /*
- * If compaction is deferred for high-order allocations, it is
- * because sync compaction recently failed. If this is the case
- * and the caller requested a THP allocation, we do not want
- * to heavily disrupt the system, so we fail the allocation
- * instead of entering direct reclaim.
- */
- if (result == COMPACT_DEFERRED)
- return true;
-
- /*
- * If compaction in async mode encounters contention or blocks higher
- * priority task we back off early rather than cause stalls.
- */
- if (result == COMPACT_CONTENDED)
- return true;
-
- /*
- * Page scanners have met but we haven't scanned full zones so this
- * is a back off in fact.
- */
- if (result == COMPACT_PARTIAL_SKIPPED)
- return true;
-
- return false;
-}
-
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
int alloc_flags);
-extern int kcompactd_run(int nid);
-extern void kcompactd_stop(int nid);
-extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+extern void __meminit kcompactd_run(int nid);
+extern void __meminit kcompactd_stop(int nid);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
#else
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
-static inline enum compact_result compaction_suitable(struct zone *zone, int order,
- int alloc_flags, int classzone_idx)
-{
- return COMPACT_SKIPPED;
-}
-
-static inline void defer_compaction(struct zone *zone, int order)
-{
-}
-
-static inline bool compaction_deferred(struct zone *zone, int order)
-{
- return true;
-}
-
-static inline bool compaction_made_progress(enum compact_result result)
-{
- return false;
-}
-
-static inline bool compaction_failed(enum compact_result result)
+static inline bool compaction_suitable(struct zone *zone, int order,
+ unsigned long watermark,
+ int highest_zoneidx)
{
return false;
}
-static inline bool compaction_withdrawn(enum compact_result result)
+static inline void kcompactd_run(int nid)
{
- return true;
-}
-
-static inline int kcompactd_run(int nid)
-{
- return 0;
}
static inline void kcompactd_stop(int nid)
{
}
-static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+static inline void wakeup_kcompactd(pg_data_t *pgdat,
+ int order, int highest_zoneidx)
{
}
#endif /* CONFIG_COMPACTION */
-#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
struct node;
+#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 3fc433303d7a..56cebaff0c91 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COMPAT_H
#define _LINUX_COMPAT_H
/*
@@ -6,8 +7,7 @@
*/
#include <linux/types.h>
-
-#ifdef CONFIG_COMPAT
+#include <linux/time.h>
#include <linux/stat.h>
#include <linux/param.h> /* for HZ */
@@ -16,12 +16,24 @@
#include <linux/if.h>
#include <linux/fs.h>
#include <linux/aio_abi.h> /* for aio_context_t */
+#include <linux/uaccess.h>
#include <linux/unistd.h>
#include <asm/compat.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+/*
+ * It may be useful for an architecture to override the definitions of the
+ * COMPAT_SYSCALL_DEFINE0 and COMPAT_SYSCALL_DEFINEx() macros, in particular
+ * to use a different calling convention for syscalls. To allow for that,
+ * the prototypes for the compat_sys_*() functions below will *not* be included
+ * if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled.
+ */
+#include <asm/syscall_wrapper.h>
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
#ifndef COMPAT_USE_64BIT_TIME
#define COMPAT_USE_64BIT_TIME 0
#endif
@@ -30,8 +42,12 @@
#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v))
#endif
+#ifndef COMPAT_SYSCALL_DEFINE0
#define COMPAT_SYSCALL_DEFINE0(name) \
+ asmlinkage long compat_sys_##name(void); \
+ ALLOW_ERROR_INJECTION(compat_sys_##name, ERRNO); \
asmlinkage long compat_sys_##name(void)
+#endif /* COMPAT_SYSCALL_DEFINE0 */
#define COMPAT_SYSCALL_DEFINE1(name, ...) \
COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
@@ -46,16 +62,35 @@
#define COMPAT_SYSCALL_DEFINE6(name, ...) \
COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
-#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
- __attribute__((alias(__stringify(compat_SyS##name)))); \
- static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
- asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
- asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
- { \
- return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
- } \
- static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+/*
+ * The asmlinkage stub is aliased to a function named __se_compat_sys_*() which
+ * sign-extends 32-bit ints to longs whenever needed. The actual work is
+ * done within __do_compat_sys_*().
+ */
+#ifndef COMPAT_SYSCALL_DEFINEx
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
+ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
+ __attribute__((alias(__stringify(__se_compat_sys##name)))); \
+ ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
+ } \
+ __diag_pop(); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#endif /* COMPAT_SYSCALL_DEFINEx */
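
By hand, a definition such as COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int,
fd, compat_off_t, length) expands roughly as follows (a sketch only: the
diagnostic pragmas, the error-injection annotation and the __SC_TEST
compile-time checks are omitted, and plain casts stand in for
__SC_LONG/__SC_DELOUSE):

	asmlinkage long compat_sys_ftruncate(unsigned int fd, compat_off_t length)
		__attribute__((alias("__se_compat_sys_ftruncate")));
	static inline long __do_compat_sys_ftruncate(unsigned int fd,
						     compat_off_t length);
	/* takes every argument as long, then narrows/sign-extends */
	asmlinkage long __se_compat_sys_ftruncate(long fd, long length)
	{
		return __do_compat_sys_ftruncate((unsigned int)fd,
						 (compat_off_t)length);
	}
	static inline long __do_compat_sys_ftruncate(unsigned int fd,
						     compat_off_t length)
	{
		/* body written at the COMPAT_SYSCALL_DEFINE2() call site */
		return 0;
	}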
+
+struct compat_iovec {
+ compat_uptr_t iov_base;
+ compat_size_t iov_len;
+};
#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
@@ -67,6 +102,9 @@ typedef struct compat_sigaltstack {
compat_size_t ss_size;
} compat_stack_t;
#endif
+#ifndef COMPAT_MINSIGSTKSZ
+#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
+#endif
#define compat_jiffies_to_clock_t(x) \
(((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
@@ -74,29 +112,10 @@ typedef struct compat_sigaltstack {
typedef __compat_uid32_t compat_uid_t;
typedef __compat_gid32_t compat_gid_t;
-typedef compat_ulong_t compat_aio_context_t;
-
struct compat_sel_arg_struct;
struct rusage;
-struct compat_itimerspec {
- struct compat_timespec it_interval;
- struct compat_timespec it_value;
-};
-
-struct compat_utimbuf {
- compat_time_t actime;
- compat_time_t modtime;
-};
-
-struct compat_itimerval {
- struct compat_timeval it_interval;
- struct compat_timeval it_value;
-};
-
-struct itimerval;
-int get_compat_itimerval(struct itimerval *, const struct compat_itimerval __user *);
-int put_compat_itimerval(struct compat_itimerval __user *, const struct itimerval *);
+struct old_itimerval32;
struct compat_tms {
compat_clock_t tms_utime;
@@ -105,43 +124,15 @@ struct compat_tms {
compat_clock_t tms_cstime;
};
-struct compat_timex {
- compat_uint_t modes;
- compat_long_t offset;
- compat_long_t freq;
- compat_long_t maxerror;
- compat_long_t esterror;
- compat_int_t status;
- compat_long_t constant;
- compat_long_t precision;
- compat_long_t tolerance;
- struct compat_timeval time;
- compat_long_t tick;
- compat_long_t ppsfreq;
- compat_long_t jitter;
- compat_int_t shift;
- compat_long_t stabil;
- compat_long_t jitcnt;
- compat_long_t calcnt;
- compat_long_t errcnt;
- compat_long_t stbcnt;
- compat_int_t tai;
-
- compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
- compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
- compat_int_t:32; compat_int_t:32; compat_int_t:32;
-};
-
-struct timex;
-int compat_get_timex(struct timex *, const struct compat_timex __user *);
-int compat_put_timex(struct compat_timex __user *, const struct timex *);
-
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
+int set_compat_user_sigmask(const compat_sigset_t __user *umask,
+ size_t sigsetsize);
+
struct compat_sigaction {
#ifndef __ARCH_HAS_IRIX_SIGACTION
compat_uptr_t sa_handler;
@@ -156,43 +147,150 @@ struct compat_sigaction {
compat_sigset_t sa_mask __packed;
};
-/*
- * These functions operate on 32- or 64-bit specs depending on
- * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
- */
-extern int compat_get_timespec(struct timespec *, const void __user *);
-extern int compat_put_timespec(const struct timespec *, void __user *);
-extern int compat_get_timeval(struct timeval *, const void __user *);
-extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int compat_get_timespec64(struct timespec64 *, const void __user *);
-extern int compat_put_timespec64(const struct timespec64 *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits);
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
-/*
- * This function convert a timespec if necessary and returns a *user
- * space* pointer. If no conversion is necessary, it returns the
- * initial pointer. NULL is a legitimate argument and will always
- * output NULL.
- */
-extern int compat_convert_timespec(struct timespec __user **,
- const void __user *);
+typedef struct compat_siginfo {
+ int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
+ int si_errno;
+ int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
-struct compat_iovec {
- compat_uptr_t iov_base;
- compat_size_t iov_len;
-};
+ union {
+ int _pad[128/sizeof(int) - 3];
+
+ /* kill() */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+#ifdef CONFIG_X86_X32_ABI
+ /* SIGCHLD (x32 version) */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_s64 _utime;
+ compat_s64 _stime;
+ } _sigchld_x32;
+#endif
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
+ struct {
+ compat_uptr_t _addr; /* faulting insn/memory ref. */
+#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \
+ sizeof(short) : __alignof__(compat_uptr_t))
+ union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
+ /*
+ * used when si_code=BUS_MCEERR_AR or
+ * used when si_code=BUS_MCEERR_AO
+ */
+ short int _addr_lsb; /* Valid LSB of the reported address. */
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD];
+ compat_uptr_t _lower;
+ compat_uptr_t _upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ struct {
+ char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD];
+ u32 _pkey;
+ } _addr_pkey;
+ /* used when si_code=TRAP_PERF */
+ struct {
+ compat_ulong_t _data;
+ u32 _type;
+ u32 _flags;
+ } _perf;
+ };
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ struct {
+ compat_uptr_t _call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+ } _sifields;
+} compat_siginfo_t;
struct compat_rlimit {
compat_ulong_t rlim_cur;
compat_ulong_t rlim_max;
};
+#ifdef __ARCH_NEED_COMPAT_FLOCK64_PACKED
+#define __ARCH_COMPAT_FLOCK64_PACK __attribute__((packed))
+#else
+#define __ARCH_COMPAT_FLOCK64_PACK
+#endif
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+#ifdef __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+ __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+#endif
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK_PAD
+ __ARCH_COMPAT_FLOCK_PAD
+#endif
+};
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK64_PAD
+ __ARCH_COMPAT_FLOCK64_PAD
+#endif
+} __ARCH_COMPAT_FLOCK64_PACK;
+
struct compat_rusage {
- struct compat_timeval ru_utime;
- struct compat_timeval ru_stime;
+ struct old_timeval32 ru_utime;
+ struct old_timeval32 ru_stime;
compat_long_t ru_maxrss;
compat_long_t ru_ixrss;
compat_long_t ru_idrss;
@@ -213,10 +311,7 @@ extern int put_compat_rusage(const struct rusage *,
struct compat_rusage __user *);
struct compat_siginfo;
-
-extern asmlinkage long compat_sys_waitid(int, compat_pid_t,
- struct compat_siginfo __user *, int,
- struct compat_rusage __user *);
+struct __compat_aio_sigset;
struct compat_dirent {
u32 d_ino;
@@ -316,6 +411,7 @@ struct compat_keyctl_kdf_params {
__u32 __spare[8];
};
+struct compat_stat;
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@@ -329,147 +425,112 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-extern void compat_exit_robust_list(struct task_struct *curr);
-
-asmlinkage long
-compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
- compat_size_t len);
-asmlinkage long
-compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
- compat_size_t __user *len_ptr);
-
-asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
-asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
-asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
- compat_ssize_t msgsz, int msgflg);
-asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
- compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
-long compat_sys_msgctl(int first, int second, void __user *uptr);
-long compat_sys_shmctl(int first, int second, void __user *uptr);
-long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned nsems, const struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_keyctl(u32 option,
- u32 arg2, u32 arg3, u32 arg4, u32 arg5);
-asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
-
-asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
-asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
-asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high);
-asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high);
-asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
-asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
-asmlinkage long compat_sys_preadv64(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos);
-#endif
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
-asmlinkage long compat_sys_pwritev64(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos);
-#endif
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
-asmlinkage long compat_sys_readv64v2(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos, rwf_t flags);
-#endif
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
-asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos, rwf_t flags);
+void copy_siginfo_to_external32(struct compat_siginfo *to,
+ const struct kernel_siginfo *from);
+int copy_siginfo_from_user32(kernel_siginfo_t *to,
+ const struct compat_siginfo __user *from);
+int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
+ const kernel_siginfo_t *from);
+#ifndef copy_siginfo_to_user32
+#define copy_siginfo_to_user32 __copy_siginfo_to_user32
#endif
-
-asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
-
-asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp);
-asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp, int flags);
-
-asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
- compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp);
-
-asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
-
-asmlinkage long compat_sys_wait4(compat_pid_t pid,
- compat_uint_t __user *stat_addr, int options,
- struct compat_rusage __user *ru);
-
-#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
-
-#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
-
-long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
- unsigned long bitmap_size);
-long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
- unsigned long bitmap_size);
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
-long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
- struct compat_siginfo __user *uinfo);
-#ifdef CONFIG_COMPAT_OLD_SIGACTION
-asmlinkage long compat_sys_sigaction(int sig,
- const struct compat_old_sigaction __user *act,
- struct compat_old_sigaction __user *oact);
-#endif
-static inline int compat_timeval_compare(struct compat_timeval *lhs,
- struct compat_timeval *rhs)
-{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_usec - rhs->tv_usec;
-}
+extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
-static inline int compat_timespec_compare(struct compat_timespec *lhs,
- struct compat_timespec *rhs)
+/*
+ * Defined inline such that size can be compile time constant, which avoids
+ * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct
+ */
+static inline int
+put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
+ unsigned int size)
{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_nsec - rhs->tv_nsec;
+ /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
+#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT)
+ compat_sigset_t v;
+ switch (_NSIG_WORDS) {
+ case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+ fallthrough;
+ case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+ fallthrough;
+ case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+ fallthrough;
+ case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+ }
+ return copy_to_user(compat, &v, size) ? -EFAULT : 0;
+#else
+ return copy_to_user(compat, set, size) ? -EFAULT : 0;
+#endif
}
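
A worked example of the big-endian branch above (values are illustrative):
with _NSIG_WORDS == 1, a native 64-bit word set->sig[0] = 0x0000000100008000
must reach 32-bit userspace as compat->sig[0] = 0x00008000 (signals 1..32)
followed by compat->sig[1] = 0x00000001 (signals 33..64). A raw copy_to_user()
of the 64-bit word would place its high half first in memory on big-endian,
which is why the word-splitting switch is needed there; on little-endian the
raw copy already produces that layout and the #else branch suffices.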
-extern int get_compat_itimerspec(struct itimerspec *dst,
- const struct compat_itimerspec __user *src);
-extern int put_compat_itimerspec(struct compat_itimerspec __user *dst,
- const struct itimerspec *src);
-
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
- struct timezone __user *tz);
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
- struct timezone __user *tz);
-
-asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
-
-extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
-extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
-
-asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
- compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
- const compat_ulong_t __user *new_nodes);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define unsafe_put_compat_sigset(compat, set, label) do { \
+ compat_sigset_t __user *__c = compat; \
+ const sigset_t *__s = set; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ unsafe_put_user(__s->sig[3] >> 32, &__c->sig[7], label); \
+ unsafe_put_user(__s->sig[3], &__c->sig[6], label); \
+ fallthrough; \
+ case 3: \
+ unsafe_put_user(__s->sig[2] >> 32, &__c->sig[5], label); \
+ unsafe_put_user(__s->sig[2], &__c->sig[4], label); \
+ fallthrough; \
+ case 2: \
+ unsafe_put_user(__s->sig[1] >> 32, &__c->sig[3], label); \
+ unsafe_put_user(__s->sig[1], &__c->sig[2], label); \
+ fallthrough; \
+ case 1: \
+ unsafe_put_user(__s->sig[0] >> 32, &__c->sig[1], label); \
+ unsafe_put_user(__s->sig[0], &__c->sig[0], label); \
+ } \
+} while (0)
+
+#define unsafe_get_compat_sigset(set, compat, label) do { \
+ const compat_sigset_t __user *__c = compat; \
+ compat_sigset_word hi, lo; \
+ sigset_t *__s = set; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ unsafe_get_user(lo, &__c->sig[7], label); \
+ unsafe_get_user(hi, &__c->sig[6], label); \
+ __s->sig[3] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 3: \
+ unsafe_get_user(lo, &__c->sig[5], label); \
+ unsafe_get_user(hi, &__c->sig[4], label); \
+ __s->sig[2] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 2: \
+ unsafe_get_user(lo, &__c->sig[3], label); \
+ unsafe_get_user(hi, &__c->sig[2], label); \
+ __s->sig[1] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 1: \
+ unsafe_get_user(lo, &__c->sig[1], label); \
+ unsafe_get_user(hi, &__c->sig[0], label); \
+ __s->sig[0] = hi | (((long)lo) << 32); \
+ } \
+} while (0)
+#else
+#define unsafe_put_compat_sigset(compat, set, label) do { \
+ compat_sigset_t __user *__c = compat; \
+ const sigset_t *__s = set; \
+ \
+ unsafe_copy_to_user(__c, __s, sizeof(*__c), label); \
+} while (0)
+
+#define unsafe_get_compat_sigset(set, compat, label) do { \
+ const compat_sigset_t __user *__c = compat; \
+ sigset_t *__s = set; \
+ \
+ unsafe_copy_from_user(__s, __c, sizeof(*__c), label); \
+} while (0)
+#endif
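
The unsafe_ variants above follow the usual unsafe_put_user()/unsafe_get_user()
contract: they may only run inside an open user access section and branch to
the supplied label on a fault. A sketch of the calling pattern (the function
name is hypothetical):

	static int example_put_sigset(compat_sigset_t __user *ucompat,
				      const sigset_t *kset)
	{
		if (!user_access_begin(ucompat, sizeof(*ucompat)))
			return -EFAULT;
		unsafe_put_compat_sigset(ucompat, kset, Efault);
		user_access_end();
		return 0;
	Efault:
		user_access_end();
		return -EFAULT;
	}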
extern int compat_ptrace_request(struct task_struct *child,
compat_long_t request,
@@ -477,252 +538,311 @@ extern int compat_ptrace_request(struct task_struct *child,
extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
-asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
- compat_long_t addr, compat_long_t data);
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
+struct epoll_event; /* fortunately, this one is fixed-layout */
+
+int compat_restore_altstack(const compat_stack_t __user *uss);
+int __compat_save_altstack(compat_stack_t __user *, unsigned long);
+#define unsafe_compat_save_altstack(uss, sp, label) do { \
+ compat_stack_t __user *__uss = uss; \
+ struct task_struct *t = current; \
+ unsafe_put_user(ptr_to_compat((void __user *)t->sas_ss_sp), \
+ &__uss->ss_sp, label); \
+ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
+ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
+} while (0)
+
/*
- * epoll (fs/eventpoll.c) compat bits follow ...
+ * These syscall function prototypes are kept in the same order as
+ * include/uapi/asm-generic/unistd.h. Deprecated or obsolete system calls
+ * go below.
+ *
+ * Please note that these prototypes here are only provided for information
+ * purposes, for static analysis, and for linking from the syscall table.
+ * These functions should not be called elsewhere from kernel code.
+ *
+ * As the syscall calling convention may be different from the default
+ * for architectures overriding the syscall calling convention, do not
+ * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled.
*/
-struct epoll_event; /* fortunately, this one is fixed-layout */
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
+asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
+ u32 __user *iocb);
+asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct old_timespec32 __user *timeout,
+ const struct __compat_aio_sigset __user *usig);
+asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct __kernel_timespec __user *timeout,
+ const struct __compat_aio_sigset __user *usig);
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct epoll_event __user *events,
int maxevents, int timeout,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-
-asmlinkage long compat_sys_utime(const char __user *filename,
- struct compat_utimbuf __user *t);
-asmlinkage long compat_sys_utimensat(unsigned int dfd,
- const char __user *filename,
- struct compat_timespec __user *t,
- int flags);
-
-asmlinkage long compat_sys_time(compat_time_t __user *tloc);
-asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
-asmlinkage long compat_sys_signalfd(int ufd,
- const compat_sigset_t __user *sigmask,
- compat_size_t sigsetsize);
-asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
- const struct compat_itimerspec __user *utmr,
- struct compat_itimerspec __user *otmr);
-asmlinkage long compat_sys_timerfd_gettime(int ufd,
- struct compat_itimerspec __user *otmr);
-
-asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
- __u32 __user *pages,
- const int __user *nodes,
- int __user *status,
- int flags);
-asmlinkage long compat_sys_futimesat(unsigned int dfd,
- const char __user *filename,
- struct compat_timeval __user *t);
-asmlinkage long compat_sys_utimes(const char __user *filename,
- struct compat_timeval __user *t);
-asmlinkage long compat_sys_newstat(const char __user *filename,
- struct compat_stat __user *statbuf);
-asmlinkage long compat_sys_newlstat(const char __user *filename,
- struct compat_stat __user *statbuf);
-asmlinkage long compat_sys_newfstatat(unsigned int dfd,
- const char __user *filename,
- struct compat_stat __user *statbuf,
- int flag);
-asmlinkage long compat_sys_newfstat(unsigned int fd,
- struct compat_stat __user *statbuf);
+asmlinkage long compat_sys_epoll_pwait2(int epfd,
+ struct epoll_event __user *events,
+ int maxevents,
+ const struct __kernel_timespec __user *timeout,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
+asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
+asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
asmlinkage long compat_sys_statfs(const char __user *pathname,
struct compat_statfs __user *buf);
-asmlinkage long compat_sys_fstatfs(unsigned int fd,
- struct compat_statfs __user *buf);
asmlinkage long compat_sys_statfs64(const char __user *pathname,
compat_size_t sz,
struct compat_statfs64 __user *buf);
+asmlinkage long compat_sys_fstatfs(unsigned int fd,
+ struct compat_statfs __user *buf);
asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user *buf);
-asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
- compat_ulong_t arg);
-asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
- compat_ulong_t arg);
-asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
-asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
- compat_long_t min_nr,
- compat_long_t nr,
- struct io_event __user *events,
- struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
- u32 __user *iocb);
-asmlinkage long compat_sys_mount(const char __user *dev_name,
- const char __user *dir_name,
- const char __user *type, compat_ulong_t flags,
- const void __user *data);
-asmlinkage long compat_sys_old_readdir(unsigned int fd,
- struct compat_old_linux_dirent __user *,
- unsigned int count);
+asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_off_t);
+/* No generic prototype for truncate64, ftruncate64, fallocate */
+asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
+ int flags, umode_t mode);
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
- unsigned int nr_segs, unsigned int flags);
-asmlinkage long compat_sys_open(const char __user *filename, int flags,
- umode_t mode);
-asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
- int flags, umode_t mode);
-asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
- struct file_handle __user *handle,
- int flags);
-asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
-asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
+asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
+/* No generic prototype for pread64 and pwrite64 */
+asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
+ const struct iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
+ const struct iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
+asmlinkage long compat_sys_preadv64(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
+asmlinkage long compat_sys_pwritev64(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
+ compat_off_t __user *offset, compat_size_t count);
+asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
+ compat_loff_t __user *offset, compat_size_t count);
+asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
- struct compat_timespec __user *tsp,
+ struct old_timespec32 __user *tsp,
void __user *sig);
-asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
+asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp,
+ compat_ulong_t __user *exp,
+ struct __kernel_timespec __user *tsp,
+ void __user *sig);
+asmlinkage long compat_sys_ppoll_time32(struct pollfd __user *ufds,
+ unsigned int nfds,
+ struct old_timespec32 __user *tsp,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds,
unsigned int nfds,
- struct compat_timespec __user *tsp,
+ struct __kernel_timespec __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
asmlinkage long compat_sys_signalfd4(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, int flags);
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode,
- compat_ulong_t addr,
- compat_ulong_t flags);
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode);
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
- compat_ulong_t mode,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode, compat_ulong_t flags);
-
-asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
- char __user *optval, unsigned int optlen);
-asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
- unsigned flags);
-asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned vlen, unsigned int flags);
-asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
- unsigned int flags);
-asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
- unsigned flags);
-asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
- unsigned flags, struct sockaddr __user *addr,
- int __user *addrlen);
-asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned vlen, unsigned int flags,
- struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
+asmlinkage long compat_sys_newfstatat(unsigned int dfd,
+ const char __user *filename,
+ struct compat_stat __user *statbuf,
+ int flag);
+asmlinkage long compat_sys_newfstat(unsigned int fd,
+ struct compat_stat __user *statbuf);
+/* No generic prototype for sync_file_range and sync_file_range2 */
+asmlinkage long compat_sys_waitid(int, compat_pid_t,
+ struct compat_siginfo __user *, int,
+ struct compat_rusage __user *);
+asmlinkage long
+compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
+ compat_size_t len);
+asmlinkage long
+compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
+ compat_size_t __user *len_ptr);
asmlinkage long compat_sys_getitimer(int which,
- struct compat_itimerval __user *it);
+ struct old_itimerval32 __user *it);
asmlinkage long compat_sys_setitimer(int which,
- struct compat_itimerval __user *in,
- struct compat_itimerval __user *out);
-asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
-asmlinkage long compat_sys_setrlimit(unsigned int resource,
- struct compat_rlimit __user *rlim);
-asmlinkage long compat_sys_getrlimit(unsigned int resource,
- struct compat_rlimit __user *rlim);
-asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
+ struct old_itimerval32 __user *in,
+ struct old_itimerval32 __user *out);
+asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
+ compat_ulong_t nr_segments,
+ struct compat_kexec_segment __user *,
+ compat_ulong_t flags);
+asmlinkage long compat_sys_timer_create(clockid_t which_clock,
+ struct compat_sigevent __user *timer_event_spec,
+ timer_t __user *created_timer_id);
+asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+ compat_long_t addr, compat_long_t data);
asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
-asmlinkage long compat_sys_timer_create(clockid_t which_clock,
- struct compat_sigevent __user *timer_event_spec,
- timer_t __user *created_timer_id);
-asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
- struct compat_itimerspec __user *new,
- struct compat_itimerspec __user *old);
-asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
- struct compat_itimerspec __user *setting);
-asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
- struct compat_timespec __user *tp);
-asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
- struct compat_timespec __user *tp);
-asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock,
- struct compat_timex __user *tp);
-asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
- struct compat_timespec __user *tp);
-asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
- struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
-asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
- struct compat_siginfo __user *uinfo,
- struct compat_timespec __user *uts, compat_size_t sigsetsize);
+asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
+ compat_stack_t __user *uoss_ptr);
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
compat_size_t sigsetsize);
-asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
- compat_sigset_t __user *oset,
- compat_size_t sigsetsize);
-asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
- compat_size_t sigsetsize);
#ifndef CONFIG_ODD_RT_SIGACTION
asmlinkage long compat_sys_rt_sigaction(int,
const struct compat_sigaction __user *,
struct compat_sigaction __user *,
compat_size_t);
#endif
+asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
+ compat_sigset_t __user *oset,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigtimedwait_time32(compat_sigset_t __user *uthese,
+ struct compat_siginfo __user *uinfo,
+ struct old_timespec32 __user *uts, compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese,
+ struct compat_siginfo __user *uinfo,
+ struct __kernel_timespec __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
+/* No generic prototype for rt_sigreturn */
+asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
+asmlinkage long compat_sys_getrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+asmlinkage long compat_sys_setrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
+asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
+ struct timezone __user *tz);
+asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
+ struct timezone __user *tz);
asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
-asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
- compat_ulong_t arg);
-asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
- struct compat_timespec __user *utime, u32 __user *uaddr2,
- u32 val3);
-asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
- char __user *optval, int __user *optlen);
-asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
- compat_ulong_t nr_segments,
- struct compat_kexec_segment __user *,
- compat_ulong_t flags);
-asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
- const struct compat_mq_attr __user *u_mqstat,
- struct compat_mq_attr __user *u_omqstat);
-asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
- const struct compat_sigevent __user *u_notification);
asmlinkage long compat_sys_mq_open(const char __user *u_name,
int oflag, compat_mode_t mode,
struct compat_mq_attr __user *u_attr);
-asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
- const char __user *u_msg_ptr,
- compat_size_t msg_len, unsigned int msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
-asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
- char __user *u_msg_ptr,
- compat_size_t msg_len, unsigned int __user *u_msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
-asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
+asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
+ const struct compat_sigevent __user *u_notification);
+asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
+ const struct compat_mq_attr __user *u_mqstat,
+ struct compat_mq_attr __user *u_omqstat);
+asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr);
+asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
+ compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
+asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
+ compat_ssize_t msgsz, int msgflg);
+asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
+asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
+asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
+ unsigned flags, struct sockaddr __user *addr,
+ int __user *addrlen);
+asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
+ unsigned flags);
+asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
+ unsigned int flags);
+/* No generic prototype for readahead */
+asmlinkage long compat_sys_keyctl(u32 option,
+ u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
+/* No generic prototype for fadvise64_64 */
+/* CONFIG_MMU only */
+asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
+ compat_pid_t pid, int sig,
+ struct compat_siginfo __user *uinfo);
+asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags,
+ struct __kernel_timespec __user *timeout);
+asmlinkage long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags,
+ struct old_timespec32 __user *timeout);
+asmlinkage long compat_sys_wait4(compat_pid_t pid,
+ compat_uint_t __user *stat_addr, int options,
+ struct compat_rusage __user *ru);
+asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
+ int, const char __user *);
+asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
+ struct file_handle __user *handle,
+ int flags);
+asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags);
+asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp, int flags);
+asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
+ const struct iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
+asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
+ const struct iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
+asmlinkage long compat_sys_preadv64v2(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
-extern ssize_t compat_rw_copy_check_uvector(int type,
- const struct compat_iovec __user *uvector,
- unsigned long nr_segs,
- unsigned long fast_segs, struct iovec *fast_pointer,
- struct iovec **ret_pointer);
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
-extern void __user *compat_alloc_user_space(unsigned long len);
-asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
- compat_ulong_t riovcnt, compat_ulong_t flags);
-asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
- compat_ulong_t riovcnt, compat_ulong_t flags);
+/*
+ * Deprecated system calls which are still defined in
+ * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch
+ */
-asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
- compat_off_t __user *offset, compat_size_t count);
-asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
- compat_loff_t __user *offset, compat_size_t count);
-asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
- compat_stack_t __user *uoss_ptr);
+/* __ARCH_WANT_SYSCALL_NO_AT */
+asmlinkage long compat_sys_open(const char __user *filename, int flags,
+ umode_t mode);
+
+/* __ARCH_WANT_SYSCALL_NO_FLAGS */
+asmlinkage long compat_sys_signalfd(int ufd,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+
+/* __ARCH_WANT_SYSCALL_OFF_T */
+asmlinkage long compat_sys_newstat(const char __user *filename,
+ struct compat_stat __user *statbuf);
+asmlinkage long compat_sys_newlstat(const char __user *filename,
+ struct compat_stat __user *statbuf);
+
+/* __ARCH_WANT_SYSCALL_DEPRECATED */
+asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+ struct old_timeval32 __user *tvp);
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
+asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
+ unsigned flags);
+
+/* obsolete */
+asmlinkage long compat_sys_old_readdir(unsigned int fd,
+ struct compat_old_linux_dirent __user *,
+ unsigned int count);
+
+/* obsolete */
+asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
+
+/* obsolete */
+asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
+/* obsolete */
#ifdef __ARCH_WANT_SYS_SIGPENDING
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
#endif
@@ -731,60 +851,137 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset,
compat_old_sigset_t __user *oset);
#endif
+#ifdef CONFIG_COMPAT_OLD_SIGACTION
+asmlinkage long compat_sys_sigaction(int sig,
+ const struct compat_old_sigaction __user *act,
+ struct compat_old_sigaction __user *oact);
+#endif
-int compat_restore_altstack(const compat_stack_t __user *uss);
-int __compat_save_altstack(compat_stack_t __user *, unsigned long);
-#define compat_save_altstack_ex(uss, sp) do { \
- compat_stack_t __user *__uss = uss; \
- struct task_struct *t = current; \
- put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
- put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
- put_user_ex(t->sas_ss_size, &__uss->ss_size); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
-} while (0);
+/* obsolete */
+asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
-asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval);
+#ifdef __ARCH_WANT_COMPAT_TRUNCATE64
+asmlinkage long compat_sys_truncate64(const char __user *pathname, compat_arg_u64(len));
+#endif
-asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
- int, const char __user *);
+#ifdef __ARCH_WANT_COMPAT_FTRUNCATE64
+asmlinkage long compat_sys_ftruncate64(unsigned int fd, compat_arg_u64(len));
+#endif
-asmlinkage long compat_sys_arch_prctl(int option, unsigned long arg2);
+#ifdef __ARCH_WANT_COMPAT_FALLOCATE
+asmlinkage long compat_sys_fallocate(int fd, int mode, compat_arg_u64(offset),
+ compat_arg_u64(len));
+#endif
-/*
- * For most but not all architectures, "am I in a compat syscall?" and
- * "am I a compat task?" are the same question. For architectures on which
- * they aren't the same question, arch code can override in_compat_syscall.
- */
+#ifdef __ARCH_WANT_COMPAT_PREAD64
+asmlinkage long compat_sys_pread64(unsigned int fd, char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
-#ifndef in_compat_syscall
-static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#ifdef __ARCH_WANT_COMPAT_PWRITE64
+asmlinkage long compat_sys_pwrite64(unsigned int fd, const char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+asmlinkage long compat_sys_sync_file_range(int fd, compat_arg_u64(pos),
+ compat_arg_u64(nbytes), unsigned int flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FADVISE64_64
+asmlinkage long compat_sys_fadvise64_64(int fd, compat_arg_u64(pos),
+ compat_arg_u64(len), int advice);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_READAHEAD
+asmlinkage long compat_sys_readahead(int fd, compat_arg_u64(offset), size_t count);
#endif
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
/**
- * ns_to_compat_timeval - Compat version of ns_to_timeval
+ * ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
*
- * Returns the compat_timeval representation of the nsec parameter.
+ * Returns the old_timeval32 representation of the nsec parameter.
*/
-static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
+static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
- struct timeval tv;
- struct compat_timeval ctv;
+ struct __kernel_old_timeval tv;
+ struct old_timeval32 ctv;
- tv = ns_to_timeval(nsec);
+ tv = ns_to_kernel_old_timeval(nsec);
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;
return ctv;
}
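
For example, ns_to_old_timeval32(1500000000) yields { .tv_sec = 1,
.tv_usec = 500000 }: 1.5 seconds split into the 32-bit seconds/microseconds
pair that legacy 32-bit ABIs expect.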
+/*
+ * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
+ * directly. Instead, use one of the functions which work equivalently, such
+ * as the kcompat_sys_xyzyyz() functions prototyped below.
+ */
+
+int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * For most but not all architectures, "am I in a compat syscall?" and
+ * "am I a compat task?" are the same question. For architectures on which
+ * they aren't the same question, arch code can override in_compat_syscall.
+ */
+#ifndef in_compat_syscall
+static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#endif
+
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
+/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */
+#define in_compat_syscall in_compat_syscall
static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
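
A sketch of the distinction in practice (the record layouts are hypothetical):
a driver that must emit a 32-bit or a native structure depending on the ABI of
the *current syscall*, rather than on the task personality, keys off
in_compat_syscall():

	/* ev32/ev are assumed 32-bit and native layouts of the same record */
	if (in_compat_syscall())
		ret = copy_to_user(buf, &ev32, sizeof(ev32)) ? -EFAULT : 0;
	else
		ret = copy_to_user(buf, &ev, sizeof(ev)) ? -EFAULT : 0;

On most architectures the two questions coincide, but on some (x86 being the
usual example) a syscall's ABI need not match the task's personality, which is
why the override hook exists.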
+#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
+
+#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
+
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
+ unsigned long bitmap_size);
+long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
+ unsigned long bitmap_size);
+
+/*
+ * Some legacy ABIs like the i386 one use less than natural alignment for 64-bit
+ * types, and will need special compat treatment for that. Most architectures
+ * don't need that special handling even for compat syscalls.
+ */
+#ifndef compat_need_64bit_alignment_fixup
+#define compat_need_64bit_alignment_fixup() false
+#endif
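
Concretely (an illustrative layout, not taken from this patch): on i386,
__alignof__(u64) is 4, so a compat structure such as

	struct compat_example {
		u32 a;
		u64 b;	/* offset 4 under the i386 ABI, offset 8 under
			 * ABIs with natural u64 alignment */
	};

lays out differently than the same definition built for an ABI with natural
64-bit alignment, and handlers reconstructing such structures need the fixup
this macro advertises.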
+
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+#ifndef compat_ptr
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+ return (void __user *)(unsigned long)uptr;
+}
+#endif
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
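
A sketch of the usual consumer of compat_ptr(): a compat ioctl handler whose
argument word carries a 32-bit user pointer (the handler name and the
example_do_ioctl() helper are hypothetical):

	static long example_compat_ioctl(struct file *file, unsigned int cmd,
					 unsigned long arg)
	{
		void __user *argp = compat_ptr(arg);

		/* hand argp to the same copy_from_user()-based logic the
		 * native ioctl path uses */
		return example_do_ioctl(file, cmd, argp);
	}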
+
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index de179993e039..107ce05bd16e 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -1,17 +1,155 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
+#error "Please do not include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
-/* Some compiler specific definitions are overwritten here
- * for Clang compiler
+/* Compiler specific definitions for Clang compiler */
+
+/*
+ * Clang prior to 17 is being silly and considers many __cleanup() variables
+ * as unused (because they are, their sole purpose is to go out of scope).
+ *
+ * https://github.com/llvm/llvm-project/commit/877210faa447f4cc7db87812f8ed80e398fedd61
+ */
+#undef __cleanup
+#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
+
+/* all clang versions usable with the kernel support KASAN ABI version 5 */
+#define KASAN_ABI_VERSION 5
+
+/*
+ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
+ * dropping __has_feature support for sanitizers:
+ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
+ * Create these macros for older versions of clang so that it is easy to clean
+ * up once the minimum supported version of LLVM for building the kernel always
+ * creates these macros.
+ *
+ * Note: __has_feature(*_sanitizer) is true only if the feature is actually
+ * enabled, so there is no need to additionally check defined(CONFIG_*) to
+ * avoid adding redundant attributes in other configurations.
+ */
+#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
+#define __SANITIZE_ADDRESS__
+#endif
+#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
+#define __SANITIZE_HWADDRESS__
+#endif
+#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
+#define __SANITIZE_THREAD__
+#endif
+
+/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
+ */
+#ifdef __SANITIZE_HWADDRESS__
+#define __SANITIZE_ADDRESS__
+#endif
+
+#ifdef __SANITIZE_ADDRESS__
+#define __no_sanitize_address \
+ __attribute__((no_sanitize("address", "hwaddress")))
+#else
+#define __no_sanitize_address
+#endif
+
+#ifdef __SANITIZE_THREAD__
+#define __no_sanitize_thread \
+ __attribute__((no_sanitize("thread")))
+#else
+#define __no_sanitize_thread
+#endif
+
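A usage sketch (the helper is hypothetical): functions that deliberately touch shadow memory or racy state are opted out of instrumentation with these attributes:

/* Hypothetical: a raw store that KASAN/KCSAN must not instrument. */
static __no_sanitize_address __no_sanitize_thread
void raw_poke(u8 *addr, u8 val)
{
	*addr = val;
}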
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if __has_feature(undefined_behavior_sanitizer)
+/* GCC does not have __SANITIZE_UNDEFINED__ */
+#define __no_sanitize_undefined \
+ __attribute__((no_sanitize("undefined")))
+#else
+#define __no_sanitize_undefined
+#endif
+
+#if __has_feature(memory_sanitizer)
+#define __SANITIZE_MEMORY__
+/*
+ * Unlike other sanitizers, KMSAN still inserts code into functions marked with
+ * no_sanitize("kernel-memory"). Using disable_sanitizer_instrumentation
+ * provides the behavior consistent with other __no_sanitize_ attributes,
+ * guaranteeing that __no_sanitize_memory functions remain uninstrumented.
*/
+#define __no_sanitize_memory __disable_sanitizer_instrumentation
+
+/*
+ * The __no_kmsan_checks attribute ensures that a function does not produce
+ * false positive reports by:
+ * - initializing all local variables and memory stores in this function;
+ * - skipping all shadow checks;
+ * - passing initialized arguments to this function's callees.
+ */
+#define __no_kmsan_checks __attribute__((no_sanitize("kernel-memory")))
+#else
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+#endif
+
+/*
+ * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together
+ * with no_sanitize("coverage"). Prior versions of Clang support coverage
+ * instrumentation, but cannot be queried for support by the preprocessor.
+ */
+#if __has_feature(coverage_sanitizer)
+#define __no_sanitize_coverage __attribute__((no_sanitize("coverage")))
+#else
+#define __no_sanitize_coverage
+#endif
+
+/* Only Clang needs to disable the coverage sanitizer for kstack_erase. */
+#define __no_kstack_erase __no_sanitize_coverage
-#ifdef uninitialized_var
-#undef uninitialized_var
-#define uninitialized_var(x) x = *(&(x))
+#if __has_feature(shadow_call_stack)
+# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
-/* same as gcc, this was present in clang-2.6 so we can assume it works
- * with any version that can compile the kernel
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_clang(version, severity, s) \
+ __diag_clang_ ## version(__diag_clang_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_clang_ignore ignored
+#define __diag_clang_warn warning
+#define __diag_clang_error error
+
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(clang diagnostic s))
+
+#define __diag_clang_13(s) __diag(s)
+
+#define __diag_ignore_all(option, comment) \
+ __diag_clang(13, ignore, option)
+
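A sketch of how these end up being used, assuming the __diag_push()/__diag_pop() wrappers from compiler_types.h; the warning option and function are illustrative:

__diag_push();
__diag_ignore_all("-Wunused-but-set-variable",
		  "the value is only consumed by an (elided) debug path");
static void diag_demo(void)
{
	int seen = 0;

	seen++;
}
__diag_pop();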
+/*
+ * clang has horrible behavior with "g" or "rm" constraints for asm
+ * inputs, turning them into something worse than "m". Avoid using
+ * constraints with multiple possible uses (but "ir" seems to be ok):
+ *
+ * https://github.com/llvm/llvm-project/issues/20571
+ */
+#define ASM_INPUT_G "ir"
+#define ASM_INPUT_RM "r"
+
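Since these expand to the constraint string itself, they drop into an asm statement where "g" or "rm" would otherwise appear; the helper below is a hypothetical sketch:

static inline void asm_consume(unsigned long v)
{
	/* With gcc this is effectively "g"; with clang a register input. */
	asm volatile("" : : ASM_INPUT_G (v));
}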
+/*
+ * Declare compiler support for __typeof_unqual__() operator.
+ *
+ * Bindgen uses LLVM even if our C compiler is GCC, so we cannot
+ * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
*/
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ >= 19)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 16d41de92ee3..5de824a0b3d7 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -1,5 +1,6 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
+#error "Please do not include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
#endif
/*
@@ -9,25 +10,6 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-/* Optimization barrier */
-
-/* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
-/*
- * This version is i.e. to prevent dead stores elimination on @ptr
- * where gcc and llvm may behave differently when otherwise using
- * normal barrier(): while gcc behavior gets along with a normal
- * barrier(), llvm needs an explicit input variable to be assumed
- * clobbered. The issue is as follows: while the inline asm might
- * access any memory it wants, the compiler could have fit all of
- * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proved that the inline asm wasn't touching any of
- * it. This version works well with both compilers, i.e. we're telling
- * the compiler that the inline asm absolutely may see the contents
- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
- */
-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
-
/*
* This macro obfuscates arithmetic on a variable address so that gcc
* shouldn't recognize the original var, and make assumptions about it.
@@ -53,278 +35,111 @@
(typeof(ptr)) (__ptr + (off)); \
})
-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) \
- __asm__ ("" : "=r" (var) : "0" (var))
-
-#ifdef __CHECKER__
-#define __must_be_array(a) 0
-#else
-/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-#endif
-
-/*
- * Force always-inline if the user requests it so via the .config,
- * or if gcc is too old.
- * GCC does not warn about unused static inline functions for
- * -Wunused-function. This turns out to avoid the need for complex #ifdef
- * directives. Suppress the warning in clang as well by using "unused"
- * function attribute, which is redundant but not harmful for gcc.
- */
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
- !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline __attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
-#define __inline __inline __attribute__((always_inline,unused)) notrace
-#else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline __attribute__((unused)) notrace
-#define __inline__ __inline__ __attribute__((unused)) notrace
-#define __inline __inline __attribute__((unused)) notrace
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+#define __latent_entropy __attribute__((latent_entropy))
#endif
-#define __always_inline inline __attribute__((always_inline))
-#define noinline __attribute__((noinline))
-
-#define __deprecated __attribute__((deprecated))
-#define __packed __attribute__((packed))
-#define __weak __attribute__((weak))
-#define __alias(symbol) __attribute__((alias(#symbol)))
-
-/*
- * it doesn't make sense on ARM (currently the only user of __naked)
- * to trace naked functions because then mcount is called without
- * stack and frame pointer being set up and there is no chance to
- * restore the lr register to the value before mcount was called.
- *
- * The asm() bodies of naked functions often depend on standard calling
- * conventions, therefore they must be noinline and noclone.
- *
- * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
- * See GCC PR44290.
- */
-#define __naked __attribute__((naked)) noinline __noclone notrace
-
-#define __noreturn __attribute__((noreturn))
-
/*
- * From the GCC manual:
+ * Calling noreturn functions, __builtin_unreachable() and __builtin_trap()
+ * confuses the stack allocation in gcc, leading to overly large stack
+ * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
*
- * Many functions have no effects except the return value and their
- * return value depends only on the parameters and/or global
- * variables. Such a function can be subject to common subexpression
- * elimination and loop optimization just as an arithmetic operator
- * would be.
- * [...]
+ * Adding an empty inline assembly before it works around the problem.
*/
-#define __pure __attribute__((pure))
-#define __aligned(x) __attribute__((aligned(x)))
-#define __aligned_largest __attribute__((aligned))
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
-#define __attribute_const__ __attribute__((__const__))
-#define __maybe_unused __attribute__((unused))
-#define __always_unused __attribute__((unused))
-#define __mode(x) __attribute__((mode(x)))
-
-/* gcc version specific checks */
+#define barrier_before_unreachable() asm volatile("")
-#if GCC_VERSION < 30200
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
-#if GCC_VERSION < 30300
-# define __used __attribute__((__unused__))
+#if GCC_VERSION >= 70000
+#define KASAN_ABI_VERSION 5
#else
-# define __used __attribute__((__used__))
+#define KASAN_ABI_VERSION 4
#endif
-#ifdef CONFIG_GCOV_KERNEL
-# if GCC_VERSION < 30400
-# error "GCOV profiling support for gcc versions below 3.4 not included"
-# endif /* __GNUC_MINOR__ */
-#endif /* CONFIG_GCOV_KERNEL */
-
-#if GCC_VERSION >= 30400
-#define __must_check __attribute__((warn_unused_result))
-#define __malloc __attribute__((__malloc__))
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
-#if GCC_VERSION >= 40000
-
-/* GCC 4.1.[01] miscompiles __weak */
-#ifdef __KERNEL__
-# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
-# error Your version of gcc miscompiles the __weak directive
-# endif
+#ifdef __SANITIZE_HWADDRESS__
+#define __no_sanitize_address __attribute__((__no_sanitize__("hwaddress")))
+#else
+#define __no_sanitize_address __attribute__((__no_sanitize_address__))
#endif
-#define __used __attribute__((__used__))
-#define __compiler_offsetof(a, b) \
- __builtin_offsetof(a, b)
-
-#if GCC_VERSION >= 40100
-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-
-#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
+#if defined(__SANITIZE_THREAD__)
+#define __no_sanitize_thread __attribute__((__no_sanitize_thread__))
+#else
+#define __no_sanitize_thread
#endif
-#if GCC_VERSION >= 40300
-/* Mark functions as cold. gcc will assume any path leading to a call
- * to them will be unlikely. This means a lot of manual unlikely()s
- * are unnecessary now for any paths leading to the usual suspects
- * like BUG(), printk(), panic() etc. [but let's keep them for now for
- * older compilers]
- *
- * Early snapshots of gcc 4.3 don't support this and we can't detect this
- * in the preprocessor, but we can live with this because they're unreleased.
- * Maketime probing would be overkill here.
- *
- * gcc also has a __attribute__((__hot__)) to move hot functions into
- * a special section, but I don't see any sense in this right now in
- * the kernel context
- */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-#endif /* GCC_VERSION >= 40300 */
-
-#if GCC_VERSION >= 40500
-
-#ifndef __CHECKER__
-#ifdef LATENT_ENTROPY_PLUGIN
-#define __latent_entropy __attribute__((latent_entropy))
-#endif
-#endif
+#define __no_sanitize_undefined __attribute__((__no_sanitize_undefined__))
/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
+ * Only supported since gcc >= 12
*/
-#define unreachable() \
- do { annotate_unreachable(); __builtin_unreachable(); } while (0)
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
-
-#ifdef RANDSTRUCT_PLUGIN
-#define __randomize_layout __attribute__((randomize_layout))
-#define __no_randomize_layout __attribute__((no_randomize_layout))
+#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__)
+#define __no_sanitize_coverage __attribute__((__no_sanitize_coverage__))
+#else
+#define __no_sanitize_coverage
#endif
-#endif /* GCC_VERSION >= 40500 */
-
-#if GCC_VERSION >= 40600
-
-/*
- * When used with Link Time Optimization, gcc can optimize away C functions or
- * variables which are referenced only from assembly code. __visible tells the
- * optimizer that something else uses this function or variable, thus preventing
- * this.
- */
-#define __visible __attribute__((externally_visible))
-
-/*
- * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but it is only
- * possible since GCC 4.6. To provide as much build testing coverage
- * as possible, this is used for all GCC 4.6+ builds, and not just on
- * RANDSTRUCT_PLUGIN builds.
- */
-#define randomized_struct_fields_start struct {
-#define randomized_struct_fields_end } __randomize_layout;
-
-#endif /* GCC_VERSION >= 40600 */
-
-
-#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
/*
- * __assume_aligned(n, k): Tell the optimizer that the returned
- * pointer can be assumed to be k modulo n. The second argument is
- * optional (default 0), so we use a variadic macro to make the
- * shorthand.
- *
- * Beware: Do not apply this to functions which may return
- * ERR_PTRs. Also, it is probably unwise to apply it to functions
- * returning extra information in the low bits (but in that case the
- * compiler should see some alignment anyway, when the return value is
- * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel,
+ * matching the defines used by Clang.
*/
-#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+#ifdef __SANITIZE_HWADDRESS__
+#define __SANITIZE_ADDRESS__
#endif
/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
+ * GCC does not support KMSAN.
*/
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+#define __no_sanitize_memory
+#define __no_kmsan_checks
/*
- * sparse (__CHECKER__) pretends to be gcc, but can't do constant
- * folding in __builtin_bswap*() (yet), so don't set these for it.
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
*/
-#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
-#if GCC_VERSION >= 40400
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#endif
-#if GCC_VERSION >= 40800
-#define __HAVE_BUILTIN_BSWAP16__
-#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
+#define __diag_GCC(version, severity, s) \
+ __diag_GCC_ ## version(__diag_GCC_ ## severity s)
-#if GCC_VERSION >= 70000
-#define KASAN_ABI_VERSION 5
-#elif GCC_VERSION >= 50000
-#define KASAN_ABI_VERSION 4
-#elif GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
-#endif
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore ignored
+#define __diag_GCC_warn warning
+#define __diag_GCC_error error
-#if GCC_VERSION >= 40902
-/*
- * Tell the compiler that address safety instrumentation (KASAN)
- * should not be applied to that function.
- * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- */
-#define __no_sanitize_address __attribute__((no_sanitize_address))
-#endif
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
-#if GCC_VERSION >= 50100
-/*
- * Mark structures as requiring designated initializers.
- * https://gcc.gnu.org/onlinedocs/gcc/Designated-Inits.html
- */
-#define __designated_init __attribute__((designated_init))
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s) __diag(s)
+#else
+#define __diag_GCC_8(s)
#endif
-#endif /* gcc version >= 40000 specific checks */
+#define __diag_GCC_all(s) __diag(s)
-#if !defined(__noclone)
-#define __noclone /* not needed */
-#endif
+#define __diag_ignore_all(option, comment) \
+ __diag(__diag_GCC_ignore option)
-#if !defined(__no_sanitize_address)
-#define __no_sanitize_address
+/*
+ * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"
+ * attribute) do not work, and must be disabled.
+ */
+#if GCC_VERSION < 90100
+#undef __alloc_size__
#endif
/*
- * A trick to suppress uninitialized variable warning without generating any
- * code
+ * Declare compiler support for __typeof_unqual__() operator.
+ *
+ * Bindgen uses LLVM even if our C compiler is GCC, so we cannot
+ * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
*/
-#define uninitialized_var(x) x = x
+#define CC_HAS_TYPEOF_UNQUAL (__GNUC__ >= 14)
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
deleted file mode 100644
index d4c71132d07f..000000000000
--- a/include/linux/compiler-intel.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#ifdef __ECC
-
-/* Some compiler specific definitions are overwritten here
- * for Intel ECC compiler
- */
-
-#include <asm/intrinsics.h>
-
-/* Intel ECC compiler doesn't support gcc specific asm stmts.
- * It uses intrinsics to do the equivalent things.
- */
-#undef barrier
-#undef barrier_data
-#undef RELOC_HIDE
-#undef OPTIMIZER_HIDE_VAR
-
-#define barrier() __memory_barrier()
-#define barrier_data(ptr) barrier()
-
-#define RELOC_HIDE(ptr, off) \
- ({ unsigned long __ptr; \
- __ptr = (unsigned long) (ptr); \
- (typeof(ptr)) (__ptr + (off)); })
-
-/* This should act as an optimization barrier on var.
- * Given that this compiler does not have inline assembly, a compiler barrier
- * is the best we can do.
- */
-#define OPTIMIZER_HIDE_VAR(var) barrier()
-
-/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
-#define __must_be_array(a) 0
-
-#endif
-
-#ifndef __HAVE_BUILTIN_BSWAP16__
-/* icc has this, but it's called _bswap16 */
-#define __HAVE_BUILTIN_BSWAP16__
-#define __builtin_bswap16 _bswap16
-#endif
-
diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h
new file mode 100644
index 000000000000..ac1665a98a15
--- /dev/null
+++ b/include/linux/compiler-version.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifdef __LINUX_COMPILER_VERSION_H
+#error "Please do not include <linux/compiler-version.h>. This is done by the build system."
+#endif
+#define __LINUX_COMPILER_VERSION_H
+
+/*
+ * This header exists to force full rebuild when the compiler is upgraded.
+ *
+ * When fixdep scans this, it will find this string "CONFIG_CC_VERSION_TEXT"
+ * and add dependency on include/config/CC_VERSION_TEXT, which is touched
+ * by Kconfig when the version string from the compiler changes.
+ */
+
+/* Additional tree-wide dependencies start here. */
+
+/*
+ * If any of the GCC plugins change, we need to rebuild everything that
+ * was built with them, as they may have changed their behavior and those
+ * behaviors may need to be synchronized across all translation units.
+ */
+#ifdef GCC_PLUGINS
+#include <generated/gcc-plugins.h>
+#endif
+
+/*
+ * If the randstruct seed itself changes (whether for GCC plugins or
+ * Clang), the entire tree needs to be rebuilt since the randomization of
+ * structures may change between compilation units if not.
+ */
+#ifdef RANDSTRUCT
+#include <generated/randstruct_hash.h>
+#endif
+
+/*
+ * If any external changes affect Clang's integer wrapping sanitizer
+ * behavior, a full rebuild is needed as the coverage for wrapping types
+ * may have changed, which may impact the expected behaviors that should
+ * not differ between compilation units.
+ */
+#ifdef INTEGER_WRAP
+#include <generated/integer-wrap.h>
+#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e95a2631e545..04487c9bd751 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -1,128 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H
-#ifndef __ASSEMBLY__
-
-#ifdef __CHECKER__
-# define __user __attribute__((noderef, address_space(1)))
-# define __kernel __attribute__((address_space(0)))
-# define __safe __attribute__((safe))
-# define __force __attribute__((force))
-# define __nocast __attribute__((nocast))
-# define __iomem __attribute__((noderef, address_space(2)))
-# define __must_hold(x) __attribute__((context(x,1,1)))
-# define __acquires(x) __attribute__((context(x,0,1)))
-# define __releases(x) __attribute__((context(x,1,0)))
-# define __acquire(x) __context__(x,1)
-# define __release(x) __context__(x,-1)
-# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu __attribute__((noderef, address_space(3)))
-# define __rcu __attribute__((noderef, address_space(4)))
-# define __private __attribute__((noderef))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
-# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
-#else /* __CHECKER__ */
-# ifdef STRUCTLEAK_PLUGIN
-# define __user __attribute__((user))
-# else
-# define __user
-# endif
-# define __kernel
-# define __safe
-# define __force
-# define __nocast
-# define __iomem
-# define __chk_user_ptr(x) (void)0
-# define __chk_io_ptr(x) (void)0
-# define __builtin_warning(x, y...) (1)
-# define __must_hold(x)
-# define __acquires(x)
-# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
-# define __cond_lock(x,c) (c)
-# define __percpu
-# define __rcu
-# define __private
-# define ACCESS_PRIVATE(p, member) ((p)->member)
-#endif /* __CHECKER__ */
+#include <linux/compiler_types.h>
-/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
-#define ___PASTE(a,b) a##b
-#define __PASTE(a,b) ___PASTE(a,b)
+#ifndef __ASSEMBLY__
#ifdef __KERNEL__
-#ifdef __GNUC__
-#include <linux/compiler-gcc.h>
-#endif
-
-#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
-#define notrace __attribute__((hotpatch(0,0)))
-#else
-#define notrace __attribute__((no_instrument_function))
-#endif
-
-/* Intel compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __INTEL_COMPILER
-# include <linux/compiler-intel.h>
-#endif
-
-/* Clang compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __clang__
-#include <linux/compiler-clang.h>
-#endif
-
-/*
- * Generic compiler-dependent macros required for kernel
- * build go below this comment. Actual compiler/compiler version
- * specific implementations come from the above header files
- */
-
-struct ftrace_branch_data {
- const char *func;
- const char *file;
- unsigned line;
- union {
- struct {
- unsigned long correct;
- unsigned long incorrect;
- };
- struct {
- unsigned long miss;
- unsigned long hit;
- };
- unsigned long miss_hit[2];
- };
-};
-
-struct ftrace_likely_data {
- struct ftrace_branch_data data;
- unsigned long constant;
-};
-
/*
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
*/
-#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
- && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
int expect, int is_constant);
-
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define __branch_check__(x, expect, is_constant) ({ \
- int ______r; \
+ long ______r; \
static struct ftrace_likely_data \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_annotated_branch"))) \
+ __aligned(4) \
+ __section("_ftrace_annotated_branch") \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
@@ -151,69 +52,78 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* "Define 'is'", Bill Clinton
* "Define 'if'", Steven Rostedt
*/
-#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
-#define __trace_if(cond) \
- if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
- ({ \
- int ______r; \
- static struct ftrace_branch_data \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_branch"))) \
- ______f = { \
- .func = __func__, \
- .file = __FILE__, \
- .line = __LINE__, \
- }; \
- ______r = !!(cond); \
- ______f.miss_hit[______r]++; \
- ______r; \
- }))
+#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )
+
+#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
+
+#define __trace_if_value(cond) ({ \
+ static struct ftrace_branch_data \
+ __aligned(4) \
+ __section("_ftrace_branch") \
+ __if_trace = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ (cond) ? \
+ (__if_trace.miss_hit[1]++,1) : \
+ (__if_trace.miss_hit[0]++,0); \
+})
+
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
+# define likely_notrace(x) likely(x)
+# define unlikely_notrace(x) unlikely(x)
#endif
/* Optimization barrier */
#ifndef barrier
-# define barrier() __memory_barrier()
+/* The "volatile" is due to gcc bugs */
+# define barrier() __asm__ __volatile__("": : :"memory")
#endif
#ifndef barrier_data
-# define barrier_data(ptr) barrier()
+/*
+ * This version is used, e.g., to prevent dead-store elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proved that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
-/* Unreachable code */
-#ifdef CONFIG_STACK_VALIDATION
-#define annotate_reachable() ({ \
- asm("%c0:\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__LINE__)); \
-})
-#define annotate_unreachable() ({ \
- asm("%c0:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__LINE__)); \
-})
-#define ASM_UNREACHABLE \
- "999:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long 999b - .\n\t" \
- ".popsection\n\t"
-#else
-#define annotate_reachable()
-#define annotate_unreachable()
+/* workaround for GCC PR82365 if needed */
+#ifndef barrier_before_unreachable
+# define barrier_before_unreachable() do { } while (0)
#endif
-#ifndef ASM_UNREACHABLE
-# define ASM_UNREACHABLE
-#endif
-#ifndef unreachable
-# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
-#endif
+/* Unreachable code */
+#ifdef CONFIG_OBJTOOL
+/* Annotate a C jump table to allow objtool to follow the code flow */
+#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
+#else /* !CONFIG_OBJTOOL */
+#define __annotate_jump_table
+#endif /* CONFIG_OBJTOOL */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ */
+#define unreachable() do { \
+ barrier_before_unreachable(); \
+ __builtin_unreachable(); \
+} while (0)
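A usage sketch with a hypothetical stub; the empty asm stands in for an instruction that really does transfer control, which is the case the comment above describes:

static __always_inline void jump_to_firmware(unsigned long entry)
{
	asm volatile("" : : "r" (entry));	/* stand-in for a jump insn */
	unreachable();
}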
/*
* KENTRY - kernel entry point
@@ -234,7 +144,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
extern typeof(sym) sym; \
static const unsigned long __kentry_##sym \
__used \
- __attribute__((section("___kentry" "+" #sym ), used)) \
+ __attribute__((__section__("___kentry+" #sym))) \
= (unsigned long)&sym;
#endif
@@ -245,383 +155,228 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
(typeof(ptr)) (__ptr + (off)); })
#endif
-#ifndef OPTIMIZER_HIDE_VAR
-#define OPTIMIZER_HIDE_VAR(var) barrier()
-#endif
-
-/* Not-quite-unique ID. */
-#ifndef __UNIQUE_ID
-# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
-#endif
-
-#include <uapi/linux/types.h>
+#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
-#define __READ_ONCE_SIZE \
-({ \
- switch (size) { \
- case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
- case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
- case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
- case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
- default: \
- barrier(); \
- __builtin_memcpy((void *)res, (const void *)p, size); \
- barrier(); \
- } \
-})
-
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-#ifdef CONFIG_KASAN
-/*
- * This function is not 'inline' because __no_sanitize_address confilcts
- * with inlining. Attempt to inline it may cause a build failure.
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- */
-static __no_sanitize_address __maybe_unused
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-#else
-static __always_inline
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
+#ifndef OPTIMIZER_HIDE_VAR
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
#endif
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
-{
- switch (size) {
- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
- default:
- barrier();
- __builtin_memcpy((void *)p, (const void *)res, size);
- barrier();
- }
-}
+/* Format: __UNIQUE_ID_<name>_<__COUNTER__> */
+#define __UNIQUE_ID(name) \
+ __PASTE(__UNIQUE_ID_, \
+ __PASTE(name, \
+ __PASTE(_, __COUNTER__)))
-/*
- * Prevent the compiler from merging or refetching reads or writes. The
- * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering. One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+/**
+ * data_race - mark an expression as containing intentional data races
*
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
- * least two memcpy()s: one for the __builtin_memcpy() and then one for
- * the macro doing the copy of variable - '__u' allocated on the stack.
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
+ * For example, if accesses to a given variable are protected by a lock,
+ * except for diagnostic code, then the accesses under the lock should
+ * be plain C-language accesses and those in the diagnostic code should
+ * use data_race(). This way, KCSAN will complain if buggy lockless
+ * accesses to that variable are introduced, even if the buggy accesses
+ * are protected by READ_ONCE() or WRITE_ONCE().
*
- * Their two major use cases are: (1) Mediating communication between
- * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored. If the access must
+ * be atomic *and* KCSAN should ignore the access, use both data_race()
+ * and READ_ONCE(), for example, data_race(READ_ONCE(x)).
*/
-
-#define __READ_ONCE(x, check) \
+#define data_race(expr) \
({ \
- union { typeof(x) __val; char __c[1]; } __u; \
- if (check) \
- __read_once_size(&(x), __u.__c, sizeof(x)); \
- else \
- __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
+ __kcsan_disable_current(); \
+ auto __v = (expr); \
+ __kcsan_enable_current(); \
+ __v; \
})
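A minimal sketch of the diagnostic-read case described above; struct foo_stats and its locking are hypothetical:

struct foo_stats {
	unsigned long hits;	/* normally guarded by a lock (elided) */
};

static unsigned long peek_hits(const struct foo_stats *s)
{
	return data_race(s->hits);	/* diagnostic only; races tolerated */
}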
-#define READ_ONCE(x) __READ_ONCE(x, 1)
-/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
- * to hide memory access from KASAN.
- */
-#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
-
-#define WRITE_ONCE(x, val) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(x)) (val) }; \
- __write_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
-})
+#ifdef __CHECKER__
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) (0)
+#else /* __CHECKER__ */
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
+#endif /* __CHECKER__ */
-#endif /* __KERNEL__ */
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __is_array(a) (!__same_type((a), &(a)[0]))
+#define __must_be_array(a) __BUILD_BUG_ON_ZERO_MSG(!__is_array(a), \
+ "must be array")
-#endif /* __ASSEMBLY__ */
+#define __is_byte_array(a) (__is_array(a) && sizeof((a)[0]) == 1)
+#define __must_be_byte_array(a) __BUILD_BUG_ON_ZERO_MSG(!__is_byte_array(a), \
+ "must be byte array")
-#ifdef __KERNEL__
/*
- * Allow us to mark functions as 'deprecated' and have gcc emit a nice
- * warning for each use, in hopes of speeding the functions removal.
- * Usage is:
- * int __deprecated foo(void)
+ * If the "nonstring" attribute isn't available, we have to return true
+ * so the __must_*() checks pass when "nonstring" isn't supported.
*/
-#ifndef __deprecated
-# define __deprecated /* unimplemented */
-#endif
-
-#ifdef MODULE
-#define __deprecated_for_modules __deprecated
+#if __has_attribute(__nonstring__) && defined(__annotated)
+#define __is_cstr(a) (!__annotated(a, nonstring))
+#define __is_noncstr(a) (__annotated(a, nonstring))
#else
-#define __deprecated_for_modules
-#endif
-
-#ifndef __must_check
-#define __must_check
-#endif
-
-#ifndef CONFIG_ENABLE_MUST_CHECK
-#undef __must_check
-#define __must_check
-#endif
-#ifndef CONFIG_ENABLE_WARN_DEPRECATED
-#undef __deprecated
-#undef __deprecated_for_modules
-#define __deprecated
-#define __deprecated_for_modules
+#define __is_cstr(a) (true)
+#define __is_noncstr(a) (true)
#endif
-#ifndef __malloc
-#define __malloc
-#endif
+/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
+#define __must_be_cstr(p) \
+ __BUILD_BUG_ON_ZERO_MSG(!__is_cstr(p), \
+ "must be C-string (NUL-terminated)")
+#define __must_be_noncstr(p) \
+ __BUILD_BUG_ON_ZERO_MSG(!__is_noncstr(p), \
+ "must be non-C-string (not NUL-terminated)")
/*
- * Allow us to avoid 'defined but not used' warnings on functions and data,
- * as well as force them to be emitted to the assembly file.
- *
- * As of gcc 3.4, static functions that are not marked with attribute((used))
- * may be elided from the assembly file. As of gcc 3.4, static data not so
- * marked will not be elided, but this may change in a future gcc version.
- *
- * NOTE: Because distributions shipped with a backported unit-at-a-time
- * compiler in gcc 3.3, we must define __used to be __attribute__((used))
- * for gcc >=3.3 instead of 3.4.
+ * Use __typeof_unqual__() when available.
*
- * In prior versions of gcc, such functions and data would be emitted, but
- * would be warned about except with attribute((unused)).
- *
- * Mark functions that are referenced only in inline assembly as __used so
- * the code is emitted even though it appears to be unreferenced.
+ * XXX: Remove test for __CHECKER__ once
+ * sparse learns about __typeof_unqual__().
*/
-#ifndef __used
-# define __used /* unimplemented */
-#endif
-
-#ifndef __maybe_unused
-# define __maybe_unused /* unimplemented */
-#endif
-
-#ifndef __always_unused
-# define __always_unused /* unimplemented */
-#endif
-
-#ifndef noinline
-#define noinline
+#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
+# define USE_TYPEOF_UNQUAL 1
#endif
/*
- * Rather then using noinline to prevent stack consumption, use
- * noinline_for_stack instead. For documentation reasons.
+ * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof
+ * operator when available, to return an unqualified type of the exp.
*/
-#define noinline_for_stack noinline
-
-#ifndef __always_inline
-#define __always_inline inline
+#if defined(USE_TYPEOF_UNQUAL)
+# define TYPEOF_UNQUAL(exp) __typeof_unqual__(exp)
+#else
+# define TYPEOF_UNQUAL(exp) __typeof__(exp)
#endif
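A usage sketch (local_copy() is hypothetical): with __typeof_unqual__ the qualifier does not leak into the local, so the copy of a const or volatile object is freely assignable:

#define local_copy(x) ({ TYPEOF_UNQUAL(x) __tmp = (x); __tmp; })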
#endif /* __KERNEL__ */
+#if defined(CONFIG_CFI) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
/*
- * From the GCC manual:
- *
- * Many functions do not examine any values except their arguments,
- * and have no effects except the return value. Basically this is
- * just slightly more strict class than the `pure' attribute above,
- * since function is not allowed to read global memory.
- *
- * Note that a function that has pointer arguments and examines the
- * data pointed to must _not_ be declared `const'. Likewise, a
- * function that calls a non-`const' function usually must not be
- * `const'. It does not make sense for a `const' function to return
- * `void'.
+ * Force a reference to the external symbol so the compiler generates
+ * __kcfi_typeid.
*/
-#ifndef __attribute_const__
-# define __attribute_const__ /* unimplemented */
-#endif
-
-#ifndef __designated_init
-# define __designated_init
-#endif
-
-#ifndef __latent_entropy
-# define __latent_entropy
-#endif
-
-#ifndef __randomize_layout
-# define __randomize_layout __designated_init
+#define KCFI_REFERENCE(sym) __ADDRESSABLE(sym)
+#else
+#define KCFI_REFERENCE(sym)
#endif
-#ifndef __no_randomize_layout
-# define __no_randomize_layout
-#endif
+/**
+ * offset_to_ptr - convert a relative memory offset to an absolute pointer
+ * @off: the address of the 32-bit offset value
+ */
+static inline void *offset_to_ptr(const int *off)
+{
+ return (void *)((unsigned long)off + *off);
+}
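A sketch of the self-relative pattern this helper serves; struct rel_entry is hypothetical. Storing "target minus field address" keeps such tables position independent:

struct rel_entry {
	int target_off;		/* target address minus &target_off */
};

static inline void *rel_entry_target(const struct rel_entry *e)
{
	return offset_to_ptr(&e->target_off);
}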
-#ifndef randomized_struct_fields_start
-# define randomized_struct_fields_start
-# define randomized_struct_fields_end
-#endif
+#endif /* __ASSEMBLY__ */
/*
- * Tell gcc if a function is cold. The compiler will assume any path
- * directly leading to the call is unlikely.
+ * Force the compiler to emit 'sym' as a symbol, so that we can reference
+ * it from inline assembler. Necessary in case 'sym' could be inlined
+ * otherwise, or eliminated entirely due to lack of references that are
+ * visible to the compiler.
*/
+#define ___ADDRESSABLE(sym, __attrs) \
+ static void * __used __attrs \
+ __UNIQUE_ID(__PASTE(addressable_, sym)) = (void *)(uintptr_t)&sym;
-#ifndef __cold
-#define __cold
-#endif
-
-/* Simple shorthand for a section definition */
-#ifndef __section
-# define __section(S) __attribute__ ((__section__(#S)))
-#endif
-
-#ifndef __visible
-#define __visible
-#endif
-
-#ifndef __nostackprotector
-# define __nostackprotector
-#endif
+#define __ADDRESSABLE(sym) \
+ ___ADDRESSABLE(sym, __section(".discard.addressable"))
/*
- * Assume alignment of return value.
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ *
+ * Details:
+ * - sizeof() returns an integer constant expression, and does not evaluate
+ * the value of its operand; it only examines the type of its operand.
+ * - The result of comparing two integer constant expressions is also
+ * an integer constant expression.
+ * - The first literal "8" isn't important. It could be any literal value.
+ * - The second literal "8" is to avoid warnings about unaligned pointers;
+ * this could otherwise just be "1".
+ * - (long)(x) is used to avoid warnings about 64-bit types on 32-bit
+ * architectures.
+ * - The C Standard defines "null pointer constant", "(void *)0", as
+ * distinct from other void pointers.
+ * - If (x) is an integer constant expression, then the "* 0l" resolves
+ * it into an integer constant expression of value 0. Since it is cast to
+ * "void *", this makes the second operand a null pointer constant.
+ * - If (x) is not an integer constant expression, then the second operand
+ * resolves to a void pointer (but not a null pointer constant: the value
+ * is not an integer constant 0).
+ * - The conditional operator's third operand, "(int *)8", is an object
+ * pointer (to type "int").
+ * - The behavior (including the return type) of the conditional operator
+ * ("operand1 ? operand2 : operand3") depends on the kind of expressions
+ * given for the second and third operands. This is the central mechanism
+ * of the macro:
+ * - When one operand is a null pointer constant (i.e. when x is an integer
+ * constant expression) and the other is an object pointer (i.e. our
+ * third operand), the conditional operator returns the type of the
+ * object pointer operand (i.e. "int *"). Here, within the sizeof(), we
+ * would then get:
+ * sizeof(*((int *)(...))) == sizeof(int) == 4
+ * - When one operand is a void pointer (i.e. when x is not an integer
+ * constant expression) and the other is an object pointer (i.e. our
+ * third operand), the conditional operator returns a "void *" type.
+ * Here, within the sizeof(), we would then get:
+ * sizeof(*((void *)(...))) == sizeof(void) == 1
+ * - The equality comparison to "sizeof(int)" therefore depends on (x):
+ * sizeof(int) == sizeof(int) (x) was a constant expression
+ * sizeof(int) != sizeof(void) (x) was not a constant expression
*/
-#ifndef __assume_aligned
-#define __assume_aligned(a, ...)
-#endif
-
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
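A compile-time sanity sketch of the two branches described above, assuming BUILD_BUG_ON() from <linux/build_bug.h>:

static inline void is_constexpr_demo(int n)
{
	BUILD_BUG_ON(!__is_constexpr(5));	/* literal: evaluates to 1 */
	BUILD_BUG_ON(__is_constexpr(n));	/* variable: evaluates to 0 */
}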
-/* Are two types/vars the same type (ignoring qualifiers)? */
-#ifndef __same_type
-# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
-#endif
-
-/* Is this type a native word size -- useful for atomic operations */
-#ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-#endif
-
-/* Compile time object size, -1 for unknown */
-#ifndef __compiletime_object_size
-# define __compiletime_object_size(obj) -1
-#endif
-#ifndef __compiletime_warning
-# define __compiletime_warning(message)
-#endif
-#ifndef __compiletime_error
-# define __compiletime_error(message)
/*
- * Sparse complains of variable sized arrays due to the temporary variable in
- * __compiletime_assert. Unfortunately we can't just expand it out to make
- * sparse see a constant array size without breaking compiletime_assert on old
- * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
+ * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
+ * bool and also pointer types.
*/
-# ifndef __CHECKER__
-# define __compiletime_error_fallback(condition) \
- do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
-# endif
-#endif
-#ifndef __compiletime_error_fallback
-# define __compiletime_error_fallback(condition) do { } while (0)
-#endif
-
-#ifdef __OPTIMIZE__
-# define __compiletime_assert(condition, msg, prefix, suffix) \
- do { \
- bool __cond = !(condition); \
- extern void prefix ## suffix(void) __compiletime_error(msg); \
- if (__cond) \
- prefix ## suffix(); \
- __compiletime_error_fallback(__cond); \
- } while (0)
-#else
-# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
-#endif
+#define is_signed_type(type) (((type)(-1)) < (__force type)1)
+#define is_unsigned_type(type) (!is_signed_type(type))
-#define _compiletime_assert(condition, msg, prefix, suffix) \
- __compiletime_assert(condition, msg, prefix, suffix)
-
-/**
- * compiletime_assert - break build and emit msg if condition is false
- * @condition: a compile-time constant condition to check
- * @msg: a message to emit if condition is false
+/*
+ * Useful shorthand for "is this condition known at compile-time?"
*
- * In tradition of POSIX assert, this macro will break the build if the
- * supplied condition is *false*, emitting the supplied error message if the
- * compiler has support to do so.
+ * Note that the condition may involve non-constant values,
+ * but the compiler may know enough about the details of the
+ * values to determine that the condition is statically true.
*/
-#define compiletime_assert(condition, msg) \
- _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
-
-#define compiletime_assert_atomic_type(t) \
- compiletime_assert(__native_word(t), \
- "Need native word sized stores/loads for atomicity.")
+#define statically_true(x) (__builtin_constant_p(x) && (x))
/*
- * Prevent the compiler from merging or refetching accesses. The compiler
- * is also forbidden from reordering successive instances of ACCESS_ONCE(),
- * but only when the compiler is aware of some particular ordering. One way
- * to make the compiler aware of ordering is to put the two invocations of
- * ACCESS_ONCE() in different C statements.
+ * Similar to statically_true() but produces a constant expression
*
- * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
- * on a union member will work as long as the size of the member matches the
- * size of the union and the size is smaller than word size.
+ * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
+ * which require their input to be a constant expression and for which
+ * statically_true() would otherwise fail.
*
- * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
- * between process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
+ * This is a trade-off: const_true() requires all its operands to be
+ * compile-time constants; otherwise it would always return false, even
+ * on the most trivial cases like:
*
- * If possible use READ_ONCE()/WRITE_ONCE() instead.
- */
-#define __ACCESS_ONCE(x) ({ \
- __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
- (volatile typeof(x) *)&(x); })
-#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
-
-/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
+ * true || non_const_var
*
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU. That
- * "something other" might be reference counting or simple immortality.
+ * By contrast, statically_true() is able to fold more complex
+ * tautologies and will return true on expressions such as:
*
- * The seemingly unused variable ___typecheck_p validates that @p is
- * indeed a pointer type by using a pointer to typeof(*p) as the type.
- * Taking a pointer to typeof(*p) again is needed in case p is void *.
+ * !(non_const_var * 8 % 4)
+ *
+ * For the general case, statically_true() is better.
*/
-#define lockless_dereference(p) \
-({ \
- typeof(p) _________p1 = READ_ONCE(p); \
- typeof(*(p)) *___typecheck_p __maybe_unused; \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- (_________p1); \
-})
+#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)
+
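A sketch of the difference, with 'v' as a hypothetical non-constant; statically_true() folds the tautology below, while only const_true() may be fed to macros that demand a constant expression:

static inline int fold_demo(int v)
{
	/* Folds to true at -O2 even though 'v' is not constant: */
	if (statically_true(!(v * 8 % 4)))
		return 0;
	return v;	/* not reached once the compiler folds the test */
}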
+/*
+ * This is needed in functions which generate the stack canary, see
+ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
+ */
+#define prevent_tail_call_optimization() mb()
+
+#include <asm/rwonce.h>
#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
new file mode 100644
index 000000000000..c16d4199bf92
--- /dev/null
+++ b/include/linux/compiler_attributes.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_ATTRIBUTES_H
+#define __LINUX_COMPILER_ATTRIBUTES_H
+
+/*
+ * The attributes in this file are unconditionally defined and they directly
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
+ *
+ * Any other "attributes" (i.e. those that depend on a configuration option,
+ * on a compiler, on an architecture, on plugins, on other attributes...)
+ * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
+ *
+ * This file is meant to be sorted (by actual attribute name,
+ * not by #define identifier). Use the __attribute__((__name__)) syntax
+ * (i.e. with underscores) to avoid future collisions with other macros.
+ * Provide links to the documentation of each supported compiler, if it exists.
+ */
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
+ */
+#define __alias(symbol) __attribute__((__alias__(#symbol)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-aligned-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-aligned-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-aligned-variable-attribute
+ */
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __aligned_largest __attribute__((__aligned__))
+
+/*
+ * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
+ * available and includes other attributes. For GCC < 9.1, __alloc_size__ gets undefined
+ * in compiler-gcc.h, due to misbehaviors.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
+ */
+#define __alloc_size__(x, ...) __attribute__((__alloc_size__(x, ## __VA_ARGS__)))
+
+/*
+ * Note: users of __always_inline currently do not write "inline" themselves,
+ * which seems to be required by gcc to apply the attribute according
+ * to its docs (and also "warning: always_inline function might not be
+ * inlinable [-Wattributes]" is emitted).
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-always_005finline-function-attribute
+ * clang: mentioned
+ */
+#define __always_inline inline __attribute__((__always_inline__))
+
+/*
+ * The second argument is optional (default 0), so we use a variadic macro
+ * to make the shorthand.
+ *
+ * Beware: Do not apply this to functions which may return
+ * ERR_PTRs. Also, it is probably unwise to apply it to functions
+ * returning extra information in the low bits (but in that case the
+ * compiler should see some alignment anyway, when the return value is
+ * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned
+ */
+#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+
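A declaration sketch (the allocator is hypothetical): promising 64-byte alignment for a buffer-returning function, heeding the ERR_PTR caveat above:

void *alloc_dma_buf(unsigned long len) __assume_aligned(64) __malloc;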
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
+ */
+#define __cleanup(func) __attribute__((__cleanup__(func)))
+
+/*
+ * Note the long name.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
+ */
+#define __attribute_const__ __attribute__((__const__))
+
+/*
+ * Optional: only supported since gcc >= 9
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
+ */
+#if __has_attribute(__copy__)
+# define __copy(symbol) __attribute__((__copy__(symbol)))
+#else
+# define __copy(symbol)
+#endif
+
+/*
+ * Optional: not supported by gcc
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#diagnose_as_builtin
+ */
+#if __has_attribute(__diagnose_as_builtin__)
+# define __diagnose_as(builtin...) __attribute__((__diagnose_as_builtin__(builtin)))
+#else
+# define __diagnose_as(builtin...)
+#endif
+
+/*
+ * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
+ * attribute warnings entirely and for good") for more information.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-deprecated-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-deprecated-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-deprecated-variable-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Enumerator-Attributes.html#index-deprecated-enumerator-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#deprecated
+ */
+#define __deprecated
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute
+ */
+#if __has_attribute(__designated_init__)
+# define __designated_init __attribute__((__designated_init__))
+#else
+# define __designated_init
+#endif
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute
+ */
+#if __has_attribute(__error__)
+# define __compiletime_error(msg) __attribute__((__error__(msg)))
+#else
+# define __compiletime_error(msg)
+#endif
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
+ */
+#if __has_attribute(__externally_visible__)
+# define __visible __attribute__((__externally_visible__))
+#else
+# define __visible
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-format-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#format
+ */
+#define __printf(a, b) __attribute__((__format__(printf, a, b)))
+#define __scanf(a, b) __attribute__((__format__(scanf, a, b)))
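+
+/*
+ * For example (a sketch; log_warn() is a placeholder), a printk-style
+ * helper taking a format string as its first argument is declared
+ *
+ *   __printf(1, 2) void log_warn(const char *fmt, ...);
+ *
+ * so the compiler type-checks the variadic arguments against @fmt.
+ */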
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-gnu_005finline-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#gnu-inline
+ */
+#define __gnu_inline __attribute__((__gnu_inline__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#malloc
+ */
+#define __malloc __attribute__((__malloc__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-mode-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-mode-variable-attribute
+ */
+#define __mode(x) __attribute__((__mode__(x)))
+
+/*
+ * Optional: only supported since gcc >= 7
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/x86-Function-Attributes.html#index-no_005fcaller_005fsaved_005fregisters-function-attribute_002c-x86
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-caller-saved-registers
+ */
+#if __has_attribute(__no_caller_saved_registers__)
+# define __no_caller_saved_registers __attribute__((__no_caller_saved_registers__))
+#else
+# define __no_caller_saved_registers
+#endif
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
+ */
+#if __has_attribute(__noclone__)
+# define __noclone __attribute__((__noclone__))
+#else
+# define __noclone
+#endif
+
+/*
+ * Add the pseudo keyword 'fallthrough' so case statement blocks
+ * must end with any of these keywords:
+ * break;
+ * fallthrough;
+ * continue;
+ * goto <label>;
+ * return [expression];
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#Statement-Attributes
+ */
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0) /* fallthrough */
+#endif
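+
+/*
+ * Example usage in a switch statement (a sketch; the MODE_* values and
+ * helpers are placeholders):
+ *
+ *   switch (mode) {
+ *   case MODE_FAST:
+ *           prepare_fast();
+ *           fallthrough;
+ *   case MODE_SLOW:
+ *           run();
+ *           break;
+ *   }
+ */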
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#flatten
+ */
+# define __flatten __attribute__((flatten))
+
+/*
+ * Note the missing underscores.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
+ * clang: mentioned
+ */
+#define noinline __attribute__((__noinline__))
+
+/*
+ * Optional: only supported since gcc >= 8
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute
+ */
+#if __has_attribute(__nonstring__)
+# define __nonstring __attribute__((__nonstring__))
+#else
+# define __nonstring
+#endif
+
+/*
+ * Optional: only supported since GCC >= 7.1, clang >= 13.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fprofile_005finstrument_005ffunction-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-profile-instrument-function
+ */
+#if __has_attribute(__no_profile_instrument_function__)
+# define __no_profile __attribute__((__no_profile_instrument_function__))
+#else
+# define __no_profile
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#id1
+ */
+#define __noreturn __attribute__((__noreturn__))
+
+/*
+ * Optional: only supported since GCC >= 11.1, clang >= 7.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fstack_005fprotector-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-stack-protector-safebuffers
+ */
+#if __has_attribute(__no_stack_protector__)
+# define __no_stack_protector __attribute__((__no_stack_protector__))
+#else
+# define __no_stack_protector
+#endif
+
+/*
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#overloadable
+ */
+#if __has_attribute(__overloadable__)
+# define __overloadable __attribute__((__overloadable__))
+#else
+# define __overloadable
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute
+ */
+#define __packed __attribute__((__packed__))
+
+/*
+ * Note: the "type" argument should match any __builtin_object_size(p, type) usage.
+ *
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#pass-object-size-pass-dynamic-object-size
+ */
+#if __has_attribute(__pass_dynamic_object_size__)
+# define __pass_dynamic_object_size(type) __attribute__((__pass_dynamic_object_size__(type)))
+#else
+# define __pass_dynamic_object_size(type)
+#endif
+#if __has_attribute(__pass_object_size__)
+# define __pass_object_size(type) __attribute__((__pass_object_size__(type)))
+#else
+# define __pass_object_size(type)
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
+ */
+#define __pure __attribute__((__pure__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-section-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate
+ */
+#define __section(section) __attribute__((__section__(section)))
+
+/*
+ * Optional: only supported since gcc >= 12
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#uninitialized
+ */
+#if __has_attribute(__uninitialized__)
+# define __uninitialized __attribute__((__uninitialized__))
+#else
+# define __uninitialized
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-unused-label-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#maybe-unused-unused
+ */
+#define __always_unused __attribute__((__unused__))
+#define __maybe_unused __attribute__((__unused__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-used-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-used-variable-attribute
+ */
+#define __used __attribute__((__used__))
+
+/*
+ * The __used attribute guarantees that the attributed variable will
+ * always be emitted by the compiler. It doesn't prevent the compiler from
+ * throwing 'unused' warnings when it can't detect how the variable is
+ * actually used. It is a compiler implementation detail whether the
+ * warning is emitted in that case or not.
+ *
+ * The combination of both 'used' and 'unused' attributes ensures that
+ * the variable will be emitted and will not trigger 'unused' warnings.
+ * The combination is applicable to functions, static and global variables.
+ */
+#define __always_used __used __maybe_unused
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
+ */
+#define __must_check __attribute__((__warn_unused_result__))
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute
+ */
+#if __has_attribute(__warning__)
+# define __compiletime_warning(msg) __attribute__((__warning__(msg)))
+#else
+# define __compiletime_warning(msg)
+#endif
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#disable-sanitizer-instrumentation
+ *
+ * disable_sanitizer_instrumentation is not always similar to
+ * no_sanitize((<sanitizer-name>)): the latter may still let specific sanitizers
+ * insert code into functions to prevent false positives. Unlike that,
+ * disable_sanitizer_instrumentation prevents all kinds of instrumentation to
+ * functions with the attribute.
+ */
+#if __has_attribute(disable_sanitizer_instrumentation)
+# define __disable_sanitizer_instrumentation \
+ __attribute__((disable_sanitizer_instrumentation))
+#else
+# define __disable_sanitizer_instrumentation
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
+ */
+#define __weak __attribute__((__weak__))
+
+/*
+ * Used by functions that use '__builtin_return_address'. These functions
+ * must not be split or inlined, since either can make
+ * '__builtin_return_address' return an unexpected address.
+ */
+#define __fix_address noinline __noclone
+
+#endif /* __LINUX_COMPILER_ATTRIBUTES_H */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
new file mode 100644
index 000000000000..1280693766b9
--- /dev/null
+++ b/include/linux/compiler_types.h
@@ -0,0 +1,659 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
+#define __LINUX_COMPILER_TYPES_H
+
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a, b) a##b
+#define __PASTE(a, b) ___PASTE(a, b)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * C23 introduces "auto" as a standard way to define type-inferred
+ * variables, but "auto" has been a (useless) keyword ever since K&R C,
+ * so it has always been "namespace reserved."
+ *
+ * Until at some future time we require C23 support, we need the gcc
+ * extension __auto_type, but there is no reason to put that elsewhere
+ * in the source code.
+ */
+#if __STDC_VERSION__ < 202311L
+# define auto __auto_type
+#endif
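+
+/*
+ * For example (a sketch):
+ *
+ *   auto total = a + b;
+ *
+ * gives 'total' the inferred type of (a + b); before C23 this expands to
+ * the gcc extension __auto_type, afterwards 'auto' is the standard keyword.
+ */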
+
+/*
+ * Skipped when running bindgen due to a libclang issue;
+ * see https://github.com/rust-lang/rust-bindgen/issues/2244.
+ */
+#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
+ __has_attribute(btf_type_tag) && !defined(__BINDGEN__)
+# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
+#else
+# define BTF_TYPE_TAG(value) /* nothing */
+#endif
+
+/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
+#ifdef __CHECKER__
+/* address spaces */
+# define __kernel __attribute__((address_space(0)))
+# define __user __attribute__((noderef, address_space(__user)))
+# define __iomem __attribute__((noderef, address_space(__iomem)))
+# define __percpu __attribute__((noderef, address_space(__percpu)))
+# define __rcu __attribute__((noderef, address_space(__rcu)))
+static inline void __chk_user_ptr(const volatile void __user *ptr) { }
+static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
+/* context/locking */
+# define __must_hold(x) __attribute__((context(x,1,1)))
+# define __acquires(x) __attribute__((context(x,0,1)))
+# define __cond_acquires(x) __attribute__((context(x,0,-1)))
+# define __releases(x) __attribute__((context(x,1,0)))
+# define __acquire(x) __context__(x,1)
+# define __release(x) __context__(x,-1)
+# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+/* other */
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __safe __attribute__((safe))
+# define __private __attribute__((noderef))
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
+/* address spaces */
+# define __kernel
+# ifdef STRUCTLEAK_PLUGIN
+# define __user __attribute__((user))
+# else
+# define __user BTF_TYPE_TAG(user)
+# endif
+# define __iomem
+# define __percpu __percpu_qual BTF_TYPE_TAG(percpu)
+# define __rcu BTF_TYPE_TAG(rcu)
+
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+/* context/locking */
+# define __must_hold(x)
+# define __acquires(x)
+# define __cond_acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x,c) (c)
+/* other */
+# define __force
+# define __nocast
+# define __safe
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+# define __builtin_warning(x, y...) (1)
+#endif /* __CHECKER__ */
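+
+/*
+ * An illustrative sketch of the locking annotations above (my_lock(),
+ * my_unlock() and 'struct foo' are placeholders):
+ *
+ *   void my_lock(spinlock_t *l) __acquires(l);
+ *   void my_unlock(spinlock_t *l) __releases(l);
+ *   void update_foo(struct foo *f) __must_hold(&f->lock);
+ *
+ * sparse can then warn when lock acquisitions and releases are not
+ * balanced across calls to these functions.
+ */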
+
+#ifdef __KERNEL__
+
+/* Attributes */
+#include <linux/compiler_attributes.h>
+
+#if CONFIG_FUNCTION_ALIGNMENT > 0
+#define __function_aligned __aligned(CONFIG_FUNCTION_ALIGNMENT)
+#else
+#define __function_aligned
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ *
+ * When -falign-functions=N is in use, we must avoid the cold attribute as
+ * GCC drops the alignment for cold functions. Worse, GCC can implicitly mark
+ * callees of cold functions as cold themselves, so it's not sufficient to add
+ * __function_aligned here as that will not ensure that callees are correctly
+ * aligned.
+ *
+ * See:
+ *
+ * https://lore.kernel.org/lkml/Y77%2FqVgvaJidFpYt@FVFF77S0Q05N
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c9
+ */
+#if defined(CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT) || (CONFIG_FUNCTION_ALIGNMENT == 0)
+#define __cold __attribute__((__cold__))
+#else
+#define __cold
+#endif
+
+/*
+ * On x86-64 and arm64 targets, __preserve_most changes the calling convention
+ * of a function to make the code in the caller as unintrusive as possible. This
+ * convention behaves identically to the C calling convention on how arguments
+ * and return values are passed, but uses a different set of caller- and callee-
+ * saved registers.
+ *
+ * The purpose is to alleviate the burden of saving and restoring a large
+ * register set before and after the call in the caller. This is beneficial for
+ * rarely taken slow paths, such as error-reporting functions that may be called
+ * from hot paths.
+ *
+ * Note: This may conflict with instrumentation inserted on function entry which
+ * does not use __preserve_most or equivalent convention (if in assembly). Since
+ * function tracing assumes the normal C calling convention, where the attribute
+ * is supported, __preserve_most implies notrace. It is recommended to restrict
+ * use of the attribute to functions that should or already disable tracing.
+ *
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most
+ */
+#if __has_attribute(__preserve_most__) && (defined(CONFIG_X86_64) || defined(CONFIG_ARM64))
+# define __preserve_most notrace __attribute__((__preserve_most__))
+#else
+# define __preserve_most
+#endif
+
+/*
+ * Annotating a function/variable with __retain tells the compiler to place
+ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
+ * instructs the linker to retain the object during garbage-cleanup or LTO
+ * phases.
+ *
+ * Note that the __used macro is also used to prevent functions or data
+ * being optimized out, but operates at the compiler/IR-level and may still
+ * allow unintended removal of objects during linking.
+ *
+ * Optional: only supported since gcc >= 11, clang >= 13
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
+ */
+#if __has_attribute(__retain__) && \
+ (defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
+ defined(CONFIG_LTO_CLANG))
+# define __retain __attribute__((__retain__))
+#else
+# define __retain
+#endif
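+
+/*
+ * Sketch of usage: keeping an otherwise unreferenced object through
+ * linker-level dead code elimination ('build_tag' is a placeholder):
+ *
+ *   static const char build_tag[] __used __retain = "example";
+ */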
+
+/* Compiler specific macros. */
+#ifdef __clang__
+#include <linux/compiler-clang.h>
+#elif defined(__GNUC__)
+/* The above compilers also define __GNUC__, so order is important here. */
+#include <linux/compiler-gcc.h>
+#else
+#error "Unknown compiler"
+#endif
+
+/*
+ * Some architectures need to provide custom definitions of macros provided
+ * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
+ * conditionally rather than using an asm-generic wrapper in order to avoid
+ * build failures if any C compilation, which will include this file via an
+ * -include argument in c_flags, occurs prior to the asm-generic wrappers being
+ * generated.
+ */
+#ifdef CONFIG_HAVE_ARCH_COMPILER_H
+#include <asm/compiler.h>
+#endif
+
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
+
+struct ftrace_likely_data {
+ struct ftrace_branch_data data;
+ unsigned long constant;
+};
+
+#if defined(CC_USING_HOTPATCH)
+#define notrace __attribute__((hotpatch(0, 0)))
+#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
+#define notrace __attribute__((patchable_function_entry(0, 0)))
+#else
+#define notrace __attribute__((__no_instrument_function__))
+#endif
+
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ */
+#define __naked __attribute__((__naked__)) notrace
+
+/*
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
+ */
+#define inline inline __gnu_inline __inline_maybe_unused notrace
+
+/*
+ * gcc provides both __inline__ and __inline as alternate spellings of
+ * the inline keyword, though the latter is undocumented. New kernel
+ * code should only use the inline spelling, but some existing code
+ * uses __inline__. Since we #define inline above, to ensure
+ * __inline__ has the same semantics, we need this #define.
+ *
+ * However, the spelling __inline is strictly reserved for referring
+ * to the bare keyword.
+ */
+#define __inline__ inline
+
+/*
+ * GCC does not warn about unused static inline functions for -Wunused-function.
+ * Suppress the warning in clang as well by using __maybe_unused, but enable it
+ * for W=2 build. This will allow clang to find unused functions.
+ */
+#ifdef KBUILD_EXTRA_WARN2
+#define __inline_maybe_unused
+#else
+#define __inline_maybe_unused __maybe_unused
+#endif
+
+/*
+ * Rather than using noinline to prevent stack consumption, use
+ * noinline_for_stack instead, for documentation reasons.
+ */
+#define noinline_for_stack noinline
+
+/*
+ * Use noinline_for_tracing for functions that should not be inlined.
+ * For tracing reasons.
+ */
+#define noinline_for_tracing noinline
+
+/*
+ * Sanitizer helper attributes: Because using __always_inline and
+ * __no_sanitize_* conflict, provide helper attributes that will either expand
+ * to __no_sanitize_* in compilation units where instrumentation is enabled
+ * (__SANITIZE_*__), or __always_inline in compilation units without
+ * instrumentation (__SANITIZE_*__ undefined).
+ */
+#ifdef __SANITIZE_ADDRESS__
+/*
+ * We can't declare a function 'inline' because __no_sanitize_address
+ * conflicts with inlining. Attempting to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
+#else
+# define __no_kasan_or_inline __always_inline
+#endif
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
+ * atomics even with __no_sanitize_thread (to avoid false positives in userspace
+ * ThreadSanitizer). The kernel's requirements are stricter and we really do not
+ * want any instrumentation with __no_kcsan.
+ *
+ * Therefore we add __disable_sanitizer_instrumentation where available to
+ * disable all instrumentation. See Kconfig.kcsan where this is mandatory.
+ */
+# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
+/*
+ * Type qualifier to mark variables where all data-racy accesses should be
+ * ignored by KCSAN. Note, the implementation simply marks these variables as
+ * volatile, since KCSAN will treat such accesses as "marked".
+ */
+# define __data_racy volatile
+# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
+#else
+# define __no_kcsan
+# define __data_racy
+#endif
+
+#ifdef __SANITIZE_MEMORY__
+/*
+ * Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined
+ * functions, therefore disabling KMSAN checks also requires disabling inlining.
+ *
+ * __no_sanitize_or_inline effectively prevents KMSAN from reporting errors
+ * within the function and marks all its outputs as initialized.
+ */
+# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
+/*
+ * The assume attribute is used to indicate that a certain condition is
+ * assumed to be true. If this condition is violated at runtime, the behavior
+ * is undefined. Compilers may or may not use this indication to generate
+ * optimized code.
+ *
+ * Note that the clang documentation states that optimizers may react
+ * differently to this attribute, and this may even have a negative
+ * performance impact. Therefore this attribute should be used with care.
+ *
+ * Optional: only supported since gcc >= 13
+ * Optional: only supported since clang >= 19
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#index-assume-statement-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#id13
+ */
+#ifdef CONFIG_CC_HAS_ASSUME
+# define __assume(expr) __attribute__((__assume__(expr)))
+#else
+# define __assume(expr)
+#endif
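+
+/*
+ * Sketch of usage: telling the optimizer that an index is in range,
+ *
+ *   __assume(idx < ARRAY_SIZE(table));
+ *   return table[idx];
+ *
+ * If the assumed condition is ever false at runtime, the behavior is
+ * undefined.
+ */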
+
+/*
+ * Optional: only supported since gcc >= 15
+ * Optional: only supported since clang >= 18
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
+ * clang: https://github.com/llvm/llvm-project/pull/76348
+ *
+ * __bdos on clang < 19.1.2 can erroneously return 0:
+ * https://github.com/llvm/llvm-project/pull/110497
+ *
+ * __bdos on clang < 19.1.3 can be off by 4:
+ * https://github.com/llvm/llvm-project/pull/112636
+ */
+#ifdef CONFIG_CC_HAS_COUNTED_BY
+# define __counted_by(member) __attribute__((__counted_by__(member)))
+#else
+# define __counted_by(member)
+#endif
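+
+/*
+ * Typical usage (a sketch): annotating a flexible array with the member
+ * holding its element count, so that __bdos() and the array-bounds
+ * sanitizers can bounds-check accesses:
+ *
+ *   struct pkt {
+ *           u16 len;
+ *           u8  data[] __counted_by(len);
+ *   };
+ */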
+
+/*
+ * Optional: only supported since gcc >= 15
+ * Optional: not supported by Clang
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=117178
+ */
+#ifdef CONFIG_CC_HAS_MULTIDIMENSIONAL_NONSTRING
+# define __nonstring_array __attribute__((__nonstring__))
+#else
+# define __nonstring_array
+#endif
+
+/*
+ * Apply __counted_by() when the endianness matches to increase test coverage.
+ */
+#ifdef __LITTLE_ENDIAN
+#define __counted_by_le(member) __counted_by(member)
+#define __counted_by_be(member)
+#else
+#define __counted_by_le(member)
+#define __counted_by_be(member) __counted_by(member)
+#endif
+
+/*
+ * This designates the minimum number of elements a passed array parameter must
+ * have. For example:
+ *
+ * void some_function(u8 param[at_least 7]);
+ *
+ * If a caller passes an array with fewer than 7 elements, the compiler will
+ * emit a warning.
+ */
+#ifndef __CHECKER__
+#define at_least static
+#else
+#define at_least
+#endif
+
+/* Do not trap wrapping arithmetic within an annotated function. */
+#ifdef CONFIG_UBSAN_INTEGER_WRAP
+# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
+#else
+# define __signed_wrap
+#endif
+
+/* Section for code which can't be instrumented at all */
+#define __noinstr_section(section) \
+ noinline notrace __attribute((__section__(section))) \
+ __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
+ __no_sanitize_memory __signed_wrap
+
+#define noinstr __noinstr_section(".noinstr.text")
+
+/*
+ * The __cpuidle section is used twofold:
+ *
+ * 1) the original use -- identifying if a CPU is 'stuck' in idle state based
+ *    on its instruction pointer. See cpu_in_idle().
+ *
+ * 2) suppressing instrumentation around where cpuidle disables RCU; where the
+ * function isn't strictly required for #1, this is interchangeable with
+ * noinstr.
+ */
+#define __cpuidle __noinstr_section(".cpuidle.text")
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The below symbols may be defined for one or more, but not ALL, of the above
+ * compilers. We don't consider that to be an error, so set them to nothing.
+ * For example, some of them are for compiler specific plugins.
+ */
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
+#if defined(RANDSTRUCT) && !defined(__CHECKER__)
+# define __randomize_layout __designated_init __attribute__((randomize_layout))
+# define __no_randomize_layout __attribute__((no_randomize_layout))
+/* This anon struct can add padding, so only enable it under randstruct. */
+# define randomized_struct_fields_start struct {
+# define randomized_struct_fields_end } __randomize_layout;
+#else
+# define __randomize_layout __designated_init
+# define __no_randomize_layout
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
+
+#ifndef __no_kstack_erase
+# define __no_kstack_erase
+#endif
+
+#ifndef __noscs
+# define __noscs
+#endif
+
+#if defined(CONFIG_CFI)
+# define __nocfi __attribute__((__no_sanitize__("kcfi")))
+#else
+# define __nocfi
+#endif
+
+#if defined(CONFIG_ARCH_USES_CFI_GENERIC_LLVM_PASS)
+# define __nocfi_generic __nocfi
+#else
+# define __nocfi_generic
+#endif
+
+/*
+ * Any place that could be marked with the "alloc_size" attribute is also
+ * a place to be marked with the "malloc" attribute, except those that may
+ * be performing a _reallocation_, as that may alias the existing pointer.
+ * For these, use __realloc_size().
+ */
+#ifdef __alloc_size__
+# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
+# define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__)
+#else
+# define __alloc_size(x, ...) __malloc
+# define __realloc_size(x, ...)
+#endif
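+
+/*
+ * Sketch of usage on allocator declarations (my_alloc() and my_realloc()
+ * are placeholders):
+ *
+ *   void *my_alloc(size_t size) __alloc_size(1);
+ *   void *my_realloc(void *p, size_t size) __realloc_size(2);
+ *
+ * The argument index tells the compiler which parameter carries the
+ * allocation size, so object sizes can be tracked for bounds checking.
+ */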
+
+/*
+ * When the size of an allocated object is needed, use the best available
+ * mechanism to find it. (For cases where sizeof() cannot be used.)
+ *
+ * Optional: only supported since gcc >= 12
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
+ * clang: https://clang.llvm.org/docs/LanguageExtensions.html#evaluating-object-size
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
+#define __member_size(p) __builtin_dynamic_object_size(p, 1)
+#else
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+#endif
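+
+/*
+ * For instance (a sketch; 'dst' and 'bytes' are placeholders), a copy
+ * helper can refuse to overflow a buffer member:
+ *
+ *   if (bytes > __member_size(dst->buf))
+ *           return -E2BIG;
+ */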
+
+/*
+ * Determine if an attribute has been applied to a variable.
+ * Users of __annotated() need to first check that it is available,
+ * or negative tests may fail when the annotation cannot be checked. For
+ * example, see the definition of __is_cstr().
+ */
+#if __has_builtin(__builtin_has_attribute)
+#define __annotated(var, attr) __builtin_has_attribute(var, attr)
+#endif
+
+/*
+ * Some versions of gcc do not mark 'asm goto' volatile:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
+ *
+ * We do it here by hand, because it doesn't hurt.
+ */
+#ifndef asm_goto_output
+#define asm_goto_output(x...) asm volatile goto(x)
+#endif
+
+/*
+ * Clang has trouble with constraints with multiple
+ * alternative behaviors (mainly "g" and "rm").
+ */
+#ifndef ASM_INPUT_G
+ #define ASM_INPUT_G "g"
+ #define ASM_INPUT_RM "rm"
+#endif
+
+#ifdef CONFIG_CC_HAS_ASM_INLINE
+#define asm_inline asm __inline
+#else
+#define asm_inline asm
+#endif
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+
+/*
+ * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ */
+/*
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ */
+#define __scalar_type_to_expr_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (signed type)0
+
+#define __unqual_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (char)0, \
+ __scalar_type_to_expr_cases(char), \
+ __scalar_type_to_expr_cases(short), \
+ __scalar_type_to_expr_cases(int), \
+ __scalar_type_to_expr_cases(long), \
+ __scalar_type_to_expr_cases(long long), \
+ default: (x)))
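+
+/*
+ * For example (a sketch): loading from a 'volatile int' can yield a plain
+ * 'int', which is how READ_ONCE()-style accessors avoid a volatile-qualified
+ * result:
+ *
+ *   volatile int v;
+ *   __unqual_scalar_typeof(v) x = v;
+ */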
+
+/* Is this type a native word size -- useful for atomic operations */
+#define __native_word(t) \
+ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
+ sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+
+#ifdef __OPTIMIZE__
+/*
+ * #ifdef __OPTIMIZE__ is only a good approximation; for instance "make
+ * CFLAGS_foo.o=-Og" defines __OPTIMIZE__, so the conditional code is not
+ * elided, and compilation can break with misleading error message(s). Combine with
+ * -U__OPTIMIZE__ when needed.
+ */
+# define __compiletime_assert(condition, msg, prefix, suffix) \
+ do { \
+ /* \
+ * __noreturn is needed to give the compiler enough \
+ * information to avoid certain possibly-uninitialized \
+ * warnings (regardless of the build failing). \
+ */ \
+ __noreturn extern void prefix ## suffix(void) \
+ __compiletime_error(msg); \
+ if (!(condition)) \
+ prefix ## suffix(); \
+ } while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) ((void)(condition))
+#endif
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+ __compiletime_assert(condition, msg, prefix, suffix)
+
+/**
+ * compiletime_assert - break build and emit msg if condition is false
+ * @condition: a compile-time constant condition to check
+ * @msg: a message to emit if condition is false
+ *
+ * In tradition of POSIX assert, this macro will break the build if the
+ * supplied condition is *false*, emitting the supplied error message if the
+ * compiler has support to do so.
+ */
+#define compiletime_assert(condition, msg) \
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+
+#define compiletime_assert_atomic_type(t) \
+ compiletime_assert(__native_word(t), \
+ "Need native word sized stores/loads for atomicity.")
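+
+/*
+ * Example (a sketch):
+ *
+ *   compiletime_assert(sizeof(long) == 8, "expects a 64-bit long");
+ *
+ * breaks the build with the given message when the condition is a
+ * compile-time false; BUILD_BUG_ON() is built on this machinery.
+ */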
+
+/* Helpers for emitting diagnostics in pragmas. */
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+ __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+ __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+ __diag_ ## compiler(version, error, option)
+
+#ifndef __diag_ignore_all
+#define __diag_ignore_all(option, comment)
+#endif
+
+#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index cae5400022a3..fb2915676574 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPLETION_H
#define __LINUX_COMPLETION_H
@@ -8,10 +9,7 @@
* See kernel/sched/completion.c for details.
*/
-#include <linux/wait.h>
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#include <linux/lockdep.h>
-#endif
+#include <linux/swait.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -27,51 +25,18 @@
*/
struct completion {
unsigned int done;
- wait_queue_head_t wait;
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
- struct lockdep_map_cross map;
-#endif
+ struct swait_queue_head wait;
};
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-static inline void complete_acquire(struct completion *x)
-{
- lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
-}
-
-static inline void complete_release(struct completion *x)
-{
- lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
-}
-
-static inline void complete_release_commit(struct completion *x)
-{
- lock_commit_crosslock((struct lockdep_map *)&x->map);
-}
-
-#define init_completion(x) \
-do { \
- static struct lock_class_key __key; \
- lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
- "(complete)" #x, \
- &__key, 0); \
- __init_completion(x); \
-} while (0)
-#else
-#define init_completion(x) __init_completion(x)
+#define init_completion_map(x, m) init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
-static inline void complete_release_commit(struct completion *x) {}
-#endif
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
- STATIC_CROSS_LOCKDEP_MAP_INIT("(complete)" #work, &(work)) }
-#else
-#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#endif
+ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+
+#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
+ (*({ init_completion_map(&(work), &(map)); &(work); }))
#define COMPLETION_INITIALIZER_ONSTACK(work) \
(*({ init_completion(&work); &work; }))
@@ -102,8 +67,11 @@ static inline void complete_release_commit(struct completion *x) {}
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
+# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \
+ struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
#else
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work)
#endif
/**
@@ -113,10 +81,10 @@ static inline void complete_release_commit(struct completion *x) {}
* This inline function will initialize a dynamically created completion
* structure.
*/
-static inline void __init_completion(struct completion *x)
+static inline void init_completion(struct completion *x)
{
x->done = 0;
- init_waitqueue_head(&x->wait);
+ init_swait_queue_head(&x->wait);
}
/**
@@ -135,6 +103,7 @@ extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
+extern int wait_for_completion_state(struct completion *x, unsigned int state);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
@@ -147,6 +116,7 @@ extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
+extern void complete_on_current_cpu(struct completion *x);
extern void complete_all(struct completion *);
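+
+/*
+ * Typical usage of the API above (a sketch; 'wq' and 'work' are
+ * placeholders, with the worker calling complete(&done)):
+ *
+ *   DECLARE_COMPLETION_ONSTACK(done);
+ *
+ *   queue_work(wq, &work);
+ *   wait_for_completion(&done);
+ */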
#endif
diff --git a/include/linux/component.h b/include/linux/component.h
index a559eebc0e0f..9d6c66401280 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -1,47 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef COMPONENT_H
#define COMPONENT_H
#include <linux/stddef.h>
+#include <linux/types.h>
struct device;
+/**
+ * struct component_ops - callbacks for component drivers
+ *
+ * Components are registered with component_add() and unregistered with
+ * component_del().
+ */
struct component_ops {
+ /**
+ * @bind:
+ *
+ * Called through component_bind_all() when the aggregate driver is
+ * ready to bind the overall driver.
+ */
int (*bind)(struct device *comp, struct device *master,
void *master_data);
+ /**
+ * @unbind:
+ *
+ * Called through component_unbind_all() when the aggregate driver is
+ * ready to unbind the overall driver, or when component_bind_all() fails
+ * part-ways through and needs to unbind some already bound components.
+ */
void (*unbind)(struct device *comp, struct device *master,
void *master_data);
};
int component_add(struct device *, const struct component_ops *);
+int component_add_typed(struct device *dev, const struct component_ops *ops,
+ int subcomponent);
void component_del(struct device *, const struct component_ops *);
-int component_bind_all(struct device *master, void *master_data);
-void component_unbind_all(struct device *master, void *master_data);
+int component_bind_all(struct device *parent, void *data);
+void component_unbind_all(struct device *parent, void *data);
-struct master;
+struct aggregate_device;
+/**
+ * struct component_master_ops - callback for the aggregate driver
+ *
+ * Aggregate drivers are registered with component_master_add_with_match() and
+ * unregistered with component_master_del().
+ */
struct component_master_ops {
+ /**
+ * @bind:
+ *
+ * Called when all components of the aggregate driver, as specified in
+ * the match list passed to component_master_add_with_match(), are
+ * ready. Usually there are 3 steps to bind an aggregate driver:
+ *
+ * 1. Allocate a structure for the aggregate driver.
+ *
+ * 2. Bind all components to the aggregate driver by calling
+ * component_bind_all() with the aggregate driver structure as opaque
+ * pointer data.
+ *
+ * 3. Register the aggregate driver with the subsystem to publish its
+ * interfaces.
+ *
+ * Note that the lifetime of the aggregate driver does not align with
+ * any of the underlying &struct device instances. Therefore devm cannot
+ * be used and all resources acquired or allocated in this callback must
+ * be explicitly released in the @unbind callback.
+ */
int (*bind)(struct device *master);
+ /**
+ * @unbind:
+ *
+ * Called when either the aggregate driver, using
+ * component_master_del(), or one of its components, using
+ * component_del(), is unregistered.
+ */
void (*unbind)(struct device *master);
};
+/* A set of helper functions for component compare/release */
+int component_compare_of(struct device *dev, void *data);
+void component_release_of(struct device *dev, void *data);
+int component_compare_dev(struct device *dev, void *data);
+int component_compare_dev_name(struct device *dev, void *data);
+
void component_master_del(struct device *,
const struct component_master_ops *);
+bool component_master_is_bound(struct device *parent,
+ const struct component_master_ops *ops);
struct component_match;
int component_master_add_with_match(struct device *,
const struct component_master_ops *, struct component_match *);
-void component_match_add_release(struct device *master,
+void component_match_add_release(struct device *parent,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data);
+void component_match_add_typed(struct device *parent,
+ struct component_match **matchptr,
+ int (*compare_typed)(struct device *, int, void *), void *compare_data);
-static inline void component_match_add(struct device *master,
+/**
+ * component_match_add - add a component match entry
+ * @parent: device with the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @compare: compare function to match against all components
+ * @compare_data: opaque pointer passed to the @compare function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the @parent
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions.
+ *
+ * See also component_match_add_release() and component_match_add_typed().
+ */
+static inline void component_match_add(struct device *parent,
struct component_match **matchptr,
int (*compare)(struct device *, void *), void *compare_data)
{
- component_match_add_release(master, matchptr, NULL, compare,
+ component_match_add_release(parent, matchptr, NULL, compare,
compare_data);
}
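+
+/*
+ * A minimal sketch of aggregate driver setup ('my_compare' and 'my_ops'
+ * are placeholders):
+ *
+ *   struct component_match *match = NULL;
+ *
+ *   component_match_add(dev, &match, my_compare, NULL);
+ *   component_master_add_with_match(dev, &my_ops, match);
+ */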
diff --git a/include/linux/concap.h b/include/linux/concap.h
deleted file mode 100644
index 977acb3d1fb2..000000000000
--- a/include/linux/concap.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* $Id: concap.h,v 1.3.2.2 2004/01/12 23:08:35 keil Exp $
- *
- * Copyright 1997 by Henner Eisen <eis@baty.hanse.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#ifndef _LINUX_CONCAP_H
-#define _LINUX_CONCAP_H
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-/* Stuff to support encapsulation protocols genericly. The encapsulation
- protocol is processed at the uppermost layer of the network interface.
-
- Based on a ideas developed in a 'synchronous device' thread in the
- linux-x25 mailing list contributed by Alan Cox, Thomasz Motylewski
- and Jonathan Naylor.
-
- For more documetation on this refer to Documentation/isdn/README.concap
-*/
-
-struct concap_proto_ops;
-struct concap_device_ops;
-
-/* this manages all data needed by the encapsulation protocol
- */
-struct concap_proto{
- struct net_device *net_dev; /* net device using our service */
- struct concap_device_ops *dops; /* callbacks provided by device */
- struct concap_proto_ops *pops; /* callbacks provided by us */
- spinlock_t lock;
- int flags;
- void *proto_data; /* protocol specific private data, to
- be accessed via *pops methods only*/
- /*
- :
- whatever
- :
- */
-};
-
-/* Operations to be supported by the net device. Called by the encapsulation
- * protocol entity. No receive method is offered because the encapsulation
- * protocol directly calls netif_rx().
- */
-struct concap_device_ops{
-
- /* to request data is submitted by device*/
- int (*data_req)(struct concap_proto *, struct sk_buff *);
-
- /* Control methods must be set to NULL by devices which do not
- support connection control.*/
- /* to request a connection is set up */
- int (*connect_req)(struct concap_proto *);
-
- /* to request a connection is released */
- int (*disconn_req)(struct concap_proto *);
-};
-
-/* Operations to be supported by the encapsulation protocol. Called by
- * device driver.
- */
-struct concap_proto_ops{
-
- /* create a new encapsulation protocol instance of same type */
- struct concap_proto * (*proto_new) (void);
-
- /* delete encapsulation protocol instance and free all its resources.
- cprot may no loger be referenced after calling this */
- void (*proto_del)(struct concap_proto *cprot);
-
- /* initialize the protocol's data. To be called at interface startup
- or when the device driver resets the interface. All services of the
- encapsulation protocol may be used after this*/
- int (*restart)(struct concap_proto *cprot,
- struct net_device *ndev,
- struct concap_device_ops *dops);
-
- /* inactivate an encapsulation protocol instance. The encapsulation
- protocol may not call any *dops methods after this. */
- int (*close)(struct concap_proto *cprot);
-
- /* process a frame handed down to us by upper layer */
- int (*encap_and_xmit)(struct concap_proto *cprot, struct sk_buff *skb);
-
- /* to be called for each data entity received from lower layer*/
- int (*data_ind)(struct concap_proto *cprot, struct sk_buff *skb);
-
- /* to be called when a connection was set up/down.
- Protocols that don't process these primitives might fill in
- dummy methods here */
- int (*connect_ind)(struct concap_proto *cprot);
- int (*disconn_ind)(struct concap_proto *cprot);
- /*
- Some network device support functions, like net_header(), rebuild_header(),
- and others, that depend solely on the encapsulation protocol, might
- be provided here, too. The net device would just fill them in its
- corresponding fields when it is opened.
- */
-};
-
-/* dummy restart/close/connect/reset/disconn methods
- */
-extern int concap_nop(struct concap_proto *cprot);
-
-/* dummy submit method
- */
-extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb);
-#endif
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index c96709049683..ef65c75beeaa 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -1,23 +1,7 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
* configfs.h - definitions for the device driver filesystem
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
@@ -27,7 +11,7 @@
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
- * Please read Documentation/filesystems/configfs/configfs.txt before using
+ * Please read Documentation/filesystems/configfs.rst before using
* the configfs interface, ESPECIALLY the parts about reference counts and
* item destructors.
*/
@@ -58,7 +42,7 @@ struct config_item {
struct list_head ci_entry;
struct config_item *ci_parent;
struct config_group *ci_group;
- struct config_item_type *ci_type;
+ const struct config_item_type *ci_type;
struct dentry *ci_dentry;
};
@@ -72,7 +56,7 @@ static inline char *config_item_name(struct config_item * item)
extern void config_item_init_type_name(struct config_item *item,
const char *name,
- struct config_item_type *type);
+ const struct config_item_type *type);
extern struct config_item *config_item_get(struct config_item *);
extern struct config_item *config_item_get_unless_zero(struct config_item *);
@@ -80,8 +64,8 @@ extern void config_item_put(struct config_item *);
struct config_item_type {
struct module *ct_owner;
- struct configfs_item_operations *ct_item_ops;
- struct configfs_group_operations *ct_group_ops;
+ const struct configfs_item_operations *ct_item_ops;
+ const struct configfs_group_operations *ct_group_ops;
struct configfs_attribute **ct_attrs;
struct configfs_bin_attribute **ct_bin_attrs;
};
@@ -101,7 +85,7 @@ struct config_group {
extern void config_group_init(struct config_group *group);
extern void config_group_init_type_name(struct config_group *group,
const char *name,
- struct config_item_type *type);
+ const struct config_item_type *type);
static inline struct config_group *to_config_group(struct config_item *item)
{
@@ -136,15 +120,19 @@ struct configfs_attribute {
ssize_t (*store)(struct config_item *, const char *, size_t);
};
-#define CONFIGFS_ATTR(_pfx, _name) \
+#define CONFIGFS_ATTR_PERM(_pfx, _name, _perm) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
- .ca_mode = S_IRUGO | S_IWUSR, \
+ .ca_mode = _perm, \
.ca_owner = THIS_MODULE, \
.show = _pfx##_name##_show, \
.store = _pfx##_name##_store, \
}
+#define CONFIGFS_ATTR(_pfx, _name) CONFIGFS_ATTR_PERM( \
+ _pfx, _name, S_IRUGO | S_IWUSR \
+)
+
#define CONFIGFS_ATTR_RO(_pfx, _name) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
@@ -220,8 +208,6 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
* group children. default_groups may coexist alongsize make_group() or
* make_item(), but if the group wishes to have only default_groups
* children (disallowing mkdir(2)), it need not provide either function.
- * If the group has commit(), it supports pending and committed (active)
- * items.
*/
struct configfs_item_operations {
void (*release)(struct config_item *);
@@ -232,9 +218,11 @@ struct configfs_item_operations {
struct configfs_group_operations {
struct config_item *(*make_item)(struct config_group *group, const char *name);
struct config_group *(*make_group)(struct config_group *group, const char *name);
- int (*commit_item)(struct config_item *item);
void (*disconnect_notify)(struct config_group *group, struct config_item *item);
void (*drop_item)(struct config_group *group, struct config_item *item);
+ bool (*is_visible)(struct config_item *item, struct configfs_attribute *attr, int n);
+ bool (*is_bin_visible)(struct config_item *item, struct configfs_bin_attribute *attr,
+ int n);
};
struct configfs_subsystem {
@@ -261,7 +249,7 @@ void configfs_remove_default_groups(struct config_group *group);
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
const char *name,
- struct config_item_type *item_type);
+ const struct config_item_type *item_type);
void configfs_unregister_default_group(struct config_group *group);
/* These functions can sleep and can alloc with GFP_KERNEL */
diff --git a/include/linux/connector.h b/include/linux/connector.h
index f8fe8637d771..70bc1160f3d8 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -1,28 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* connector.h
*
* 2004-2005 Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __CONNECTOR_H
#define __CONNECTOR_H
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/workqueue.h>
@@ -49,7 +36,7 @@ struct cn_callback_id {
struct cn_callback_entry {
struct list_head callback_entry;
- atomic_t refcnt;
+ refcount_t refcnt;
struct cn_queue_dev *pdev;
struct cn_callback_id id;
@@ -63,26 +50,91 @@ struct cn_dev {
u32 seq, groups;
struct sock *nls;
- void (*input) (struct sk_buff *skb);
struct cn_queue_dev *cbdev;
};
-int cn_add_callback(struct cb_id *id, const char *name,
+/**
+ * cn_add_callback() - Registers a new callback with the connector core.
+ *
+ * @id:		the connector user's unique identifier.
+ * It must be registered in connector.h for legal
+ * in-kernel users.
+ * @name: connector's callback symbolic name.
+ * @callback: connector's callback.
+ * parameters are %cn_msg and the sender's credentials
+ */
+int cn_add_callback(const struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
-void cn_del_callback(struct cb_id *);
-int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
+/**
+ * cn_del_callback() - Unregisters a callback with the connector core.
+ *
+ * @id:		the connector user's unique identifier.
+ */
+void cn_del_callback(const struct cb_id *id);
+
+
+/**
+ * cn_netlink_send_mult - Sends message to the specified groups.
+ *
+ * @msg: message header(with attached data).
+ * @len: Number of @msg to be sent.
+ * @portid: destination port.
+ * If non-zero the message will be sent to the given port,
+ * which should be set to the original sender.
+ * @group: destination group.
+ *		If @portid and @group are zero, then the appropriate group will
+ *		be searched through all registered connector users, and the
+ *		message will be delivered to the group which was created
+ *		for the user with the same ID as in @msg.
+ *		If @group is not zero, then the message will be delivered
+ *		to the specified group.
+ * @gfp_mask: GFP mask.
+ * @filter: Filter function to be used at netlink layer.
+ * @filter_data: Filter data to be supplied to the filter function
+ *
+ * It can be safely called from softirq context, but may silently
+ * fail under strong memory pressure.
+ *
+ * If there are no listeners for the given group, %-ESRCH can be returned.
+ */
+int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid,
+ u32 group, gfp_t gfp_mask,
+ netlink_filter_fn filter,
+ void *filter_data);
+
+/**
+ * cn_netlink_send - Sends message to the specified groups.
+ *
+ * @msg: message header(with attached data).
+ * @portid: destination port.
+ * If non-zero the message will be sent to the given port,
+ * which should be set to the original sender.
+ * @group: destination group.
+ *		If @portid and @group are zero, then the appropriate group will
+ *		be searched through all registered connector users, and the
+ *		message will be delivered to the group which was created
+ *		for the user with the same ID as in @msg.
+ *		If @group is not zero, then the message will be delivered
+ *		to the specified group.
+ * @gfp_mask: GFP mask.
+ *
+ * It can be safely called from softirq context, but may silently
+ * fail under strong memory pressure.
+ *
+ * If there are no listeners for the given group, %-ESRCH can be returned.
+ */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
- struct cb_id *id,
+ const struct cb_id *id,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
-void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id);
void cn_queue_release_callback(struct cn_callback_entry *);
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
-int cn_cb_equal(struct cb_id *, struct cb_id *);
+int cn_cb_equal(const struct cb_id *, const struct cb_id *);
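+
+/*
+ * A minimal registration sketch ('my_cb' and 'my_user' are placeholders;
+ * the id values must be registered in connector.h for legal in-kernel
+ * users):
+ *
+ *   static const struct cb_id my_id = { .idx = CN_IDX_PROC, .val = CN_VAL_PROC };
+ *
+ *   static void my_cb(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+ *   {
+ *           ...
+ *   }
+ *
+ *   cn_add_callback(&my_id, "my_user", my_cb);
+ */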
#endif /* __CONNECTOR_H */
diff --git a/include/linux/console.h b/include/linux/console.h
index b8920a031a3e..fc9f5c5c1b04 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -14,78 +14,117 @@
#ifndef _LINUX_CONSOLE_H_
#define _LINUX_CONSOLE_H_ 1
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/irq_work.h>
+#include <linux/rculist.h>
+#include <linux/rcuwait.h>
+#include <linux/smp.h>
#include <linux/types.h>
+#include <linux/vesa.h>
struct vc_data;
struct console_font_op;
struct console_font;
struct module;
struct tty_struct;
-
-/*
- * this is what the terminal answers to a ESC-Z or csi0c query.
- */
-#define VT100ID "\033[?1;2c"
-#define VT102ID "\033[?6c"
+struct notifier_block;
enum con_scroll {
SM_UP,
SM_DOWN,
};
+enum vc_intensity;
+
/**
* struct consw - callbacks for consoles
*
+ * @owner: the module to get references of when this console is used
+ * @con_startup: set up the console and return its name (like VGA, EGA, ...)
+ * @con_init: initialize the console on @vc. @init is true for the very first
+ * call on this @vc.
+ * @con_deinit: deinitialize the console from @vc.
+ * @con_clear: erase @count characters at [@x, @y] on @vc. @count >= 1.
+ * @con_putc: emit one character with attributes @ca to [@x, @y] on @vc.
+ * (optional -- @con_putcs would be called instead)
+ * @con_putcs: emit @count characters with attributes @s to [@x, @y] on @vc.
+ * @con_cursor: enable/disable cursor depending on @enable
* @con_scroll: move lines from @top to @bottom in direction @dir by @lines.
* Return true if no generic handling should be done.
* Invoked by csi_M and printing to the console.
- * @con_set_palette: sets the palette of the console to @table (optional)
+ * @con_switch: notifier about the console switch; it is supposed to return
+ * true if a redraw is needed.
+ * @con_blank: blank/unblank the console. The target mode is passed in @blank.
+ * @mode_switch is set if changing from/to text/graphics. The hook
+ * is supposed to return true if a redraw is needed.
+ * @con_font_set: set console @vc font to @font with height @vpitch. @flags can
+ * be %KD_FONT_FLAG_DONT_RECALC. (optional)
+ * @con_font_get: fetch the current font on @vc of height @vpitch into @font.
+ * (optional)
+ * @con_font_default: set default font on @vc. @name can be %NULL or font name
+ * to search for. @font can be filled back. (optional)
+ * @con_resize: resize the @vc console to @width x @height. @from_user is true
+ * when this change comes from the user space.
+ * @con_set_palette: sets the palette of the console @vc to @table (optional)
* @con_scrolldelta: the contents of the console should be scrolled by @lines.
* Invoked by user. (optional)
+ * @con_set_origin: set origin (see &vc_data::vc_origin) of the @vc. If not
+ * provided or returns false, the origin is set to
+ * @vc->vc_screenbuf. (optional)
+ * @con_save_screen: save screen content into @vc->vc_screenbuf. Called e.g.
+ * upon entering graphics. (optional)
+ * @con_build_attr: build attributes based on @color, @intensity and other
+ * parameters. The result is used for both normal and erase
+ * characters. (optional)
+ * @con_invert_region: invert a region of length @count on @vc starting at @p.
+ * (optional)
+ * @con_debug_enter: prepare the console for the debugger. This includes, but
+ * is not limited to, unblanking the console, loading an
+ * appropriate palette, and allowing debugger generated output.
+ * (optional)
+ * @con_debug_leave: restore the console to its pre-debug state as closely as
+ * possible. (optional)
*/
struct consw {
struct module *owner;
const char *(*con_startup)(void);
- void (*con_init)(struct vc_data *, int);
- void (*con_deinit)(struct vc_data *);
- void (*con_clear)(struct vc_data *, int, int, int, int);
- void (*con_putc)(struct vc_data *, int, int, int);
- void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
- void (*con_cursor)(struct vc_data *, int);
- bool (*con_scroll)(struct vc_data *, unsigned int top,
+ void (*con_init)(struct vc_data *vc, bool init);
+ void (*con_deinit)(struct vc_data *vc);
+ void (*con_clear)(struct vc_data *vc, unsigned int y,
+ unsigned int x, unsigned int count);
+ void (*con_putc)(struct vc_data *vc, u16 ca, unsigned int y,
+ unsigned int x);
+ void (*con_putcs)(struct vc_data *vc, const u16 *s,
+ unsigned int count, unsigned int ypos,
+ unsigned int xpos);
+ void (*con_cursor)(struct vc_data *vc, bool enable);
+ bool (*con_scroll)(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
unsigned int lines);
- int (*con_switch)(struct vc_data *);
- int (*con_blank)(struct vc_data *, int, int);
- int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
- int (*con_font_get)(struct vc_data *, struct console_font *);
- int (*con_font_default)(struct vc_data *, struct console_font *, char *);
- int (*con_font_copy)(struct vc_data *, int);
- int (*con_resize)(struct vc_data *, unsigned int, unsigned int,
- unsigned int);
- void (*con_set_palette)(struct vc_data *,
+ bool (*con_switch)(struct vc_data *vc);
+ bool (*con_blank)(struct vc_data *vc, enum vesa_blank_mode blank,
+ bool mode_switch);
+ int (*con_font_set)(struct vc_data *vc,
+ const struct console_font *font,
+ unsigned int vpitch, unsigned int flags);
+ int (*con_font_get)(struct vc_data *vc, struct console_font *font,
+ unsigned int vpitch);
+ int (*con_font_default)(struct vc_data *vc,
+ struct console_font *font, const char *name);
+ int (*con_resize)(struct vc_data *vc, unsigned int width,
+ unsigned int height, bool from_user);
+ void (*con_set_palette)(struct vc_data *vc,
const unsigned char *table);
- void (*con_scrolldelta)(struct vc_data *, int lines);
- int (*con_set_origin)(struct vc_data *);
- void (*con_save_screen)(struct vc_data *);
- u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
- void (*con_invert_region)(struct vc_data *, u16 *, int);
- u16 *(*con_screen_pos)(struct vc_data *, int);
- unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
- /*
- * Flush the video console driver's scrollback buffer
- */
- void (*con_flush_scrollback)(struct vc_data *);
- /*
- * Prepare the console for the debugger. This includes, but is not
- * limited to, unblanking the console, loading an appropriate
- * palette, and allowing debugger generated output.
- */
- int (*con_debug_enter)(struct vc_data *);
- /*
- * Restore the console to its pre-debug state as closely as possible.
- */
- int (*con_debug_leave)(struct vc_data *);
+ void (*con_scrolldelta)(struct vc_data *vc, int lines);
+ bool (*con_set_origin)(struct vc_data *vc);
+ void (*con_save_screen)(struct vc_data *vc);
+ u8 (*con_build_attr)(struct vc_data *vc, u8 color,
+ enum vc_intensity intensity,
+ bool blink, bool underline, bool reverse, bool italic);
+ void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
+ void (*con_debug_enter)(struct vc_data *vc);
+ void (*con_debug_leave)(struct vc_data *vc);
};
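+
+/*
+ * An illustrative, minimal consw; the foo_* hooks are hypothetical, and
+ * hooks marked "optional" in the kernel-doc above may be left NULL:
+ *
+ *	static const struct consw foo_con = {
+ *		.owner		= THIS_MODULE,
+ *		.con_startup	= foo_startup,
+ *		.con_init	= foo_init,
+ *		.con_deinit	= foo_deinit,
+ *		.con_clear	= foo_clear,
+ *		.con_putcs	= foo_putcs,
+ *		.con_cursor	= foo_cursor,
+ *		.con_scroll	= foo_scroll,
+ *		.con_switch	= foo_switch,
+ *		.con_blank	= foo_blank,
+ *		.con_resize	= foo_resize,
+ *	};
+ */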
extern const struct consw *conswitchp;
@@ -93,84 +132,577 @@ extern const struct consw *conswitchp;
extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
-extern const struct consw prom_con; /* SPARC PROM console */
+
+struct screen_info;
+#ifdef CONFIG_VGA_CONSOLE
+void vgacon_register_screen(struct screen_info *si);
+#else
+static inline void vgacon_register_screen(struct screen_info *si) { }
+#endif
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
void give_up_console(const struct consw *sw);
-#ifdef CONFIG_HW_CONSOLE
-int con_debug_enter(struct vc_data *vc);
-int con_debug_leave(void);
+#ifdef CONFIG_VT
+void con_debug_enter(struct vc_data *vc);
+void con_debug_leave(void);
+#else
+static inline void con_debug_enter(struct vc_data *vc) { }
+static inline void con_debug_leave(void) { }
+#endif
+
+/*
+ * The interface for a console, or any other device that wants to capture
+ * console messages (printer driver?)
+ */
+
+/**
+ * enum cons_flags - General console flags
+ * @CON_PRINTBUFFER: Used by newly registered consoles to avoid duplicate
+ * output of messages that were already shown by boot
+ * consoles or read by userspace via syslog() syscall.
+ * @CON_CONSDEV: Indicates that the console driver is backing
+ * /dev/console.
+ * @CON_ENABLED: Indicates if a console is allowed to print records. If
+ * false, the console also will not advance to later
+ * records.
+ * @CON_BOOT: Marks the console driver as early console driver which
+ * is used during boot before the real driver becomes
+ * available. It will be automatically unregistered
+ * when the real console driver is registered unless
+ * "keep_bootcon" parameter is used.
+ * @CON_ANYTIME: A misnamed historical flag which tells the core code
+ * that the legacy @console::write callback can be invoked
+ * on a CPU which is marked OFFLINE. That is misleading as
+ * it suggests that there is no contextual limit for
+ * invoking the callback. The original motivation was
+ * readiness of the per-CPU areas.
+ * @CON_BRL: Indicates a braille device which is exempt from
+ * receiving the printk spam for obvious reasons.
+ * @CON_EXTENDED: The console supports the extended output format of
+ * /dev/kmesg which requires a larger output buffer.
+ * @CON_SUSPENDED: Indicates if a console is suspended. If true, the
+ * printing callbacks must not be called.
+ * @CON_NBCON: Console can operate outside of the legacy style console_lock
+ * constraints.
+ * @CON_NBCON_ATOMIC_UNSAFE: The write_atomic() callback is not safe and is
+ * therefore only used by nbcon_atomic_flush_unsafe().
+ */
+enum cons_flags {
+ CON_PRINTBUFFER = BIT(0),
+ CON_CONSDEV = BIT(1),
+ CON_ENABLED = BIT(2),
+ CON_BOOT = BIT(3),
+ CON_ANYTIME = BIT(4),
+ CON_BRL = BIT(5),
+ CON_EXTENDED = BIT(6),
+ CON_SUSPENDED = BIT(7),
+ CON_NBCON = BIT(8),
+ CON_NBCON_ATOMIC_UNSAFE = BIT(9),
+};
+
+/**
+ * struct nbcon_state - console state for nbcon consoles
+ * @atom: Compound of the state fields for atomic operations
+ *
+ * @req_prio: The priority of a handover request
+ * @prio: The priority of the current owner
+ * @unsafe: Console is busy in a non takeover region
+ * @unsafe_takeover: A hostile takeover in an unsafe state happened in the
+ * past. The console cannot be safe until re-initialized.
+ * @cpu: The CPU on which the owner runs
+ *
+ * Used for reading and preparing the value stored in the nbcon state
+ * variable @console::nbcon_state.
+ *
+ * The @prio and @req_prio fields are particularly important to allow
+ * spin-waiting to time out and give up without the risk of a waiter being
+ * assigned the lock after giving up.
+ */
+struct nbcon_state {
+ union {
+ unsigned int atom;
+ struct {
+ unsigned int prio : 2;
+ unsigned int req_prio : 2;
+ unsigned int unsafe : 1;
+ unsigned int unsafe_takeover : 1;
+ unsigned int cpu : 24;
+ };
+ };
+};
+
+/*
+ * The nbcon_state struct is used to easily create and interpret values that
+ * are stored in the @console::nbcon_state variable. Ensure this struct stays
+ * within the size boundaries of the atomic variable's underlying type in
+ * order to avoid any accidental truncation.
+ */
+static_assert(sizeof(struct nbcon_state) <= sizeof(int));
+
+/**
+ * enum nbcon_prio - console owner priority for nbcon consoles
+ * @NBCON_PRIO_NONE: Unused
+ * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
+ * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
+ * @NBCON_PRIO_PANIC: Panic output
+ * @NBCON_PRIO_MAX: The number of priority levels
+ *
+ * A higher priority context can take over the console when it is
+ * in the safe state. The final attempt to flush consoles in panic()
+ * can be allowed to do so even in an unsafe state (Hope and pray).
+ */
+enum nbcon_prio {
+ NBCON_PRIO_NONE = 0,
+ NBCON_PRIO_NORMAL,
+ NBCON_PRIO_EMERGENCY,
+ NBCON_PRIO_PANIC,
+ NBCON_PRIO_MAX,
+};
+
+struct console;
+struct printk_buffers;
+
+/**
+ * struct nbcon_context - Context for console acquire/release
+ * @console: The associated console
+ * @spinwait_max_us: Limit for spin-wait acquire
+ * @prio: Priority of the context
+ * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
+ * be used only with NBCON_PRIO_PANIC @prio. It
+ * might cause a system freeze when the console
+ * is used later.
+ * @backlog: Ringbuffer has pending records
+ * @pbufs: Pointer to the text buffer for this context
+ * @seq: The sequence number to print for this context
+ */
+struct nbcon_context {
+ /* members set by caller */
+ struct console *console;
+ unsigned int spinwait_max_us;
+ enum nbcon_prio prio;
+ unsigned int allow_unsafe_takeover : 1;
+
+ /* members set by emit */
+ unsigned int backlog : 1;
+
+ /* members set by acquire */
+ struct printk_buffers *pbufs;
+ u64 seq;
+};
+
+/**
+ * struct nbcon_write_context - Context handed to the nbcon write callbacks
+ * @ctxt: The core console context
+ * @outbuf: Pointer to the text buffer for output
+ * @len: Length to write
+ * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
+ */
+struct nbcon_write_context {
+ struct nbcon_context __private ctxt;
+ char *outbuf;
+ unsigned int len;
+ bool unsafe_takeover;
+};
+
+/**
+ * struct console - The console descriptor structure
+ * @name: The name of the console driver
+ * @write: Legacy write callback to output messages (Optional)
+ * @read: Read callback for console input (Optional)
+ * @device: The underlying TTY device driver (Optional)
+ * @unblank: Callback to unblank the console (Optional)
+ * @setup: Callback for initializing the console (Optional)
+ * @exit: Callback for teardown of the console (Optional)
+ * @match: Callback for matching a console (Optional)
+ * @flags: Console flags. See enum cons_flags
+ * @index: Console index, e.g. port number
+ * @cflag: TTY control mode flags
+ * @ispeed: TTY input speed
+ * @ospeed: TTY output speed
+ * @seq: Sequence number of the next ringbuffer record to print
+ * @dropped: Number of unreported dropped ringbuffer records
+ * @data: Driver private data
+ * @node: hlist node for the console list
+ *
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @nbcon_device_ctxt: Context available for non-printing operations
+ * @nbcon_prev_seq: Seq num the previous nbcon owner was assigned to print
+ * @pbufs: Pointer to nbcon private buffer
+ * @kthread: Printer kthread for this console
+ * @rcuwait: RCU-safe wait object for @kthread waking
+ * @irq_work: Defer @kthread waking to IRQ work context
+ */
+struct console {
+ char name[16];
+ void (*write)(struct console *co, const char *s, unsigned int count);
+ int (*read)(struct console *co, char *s, unsigned int count);
+ struct tty_driver *(*device)(struct console *co, int *index);
+ void (*unblank)(void);
+ int (*setup)(struct console *co, char *options);
+ int (*exit)(struct console *co);
+ int (*match)(struct console *co, char *name, int idx, char *options);
+ short flags;
+ short index;
+ int cflag;
+ uint ispeed;
+ uint ospeed;
+ u64 seq;
+ unsigned long dropped;
+ void *data;
+ struct hlist_node node;
+
+ /* nbcon console specific members */
+
+ /**
+ * @write_atomic:
+ *
+ * NBCON callback to write out text in any context. (Optional)
+ *
+ * This callback is called with the console already acquired. However,
+ * a higher priority context is allowed to take it over by default.
+ *
+ * The callback must call nbcon_enter_unsafe() and nbcon_exit_unsafe()
+ * around any code where the takeover is not safe, for example, when
+ * manipulating the serial port registers.
+ *
+ * nbcon_enter_unsafe() will fail if the context has lost the console
+ * ownership in the meantime. In this case, the callback is no longer
+ * allowed to go forward. It must back out immediately and carefully.
+ * The buffer content is also no longer trusted since it no longer
+ * belongs to the context.
+ *
+ * The callback should allow the takeover whenever it is safe. It
+ * increases the chance to see messages when the system is in trouble.
+ * If the driver must reacquire ownership in order to finalize or
+ * revert hardware changes, nbcon_reacquire_nobuf() can be used.
+ * However, on reacquire the buffer content is no longer available. A
+ * reacquire cannot be used to resume printing.
+ *
+ * The callback can be called from any context (including NMI).
+ * Therefore it must avoid usage of any locking and instead rely
+ * on the console ownership for synchronization.
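+	 *
+	 * An illustrative sketch of this pattern appears right after this
+	 * structure definition.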
+ */
+ void (*write_atomic)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @write_thread:
+ *
+ * NBCON callback to write out text in task context.
+ *
+ * This callback must be called only in task context with both
+ * device_lock() and the nbcon console acquired with
+ * NBCON_PRIO_NORMAL.
+ *
+	 * The same rules for console ownership verification and
+	 * unsafe-section handling apply as with write_atomic().
+ *
+ * The console ownership handling is necessary for synchronization
+ * against write_atomic() which is synchronized only via the context.
+ *
+ * The device_lock() provides the primary serialization for operations
+ * on the device. It might be as relaxed (mutex)[*] or as tight
+ * (disabled preemption and interrupts) as needed. It allows
+ * the kthread to operate in the least restrictive mode[**].
+ *
+	 * [*] Standalone nbcon_context_try_acquire() is not safe with
+	 *     preemption enabled, see nbcon_owner_matches(). But it can
+	 *     be safe when always called in preemptible context under
+	 *     the device_lock().
+ *
+ * [**] The device_lock() makes sure that nbcon_context_try_acquire()
+	 *      would never need to spin, which is especially important with
+ * PREEMPT_RT.
+ */
+ void (*write_thread)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @device_lock:
+ *
+ * NBCON callback to begin synchronization with driver code.
+ *
+ * Console drivers typically must deal with access to the hardware
+ * via user input/output (such as an interactive login shell) and
+ * output of kernel messages via printk() calls. This callback is
+ * called by the printk-subsystem whenever it needs to synchronize
+ * with hardware access by the driver. It should be implemented to
+ * use whatever synchronization mechanism the driver is using for
+ * itself (for example, the port lock for uart serial consoles).
+ *
+ * The callback is always called from task context. It may use any
+ * synchronization method required by the driver.
+ *
+ * IMPORTANT: The callback MUST disable migration. The console driver
+ * may be using a synchronization mechanism that already takes
+ * care of this (such as spinlocks). Otherwise this function must
+ * explicitly call migrate_disable().
+ *
+ * The flags argument is provided as a convenience to the driver. It
+ * will be passed again to device_unlock(). It can be ignored if the
+ * driver does not need it.
+ */
+ void (*device_lock)(struct console *con, unsigned long *flags);
+
+ /**
+ * @device_unlock:
+ *
+ * NBCON callback to finish synchronization with driver code.
+ *
+ * It is the counterpart to device_lock().
+ *
+ * This callback is always called from task context. It must
+ * appropriately re-enable migration (depending on how device_lock()
+ * disabled migration).
+ *
+ * The flags argument is the value of the same variable that was
+ * passed to device_lock().
+ */
+ void (*device_unlock)(struct console *con, unsigned long flags);
+
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct nbcon_context __private nbcon_device_ctxt;
+ atomic_long_t __private nbcon_prev_seq;
+
+ struct printk_buffers *pbufs;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
+ struct irq_work irq_work;
+};
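+
+/*
+ * An illustrative sketch of the @write_atomic ownership rules above; the
+ * foo_* names are hypothetical. foo_hw_lock_irqsave() is assumed to be a
+ * spinlock-style lock, which also disables migration as @device_lock
+ * requires:
+ *
+ *	static void foo_write_atomic(struct console *con,
+ *				     struct nbcon_write_context *wctxt)
+ *	{
+ *		if (!nbcon_enter_unsafe(wctxt))
+ *			return;
+ *
+ *		foo_emit_chars(con, wctxt->outbuf, wctxt->len);
+ *
+ *		nbcon_exit_unsafe(wctxt);
+ *	}
+ *
+ *	static void foo_device_lock(struct console *con, unsigned long *flags)
+ *	{
+ *		foo_hw_lock_irqsave(con->data, *flags);
+ *	}
+ *
+ *	static void foo_device_unlock(struct console *con, unsigned long flags)
+ *	{
+ *		foo_hw_unlock_irqrestore(con->data, flags);
+ *	}
+ */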
+
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_console_list_lock_held(void);
#else
-static inline int con_debug_enter(struct vc_data *vc)
+static inline void lockdep_assert_console_list_lock_held(void)
{
- return 0;
}
-static inline int con_debug_leave(void)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern bool console_srcu_read_lock_is_held(void);
+#else
+static inline bool console_srcu_read_lock_is_held(void)
{
- return 0;
+	return true;
}
#endif
-/* cursor */
-#define CM_DRAW (1)
-#define CM_ERASE (2)
-#define CM_MOVE (3)
+extern int console_srcu_read_lock(void);
+extern void console_srcu_read_unlock(int cookie);
+
+extern void console_list_lock(void) __acquires(console_mutex);
+extern void console_list_unlock(void) __releases(console_mutex);
+
+extern struct hlist_head console_list;
+
+/**
+ * console_srcu_read_flags - Locklessly read flags of a possibly registered
+ * console
+ * @con: struct console pointer of console to read flags from
+ *
+ * Locklessly reading @con->flags provides a consistent read value because
+ * there is at most one CPU modifying @con->flags and that CPU is using only
+ * read-modify-write operations to do so.
+ *
+ * Requires console_srcu_read_lock to be held, which implies that @con might
+ * be a registered console. The purpose of holding console_srcu_read_lock is
+ * to guarantee that the console state is valid (CON_SUSPENDED/CON_ENABLED)
+ * and that no exit/cleanup routines will run if the console is currently
+ * undergoing unregistration.
+ *
+ * If the caller is holding the console_list_lock or it is _certain_ that
+ * @con is not and will not become registered, the caller may read
+ * @con->flags directly instead.
+ *
+ * Context: Any context.
+ * Return: The current value of the @con->flags field.
+ */
+static inline short console_srcu_read_flags(const struct console *con)
+{
+ WARN_ON_ONCE(!console_srcu_read_lock_is_held());
+
+ /*
+ * The READ_ONCE() matches the WRITE_ONCE() when @flags are modified
+ * for registered consoles with console_srcu_write_flags().
+ */
+ return data_race(READ_ONCE(con->flags));
+}
+
+/**
+ * console_srcu_write_flags - Write flags for a registered console
+ * @con: struct console pointer of console to write flags to
+ * @flags: new flags value to write
+ *
+ * Only use this function to write flags for registered consoles. It
+ * requires holding the console_list_lock.
+ *
+ * Context: Any context.
+ */
+static inline void console_srcu_write_flags(struct console *con, short flags)
+{
+ lockdep_assert_console_list_lock_held();
+
+ /* This matches the READ_ONCE() in console_srcu_read_flags(). */
+ WRITE_ONCE(con->flags, flags);
+}
+
+/* Variant of console_is_registered() when the console_list_lock is held. */
+static inline bool console_is_registered_locked(const struct console *con)
+{
+ lockdep_assert_console_list_lock_held();
+ return !hlist_unhashed(&con->node);
+}
/*
- * The interface for a console, or any other device that wants to capture
- * console messages (printer driver?)
+ * console_is_registered - Check if the console is registered
+ * @con: struct console pointer of console to check
+ *
+ * Context: Process context. May sleep while acquiring console list lock.
+ * Return: true if the console is in the console list, otherwise false.
*
- * If a console driver is marked CON_BOOT then it will be auto-unregistered
- * when the first real console is registered. This is for early-printk drivers.
+ * If false is returned for a console that was previously registered, it
+ * can be assumed that the console's unregistration is fully completed,
+ * including the exit() callback after console list removal.
*/
+static inline bool console_is_registered(const struct console *con)
+{
+ bool ret;
-#define CON_PRINTBUFFER (1)
-#define CON_CONSDEV (2) /* Last on the command line */
-#define CON_ENABLED (4)
-#define CON_BOOT (8)
-#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
-#define CON_BRL (32) /* Used for a braille device */
-#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
+ console_list_lock();
+ ret = console_is_registered_locked(con);
+ console_list_unlock();
+ return ret;
+}
-struct console {
- char name[16];
- void (*write)(struct console *, const char *, unsigned);
- int (*read)(struct console *, char *, unsigned);
- struct tty_driver *(*device)(struct console *, int *);
- void (*unblank)(void);
- int (*setup)(struct console *, char *);
- int (*match)(struct console *, char *name, int idx, char *options);
- short flags;
- short index;
- int cflag;
- void *data;
- struct console *next;
-};
+/**
+ * for_each_console_srcu() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * Although SRCU guarantees the console list will be consistent, the
+ * struct console fields may be updated by other CPUs while iterating.
+ *
+ * Requires console_srcu_read_lock to be held. Can be invoked from
+ * any context.
+ */
+#define for_each_console_srcu(con) \
+ hlist_for_each_entry_srcu(con, &console_list, node, \
+ console_srcu_read_lock_is_held())
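+
+/*
+ * A typical reader-side pattern (illustrative):
+ *
+ *	struct console *con;
+ *	int cookie;
+ *
+ *	cookie = console_srcu_read_lock();
+ *	for_each_console_srcu(con) {
+ *		short flags = console_srcu_read_flags(con);
+ *
+ *		if (!(flags & CON_ENABLED))
+ *			continue;
+ *	}
+ *	console_srcu_read_unlock(cookie);
+ */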
+
+/**
+ * for_each_console() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * The console list and the &console.flags are immutable while iterating.
+ *
+ * Requires console_list_lock to be held.
+ */
+#define for_each_console(con) \
+ lockdep_assert_console_list_lock_held(); \
+ hlist_for_each_entry(con, &console_list, node)
+
+#ifdef CONFIG_PRINTK
+extern void nbcon_cpu_emergency_enter(void);
+extern void nbcon_cpu_emergency_exit(void);
+extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+ char *buf, unsigned int len);
+extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
+extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
+extern bool nbcon_allow_unsafe_takeover(void);
+extern bool nbcon_kdb_try_acquire(struct console *con,
+ struct nbcon_write_context *wctxt);
+extern void nbcon_kdb_release(struct nbcon_write_context *wctxt);
/*
- * for_each_console() allows you to iterate on each console
+ * Check if the given console is currently capable and allowed to print
+ * records. Note that this function does not consider the current context,
+ * which can also play a role in deciding if @con can be used to print
+ * records.
*/
-#define for_each_console(con) \
- for (con = console_drivers; con != NULL; con = con->next)
+static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
+{
+ if (!(flags & CON_ENABLED))
+ return false;
+
+ if ((flags & CON_SUSPENDED))
+ return false;
+
+ if (flags & CON_NBCON) {
+ if (use_atomic) {
+ /* The write_atomic() callback is optional. */
+ if (!con->write_atomic)
+ return false;
+
+ /*
+ * An unsafe write_atomic() callback is only usable
+ * when unsafe takeovers are allowed.
+ */
+ if ((flags & CON_NBCON_ATOMIC_UNSAFE) && !nbcon_allow_unsafe_takeover())
+ return false;
+ }
+
+ /*
+ * For the !use_atomic case, @printk_kthreads_running is not
+ * checked because the write_thread() callback is also used
+ * via the legacy loop when the printer threads are not
+ * available.
+ */
+ } else {
+ if (!con->write)
+ return false;
+ }
+
+ /*
+ * Console drivers may assume that per-cpu resources have been
+ * allocated. So unless they're explicitly marked as being able to
+ * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+ */
+ if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
+ return false;
+
+ return true;
+}
+
+#else
+static inline void nbcon_cpu_emergency_enter(void) { }
+static inline void nbcon_cpu_emergency_exit(void) { }
+static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+ char *buf, unsigned int len) { }
+static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
+static inline bool nbcon_kdb_try_acquire(struct console *con,
+ struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_kdb_release(struct nbcon_write_context *wctxt) { }
+static inline bool console_is_usable(struct console *con, short flags,
+ bool use_atomic) { return false; }
+#endif
extern int console_set_on_cmdline;
extern struct console *early_console;
-extern int add_preferred_console(char *name, int idx, char *options);
+enum con_flush_mode {
+ CONSOLE_FLUSH_PENDING,
+ CONSOLE_REPLAY_ALL,
+};
+
+extern int add_preferred_console(const char *name, const short idx, char *options);
+extern void console_force_preferred_locked(struct console *con);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
-extern struct console *console_drivers;
extern void console_lock(void);
extern int console_trylock(void);
extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
-extern void console_flush_on_panic(void);
+extern void console_flush_on_panic(enum con_flush_mode mode);
extern struct tty_driver *console_device(int *);
-extern void console_stop(struct console *);
-extern void console_start(struct console *);
+extern void console_suspend(struct console *);
+extern void console_resume(struct console *);
extern int is_console_locked(void);
extern int braille_register_console(struct console *, int index,
char *console_options, char *braille_options);
@@ -184,34 +716,30 @@ static inline void console_sysfs_notify(void)
extern bool console_suspend_enabled;
/* Suspend and resume console messages over PM events */
-extern void suspend_console(void);
-extern void resume_console(void);
+extern void console_suspend_all(void);
+extern void console_resume_all(void);
int mda_console_init(void);
-void prom_con_init(void);
void vcs_make_sysfs(int index);
void vcs_remove_sysfs(int index);
/* Some debug stub to catch some of the obvious races in the VT code */
-#if 1
-#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress)
-#else
-#define WARN_CONSOLE_UNLOCKED()
-#endif
-
-/* VESA Blanking Levels */
-#define VESA_NO_BLANKING 0
-#define VESA_VSYNC_SUSPEND 1
-#define VESA_HSYNC_SUSPEND 2
-#define VESA_POWERDOWN 3
+#define WARN_CONSOLE_UNLOCKED() \
+ WARN_ON(!atomic_read(&ignore_console_lock_warning) && \
+ !is_console_locked() && !oops_in_progress)
+/*
+ * Increment ignore_console_lock_warning if you need to quiet
+ * WARN_CONSOLE_UNLOCKED() for debugging purposes.
+ */
+extern atomic_t ignore_console_lock_warning;
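+
+/*
+ * For example (illustrative; do_debug_thing() is a placeholder), around a
+ * section that legitimately runs without holding console_lock:
+ *
+ *	atomic_inc(&ignore_console_lock_warning);
+ *	do_debug_thing();
+ *	atomic_dec(&ignore_console_lock_warning);
+ */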
-#ifdef CONFIG_VGA_CONSOLE
-extern bool vgacon_text_force(void);
-#else
-static inline bool vgacon_text_force(void) { return false; }
-#endif
+DEFINE_LOCK_GUARD_0(console_lock, console_lock(), console_unlock());
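+
+/*
+ * The guard enables scope-based locking, e.g.:
+ *
+ *	guard(console_lock)();
+ */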
extern void console_init(void);
+/* For deferred console takeover */
+void dummycon_register_output_notifier(struct notifier_block *nb);
+void dummycon_unregister_output_notifier(struct notifier_block *nb);
+
#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 6fd3c908a340..13b35637bd5a 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* console_struct.h
*
@@ -16,10 +17,46 @@
#include <linux/vt.h>
#include <linux/workqueue.h>
-struct vt_struct;
-struct uni_pagedir;
+struct uni_pagedict;
#define NPAR 16
+#define VC_TABSTOPS_COUNT 256U
+
+enum vc_intensity {
+ VCI_HALF_BRIGHT,
+ VCI_NORMAL,
+ VCI_BOLD,
+ VCI_MASK = 0x3,
+};
+
+/**
+ * struct vc_state -- state of a VC
+ * @x: cursor's x-position
+ * @y: cursor's y-position
+ * @color: foreground & background colors
+ * @Gx_charset: the translation maps the G0/G1 slots are set to
+ *	(like GRAF_MAP or LAT1_MAP)
+ * @charset: what character set to use (0=G0 or 1=G1)
+ * @intensity: see enum vc_intensity for values
+ * @reverse: reversed foreground/background colors
+ *
+ * These members are defined separately from struct vc_data as we save &
+ * restore them at times.
+ */
+struct vc_state {
+ unsigned int x, y;
+
+ unsigned char color;
+
+ unsigned char Gx_charset[2];
+ unsigned int charset : 1;
+
+ /* attribute flags */
+ enum vc_intensity intensity;
+ bool italic;
+ bool underline;
+ bool blink;
+ bool reverse;
+};
/*
* Example: vc_data of a console that was scrolled 3 lines down.
@@ -56,11 +93,14 @@ struct uni_pagedir;
struct vc_data {
struct tty_port port; /* Upper level data */
+ struct vc_state state, saved_state;
+
unsigned short vc_num; /* Console number */
unsigned int vc_cols; /* [#] Console size */
unsigned int vc_rows;
unsigned int vc_size_row; /* Bytes per row */
unsigned int vc_scan_lines; /* # of scan lines */
+ unsigned int vc_cell_height; /* CRTC character cell height */
unsigned long vc_origin; /* [!] Start of real screen */
unsigned long vc_scr_end; /* [!] End of real screen */
unsigned long vc_visible_origin; /* [!] Top of visible window */
@@ -72,8 +112,6 @@ struct vc_data {
/* attributes for all characters on screen */
unsigned char vc_attr; /* Current attributes */
unsigned char vc_def_color; /* Default colors */
- unsigned char vc_color; /* Foreground & background */
- unsigned char vc_s_color; /* Saved foreground & background */
unsigned char vc_ulcolor; /* Color for underline mode */
unsigned char vc_itcolor;
unsigned char vc_halfcolor; /* Color for half intensity mode */
@@ -81,8 +119,6 @@ struct vc_data {
unsigned int vc_cursor_type;
unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */
unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */
- unsigned int vc_x, vc_y; /* Cursor position */
- unsigned int vc_saved_x, vc_saved_y;
unsigned long vc_pos; /* Cursor address */
/* fonts */
unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
@@ -97,8 +133,6 @@ struct vc_data {
int vt_newvt;
wait_queue_head_t paste_wait;
/* mode flags */
- unsigned int vc_charset : 1; /* Character set G0 / G1 */
- unsigned int vc_s_charset : 1; /* Saved character set */
unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */
unsigned int vc_toggle_meta : 1; /* Toggle high bit? */
unsigned int vc_decscnm : 1; /* Screen Mode */
@@ -106,40 +140,28 @@ struct vc_data {
unsigned int vc_decawm : 1; /* Autowrap Mode */
unsigned int vc_deccm : 1; /* Cursor Visible */
unsigned int vc_decim : 1; /* Insert Mode */
- /* attribute flags */
- unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */
- unsigned int vc_italic:1;
- unsigned int vc_underline : 1;
- unsigned int vc_blink : 1;
- unsigned int vc_reverse : 1;
- unsigned int vc_s_intensity : 2; /* saved rendition */
- unsigned int vc_s_italic:1;
- unsigned int vc_s_underline : 1;
- unsigned int vc_s_blink : 1;
- unsigned int vc_s_reverse : 1;
/* misc */
- unsigned int vc_ques : 1;
+ unsigned int vc_priv : 3;
unsigned int vc_need_wrap : 1;
unsigned int vc_can_do_color : 1;
unsigned int vc_report_mouse : 2;
+ unsigned int vc_bracketed_paste : 1;
unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */
unsigned char vc_utf_count;
int vc_utf_char;
- unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */
+ DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. */
unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */
unsigned short * vc_translate;
- unsigned char vc_G0_charset;
- unsigned char vc_G1_charset;
- unsigned char vc_saved_G0;
- unsigned char vc_saved_G1;
- unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
unsigned short vc_cur_blink_ms; /* Cursor blink duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
- struct uni_pagedir *vc_uni_pagedir;
- struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
- bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
+ struct uni_pagedict *uni_pagedict;
+ struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */
+ u32 **vc_uni_lines; /* unicode screen content */
+ u16 *vc_saved_screen;
+ unsigned int vc_saved_cols;
+ unsigned int vc_saved_rows;
/* additional information is in vt_kern.h */
};
@@ -147,29 +169,31 @@ struct vc {
struct vc_data *d;
struct work_struct SAK_work;
- /* might add scrmem, vt_struct, kbd at some time,
- to have everything in one place - the disadvantage
- would be that vc_cons etc can no longer be static */
+ /* might add scrmem, kbd at some time,
+ to have everything in one place */
};
extern struct vc vc_cons [MAX_NR_CONSOLES];
extern void vc_SAK(struct work_struct *work);
-#define CUR_DEF 0
-#define CUR_NONE 1
-#define CUR_UNDERLINE 2
-#define CUR_LOWER_THIRD 3
-#define CUR_LOWER_HALF 4
-#define CUR_TWO_THIRDS 5
-#define CUR_BLOCK 6
-#define CUR_HWMASK 0x0f
-#define CUR_SWMASK 0xfff0
-
-#define CUR_DEFAULT CUR_UNDERLINE
-
-static inline bool con_is_visible(const struct vc_data *vc)
-{
- return *vc->vc_display_fg == vc;
-}
+#define CUR_MAKE(size, change, set) ((size) | ((change) << 8) | \
+ ((set) << 16))
+#define CUR_SIZE(c) ((c) & 0x00000f)
+# define CUR_DEF 0
+# define CUR_NONE 1
+# define CUR_UNDERLINE 2
+# define CUR_LOWER_THIRD 3
+# define CUR_LOWER_HALF 4
+# define CUR_TWO_THIRDS 5
+# define CUR_BLOCK 6
+#define CUR_SW 0x000010
+#define CUR_ALWAYS_BG 0x000020
+#define CUR_INVERT_FG_BG 0x000040
+#define CUR_FG 0x000700
+#define CUR_BG 0x007000
+#define CUR_CHANGE(c) ((c) & 0x00ff00)
+#define CUR_SET(c) (((c) & 0xff0000) >> 8)
+
+bool con_is_visible(const struct vc_data *vc);
#endif /* _LINUX_CONSOLE_STRUCT_H */
diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h
index c4811da1338b..6180b803795c 100644
--- a/include/linux/consolemap.h
+++ b/include/linux/consolemap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* consolemap.h
*
@@ -6,29 +7,80 @@
#ifndef __LINUX_CONSOLEMAP_H__
#define __LINUX_CONSOLEMAP_H__
-#define LAT1_MAP 0
-#define GRAF_MAP 1
-#define IBMPC_MAP 2
-#define USER_MAP 3
+enum translation_map {
+ LAT1_MAP,
+ GRAF_MAP,
+ IBMPC_MAP,
+ USER_MAP,
+
+ FIRST_MAP = LAT1_MAP,
+ LAST_MAP = USER_MAP,
+};
#include <linux/types.h>
-#ifdef CONFIG_CONSOLE_TRANSLATIONS
struct vc_data;
-extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode);
-extern unsigned short *set_translate(int m, struct vc_data *vc);
-extern int conv_uni_to_pc(struct vc_data *conp, long ucs);
-extern u32 conv_8bit_to_uni(unsigned char c);
-extern int conv_uni_to_8bit(u32 uni);
+#ifdef CONFIG_CONSOLE_TRANSLATIONS
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode);
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc);
+int conv_uni_to_pc(struct vc_data *conp, long ucs);
+u32 conv_8bit_to_uni(unsigned char c);
+int conv_uni_to_8bit(u32 uni);
void console_map_init(void);
+bool ucs_is_double_width(uint32_t cp);
+bool ucs_is_zero_width(uint32_t cp);
+u32 ucs_recompose(u32 base, u32 mark);
+u32 ucs_get_fallback(u32 cp);
#else
-#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph)
-#define set_translate(m, vc) ((unsigned short *)NULL)
-#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs))
-#define conv_8bit_to_uni(c) ((uint32_t)(c))
-#define conv_uni_to_8bit(c) ((int) ((c) & 0xff))
-#define console_map_init(c) do { ; } while (0)
+static inline u16 inverse_translate(const struct vc_data *conp, u16 glyph,
+ bool use_unicode)
+{
+ return glyph;
+}
+
+static inline unsigned short *set_translate(enum translation_map m,
+ struct vc_data *vc)
+{
+ return NULL;
+}
+
+static inline int conv_uni_to_pc(struct vc_data *conp, long ucs)
+{
+ return ucs > 0xff ? -1 : ucs;
+}
+
+static inline u32 conv_8bit_to_uni(unsigned char c)
+{
+ return c;
+}
+
+static inline int conv_uni_to_8bit(u32 uni)
+{
+ return uni & 0xff;
+}
+
+static inline void console_map_init(void) { }
+
+static inline bool ucs_is_double_width(uint32_t cp)
+{
+ return false;
+}
+
+static inline bool ucs_is_zero_width(uint32_t cp)
+{
+ return false;
+}
+
+static inline u32 ucs_recompose(u32 base, u32 mark)
+{
+ return 0;
+}
+
+static inline u32 ucs_get_fallback(u32 cp)
+{
+ return 0;
+}
#endif /* CONFIG_CONSOLE_TRANSLATIONS */
#endif /* __LINUX_CONSOLEMAP_H__ */
diff --git a/include/linux/const.h b/include/linux/const.h
new file mode 100644
index 000000000000..81b8aae5a855
--- /dev/null
+++ b/include/linux/const.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_CONST_H
+#define _LINUX_CONST_H
+
+#include <vdso/const.h>
+
+#endif /* _LINUX_CONST_H */
diff --git a/include/linux/container.h b/include/linux/container.h
index 3c03e6fd2035..dd00cc918a92 100644
--- a/include/linux/container.h
+++ b/include/linux/container.h
@@ -1,18 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Definitions for container bus type.
*
* Copyright (C) 2013, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#ifndef _LINUX_CONTAINER_H
+#define _LINUX_CONTAINER_H
+
#include <linux/device.h>
/* drivers/base/power/container.c */
-extern struct bus_type container_subsys;
+extern const struct bus_type container_subsys;
struct container_dev {
struct device dev;
@@ -23,3 +23,5 @@ static inline struct container_dev *to_container_dev(struct device *dev)
{
return container_of(dev, struct container_dev, dev);
}
+
+#endif /* _LINUX_CONTAINER_H */
diff --git a/include/linux/container_of.h b/include/linux/container_of.h
new file mode 100644
index 000000000000..1f6ebf27d962
--- /dev/null
+++ b/include/linux/container_of.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTAINER_OF_H
+#define _LINUX_CONTAINER_OF_H
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+
+#define typeof_member(T, m) typeof(((T*)0)->m)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * WARNING: any const qualifier of @ptr is lost.
+ * Prefer container_of_const() in new code.
+ */
+#define container_of(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+ __same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type *)(__mptr - offsetof(type, member))); })
+
+/**
+ * container_of_const - cast a member of a structure out to the containing
+ * structure and preserve the const-ness of the pointer
+ * @ptr: the pointer to the member
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * Always prefer container_of_const() instead of container_of() in new code.
+ */
+#define container_of_const(ptr, type, member) \
+ _Generic(ptr, \
+ const typeof(*(ptr)) *: ((const type *)container_of(ptr, type, member)),\
+ default: ((type *)container_of(ptr, type, member)) \
+ )
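+
+/*
+ * Example (illustrative):
+ *
+ *	struct foo {
+ *		int a;
+ *		struct list_head node;
+ *	};
+ *
+ *	static struct foo *node_to_foo(struct list_head *n)
+ *	{
+ *		return container_of(n, struct foo, node);
+ *	}
+ */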
+
+#endif /* _LINUX_CONTAINER_OF_H */
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index c78fc27418f2..af9fe87a0922 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -1,174 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H
#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
+#include <linux/instrumentation.h>
+
#include <asm/ptrace.h>
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void context_tracking_cpu_set(int cpu);
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern void ct_cpu_track_user(int cpu);
/* Called with interrupts disabled. */
-extern void __context_tracking_enter(enum ctx_state state);
-extern void __context_tracking_exit(enum ctx_state state);
+extern void __ct_user_enter(enum ctx_state state);
+extern void __ct_user_exit(enum ctx_state state);
+
+extern void ct_user_enter(enum ctx_state state);
+extern void ct_user_exit(enum ctx_state state);
-extern void context_tracking_enter(enum ctx_state state);
-extern void context_tracking_exit(enum ctx_state state);
-extern void context_tracking_user_enter(void);
-extern void context_tracking_user_exit(void);
+extern void user_enter_callable(void);
+extern void user_exit_callable(void);
static inline void user_enter(void)
{
- if (context_tracking_is_enabled())
- context_tracking_enter(CONTEXT_USER);
+ if (context_tracking_enabled())
+ ct_user_enter(CT_STATE_USER);
}
static inline void user_exit(void)
{
- if (context_tracking_is_enabled())
- context_tracking_exit(CONTEXT_USER);
+ if (context_tracking_enabled())
+ ct_user_exit(CT_STATE_USER);
}
/* Called with interrupts disabled. */
-static inline void user_enter_irqoff(void)
+static __always_inline void user_enter_irqoff(void)
{
- if (context_tracking_is_enabled())
- __context_tracking_enter(CONTEXT_USER);
+ if (context_tracking_enabled())
+ __ct_user_enter(CT_STATE_USER);
}
-static inline void user_exit_irqoff(void)
+static __always_inline void user_exit_irqoff(void)
{
- if (context_tracking_is_enabled())
- __context_tracking_exit(CONTEXT_USER);
+ if (context_tracking_enabled())
+ __ct_user_exit(CT_STATE_USER);
}
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!context_tracking_is_enabled())
+ if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
+ !context_tracking_enabled())
return 0;
- prev_ctx = this_cpu_read(context_tracking.state);
- if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_exit(prev_ctx);
+ prev_ctx = __ct_state();
+ if (prev_ctx != CT_STATE_KERNEL)
+ ct_user_exit(prev_ctx);
return prev_ctx;
}
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (context_tracking_is_enabled()) {
- if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_enter(prev_ctx);
+ if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
+ context_tracking_enabled()) {
+ if (prev_ctx != CT_STATE_KERNEL)
+ ct_user_enter(prev_ctx);
}
}
+static __always_inline bool context_tracking_guest_enter(void)
+{
+ if (context_tracking_enabled())
+ __ct_user_enter(CT_STATE_GUEST);
-/**
- * ct_state() - return the current context tracking state if known
- *
- * Returns the current cpu's context tracking state if context tracking
- * is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
- */
-static inline enum ctx_state ct_state(void)
+ return context_tracking_enabled_this_cpu();
+}
+
+static __always_inline bool context_tracking_guest_exit(void)
{
- return context_tracking_is_enabled() ?
- this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
+ if (context_tracking_enabled())
+ __ct_user_exit(CT_STATE_GUEST);
+
+ return context_tracking_enabled_this_cpu();
}
+
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
+
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
-static inline enum ctx_state exception_enter(void) { return 0; }
+static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
-#endif /* !CONFIG_CONTEXT_TRACKING */
-
-#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
-
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+static inline int ct_state(void) { return CT_STATE_DISABLED; }
+static inline int __ct_state(void) { return CT_STATE_DISABLED; }
+static __always_inline bool context_tracking_guest_enter(void) { return false; }
+static __always_inline bool context_tracking_guest_exit(void) { return false; }
+#define CT_WARN_ON(cond) do { } while (0)
+#endif /* !CONFIG_CONTEXT_TRACKING_USER */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
-#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
+#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+extern void ct_idle_enter(void);
+extern void ct_idle_exit(void);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-/* must be called with irqs disabled */
-static inline void guest_enter_irqoff(void)
+/*
+ * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)?
+ *
+ * Note that this returns the actual boolean data (watching / not watching),
+ * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of
+ * context_tracking.state.
+ *
+ * No ordering, as we are sampling CPU-local information.
+ */
+static __always_inline bool rcu_is_watching_curr_cpu(void)
{
- if (vtime_accounting_cpu_enabled())
- vtime_guest_enter(current);
- else
- current->flags |= PF_VCPU;
-
- if (context_tracking_is_enabled())
- __context_tracking_enter(CONTEXT_GUEST);
-
- /* KVM does not hold any references to rcu protected data when it
- * switches CPU into a guest mode. In fact switching to a guest mode
- * is very similar to exiting to userspace from rcu point of view. In
- * addition CPU may stay in a guest mode for quite a long time (up to
- * one time slice). Lets treat guest mode as quiescent state, just like
- * we do with user-mode execution.
- */
- if (!context_tracking_cpu_is_enabled())
- rcu_virt_note_context_switch(smp_processor_id());
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
}
-static inline void guest_exit_irqoff(void)
+/*
+ * Increment the current CPU's context_tracking structure's ->state field
+ * with ordering. Return the new value.
+ */
+static __always_inline unsigned long ct_state_inc(int incby)
{
- if (context_tracking_is_enabled())
- __context_tracking_exit(CONTEXT_GUEST);
-
- if (vtime_accounting_cpu_enabled())
- vtime_guest_exit(current);
- else
- current->flags &= ~PF_VCPU;
+ return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
-#else
-static inline void guest_enter_irqoff(void)
+static __always_inline bool warn_rcu_enter(void)
{
+ bool ret = false;
+
/*
- * This is running in ioctl context so its safe
- * to assume that it's the stime pending cputime
- * to flush.
+	 * Horrible hack to shut up the recursive "RCU isn't watching"
+	 * failure, since lots of the actual reporting also relies on RCU.
*/
- vtime_account_system(current);
- current->flags |= PF_VCPU;
- rcu_virt_note_context_switch(smp_processor_id());
-}
+ preempt_disable_notrace();
+ if (!rcu_is_watching_curr_cpu()) {
+ ret = true;
+ ct_state_inc(CT_RCU_WATCHING);
+ }
-static inline void guest_exit_irqoff(void)
-{
- /* Flush the guest cputime we spent on the guest */
- vtime_account_system(current);
- current->flags &= ~PF_VCPU;
+ return ret;
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-static inline void guest_enter(void)
+static __always_inline void warn_rcu_exit(bool rcu)
{
- unsigned long flags;
-
- local_irq_save(flags);
- guest_enter_irqoff();
- local_irq_restore(flags);
+ if (rcu)
+ ct_state_inc(CT_RCU_WATCHING);
+ preempt_enable_notrace();
}
-static inline void guest_exit(void)
-{
- unsigned long flags;
+#else
+static inline void ct_idle_enter(void) { }
+static inline void ct_idle_exit(void) { }
- local_irq_save(flags);
- guest_exit_irqoff();
- local_irq_restore(flags);
-}
+static __always_inline bool warn_rcu_enter(void) { return false; }
+static __always_inline void warn_rcu_exit(bool rcu) { }
+#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
#endif
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
new file mode 100644
index 000000000000..197916ee91a4
--- /dev/null
+++ b/include/linux/context_tracking_irq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTEXT_TRACKING_IRQ_H
+#define _LINUX_CONTEXT_TRACKING_IRQ_H
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+void ct_irq_enter(void);
+void ct_irq_exit(void);
+void ct_irq_enter_irqson(void);
+void ct_irq_exit_irqson(void);
+void ct_nmi_enter(void);
+void ct_nmi_exit(void);
+#else
+static __always_inline void ct_irq_enter(void) { }
+static __always_inline void ct_irq_exit(void) { }
+static inline void ct_irq_enter_irqson(void) { }
+static inline void ct_irq_exit_irqson(void) { }
+static __always_inline void ct_nmi_enter(void) { }
+static __always_inline void ct_nmi_exit(void) { }
+#endif
+
+#endif
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 1d34fe68f48a..0b81248aa03e 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -1,10 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
#define _LINUX_CONTEXT_TRACKING_STATE_H
#include <linux/percpu.h>
#include <linux/static_key.h>
+#include <linux/context_tracking_irq.h>
+
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
+#define CT_NESTING_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+
+enum ctx_state {
+ CT_STATE_DISABLED = -1, /* returned by ct_state() if unknown */
+ CT_STATE_KERNEL = 0,
+ CT_STATE_IDLE = 1,
+ CT_STATE_USER = 2,
+ CT_STATE_GUEST = 3,
+ CT_STATE_MAX = 4,
+};
struct context_tracking {
+#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
* When active is false, probes are unset in order
* to minimize overhead: TIF flags are cleared
@@ -13,37 +28,151 @@ struct context_tracking {
*/
bool active;
int recursion;
- enum ctx_state {
- CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
- CONTEXT_KERNEL = 0,
- CONTEXT_USER,
- CONTEXT_GUEST,
- } state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING
+ atomic_t state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+ long nesting; /* Track process nesting level. */
+ long nmi_nesting; /* Track irq/NMI nesting level. */
+#endif
};
+/*
+ * We cram two different things within the same atomic variable:
+ *
+ * CT_RCU_WATCHING_START CT_STATE_START
+ * | |
+ * v v
+ * MSB [ RCU watching counter ][ context_state ] LSB
+ * ^ ^
+ * | |
+ * CT_RCU_WATCHING_END CT_STATE_END
+ *
+ * Bits are used from the LSB upwards, so unused bits (if any) will always be in
+ * upper bits of the variable.
+ */
#ifdef CONFIG_CONTEXT_TRACKING
-extern struct static_key_false context_tracking_enabled;
+#define CT_SIZE (sizeof(((struct context_tracking *)0)->state) * BITS_PER_BYTE)
+
+#define CT_STATE_WIDTH bits_per(CT_STATE_MAX - 1)
+#define CT_STATE_START 0
+#define CT_STATE_END (CT_STATE_START + CT_STATE_WIDTH - 1)
+
+#define CT_RCU_WATCHING_MAX_WIDTH (CT_SIZE - CT_STATE_WIDTH)
+#define CT_RCU_WATCHING_WIDTH (IS_ENABLED(CONFIG_RCU_DYNTICKS_TORTURE) ? 2 : CT_RCU_WATCHING_MAX_WIDTH)
+#define CT_RCU_WATCHING_START (CT_STATE_END + 1)
+#define CT_RCU_WATCHING_END (CT_RCU_WATCHING_START + CT_RCU_WATCHING_WIDTH - 1)
+#define CT_RCU_WATCHING BIT(CT_RCU_WATCHING_START)
+
+#define CT_STATE_MASK GENMASK(CT_STATE_END, CT_STATE_START)
+#define CT_RCU_WATCHING_MASK GENMASK(CT_RCU_WATCHING_END, CT_RCU_WATCHING_START)
+
+#define CT_UNUSED_WIDTH (CT_RCU_WATCHING_MAX_WIDTH - CT_RCU_WATCHING_WIDTH)
+
+static_assert(CT_STATE_WIDTH +
+ CT_RCU_WATCHING_WIDTH +
+ CT_UNUSED_WIDTH ==
+ CT_SIZE);
+
DECLARE_PER_CPU(struct context_tracking, context_tracking);
+#endif /* CONFIG_CONTEXT_TRACKING */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+static __always_inline int __ct_state(void)
+{
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+}
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+static __always_inline int ct_rcu_watching(void)
+{
+ return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
+}
+
+static __always_inline int ct_rcu_watching_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
+}
+
+static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK;
+}
-static inline bool context_tracking_is_enabled(void)
+static __always_inline long ct_nesting(void)
{
- return static_branch_unlikely(&context_tracking_enabled);
+ return __this_cpu_read(context_tracking.nesting);
}
-static inline bool context_tracking_cpu_is_enabled(void)
+static __always_inline long ct_nesting_cpu(int cpu)
{
- return __this_cpu_read(context_tracking.active);
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->nesting;
}
-static inline bool context_tracking_in_user(void)
+static __always_inline long ct_nmi_nesting(void)
{
- return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
+ return __this_cpu_read(context_tracking.nmi_nesting);
}
+
+static __always_inline long ct_nmi_nesting_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->nmi_nesting;
+}
+#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern struct static_key_false context_tracking_key;
+
+static __always_inline bool context_tracking_enabled(void)
+{
+ return static_branch_unlikely(&context_tracking_key);
+}
+
+static __always_inline bool context_tracking_enabled_cpu(int cpu)
+{
+ return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
+}
+
+static __always_inline bool context_tracking_enabled_this_cpu(void)
+{
+ return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
+}
+
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CT_STATE_DISABLED. This should be used primarily for debugging.
+ */
+static __always_inline int ct_state(void)
+{
+ int ret;
+
+ if (!context_tracking_enabled())
+ return CT_STATE_DISABLED;
+
+ preempt_disable();
+ ret = __ct_state();
+ preempt_enable();
+
+ return ret;
+}
+
#else
-static inline bool context_tracking_in_user(void) { return false; }
-static inline bool context_tracking_active(void) { return false; }
-static inline bool context_tracking_is_enabled(void) { return false; }
-static inline bool context_tracking_cpu_is_enabled(void) { return false; }
-#endif /* CONFIG_CONTEXT_TRACKING */
+static __always_inline bool context_tracking_enabled(void) { return false; }
+static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
+static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
+#endif /* CONFIG_CONTEXT_TRACKING_USER */
#endif
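A minimal sketch of how the packed state word above is consumed (the helper is hypothetical, and CT_STATE_USER is assumed to come from the same enum as CT_STATE_MAX):

	static __always_inline bool ct_example_user_and_watching(void)
	{
		int state = raw_atomic_read(this_cpu_ptr(&context_tracking.state));

		/*
		 * The context state lives in the low CT_STATE_WIDTH bits; the
		 * RCU watching counter sits above it and advances in steps of
		 * CT_RCU_WATCHING, so it never disturbs the context bits.
		 */
		return (state & CT_STATE_MASK) == CT_STATE_USER &&
		       (state & CT_RCU_WATCHING_MASK) != 0;
	}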
diff --git a/include/linux/cookie.h b/include/linux/cookie.h
new file mode 100644
index 000000000000..0c159f585109
--- /dev/null
+++ b/include/linux/cookie.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COOKIE_H
+#define __LINUX_COOKIE_H
+
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <asm/local.h>
+
+struct pcpu_gen_cookie {
+ local_t nesting;
+ u64 last;
+} __aligned(16);
+
+struct gen_cookie {
+ struct pcpu_gen_cookie __percpu *local;
+ atomic64_t forward_last ____cacheline_aligned_in_smp;
+ atomic64_t reverse_last;
+};
+
+#define COOKIE_LOCAL_BATCH 4096
+
+#define DEFINE_COOKIE(name) \
+ static DEFINE_PER_CPU(struct pcpu_gen_cookie, __##name); \
+ static struct gen_cookie name = { \
+ .local = &__##name, \
+ .forward_last = ATOMIC64_INIT(0), \
+ .reverse_last = ATOMIC64_INIT(0), \
+ }
+
+static __always_inline u64 gen_cookie_next(struct gen_cookie *gc)
+{
+ struct pcpu_gen_cookie *local = this_cpu_ptr(gc->local);
+ u64 val;
+
+ if (likely(local_inc_return(&local->nesting) == 1)) {
+ val = local->last;
+ if (__is_defined(CONFIG_SMP) &&
+ unlikely((val & (COOKIE_LOCAL_BATCH - 1)) == 0)) {
+ s64 next = atomic64_add_return(COOKIE_LOCAL_BATCH,
+ &gc->forward_last);
+ val = next - COOKIE_LOCAL_BATCH;
+ }
+ local->last = ++val;
+ } else {
+ val = atomic64_dec_return(&gc->reverse_last);
+ }
+ local_dec(&local->nesting);
+ return val;
+}
+
+#endif /* __LINUX_COOKIE_H */
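A minimal usage sketch (the cookie instance and wrapper are hypothetical): DEFINE_COOKIE() instantiates the per-CPU state, and gen_cookie_next() hands out a unique 64-bit value; a nested caller (e.g. an interrupt arriving mid-allocation) is diverted to the shared reverse counter.

	DEFINE_COOKIE(example_cookie);		/* hypothetical instance */

	static u64 example_gen_cookie(void)
	{
		u64 cookie;

		/* this_cpu_ptr() in gen_cookie_next() needs a stable CPU */
		preempt_disable();
		cookie = gen_cookie_next(&example_cookie);
		preempt_enable();

		return cookie;
	}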
diff --git a/include/linux/cordic.h b/include/linux/cordic.h
index cf68ca4a508c..3d656f54d64f 100644
--- a/include/linux/cordic.h
+++ b/include/linux/cordic.h
@@ -18,6 +18,15 @@
#include <linux/types.h>
+#define CORDIC_ANGLE_GEN 39797
+#define CORDIC_PRECISION_SHIFT 16
+#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
+
+#define CORDIC_FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
+#define CORDIC_FLOAT(X) (((X) >= 0) \
+ ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
+ : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
+
/**
* struct cordic_iq - i/q coordinate.
*
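The added macros implement an s(15.16) fixed-point format; a small sketch of the conversions (the function is hypothetical, expected values in the comments):

	static inline void cordic_fixed_point_example(void)
	{
		s32 one  = CORDIC_FIXED(1);		/* 0x10000 */
		s32 half = one / 2;			/* 0x08000 */

		/* CORDIC_FLOAT() rounds to the nearest integer */
		WARN_ON(CORDIC_FLOAT(half) != 1);	/* 0x8000 rounds up   */
		WARN_ON(CORDIC_FLOAT(half - 1) != 0);	/* 0x7fff rounds down */
	}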
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 28ffa94aed6b..68861da4cf7c 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COREDUMP_H
#define _LINUX_COREDUMP_H
@@ -6,19 +7,73 @@
#include <linux/fs.h>
#include <asm/siginfo.h>
+#ifdef CONFIG_COREDUMP
+struct core_vma_metadata {
+ unsigned long start, end;
+ vm_flags_t flags;
+ unsigned long dump_size;
+ unsigned long pgoff;
+ struct file *file;
+};
+
+struct coredump_params {
+ const kernel_siginfo_t *siginfo;
+ struct file *file;
+ unsigned long limit;
+ unsigned long mm_flags;
+ int cpu;
+ loff_t written;
+ loff_t pos;
+ loff_t to_skip;
+ int vma_count;
+ size_t vma_data_size;
+ struct core_vma_metadata *vma_meta;
+ struct pid *pid;
+};
+
+extern unsigned int core_file_note_size_limit;
+
/*
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
*/
-struct coredump_params;
-extern int dump_skip(struct coredump_params *cprm, size_t nr);
+extern void dump_skip_to(struct coredump_params *cprm, unsigned long to);
+extern void dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
-extern void dump_truncate(struct coredump_params *cprm);
-#ifdef CONFIG_COREDUMP
-extern void do_coredump(const siginfo_t *siginfo);
+int dump_user_range(struct coredump_params *cprm, unsigned long start,
+ unsigned long len);
+extern void vfs_coredump(const kernel_siginfo_t *siginfo);
+
+/*
+ * Logging for the coredump code, ratelimited.
+ * The TGID and comm fields are added to the message.
+ */
+
+#define __COREDUMP_PRINTK(Level, Format, ...) \
+ do { \
+ char comm[TASK_COMM_LEN]; \
+ /* This will always be NUL terminated. */ \
+ memcpy(comm, current->comm, sizeof(comm)); \
+ printk_ratelimited(Level "coredump: %d(%*pE): " Format "\n", \
+ task_tgid_vnr(current), (int)strlen(comm), comm, ##__VA_ARGS__); \
+ } while (0) \
+
+#define coredump_report(fmt, ...) __COREDUMP_PRINTK(KERN_INFO, fmt, ##__VA_ARGS__)
+#define coredump_report_failure(fmt, ...) __COREDUMP_PRINTK(KERN_WARNING, fmt, ##__VA_ARGS__)
+
+#else
+static inline void vfs_coredump(const kernel_siginfo_t *siginfo) {}
+
+#define coredump_report(...)
+#define coredump_report_failure(...)
+
+#endif
+
+#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
+extern void validate_coredump_safety(void);
#else
-static inline void do_coredump(const siginfo_t *siginfo) {}
+static inline void validate_coredump_safety(void) {}
#endif
#endif /* _LINUX_COREDUMP_H */
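A sketch of how the new logging helpers compose with the write primitives (the helper is hypothetical): dump_emit() returns non-zero on success, and the report macros prepend the dumping task's TGID and comm.

	static int example_emit_note(struct coredump_params *cprm,
				     const void *data, size_t len)
	{
		/* dump_emit() returns non-zero on success, 0 on failure */
		if (!dump_emit(cprm, data, len)) {
			coredump_report_failure("failed to emit %zu byte note", len);
			return -EIO;
		}

		return 0;
	}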
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index edfeaba95429..89b0ac0014b0 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -1,45 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _LINUX_CORESIGHT_PMU_H
#define _LINUX_CORESIGHT_PMU_H
+#include <linux/bits.h>
+
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
-#define CORESIGHT_ETM_PMU_SEED 0x10
-/* ETMv3.5/PTM's ETMCR config bit */
-#define ETM_OPT_CYCACC 12
-#define ETM_OPT_TS 28
-#define ETM_OPT_RETSTK 29
+/*
+ * The legacy Trace ID system based on fixed calculation from the cpu
+ * number. This has been replaced by drivers using a dynamic allocation
+ * system - but the legacy algorithm must be retained for backward
+ * compatibility in certain situations:
+ * a) new perf running on older systems that generate the legacy mapping
+ * b) older tools that may not update at the same time as the kernel.
+ */
+#define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2))
+
+/*
+ * Below are the definitions of the bit offsets for the perf options; they
+ * work as arbitrary values across all ETM versions.
+ *
+ * Most of them originally come from ETMv3.5/PTM's ETMCR config; therefore,
+ * ETMv3.5/PTM doesn't define ETMCR config bits with an "ETM3_" prefix and
+ * directly uses the macros below as config bits.
+ */
+#define ETM_OPT_BRANCH_BROADCAST 8
+#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
+#define ETM_OPT_CTXTID2 15
+#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_BB 3
#define ETM4_CFG_BIT_CYCACC 4
+#define ETM4_CFG_BIT_CTXTID 6
+#define ETM4_CFG_BIT_VMID 7
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
+#define ETM4_CFG_BIT_VMID_OPT 15
+
+/*
+ * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
+ * Used to associate a CPU with the CoreSight Trace ID.
+ * [07:00] - Trace ID - uses 8 bits to make the value easy to read in the file.
+ * [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/
+ * Added in minor version 1.
+ * [55:40] - Unused (SBZ)
+ * [59:56] - Minor Version - previously existing fields are compatible with
+ * all minor versions.
+ * [63:60] - Major Version - previously existing fields mean different things
+ * in new major versions.
+ */
+#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
+#define CS_AUX_HW_ID_SINK_ID_MASK GENMASK_ULL(39, 8)
+
+#define CS_AUX_HW_ID_MINOR_VERSION_MASK GENMASK_ULL(59, 56)
+#define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60)
-static inline int coresight_get_trace_id(int cpu)
-{
- /*
- * A trace ID of value 0 is invalid, so let's start at some
- * random value that fits in 7 bits and go from there. Since
- * the common convention is to have data trace IDs be I(N) + 1,
- * set instruction trace IDs as a function of the CPU number.
- */
- return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
-}
+#define CS_AUX_HW_ID_MAJOR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION 1
#endif
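A sketch of unpacking the PERF_RECORD_AUX_OUTPUT_HW_ID payload described above, using FIELD_GET() from <linux/bitfield.h> (the function is hypothetical):

	static void example_decode_aux_hw_id(u64 hw_id)
	{
		u8  trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
		u8  major    = FIELD_GET(CS_AUX_HW_ID_MAJOR_VERSION_MASK, hw_id);
		u8  minor    = FIELD_GET(CS_AUX_HW_ID_MINOR_VERSION_MASK, hw_id);
		u32 sink_id;

		/* the sink ID field only exists from minor version 1 onwards */
		if (major == CS_AUX_HW_ID_MAJOR_VERSION && minor >= 1) {
			sink_id = FIELD_GET(CS_AUX_HW_ID_SINK_ID_MASK, hw_id);
			pr_debug("trace id %u routed to sink %u\n",
				 trace_id, sink_id);
		}
	}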
diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h
index a978bb85599a..74714b59f9d2 100644
--- a/include/linux/coresight-stm.h
+++ b/include/linux/coresight-stm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CORESIGHT_STM_H_
#define __LINUX_CORESIGHT_STM_H_
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index d950dad5056a..2b48be97fcd0 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -1,21 +1,18 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_CORESIGHT_H
#define _LINUX_CORESIGHT_H
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
+#include <linux/platform_device.h>
/* Peripheral id registers (0xFD0-0xFEC) */
#define CORESIGHT_PERIPHIDR4 0xfd0
@@ -39,145 +36,321 @@
#define CORESIGHT_UNLOCK 0xc5acce55
-extern struct bus_type coresight_bustype;
+extern const struct bus_type coresight_bustype;
enum coresight_dev_type {
- CORESIGHT_DEV_TYPE_NONE,
CORESIGHT_DEV_TYPE_SINK,
CORESIGHT_DEV_TYPE_LINK,
CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
+ CORESIGHT_DEV_TYPE_HELPER,
+ CORESIGHT_DEV_TYPE_MAX
};
enum coresight_dev_subtype_sink {
- CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_SINK_DUMMY,
CORESIGHT_DEV_SUBTYPE_SINK_PORT,
CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+ CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
+ CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM,
};
enum coresight_dev_subtype_link {
- CORESIGHT_DEV_SUBTYPE_LINK_NONE,
CORESIGHT_DEV_SUBTYPE_LINK_MERG,
CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
};
enum coresight_dev_subtype_source {
- CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS,
+};
+
+enum coresight_dev_subtype_helper {
+ CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
+ CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI,
+ CORESIGHT_DEV_SUBTYPE_HELPER_CTCU,
};
/**
- * struct coresight_dev_subtype - further characterisation of a type
+ * union coresight_dev_subtype - further characterisation of a type
* @sink_subtype: type of sink this component is, as defined
- by @coresight_dev_subtype_sink.
+ * by @coresight_dev_subtype_sink.
* @link_subtype: type of link this component is, as defined
- by @coresight_dev_subtype_link.
+ * by @coresight_dev_subtype_link.
* @source_subtype: type of source this component is, as defined
- by @coresight_dev_subtype_source.
+ * by @coresight_dev_subtype_source.
+ * @helper_subtype: type of helper this component is, as defined
+ * by @coresight_dev_subtype_helper.
*/
-struct coresight_dev_subtype {
- enum coresight_dev_subtype_sink sink_subtype;
- enum coresight_dev_subtype_link link_subtype;
+union coresight_dev_subtype {
+	/* We have some devices which act as LINK and SINK */
+ struct {
+ enum coresight_dev_subtype_sink sink_subtype;
+ enum coresight_dev_subtype_link link_subtype;
+ };
enum coresight_dev_subtype_source source_subtype;
+ enum coresight_dev_subtype_helper helper_subtype;
};
/**
- * struct coresight_platform_data - data harvested from the DT specification
- * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
- * @name: name of the component as shown under sysfs.
- * @nr_inport: number of input ports for this component.
- * @outports: list of remote endpoint port number.
- * @child_names:name of all child components connected to this device.
- * @child_ports:child component port number the current component is
- connected to.
- * @nr_outport: number of output ports for this component.
- * @clk: The clock this component is associated to.
+ * struct coresight_platform_data - data harvested from the firmware
+ * specification.
+ *
+ * @nr_inconns: Number of elements for the input connections.
+ * @nr_outconns: Number of elements for the output connections.
+ * @out_conns: Array of nr_outconns pointers to connections from this
+ * component.
+ * @in_conns: Sparse array of pointers to input connections. Sparse
+ * because the source device owns the connection so when it's
+ * unloaded the connection leaves an empty slot.
*/
struct coresight_platform_data {
- int cpu;
- const char *name;
- int nr_inport;
- int *outports;
- const char **child_names;
- int *child_ports;
- int nr_outport;
- struct clk *clk;
+ int nr_inconns;
+ int nr_outconns;
+ struct coresight_connection **out_conns;
+ struct coresight_connection **in_conns;
+};
+
+/**
+ * struct csdev_access - Abstraction of a CoreSight device access.
+ *
+ * @io_mem : True if the device has memory mapped I/O
+ * @base : When io_mem == true, base address of the component
+ * @read : Read from the given "offset" of the given instance.
+ * @write : Write "val" to the given "offset".
+ */
+struct csdev_access {
+ bool io_mem;
+ union {
+ void __iomem *base;
+ struct {
+ u64 (*read)(u32 offset, bool relaxed, bool _64bit);
+ void (*write)(u64 val, u32 offset, bool relaxed,
+ bool _64bit);
+ };
+ };
};
+#define CSDEV_ACCESS_IOMEM(_addr) \
+ ((struct csdev_access) { \
+ .io_mem = true, \
+ .base = (_addr), \
+ })
+
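For devices without memory-mapped I/O, a driver fills the callback pair instead of @base; the csdev_access_*() helpers further down dispatch on @io_mem. A sketch (the system-register backend is hypothetical):

	static u64 example_sysreg_read(u32 offset, bool relaxed, bool _64bit)
	{
		/* e.g. read the register via system instructions */
		return 0;
	}

	static void example_sysreg_write(u64 val, u32 offset, bool relaxed,
					 bool _64bit)
	{
		/* e.g. write the register via system instructions */
	}

	static struct csdev_access example_csa = {
		.io_mem = false,
		.read	= example_sysreg_read,
		.write	= example_sysreg_write,
	};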
/**
* struct coresight_desc - description of a component required from drivers
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
* @ops: generic operations for this component, as defined
- by @coresight_ops.
+ * by @coresight_ops.
* @pdata: platform data collected from DT.
* @dev: The device entity associated to this component.
* @groups: operations specific to this component. These will end up
- in the component's sysfs sub-directory.
+ * in the component's sysfs sub-directory.
+ * @name: name for the coresight device, also shown under sysfs.
+ * @access: Describe access to the device
*/
struct coresight_desc {
enum coresight_dev_type type;
- struct coresight_dev_subtype subtype;
+ union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
struct coresight_platform_data *pdata;
struct device *dev;
const struct attribute_group **groups;
+ const char *name;
+ struct csdev_access access;
};
/**
* struct coresight_connection - representation of a single connection
- * @outport: a connection's output port number.
- * @chid_name: remote component's name.
- * @child_port: remote component's port number @output is connected to.
- * @child_dev: a @coresight_device representation of the component
- connected to @outport.
+ * @src_port: a connection's output port number.
+ * @dest_port: destination's input port number @src_port is connected to.
+ * @dest_fwnode: destination component's fwnode handle.
+ * @dest_dev: a @coresight_device representation of the component
+ * connected to @src_port. NULL until the device is created.
+ * @link: Representation of the connection as a sysfs link.
+ * @filter_src_fwnode: filter source component's fwnode handle.
+ * @filter_src_dev: a @coresight_device representation of the component that
+ * needs to be filtered.
+ *
+ * The full connection structure looks like this, where in_conns store
+ * references to same connection as the source device's out_conns.
+ *
+ * +-----------------------------+ +-----------------------------+
+ * |coresight_device | |coresight_connection |
+ * |-----------------------------| |-----------------------------|
+ * | | | |
+ * | | | dest_dev*|<--
+ * |pdata->out_conns[nr_outconns]|<->|src_dev* | |
+ * | | | | |
+ * +-----------------------------+ +-----------------------------+ |
+ * |
+ * +-----------------------------+ |
+ * |coresight_device | |
+ * |-----------------------------| |
+ * | | |
+ * | pdata->in_conns[nr_inconns]|<--
+ * | |
+ * +-----------------------------+
*/
struct coresight_connection {
- int outport;
- const char *child_name;
- int child_port;
- struct coresight_device *child_dev;
+ int src_port;
+ int dest_port;
+ struct fwnode_handle *dest_fwnode;
+ struct coresight_device *dest_dev;
+ struct coresight_sysfs_link *link;
+ struct coresight_device *src_dev;
+ struct fwnode_handle *filter_src_fwnode;
+ struct coresight_device *filter_src_dev;
+ int src_refcnt;
+ int dest_refcnt;
+};
+
+/**
+ * struct coresight_sysfs_link - representation of a connection in sysfs.
+ * @orig: Originating (master) coresight device for the link.
+ * @orig_name: Name to use for the link orig->target.
+ * @target: Target (slave) coresight device for the link.
+ * @target_name: Name to use for the link target->orig.
+ */
+struct coresight_sysfs_link {
+ struct coresight_device *orig;
+ const char *orig_name;
+ struct coresight_device *target;
+ const char *target_name;
+};
+
+/* architecturally we have 128 IDs, some of which are reserved */
+#define CORESIGHT_TRACE_IDS_MAX 128
+
+/**
+ * struct coresight_trace_id_map - Trace ID map.
+ *
+ * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
+ * Initialised so that the reserved IDs are permanently marked as
+ * in use.
+ * @cpu_map: Per-CPU storage of the trace ID currently allocated to each CPU,
+ * for CPU-bound sources.
+ * @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
+ * @lock: Protects allocation and release of IDs within the map.
+ */
+struct coresight_trace_id_map {
+ DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
+ atomic_t __percpu *cpu_map;
+ atomic_t perf_cs_etm_session_active;
+ raw_spinlock_t lock;
};
/**
* struct coresight_device - representation of a device as used by the framework
- * @conns: array of coresight_connections associated to this component.
- * @nr_inport: number of input port associated to this component.
- * @nr_outport: number of output port associated to this component.
+ * @pdata: Platform data with device connections associated to this device.
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
* @ops: generic operations for this component, as defined
- by @coresight_ops.
+ * by @coresight_ops.
+ * @access: Device i/o access abstraction for this device.
* @dev: The device entity associated to this component.
- * @refcnt: keep track of what is in use.
+ * @mode: The device mode, i.e. sysFS, Perf or disabled. This is actually
+ * an 'enum cs_mode' but stored in an atomic type. Access is always
+ * through atomic APIs, ensuring SMP-safe synchronisation between
+ * racing accesses from sysFS and Perf mode. A compare-and-exchange
+ * operation is done to atomically claim one mode or the other.
+ * @refcnt: keep track of what is in use. Only access this outside of the
+ * device's spinlock when coresight_mutex is held and mode ==
+ * CS_MODE_SYSFS. Otherwise it must be accessed from inside the
+ * spinlock.
* @orphan: true if the component has connections that haven't been linked.
- * @enable: 'true' if component is currently part of an active path.
- * @activated: 'true' only if a _sink_ has been activated. A sink can be
- activated but not yet enabled. Enabling for a _sink_
- happens when a source has been selected for that it.
+ * @sysfs_sink_activated: 'true' when a sink has been selected for use via sysfs
+ * by writing a 1 to the 'enable_sink' file. A sink can be
+ * activated but not yet enabled. Enabling for a _sink_ happens
+ * when a source has been selected and a path is enabled from
+ * source to that sink. A sink can also become enabled but not
+ * activated if it's used via Perf.
+ * @ea: Device attribute for sink representation under PMU directory.
+ * @def_sink: cached reference to default sink found for this device.
+ * @nr_links: number of sysfs links created to other components from this
+ * device. These will appear in the "connections" group.
+ * @has_conns_grp: Have added a "connections" group for sysfs links.
+ * @feature_csdev_list: List of complex feature programming added to the device.
+ * @config_csdev_list: List of system configurations added to the device.
+ * @cscfg_csdev_lock: Protect the lists of configurations and features.
+ * @active_cscfg_ctxt: Context information for current active system configuration.
*/
struct coresight_device {
- struct coresight_connection *conns;
- int nr_inport;
- int nr_outport;
+ struct coresight_platform_data *pdata;
enum coresight_dev_type type;
- struct coresight_dev_subtype subtype;
+ union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
+ struct csdev_access access;
struct device dev;
- atomic_t *refcnt;
+ atomic_t mode;
+ int refcnt;
bool orphan;
- bool enable; /* true only if configured as part of a path */
- bool activated; /* true only if a sink is part of a path */
+ /* sink specific fields */
+ bool sysfs_sink_activated;
+ struct dev_ext_attribute *ea;
+ struct coresight_device *def_sink;
+ struct coresight_trace_id_map perf_sink_id_map;
+ /* sysfs links between components */
+ int nr_links;
+ bool has_conns_grp;
+ /* system configuration and feature lists */
+ struct list_head feature_csdev_list;
+ struct list_head config_csdev_list;
+ raw_spinlock_t cscfg_csdev_lock;
+ void *active_cscfg_ctxt;
+};
+
+/*
+ * coresight_dev_list - Mapping from devices to the index used in their
+ * device names.
+ *
+ * @nr_idx: Number of entries already allocated.
+ * @pfx: Prefix pattern for device name.
+ * @fwnode_list: Array of fwnode_handles associated with each allocated
+ * index, up to nr_idx entries.
+ */
+struct coresight_dev_list {
+ int nr_idx;
+ const char *pfx;
+ struct fwnode_handle **fwnode_list;
};
+#define DEFINE_CORESIGHT_DEVLIST(var, dev_pfx) \
+static struct coresight_dev_list (var) = { \
+ .pfx = dev_pfx, \
+ .nr_idx = 0, \
+ .fwnode_list = NULL, \
+}
+
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+/**
+ * struct coresight_path - data needed by enable/disable path
+ * @path_list: path from source to sink.
+ * @trace_id: trace_id of the whole path.
+ * @handle: handle of the aux_event.
+ */
+struct coresight_path {
+ struct list_head path_list;
+ u8 trace_id;
+ struct perf_output_handle *handle;
+};
+
+enum cs_mode {
+ CS_MODE_DISABLED,
+ CS_MODE_SYSFS,
+ CS_MODE_PERF,
+};
+
+#define coresight_ops(csdev) csdev->ops
#define source_ops(csdev) csdev->ops->source_ops
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
+#define helper_ops(csdev) csdev->ops->helper_ops
+#define ect_ops(csdev) csdev->ops->ect_ops
+#define panic_ops(csdev) csdev->ops->panic_ops
/**
* struct coresight_ops_sink - basic operations for a sink
@@ -186,23 +359,17 @@ struct coresight_device {
* @disable: disables the sink.
* @alloc_buffer: initialises perf's ring buffer for trace collection.
* @free_buffer: release memory allocated in @alloc_buffer.
- * @set_buffer: initialises buffer mechanic before a trace session.
- * @reset_buffer: finalises buffer mechanic after a trace session.
* @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
- int (*enable)(struct coresight_device *csdev, u32 mode);
- void (*disable)(struct coresight_device *csdev);
- void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
- void **pages, int nr_pages, bool overwrite);
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_path *path);
+ int (*disable)(struct coresight_device *csdev);
+ void *(*alloc_buffer)(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite);
void (*free_buffer)(void *config);
- int (*set_buffer)(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config);
- unsigned long (*reset_buffer)(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config);
- void (*update_buffer)(struct coresight_device *csdev,
+ unsigned long (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
};
@@ -214,8 +381,12 @@ struct coresight_ops_sink {
* @disable: disables flow between iport and oport.
*/
struct coresight_ops_link {
- int (*enable)(struct coresight_device *csdev, int iport, int oport);
- void (*disable)(struct coresight_device *csdev, int iport, int oport);
+ int (*enable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
+ void (*disable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
};
/**
@@ -223,75 +394,316 @@ struct coresight_ops_link {
* Operations available for sources.
* @cpu_id: returns the value of the CPU number this component
* is associated to.
- * @trace_id: returns the value of the component's trace ID as known
- * to the HW.
* @enable: enables tracing for a source.
* @disable: disables tracing for a source.
+ * @resume_perf: resumes tracing for a source in perf session.
+ * @pause_perf: pauses tracing for a source in perf session.
*/
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
- int (*trace_id)(struct coresight_device *csdev);
- int (*enable)(struct coresight_device *csdev,
- struct perf_event *event, u32 mode);
+ int (*enable)(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode, struct coresight_path *path);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
+ int (*resume_perf)(struct coresight_device *csdev);
+ void (*pause_perf)(struct coresight_device *csdev);
+};
+
+/**
+ * struct coresight_ops_helper - Operations for a helper device.
+ *
+ * All operations could pass in a device specific data, which could
+ * help the helper device to determine what to do.
+ *
+ * @enable : Enable the device
+ * @disable : Disable the device
+ */
+struct coresight_ops_helper {
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_path *path);
+ int (*disable)(struct coresight_device *csdev,
+ struct coresight_path *path);
+};
+
+
+/**
+ * struct coresight_ops_panic - Generic device ops for panic handling
+ *
+ * @sync : Sync the device register state/trace data
+ */
+struct coresight_ops_panic {
+ int (*sync)(struct coresight_device *csdev);
};
struct coresight_ops {
+ int (*trace_id)(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_device *sink);
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
+ const struct coresight_ops_helper *helper_ops;
+ const struct coresight_ops_panic *panic_ops;
};
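A skeleton of how a sink driver wires these tables together (all names hypothetical); op tables that don't apply to the device stay NULL:

	static int example_sink_enable(struct coresight_device *csdev,
				       enum cs_mode mode,
				       struct coresight_path *path)
	{
		/* claim the device and program the hardware ... */
		return 0;
	}

	static int example_sink_disable(struct coresight_device *csdev)
	{
		/* drain the trace data and stop the hardware ... */
		return 0;
	}

	static const struct coresight_ops_sink example_sink_ops = {
		.enable	 = example_sink_enable,
		.disable = example_sink_disable,
	};

	static const struct coresight_ops example_cs_ops = {
		.sink_ops = &example_sink_ops,
	};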
-#ifdef CONFIG_CORESIGHT
-extern struct coresight_device *
-coresight_register(struct coresight_desc *desc);
-extern void coresight_unregister(struct coresight_device *csdev);
-extern int coresight_enable(struct coresight_device *csdev);
-extern void coresight_disable(struct coresight_device *csdev);
-extern int coresight_timeout(void __iomem *addr, u32 offset,
- int position, int value);
-#else
-static inline struct coresight_device *
-coresight_register(struct coresight_desc *desc) { return NULL; }
-static inline void coresight_unregister(struct coresight_device *csdev) {}
-static inline int
-coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
-static inline void coresight_disable(struct coresight_device *csdev) {}
-static inline int coresight_timeout(void __iomem *addr, u32 offset,
- int position, int value) { return 1; }
-#endif
-
-#ifdef CONFIG_OF
-extern int of_coresight_get_cpu(const struct device_node *node);
-extern struct coresight_platform_data *
-of_get_coresight_platform_data(struct device *dev,
- const struct device_node *node);
-#else
-static inline int of_coresight_get_cpu(const struct device_node *node)
-{ return 0; }
-static inline struct coresight_platform_data *of_get_coresight_platform_data(
- struct device *dev, const struct device_node *node) { return NULL; }
-#endif
-
-#ifdef CONFIG_PID_NS
-static inline unsigned long
-coresight_vpid_to_pid(unsigned long vpid)
+static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
+ u32 offset)
{
- struct task_struct *task = NULL;
- unsigned long pid = 0;
+ if (likely(csa->io_mem))
+ return readl_relaxed(csa->base + offset);
- rcu_read_lock();
- task = find_task_by_vpid(vpid);
- if (task)
- pid = task_pid_nr(task);
- rcu_read_unlock();
+ return csa->read(offset, true, false);
+}
+
+#define CORESIGHT_CIDRn(i) (0xFF0 + ((i) * 4))
+
+static inline u32 coresight_get_cid(void __iomem *base)
+{
+ u32 i, cid = 0;
+
+ for (i = 0; i < 4; i++)
+ cid |= readl(base + CORESIGHT_CIDRn(i)) << (i * 8);
+
+ return cid;
+}
+
+static inline bool is_coresight_device(void __iomem *base)
+{
+ u32 cid = coresight_get_cid(base);
+
+ return cid == CORESIGHT_CID;
+}
+
+#define CORESIGHT_PIDRn(i) (0xFE0 + ((i) * 4))
+
+static inline u32 coresight_get_pid(struct csdev_access *csa)
+{
+ u32 i, pid = 0;
+
+ for (i = 0; i < 4; i++)
+ pid |= csdev_access_relaxed_read32(csa, CORESIGHT_PIDRn(i)) << (i * 8);
return pid;
}
-#else
-static inline unsigned long
-coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
-#endif
-#endif
+static inline u64 csdev_access_relaxed_read_pair(struct csdev_access *csa,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ return readl_relaxed(csa->base + lo_offset) |
+ ((u64)readl_relaxed(csa->base + hi_offset) << 32);
+ }
+
+ return csa->read(lo_offset, true, false) | (csa->read(hi_offset, true, false) << 32);
+}
+
+static inline void csdev_access_relaxed_write_pair(struct csdev_access *csa, u64 val,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ writel_relaxed((u32)val, csa->base + lo_offset);
+ writel_relaxed((u32)(val >> 32), csa->base + hi_offset);
+ } else {
+ csa->write((u32)val, lo_offset, true, false);
+ csa->write((u32)(val >> 32), hi_offset, true, false);
+ }
+}
+
+static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl(csa->base + offset);
+
+ return csa->read(offset, false, false);
+}
+
+static inline void csdev_access_relaxed_write32(struct csdev_access *csa,
+ u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, false);
+}
+
+static inline void csdev_access_write32(struct csdev_access *csa, u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, false);
+}
+
+#ifdef CONFIG_64BIT
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, true);
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq(csa->base + offset);
+
+ return csa->read(offset, false, true);
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, true);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, true);
+}
+
+#else /* !CONFIG_64BIT */
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+#endif /* CONFIG_64BIT */
+
+static inline bool coresight_is_device_source(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE);
+}
+
+static inline bool coresight_is_percpu_source(struct coresight_device *csdev)
+{
+ return csdev && coresight_is_device_source(csdev) &&
+ (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC);
+}
+
+static inline bool coresight_is_percpu_sink(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SINK) &&
+ (csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM);
+}
+
+/*
+ * Atomically try to take the device and set a new mode. Returns true on
+ * success, false if the device is already taken by someone else.
+ */
+static inline bool coresight_take_mode(struct coresight_device *csdev,
+ enum cs_mode new_mode)
+{
+ int curr = CS_MODE_DISABLED;
+
+ return atomic_try_cmpxchg_acquire(&csdev->mode, &curr, new_mode);
+}
+
+static inline enum cs_mode coresight_get_mode(struct coresight_device *csdev)
+{
+ return atomic_read_acquire(&csdev->mode);
+}
+
+static inline void coresight_set_mode(struct coresight_device *csdev,
+ enum cs_mode new_mode)
+{
+ enum cs_mode current_mode = coresight_get_mode(csdev);
+
+ /*
+ * Changing to a new mode must be done from an already disabled state
+ * unless it's synchronized with coresight_take_mode(). Otherwise the
+ * device is already in use and signifies a locking issue.
+ */
+ WARN(new_mode != CS_MODE_DISABLED && current_mode != CS_MODE_DISABLED &&
+ current_mode != new_mode, "Device already in use\n");
+
+ atomic_set_release(&csdev->mode, new_mode);
+}
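Taken together, the intended claim/release pattern looks like this sketch (the caller is hypothetical):

	static int example_enable_for_perf(struct coresight_device *csdev)
	{
		/* fails if sysFS (or another perf session) owns the device */
		if (!coresight_take_mode(csdev, CS_MODE_PERF))
			return -EBUSY;

		/* ... run the trace session ... */

		coresight_set_mode(csdev, CS_MODE_DISABLED);	/* release */
		return 0;
	}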
+
+struct coresight_device *coresight_register(struct coresight_desc *desc);
+void coresight_unregister(struct coresight_device *csdev);
+int coresight_enable_sysfs(struct coresight_device *csdev);
+void coresight_disable_sysfs(struct coresight_device *csdev);
+int coresight_timeout(struct csdev_access *csa, u32 offset, int position, int value);
+typedef void (*coresight_timeout_cb_t) (struct csdev_access *, u32, int, int);
+int coresight_timeout_action(struct csdev_access *csa, u32 offset, int position, int value,
+ coresight_timeout_cb_t cb);
+int coresight_claim_device(struct coresight_device *csdev);
+int coresight_claim_device_unlocked(struct coresight_device *csdev);
+void coresight_clear_self_claim_tag(struct csdev_access *csa);
+void coresight_clear_self_claim_tag_unlocked(struct csdev_access *csa);
+void coresight_disclaim_device(struct coresight_device *csdev);
+void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
+char *coresight_alloc_device_name(struct coresight_dev_list *devs,
+ struct device *dev);
+
+bool coresight_loses_context_with_cpu(struct device *dev);
+
+u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset);
+u32 coresight_read32(struct coresight_device *csdev, u32 offset);
+void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset);
+void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset);
+u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset);
+u64 coresight_read64(struct coresight_device *csdev, u32 offset);
+void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset);
+void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
+
+int coresight_get_cpu(struct device *dev);
+int coresight_get_static_trace_id(struct device *dev, u32 *id);
+
+struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
+struct coresight_connection *
+coresight_add_out_conn(struct device *dev,
+ struct coresight_platform_data *pdata,
+ const struct coresight_connection *new_conn);
+int coresight_add_in_conn(struct coresight_connection *conn);
+struct coresight_device *
+coresight_find_input_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
+struct coresight_device *
+coresight_find_output_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
+
+int coresight_init_driver(const char *drv, struct amba_driver *amba_drv,
+ struct platform_driver *pdev_drv, struct module *owner);
+
+void coresight_remove_driver(struct amba_driver *amba_drv,
+ struct platform_driver *pdev_drv);
+int coresight_etm_get_trace_id(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_device *sink);
+int coresight_get_enable_clocks(struct device *dev, struct clk **pclk,
+ struct clk **atclk);
+#endif /* _LINUX_CORESIGHT_H */
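A minimal registration sketch pulling the pieces above together (the probe function is hypothetical; example_cs_ops is from the earlier sketch):

	static int example_cs_probe(struct device *dev, void __iomem *base)
	{
		struct coresight_desc desc = { };
		struct coresight_device *csdev;

		desc.type		  = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops		  = &example_cs_ops;
		desc.dev		  = dev;
		desc.access		  = CSDEV_ACCESS_IOMEM(base);

		desc.pdata = coresight_get_platform_data(dev);
		if (IS_ERR(desc.pdata))
			return PTR_ERR(desc.pdata);

		csdev = coresight_register(&desc);
		return IS_ERR(csdev) ? PTR_ERR(csdev) : 0;
	}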
diff --git a/include/linux/count_zeros.h b/include/linux/count_zeros.h
index 363da78c4f64..5b8ff5ac660d 100644
--- a/include/linux/count_zeros.h
+++ b/include/linux/count_zeros.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Count leading and trailing zeros functions
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_BITOPS_COUNT_ZEROS_H_
diff --git a/include/linux/counter.h b/include/linux/counter.h
new file mode 100644
index 000000000000..f208e867dd0f
--- /dev/null
+++ b/include/linux/counter.h
@@ -0,0 +1,638 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Counter interface
+ * Copyright (C) 2018 William Breathitt Gray
+ */
+#ifndef _COUNTER_H_
+#define _COUNTER_H_
+
+#include <linux/array_size.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <uapi/linux/counter.h>
+
+struct counter_device;
+struct counter_count;
+struct counter_synapse;
+struct counter_signal;
+
+enum counter_comp_type {
+ COUNTER_COMP_U8,
+ COUNTER_COMP_U64,
+ COUNTER_COMP_BOOL,
+ COUNTER_COMP_SIGNAL_LEVEL,
+ COUNTER_COMP_FUNCTION,
+ COUNTER_COMP_SYNAPSE_ACTION,
+ COUNTER_COMP_ENUM,
+ COUNTER_COMP_COUNT_DIRECTION,
+ COUNTER_COMP_COUNT_MODE,
+ COUNTER_COMP_SIGNAL_POLARITY,
+ COUNTER_COMP_ARRAY,
+};
+
+/**
+ * struct counter_comp - Counter component node
+ * @type: Counter component data type
+ * @name: device-specific component name
+ * @priv: component-relevant data
+ * @action_read: Synapse action mode read callback. The read value of the
+ * respective Synapse action mode should be passed back via
+ * the action parameter.
+ * @device_u8_read: Device u8 component read callback. The read value of the
+ * respective Device u8 component should be passed back via
+ * the val parameter.
+ * @count_u8_read: Count u8 component read callback. The read value of the
+ * respective Count u8 component should be passed back via
+ * the val parameter.
+ * @signal_u8_read: Signal u8 component read callback. The read value of the
+ * respective Signal u8 component should be passed back via
+ * the val parameter.
+ * @device_u32_read: Device u32 component read callback. The read value of
+ * the respective Device u32 component should be passed
+ * back via the val parameter.
+ * @count_u32_read: Count u32 component read callback. The read value of the
+ * respective Count u32 component should be passed back via
+ * the val parameter.
+ * @signal_u32_read: Signal u32 component read callback. The read value of
+ * the respective Signal u32 component should be passed
+ * back via the val parameter.
+ * @device_u64_read: Device u64 component read callback. The read value of
+ * the respective Device u64 component should be passed
+ * back via the val parameter.
+ * @count_u64_read: Count u64 component read callback. The read value of the
+ * respective Count u64 component should be passed back via
+ * the val parameter.
+ * @signal_u64_read: Signal u64 component read callback. The read value of
+ * the respective Signal u64 component should be passed
+ * back via the val parameter.
+ * @signal_array_u32_read: Signal u32 array component read callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Signal u32 array component element should be
+ * passed back via the val parameter.
+ * @device_array_u64_read: Device u64 array component read callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Device u64 array component element should be
+ * passed back via the val parameter.
+ * @count_array_u64_read: Count u64 array component read callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u64 array component element should be
+ * passed back via the val parameter.
+ * @signal_array_u64_read: Signal u64 array component read callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Signal u64 array component element should be
+ * passed back via the val parameter.
+ * @action_write: Synapse action mode write callback. The write value of
+ * the respective Synapse action mode is passed via the
+ * action parameter.
+ * @device_u8_write: Device u8 component write callback. The write value of
+ * the respective Device u8 component is passed via the val
+ * parameter.
+ * @count_u8_write: Count u8 component write callback. The write value of
+ * the respective Count u8 component is passed via the val
+ * parameter.
+ * @signal_u8_write: Signal u8 component write callback. The write value of
+ * the respective Signal u8 component is passed via the val
+ * parameter.
+ * @device_u32_write: Device u32 component write callback. The write value of
+ * the respective Device u32 component is passed via the
+ * val parameter.
+ * @count_u32_write: Count u32 component write callback. The write value of
+ * the respective Count u32 component is passed via the val
+ * parameter.
+ * @signal_u32_write: Signal u32 component write callback. The write value of
+ * the respective Signal u32 component is passed via the
+ * val parameter.
+ * @device_u64_write: Device u64 component write callback. The write value of
+ * the respective Device u64 component is passed via the
+ * val parameter.
+ * @count_u64_write: Count u64 component write callback. The write value of
+ * the respective Count u64 component is passed via the val
+ * parameter.
+ * @signal_u64_write: Signal u64 component write callback. The write value of
+ * the respective Signal u64 component is passed via the
+ * val parameter.
+ * @signal_array_u32_write: Signal u32 array component write callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u32 array component element is passed via
+ * the val parameter.
+ * @device_array_u64_write: Device u64 array component write callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Device u64 array component element is passed via
+ * the val parameter.
+ * @count_array_u64_write: Count u64 array component write callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Count u64 array component element is passed via
+ * the val parameter.
+ * @signal_array_u64_write: Signal u64 array component write callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u64 array component element is passed via
+ * the val parameter.
+ */
+struct counter_comp {
+ enum counter_comp_type type;
+ const char *name;
+ void *priv;
+ union {
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*device_u8_read)(struct counter_device *counter, u8 *val);
+ int (*count_u8_read)(struct counter_device *counter,
+ struct counter_count *count, u8 *val);
+ int (*signal_u8_read)(struct counter_device *counter,
+ struct counter_signal *signal, u8 *val);
+ int (*device_u32_read)(struct counter_device *counter,
+ u32 *val);
+ int (*count_u32_read)(struct counter_device *counter,
+ struct counter_count *count, u32 *val);
+ int (*signal_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal, u32 *val);
+ int (*device_u64_read)(struct counter_device *counter,
+ u64 *val);
+ int (*count_u64_read)(struct counter_device *counter,
+ struct counter_count *count, u64 *val);
+ int (*signal_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal, u64 *val);
+ int (*signal_array_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 *val);
+ int (*device_array_u64_read)(struct counter_device *counter,
+ size_t idx, u64 *val);
+ int (*count_array_u64_read)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 *val);
+ int (*signal_array_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 *val);
+ };
+ union {
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*device_u8_write)(struct counter_device *counter, u8 val);
+ int (*count_u8_write)(struct counter_device *counter,
+ struct counter_count *count, u8 val);
+ int (*signal_u8_write)(struct counter_device *counter,
+ struct counter_signal *signal, u8 val);
+ int (*device_u32_write)(struct counter_device *counter,
+ u32 val);
+ int (*count_u32_write)(struct counter_device *counter,
+ struct counter_count *count, u32 val);
+ int (*signal_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal, u32 val);
+ int (*device_u64_write)(struct counter_device *counter,
+ u64 val);
+ int (*count_u64_write)(struct counter_device *counter,
+ struct counter_count *count, u64 val);
+ int (*signal_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal, u64 val);
+ int (*signal_array_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 val);
+ int (*device_array_u64_write)(struct counter_device *counter,
+ size_t idx, u64 val);
+ int (*count_array_u64_write)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 val);
+ int (*signal_array_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 val);
+ };
+};
+
+/**
+ * struct counter_signal - Counter Signal node
+ * @id: unique ID used to identify the Signal
+ * @name: device-specific Signal name
+ * @ext: optional array of Signal extensions
+ * @num_ext: number of Signal extensions specified in @ext
+ */
+struct counter_signal {
+ int id;
+ const char *name;
+
+ struct counter_comp *ext;
+ size_t num_ext;
+};
+
+/**
+ * struct counter_synapse - Counter Synapse node
+ * @actions_list: array of available action modes
+ * @num_actions: number of action modes specified in @actions_list
+ * @signal: pointer to the associated Signal
+ */
+struct counter_synapse {
+ const enum counter_synapse_action *actions_list;
+ size_t num_actions;
+
+ struct counter_signal *signal;
+};
+
+/**
+ * struct counter_count - Counter Count node
+ * @id: unique ID used to identify the Count
+ * @name: device-specific Count name
+ * @functions_list: array of available function modes
+ * @num_functions: number of function modes specified in @functions_list
+ * @synapses: array of Synapses for initialization
+ * @num_synapses: number of Synapses specified in @synapses
+ * @ext: optional array of Count extensions
+ * @num_ext: number of Count extensions specified in @ext
+ */
+struct counter_count {
+ int id;
+ const char *name;
+
+ const enum counter_function *functions_list;
+ size_t num_functions;
+
+ struct counter_synapse *synapses;
+ size_t num_synapses;
+
+ struct counter_comp *ext;
+ size_t num_ext;
+};
+
+/**
+ * struct counter_event_node - Counter Event node
+ * @l: list of current watching Counter events
+ * @event: event that triggers
+ * @channel: event channel
+ * @comp_list: list of components to watch when event triggers
+ */
+struct counter_event_node {
+ struct list_head l;
+ u8 event;
+ u8 channel;
+ struct list_head comp_list;
+};
+
+/**
+ * struct counter_ops - Callbacks from driver
+ * @signal_read: optional read callback for Signals. The read level of
+ * the respective Signal should be passed back via the
+ * level parameter.
+ * @count_read: read callback for Counts. The read value of the
+ * respective Count should be passed back via the value
+ * parameter.
+ * @count_write: optional write callback for Counts. The write value for
+ * the respective Count is passed in via the value
+ * parameter.
+ * @function_read: read callback for the Count function modes. The read
+ * function mode of the respective Count should be passed
+ * back via the function parameter.
+ * @function_write: optional write callback for Count function modes. The
+ * function mode to write for the respective Count is
+ * passed in via the function parameter.
+ * @action_read: optional read callback for the Synapse action modes. The
+ * read action mode of the respective Synapse should be
+ * passed back via the action parameter.
+ * @action_write: optional write callback for Synapse action modes. The
+ * action mode to write for the respective Synapse is
+ * passed in via the action parameter.
+ * @events_configure: optional write callback to configure events. The list of
+ * struct counter_event_node may be accessed via the
+ * events_list member of the counter parameter.
+ * @watch_validate: optional callback to validate a watch. The Counter
+ * component watch configuration is passed in via the watch
+ * parameter. A return value of 0 indicates a valid Counter
+ * component watch configuration.
+ */
+struct counter_ops {
+ int (*signal_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ enum counter_signal_level *level);
+ int (*count_read)(struct counter_device *counter,
+ struct counter_count *count, u64 *value);
+ int (*count_write)(struct counter_device *counter,
+ struct counter_count *count, u64 value);
+ int (*function_read)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function);
+ int (*function_write)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function);
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*events_configure)(struct counter_device *counter);
+ int (*watch_validate)(struct counter_device *counter,
+ const struct counter_watch *watch);
+};
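A minimal ops table sketch (the driver state and register are hypothetical); count_read is the only callback not documented as optional above:

	struct example_cnt_priv {
		void __iomem *base;		/* hypothetical driver state */
	};

	#define EXAMPLE_COUNT_REG	0x0	/* hypothetical register */

	static int example_count_read(struct counter_device *counter,
				      struct counter_count *count, u64 *value)
	{
		struct example_cnt_priv *priv = counter_priv(counter);

		*value = readl(priv->base + EXAMPLE_COUNT_REG);
		return 0;
	}

	static const struct counter_ops example_counter_ops = {
		.count_read = example_count_read,
	};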
+
+/**
+ * struct counter_device - Counter data structure
+ * @name: name of the device
+ * @parent: optional parent device providing the counters
+ * @ops: callbacks from driver
+ * @signals: array of Signals
+ * @num_signals: number of Signals specified in @signals
+ * @counts: array of Counts
+ * @num_counts: number of Counts specified in @counts
+ * @ext: optional array of Counter device extensions
+ * @num_ext: number of Counter device extensions specified in @ext
+ * @dev: internal device structure
+ * @chrdev: internal character device structure
+ * @events_list: list of current watching Counter events
+ * @events_list_lock: lock to protect Counter events list operations
+ * @next_events_list: list of next watching Counter events
+ * @n_events_list_lock: lock to protect Counter next events list operations
+ * @events: queue of detected Counter events
+ * @events_wait: wait queue to allow blocking reads of Counter events
+ * @events_in_lock: lock to protect Counter events queue in operations
+ * @events_out_lock: lock to protect Counter events queue out operations
+ * @ops_exist_lock: lock to prevent use during removal
+ */
+struct counter_device {
+ const char *name;
+ struct device *parent;
+
+ const struct counter_ops *ops;
+
+ struct counter_signal *signals;
+ size_t num_signals;
+ struct counter_count *counts;
+ size_t num_counts;
+
+ struct counter_comp *ext;
+ size_t num_ext;
+
+ struct device dev;
+ struct cdev chrdev;
+ struct list_head events_list;
+ spinlock_t events_list_lock;
+ struct list_head next_events_list;
+ struct mutex n_events_list_lock;
+ DECLARE_KFIFO_PTR(events, struct counter_event);
+ wait_queue_head_t events_wait;
+ spinlock_t events_in_lock;
+ struct mutex events_out_lock;
+ struct mutex ops_exist_lock;
+};
+
+void *counter_priv(const struct counter_device *const counter) __attribute_const__;
+
+struct counter_device *counter_alloc(size_t sizeof_priv);
+void counter_put(struct counter_device *const counter);
+int counter_add(struct counter_device *const counter);
+
+void counter_unregister(struct counter_device *const counter);
+struct counter_device *devm_counter_alloc(struct device *dev,
+ size_t sizeof_priv);
+int devm_counter_add(struct device *dev,
+ struct counter_device *const counter);
+void counter_push_event(struct counter_device *const counter, const u8 event,
+ const u8 channel);
+
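The allocation flow then looks like this sketch (the probe is hypothetical; example_cnt_priv and example_counter_ops are from the sketch above): counter_alloc()/devm_counter_alloc() reserve sizeof_priv bytes of driver state, retrievable with counter_priv().

	static int example_counter_probe(struct device *dev)
	{
		struct counter_device *counter;
		struct example_cnt_priv *priv;

		counter = devm_counter_alloc(dev, sizeof(*priv));
		if (!counter)
			return -ENOMEM;

		priv = counter_priv(counter);
		/* priv->base = devm_platform_ioremap_resource(...); */

		counter->name	= dev_name(dev);
		counter->parent	= dev;
		counter->ops	= &example_counter_ops;

		return devm_counter_add(dev, counter);
	}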
+#define COUNTER_COMP_DEVICE_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .device_u64_read = (_read), \
+ .device_u64_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .count_u64_read = (_read), \
+ .count_u64_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .signal_u64_read = (_read), \
+ .signal_u64_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
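As an illustration of these initializer macros, a hedged sketch of a read-only boolean Count extension; the callback, array, and field names are ours:

static int my_error_noise_read(struct counter_device *counter,
			       struct counter_count *count, u8 *noise)
{
	const struct my_counter_priv *const priv = counter_priv(counter);

	*noise = priv->error_noise;	/* hypothetical cached flag */
	return 0;
}

static struct counter_comp my_count_ext[] = {
	COUNTER_COMP_COUNT_BOOL("error_noise", my_error_noise_read, NULL),
};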
+struct counter_available {
+ union {
+ const u32 *enums;
+ const char *const *strs;
+ };
+ size_t num_items;
+};
+
+#define DEFINE_COUNTER_AVAILABLE(_name, _enums) \
+ struct counter_available _name = { \
+ .enums = (_enums), \
+ .num_items = ARRAY_SIZE(_enums), \
+ }
+
+#define DEFINE_COUNTER_ENUM(_name, _strs) \
+ struct counter_available _name = { \
+ .strs = (_strs), \
+ .num_items = ARRAY_SIZE(_strs), \
+ }
+
+#define COUNTER_COMP_DEVICE_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .device_u32_read = (_get), \
+ .device_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_COUNT_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .count_u32_read = (_get), \
+ .count_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_SIGNAL_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .signal_u32_read = (_get), \
+ .signal_u32_write = (_set), \
+ .priv = &(_available), \
+}
+
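Putting DEFINE_COUNTER_ENUM together with the ENUM component macros, a sketch with hypothetical mode names and callbacks:

static int my_mode_read(struct counter_device *counter,
			struct counter_count *count, u32 *mode);
static int my_mode_write(struct counter_device *counter,
			 struct counter_count *count, u32 mode);

static const char *const my_count_modes[] = {
	"single", "dual", "quadrature",
};

static DEFINE_COUNTER_ENUM(my_count_modes_available, my_count_modes);

static struct counter_comp my_mode_ext[] = {
	COUNTER_COMP_COUNT_ENUM("mode", my_mode_read, my_mode_write,
				my_count_modes_available),
};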
+struct counter_array {
+ enum counter_comp_type type;
+ const struct counter_available *avail;
+ union {
+ size_t length;
+ size_t idx;
+ };
+};
+
+#define DEFINE_COUNTER_ARRAY_U64(_name, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_U64, \
+ .length = (_length), \
+ }
+
+#define DEFINE_COUNTER_ARRAY_CAPTURE(_name, _length) \
+ DEFINE_COUNTER_ARRAY_U64(_name, _length)
+
+#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _available, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .avail = &(_available), \
+ .length = (_length), \
+ }
+
+#define COUNTER_COMP_DEVICE_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .device_array_u64_read = (_read), \
+ .device_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_COUNT_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .count_array_u64_read = (_read), \
+ .count_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_SIGNAL_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .signal_array_u64_read = (_read), \
+ .signal_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+
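Likewise for array components, a sketch of a four-sample capture array; the names and length are illustrative:

static int my_capture_read(struct counter_device *counter,
			   struct counter_count *count, size_t idx, u64 *val);

static DEFINE_COUNTER_ARRAY_CAPTURE(my_capture_array, 4);

static struct counter_comp my_capture_ext[] = {
	COUNTER_COMP_COUNT_ARRAY_U64("capture", my_capture_read, NULL,
				     my_capture_array),
};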
+#define COUNTER_COMP_CAPTURE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("capture", _read, _write)
+
+#define COUNTER_COMP_CEILING(_read, _write) \
+ COUNTER_COMP_COUNT_U64("ceiling", _read, _write)
+
+#define COUNTER_COMP_COMPARE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("compare", _read, _write)
+
+#define COUNTER_COMP_COUNT_MODE(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_COUNT_MODE, \
+ .name = "count_mode", \
+ .count_u32_read = (_read), \
+ .count_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_DIRECTION(_read) \
+{ \
+ .type = COUNTER_COMP_COUNT_DIRECTION, \
+ .name = "direction", \
+ .count_u32_read = (_read), \
+}
+
+#define COUNTER_COMP_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("enable", _read, _write)
+
+#define COUNTER_COMP_FLOOR(_read, _write) \
+ COUNTER_COMP_COUNT_U64("floor", _read, _write)
+
+#define COUNTER_COMP_FREQUENCY(_read) \
+ COUNTER_COMP_SIGNAL_U64("frequency", _read, NULL)
+
+#define COUNTER_COMP_POLARITY(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .name = "polarity", \
+ .signal_u32_read = (_read), \
+ .signal_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_PRESET(_read, _write) \
+ COUNTER_COMP_COUNT_U64("preset", _read, _write)
+
+#define COUNTER_COMP_PRESET_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("preset_enable", _read, _write)
+
+#define COUNTER_COMP_ARRAY_CAPTURE(_read, _write, _array) \
+ COUNTER_COMP_COUNT_ARRAY_U64("capture", _read, _write, _array)
+
+#define COUNTER_COMP_ARRAY_POLARITY(_read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = "polarity", \
+ .signal_array_u32_read = (_read), \
+ .signal_array_u32_write = (_write), \
+ .priv = &(_array), \
+}
+
+#endif /* _COUNTER_H_ */
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 4c671fc2081e..5b1236d8c65b 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* UEFI Common Platform Error Record
*
* Copyright (C) 2010, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef LINUX_CPER_H
@@ -44,7 +32,7 @@
*/
#define CPER_REC_LEN 256
/*
- * Severity difinition for error_severity in struct cper_record_header
+ * Severity definition for error_severity in struct cper_record_header
* and section_severity in struct cper_section_descriptor
*/
enum {
@@ -55,55 +43,79 @@ enum {
};
/*
- * Validation bits difinition for validation_bits in struct
+ * Validation bits definition for validation_bits in struct
* cper_record_header. If set, corresponding fields in struct
* cper_record_header contain valid information.
- *
- * corresponds platform_id
*/
#define CPER_VALID_PLATFORM_ID 0x0001
-/* corresponds timestamp */
#define CPER_VALID_TIMESTAMP 0x0002
-/* corresponds partition_id */
#define CPER_VALID_PARTITION_ID 0x0004
/*
* Notification type used to generate error record, used in
- * notification_type in struct cper_record_header
- *
- * Corrected Machine Check
+ * notification_type in struct cper_record_header. These UUIDs are defined
+ * in the UEFI spec v2.7, sec N.2.1.
*/
+
+/* Corrected Machine Check */
#define CPER_NOTIFY_CMC \
- UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
- 0xEB, 0xD4, 0xF8, 0x90)
+ GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
/* Corrected Platform Error */
#define CPER_NOTIFY_CPE \
- UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
- 0xF2, 0x7E, 0xBE, 0xEE)
+ GUID_INIT(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
+ 0xF2, 0x7E, 0xBE, 0xEE)
/* Machine Check Exception */
#define CPER_NOTIFY_MCE \
- UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
- 0xE1, 0x49, 0x13, 0xBB)
+ GUID_INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
/* PCI Express Error */
#define CPER_NOTIFY_PCIE \
- UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
- 0xAF, 0x67, 0xC1, 0x04)
+ GUID_INIT(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
+ 0xAF, 0x67, 0xC1, 0x04)
/* INIT Record (for IPF) */
#define CPER_NOTIFY_INIT \
- UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
- 0xD3, 0x9B, 0xC9, 0x8E)
+ GUID_INIT(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
+ 0xD3, 0x9B, 0xC9, 0x8E)
/* Non-Maskable Interrupt */
#define CPER_NOTIFY_NMI \
- UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
- 0x85, 0xD6, 0xE9, 0x8A)
+ GUID_INIT(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
+ 0x85, 0xD6, 0xE9, 0x8A)
/* BOOT Error Record */
#define CPER_NOTIFY_BOOT \
- UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
- 0xD4, 0x64, 0xB3, 0x8F)
+ GUID_INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
/* DMA Remapping Error */
#define CPER_NOTIFY_DMAR \
- UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
- 0x72, 0x2D, 0xEB, 0x41)
+ GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
+ 0x72, 0x2D, 0xEB, 0x41)
+/* CXL Protocol Error Section */
+#define CPER_SEC_CXL_PROT_ERR \
+ GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
+ 0x4B, 0x77, 0x10, 0x48)
+
+/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CPER_SEC_CXL_GEN_MEDIA_GUID \
+ GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
+ 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
+/*
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CPER_SEC_CXL_DRAM_GUID \
+ GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
+ 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CPER_SEC_CXL_MEM_MODULE_GUID \
+ GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
+ 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
/*
* Flags bits definitions for flags in struct cper_record_header
@@ -122,14 +134,11 @@ enum {
#define CPER_SEC_REV 0x0100
/*
- * Validation bits difinition for validation_bits in struct
+ * Validation bits definition for validation_bits in struct
* cper_section_descriptor. If set, corresponding fields in struct
* cper_section_descriptor contain valid information.
- *
- * corresponds fru_id
*/
#define CPER_SEC_VALID_FRU_ID 0x1
-/* corresponds fru_text */
#define CPER_SEC_VALID_FRU_TEXT 0x2
/*
@@ -165,55 +174,56 @@ enum {
/*
* Section type definitions, used in section_type field in struct
- * cper_section_descriptor
- *
- * Processor Generic
+ * cper_section_descriptor. These UUIDs are defined in the UEFI spec
+ * v2.7, sec N.2.2.
*/
+
+/* Processor Generic */
#define CPER_SEC_PROC_GENERIC \
- UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
- 0x93, 0xC4, 0xF3, 0xDB)
+ GUID_INIT(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
+ 0x93, 0xC4, 0xF3, 0xDB)
/* Processor Specific: X86/X86_64 */
#define CPER_SEC_PROC_IA \
- UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
- 0x24, 0x2B, 0x6E, 0x1D)
+ GUID_INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
/* Processor Specific: IA64 */
#define CPER_SEC_PROC_IPF \
- UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \
- 0x80, 0xC7, 0x3C, 0x88, 0x81)
+ GUID_INIT(0xE429FAF1, 0x3CB7, 0x11D4, 0xBC, 0xA7, 0x00, 0x80, \
+ 0xC7, 0x3C, 0x88, 0x81)
/* Processor Specific: ARM */
#define CPER_SEC_PROC_ARM \
- UUID_LE(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \
- 0x1D, 0x5D, 0x46, 0xB0)
+ GUID_INIT(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \
+ 0x1D, 0x5D, 0x46, 0xB0)
/* Platform Memory */
#define CPER_SEC_PLATFORM_MEM \
- UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
- 0xED, 0x7C, 0x83, 0xB1)
+ GUID_INIT(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
+ 0xED, 0x7C, 0x83, 0xB1)
#define CPER_SEC_PCIE \
- UUID_LE(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
- 0xCB, 0x3C, 0x6F, 0x35)
+ GUID_INIT(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
+ 0xCB, 0x3C, 0x6F, 0x35)
/* Firmware Error Record Reference */
#define CPER_SEC_FW_ERR_REC_REF \
- UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
- 0x9C, 0x8E, 0x69, 0xED)
+ GUID_INIT(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
+ 0x9C, 0x8E, 0x69, 0xED)
/* PCI/PCI-X Bus */
#define CPER_SEC_PCI_X_BUS \
- UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
- 0xD3, 0xF9, 0xC9, 0xDD)
+ GUID_INIT(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
+ 0xD3, 0xF9, 0xC9, 0xDD)
/* PCI Component/Device */
#define CPER_SEC_PCI_DEV \
- UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
- 0x8B, 0x00, 0x13, 0x26)
+ GUID_INIT(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
+ 0x8B, 0x00, 0x13, 0x26)
#define CPER_SEC_DMAR_GENERIC \
- UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
- 0xDE, 0x3E, 0x2C, 0x64)
+ GUID_INIT(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
+ 0xDE, 0x3E, 0x2C, 0x64)
/* Intel VT for Directed I/O specific DMAr */
#define CPER_SEC_DMAR_VT \
- UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
- 0xDD, 0x93, 0xE8, 0xCF)
+ GUID_INIT(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
+ 0xDD, 0x93, 0xE8, 0xCF)
/* IOMMU specific DMAr */
#define CPER_SEC_DMAR_IOMMU \
- UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
- 0xDF, 0xAA, 0x84, 0xEC)
+ GUID_INIT(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
+ 0xDF, 0xAA, 0x84, 0xEC)
#define CPER_PROC_VALID_TYPE 0x0001
#define CPER_PROC_VALID_ISA 0x0002
@@ -247,6 +257,18 @@ enum {
#define CPER_MEM_VALID_RANK_NUMBER 0x8000
#define CPER_MEM_VALID_CARD_HANDLE 0x10000
#define CPER_MEM_VALID_MODULE_HANDLE 0x20000
+#define CPER_MEM_VALID_ROW_EXT 0x40000
+#define CPER_MEM_VALID_BANK_GROUP 0x80000
+#define CPER_MEM_VALID_BANK_ADDRESS 0x100000
+#define CPER_MEM_VALID_CHIP_ID 0x200000
+
+#define CPER_MEM_EXT_ROW_MASK 0x3
+#define CPER_MEM_EXT_ROW_SHIFT 16
+
+#define CPER_MEM_BANK_ADDRESS_MASK 0xff
+#define CPER_MEM_BANK_GROUP_SHIFT 8
+
+#define CPER_MEM_CHIP_ID_SHIFT 5
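A decoder-side sketch of how the bank field splits once the corresponding validation bits are set; the helper name is ours, not part of the header:

static void my_decode_bank(const struct cper_sec_mem_err *mem,
			   u32 *group, u32 *address)
{
	*group = mem->bank >> CPER_MEM_BANK_GROUP_SHIFT;
	*address = mem->bank & CPER_MEM_BANK_ADDRESS_MASK;
}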
#define CPER_PCIE_VALID_PORT_TYPE 0x0001
#define CPER_PCIE_VALID_VERSION 0x0002
@@ -275,233 +297,318 @@ enum {
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
+#define CPER_ARM_ERR_TYPE_MASK GENMASK(4, 1)
+#define CPER_ARM_CACHE_ERROR BIT(1)
+#define CPER_ARM_TLB_ERROR BIT(2)
+#define CPER_ARM_BUS_ERROR BIT(3)
+#define CPER_ARM_VENDOR_ERROR BIT(4)
+
+#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
+#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
+#define CPER_ARM_ERR_VALID_LEVEL BIT(2)
+#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3)
+#define CPER_ARM_ERR_VALID_CORRECTED BIT(4)
+#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5)
+#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6)
+#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7)
+#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8)
+#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9)
+#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10)
+#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11)
+
+#define CPER_ARM_ERR_TRANSACTION_SHIFT 16
+#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1, 0)
+#define CPER_ARM_ERR_OPERATION_SHIFT 18
+#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3, 0)
+#define CPER_ARM_ERR_LEVEL_SHIFT 22
+#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2, 0)
+#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25
+#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_CORRECTED_SHIFT 26
+#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27
+#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28
+#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1, 0)
+#define CPER_ARM_ERR_TIME_OUT_SHIFT 31
+#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32
+#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1, 0)
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8, 0)
+#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43
+#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0, 0)
+
/*
* All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS
*/
#pragma pack(1)
+/* Record Header, UEFI v2.7 sec N.2.1 */
struct cper_record_header {
char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */
- __u16 revision; /* must be CPER_RECORD_REV */
- __u32 signature_end; /* must be CPER_SIG_END */
- __u16 section_count;
- __u32 error_severity;
- __u32 validation_bits;
- __u32 record_length;
- __u64 timestamp;
- uuid_le platform_id;
- uuid_le partition_id;
- uuid_le creator_id;
- uuid_le notification_type;
- __u64 record_id;
- __u32 flags;
- __u64 persistence_information;
- __u8 reserved[12]; /* must be zero */
+ u16 revision; /* must be CPER_RECORD_REV */
+ u32 signature_end; /* must be CPER_SIG_END */
+ u16 section_count;
+ u32 error_severity;
+ u32 validation_bits;
+ u32 record_length;
+ u64 timestamp;
+ guid_t platform_id;
+ guid_t partition_id;
+ guid_t creator_id;
+ guid_t notification_type;
+ u64 record_id;
+ u32 flags;
+ u64 persistence_information;
+ u8 reserved[12]; /* must be zero */
};
+/* Section Descriptor, UEFI v2.7 sec N.2.2 */
struct cper_section_descriptor {
- __u32 section_offset; /* Offset in bytes of the
+ u32 section_offset; /* Offset in bytes of the
* section body from the base
* of the record header */
- __u32 section_length;
- __u16 revision; /* must be CPER_RECORD_REV */
- __u8 validation_bits;
- __u8 reserved; /* must be zero */
- __u32 flags;
- uuid_le section_type;
- uuid_le fru_id;
- __u32 section_severity;
- __u8 fru_text[20];
+ u32 section_length;
+ u16 revision; /* must be CPER_RECORD_REV */
+ u8 validation_bits;
+ u8 reserved; /* must be zero */
+ u32 flags;
+ guid_t section_type;
+ guid_t fru_id;
+ u32 section_severity;
+ u8 fru_text[20];
};
-/* Generic Processor Error Section */
+/* Generic Processor Error Section, UEFI v2.7 sec N.2.4.1 */
struct cper_sec_proc_generic {
- __u64 validation_bits;
- __u8 proc_type;
- __u8 proc_isa;
- __u8 proc_error_type;
- __u8 operation;
- __u8 flags;
- __u8 level;
- __u16 reserved;
- __u64 cpu_version;
+ u64 validation_bits;
+ u8 proc_type;
+ u8 proc_isa;
+ u8 proc_error_type;
+ u8 operation;
+ u8 flags;
+ u8 level;
+ u16 reserved;
+ u64 cpu_version;
char cpu_brand[128];
- __u64 proc_id;
- __u64 target_addr;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 ip;
+ u64 proc_id;
+ u64 target_addr;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 ip;
};
-/* IA32/X64 Processor Error Section */
+/* IA32/X64 Processor Error Section, UEFI v2.7 sec N.2.4.2 */
struct cper_sec_proc_ia {
- __u64 validation_bits;
- __u8 lapic_id;
- __u8 cpuid[48];
+ u64 validation_bits;
+ u64 lapic_id;
+ u8 cpuid[48];
};
-/* IA32/X64 Processor Error Information Structure */
+/* IA32/X64 Processor Error Information Structure, UEFI v2.7 sec N.2.4.2.1 */
struct cper_ia_err_info {
- uuid_le err_type;
- __u64 validation_bits;
- __u64 check_info;
- __u64 target_id;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 ip;
+ guid_t err_type;
+ u64 validation_bits;
+ u64 check_info;
+ u64 target_id;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 ip;
};
-/* IA32/X64 Processor Context Information Structure */
+/* IA32/X64 Processor Context Information Structure, UEFI v2.7 sec N.2.4.2.2 */
struct cper_ia_proc_ctx {
- __u16 reg_ctx_type;
- __u16 reg_arr_size;
- __u32 msr_addr;
- __u64 mm_reg_addr;
+ u16 reg_ctx_type;
+ u16 reg_arr_size;
+ u32 msr_addr;
+ u64 mm_reg_addr;
};
-/* ARM Processor Error Section */
+/* ARM Processor Error Section, UEFI v2.7 sec N.2.4.4 */
struct cper_sec_proc_arm {
- __u32 validation_bits;
- __u16 err_info_num; /* Number of Processor Error Info */
- __u16 context_info_num; /* Number of Processor Context Info Records*/
- __u32 section_length;
- __u8 affinity_level;
- __u8 reserved[3]; /* must be zero */
- __u64 mpidr;
- __u64 midr;
- __u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */
- __u32 psci_state;
+ u32 validation_bits;
+ u16 err_info_num; /* Number of Processor Error Info */
+ u16 context_info_num; /* Number of Processor Context Info Records */
+ u32 section_length;
+ u8 affinity_level;
+ u8 reserved[3]; /* must be zero */
+ u64 mpidr;
+ u64 midr;
+ u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */
+ u32 psci_state;
};
-/* ARM Processor Error Information Structure */
+/* ARM Processor Error Information Structure, UEFI v2.7 sec N.2.4.4.1 */
struct cper_arm_err_info {
- __u8 version;
- __u8 length;
- __u16 validation_bits;
- __u8 type;
- __u16 multiple_error;
- __u8 flags;
- __u64 error_info;
- __u64 virt_fault_addr;
- __u64 physical_fault_addr;
+ u8 version;
+ u8 length;
+ u16 validation_bits;
+ u8 type;
+ u16 multiple_error;
+ u8 flags;
+ u64 error_info;
+ u64 virt_fault_addr;
+ u64 physical_fault_addr;
};
-/* ARM Processor Context Information Structure */
+/* ARM Processor Context Information Structure, UEFI v2.7 sec N.2.4.4.2 */
struct cper_arm_ctx_info {
- __u16 version;
- __u16 type;
- __u32 size;
+ u16 version;
+ u16 type;
+ u32 size;
};
-/* Old Memory Error Section UEFI 2.1, 2.2 */
+/* Old Memory Error Section, UEFI v2.1, v2.2 */
struct cper_sec_mem_err_old {
- __u64 validation_bits;
- __u64 error_status;
- __u64 physical_addr;
- __u64 physical_addr_mask;
- __u16 node;
- __u16 card;
- __u16 module;
- __u16 bank;
- __u16 device;
- __u16 row;
- __u16 column;
- __u16 bit_pos;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 target_id;
- __u8 error_type;
+ u64 validation_bits;
+ u64 error_status;
+ u64 physical_addr;
+ u64 physical_addr_mask;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_pos;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u8 error_type;
};
-/* Memory Error Section UEFI >= 2.3 */
+/* Memory Error Section (UEFI >= v2.3), UEFI v2.8 sec N.2.5 */
struct cper_sec_mem_err {
- __u64 validation_bits;
- __u64 error_status;
- __u64 physical_addr;
- __u64 physical_addr_mask;
- __u16 node;
- __u16 card;
- __u16 module;
- __u16 bank;
- __u16 device;
- __u16 row;
- __u16 column;
- __u16 bit_pos;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 target_id;
- __u8 error_type;
- __u8 reserved;
- __u16 rank;
- __u16 mem_array_handle; /* card handle in UEFI 2.4 */
- __u16 mem_dev_handle; /* module handle in UEFI 2.4 */
+ u64 validation_bits;
+ u64 error_status;
+ u64 physical_addr;
+ u64 physical_addr_mask;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_pos;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u8 error_type;
+ u8 extended;
+ u16 rank;
+ u16 mem_array_handle; /* "card handle" in UEFI 2.4 */
+ u16 mem_dev_handle; /* "module handle" in UEFI 2.4 */
};
struct cper_mem_err_compact {
- __u64 validation_bits;
- __u16 node;
- __u16 card;
- __u16 module;
- __u16 bank;
- __u16 device;
- __u16 row;
- __u16 column;
- __u16 bit_pos;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 target_id;
- __u16 rank;
- __u16 mem_array_handle;
- __u16 mem_dev_handle;
+ u64 validation_bits;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_pos;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u16 rank;
+ u16 mem_array_handle;
+ u16 mem_dev_handle;
+ u8 extended;
};
+static inline u32 cper_get_mem_extension(u64 mem_valid, u8 mem_extended)
+{
+ if (!(mem_valid & CPER_MEM_VALID_ROW_EXT))
+ return 0;
+ return (mem_extended & CPER_MEM_EXT_ROW_MASK) << CPER_MEM_EXT_ROW_SHIFT;
+}
+
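A consumer sketch combining the 16-bit row field with the two extension bits to recover the full row address; the wrapper name is illustrative:

static u32 my_full_row(const struct cper_mem_err_compact *mem)
{
	return mem->row |
	       cper_get_mem_extension(mem->validation_bits, mem->extended);
}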
+/* PCI Express Error Section, UEFI v2.7 sec N.2.7 */
struct cper_sec_pcie {
- __u64 validation_bits;
- __u32 port_type;
+ u64 validation_bits;
+ u32 port_type;
struct {
- __u8 minor;
- __u8 major;
- __u8 reserved[2];
+ u8 minor;
+ u8 major;
+ u8 reserved[2];
} version;
- __u16 command;
- __u16 status;
- __u32 reserved;
+ u16 command;
+ u16 status;
+ u32 reserved;
struct {
- __u16 vendor_id;
- __u16 device_id;
- __u8 class_code[3];
- __u8 function;
- __u8 device;
- __u16 segment;
- __u8 bus;
- __u8 secondary_bus;
- __u16 slot;
- __u8 reserved;
+ u16 vendor_id;
+ u16 device_id;
+ u8 class_code[3];
+ u8 function;
+ u8 device;
+ u16 segment;
+ u8 bus;
+ u8 secondary_bus;
+ u16 slot;
+ u8 reserved;
} device_id;
struct {
- __u32 lower;
- __u32 upper;
+ u32 lower;
+ u32 upper;
} serial_number;
struct {
- __u16 secondary_status;
- __u16 control;
+ u16 secondary_status;
+ u16 control;
} bridge;
- __u8 capability[60];
- __u8 aer_info[96];
+ u8 capability[60];
+ u8 aer_info[96];
+};
+
+/* Firmware Error Record Reference, UEFI v2.7 sec N.2.10 */
+struct cper_sec_fw_err_rec_ref {
+ u8 record_type;
+ u8 revision;
+ u8 reserved[6];
+ u64 record_identifier;
+ guid_t record_identifier_guid;
};
/* Reset to default packing */
#pragma pack()
+extern const char *const cper_proc_error_type_strs[4];
+
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
+const char *cper_mem_err_status_str(u64 status);
void cper_print_bits(const char *prefix, unsigned int bits,
const char * const strs[], unsigned int strs_size);
+int cper_bits_to_str(char *buf, int buf_size, unsigned long bits,
+ const char * const strs[], unsigned int strs_size);
void cper_mem_err_pack(const struct cper_sec_mem_err *,
struct cper_mem_err_compact *);
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc);
+void cper_print_proc_ia(const char *pfx,
+ const struct cper_sec_proc_ia *proc);
+int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg);
+int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg);
+
+struct acpi_hest_generic_status;
+void cper_estatus_print(const char *pfx,
+ const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
+
+struct cxl_cper_sec_prot_err;
+void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err);
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ca73bc1563f4..487b3bf2e1ea 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/cpu.h - generic cpu definition
*
@@ -15,8 +16,9 @@
#include <linux/node.h>
#include <linux/compiler.h>
-#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>
+#include <linux/cpuhplock.h>
+#include <linux/cpu_smt.h>
struct device;
struct device_node;
@@ -29,7 +31,7 @@ struct cpu {
};
extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);
@@ -46,43 +48,81 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
+extern ssize_t cpu_show_meltdown(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_gds(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_old_microcode(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);
+
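Each hook follows the same shape; a sketch of how an architecture that is not affected by a given issue might satisfy one of them:

ssize_t cpu_show_meltdown(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}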
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...);
+extern bool arch_cpu_is_hotpluggable(int cpu);
+extern int arch_register_cpu(int cpu);
+extern void arch_unregister_cpu(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
-struct notifier_block;
-
-#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
-#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
- * lock is dropped */
-#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly,
- * perhaps due to preemption. */
-
-/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
- * operation in progress
- */
-#define CPU_TASKS_FROZEN 0x0010
-#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
-#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
-#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
-#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+DECLARE_PER_CPU(struct cpu, cpu_devices);
+#endif
+
+/*
+ * These states are not related to the core CPU hotplug mechanism. They are
+ * used by various (sub)architectures to track internal state
+ */
+#define CPU_ONLINE 0x0002 /* CPU is up */
+#define CPU_UP_PREPARE 0x0003 /* CPU coming up */
+#define CPU_DEAD 0x0007 /* CPU dead */
+#define CPU_DEAD_FROZEN 0x0008 /* CPU timed out on unplug */
+#define CPU_POST_DEAD 0x0009 /* CPU successfully unplugged */
+#define CPU_BROKEN 0x000B /* CPU did not die properly */
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
-int cpu_up(unsigned int cpu);
+int add_cpu(unsigned int cpu);
+int cpu_device_up(struct device *dev);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
+int bringup_hibernate_cpu(unsigned int sleep_cpu);
+void bringup_nonboot_cpus(unsigned int max_cpus);
+int arch_cpu_rescan_dead_smt_siblings(void);
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
@@ -95,75 +135,98 @@ static inline void cpu_maps_update_done(void)
{
}
-#endif /* CONFIG_SMP */
-extern struct bus_type cpu_subsys;
+static inline int add_cpu(unsigned int cpu) { return 0; }
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpus_write_lock(void);
-extern void cpus_write_unlock(void);
-extern void cpus_read_lock(void);
-extern void cpus_read_unlock(void);
-extern void lockdep_assert_cpus_held(void);
-extern void cpu_hotplug_disable(void);
-extern void cpu_hotplug_enable(void);
-void clear_tasks_mm_cpumask(int cpu);
-int cpu_down(unsigned int cpu);
-
-#else /* CONFIG_HOTPLUG_CPU */
-
-static inline void cpus_write_lock(void) { }
-static inline void cpus_write_unlock(void) { }
-static inline void cpus_read_lock(void) { }
-static inline void cpus_read_unlock(void) { }
-static inline void lockdep_assert_cpus_held(void) { }
-static inline void cpu_hotplug_disable(void) { }
-static inline void cpu_hotplug_enable(void) { }
-#endif /* !CONFIG_HOTPLUG_CPU */
-
-/* Wrappers which go away once all code is converted */
-static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
-static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
-static inline void get_online_cpus(void) { cpus_read_lock(); }
-static inline void put_online_cpus(void) { cpus_read_unlock(); }
+static inline int arch_cpu_rescan_dead_smt_siblings(void) { return 0; }
+
+#endif /* CONFIG_SMP */
+extern const struct bus_type cpu_subsys;
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
-static inline int disable_nonboot_cpus(void)
+extern void thaw_secondary_cpus(void);
+
+static inline int suspend_disable_secondary_cpus(void)
+{
+ int cpu = 0;
+
+ if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
+ cpu = -1;
+
+ return freeze_secondary_cpus(cpu);
+}
+static inline void suspend_enable_secondary_cpus(void)
{
- return freeze_secondary_cpus(0);
+ thaw_secondary_cpus();
}
-extern void enable_nonboot_cpus(void);
+
#else /* !CONFIG_PM_SLEEP_SMP */
-static inline int disable_nonboot_cpus(void) { return 0; }
-static inline void enable_nonboot_cpus(void) {}
+static inline void thaw_secondary_cpus(void) {}
+static inline int suspend_disable_secondary_cpus(void) { return 0; }
+static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */
-void cpu_startup_entry(enum cpuhp_state state);
+void __noreturn cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
-/* Attach to any functions which should be considered cpuidle. */
-#define __cpuidle __attribute__((__section__(".cpuidle.text")))
-
bool cpu_in_idle(unsigned long pc);
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
-void arch_cpu_idle_dead(void);
+void arch_tick_broadcast_enter(void);
+void arch_tick_broadcast_exit(void);
+void __noreturn arch_cpu_idle_dead(void);
-int cpu_report_state(int cpu);
-int cpu_check_up_prepare(int cpu);
-void cpu_set_state_online(int cpu);
-void play_idle(unsigned long duration_ms);
+#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
+void arch_cpu_finalize_init(void);
+#else
+static inline void arch_cpu_finalize_init(void) { }
+#endif
+
+void play_idle_precise(u64 duration_ns, u64 latency_ns);
#ifdef CONFIG_HOTPLUG_CPU
-bool cpu_wait_death(unsigned int cpu, int seconds);
-bool cpu_report_death(void);
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+enum cpu_attack_vectors {
+ CPU_MITIGATE_USER_KERNEL,
+ CPU_MITIGATE_USER_USER,
+ CPU_MITIGATE_GUEST_HOST,
+ CPU_MITIGATE_GUEST_GUEST,
+ NR_CPU_ATTACK_VECTORS,
+};
+
+enum smt_mitigations {
+ SMT_MITIGATIONS_OFF,
+ SMT_MITIGATIONS_AUTO,
+ SMT_MITIGATIONS_ON,
+};
+
+#ifdef CONFIG_CPU_MITIGATIONS
+extern bool cpu_mitigations_off(void);
+extern bool cpu_mitigations_auto_nosmt(void);
+extern bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v);
+extern enum smt_mitigations smt_mitigations;
+#else
+static inline bool cpu_mitigations_off(void)
+{
+ return true;
+}
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+ return false;
+}
+static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
+{
+ return false;
+}
+#define smt_mitigations SMT_MITIGATIONS_OFF
+#endif
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index d4292ebc5c8b..2c774fb3c091 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/cpu_cooling.h
*
@@ -5,18 +6,6 @@
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -26,14 +15,10 @@
#include <linux/of.h>
#include <linux/thermal.h>
-#include <linux/cpumask.h>
struct cpufreq_policy;
-typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
- unsigned long voltage, u32 *power);
-
-#ifdef CONFIG_CPU_THERMAL
+#ifdef CONFIG_CPU_FREQ_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @policy: cpufreq policy.
@@ -41,83 +26,47 @@ typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy);
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func);
+/**
+ * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
+ * @cdev: thermal cooling device pointer.
+ */
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
- * @np: a valid struct device_node to the cooling device device tree node.
* @policy: cpufreq policy.
*/
-#ifdef CONFIG_THERMAL_OF
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy);
-
struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func);
-#else
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- return ERR_PTR(-ENOSYS);
-}
+of_cpufreq_cooling_register(struct cpufreq_policy *policy);
-static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
-{
- return NULL;
-}
-#endif
-
-/**
- * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
- * @cdev: thermal cooling device pointer.
- */
-void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
-
-#else /* !CONFIG_CPU_THERMAL */
+#else /* !CONFIG_CPU_FREQ_THERMAL */
static inline struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func)
-{
- return NULL;
-}
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
- return ERR_PTR(-ENOSYS);
+ return;
}
static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return NULL;
}
+#endif /* CONFIG_CPU_FREQ_THERMAL */
-static inline
-void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+struct cpuidle_driver;
+
+#ifdef CONFIG_CPU_IDLE_THERMAL
+void cpuidle_cooling_register(struct cpuidle_driver *drv);
+#else /* CONFIG_CPU_IDLE_THERMAL */
+static inline void cpuidle_cooling_register(struct cpuidle_driver *drv)
{
- return;
}
-#endif /* CONFIG_CPU_THERMAL */
+#endif /* CONFIG_CPU_IDLE_THERMAL */
#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
index 455b233dd3b1..552b8f9ea05e 100644
--- a/include/linux/cpu_pm.h
+++ b/include/linux/cpu_pm.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Google, Inc.
*
* Author:
* Colin Cross <ccross@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _LINUX_CPU_PM_H
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index bdd18caa6c94..2fd7ba75362a 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -1,16 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __LINUX_CPU_RMAP_H
#define __LINUX_CPU_RMAP_H
/*
* cpu_rmap.c: CPU affinity reverse-map support
* Copyright 2011 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
*/
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/kref.h>
@@ -19,23 +16,23 @@
* struct cpu_rmap - CPU affinity reverse-map
* @refcount: kref for object
* @size: Number of objects to be reverse-mapped
- * @used: Number of objects added
* @obj: Pointer to array of object pointers
* @near: For each CPU, the index and distance to the nearest object,
* based on affinity masks
*/
struct cpu_rmap {
struct kref refcount;
- u16 size, used;
+ u16 size;
void **obj;
struct {
u16 index;
u16 dist;
- } near[0];
+ } near[];
};
#define CPU_RMAP_DIST_INF 0xffff
extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+extern void cpu_rmap_get(struct cpu_rmap *rmap);
extern int cpu_rmap_put(struct cpu_rmap *rmap);
extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
@@ -64,6 +61,7 @@ static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
}
extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
#endif /* __LINUX_CPU_RMAP_H */
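A typical build-up of an IRQ affinity reverse-map, sketched with hypothetical caller names:

static struct cpu_rmap *my_setup_rmap(const int *irqs, unsigned int nvec)
{
	struct cpu_rmap *rmap;
	unsigned int i;

	rmap = alloc_irq_cpu_rmap(nvec);
	if (!rmap)
		return NULL;

	for (i = 0; i < nvec; i++) {
		if (irq_cpu_rmap_add(rmap, irqs[i]) < 0) {
			free_irq_cpu_rmap(rmap);
			return NULL;
		}
	}

	return rmap;
}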
diff --git a/include/linux/cpu_smt.h b/include/linux/cpu_smt.h
new file mode 100644
index 000000000000..0c1664294b57
--- /dev/null
+++ b/include/linux/cpu_smt.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CPU_SMT_H_
+#define _LINUX_CPU_SMT_H_
+
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED,
+ CPU_SMT_DISABLED,
+ CPU_SMT_FORCE_DISABLED,
+ CPU_SMT_NOT_SUPPORTED,
+ CPU_SMT_NOT_IMPLEMENTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern unsigned int cpu_smt_num_threads;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads);
+extern bool cpu_smt_possible(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
+#else
+# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
+# define cpu_smt_num_threads 1
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads) { }
+static inline bool cpu_smt_possible(void) { return false; }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
+#endif
+
+#endif /* _LINUX_CPU_SMT_H_ */
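A caller-side sketch; the reporting function is hypothetical:

static void my_report_smt(void)
{
	if (!cpu_smt_possible())
		pr_info("SMT cannot be enabled on this system\n");
	else if (cpu_smt_control == CPU_SMT_ENABLED)
		pr_info("SMT enabled with %u threads per core\n",
			cpu_smt_num_threads);
}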
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index 986c06c88d81..6aff540ee9e5 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CPUFEATURE_H
@@ -45,7 +42,7 @@
* 'asm/cpufeature.h' of your favorite architecture.
*/
#define module_cpu_feature_match(x, __initfunc) \
-static struct cpu_feature const cpu_feature_match_ ## x[] = \
+static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
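Typical use of the macro, sketched with a hypothetical module entry point and AES as an example feature:

static int __init my_aes_mod_init(void)
{
	return 0;	/* register the accelerated implementation here */
}

module_cpu_feature_match(AES, my_aes_mod_init);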
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 537ff842ff73..0465d1e6f72a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1,35 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/cpufreq.h
*
* Copyright (C) 2001 Russell King
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
+#include <linux/minmax.h>
/*********************************************************************
* CPUFREQ INTERFACE *
*********************************************************************/
/*
* Frequency values here are CPU kHz
- *
- * Maximum transition latency is in nanoseconds - if it's unknown,
- * CPUFREQ_ETERNAL shall be used.
*/
-#define CPUFREQ_ETERNAL (-1)
+#define CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS NSEC_PER_MSEC
+
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
@@ -42,13 +42,6 @@ enum cpufreq_table_sorting {
CPUFREQ_TABLE_SORTED_DESCENDING
};
-struct cpufreq_freqs {
- unsigned int cpu; /* cpu nr */
- unsigned int old;
- unsigned int new;
- u8 flags; /* flags of cpufreq_driver, see below. */
-};
-
struct cpufreq_cpuinfo {
unsigned int max_freq;
unsigned int min_freq;
@@ -57,11 +50,6 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency;
};
-struct cpufreq_user_policy {
- unsigned int min; /* in kHz */
- unsigned int max; /* in kHz */
-};
-
struct cpufreq_policy {
/* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */
@@ -79,7 +67,6 @@ struct cpufreq_policy {
unsigned int max; /* in kHz */
unsigned int cur; /* in kHz, only needed if cpufreq
* governors are used */
- unsigned int restore_freq; /* = policy->cur before transition */
unsigned int suspend_freq; /* freq to set during suspend */
unsigned int policy; /* see above */
@@ -91,7 +78,10 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct cpufreq_user_policy user_policy;
+ struct freq_constraints constraints;
+ struct freq_qos_request *min_freq_req;
+ struct freq_qos_request *max_freq_req;
+
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
@@ -121,6 +111,19 @@ struct cpufreq_policy {
bool fast_switch_enabled;
/*
+ * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
+ * governor.
+ */
+ bool strict_target;
+
+ /*
+ * Set if inefficient frequencies were found in the frequency table.
+ * This indicates if the relation flag CPUFREQ_RELATION_E can be
+ * honored.
+ */
+ bool efficiencies_available;
+
+ /*
* Preferred average time interval between consecutive invocations of
* the driver to set the frequency for this policy. To be set by the
* scaling driver (0, which is the default, means no preference).
@@ -136,9 +139,15 @@ struct cpufreq_policy {
*/
bool dvfs_possible_from_any_cpu;
+ /* Per policy boost enabled flag. */
+ bool boost_enabled;
+
+ /* Per policy boost supported flag. */
+ bool boost_supported;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
- int cached_resolved_idx;
+ unsigned int cached_resolved_idx;
/* Synchronization for frequency transitions */
bool transition_ongoing; /* Tracks transition status */
@@ -151,6 +160,39 @@ struct cpufreq_policy {
/* For cpufreq driver's internal use */
void *driver_data;
+
+ /* Pointer to the cooling device if used for thermal mitigation */
+ struct thermal_cooling_device *cdev;
+
+ struct notifier_block nb_min;
+ struct notifier_block nb_max;
+};
+
+DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
+ down_write(&_T->rwsem), up_write(&_T->rwsem))
+
+DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
+ down_read(&_T->rwsem), up_read(&_T->rwsem))
+
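A scope-based locking sketch using these guards; the accessor is hypothetical:

static unsigned int my_read_cur(struct cpufreq_policy *policy)
{
	guard(cpufreq_policy_read)(policy);

	return policy->cur;	/* rwsem is released on scope exit */
}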
+/*
+ * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
+ * callback for sanitization. That callback is only expected to modify the min
+ * and max values, if necessary, and specifically it must not update the
+ * frequency table.
+ */
+struct cpufreq_policy_data {
+ struct cpufreq_cpuinfo cpuinfo;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int cpu;
+ unsigned int min; /* in kHz */
+ unsigned int max; /* in kHz */
+};
+
+struct cpufreq_freqs {
+ struct cpufreq_policy *policy;
+ unsigned int old;
+ unsigned int new;
+ u8 flags; /* flags of cpufreq_driver, see below. */
};
/* Only for ACPI */
@@ -175,27 +217,43 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
+/* Scope based cleanup macro for cpufreq_policy kobject reference counting */
+DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *, if (_T) cpufreq_cpu_put(_T))
+
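With the cleanup attribute, the reference taken by cpufreq_cpu_get() is dropped automatically; a sketch with a hypothetical accessor:

static unsigned int my_policy_min(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
		cpufreq_cpu_get(cpu);

	return policy ? policy->min : 0;
}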
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+ return cpumask_empty(policy->cpus);
+}
+
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
}
-/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
-extern struct kobject *cpufreq_global_kobject;
-
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
+unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+
+void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
+bool cpufreq_supports_freq_invariance(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
+bool has_target_index(void);
+
+DECLARE_PER_CPU(unsigned long, cpufreq_pressure);
+static inline unsigned long cpufreq_get_pressure(int cpu)
+{
+ return READ_ONCE(per_cpu(cpufreq_pressure, cpu));
+}
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
@@ -209,7 +267,20 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
return 0;
}
+static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
+{
+ return 0;
+}
+static inline bool cpufreq_supports_freq_invariance(void)
+{
+ return false;
+}
static inline void disable_cpufreq(void) { }
+static inline void cpufreq_update_limits(unsigned int cpu) { }
+static inline unsigned long cpufreq_get_pressure(int cpu)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_CPU_FREQ_STAT
@@ -231,6 +302,12 @@ static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
+/* relation flags */
+#define CPUFREQ_RELATION_E BIT(2) /* Pick an efficient frequency if possible */
+
+#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
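A resolution sketch: ask for the lowest frequency at or above the target, preferring an efficient entry when the table marks any; the wrapper is hypothetical:

static int my_resolve_index(struct cpufreq_policy *policy,
			    unsigned int target_freq)
{
	return cpufreq_frequency_table_target(policy, target_freq,
					      CPUFREQ_RELATION_LE);
}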
struct freq_attr {
struct attribute attr;
@@ -254,39 +331,27 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
static struct freq_attr _name = \
__ATTR(_name, 0200, NULL, store_##_name)
-struct global_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *a, struct attribute *b,
- const char *c, size_t count);
-};
-
#define define_one_global_ro(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define define_one_global_rw(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_driver {
char name[CPUFREQ_NAME_LEN];
- u8 flags;
+ u16 flags;
void *driver_data;
/* needed by all drivers */
int (*init)(struct cpufreq_policy *policy);
- int (*verify)(struct cpufreq_policy *policy);
+ int (*verify)(struct cpufreq_policy_data *policy);
/* define one out of two */
int (*setpolicy)(struct cpufreq_policy *policy);
- /*
- * On failure, should always restore frequency to policy->restore_freq
- * (i.e. old freq).
- */
int (*target)(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation); /* Deprecated */
@@ -294,15 +359,18 @@ struct cpufreq_driver {
unsigned int index);
unsigned int (*fast_switch)(struct cpufreq_policy *policy,
unsigned int target_freq);
-
/*
- * Caches and returns the lowest driver-supported frequency greater than
- * or equal to the target frequency, subject to any driver limitations.
- * Does not set the frequency. Only to be implemented for drivers with
- * target().
+ * ->fast_switch() replacement for drivers that use an internal
+ * representation of performance levels and can pass hints other than
+ * the target performance level to the hardware. This can only be set
+ * if ->fast_switch is set too, because in those cases (under specific
+ * conditions) scale invariance can be disabled, which causes the
+ * schedutil governor to fall back to the latter.
*/
- unsigned int (*resolve_freq)(struct cpufreq_policy *policy,
- unsigned int target_freq);
+ void (*adjust_perf)(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity);
/*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
@@ -310,7 +378,7 @@ struct cpufreq_driver {
*
* get_intermediate should return a stable intermediate frequency
* platform wants to switch to and target_intermediate() should set CPU
- * to to that frequency, before jumping to the frequency corresponding
+ * to that frequency, before jumping to the frequency corresponding
* to 'index'. Core will take care of sending notifications and driver
* doesn't have to handle them in target_intermediate() or
* target_index().
@@ -324,14 +392,18 @@ struct cpufreq_driver {
int (*target_intermediate)(struct cpufreq_policy *policy,
unsigned int index);
- /* should be defined, if possible */
+ /* should be defined, if possible; return 0 on error */
unsigned int (*get)(unsigned int cpu);
+ /* Called to update policy limits on firmware notifications. */
+ void (*update_limits)(struct cpufreq_policy *policy);
+
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
- int (*exit)(struct cpufreq_policy *policy);
- void (*stop_cpu)(struct cpufreq_policy *policy);
+ int (*online)(struct cpufreq_policy *policy);
+ int (*offline)(struct cpufreq_policy *policy);
+ void (*exit)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
@@ -342,18 +414,33 @@ struct cpufreq_driver {
/* platform specific boost support code */
bool boost_enabled;
- int (*set_boost)(int state);
+ int (*set_boost)(struct cpufreq_policy *policy, int state);
+
+ /*
+ * Set by drivers that want to register with the energy model after the
+ * policy is properly initialized, but before the governor is started.
+ */
+ void (*register_em)(struct cpufreq_policy *policy);
};
/* flags */
-#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
- all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other
- kernel "constants" aren't
- affected by frequency
- transitions */
-#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume
- speed mismatches */
+
+/*
+ * Set by drivers that need to update internal upper and lower boundaries along
+ * with the target frequency and so the core and governors should also invoke
+ * the driver if the target frequency does not change, but the policy min or max
+ * may have changed.
+ */
+#define CPUFREQ_NEED_UPDATE_LIMITS BIT(0)
+
+/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
+#define CPUFREQ_CONST_LOOPS BIT(1)
+
+/*
+ * Set by drivers that want the core to automatically register the cpufreq
+ * driver as a thermal cooling device.
+ */
+#define CPUFREQ_IS_COOLING_DEV BIT(2)
/*
* This should be set by platforms having multiple clock-domains, i.e.
@@ -361,14 +448,14 @@ struct cpufreq_driver {
* be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
* governor with different tunables for different clusters.
*/
-#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3)
/*
* Driver will do POSTCHANGE notifications from outside of their ->target()
* routine and so must set cpufreq_driver->flags with this flag, so that core
* can handle them specially.
*/
-#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4)
+#define CPUFREQ_ASYNC_NOTIFICATION BIT(4)
/*
* Set by drivers which want cpufreq core to check if CPU is running at a
@@ -377,41 +464,40 @@ struct cpufreq_driver {
* from the table. And if that fails, we will stop further boot process by
* issuing a BUG_ON().
*/
-#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
+#define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5)
/*
* Set by drivers to disallow use of governors with "dynamic_switching" flag
* set.
*/
-#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING (1 << 6)
+#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
-int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+void cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);
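
As an illustrative aside (not part of the header): a minimal sketch of a driver wiring these callbacks and flags together and registering itself. The my_* names, the frequency values, and my_hw_set_freq() are hypothetical.

static struct cpufreq_frequency_table my_freq_table[] = {
        { .frequency =  500000 },               /* kHz */
        { .frequency = 1000000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int my_init(struct cpufreq_policy *policy)
{
        policy->freq_table = my_freq_table;     /* validated by the core */
        return 0;
}

static int my_target_index(struct cpufreq_policy *policy, unsigned int index)
{
        /* my_hw_set_freq() stands in for the real hardware programming. */
        return my_hw_set_freq(policy->freq_table[index].frequency);
}

static struct cpufreq_driver my_cpufreq_driver = {
        .name           = "my-cpufreq",
        .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .init           = my_init,
        .target_index   = my_target_index,
        .get            = cpufreq_generic_get,
};

static int my_probe(void)
{
        return cpufreq_register_driver(&my_cpufreq_driver);
}
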
-static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
- unsigned int min, unsigned int max)
+static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
+{
+ return IS_ENABLED(CONFIG_CPU_THERMAL) &&
+ (drv->flags & CPUFREQ_IS_COOLING_DEV);
+}
+
+static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
+ unsigned int min,
+ unsigned int max)
{
- if (policy->min < min)
- policy->min = min;
- if (policy->max < min)
- policy->max = min;
- if (policy->min > max)
- policy->min = max;
- if (policy->max > max)
- policy->max = max;
- if (policy->min > policy->max)
- policy->min = policy->max;
- return;
+ policy->max = clamp(policy->max, min, max);
+ policy->min = clamp(policy->min, min, policy->max);
}
static inline void
-cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ policy->cpuinfo.max_freq);
}
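
A sketch of the matching ->verify() callback for a driver whose only constraints are the cpuinfo limits (callback name hypothetical):

static int my_verify(struct cpufreq_policy_data *policy)
{
        /* Clamp the requested min/max into the hardware limits. */
        cpufreq_verify_within_cpu_limits(policy);
        return 0;
}
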
#ifdef CONFIG_CPU_FREQ
@@ -435,8 +521,8 @@ static inline void cpufreq_resume(void) {}
#define CPUFREQ_POSTCHANGE (1)
/* Policy Notifiers */
-#define CPUFREQ_ADJUST (0)
-#define CPUFREQ_NOTIFY (1)
+#define CPUFREQ_CREATE_POLICY (0)
+#define CPUFREQ_REMOVE_POLICY (1)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -489,6 +575,7 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
* CPUFREQ GOVERNORS *
*********************************************************************/
+#define CPUFREQ_POLICY_UNKNOWN (0)
/*
* If (cpufreq_driver->target) exists, the ->governor decides what frequency
* within the limits is used. If (cpufreq_driver->setpolicy) exists, these
@@ -497,14 +584,6 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
-/*
- * The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor. The
- * ondemand governor will work on any processor with transition latency <= 10ms,
- * using appropriate sampling rate.
- */
-#define LATENCY_MULTIPLIER (1000)
-
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int (*init)(struct cpufreq_policy *policy);
@@ -516,15 +595,28 @@ struct cpufreq_governor {
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
- /* For governors which change frequency dynamically by themselves */
- bool dynamic_switching;
struct list_head governor_list;
struct module *owner;
+ u8 flags;
};
+/* Governor flags */
+
+/* For governors which change frequency dynamically by themselves */
+#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
+
+/* For governors wanting the target frequency to be set exactly */
+#define CPUFREQ_GOV_STRICT_TARGET BIT(1)
+
/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq);
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity);
+bool cpufreq_driver_has_adjust_perf(void);
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
@@ -536,16 +628,43 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+int cpufreq_start_governor(struct cpufreq_policy *policy);
+void cpufreq_stop_governor(struct cpufreq_policy *policy);
+
+#define cpufreq_governor_init(__governor) \
+static int __init __governor##_init(void) \
+{ \
+ return cpufreq_register_governor(&__governor); \
+} \
+core_initcall(__governor##_init)
+
+#define cpufreq_governor_exit(__governor) \
+static void __exit __governor##_exit(void) \
+{ \
+ return cpufreq_unregister_governor(&__governor); \
+} \
+module_exit(__governor##_exit)
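
Putting the governor pieces together, a hedged sketch of a dynamically switching governor registered at boot; the names and the trivial ->limits body are illustrative only:

static void my_gov_limits(struct cpufreq_policy *policy)
{
        /* Re-apply policy->min/max after a limits update. */
        cpufreq_policy_apply_limits(policy);
}

static struct cpufreq_governor my_governor = {
        .name   = "mygov",
        .limits = my_gov_limits,
        .flags  = CPUFREQ_GOV_DYNAMIC_SWITCHING,
        .owner  = THIS_MODULE,
};

cpufreq_governor_init(my_governor);
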
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+bool sugov_is_governor(struct cpufreq_policy *policy);
+#else
+static inline bool sugov_is_governor(struct cpufreq_policy *policy)
+{
+ return false;
+}
+#endif
+
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
if (policy->max < policy->cur)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_HE);
else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_LE);
}
/* Governor attribute set */
@@ -559,6 +678,11 @@ struct gov_attr_set {
/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+ return container_of(kobj, struct gov_attr_set, kobj);
+}
+
void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
@@ -571,26 +695,16 @@ struct governor_attr {
size_t count);
};
-static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
-{
- /*
- * Allow remote callbacks if:
- * - dvfs_possible_from_any_cpu flag is set
- * - the local and remote CPUs share cpufreq policy
- */
- return policy->dvfs_possible_from_any_cpu ||
- cpumask_test_cpu(smp_processor_id(), policy->cpus);
-}
-
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
/* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID ~0u
-#define CPUFREQ_TABLE_END ~1u
+#define CPUFREQ_ENTRY_INVALID ~0u
+#define CPUFREQ_TABLE_END ~1u
/* Special Values of .flags field */
-#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_INEFFICIENT_FREQ (1 << 1)
struct cpufreq_frequency_table {
unsigned int flags;
@@ -599,26 +713,6 @@ struct cpufreq_frequency_table {
* order */
};
-#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
-int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-#else
-static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table
- **table)
-{
- return -EINVAL;
-}
-
-static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table
- **table)
-{
-}
-#endif
-
/*
* cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -629,6 +723,18 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
/*
+ * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
+ * with index
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ * @idx: the table entry currently being processed
+ */
+
+#define cpufreq_for_each_entry_idx(pos, table, idx) \
+ for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
+ pos++, idx++)
+
+/*
* cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
* excluding CPUFREQ_ENTRY_INVALID frequencies.
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -641,246 +747,353 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
continue; \
else
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
+/*
+ * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq
+ * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ * @idx: the table entry currently being processed
+ */
+
+#define cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ cpufreq_for_each_entry_idx(pos, table, idx) \
+ if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
+ continue; \
+ else
+
+/**
+ * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq
+ * frequency_table excluding CPUFREQ_ENTRY_INVALID and
+ * CPUFREQ_INEFFICIENT_FREQ frequencies.
+ * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
+ * @table: the &struct cpufreq_frequency_table to iterate over.
+ * @idx: the table entry currently being processed.
+ * @efficiencies: set to true to only iterate over efficient frequencies.
+ */
+
+#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ)) \
+ continue; \
+ else
+
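For example, a hypothetical debug helper can walk only the usable entries with the new _idx iterator:

static void my_dump_table(struct cpufreq_policy *policy)
{
        struct cpufreq_frequency_table *pos;
        int idx;

        /* true: also skip entries flagged CPUFREQ_INEFFICIENT_FREQ */
        cpufreq_for_each_efficient_entry_idx(pos, policy->freq_table, idx, true)
                pr_debug("entry %d: %u kHz\n", idx, pos->frequency);
}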
+
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy);
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy);
+
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation);
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
#ifdef CONFIG_CPU_FREQ
-int cpufreq_boost_trigger_state(int state);
-int cpufreq_boost_enabled(void);
-int cpufreq_enable_boost_support(void);
-bool policy_has_boost_freq(struct cpufreq_policy *policy);
+bool cpufreq_boost_enabled(void);
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state);
/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq >= target_freq)
- return pos - table;
+ return idx;
- best = pos;
+ best = idx;
}
- return best - table;
+ return best;
}
/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq > target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found above target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
- unsigned int target_freq)
+static inline int find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_al(policy, target_freq);
+ return cpufreq_table_find_index_al(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dl(policy, target_freq);
+ return cpufreq_table_find_index_dl(policy, target_freq,
+ efficiencies);
+}
+
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
}
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq < target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found below target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq <= target_freq)
- return pos - table;
+ return idx;
- best = pos;
+ best = idx;
}
- return best - table;
+ return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
- unsigned int target_freq)
+static inline int find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ah(policy, target_freq);
+ return cpufreq_table_find_index_ah(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dh(policy, target_freq);
+ return cpufreq_table_find_index_dh(policy, target_freq,
+ efficiencies);
+}
+
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
}
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq < target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found below target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
/* Choose the closest freq */
- if (target_freq - best->frequency > freq - target_freq)
- return pos - table;
+ if (target_freq - table[best].frequency > freq - target_freq)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq > target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found above target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
/* Choose the closest freq */
- if (best->frequency - target_freq > target_freq - freq)
- return pos - table;
+ if (table[best].frequency - target_freq > target_freq - freq)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
- unsigned int target_freq)
+static inline int find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ac(policy, target_freq);
+ return cpufreq_table_find_index_ac(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dc(policy, target_freq);
+ return cpufreq_table_find_index_dc(policy, target_freq,
+ efficiencies);
+}
+
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
+static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
+ unsigned int min, unsigned int max,
+ int idx)
+{
+ unsigned int freq;
+
+ if (idx < 0)
+ return false;
+
+ freq = policy->freq_table[idx].frequency;
+
+ return freq == clamp_val(freq, min, max);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
+ unsigned int min,
+ unsigned int max,
unsigned int relation)
{
- if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
- return cpufreq_table_index_unsorted(policy, target_freq,
- relation);
+ bool efficiencies = policy->efficiencies_available &&
+ (relation & CPUFREQ_RELATION_E);
+ int idx;
+
+ /* cpufreq_table_index_unsorted() has no use for this flag anyway */
+ relation &= ~CPUFREQ_RELATION_E;
+ if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
+ return cpufreq_table_index_unsorted(policy, target_freq, min,
+ max, relation);
+retry:
switch (relation) {
case CPUFREQ_RELATION_L:
- return cpufreq_table_find_index_l(policy, target_freq);
+ idx = find_index_l(policy, target_freq, min, max, efficiencies);
+ break;
case CPUFREQ_RELATION_H:
- return cpufreq_table_find_index_h(policy, target_freq);
+ idx = find_index_h(policy, target_freq, min, max, efficiencies);
+ break;
case CPUFREQ_RELATION_C:
- return cpufreq_table_find_index_c(policy, target_freq);
+ idx = find_index_c(policy, target_freq, min, max, efficiencies);
+ break;
default:
- pr_err("%s: Invalid relation: %d\n", __func__, relation);
- return -EINVAL;
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ /* Limit frequency index to honor min and max */
+ if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
+ efficiencies = false;
+ goto retry;
}
+
+ return idx;
}
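
A usage sketch from a hypothetical driver ->target() path; my_hw_set_freq() is a stand-in for the actual hardware write:

static int my_target(struct cpufreq_policy *policy, unsigned int target_freq,
                     unsigned int relation)
{
        /* Resolve a table index within policy->min/max for this relation;
         * callers OR in CPUFREQ_RELATION_E to prefer efficient frequencies. */
        int idx = cpufreq_frequency_table_target(policy, target_freq,
                                                 policy->min, policy->max,
                                                 relation);

        return my_hw_set_freq(policy->freq_table[idx].frequency);
}
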
static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
@@ -896,38 +1109,136 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy
return count;
}
-#else
-static inline int cpufreq_boost_trigger_state(int state)
+
+/**
+ * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
+ * @policy: the &struct cpufreq_policy containing the inefficient frequency
+ * @frequency: the inefficient frequency
+ *
+ * The &struct cpufreq_policy must use a sorted frequency table
+ *
+ * Return: %0 on success or a negative errno code
+ */
+
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ struct cpufreq_frequency_table *pos;
+
+ /* Not supported */
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
+ return -EINVAL;
+
+ cpufreq_for_each_valid_entry(pos, policy->freq_table) {
+ if (pos->frequency == frequency) {
+ pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
+ policy->efficiencies_available = true;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
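
A minimal usage sketch, assuming a sorted table in which 1400000 kHz is known (say, from energy-model data) to be inefficient:

static void my_mark_inefficient(struct cpufreq_policy *policy)
{
        /* Lookups passing CPUFREQ_RELATION_E will now skip this entry. */
        if (cpufreq_table_set_inefficient(policy, 1400000))
                pr_warn("1400000 kHz not in table, or table unsorted\n");
}
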
+
+static inline int parse_perf_domain(int cpu, const char *list_name,
+ const char *cell_name,
+ struct of_phandle_args *args)
{
+ int ret;
+
+ struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
+ if (!cpu_np)
+ return -ENODEV;
+
+ ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
+ args);
+ if (ret < 0)
+ return ret;
return 0;
}
-static inline int cpufreq_boost_enabled(void)
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
{
+ int cpu, ret;
+ struct of_phandle_args args;
+
+ ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
+ if (ret < 0)
+ return ret;
+
+ cpumask_set_cpu(pcpu, cpumask);
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == pcpu)
+ continue;
+
+ ret = parse_perf_domain(cpu, list_name, cell_name, &args);
+ if (ret < 0)
+ continue;
+
+ if (of_phandle_args_equal(pargs, &args))
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(args.np);
+ }
+
return 0;
}
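
A hedged caller sketch; the property names follow a hypothetical devicetree binding, not a specific in-tree one:

static int my_get_perf_domain(int cpu, struct cpumask *shared_cpus,
                              struct of_phandle_args *args)
{
        /* Fills shared_cpus with every CPU referencing the same phandle. */
        return of_perf_domain_get_sharing_cpumask(cpu, "perf-domains",
                                                  "#perf-domain-cells",
                                                  shared_cpus, args);
}
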
+#else
+static inline bool cpufreq_boost_enabled(void)
+{
+ return false;
+}
+
+static inline int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
+{
+ return -EOPNOTSUPP;
+}
-static inline int cpufreq_enable_boost_support(void)
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
{
return -EINVAL;
}
-static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
{
- return false;
+ return -EOPNOTSUPP;
}
#endif
-extern unsigned int arch_freq_get_on_cpu(int cpu);
+extern int arch_freq_get_on_cpu(int cpu);
+
+#ifndef arch_set_freq_scale
+static __always_inline
+void arch_set_freq_scale(const struct cpumask *cpus,
+ unsigned long cur_freq,
+ unsigned long max_freq)
+{
+}
+#endif
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
-extern struct freq_attr *cpufreq_generic_attr[];
-int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
+int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
-int cpufreq_generic_init(struct cpufreq_policy *policy,
+void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask);
+
+static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
+{
+ dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
+ policy->related_cpus);
+}
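
Tying back to the ->register_em callback in struct cpufreq_driver: a driver whose OPPs live in the OPP library can simply delegate (a sketch under that assumption):

static struct cpufreq_driver my_opp_driver = {
        /* ... init/target callbacks elided ... */
        .register_em    = cpufreq_register_em_with_opp,
};
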
#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 82b30e638430..62cd7b35a29c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -1,22 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CPUHOTPLUG_H
#define __CPUHOTPLUG_H
#include <linux/types.h>
+/*
+ * CPU-up                       CPU-down
+ *
+ * BP           AP              BP              AP
+ *
+ * OFFLINE                      OFFLINE
+ *   |                            ^
+ *   v                            |
+ * BRINGUP_CPU->AP_OFFLINE      BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead)
+ *                |                             AP_OFFLINE
+ *                v (IRQ-off)     ,---------------^
+ *              AP_ONLINE         | (stop_machine)
+ *                |             TEARDOWN_CPU <- AP_ONLINE_IDLE
+ *                |                               ^
+ *                v                               |
+ *              AP_ACTIVE                       AP_ACTIVE
+ */
+
+/*
+ * CPU hotplug states. The state machine invokes the installed state
+ * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
+ * during a CPU online operation. During a CPU offline operation the
+ * installed teardown callbacks are invoked in the reverse order from
+ * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE.
+ *
+ * The state space has three sections: PREPARE, STARTING and ONLINE.
+ *
+ * PREPARE: The callbacks are invoked on a control CPU before the
+ * hotplugged CPU is started up or after the hotplugged CPU has died.
+ *
+ * STARTING: The callbacks are invoked on the hotplugged CPU from the low level
+ * hotplug startup/teardown code with interrupts disabled.
+ *
+ * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU
+ * hotplug thread with interrupts and preemption enabled.
+ *
+ * Adding explicit states to this enum is only necessary when:
+ *
+ * 1) The state is within the STARTING section
+ *
+ * 2) The state has ordering constraints vs. other states in the
+ * same section.
+ *
+ * If neither #1 nor #2 apply, please use the dynamic state space when
+ * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN
+ * for the @state argument of the setup function.
+ *
+ * See Documentation/core-api/cpu_hotplug.rst for further information and
+ * examples.
+ */
enum cpuhp_state {
- CPUHP_OFFLINE,
+ CPUHP_INVALID = -1,
+
+ /* PREPARE section invoked on a control CPU */
+ CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
- CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
- CPUHP_PERF_BFIN,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
- CPUHP_X86_APB_DEAD,
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
+ CPUHP_IBMVNIC_DEAD,
CPUHP_SLUB_DEAD,
+ CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
@@ -26,6 +79,7 @@ enum cpuhp_state {
CPUHP_ARM_OMAP_WAKE_DEAD,
CPUHP_IRQ_POLL_DEAD,
CPUHP_BLOCK_SOFTIRQ_DEAD,
+ CPUHP_BIO_DEAD,
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
@@ -34,19 +88,19 @@ enum cpuhp_state {
CPUHP_MM_MEMCQ_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
- CPUHP_PAGE_ALLOC_DEAD,
+ CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
- CPUHP_PCI_XGENE_DEAD,
- CPUHP_IOMMU_INTEL_DEAD,
- CPUHP_LUSTRE_CFS_DEAD,
+ CPUHP_IOMMU_IOVA_DEAD,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
+ CPUHP_PADATA_DEAD,
+ CPUHP_AP_DTPM_CPU_DEAD,
+ CPUHP_RANDOM_PREPARE,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
- CPUHP_PROFILE_PREPARE,
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
- CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
CPUHP_CPUIDLE_COUPLED_PREPARE,
@@ -56,91 +110,138 @@ enum cpuhp_state {
CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
- CPUHP_NET_FLOW_PREPARE,
CPUHP_TOPOLOGY_PREPARE,
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
- CPUHP_MM_ZS_PREPARE,
- CPUHP_MM_ZSWP_MEM_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
- CPUHP_TIMERS_DEAD,
+ CPUHP_TIMERS_PREPARE,
+ CPUHP_TMIGR_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
+ CPUHP_BP_KICK_AP,
CPUHP_BRINGUP_CPU,
+
+ /*
+ * STARTING section invoked on the hotplugged CPU in low level
+ * bringup and teardown code.
+ */
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
+ CPUHP_AP_CACHECTRL_STARTING,
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
+ CPUHP_AP_CPU_PM_STARTING,
CPUHP_AP_IRQ_GIC_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
+ CPUHP_AP_IRQ_APPLE_AIC_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
+ CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ CPUHP_AP_IRQ_EIOINTC_STARTING,
+ CPUHP_AP_IRQ_AVECINTC_STARTING,
+ CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ CPUHP_AP_IRQ_ACLINT_SSWI_STARTING,
+ CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CQM_STARTING,
- CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
- CPUHP_AP_PERF_METAG_STARTING,
- CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
+ CPUHP_AP_PERF_RISCV_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
+ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
- CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
- CPUHP_AP_METAG_TIMER_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
+ CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
- CPUHP_AP_MARCO_TIMER_STARTING,
+ CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
- CPUHP_AP_KVM_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_STARTING,
- CPUHP_AP_KVM_ARM_TIMER_STARTING,
+ CPUHP_AP_REALTEK_TIMER_STARTING,
+ CPUHP_AP_RISCV_TIMER_STARTING,
+ CPUHP_AP_CLINT_TIMER_STARTING,
+ CPUHP_AP_CSKY_TIMER_STARTING,
+ CPUHP_AP_TI_GP_TIMER_STARTING,
+ CPUHP_AP_HYPERV_TIMER_STARTING,
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
+ CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
+ CPUHP_AP_HRTIMERS_DYING,
+ CPUHP_AP_TICK_DYING,
CPUHP_AP_X86_TBOOT_DYING,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
+
+ /* Online section invoked on the hotplugged CPU from the hotplug thread */
CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_HYPERV_ONLINE,
+ CPUHP_AP_KVM_ONLINE,
+ CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
- CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
+ CPUHP_AP_BLK_MQ_ONLINE,
+ CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
+ CPUHP_AP_X86_INTEL_EPB_ONLINE,
CPUHP_AP_PERF_ONLINE,
CPUHP_AP_PERF_X86_ONLINE,
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
- CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CQM_ONLINE,
- CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
+ CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
+ CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
+ CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
+ CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
+ CPUHP_AP_PERF_CSKY_ONLINE,
+ CPUHP_AP_TMIGR_ONLINE,
+ CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
+ CPUHP_AP_RANDOM_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
+ CPUHP_AP_KTHREADS_ONLINE,
+ CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
- CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
@@ -157,14 +258,15 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
int (*teardown)(unsigned int cpu),
bool multi_instance);
/**
- * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
+ * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup
+ * callback
* @state: The state for which the calls are installed
* @name: Name of the callback (will be used in debug output)
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Installs the callback functions and invokes the startup callback on
- * the present cpus which have already reached the @state.
+ * Installs the callback functions and invokes the @startup callback on
+ * the online cpus which have already reached the @state.
*/
static inline int cpuhp_setup_state(enum cpuhp_state state,
const char *name,
@@ -174,6 +276,18 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
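
A usage sketch with the dynamic state space (subsystem and callback names hypothetical):

static int my_cpu_online(unsigned int cpu)
{
        /* Per-CPU bring-up work; runs on each CPU reaching the state. */
        return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
        /* Undo my_cpu_online() before the CPU goes away. */
        return 0;
}

static int my_register(void)
{
        /* With CPUHP_AP_ONLINE_DYN, a positive return value is the
         * dynamically allocated state to pass to cpuhp_remove_state(). */
        return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                 "subsys/myfeature:online",
                                 my_cpu_online, my_cpu_offline);
}
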
+/**
+ * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling
+ * @startup callback from a cpus_read_lock()
+ * held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback (will be used in debug output)
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state() except that it must be invoked from within a
+ * cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -185,14 +299,14 @@ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
/**
* cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
- * callbacks
+ * @startup callback
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Same as @cpuhp_setup_state except that no calls are executed are invoked
- * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
+ * Same as cpuhp_setup_state() except that the @startup callback is not
+ * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n.
*/
static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
const char *name,
@@ -203,6 +317,19 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
false);
}
+/**
+ * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without
+ * invoking the @startup callback from
+ * a cpus_read_lock() held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback.
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state_nocalls() except that it must be invoked from
+ * within a cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -216,13 +343,13 @@ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
* cpuhp_setup_state_multi - Add callbacks for multi state
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
* Sets the internal multi_instance flag and prepares a state to work as a multi
* instance callback. No callbacks are invoked at this point. The callbacks are
* invoked once an instance for this state are registered via
- * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
+ * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls()
*/
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
const char *name,
@@ -247,9 +374,10 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state and invokes the startup callback on
- * the present cpus which have already reached the @state. The @state must have
- * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state and invokes the registered startup
+ * callback on the online cpus which have already reached the @state. The
+ * @state must have been earlier marked as multi-instance by
+ * cpuhp_setup_state_multi().
*/
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -263,8 +391,9 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state The @state must have been earlier
- * marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state. The @state must have been earlier
+ * marked as multi-instance by cpuhp_setup_state_multi(). NOP if SMP=n or
+ * HOTPLUG_CPU=n.
*/
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
struct hlist_node *node)
@@ -272,6 +401,17 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
return __cpuhp_state_add_instance(state, node, false);
}
+/**
+ * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state
+ * without invoking the startup
+ * callback from a cpus_read_lock()
+ * held region.
+ * @state: The state for which the instance is installed
+ * @node: The node for this individual state.
+ *
+ * Same as cpuhp_state_add_instance_nocalls() except that it must be
+ * invoked from within a cpus_read_lock() held region.
+ */
static inline int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
struct hlist_node *node)
@@ -287,7 +427,7 @@ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
* @state: The state for which the calls are removed
*
* Removes the callback functions and invokes the teardown callback on
- * the present cpus which have already reached the @state.
+ * the online cpus which have already reached the @state.
*/
static inline void cpuhp_remove_state(enum cpuhp_state state)
{
@@ -296,7 +436,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state)
/**
* cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
- * teardown
+ * the teardown callback
* @state: The state for which the calls are removed
*/
static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
@@ -304,6 +444,14 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
__cpuhp_remove_state(state, false);
}
+/**
+ * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking
+ * teardown from a cpus_read_lock() held region.
+ * @state: The state for which the calls are removed
+ *
+ * Same as cpuhp_remove_state_nocalls() except that it must be invoked
+ * from within a cpus_read_lock() held region.
+ */
static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
__cpuhp_remove_state_cpuslocked(state, false);
@@ -331,8 +479,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
- * Removes the instance and invokes the teardown callback on the present cpus
- * which have already reached the @state.
+ * Removes the instance and invokes the teardown callback on the online cpus
+ * which have already reached @state.
*/
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -342,7 +490,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
/**
* cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
- * without invoking the reatdown callback
+ * without invoking the teardown callback
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
@@ -360,4 +508,20 @@ void cpuhp_online_idle(enum cpuhp_state state);
static inline void cpuhp_online_idle(enum cpuhp_state state) { }
#endif
+struct task_struct;
+
+void cpuhp_ap_sync_alive(void);
+void arch_cpuhp_sync_state_poll(void);
+void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu);
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle);
+bool arch_cpuhp_init_parallel_bringup(void);
+
+#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
+void cpuhp_ap_report_dead(void);
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu);
+#else
+static inline void cpuhp_ap_report_dead(void) { }
+static inline void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
+#endif
+
#endif
diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
new file mode 100644
index 000000000000..f7aa20f62b87
--- /dev/null
+++ b/include/linux/cpuhplock.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/cpuhplock.h - CPU hotplug locking
+ *
+ * Locking functions for CPU hotplug.
+ */
+#ifndef _LINUX_CPUHPLOCK_H_
+#define _LINUX_CPUHPLOCK_H_
+
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+
+struct device;
+
+extern int lockdep_is_cpus_held(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void cpus_write_lock(void);
+void cpus_write_unlock(void);
+void cpus_read_lock(void);
+void cpus_read_unlock(void);
+int cpus_read_trylock(void);
+void lockdep_assert_cpus_held(void);
+void cpu_hotplug_disable_offlining(void);
+void cpu_hotplug_disable(void);
+void cpu_hotplug_enable(void);
+void clear_tasks_mm_cpumask(int cpu);
+int remove_cpu(unsigned int cpu);
+int cpu_device_down(struct device *dev);
+void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+static inline void cpus_write_lock(void) { }
+static inline void cpus_write_unlock(void) { }
+static inline void cpus_read_lock(void) { }
+static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
+static inline void lockdep_assert_cpus_held(void) { }
+static inline void cpu_hotplug_disable_offlining(void) { }
+static inline void cpu_hotplug_disable(void) { }
+static inline void cpu_hotplug_enable(void) { }
+static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
+static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
+
+#endif /* _LINUX_CPUHPLOCK_H_ */
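
The DEFINE_LOCK_GUARD_0() definition above allows scope-based use of the hotplug read lock; a brief sketch:

static void my_walk_online_cpus(void)
{
        int cpu;

        /* cpus_read_lock() now, cpus_read_unlock() automatically at scope exit */
        guard(cpus_read_lock)();

        for_each_online_cpu(cpu)
                pr_info("cpu%d is online\n", cpu);
}
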
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8f7788d23b57..4073690504a7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
+#include <linux/context_tracking.h>
#define CPUIDLE_STATE_MAX 10
#define CPUIDLE_NAME_LEN 16
@@ -29,45 +30,61 @@ struct cpuidle_driver;
* CPUIDLE DEVICE INTERFACE *
****************************/
+#define CPUIDLE_STATE_DISABLED_BY_USER BIT(0)
+#define CPUIDLE_STATE_DISABLED_BY_DRIVER BIT(1)
+
struct cpuidle_state_usage {
unsigned long long disable;
unsigned long long usage;
- unsigned long long time; /* in US */
+ u64 time_ns;
+ unsigned long long above; /* Number of times it's been too deep */
+ unsigned long long below; /* Number of times it's been too shallow */
+ unsigned long long rejected; /* Number of times idle entry was rejected */
+#ifdef CONFIG_SUSPEND
+ unsigned long long s2idle_usage;
+ unsigned long long s2idle_time; /* in US */
+#endif
};
struct cpuidle_state {
char name[CPUIDLE_NAME_LEN];
char desc[CPUIDLE_DESC_LEN];
+ s64 exit_latency_ns;
+ s64 target_residency_ns;
unsigned int flags;
unsigned int exit_latency; /* in US */
int power_usage; /* in mW */
unsigned int target_residency; /* in US */
- bool disabled; /* disabled on all CPUs */
int (*enter) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index);
- int (*enter_dead) (struct cpuidle_device *dev, int index);
+ void (*enter_dead) (struct cpuidle_device *dev, int index);
/*
* CPUs execute ->enter_s2idle with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
* temporarily) or attempt to change states of clock event devices.
+ *
+ * This callback may point to the same function as ->enter if all of
+ * the above requirements are met by it.
*/
- void (*enter_s2idle) (struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index);
+ int (*enter_s2idle)(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
};
/* Idle State Flags */
-#define CPUIDLE_FLAG_NONE (0x00)
-#define CPUIDLE_FLAG_POLLING (0x01) /* polling state */
-#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
-
-#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
+#define CPUIDLE_FLAG_NONE (0x00)
+#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */
+#define CPUIDLE_FLAG_RCU_IDLE BIT(6) /* idle-state takes care of RCU */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
@@ -76,10 +93,14 @@ struct cpuidle_driver_kobj;
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
- unsigned int use_deepest_state:1;
+ unsigned int poll_time_limit:1;
unsigned int cpu;
+ ktime_t next_hrtimer;
- int last_residency;
+ int last_state_idx;
+ u64 last_residency_ns;
+ u64 poll_limit_ns;
+ u64 forced_idle_latency_limit_ns;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
@@ -95,15 +116,34 @@ struct cpuidle_device {
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
-/**
- * cpuidle_get_last_residency - retrieves the last state's residency time
- * @dev: the target CPU
- */
-static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
+static __always_inline void ct_cpuidle_enter(void)
{
- return dev->last_residency;
+ lockdep_assert_irqs_disabled();
+ /*
+ * Idle is allowed to (temporarily) enable IRQs. It
+ * will return with IRQs disabled.
+ *
+ * Trace IRQs enable here, then switch off RCU, and have
+ * arch_cpu_idle() use raw_local_irq_enable(). Note that
+ * ct_idle_enter() relies on lockdep IRQ state, so switch that
+ * last -- this is very similar to the entry code.
+ */
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+ ct_idle_enter();
+ lockdep_hardirqs_on(_RET_IP_);
}
+static __always_inline void ct_cpuidle_exit(void)
+{
+ /*
+ * Carefully undo the above.
+ */
+ lockdep_hardirqs_off(_RET_IP_);
+ ct_idle_exit();
+ instrumentation_begin();
+}
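
A sketch of how a driver uses this pair: an idle state flagged CPUIDLE_FLAG_RCU_IDLE does its own context-tracking bracketing in ->enter(); my_low_level_idle() is a hypothetical firmware or hardware call:

static int my_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                    int index)
{
        ct_cpuidle_enter();             /* IRQs must already be disabled */
        my_low_level_idle(index);
        ct_cpuidle_exit();

        return index;
}
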
/****************************
* CPUIDLE DRIVER INTERFACE *
@@ -112,7 +152,6 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
struct cpuidle_driver {
const char *name;
struct module *owner;
- int refcnt;
/* used by the cpuidle framework to setup the broadcast timer */
unsigned int bctimer:1;
@@ -123,6 +162,9 @@ struct cpuidle_driver {
/* the driver handles the cpus in cpumask */
struct cpumask *cpumask;
+
+ /* preferred governor to switch at register time */
+ const char *governor;
};
#ifdef CONFIG_CPU_IDLE
@@ -131,15 +173,18 @@ extern bool cpuidle_not_available(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_select(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
+extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
-extern struct cpuidle_driver *cpuidle_driver_ref(void);
-extern void cpuidle_driver_unref(void);
+extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+ bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
@@ -163,17 +208,20 @@ static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{return -ENODEV; }
static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
+static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{return 0; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
-static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
-static inline void cpuidle_driver_unref(void) {}
+static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
+ int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
@@ -197,18 +245,22 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
-extern void cpuidle_use_deepest_state(bool enable);
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns);
+extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable)
+static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
}
#endif
@@ -225,7 +277,7 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
}
#endif
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
@@ -246,33 +298,57 @@ struct cpuidle_governor {
struct cpuidle_device *dev);
int (*select) (struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ bool *stop_tick);
void (*reflect) (struct cpuidle_device *dev, int index);
};
-#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-#else
-static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
-{return 0;}
-#endif
+extern s64 cpuidle_governor_latency_req(unsigned int cpu);
+
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \
+ idx, \
+ state, \
+ is_retention, is_rcu) \
+({ \
+ int __ret = 0; \
+ \
+ if (!idx) { \
+ cpu_do_idle(); \
+ return idx; \
+ } \
+ \
+ if (!is_retention) \
+ __ret = cpu_pm_enter(); \
+ if (!__ret) { \
+ if (!is_rcu) \
+ ct_cpuidle_enter(); \
+ __ret = low_level_idle_enter(state); \
+ if (!is_rcu) \
+ ct_cpuidle_exit(); \
+ if (!is_retention) \
+ cpu_pm_exit(); \
+ } \
+ \
+ __ret ? -1 : idx; \
+})
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
-({ \
- int __ret; \
- \
- if (!idx) { \
- cpu_do_idle(); \
- return idx; \
- } \
- \
- __ret = cpu_pm_enter(); \
- if (!__ret) { \
- __ret = low_level_idle_enter(idx); \
- cpu_pm_exit(); \
- } \
- \
- __ret ? -1 : idx; \
-})
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 1)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 1)
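
For instance, loosely modeled on existing arm/arm64 drivers (the function names and state-parameter array are assumptions), a platform ->enter() collapses to one line over these wrappers:

static int my_enter_idle_state(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int idx)
{
        /* Handles cpu_pm_enter()/exit() and RCU/context tracking. */
        return CPU_PM_CPU_IDLE_ENTER_PARAM(my_suspend_fn, idx,
                                           my_state_params[idx]);
}
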
#endif /* _LINUX_CPUIDLE_H */
diff --git a/include/linux/cpuidle_haltpoll.h b/include/linux/cpuidle_haltpoll.h
new file mode 100644
index 000000000000..d50c1e0411a2
--- /dev/null
+++ b/include/linux/cpuidle_haltpoll.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CPUIDLE_HALTPOLL_H
+#define _CPUIDLE_HALTPOLL_H
+
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+#include <asm/cpuidle_haltpoll.h>
+#else
+static inline void arch_haltpoll_enable(unsigned int cpu)
+{
+}
+
+static inline void arch_haltpoll_disable(unsigned int cpu)
+{
+}
+#endif
+#endif
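For illustration only, the arch-provided side in <asm/cpuidle_haltpoll.h> might take the shape below when CONFIG_ARCH_CPUIDLE_HALTPOLL=y; my_hv_set_poll_control() is invented for the example, not a real API.

static inline void arch_haltpoll_enable(unsigned int cpu)
{
        /* e.g. ask the hypervisor not to trap HLT while this CPU polls */
        my_hv_set_poll_control(cpu, true);
}

static inline void arch_haltpoll_disable(unsigned int cpu)
{
        my_hv_set_poll_control(cpu, false);
}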
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 4bf4479a3a80..80211900f373 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1,27 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H
/*
* Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number. In general,
+ * set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
-#include <linux/kernel.h>
-#include <linux/threads.h>
+#include <linux/atomic.h>
#include <linux/bitmap.h>
-#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/cpumask_types.h>
+#include <linux/gfp_types.h>
+#include <linux/numa.h>
+#include <linux/threads.h>
+#include <linux/types.h>
-/* Don't assign or return these: may not be this big! */
-typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-
-/**
- * cpumask_bits - get the bits in a cpumask
- * @maskp: the struct cpumask *
- *
- * You should only assume nr_cpu_ids bits of this mask are valid. This is
- * a macro so it's const-correct.
- */
-#define cpumask_bits(maskp) ((maskp)->bits)
+#include <asm/bug.h>
/**
* cpumask_pr_args - printf args to output a cpumask
@@ -31,19 +26,56 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
-#if NR_CPUS == 1
-#define nr_cpu_ids 1
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
-extern int nr_cpu_ids;
+extern unsigned int nr_cpu_ids;
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits ((unsigned int)nr_cpu_ids)
+static __always_inline void set_nr_cpu_ids(unsigned int nr)
+{
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+ WARN_ON(nr != nr_cpu_ids);
#else
-#define nr_cpumask_bits ((unsigned int)NR_CPUS)
+ nr_cpu_ids = nr;
#endif
+}
+
+/*
+ * We have several different "preferred sizes" for the cpumask
+ * operations, depending on operation.
+ *
+ * For example, the bitmap scanning and operating operations have
+ * optimized routines that work for the single-word case, but only when
+ * the size is constant. So if NR_CPUS fits in one single word, we are
+ * better off using that small constant, in order to trigger the
+ * optimized bit finding. That is 'small_cpumask_bits'.
+ *
+ * The clearing and copying operations will similarly perform better
+ * with a constant size, but we limit that size arbitrarily to four
+ * words. We call this 'large_cpumask_bits'.
+ *
+ * Finally, some operations just want the exact limit, either because
+ * they set bits or just don't have any faster fixed-sized versions. We
+ * call this just 'nr_cpumask_bits'.
+ *
+ * Note that these optional constants are always guaranteed to be at
+ * least as big as 'nr_cpu_ids' itself is, and all our cpumask
+ * allocations are at least that size (see cpumask_size()). The
+ * optimization comes from being able to potentially use a compile-time
+ * constant instead of a run-time generated exact number of CPUs.
+ */
+#if NR_CPUS <= BITS_PER_LONG
+ #define small_cpumask_bits ((unsigned int)NR_CPUS)
+ #define large_cpumask_bits ((unsigned int)NR_CPUS)
+#elif NR_CPUS <= 4*BITS_PER_LONG
+ #define small_cpumask_bits nr_cpu_ids
+ #define large_cpumask_bits ((unsigned int)NR_CPUS)
+#else
+ #define small_cpumask_bits nr_cpu_ids
+ #define large_cpumask_bits nr_cpu_ids
+#endif
+#define nr_cpumask_bits nr_cpu_ids
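To make the three limits concrete, a worked example (illustrative; assumes a 64-bit build, so BITS_PER_LONG == 64):

/*
 * NR_CPUS = 64   -> small_cpumask_bits == large_cpumask_bits == 64;
 *                   both compile-time constants, single-word bitops.
 * NR_CPUS = 256  -> small_cpumask_bits == nr_cpu_ids (run-time exact),
 *                   large_cpumask_bits == 256 (constant, four words).
 * NR_CPUS = 8192 -> both fall back to nr_cpu_ids; no constant-size
 *                   shortcut remains.
 */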
/*
* The following particular system cpumasks and operations manage
@@ -51,22 +83,19 @@ extern int nr_cpu_ids;
*
 * cpu_possible_mask - has bit 'cpu' set iff cpu is populatable
* cpu_present_mask - has bit 'cpu' set iff cpu is populated
+ * cpu_enabled_mask - has bit 'cpu' set iff cpu can be brought online
* cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
* cpu_active_mask - has bit 'cpu' set iff cpu available to migration
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
- * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
+ * The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
 * that could ever be plugged in at any time during the
* life of that system boot. The cpu_present_mask is dynamic(*),
* representing which CPUs are currently plugged in. And
* cpu_online_mask is the dynamic subset of cpu_present_mask,
* indicating those CPUs available for scheduling.
*
- * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
- * all NR_CPUS bits set, otherwise it is just the set of CPUs that
- * ACPI reports present at boot.
- *
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
* depending on what ACPI reports as currently plugged in, otherwise
* cpu_present_mask is just a copy of cpu_possible_mask.
@@ -75,7 +104,7 @@ extern int nr_cpu_ids;
* hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
*
* Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
* assumption that their single CPU is online. The UP
* cpu_{online,possible,present}_masks are placebos. Changing them
 * will have no useful effect on the following num_*_cpus()
@@ -87,130 +116,258 @@ extern int nr_cpu_ids;
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
+extern struct cpumask __cpu_enabled_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
+extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
+#define cpu_enabled_mask ((const struct cpumask *)&__cpu_enabled_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
+#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
-#if NR_CPUS > 1
-#define num_online_cpus() cpumask_weight(cpu_online_mask)
-#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
-#define num_present_cpus() cpumask_weight(cpu_present_mask)
-#define num_active_cpus() cpumask_weight(cpu_active_mask)
-#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
-#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
-#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
-#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
-#else
-#define num_online_cpus() 1U
-#define num_possible_cpus() 1U
-#define num_present_cpus() 1U
-#define num_active_cpus() 1U
-#define cpu_online(cpu) ((cpu) == 0)
-#define cpu_possible(cpu) ((cpu) == 0)
-#define cpu_present(cpu) ((cpu) == 0)
-#define cpu_active(cpu) ((cpu) == 0)
-#endif
+extern atomic_t __num_online_cpus;
+extern unsigned int __num_possible_cpus;
-/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+extern cpumask_t cpus_booted_once_mask;
+
+static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
- WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+ WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
- return cpu;
}
-#if NR_CPUS == 1
-/* Uniprocessor. Assume all masks are "1". */
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+/* verify cpu argument to cpumask_* operators */
+static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
- return 0;
+ cpu_max_bits_warn(cpu, small_cpumask_bits);
+ return cpu;
}
-/* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline unsigned int cpumask_first(const struct cpumask *srcp)
{
- return n+1;
+ return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+/**
+ * cpumask_first_zero - get the first unset cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if all cpus are set.
+ */
+static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
- return n+1;
+ return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}
-static inline unsigned int cpumask_next_and(int n,
- const struct cpumask *srcp,
- const struct cpumask *andp)
+/**
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @srcp1: the first input
+ * @srcp2: the second input
+ *
+ * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ */
+static __always_inline
+unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
- return n+1;
+ return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
-/* cpu must be a valid cpu, ie 0, so there's no other choice. */
-static inline unsigned int cpumask_any_but(const struct cpumask *mask,
- unsigned int cpu)
+/**
+ * cpumask_first_andnot - return the first cpu from *srcp1 & ~*srcp2
+ * @srcp1: the first input
+ * @srcp2: the second input
+ *
+ * Return: >= nr_cpu_ids if no such cpu found.
+ */
+static __always_inline
+unsigned int cpumask_first_andnot(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
- return 1;
+ return find_first_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+/**
+ * cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
+ * @srcp1: the first input
+ * @srcp2: the second input
+ * @srcp3: the third input
+ *
+ * Return: >= nr_cpu_ids if no cpus set in all.
+ */
+static __always_inline
+unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
+ const struct cpumask *srcp2,
+ const struct cpumask *srcp3)
{
- return 0;
+ return find_first_and_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ cpumask_bits(srcp3), small_cpumask_bits);
}
-#define for_each_cpu(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_and(cpu, mask, and) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
-#else
/**
- * cpumask_first - get the first cpu in a cpumask
- * @srcp: the cpumask pointer
+ * cpumask_last - get the last CPU in a cpumask
+ * @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpumask_bits if no CPUs set.
*/
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_last(const struct cpumask *srcp)
{
- return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+ return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
/**
* cpumask_next - get the next cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus set.
+ * Return: >= nr_cpu_ids if no further cpus set.
*/
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
- return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+ return find_next_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
}
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus unset.
+ * Return: >= nr_cpu_ids if no further cpus unset.
*/
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
- return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+ return find_next_zero_bit(cpumask_bits(srcp), small_cpumask_bits, n+1);
+}
+
+#if NR_CPUS == 1
+/* Uniprocessor: there is only one valid CPU */
+static __always_inline
+unsigned int cpumask_local_spread(unsigned int i, int node)
+{
+ return 0;
}
-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+static __always_inline
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return cpumask_first_and(src1p, src2p);
+}
+
+static __always_inline
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+{
+ return cpumask_first(srcp);
+}
+#else
unsigned int cpumask_local_spread(unsigned int i, int node);
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p);
+unsigned int cpumask_any_distribute(const struct cpumask *srcp);
+#endif /* NR_CPUS */
+
+/**
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no further cpus set in both.
+ */
+static __always_inline
+unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
+
+/**
+ * cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
+ */
+static __always_inline
+unsigned int cpumask_next_andnot(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
+
+/**
+ * cpumask_next_and_wrap - get the next cpu in *src1p & *src2p, starting from
+ * @n+1. If nothing found, wrap around and start from
+ * the beginning
+ * @n: the cpu prior to the place to search (i.e. search starts from @n+1)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src1p & @src2p is empty.
+ */
+static __always_inline
+unsigned int cpumask_next_and_wrap(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
+
+/**
+ * cpumask_next_wrap - get the next cpu in *src, starting from @n+1. If nothing
+ * found, wrap around and start from the beginning
+ * @n: the cpu prior to the place to search (i.e. search starts from @n+1)
+ * @src: cpumask pointer
+ *
+ * Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src is empty.
+ */
+static __always_inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_bit_wrap(cpumask_bits(src), small_cpumask_bits, n + 1);
+}
+
+/**
+ * cpumask_random - get random cpu in *src.
+ * @src: cpumask pointer
+ *
+ * Return: random set bit, or >= nr_cpu_ids if @src is empty.
+ */
+static __always_inline
+unsigned int cpumask_random(const struct cpumask *src)
+{
+ return find_random_bit(cpumask_bits(src), nr_cpu_ids);
+}
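A usage sketch (the helper name and fallback policy are assumptions): spread work to a random online CPU.

#include <linux/smp.h>

static inline unsigned int pick_random_online_cpu(void)
{
        unsigned int cpu = cpumask_random(cpu_online_mask);

        /* fall back to the local CPU if the mask raced to empty */
        return cpu < nr_cpu_ids ? cpu : raw_smp_processor_id();
}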
/**
* for_each_cpu - iterate over every cpu in a mask
@@ -220,58 +377,209 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+ for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
/**
- * for_each_cpu_not - iterate over every cpu in a complemented mask
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
* @cpu: the (optionally unsigned) integer iterator
* @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_zero((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)
-extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+/**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_and(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_and(cpu, mask1, mask2) \
+ for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
- * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
+ * those present in another.
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the cpumask poiter
- * @start: the start location
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
*
- * The implementation does not assume any bit in @mask is set (including @start).
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_andnot(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
- (cpu) < nr_cpumask_bits; \
- (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+#define for_each_cpu_andnot(cpu, mask1, mask2) \
+ for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
- * for_each_cpu_and - iterate over every cpu in both masks
+ * for_each_cpu_or - iterate over every cpu present in either mask
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the first cpumask pointer
- * @and: the second cpumask pointer
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
- * cpumask_and(&tmp, &mask, &and);
+ * cpumask_or(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_and(cpu, mask, and) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask), (and)), \
- (cpu) < nr_cpu_ids;)
-#endif /* SMP */
+#define for_each_cpu_or(cpu, mask1, mask2) \
+ for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
+
+/**
+ * for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask.
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_from(cpu, mask) \
+ for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
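A usage sketch for these iterators (the scenario and mask names are illustrative): walk the CPUs in one mask that are absent from another, with no temporary mask needed.

static void kick_allowed_cpus(const struct cpumask *allowed,
                              const struct cpumask *isolated)
{
        unsigned int cpu;

        for_each_cpu_andnot(cpu, allowed, isolated)
                smp_send_reschedule(cpu);       /* any per-CPU action */
}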
+
+/**
+ * cpumask_any_but - return an arbitrary cpu in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * If @cpu == -1, the function is equivalent to cpumask_any().
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline
+unsigned int cpumask_any_but(const struct cpumask *mask, int cpu)
+{
+ unsigned int i;
+
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
+ for_each_cpu(i, mask)
+ if (i != cpu)
+ break;
+ return i;
+}
+
+/**
+ * cpumask_any_and_but - pick an arbitrary cpu from *mask1 & *mask2, but not this one.
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ * @cpu: the cpu to ignore
+ *
+ * If @cpu == -1, the function is equivalent to cpumask_any_and().
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline
+unsigned int cpumask_any_and_but(const struct cpumask *mask1,
+ const struct cpumask *mask2,
+ int cpu)
+{
+ unsigned int i;
+
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
+ i = cpumask_first_and(mask1, mask2);
+ if (i != cpu)
+ return i;
+
+ return cpumask_next_and(cpu, mask1, mask2);
+}
+
+/**
+ * cpumask_any_andnot_but - pick an arbitrary cpu from *mask1 & ~*mask2, but not this one.
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ * @cpu: the cpu to ignore
+ *
+ * If @cpu == -1, the function returns the first matching cpu.
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline
+unsigned int cpumask_any_andnot_but(const struct cpumask *mask1,
+ const struct cpumask *mask2,
+ int cpu)
+{
+ unsigned int i;
+
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
+ i = cpumask_first_andnot(mask1, mask2);
+ if (i != cpu)
+ return i;
+
+ return cpumask_next_andnot(cpu, mask1, mask2);
+}
+
+/**
+ * cpumask_nth - get the Nth cpu in a cpumask
+ * @srcp: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if no such cpu exists.
+ */
+static __always_inline
+unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+{
+ return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and - get the Nth cpu in 2 cpumasks
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if no such cpu exists.
+ */
+static __always_inline
+unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @srcp3: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if no such cpu exists.
+ */
+static __always_inline
+unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2,
+ const struct cpumask *srcp3)
+{
+ return find_nth_and_andnot_bit(cpumask_bits(srcp1),
+ cpumask_bits(srcp2),
+ cpumask_bits(srcp3),
+ small_cpumask_bits, cpumask_check(cpu));
+}
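A sketch of the spreading use case cpumask_nth() targets (queue_to_cpu() and its wrap policy are assumptions; the num_online_cpus() snapshot is racy vs hotplug, which is fine for a heuristic):

static inline unsigned int queue_to_cpu(unsigned int k)
{
        unsigned int cpu = cpumask_nth(k % num_online_cpus(), cpu_online_mask);

        return cpu < nr_cpu_ids ? cpu : 0;
}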
#define CPU_BITS_NONE \
{ \
@@ -288,28 +596,42 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
+/**
+ * cpumask_clear_cpus - clear cpus in a cpumask
+ * @dstp: the cpumask pointer
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @ncpus: number of cpus to clear (< nr_cpu_ids)
+ */
+static __always_inline void cpumask_clear_cpus(struct cpumask *dstp,
+ unsigned int cpu, unsigned int ncpus)
+{
+ cpumask_check(cpu + ncpus - 1);
+ bitmap_clear(cpumask_bits(dstp), cpumask_check(cpu), ncpus);
+}
/**
* cpumask_clear_cpu - clear a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -319,9 +641,10 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in @cpumask, else returns 0
+ * Return: true if @cpu is set in @cpumask, else false
*/
-static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -331,11 +654,12 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
- *
* test_and_set_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu was set in the old bitmap of @cpumask, else false
*/
-static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -345,11 +669,12 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
- *
* test_and_clear_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu was set in the old bitmap of @cpumask, else false
*/
-static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
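A sketch of the atomic claim idiom these test-and-set/clear wrappers enable (@claimed and the action taken are illustrative):

#include <linux/printk.h>
#include <linux/smp.h>

static void claim_cpu_slot(struct cpumask *claimed)
{
        int cpu = raw_smp_processor_id();

        /* atomic with respect to concurrent claimers on other CPUs */
        if (!cpumask_test_and_set_cpu(cpu, claimed))
                pr_debug("cpu%d claimed its slot\n", cpu);
}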
@@ -358,8 +683,12 @@ static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_setall(struct cpumask *dstp)
+static __always_inline void cpumask_setall(struct cpumask *dstp)
{
+ if (small_const_nbits(small_cpumask_bits)) {
+ cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
+ return;
+ }
bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -367,9 +696,9 @@ static inline void cpumask_setall(struct cpumask *dstp)
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear(struct cpumask *dstp)
+static __always_inline void cpumask_clear(struct cpumask *dstp)
{
- bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
+ bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
/**
@@ -378,14 +707,14 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * Return: false if *@dstp is empty, else true
*/
-static inline int cpumask_and(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -394,11 +723,28 @@ static inline int cpumask_and(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
+}
+
+/**
+ * cpumask_weighted_or - *dstp = *src1p | *src2p and return the weight of the result
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Return: The number of bits set in the resulting cpumask @dstp
+ */
+static __always_inline
+unsigned int cpumask_weighted_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_weighted_or(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -407,12 +753,12 @@ static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_xor(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -421,50 +767,60 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * Return: false if *@dstp is empty, else true
*/
-static inline int cpumask_andnot(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
- * cpumask_complement - *dstp = ~*srcp
- * @dstp: the cpumask result
- * @srcp: the input to invert
+ * cpumask_equal - *src1p == *src2p
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Return: true if the cpumasks are equal, false if not
*/
-static inline void cpumask_complement(struct cpumask *dstp,
- const struct cpumask *srcp)
+static __always_inline
+bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
- bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
- nr_cpumask_bits);
+ return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits);
}
/**
- * cpumask_equal - *src1p == *src2p
+ * cpumask_or_equal - *src1p | *src2p == *src3p
* @src1p: the first input
* @src2p: the second input
+ * @src3p: the third input
+ *
+ * Return: true if first cpumask ORed with second cpumask == third cpumask,
+ * otherwise false
*/
-static inline bool cpumask_equal(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p,
+ const struct cpumask *src3p)
{
- return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
+ cpumask_bits(src3p), small_cpumask_bits);
}
/**
* cpumask_intersects - (*src1p & *src2p) != 0
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if first cpumask ANDed with second cpumask is non-empty,
+ * otherwise false
*/
-static inline bool cpumask_intersects(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -472,29 +828,33 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*
- * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
+ * Return: true if *@src1p is a subset of *@src2p, else false
*/
-static inline int cpumask_subset(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
* cpumask_empty - *srcp == 0
 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are clear.
+ *
+ * Return: true if srcp is empty (has no bits set), else false
*/
-static inline bool cpumask_empty(const struct cpumask *srcp)
+static __always_inline bool cpumask_empty(const struct cpumask *srcp)
{
- return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
+ return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
/**
* cpumask_full - *srcp == 0xFFFFFFFF...
 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are set.
+ *
+ * Return: true if srcp is full (has all bits set), else false
*/
-static inline bool cpumask_full(const struct cpumask *srcp)
+static __always_inline bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}
@@ -502,10 +862,39 @@ static inline bool cpumask_full(const struct cpumask *srcp)
/**
* cpumask_weight - Count of bits in *srcp
* @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in *srcp
+ */
+static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)
+{
+ return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
+}
+
+/**
+ * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in both *srcp1 and *srcp2
*/
-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
- return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+ return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
+}
+
+/**
+ * cpumask_weight_andnot - Count of bits in (*srcp1 & ~*srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in *srcp1 and cleared in *srcp2
+ */
+static __always_inline
+unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
/**
@@ -514,11 +903,11 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_right(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -527,8 +916,8 @@ static inline void cpumask_shift_right(struct cpumask *dstp,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_left(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
nr_cpumask_bits);
@@ -539,35 +928,26 @@ static inline void cpumask_shift_left(struct cpumask *dstp,
* @dstp: the result
* @srcp: the input cpumask
*/
-static inline void cpumask_copy(struct cpumask *dstp,
- const struct cpumask *srcp)
+static __always_inline
+void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
- bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
+ bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
/**
- * cpumask_any - pick a "random" cpu from *srcp
+ * cpumask_any - pick an arbitrary cpu from *srcp
* @srcp: the input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any(srcp) cpumask_first(srcp)
/**
- * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
- * @src1p: the first input
- * @src2p: the second input
- *
- * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
- */
-#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
-
-/**
- * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * cpumask_any_and - pick an arbitrary cpu from *mask1 & *mask2
* @mask1: the first input cpumask
* @mask2: the second input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
@@ -583,10 +963,10 @@ static inline void cpumask_copy(struct cpumask *dstp,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -597,10 +977,10 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
-static inline int cpumask_parselist_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
@@ -611,14 +991,11 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
- char *nl = strchr(buf, '\n');
- unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
-
- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
@@ -626,128 +1003,120 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
-static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
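A sketch of accepting a user-supplied list such as "0-3,8" (the validation policy against cpu_possible_mask is an assumption, not part of the API):

#include <linux/errno.h>

static int parse_cpu_list_param(const char *s, struct cpumask *mask)
{
        int err = cpulist_parse(s, mask);

        if (err)
                return err;
        return cpumask_subset(mask, cpu_possible_mask) ? 0 : -EINVAL;
}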
/**
- * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
+ *
+ * Return: size to allocate for a &struct cpumask in bytes
*/
-static inline size_t cpumask_size(void)
+static __always_inline unsigned int cpumask_size(void)
{
- return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
+ return bitmap_size(large_cpumask_bits);
}
-/*
- * cpumask_var_t: struct cpumask for stack usage.
- *
- * Oh, the wicked games we play! In order to make kernel coding a
- * little more difficult, we typedef cpumask_var_t to an array or a
- * pointer: doing &mask on an array is a noop, so it still works.
- *
- * ie.
- * cpumask_var_t tmpmask;
- * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
- * return -ENOMEM;
- *
- * ... use 'tmpmask' like a normal struct cpumask * ...
- *
- * free_cpumask_var(tmpmask);
- *
- *
- * However, one notable exception is there. alloc_cpumask_var() allocates
- * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has
- * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t.
- *
- * cpumask_var_t tmpmask;
- * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
- * return -ENOMEM;
- *
- * var = *tmpmask;
- *
- * This code makes NR_CPUS length memcopy and brings to a memory corruption.
- * cpumask_copy() provide safe copy functionality.
- *
- * Note that there is another evil here: If you define a cpumask_var_t
- * as a percpu variable then the way to obtain the address of the cpumask
- * structure differently influences what this_cpu_* operation needs to be
- * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
- * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
- * other type of cpumask_var_t implementation is configured.
- *
- * Please also note that __cpumask_var_read_mostly can be used to declare
- * a cpumask_var_t variable itself (not its content) as read mostly.
- */
#ifdef CONFIG_CPUMASK_OFFSTACK
-typedef struct cpumask *cpumask_var_t;
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
#define __cpumask_var_read_mostly __read_mostly
+#define CPUMASK_VAR_NULL NULL
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+
+static __always_inline
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+ return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+
+/**
+ * alloc_cpumask_var - allocate a struct cpumask
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y; otherwise it is
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * See alloc_cpumask_var_node.
+ *
+ * Return: %true if allocation succeeded, %false if not
+ */
+static __always_inline
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
+}
+
+static __always_inline
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return mask != NULL;
}
#else
-typedef struct cpumask cpumask_var_t[1];
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly
+#define CPUMASK_VAR_NULL {}
-static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
-static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
-static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
-static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
-static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
-static inline void free_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_cpumask_var(cpumask_var_t mask)
{
}
-static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
+DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));
+
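The canonical pattern for these allocators, restated as runnable code (the work done on the scratch mask is illustrative):

static int compute_with_scratch_mask(void)
{
        cpumask_var_t tmp;

        if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(tmp, cpu_online_mask, cpu_active_mask);
        /* ... use tmp like a normal struct cpumask * ... */

        free_cpumask_var(tmp);
        return 0;
}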
/* It's common to want to use cpu_all_mask in struct member initializers,
* so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
@@ -756,59 +1125,48 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+#if NR_CPUS == 1
+/* Uniprocessor: the possible/online/present masks are always "1" */
+#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+
+#define for_each_possible_cpu_wrap(cpu, start) \
+ for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu_wrap(cpu, start) \
+ for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
+#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
+#define for_each_enabled_cpu(cpu) for_each_cpu((cpu), cpu_enabled_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+#define for_each_possible_cpu_wrap(cpu, start) \
+ for_each_cpu_wrap((cpu), cpu_possible_mask, (start))
+#define for_each_online_cpu_wrap(cpu, start) \
+ for_each_cpu_wrap((cpu), cpu_online_mask, (start))
+#endif
+
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
-void init_cpu_online(const struct cpumask *src);
-
-static inline void reset_cpu_possible_mask(void)
-{
- bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
-}
-
-static inline void
-set_cpu_possible(unsigned int cpu, bool possible)
-{
- if (possible)
- cpumask_set_cpu(cpu, &__cpu_possible_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_possible_mask);
-}
-static inline void
-set_cpu_present(unsigned int cpu, bool present)
-{
- if (present)
- cpumask_set_cpu(cpu, &__cpu_present_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_present_mask);
-}
+#define assign_cpu(cpu, mask, val) \
+ assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
-static inline void
-set_cpu_online(unsigned int cpu, bool online)
-{
- if (online)
- cpumask_set_cpu(cpu, &__cpu_online_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_online_mask);
-}
+#define __assign_cpu(cpu, mask, val) \
+ __assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
-static inline void
-set_cpu_active(unsigned int cpu, bool active)
-{
- if (active)
- cpumask_set_cpu(cpu, &__cpu_active_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_active_mask);
-}
+#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
+#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
+#define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
+#define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
+void set_cpu_online(unsigned int cpu, bool online);
+void set_cpu_possible(unsigned int cpu, bool possible);
/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
*
* There are a few places where cpumask_var_t isn't appropriate and
@@ -821,7 +1179,7 @@ set_cpu_active(unsigned int cpu, bool active)
((struct cpumask *)(1 ? (bitmap) \
: (void *)sizeof(__check_is_bitmap(bitmap))))
-static inline int __check_is_bitmap(const unsigned long *bitmap)
+static __always_inline int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
@@ -836,13 +1194,108 @@ static inline int __check_is_bitmap(const unsigned long *bitmap)
extern const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
-static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p -= cpu / BITS_PER_LONG;
return to_cpumask(p);
}
+#if NR_CPUS > 1
+/**
+ * num_online_cpus() - Read the number of online CPUs
+ *
+ * Despite the fact that __num_online_cpus is of type atomic_t, this
+ * interface gives only a momentary snapshot and is not protected against
+ * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
+ * region.
+ *
+ * Return: momentary snapshot of the number of online CPUs
+ */
+static __always_inline unsigned int num_online_cpus(void)
+{
+ return raw_atomic_read(&__num_online_cpus);
+}
+
+static __always_inline unsigned int num_possible_cpus(void)
+{
+ return __num_possible_cpus;
+}
+
+#define num_enabled_cpus() cpumask_weight(cpu_enabled_mask)
+#define num_present_cpus() cpumask_weight(cpu_present_mask)
+#define num_active_cpus() cpumask_weight(cpu_active_mask)
+
+static __always_inline bool cpu_online(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_online_mask);
+}
+
+static __always_inline bool cpu_enabled(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_enabled_mask);
+}
+
+static __always_inline bool cpu_possible(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_possible_mask);
+}
+
+static __always_inline bool cpu_present(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_present_mask);
+}
+
+static __always_inline bool cpu_active(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_active_mask);
+}
+
+static __always_inline bool cpu_dying(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_dying_mask);
+}
+
+#else
+
+#define num_online_cpus() 1U
+#define num_possible_cpus() 1U
+#define num_enabled_cpus() 1U
+#define num_present_cpus() 1U
+#define num_active_cpus() 1U
+
+static __always_inline bool cpu_online(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static __always_inline bool cpu_possible(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static __always_inline bool cpu_enabled(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static __always_inline bool cpu_present(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static __always_inline bool cpu_active(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static __always_inline bool cpu_dying(unsigned int cpu)
+{
+ return false;
+}
+
+#endif /* NR_CPUS > 1 */
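Per the num_online_cpus() comment above, a stable value needs the hotplug lock held; a minimal sketch (cpus_read_lock() comes from <linux/cpu.h>):

#include <linux/cpu.h>

static unsigned int stable_num_online(void)
{
        unsigned int n;

        cpus_read_lock();       /* hold off CPU hotplug */
        n = num_online_cpus();
        cpus_read_unlock();
        return n;
}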
+
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#if NR_CPUS <= BITS_PER_LONG
@@ -867,16 +1320,62 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
* @mask: the cpumask to copy
* @buf: the buffer to copy into
*
- * Returns the length of the (null-terminated) @buf string, zero if
+ * Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
-static inline ssize_t
+static __always_inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
nr_cpu_ids);
}
+/**
+ * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
+ * hex values of cpumask
+ *
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: offset in the source string from which we copy into @buf
+ * @count: the maximum number of bytes to print
+ *
+ * The function prints the cpumask into the buffer as hex values of
+ * cpumask; Typically used by bin_attribute to export cpumask bitmask
+ * ABI.
+ *
+ * Return: the number of bytes copied, excluding the terminating '\0'.
+ */
+static __always_inline
+ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
+
+/**
+ * cpumap_print_list_to_buf - copies the cpumask into the buffer as
+ * comma-separated list of cpus
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: offset in the source string from which we copy into @buf
+ * @count: the maximum number of bytes to print
+ *
+ * Everything is the same as with cpumap_print_bitmask_to_buf() above,
+ * except for the print format.
+ *
+ * Return: the number of bytes copied, excluding the terminating '\0'.
+ */
+static __always_inline
+ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
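A sketch of the sysfs bin_attribute ->read() shape these helpers target (the attribute and the exported mask are illustrative):

static ssize_t my_cpus_read(struct file *file, struct kobject *kobj,
                            struct bin_attribute *attr, char *buf,
                            loff_t off, size_t count)
{
        return cpumap_print_bitmask_to_buf(buf, cpu_online_mask, off, count);
}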
+
#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
(cpumask_t) { { \
@@ -900,4 +1399,23 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
[0] = 1UL \
} }
+/*
+ * Provide a valid theoretical max size for cpumap and cpulist sysfs files
+ * to avoid breaking userspace which may allocate a buffer based on the size
+ * reported by e.g. fstat.
+ *
+ * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
+ *
+ * For cpulist, 7 is (ceil(log10(NR_CPUS)) + 1), allowing NR_CPUS to be up
+ * to 2 orders of magnitude larger than 8192. We then divide by 2 to
+ * cover a worst-case of every other cpu being on one of two nodes for a
+ * very large NR_CPUS.
+ *
+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ * unsigned comparison to -1.
+ */
+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
+ ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
+#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
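A worked check of the arithmetic (illustrative configuration):

/*
 * With NR_CPUS = 8192: cpumap needs 8192 * 9 / 32 - 1 = 2303 bytes
 * (eight hex digits plus one ',' per 32-bit word), and cpulist gets
 * 8192 * 7 / 2 = 28672 bytes. Both exceed a 4K PAGE_SIZE, so the
 * computed sizes are used rather than the PAGE_SIZE minimum.
 */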
+
#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/cpumask_api.h b/include/linux/cpumask_api.h
new file mode 100644
index 000000000000..83bd3ebe82b0
--- /dev/null
+++ b/include/linux/cpumask_api.h
@@ -0,0 +1 @@
+#include <linux/cpumask.h>
diff --git a/include/linux/cpumask_types.h b/include/linux/cpumask_types.h
new file mode 100644
index 000000000000..461ed1b6bcdb
--- /dev/null
+++ b/include/linux/cpumask_types.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_CPUMASK_TYPES_H
+#define __LINUX_CPUMASK_TYPES_H
+
+#include <linux/bitops.h>
+#include <linux/threads.h>
+
+/* Don't assign or return these: may not be this big! */
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+
+/**
+ * cpumask_bits - get the bits in a cpumask
+ * @maskp: the struct cpumask *
+ *
+ * You should only assume nr_cpu_ids bits of this mask are valid. This is
+ * a macro so it's const-correct.
+ */
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play! In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * i.e.
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ * free_cpumask_var(tmpmask);
+ *
+ *
+ * There is, however, one notable exception. alloc_cpumask_var() allocates
+ * only nr_cpumask_bits bits (on the other hand, a real cpumask_t always has
+ * NR_CPUS bits). Therefore you must not dereference a cpumask_var_t.
+ *
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * var = *tmpmask;
+ *
+ * This code does an NR_CPUS-sized memcpy and can corrupt memory.
+ * cpumask_copy() provides safe copy functionality.
+ *
+ * Note that there is another evil here: if you define a cpumask_var_t
+ * as a percpu variable, then the way to obtain the address of the cpumask
+ * structure differs between the two implementations, which determines
+ * which this_cpu_* operation needs to be used. Please use
+ * this_cpu_cpumask_var_t in those cases. Direct use of this_cpu_ptr() or
+ * this_cpu_read() will lead to failures when the other type of
+ * cpumask_var_t implementation is configured.
+ *
+ * Please also note that __cpumask_var_read_mostly can be used to declare
+ * a cpumask_var_t variable itself (not its content) as read mostly.
+ */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+typedef struct cpumask *cpumask_var_t;
+#else
+typedef struct cpumask cpumask_var_t[1];
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
+#endif /* __LINUX_CPUMASK_TYPES_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e74655d941b7..a98d3330385c 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
@@ -14,6 +15,7 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS
@@ -32,6 +34,8 @@
*/
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
+extern struct static_key_false cpusets_insane_config_key;
+
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
@@ -39,38 +43,51 @@ static inline bool cpusets_enabled(void)
static inline void cpuset_inc(void)
{
- static_branch_inc(&cpusets_pre_enable_key);
- static_branch_inc(&cpusets_enabled_key);
+ static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
+ static_branch_inc_cpuslocked(&cpusets_enabled_key);
}
static inline void cpuset_dec(void)
{
- static_branch_dec(&cpusets_enabled_key);
- static_branch_dec(&cpusets_pre_enable_key);
+ static_branch_dec_cpuslocked(&cpusets_enabled_key);
+ static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
+}
+
+/*
+ * This will get enabled whenever a cpuset configuration is considered
+ * unsupportable in general, e.g. a movable-only node which cannot satisfy
+ * any non-movable allocations (see update_nodemask). The page allocator
+ * needs to make additional checks for those configurations, and this
+ * check is meant to guard those checks without any overhead for sane
+ * configurations.
+ */
+static inline bool cpusets_insane_config(void)
+{
+ return static_branch_unlikely(&cpusets_insane_config_key);
}
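A hedged sketch of the intended guard pattern (editor's illustration, not code from this patch; the gfp_mask policy shown is a stand-in):

static inline void example_check_insane_cpuset(gfp_t gfp_mask)
{
	/* The static key keeps this check free for sane configurations. */
	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL))
		pr_warn_once("cpuset: movable-only node may fail non-movable allocation\n");
}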
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
+extern void inc_dl_tasks_cs(struct task_struct *task);
+extern void dec_dl_tasks_cs(struct task_struct *task);
+extern void cpuset_lock(void);
+extern void cpuset_unlock(void);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
-
-static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
-{
- if (cpusets_enabled())
- return __cpuset_node_allowed(node, gfp_mask);
- return true;
-}
+extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+ return cpuset_current_node_allowed(zone_to_nid(z), gfp_mask);
}
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
@@ -83,6 +100,7 @@ static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2);
+#ifdef CONFIG_CPUSETS_V1
#define cpuset_memory_pressure_bump() \
do { \
if (cpuset_memory_pressure_enabled) \
@@ -90,6 +108,9 @@ extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
+#else
+static inline void cpuset_memory_pressure_bump(void) { }
+#endif
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
@@ -97,23 +118,19 @@ extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
extern int cpuset_mem_spread_node(void);
-extern int cpuset_slab_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
{
return task_spread_page(current);
}
-static inline int cpuset_do_slab_mem_spread(void)
-{
- return task_spread_slab(current);
-}
-
-extern int current_cpuset_is_being_rebound(void);
+extern bool current_cpuset_is_being_rebound(void);
+extern void dl_rebuild_rd_accounting(void);
extern void rebuild_sched_domains(void);
extern void cpuset_print_current_mems_allowed(void);
+extern void cpuset_reset_sched_domains(void);
/*
* read_mems_allowed_begin is required when making decisions involving
@@ -157,26 +174,48 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
+extern bool cpuset_node_allowed(struct cgroup *cgroup, int nid);
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
+static inline bool cpusets_insane_config(void) { return false; }
+
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
+static inline void cpuset_force_rebuild(void) { }
+
static inline void cpuset_update_active_cpus(void)
{
partition_sched_domains(1, NULL, NULL);
}
+static inline void inc_dl_tasks_cs(struct task_struct *task) { }
+static inline void dec_dl_tasks_cs(struct task_struct *task) { }
+static inline void cpuset_lock(void) { }
+static inline void cpuset_unlock(void) { }
+
+static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
+ struct cpumask *mask)
+{
+ cpumask_copy(mask, task_cpu_possible_mask(p));
+}
+
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
- cpumask_copy(mask, cpu_possible_mask);
+ cpuset_cpus_allowed_locked(p, mask);
}
-static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
+ return false;
+}
+
+static inline bool cpuset_cpu_is_isolated(int cpu)
+{
+ return false;
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -192,11 +231,6 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
-static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
-{
- return true;
-}
-
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return true;
@@ -225,27 +259,26 @@ static inline int cpuset_mem_spread_node(void)
return 0;
}
-static inline int cpuset_slab_spread_node(void)
+static inline int cpuset_do_page_mem_spread(void)
{
return 0;
}
-static inline int cpuset_do_page_mem_spread(void)
+static inline bool current_cpuset_is_being_rebound(void)
{
- return 0;
+ return false;
}
-static inline int cpuset_do_slab_mem_spread(void)
+static inline void dl_rebuild_rd_accounting(void)
{
- return 0;
}
-static inline int current_cpuset_is_being_rebound(void)
+static inline void rebuild_sched_domains(void)
{
- return 0;
+ partition_sched_domains(1, NULL, NULL);
}
-static inline void rebuild_sched_domains(void)
+static inline void cpuset_reset_sched_domains(void)
{
partition_sched_domains(1, NULL, NULL);
}
@@ -268,6 +301,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
+static inline bool cpuset_node_allowed(struct cgroup *cgroup, int nid)
+{
+ return true;
+}
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 2df2118fbe13..d35726d6a415 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_CRASH_CORE_H
#define LINUX_CRASH_CORE_H
@@ -5,69 +6,94 @@
#include <linux/elfcore.h>
#include <linux/elf.h>
-#define CRASH_CORE_NOTE_NAME "CORE"
-#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
-#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
-#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+struct kimage;
+struct crash_mem {
+ unsigned int max_nr_ranges;
+ unsigned int nr_ranges;
+ struct range ranges[] __counted_by(max_nr_ranges);
+};
+
+#ifdef CONFIG_CRASH_DUMP
+
+int crash_shrink_memory(unsigned long new_size);
+ssize_t crash_get_memory_size(void);
+
+#ifndef arch_kexec_protect_crashkres
/*
- * The per-cpu notes area is a list of notes terminated by a "NULL"
- * note header. For kdump, the code in vmcore.c runs in the context
- * of the second kernel to combine them into one note.
+ * Protection mechanism for crashkernel reserved memory after
+ * the kdump kernel is loaded.
+ *
+ * Provide an empty default implementation here -- architecture
+ * code may override this.
*/
-#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
- CRASH_CORE_NOTE_NAME_BYTES + \
- CRASH_CORE_NOTE_DESC_BYTES)
-
-#define VMCOREINFO_BYTES PAGE_SIZE
-#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
-#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
-#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
- VMCOREINFO_NOTE_NAME_BYTES + \
- VMCOREINFO_BYTES)
-
-typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
-
-void crash_update_vmcoreinfo_safecopy(void *ptr);
-void crash_save_vmcoreinfo(void);
-void arch_crash_save_vmcoreinfo(void);
-__printf(1, 2)
-void vmcoreinfo_append_str(const char *fmt, ...);
-phys_addr_t paddr_vmcoreinfo_note(void);
-
-#define VMCOREINFO_OSRELEASE(value) \
- vmcoreinfo_append_str("OSRELEASE=%s\n", value)
-#define VMCOREINFO_PAGESIZE(value) \
- vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
-#define VMCOREINFO_SYMBOL(name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
-#define VMCOREINFO_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(name))
-#define VMCOREINFO_STRUCT_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(struct name))
-#define VMCOREINFO_OFFSET(name, field) \
- vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
- (unsigned long)offsetof(struct name, field))
-#define VMCOREINFO_LENGTH(name, value) \
- vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
-#define VMCOREINFO_NUMBER(name) \
- vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
-#define VMCOREINFO_CONFIG(name) \
- vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
-
-extern u32 *vmcoreinfo_note;
-
-Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
- void *data, size_t data_len);
-void final_note(Elf_Word *buf);
-
-int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
+static inline void arch_kexec_protect_crashkres(void) { }
+#endif
+
+#ifndef arch_kexec_unprotect_crashkres
+static inline void arch_kexec_unprotect_crashkres(void) { }
+#endif
+
+#ifdef CONFIG_CRASH_DM_CRYPT
+int crash_load_dm_crypt_keys(struct kimage *image);
+ssize_t dm_crypt_keys_read(char *buf, size_t count, u64 *ppos);
+#else
+static inline int crash_load_dm_crypt_keys(struct kimage *image) { return 0; }
+#endif
+
+#ifndef arch_crash_handle_hotplug_event
+static inline void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) { }
+#endif
+
+int crash_check_hotplug_support(void);
+
+#ifndef arch_crash_hotplug_support
+static inline int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags)
+{
+ return 0;
+}
+#endif
+
+#ifndef crash_get_elfcorehdr_size
+static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
+#endif
+
+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN 4096
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+ unsigned long long mstart,
+ unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
+ void **addr, unsigned long *sz);
+
+struct kimage;
+struct kexec_segment;
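For orientation, a minimal sketch of how an architecture might combine crash_exclude_mem_range() and crash_prepare_elf64_headers() when building the ELF core header (editor's illustration; the RAM range is made up and error handling is trimmed; crashk_res is declared in crash_reserve.h):

static int example_prepare_elfcorehdr(void **addr, unsigned long *sz)
{
	struct crash_mem *cmem;
	int ret;

	cmem = kzalloc(struct_size(cmem, ranges, 2), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = 2;
	cmem->nr_ranges = 1;
	cmem->ranges[0].start = 0;		/* illustrative System RAM range */
	cmem->ranges[0].end = SZ_1G - 1;

	/* Carve the crashkernel reservation out of the dumpable ranges. */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (!ret)
		ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

	kfree(cmem);
	return ret;
}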
+
+#define KEXEC_CRASH_HP_NONE 0
+#define KEXEC_CRASH_HP_ADD_CPU 1
+#define KEXEC_CRASH_HP_REMOVE_CPU 2
+#define KEXEC_CRASH_HP_ADD_MEMORY 3
+#define KEXEC_CRASH_HP_REMOVE_MEMORY 4
+#define KEXEC_CRASH_HP_INVALID_CPU -1U
+
+extern void __crash_kexec(struct pt_regs *regs);
+extern void crash_kexec(struct pt_regs *regs);
+int kexec_should_crash(struct task_struct *p);
+int kexec_crash_loaded(void);
+void crash_save_cpu(struct pt_regs *regs, int cpu);
+extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
+
+#else /* !CONFIG_CRASH_DUMP */
+struct pt_regs;
+struct task_struct;
+struct kimage;
+static inline void __crash_kexec(struct pt_regs *regs) { }
+static inline void crash_kexec(struct pt_regs *regs) { }
+static inline int kexec_should_crash(struct task_struct *p) { return 0; }
+static inline int kexec_crash_loaded(void) { return 0; }
+static inline void crash_save_cpu(struct pt_regs *regs, int cpu) { }
+static inline int kimage_crash_copy_vmcoreinfo(struct kimage *image) { return 0; }
+#endif /* CONFIG_CRASH_DUMP */
#endif /* LINUX_CRASH_CORE_H */
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3873697ba21c..dd6fc3b2133b 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -1,29 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_CRASH_DUMP_H
#define LINUX_CRASH_DUMP_H
-#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
+#include <linux/pgtable.h>
+#include <uapi/linux/vmcore.h>
-#include <asm/pgtable.h> /* for pgprot_t */
-
+/* For IS_ENABLED(CONFIG_CRASH_DUMP) */
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
+extern unsigned long long dm_crypt_keys_addr;
+
+#ifdef CONFIG_CRASH_DUMP
extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
extern void elfcorehdr_free(unsigned long long addr);
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+void elfcorehdr_fill_device_ram_ptload_elf64(Elf64_Phdr *phdr,
+ unsigned long long paddr, unsigned long long size);
extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot);
-extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
- unsigned long, int);
+ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
+ unsigned long offset);
+ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset);
+
void vmcore_cleanup(void);
/* Architecture code defines this if there are other possible ELF
@@ -45,20 +54,22 @@ void vmcore_cleanup(void);
#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
#endif
+#ifndef is_kdump_kernel
/*
* is_kdump_kernel() checks whether this kernel is booting after a panic of
* previous kernel or not. This is determined by checking if previous kernel
* has passed the elf core header address on command line.
*
* This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will
- * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of
- * previous kernel.
+ * return true if CONFIG_CRASH_DUMP=y and the kernel is booting after a panic
+ * of the previous kernel.
*/
-static inline int is_kdump_kernel(void)
+static inline bool is_kdump_kernel(void)
{
- return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
+#endif
/* is_vmcore_usable() checks if the kernel is booting after a panic and
* the vmcore region is usable.
@@ -70,7 +81,8 @@ static inline int is_kdump_kernel(void)
static inline int is_vmcore_usable(void)
{
- return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_ERR &&
+ elfcorehdr_addr != ELFCORE_ADDR_MAX ? 1 : 0;
}
/* vmcore_unusable() marks the vmcore as unusable,
@@ -79,17 +91,102 @@ static inline int is_vmcore_usable(void)
static inline void vmcore_unusable(void)
{
- if (is_kdump_kernel())
- elfcorehdr_addr = ELFCORE_ADDR_ERR;
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
}
-#define HAVE_OLDMEM_PFN_IS_RAM 1
-extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
-extern void unregister_oldmem_pfn_is_ram(void);
+/**
+ * struct vmcore_cb - driver callbacks for /proc/vmcore handling
+ * @pfn_is_ram: check whether a PFN really is RAM and should be accessed when
+ * reading the vmcore. Will return "true" if it is RAM or if the
+ * callback cannot tell. If any callback returns "false", it's not
+ * RAM and the page must not be accessed; zeroes should be
+ * indicated in the vmcore instead. For example, a ballooned page
+ * contains no data and reading from such a page will cause high
+ * load in the hypervisor.
+ * @get_device_ram: query RAM ranges that can only be detected by device
+ * drivers, such as the virtio-mem driver, so they can be included in
+ * the crash dump on architectures that allocate the elfcore hdr in the dump
+ * ("2nd") kernel. Indicated RAM ranges may contain holes to reduce the
+ * total number of ranges; such holes can be detected using the pfn_is_ram
+ * callback just like for other RAM.
+ * @next: List head to manage registered callbacks internally; initialized by
+ * register_vmcore_cb().
+ *
+ * vmcore callbacks allow drivers managing physical memory ranges to
+ * coordinate with vmcore handling code, for example, to prevent accessing
+ * physical memory ranges that should not be accessed when reading the vmcore,
+ * even though they are included in the vmcore header as ranges to dump.
+ */
+struct vmcore_cb {
+ bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
+ int (*get_device_ram)(struct vmcore_cb *cb, struct list_head *list);
+ struct list_head next;
+};
+extern void register_vmcore_cb(struct vmcore_cb *cb);
+extern void unregister_vmcore_cb(struct vmcore_cb *cb);
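A driver-side sketch of the callback registration (editor's illustration; the balloon test is a hypothetical stand-in):

static bool example_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	/* Report inflated pages as not-RAM so vmcore substitutes zeroes. */
	return !example_pfn_is_ballooned(pfn);	/* hypothetical helper */
}

static struct vmcore_cb example_vmcore_cb = {
	.pfn_is_ram = example_pfn_is_ram,
};

static int __init example_init(void)
{
	register_vmcore_cb(&example_vmcore_cb);
	return 0;
}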
+
+struct vmcore_range {
+ struct list_head list;
+ unsigned long long paddr;
+ unsigned long long size;
+ loff_t offset;
+};
+
+/* Allocate a vmcore range and add it to the list. */
+static inline int vmcore_alloc_add_range(struct list_head *list,
+ unsigned long long paddr, unsigned long long size)
+{
+ struct vmcore_range *m = kzalloc(sizeof(*m), GFP_KERNEL);
+
+ if (!m)
+ return -ENOMEM;
+ m->paddr = paddr;
+ m->size = size;
+ list_add_tail(&m->list, list);
+ return 0;
+}
+
+/* Free a list of vmcore ranges. */
+static inline void vmcore_free_ranges(struct list_head *list)
+{
+ struct vmcore_range *m, *tmp;
+
+ list_for_each_entry_safe(m, tmp, list, list) {
+ list_del(&m->list);
+ kfree(m);
+ }
+}
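And a sketch of a get_device_ram-style producer built on the two helpers above (addresses and sizes are made up for illustration):

static int example_collect_device_ram(struct list_head *list)
{
	int ret;

	ret = vmcore_alloc_add_range(list, 0x100000000ULL, SZ_128M);
	if (!ret)
		ret = vmcore_alloc_add_range(list, 0x180000000ULL, SZ_64M);
	if (ret)
		vmcore_free_ranges(list);	/* undo partial work on failure */
	return ret;
}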
#else /* !CONFIG_CRASH_DUMP */
-static inline int is_kdump_kernel(void) { return 0; }
+static inline bool is_kdump_kernel(void) { return false; }
#endif /* CONFIG_CRASH_DUMP */
-extern unsigned long saved_max_pfn;
+/* Device Dump information to be filled by drivers */
+struct vmcoredd_data {
+ char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */
+ unsigned int size; /* Size of the dump */
+ /* Driver's registered callback to be invoked to collect dump */
+ int (*vmcoredd_callback)(struct vmcoredd_data *data, void *buf);
+};
+
+#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
+int vmcore_add_device_dump(struct vmcoredd_data *data);
+#else
+static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+
+#ifdef CONFIG_PROC_VMCORE
+ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted);
+#else
+static inline ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
#endif /* LINUX_CRASHDUMP_H */
diff --git a/include/linux/crash_reserve.h b/include/linux/crash_reserve.h
new file mode 100644
index 000000000000..f0dc03d94ca2
--- /dev/null
+++ b/include/linux/crash_reserve.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_CRASH_RESERVE_H
+#define LINUX_CRASH_RESERVE_H
+
+#include <linux/linkage.h>
+#include <linux/elfcore.h>
+#include <linux/elf.h>
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#include <asm/crash_reserve.h>
+#endif
+
+/* Location of a reserved region to hold the crash kernel. */
+extern struct resource crashk_res;
+extern struct resource crashk_low_res;
+extern struct range crashk_cma_ranges[];
+#if defined(CONFIG_CMA) && defined(CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION)
+#define CRASHKERNEL_CMA
+#define CRASHKERNEL_CMA_RANGES_MAX 4
+extern int crashk_cma_cnt;
+#else
+#define crashk_cma_cnt 0
+#define CRASHKERNEL_CMA_RANGES_MAX 0
+#endif
+
+int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base,
+ unsigned long long *low_size, unsigned long long *cma_size,
+ bool *high);
+
+void __init reserve_crashkernel_cma(unsigned long long cma_size);
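For orientation, the command-line forms this parser handles include (values illustrative; see Documentation/admin-guide/kernel-parameters.txt):

	crashkernel=256M			fixed size, base chosen by the kernel
	crashkernel=256M@64M			fixed size at a fixed base
	crashkernel=512M,high			prefer memory above 4G
	crashkernel=128M,low			companion low reservation
	crashkernel=1G-4G:128M,4G-:256M		size scaled by system RAM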
+
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#ifndef arch_add_crash_res_to_iomem
+static inline bool arch_add_crash_res_to_iomem(void)
+{
+ return true;
+}
+#endif
+#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
+#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+#endif
+#ifndef CRASH_ALIGN
+#define CRASH_ALIGN SZ_2M
+#endif
+#ifndef CRASH_ADDR_LOW_MAX
+#define CRASH_ADDR_LOW_MAX SZ_4G
+#endif
+#ifndef CRASH_ADDR_HIGH_MAX
+#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM()
+#endif
+
+void __init reserve_crashkernel_generic(unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high);
+#else
+static inline void __init reserve_crashkernel_generic(
+ unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high)
+{}
+#endif
+#endif /* LINUX_CRASH_RESERVE_H */
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index f52696a1ff0d..cd4f420231ba 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC_CCITT_H
#define _LINUX_CRC_CCITT_H
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index a9953c762eee..2f991a427ade 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -1,13 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* crc-itu-t.h - CRC ITU-T V.41 routine
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly 0x1021 (x^16 + x^12 + x^5 + 1)
* Init 0
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#ifndef CRC_ITU_T_H
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index d81961e9e37d..ecc8bc2dd7f4 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -1,14 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC_T10DIF_H
#define _LINUX_CRC_T10DIF_H
#include <linux/types.h>
-#define CRC_T10DIF_DIGEST_SIZE 2
-#define CRC_T10DIF_BLOCK_SIZE 1
+u16 crc_t10dif_update(u16 crc, const u8 *p, size_t len);
-extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
- size_t len);
-extern __u16 crc_t10dif(unsigned char const *, size_t);
-extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t);
+static inline u16 crc_t10dif(const u8 *p, size_t len)
+{
+ return crc_t10dif_update(0, p, len);
+}
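A sketch showing that chaining crc_t10dif_update() matches the one-shot helper (the buffer split is arbitrary):

static u16 example_crc_chunked(const u8 *p, size_t len)
{
	size_t half = len / 2;
	u16 crc;

	crc = crc_t10dif_update(0, p, half);
	crc = crc_t10dif_update(crc, p + half, len - half);
	return crc;	/* equals crc_t10dif(p, len) */
}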
#endif
diff --git a/include/linux/crc16.h b/include/linux/crc16.h
index 9443c084f881..b861d969b161 100644
--- a/include/linux/crc16.h
+++ b/include/linux/crc16.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* crc16.h - CRC-16 routine
*
@@ -7,9 +8,6 @@
* Init 0
*
* Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#ifndef __CRC16_H
@@ -17,14 +15,7 @@
#include <linux/types.h>
-extern u16 const crc16_table[256];
-
-extern u16 crc16(u16 crc, const u8 *buffer, size_t len);
-
-static inline u16 crc16_byte(u16 crc, const u8 data)
-{
- return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
-}
+u16 crc16(u16 crc, const u8 *p, size_t len);
#endif /* __CRC16_H */
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 9e8a032c1788..da78b215ff2e 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -1,69 +1,100 @@
-/*
- * crc32.h
- * See linux/lib/crc32.c for license and changes
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_CRC32_H
#define _LINUX_CRC32_H
#include <linux/types.h>
#include <linux/bitrev.h>
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len);
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len);
-
/**
- * crc32_le_combine - Combine two crc32 check values into one. For two
- * sequences of bytes, seq1 and seq2 with lengths len1
- * and len2, crc32_le() check values were calculated
- * for each, crc1 and crc2.
+ * crc32_le() - Compute least-significant-bit-first IEEE CRC-32
+ * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or
+ * the previous CRC value if computing incrementally.
+ * @p: Pointer to the data buffer
+ * @len: Length of data in bytes
+ *
+ * This implements the CRC variant that is often known as the IEEE CRC-32, or
+ * simply CRC-32, and is widely used in Ethernet and other applications:
+ *
+ * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 +
+ * x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+ * - Bit order: Least-significant-bit-first
+ * - Polynomial in integer form: 0xedb88320
*
- * @crc1: crc32 of the first block
- * @crc2: crc32 of the second block
- * @len2: length of the second block
+ * This does *not* invert the CRC at the beginning or end. The caller is
+ * expected to do that if it needs to. Inverting at both ends is recommended.
*
- * Return: The crc32_le() check value of seq1 and seq2 concatenated,
- * requiring only crc1, crc2, and len2. Note: If seq_full denotes
- * the concatenated memory area of seq1 with seq2, and crc_full
- * the crc32_le() value of seq_full, then crc_full ==
- * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded
- * with the same initializer as crc1, and crc2 seed was 0. See
- * also crc32_combine_test().
+ * For new applications, prefer to use CRC-32C instead. See crc32c().
+ *
+ * Context: Any context
+ * Return: The new CRC value
*/
-u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len);
+u32 crc32_le(u32 crc, const void *p, size_t len);
-static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+/* This is just an alias for crc32_le(). */
+static inline u32 crc32(u32 crc, const void *p, size_t len)
{
- return crc32_le_shift(crc1, len2) ^ crc2;
+ return crc32_le(crc, p, len);
}
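Given the no-inversion convention documented above, a sketch of the conventional (inverted) IEEE CRC-32 as used by e.g. Ethernet and zlib:

static u32 example_ieee_crc32(const void *p, size_t len)
{
	/* Seed with ~0 and invert the result, per the recommendation above. */
	return ~crc32(~0U, p, len);
}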
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len);
-
/**
- * __crc32c_le_combine - Combine two crc32c check values into one. For two
- * sequences of bytes, seq1 and seq2 with lengths len1
- * and len2, __crc32c_le() check values were calculated
- * for each, crc1 and crc2.
+ * crc32_be() - Compute most-significant-bit-first IEEE CRC-32
+ * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or
+ * the previous CRC value if computing incrementally.
+ * @p: Pointer to the data buffer
+ * @len: Length of data in bytes
*
- * @crc1: crc32c of the first block
- * @crc2: crc32c of the second block
- * @len2: length of the second block
+ * crc32_be() is the same as crc32_le() except that crc32_be() computes the
+ * *most-significant-bit-first* variant of the CRC. I.e., within each byte, the
+ * most significant bit is processed first (treated as highest order polynomial
+ * coefficient). The same bit order is also used for the CRC value itself:
*
- * Return: The __crc32c_le() check value of seq1 and seq2 concatenated,
- * requiring only crc1, crc2, and len2. Note: If seq_full denotes
- * the concatenated memory area of seq1 with seq2, and crc_full
- * the __crc32c_le() value of seq_full, then crc_full ==
- * __crc32c_le_combine(crc1, crc2, len2) when crc_full was
- * seeded with the same initializer as crc1, and crc2 seed
- * was 0. See also crc32c_combine_test().
+ * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 +
+ * x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+ * - Bit order: Most-significant-bit-first
+ * - Polynomial in integer form: 0x04c11db7
+ *
+ * Context: Any context
+ * Return: The new CRC value
*/
-u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len);
+u32 crc32_be(u32 crc, const void *p, size_t len);
-static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
-{
- return __crc32c_le_shift(crc1, len2) ^ crc2;
-}
+/**
+ * crc32c() - Compute CRC-32C
+ * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or
+ * the previous CRC value if computing incrementally.
+ * @p: Pointer to the data buffer
+ * @len: Length of data in bytes
+ *
+ * This implements CRC-32C, i.e. the Castagnoli CRC. This is the recommended
+ * CRC variant to use in new applications that want a 32-bit CRC.
+ *
+ * - Polynomial: x^32 + x^28 + x^27 + x^26 + x^25 + x^23 + x^22 + x^20 + x^19 +
+ * x^18 + x^14 + x^13 + x^11 + x^10 + x^9 + x^8 + x^6 + x^0
+ * - Bit order: Least-significant-bit-first
+ * - Polynomial in integer form: 0x82f63b78
+ *
+ * This does *not* invert the CRC at the beginning or end. The caller is
+ * expected to do that if it needs to. Inverting at both ends is recommended.
+ *
+ * Context: Any context
+ * Return: The new CRC value
+ */
+u32 crc32c(u32 crc, const void *p, size_t len);
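Likewise for CRC-32C, a sketch of incremental use with the recommended inversions (the split into two buffers is illustrative):

static u32 example_crc32c_two_bufs(const void *a, size_t alen,
				   const void *b, size_t blen)
{
	u32 crc = ~0U;	/* recommended initial value */

	crc = crc32c(crc, a, alen);	/* chaining equals one call over a||b */
	crc = crc32c(crc, b, blen);
	return ~crc;	/* invert at the end, as recommended above */
}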
-#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
+/*
+ * crc32_optimizations() returns flags that indicate which CRC32 library
+ * functions are using architecture-specific optimizations. Unlike
+ * IS_ENABLED(CONFIG_CRC32_ARCH) it takes into account the different CRC32
+ * variants and also whether any needed CPU features are available at runtime.
+ */
+#define CRC32_LE_OPTIMIZATION BIT(0) /* crc32_le() is optimized */
+#define CRC32_BE_OPTIMIZATION BIT(1) /* crc32_be() is optimized */
+#define CRC32C_OPTIMIZATION BIT(2) /* crc32c() is optimized */
+#if IS_ENABLED(CONFIG_CRC32_ARCH)
+u32 crc32_optimizations(void);
+#else
+static inline u32 crc32_optimizations(void) { return 0; }
+#endif
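A sketch of a consumer using these flags, e.g. when picking a checksum for a new on-disk format (the policy shown is illustrative):

static bool example_prefer_crc32c(void)
{
	/* True when crc32c() has an accelerated implementation available. */
	return crc32_optimizations() & CRC32C_OPTIMIZATION;
}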
/*
* Helpers for hash table generation of ethernet nics:
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
index bd8b44d96bdc..b8cff2f4309a 100644
--- a/include/linux/crc32c.h
+++ b/include/linux/crc32c.h
@@ -1,11 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC32C_H
#define _LINUX_CRC32C_H
-#include <linux/types.h>
-
-extern u32 crc32c(u32 crc, const void *address, unsigned int length);
-
-/* This macro exists for backwards-compatibility. */
-#define crc32c_le crc32c
+#include <linux/crc32.h>
#endif /* _LINUX_CRC32C_H */
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
new file mode 100644
index 000000000000..ccab711295fa
--- /dev/null
+++ b/include/linux/crc32poly.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CRC32_POLY_H
+#define _LINUX_CRC32_POLY_H
+
+/* The polynomial used by crc32_le(), in integer form. See crc32_le(). */
+#define CRC32_POLY_LE 0xedb88320
+
+/* The polynomial used by crc32_be(), in integer form. See crc32_be(). */
+#define CRC32_POLY_BE 0x04c11db7
+
+/* The polynomial used by crc32c(), in integer form. See crc32c(). */
+#define CRC32C_POLY_LE 0x82f63b78
+
+#endif /* _LINUX_CRC32_POLY_H */
diff --git a/include/linux/crc4.h b/include/linux/crc4.h
index 8f739f1d794f..bd2c90556a06 100644
--- a/include/linux/crc4.h
+++ b/include/linux/crc4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC4_H
#define _LINUX_CRC4_H
diff --git a/include/linux/crc64.h b/include/linux/crc64.h
new file mode 100644
index 000000000000..fc0c06ab1993
--- /dev/null
+++ b/include/linux/crc64.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CRC64_H
+#define _LINUX_CRC64_H
+
+#include <linux/types.h>
+
+/**
+ * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
+ * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
+ * or the previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 crc64_be(u64 crc, const void *p, size_t len);
+
+/**
+ * crc64_nvme - Calculate CRC64-NVME
+ * @crc: seed value for computation. 0 for a new CRC calculation, or the
+ * previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ *
+ * This computes the CRC64 defined in the NVME NVM Command Set Specification,
+ * *including the bitwise inversion at the beginning and end*.
+ */
+u64 crc64_nvme(u64 crc, const void *p, size_t len);
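Per the incremental-seed convention documented above, a sketch of chunked CRC64-NVME computation (the split is arbitrary):

static u64 example_crc64_nvme_chunked(const u8 *p, size_t len)
{
	size_t half = len / 2;
	u64 crc = crc64_nvme(0, p, half);

	/* Subsequent calls chain via the previously returned value. */
	return crc64_nvme(crc, p + half, len - half);
}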
+
+#endif /* _LINUX_CRC64_H */
diff --git a/include/linux/crc7.h b/include/linux/crc7.h
index d590765106f3..61d34749e437 100644
--- a/include/linux/crc7.h
+++ b/include/linux/crc7.h
@@ -1,14 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC7_H
#define _LINUX_CRC7_H
#include <linux/types.h>
-extern const u8 crc7_be_syndrome_table[256];
-
-static inline u8 crc7_be_byte(u8 crc, u8 data)
-{
- return crc7_be_syndrome_table[crc ^ data];
-}
-
extern u8 crc7_be(u8 crc, const u8 *buffer, size_t len);
#endif
diff --git a/include/linux/crc8.h b/include/linux/crc8.h
index 13c8dabb0441..674045c59a04 100644
--- a/include/linux/crc8.h
+++ b/include/linux/crc8.h
@@ -96,6 +96,6 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
* Williams, Ross N., ross<at>ross.net
* (see URL http://www.ross.net/crc/download/crc_v3.txt).
*/
-u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc);
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc);
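A usage sketch combining crc8_populate_msb() with the now const-correct crc8() (the polynomial and seed values are illustrative, not mandated by the API):

static u8 example_crc8(const u8 *buf, size_t len)
{
	static u8 table[CRC8_TABLE_SIZE];
	static bool initialized;

	if (!initialized) {
		crc8_populate_msb(table, 0x07);	/* illustrative polynomial */
		initialized = true;
	}
	return crc8(table, buf, len, 0xff);	/* 0xff: illustrative seed */
}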
#endif /* __CRC8_H_ */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 099058e1178b..343a140a6ba2 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Credentials management - see Documentation/security/credentials.rst
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_CRED_H
@@ -15,8 +11,8 @@
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/key.h>
-#include <linux/selinux.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/uidgid.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
@@ -24,13 +20,15 @@
struct cred;
struct inode;
+extern struct task_struct init_task;
+
/*
* COW Supplementary groups list
*/
struct group_info {
- atomic_t usage;
+ refcount_t usage;
int ngroups;
- kgid_t gid[0];
+ kgid_t gid[];
} __randomize_layout;
/**
@@ -44,7 +42,7 @@ struct group_info {
*/
static inline struct group_info *get_group_info(struct group_info *gi)
{
- atomic_inc(&gi->usage);
+ refcount_inc(&gi->usage);
return gi;
}
@@ -54,17 +52,22 @@ static inline struct group_info *get_group_info(struct group_info *gi)
*/
#define put_group_info(group_info) \
do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
+ if (refcount_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
-extern struct group_info init_groups;
#ifdef CONFIG_MULTIUSER
extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);
extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
+extern int groups_search(const struct group_info *, kgid_t);
+
+extern int set_current_groups(struct group_info *);
+extern void set_groups(struct cred *, struct group_info *);
+extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
#else
static inline void groups_free(struct group_info *group_info)
{
@@ -78,11 +81,11 @@ static inline int in_egroup_p(kgid_t grp)
{
return 1;
}
+static inline int groups_search(const struct group_info *group_info, kgid_t grp)
+{
+ return 1;
+}
#endif
-extern int set_current_groups(struct group_info *);
-extern void set_groups(struct cred *, struct group_info *);
-extern int groups_search(const struct group_info *, kgid_t);
-extern bool may_setgroups(void);
/*
* The security context of a task
@@ -108,14 +111,7 @@ extern bool may_setgroups(void);
* same context as task->real_cred.
*/
struct cred {
- atomic_t usage;
-#ifdef CONFIG_DEBUG_CREDENTIALS
- atomic_t subscribers; /* number of processes subscribed */
- void *put_addr;
- unsigned magic;
-#define CRED_MAGIC 0x43736564
-#define CRED_MAGIC_DEAD 0x44656144
-#endif
+ atomic_long_t usage;
kuid_t uid; /* real UID of the task */
kgid_t gid; /* real GID of the task */
kuid_t suid; /* saved UID of the task */
@@ -133,138 +129,164 @@ struct cred {
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
- struct key __rcu *session_keyring; /* keyring inherited over fork */
+ struct key *session_keyring; /* keyring inherited over fork */
struct key *process_keyring; /* keyring private to this process */
struct key *thread_keyring; /* keyring private to this thread */
struct key *request_key_auth; /* assumed request_key authority */
#endif
#ifdef CONFIG_SECURITY
- void *security; /* subjective LSM security */
+ void *security; /* LSM security */
#endif
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+ struct ucounts *ucounts;
struct group_info *group_info; /* supplementary groups for euid/fsgid */
- struct rcu_head rcu; /* RCU deletion hook */
+ /* RCU deletion */
+ union {
+ int non_rcu; /* Can we skip RCU deletion? */
+ struct rcu_head rcu; /* RCU deletion hook */
+ };
} __randomize_layout;
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
-extern int copy_creds(struct task_struct *, unsigned long);
+extern int copy_creds(struct task_struct *, u64);
extern const struct cred *get_task_cred(struct task_struct *);
extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern int commit_creds(struct cred *);
extern void abort_creds(struct cred *);
-extern const struct cred *override_creds(const struct cred *);
-extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
-extern int change_create_files_as(struct cred *, struct inode *);
+static inline const struct cred *kernel_cred(void)
+{
+ /* shut up sparse */
+ return rcu_dereference_raw(init_task.cred);
+}
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
+extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
+extern int set_cred_ucounts(struct cred *);
-/*
- * check for validity of credentials
- */
-#ifdef CONFIG_DEBUG_CREDENTIALS
-extern void __invalid_creds(const struct cred *, const char *, unsigned);
-extern void __validate_process_creds(struct task_struct *,
- const char *, unsigned);
-
-extern bool creds_are_invalid(const struct cred *cred);
-
-static inline void __validate_creds(const struct cred *cred,
- const char *file, unsigned line)
+static inline bool cap_ambient_invariant_ok(const struct cred *cred)
{
- if (unlikely(creds_are_invalid(cred)))
- __invalid_creds(cred, file, line);
+ return cap_issubset(cred->cap_ambient,
+ cap_intersect(cred->cap_permitted,
+ cred->cap_inheritable));
}
-#define validate_creds(cred) \
-do { \
- __validate_creds((cred), __FILE__, __LINE__); \
-} while(0)
-
-#define validate_process_creds() \
-do { \
- __validate_process_creds(current, __FILE__, __LINE__); \
-} while(0)
-
-extern void validate_creds_for_do_exit(struct task_struct *);
-#else
-static inline void validate_creds(const struct cred *cred)
-{
-}
-static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+static inline const struct cred *override_creds(const struct cred *override_cred)
{
+ return rcu_replace_pointer(current->cred, override_cred, 1);
}
-static inline void validate_process_creds(void)
-{
-}
-#endif
-static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+static inline const struct cred *revert_creds(const struct cred *revert_cred)
{
- return cap_issubset(cred->cap_ambient,
- cap_intersect(cred->cap_permitted,
- cred->cap_inheritable));
+ return rcu_replace_pointer(current->cred, revert_cred, 1);
}
+DEFINE_CLASS(override_creds,
+ const struct cred *,
+ revert_creds(_T),
+ override_creds(override_cred), const struct cred *override_cred)
+
+#define scoped_with_creds(cred) \
+ scoped_class(override_creds, __UNIQUE_ID(label), cred)
+
+#define scoped_with_kernel_creds() scoped_with_creds(kernel_cred())
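A sketch of the override/revert pairing these guards wrap (the credential source and the vfs_read() payload are illustrative):

static ssize_t example_read_as(const struct cred *cred, struct file *file,
			       char __user *buf, size_t count, loff_t *pos)
{
	const struct cred *old = override_creds(cred);
	ssize_t ret = vfs_read(file, buf, count, pos);

	revert_creds(old);	/* always restore what override_creds() returned */
	return ret;
}

The scoped_with_creds(cred) form above expresses the same pattern with the revert emitted automatically at scope exit.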
+
/**
- * get_new_cred - Get a reference on a new set of credentials
- * @cred: The new credentials to reference
+ * get_cred_many - Get references on a set of credentials
+ * @cred: The credentials to reference
+ * @nr: Number of references to acquire
*
- * Get a reference on the specified set of new credentials. The caller must
- * release the reference.
+ * Get references on the specified set of credentials. The caller must release
+ * all acquired references. If %NULL is passed, it is returned with no action.
+ *
+ * This is used to deal with a committed set of credentials. Although the
+ * pointer is const, this will temporarily discard the const and increment the
+ * usage count. The purpose of this is to attempt to catch at compile time the
+ * accidental alteration of a set of credentials that should be considered
+ * immutable.
*/
-static inline struct cred *get_new_cred(struct cred *cred)
+static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
{
- atomic_inc(&cred->usage);
+ struct cred *nonconst_cred = (struct cred *) cred;
+ if (!cred)
+ return cred;
+ nonconst_cred->non_rcu = 0;
+ atomic_long_add(nr, &nonconst_cred->usage);
return cred;
}
-/**
+/*
* get_cred - Get a reference on a set of credentials
* @cred: The credentials to reference
*
* Get a reference on the specified set of credentials. The caller must
- * release the reference.
+ * release the reference. If %NULL is passed, it is returned with no action.
*
- * This is used to deal with a committed set of credentials. Although the
- * pointer is const, this will temporarily discard the const and increment the
- * usage count. The purpose of this is to attempt to catch at compile time the
- * accidental alteration of a set of credentials that should be considered
- * immutable.
+ * This is used to deal with a committed set of credentials.
*/
static inline const struct cred *get_cred(const struct cred *cred)
{
+ return get_cred_many(cred, 1);
+}
+
+static inline const struct cred *get_cred_rcu(const struct cred *cred)
+{
struct cred *nonconst_cred = (struct cred *) cred;
- validate_creds(cred);
- return get_new_cred(nonconst_cred);
+ if (!cred)
+ return NULL;
+ if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
+ return NULL;
+ nonconst_cred->non_rcu = 0;
+ return cred;
}
/**
- * put_cred - Release a reference to a set of credentials
+ * put_cred_many - Release references to a set of credentials
* @cred: The credentials to release
+ * @nr: Number of references to release
*
* Release a reference to a set of credentials, deleting them when the last ref
- * is released.
+ * is released. If %NULL is passed, nothing is done.
*
* This takes a const pointer to a set of credentials because the credentials
* on task_struct are attached by const pointers to prevent accidental
* alteration of otherwise immutable credential sets.
*/
-static inline void put_cred(const struct cred *_cred)
+static inline void put_cred_many(const struct cred *_cred, int nr)
{
struct cred *cred = (struct cred *) _cred;
- validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
- __put_cred(cred);
+ if (cred) {
+ if (atomic_long_sub_and_test(nr, &cred->usage))
+ __put_cred(cred);
+ }
+}
+
+/*
+ * put_cred - Release a reference to a set of credentials
+ * @cred: The credentials to release
+ *
+ * Release a reference to a set of credentials, deleting them when the last ref
+ * is released. If %NULL is passed, nothing is done.
+ */
+static inline void put_cred(const struct cred *cred)
+{
+ put_cred_many(cred, 1);
}
+DEFINE_CLASS(prepare_creds,
+ struct cred *,
+ if (_T) put_cred(_T),
+ prepare_creds(), void)
+
+DEFINE_FREE(put_cred, struct cred *, if (!IS_ERR_OR_NULL(_T)) put_cred(_T))
+
/**
* current_cred - Access the current task's subjective credentials
*
@@ -347,6 +369,7 @@ static inline void put_cred(const struct cred *_cred)
#define task_uid(task) (task_cred_xxx((task), uid))
#define task_euid(task) (task_cred_xxx((task), euid))
+#define task_ucounts(task) (task_cred_xxx((task), ucounts))
#define current_cred_xxx(xxx) \
({ \
@@ -363,7 +386,7 @@ static inline void put_cred(const struct cred *_cred)
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
-#define current_security() (current_cred_xxx(security))
+#define current_ucounts() (current_cred_xxx(ucounts))
extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 07eed95e10c7..30dba392b730 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_CRUSH_CRUSH_H
#define CEPH_CRUSH_CRUSH_H
@@ -16,7 +17,7 @@
* The algorithm was originally described in detail in this paper
* (although the algorithm has evolved somewhat since then):
*
- * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
+ * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
*
* LGPL2
*/
@@ -86,7 +87,7 @@ struct crush_rule_mask {
struct crush_rule {
__u32 len;
struct crush_rule_mask mask;
- struct crush_rule_step steps[0];
+ struct crush_rule_step steps[];
};
#define crush_rule_size(len) (sizeof(struct crush_rule) + \
@@ -300,6 +301,12 @@ struct crush_map {
__u32 *choose_tries;
#else
+ /* device/bucket type id -> type name (CrushWrapper::type_map) */
+ struct rb_root type_names;
+
+ /* device/bucket id -> name (CrushWrapper::name_map) */
+ struct rb_root names;
+
/* CrushWrapper::choose_args */
struct rb_root choose_args;
#endif
@@ -339,6 +346,15 @@ struct crush_work_bucket {
struct crush_work {
struct crush_work_bucket **work; /* Per-bucket working store */
+#ifdef __KERNEL__
+ struct list_head item;
+#endif
};
+#ifdef __KERNEL__
+/* osdmap.c */
+void clear_crush_names(struct rb_root *root);
+void clear_choose_args(struct crush_map *c);
+#endif
+
#endif
diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h
index d1d90258242e..904df41f7847 100644
--- a/include/linux/crush/hash.h
+++ b/include/linux/crush/hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_CRUSH_HASH_H
#define CEPH_CRUSH_HASH_H
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h
index 141edabb947e..f9b99232f5a1 100644
--- a/include/linux/crush/mapper.h
+++ b/include/linux/crush/mapper.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_CRUSH_MAPPER_H
#define CEPH_CRUSH_MAPPER_H
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 84da9978e951..a2137e19be7d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Scatterlist Cryptographic API.
*
@@ -7,61 +8,34 @@
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H
-#include <linux/atomic.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/refcount_types.h>
#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-
-/*
- * Autoloaded crypto modules should only use a prefixed name to avoid allowing
- * arbitrary modules to be loaded. Loading from userspace may still need the
- * unprefixed names, so retains those aliases as well.
- * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
- * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
- * expands twice on the same line. Instead, use a separate base name for the
- * alias.
- */
-#define MODULE_ALIAS_CRYPTO(name) \
- __MODULE_INFO(alias, alias_userspace, name); \
- __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+#include <linux/types.h>
/*
* Algorithm masks and types.
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
-#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
-#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
-#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
+#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
-#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006
+#define CRYPTO_ALG_TYPE_SIG 0x00000007
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
-#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
-#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
#define CRYPTO_ALG_TYPE_HASH 0x0000000e
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
-#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -70,16 +44,19 @@
#define CRYPTO_ALG_ASYNC 0x00000080
/*
- * Set this bit if and only if the algorithm requires another algorithm of
- * the same type to handle corner cases.
+ * Set if the algorithm (or an algorithm which it uses) requires another
+ * algorithm of the same type to handle corner cases.
*/
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
- * This bit is set for symmetric key ciphers that have already been wrapped
- * with a generic IV generator to prevent them from being wrapped again.
+ * Set if the algorithm data structure should be duplicated into
+ * kmalloc memory before registration. This is useful for hardware
+ * that can be disconnected at will. Do not use this if the data
+ * structure is embedded into a bigger one. Duplicate the overall
+ * data structure in the driver in that case.
*/
-#define CRYPTO_ALG_GENIV 0x00000200
+#define CRYPTO_ALG_DUP_FIRST 0x00000200
/*
* Set if the algorithm has passed automated run-time testing. Note that
@@ -106,19 +83,74 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
+ * Set if the algorithm has a ->setkey() method but can be used without
+ * calling it first, i.e. there is a default key.
+ */
+#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
+
+/*
+ * Don't trigger module loading
+ */
+#define CRYPTO_NOLOAD 0x00008000
+
+/*
+ * The algorithm may allocate memory during request processing, i.e. during
+ * encryption, decryption, or hashing. Users can request an algorithm with this
+ * flag unset if they can't handle memory allocation failures.
+ *
+ * This flag is currently only implemented for algorithms of type "skcipher",
+ * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
+ * have this flag set even if they allocate memory.
+ *
+ * In some edge cases, algorithms can allocate memory regardless of this flag.
+ * To avoid these cases, users must obey the following usage constraints:
+ * skcipher:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - If the data were to be divided into chunks of size
+ * crypto_skcipher_walksize() (with any remainder going at the end), no
+ * chunk can cross a page boundary or a scatterlist element boundary.
+ * aead:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - The first scatterlist element must contain all the associated data,
+ * and its pages must be !PageHighMem.
+ * - If the plaintext/ciphertext were to be divided into chunks of size
+ * crypto_aead_walksize() (with the remainder going at the end), no chunk
+ * can cross a page boundary or a scatterlist element boundary.
+ * ahash:
+ * - crypto_ahash_finup() must not be used unless the algorithm implements
+ * ->finup() natively.
+ */
+#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000
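For example, a user that cannot tolerate allocation failures can pass the flag in the allocation mask so only conforming algorithms are selected (a sketch; the algorithm name is illustrative):

static struct crypto_skcipher *example_alloc_nonallocating(void)
{
	/* type == 0, mask == CRYPTO_ALG_ALLOCATES_MEMORY requests that the
	 * flag be clear in the chosen implementation. */
	return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
}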
+
+/*
+ * Mark an algorithm as a service implementation only usable by a
+ * template and never by a normal user of the kernel crypto API.
+ * This is intended to be used by algorithms that are themselves
+ * not FIPS-approved but may instead be used to implement parts of
+ * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
+ */
+#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
+
+/* Set if the algorithm supports virtual addresses. */
+#define CRYPTO_ALG_REQ_VIRT 0x00040000
+
+/* Set if the algorithm cannot have a fallback (e.g., phmac). */
+#define CRYPTO_ALG_NO_FALLBACK 0x00080000
+
+/* The high bits 0xff000000 are reserved for type-specific flags. */
+
+/*
* Transform masks and values (for crt_flags).
*/
-#define CRYPTO_TFM_REQ_MASK 0x000fff00
-#define CRYPTO_TFM_RES_MASK 0xfff00000
+#define CRYPTO_TFM_NEED_KEY 0x00000001
-#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
+#define CRYPTO_TFM_REQ_MASK 0x000fff00
+#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
-#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
+#define CRYPTO_TFM_REQ_ON_STACK 0x00000800
/*
* Miscellaneous stuff.
@@ -129,23 +161,22 @@
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
* declaration) is used to ensure that the crypto_tfm context structure is
* aligned correctly for the given architecture so that there are no alignment
- * faults for C data types. In particular, this is required on platforms such
- * as arm where pointers are 32-bit aligned but there are data types such as
- * u64 which require 64-bit alignment.
+ * faults for C data types. On architectures that support non-cache coherent
+ * DMA, such as ARM or arm64, it also takes into account the minimal alignment
+ * that is required to ensure that the context struct member does not share any
+ * cachelines with the rest of the struct. This is needed to ensure that cache
+ * maintenance for non-coherent DMA (cache invalidation in particular) does not
+ * affect data that may be accessed by the CPU concurrently.
*/
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
-struct scatterlist;
-struct crypto_ablkcipher;
-struct crypto_async_request;
-struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
-struct skcipher_givcrypt_request;
+struct module;
-typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+typedef void (*crypto_completion_t)(void *req, int err);
/**
* DOC: Block Cipher Context Data Structures
@@ -163,33 +194,6 @@ struct crypto_async_request {
u32 flags;
};
-struct ablkcipher_request {
- struct crypto_async_request base;
-
- unsigned int nbytes;
-
- void *info;
-
- struct scatterlist *src;
- struct scatterlist *dst;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
-struct blkcipher_desc {
- struct crypto_blkcipher *tfm;
- void *info;
- u32 flags;
-};
-
-struct cipher_desc {
- struct crypto_tfm *tfm;
- void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
- const u8 *src, unsigned int nbytes);
- void *info;
-};
-
/**
* DOC: Block Cipher Algorithm Definitions
*
@@ -198,101 +202,6 @@ struct cipher_desc {
*/
/**
- * struct ablkcipher_alg - asynchronous block cipher definition
- * @min_keysize: Minimum key size supported by the transformation. This is the
- * smallest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MIN_KEY_SIZE" include/crypto/
- * @max_keysize: Maximum key size supported by the transformation. This is the
- * largest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MAX_KEY_SIZE" include/crypto/
- * @setkey: Set key for the transformation. This function is used to either
- * program a supplied key into the hardware or store the key in the
- * transformation context for programming it later. Note that this
- * function does modify the transformation context. This function can
- * be called multiple times during the existence of the transformation
- * object, so one must make sure the key is properly reprogrammed into
- * the hardware. This function is also responsible for checking the key
- * length for validity. In case a software fallback was put in place in
- * the @cra_init call, this function might need to use the fallback if
- * the algorithm doesn't support all of the key sizes.
- * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
- * the supplied scatterlist containing the blocks of data. The crypto
- * API consumer is responsible for aligning the entries of the
- * scatterlist properly and making sure the chunks are correctly
- * sized. In case a software fallback was put in place in the
- * @cra_init call, this function might need to use the fallback if
- * the algorithm doesn't support all of the key sizes. In case the
- * key was stored in transformation context, the key might need to be
- * re-programmed into the hardware in this function. This function
- * shall not modify the transformation context, as this function may
- * be called in parallel with the same transformation object.
- * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
- * and the conditions are exactly the same.
- * @givencrypt: Update the IV for encryption. With this function, a cipher
- * implementation may provide the function on how to update the IV
- * for encryption.
- * @givdecrypt: Update the IV for decryption. This is the reverse of
- * @givencrypt .
- * @geniv: The transformation implementation may use an "IV generator" provided
- * by the kernel crypto API. Several use cases have a predefined
- * approach how IVs are to be updated. For such use cases, the kernel
- * crypto API provides ready-to-use implementations that can be
- * referenced with this variable.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- *
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
- */
-struct ablkcipher_alg {
- int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct ablkcipher_request *req);
- int (*decrypt)(struct ablkcipher_request *req);
- int (*givencrypt)(struct skcipher_givcrypt_request *req);
- int (*givdecrypt)(struct skcipher_givcrypt_request *req);
-
- const char *geniv;
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-/**
- * struct blkcipher_alg - synchronous block cipher definition
- * @min_keysize: see struct ablkcipher_alg
- * @max_keysize: see struct ablkcipher_alg
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @geniv and @ivsize are mandatory and must be filled.
- */
-struct blkcipher_alg {
- int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
- int (*decrypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
-
- const char *geniv;
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-/**
* struct cipher_alg - single-block symmetric ciphers definition
* @cia_min_keysize: Minimum key size supported by the transformation. This is
* the smallest key length supported by this transformation
@@ -348,18 +257,7 @@ struct cipher_alg {
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
-struct compress_alg {
- int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen);
- int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen);
-};
-
-
-#define cra_ablkcipher cra_u.ablkcipher
-#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
-#define cra_compress cra_u.compress
/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
@@ -378,18 +276,21 @@ struct compress_alg {
* @cra_ctxsize: Size of the operational context of the transformation. This
* value informs the kernel crypto API about the memory size
* needed to be allocated for the transformation context.
- * @cra_alignmask: Alignment mask for the input and output data buffer. The data
- * buffer containing the input data for the algorithm must be
- * aligned to this alignment mask. The data buffer for the
- * output data must be aligned to this alignment mask. Note that
- * the Crypto API will do the re-alignment in software, but
- * only under special conditions and there is a performance hit.
- * The re-alignment happens at these occasions for different
- * @cra_u types: cipher -- For both input data and output data
- * buffer; ahash -- For output hash destination buf; shash --
- * For output hash destination buf.
- * This is needed on hardware which is flawed by design and
- * cannot pick data from arbitrary addresses.
+ * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
+ * 1 less than the alignment, in bytes, that the algorithm
+ * implementation requires for input and output buffers. When
+ * the crypto API is invoked with buffers that are not aligned
+ * to this alignment, the crypto API automatically utilizes
+ * appropriately aligned temporary buffers to comply with what
+ * the algorithm needs. (For scatterlists this happens only if
+ * the algorithm uses the skcipher_walk helper functions.) This
+ * misalignment handling carries a performance penalty, so it is
+ * preferred that algorithms do not set a nonzero alignmask.
+ * Also, crypto API users may wish to allocate buffers aligned
+ * to the alignmask of the algorithm being used, in order to
+ * avoid the API having to realign them. Note: the alignmask is
+ * not supported for hash algorithms and is always 0 for them.
+ * @cra_reqsize: Size of the request context for this algorithm.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
@@ -405,27 +306,19 @@ struct compress_alg {
* transformation algorithm.
* @cra_type: Type of the cryptographic transformation. This is a pointer to
* struct crypto_type, which implements callbacks common for all
- * transformation types. There are multiple options:
- * &crypto_blkcipher_type, &crypto_ablkcipher_type,
- * &crypto_ahash_type, &crypto_rng_type.
+ * transformation types. There are multiple options, such as
+ * &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
- * callbacks. This is the case for: cipher, compress, shash.
+ * callbacks. This is the case for: cipher.
* @cra_u: Callbacks implementing the transformation. This is a union of
* multiple structures. Depending on the type of transformation selected
* by @cra_type and @cra_flags above, the associated structure must be
* filled with callbacks. This field might be empty. This is the case
* for ahash, shash.
- * @cra_init: Initialize the cryptographic transformation object. This function
- * is used to initialize the cryptographic transformation object.
- * This function is called only once at the instantiation time, right
- * after the transformation context was allocated. In case the
- * cryptographic hardware has some special requirements which need to
- * be handled by software, this function shall check for the precise
- * requirement of the transformation and put any software fallbacks
- * in place.
- * @cra_exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @cra_init, used to remove various changes set in
- * @cra_init.
+ * @cra_init: Deprecated, do not use.
+ * @cra_exit: Deprecated, do not use.
+ * @cra_u.cipher: Union member which contains a single-block symmetric cipher
+ * definition. See @struct @cipher_alg.
* @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
* @cra_list: internally used
* @cra_users: internally used
@@ -444,9 +337,10 @@ struct crypto_alg {
unsigned int cra_blocksize;
unsigned int cra_ctxsize;
unsigned int cra_alignmask;
+ unsigned int cra_reqsize;
int cra_priority;
- atomic_t cra_refcnt;
+ refcount_t cra_refcnt;
char cra_name[CRYPTO_MAX_ALG_NAME];
char cra_driver_name[CRYPTO_MAX_ALG_NAME];
@@ -454,10 +348,7 @@ struct crypto_alg {
const struct crypto_type *cra_type;
union {
- struct ablkcipher_alg ablkcipher;
- struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
- struct compress_alg compress;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@@ -468,125 +359,69 @@ struct crypto_alg {
} CRYPTO_MINALIGN_ATTR;
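As the @cra_alignmask documentation above notes, users can avoid the internal realignment copy by aligning their buffers up front. A minimal sketch, assuming a kmalloc'd buffer and the kernel's PTR_ALIGN() helper:

	/* Illustrative only: over-allocate by the mask, then align. */
	unsigned int mask = crypto_tfm_alg_alignmask(tfm);
	u8 *raw, *buf;

	raw = kmalloc(len + mask, GFP_KERNEL);
	if (!raw)
		return -ENOMEM;
	buf = PTR_ALIGN(raw, mask + 1);	/* mask is 2^n - 1 */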
/*
- * Algorithm registration interface.
+ * A helper struct for waiting for completion of async crypto ops
*/
-int crypto_register_alg(struct crypto_alg *alg);
-int crypto_unregister_alg(struct crypto_alg *alg);
-int crypto_register_algs(struct crypto_alg *algs, int count);
-int crypto_unregister_algs(struct crypto_alg *algs, int count);
+struct crypto_wait {
+ struct completion completion;
+ int err;
+};
/*
- * Algorithm query interface.
+ * Macro for declaring a crypto op async wait object on stack
*/
-int crypto_has_alg(const char *name, u32 type, u32 mask);
+#define DECLARE_CRYPTO_WAIT(_wait) \
+ struct crypto_wait _wait = { \
+ COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
/*
- * Transforms: user-instantiated objects which encapsulate algorithms
- * and core processing logic. Managed via crypto_alloc_*() and
- * crypto_free_*(), as well as the various helpers below.
+ * Async ops completion helper functions
*/
+void crypto_req_done(void *req, int err);
-struct ablkcipher_tfm {
- int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct ablkcipher_request *req);
- int (*decrypt)(struct ablkcipher_request *req);
-
- struct crypto_ablkcipher *base;
-
- unsigned int ivsize;
- unsigned int reqsize;
-};
+static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+{
+ switch (err) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&wait->completion);
+ reinit_completion(&wait->completion);
+ err = wait->err;
+ break;
+ }
-struct blkcipher_tfm {
- void *iv;
- int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
- int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
-};
+ return err;
+}
-struct cipher_tfm {
- int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
- void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-};
+static inline void crypto_init_wait(struct crypto_wait *wait)
+{
+ init_completion(&wait->completion);
+}
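Together these helpers form the standard idiom for driving an asynchronous operation synchronously: declare the wait object, register crypto_req_done() as the completion callback with the wait object as its data, and feed the operation's return value through crypto_wait_req(). A sketch using the skcipher API purely for illustration:

	/* Illustrative only: run an async encrypt to completion. */
	DECLARE_CRYPTO_WAIT(wait);

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);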
-struct compress_tfm {
- int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
+/*
+ * Algorithm query interface.
+ */
+int crypto_has_alg(const char *name, u32 type, u32 mask);
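The query interface is useful for probing availability before committing to an allocation, e.g.:

	/* Illustrative only: bail out early if no gcm(aes) provider exists. */
	if (!crypto_has_alg("gcm(aes)", 0, 0))
		return -ENOENT;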
-#define crt_ablkcipher crt_u.ablkcipher
-#define crt_blkcipher crt_u.blkcipher
-#define crt_cipher crt_u.cipher
-#define crt_compress crt_u.compress
+/*
+ * Transforms: user-instantiated objects which encapsulate algorithms
+ * and core processing logic. Managed via crypto_alloc_*() and
+ * crypto_free_*(), as well as the various helpers below.
+ */
struct crypto_tfm {
+ refcount_t refcnt;
u32 crt_flags;
-
- union {
- struct ablkcipher_tfm ablkcipher;
- struct blkcipher_tfm blkcipher;
- struct cipher_tfm cipher;
- struct compress_tfm compress;
- } crt_u;
-
- void (*exit)(struct crypto_tfm *tfm);
-
- struct crypto_alg *__crt_alg;
-
- void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
-struct crypto_ablkcipher {
- struct crypto_tfm base;
-};
-
-struct crypto_blkcipher {
- struct crypto_tfm base;
-};
-
-struct crypto_cipher {
- struct crypto_tfm base;
-};
-
-struct crypto_comp {
- struct crypto_tfm base;
-};
-
-enum {
- CRYPTOA_UNSPEC,
- CRYPTOA_ALG,
- CRYPTOA_TYPE,
- CRYPTOA_U32,
- __CRYPTOA_MAX,
-};
-#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
+ int node;
-/* Maximum number of (rtattr) parameters for each template. */
-#define CRYPTO_MAX_ATTRS 32
+ struct crypto_tfm *fb;
-struct crypto_attr_alg {
- char name[CRYPTO_MAX_ALG_NAME];
-};
+ void (*exit)(struct crypto_tfm *tfm);
-struct crypto_attr_type {
- u32 type;
- u32 mask;
-};
+ struct crypto_alg *__crt_alg;
-struct crypto_attr_u32 {
- u32 num;
+ void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
/*
@@ -601,8 +436,6 @@ static inline void crypto_free_tfm(struct crypto_tfm *tfm)
return crypto_destroy_tfm(tfm, tfm);
}
-int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-
/*
* Transform helpers which query the underlying algorithm.
*/
@@ -616,16 +449,6 @@ static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_driver_name;
}
-static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_priority;
-}
-
-static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
-}
-
static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_blocksize;
@@ -636,6 +459,11 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_alignmask;
}
+static inline unsigned int crypto_tfm_alg_reqsize(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_reqsize;
+}
+
static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
return tfm->crt_flags;
@@ -651,957 +479,50 @@ static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
tfm->crt_flags &= ~flags;
}
-static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
-{
- return tfm->__crt_ctx;
-}
-
static inline unsigned int crypto_tfm_ctx_alignment(void)
{
struct crypto_tfm *tfm;
return __alignof__(tfm->__crt_ctx);
}
-/*
- * API wrappers.
- */
-static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
- struct crypto_tfm *tfm)
-{
- return (struct crypto_ablkcipher *)tfm;
-}
-
-static inline u32 crypto_skcipher_type(u32 type)
-{
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- return type;
-}
-
-static inline u32 crypto_skcipher_mask(u32 mask)
-{
- mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
- return mask;
-}
-
-/**
- * DOC: Asynchronous Block Cipher API
- *
- * Asynchronous block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
- *
- * Asynchronous cipher operations imply that the function invocation for a
- * cipher request returns immediately before the completion of the operation.
- * The cipher request is scheduled as a separate kernel thread and therefore
- * load-balanced on the different CPUs via the process scheduler. To allow
- * the kernel crypto API to inform the caller about the completion of a cipher
- * request, the caller must provide a callback function. That function is
- * invoked with the cipher handle when the request completes.
- *
- * To support the asynchronous operation, additional information than just the
- * cipher handle must be supplied to the kernel crypto API. That additional
- * information is given by filling in the ablkcipher_request data structure.
- *
- * For the asynchronous block cipher API, the state is maintained with the tfm
- * cipher handle. A single tfm can be used across multiple calls and in
- * parallel. For asynchronous block cipher calls, context data supplied and
- * only used by the caller can be referenced the request data structure in
- * addition to the IV used for the cipher request. The maintenance of such
- * state information would be important for a crypto driver implementer to
- * have, because when calling the callback function upon completion of the
- * cipher operation, that callback function may need some information about
- * which operation just finished if it invoked multiple in parallel. This
- * state information is unused by the kernel crypto API.
- */
-
-static inline struct crypto_tfm *crypto_ablkcipher_tfm(
- struct crypto_ablkcipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_ablkcipher() - zeroize and free cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
-{
- crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
-}
-
-/**
- * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * ablkcipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the ablkcipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
- u32 mask)
+static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{
- return crypto_has_alg(alg_name, crypto_skcipher_type(type),
- crypto_skcipher_mask(mask));
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
}
-static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
- struct crypto_ablkcipher *tfm)
+static inline bool crypto_req_on_stack(struct crypto_async_request *req)
{
- return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
+ return req->flags & CRYPTO_TFM_REQ_ON_STACK;
}
-/**
- * crypto_ablkcipher_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the ablkcipher referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_ablkcipher_ivsize(
- struct crypto_ablkcipher *tfm)
+static inline void crypto_request_set_callback(
+ struct crypto_async_request *req, u32 flags,
+ crypto_completion_t compl, void *data)
{
- return crypto_ablkcipher_crt(tfm)->ivsize;
-}
+ u32 keep = CRYPTO_TFM_REQ_ON_STACK;
-/**
- * crypto_ablkcipher_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the ablkcipher referenced with the cipher handle is
- * returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_ablkcipher_blocksize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
+ req->complete = compl;
+ req->data = data;
+ req->flags &= keep;
+ req->flags |= flags & ~keep;
}
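Note the keep mask: crypto_request_set_callback() deliberately preserves CRYPTO_TFM_REQ_ON_STACK, which describes where the request lives rather than how one operation should behave, so re-arming a callback cannot accidentally clear it:

	/* Illustrative only: the on-stack bit survives this call. */
	crypto_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				    crypto_req_done, &wait);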
-static inline unsigned int crypto_ablkcipher_alignmask(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
-}
-
-static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_ablkcipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the ablkcipher referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
-
- return crt->setkey(crt->base, key, keylen);
-}
-
-/**
- * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
- * @req: ablkcipher_request out of which the cipher handle is to be obtained
- *
- * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
- * data structure.
- *
- * Return: crypto_ablkcipher handle
- */
-static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
- struct ablkcipher_request *req)
-{
- return __crypto_ablkcipher_cast(req->base.tfm);
-}
-
-/**
- * crypto_ablkcipher_encrypt() - encrypt plaintext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Encrypt plaintext data using the ablkcipher_request handle. That data
- * structure and how it is filled with data is discussed with the
- * ablkcipher_request_* functions.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
-{
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->encrypt(req);
-}
-
-/**
- * crypto_ablkcipher_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Decrypt ciphertext data using the ablkcipher_request handle. That data
- * structure and how it is filled with data is discussed with the
- * ablkcipher_request_* functions.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
-{
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->decrypt(req);
-}
-
-/**
- * DOC: Asynchronous Cipher Request Handle
- *
- * The ablkcipher_request data structure contains all pointers to data
- * required for the asynchronous cipher operation. This includes the cipher
- * handle (which can be used by multiple ablkcipher_request instances), pointer
- * to plaintext and ciphertext, asynchronous callback function, etc. It acts
- * as a handle to the ablkcipher_request_* API calls in a similar way as
- * ablkcipher handle to the crypto_ablkcipher_* API calls.
- */
-
-/**
- * crypto_ablkcipher_reqsize() - obtain size of the request data structure
- * @tfm: cipher handle
- *
- * Return: number of bytes
- */
-static inline unsigned int crypto_ablkcipher_reqsize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_ablkcipher_crt(tfm)->reqsize;
-}
-
-/**
- * ablkcipher_request_set_tfm() - update cipher handle reference in request
- * @req: request handle to be modified
- * @tfm: cipher handle that shall be added to the request handle
- *
- * Allow the caller to replace the existing ablkcipher handle in the request
- * data structure with a different one.
- */
-static inline void ablkcipher_request_set_tfm(
- struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
-{
- req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
-}
-
-static inline struct ablkcipher_request *ablkcipher_request_cast(
- struct crypto_async_request *req)
-{
- return container_of(req, struct ablkcipher_request, base);
-}
-
-/**
- * ablkcipher_request_alloc() - allocate request data structure
- * @tfm: cipher handle to be registered with the request
- * @gfp: memory allocation flag that is handed to kmalloc by the API call.
- *
- * Allocate the request data structure that must be used with the ablkcipher
- * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
- * handle is registered in the request data structure.
- *
- * Return: allocated request handle in case of success, or NULL if out of memory
- */
-static inline struct ablkcipher_request *ablkcipher_request_alloc(
- struct crypto_ablkcipher *tfm, gfp_t gfp)
-{
- struct ablkcipher_request *req;
-
- req = kmalloc(sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(tfm), gfp);
-
- if (likely(req))
- ablkcipher_request_set_tfm(req, tfm);
-
- return req;
-}
-
-/**
- * ablkcipher_request_free() - zeroize and free request data structure
- * @req: request data structure cipher handle to be freed
- */
-static inline void ablkcipher_request_free(struct ablkcipher_request *req)
-{
- kzfree(req);
-}
-
-/**
- * ablkcipher_request_set_callback() - set asynchronous callback function
- * @req: request handle
- * @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
- * increase the wait queue beyond the initial maximum size;
- * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
- * @compl: callback function pointer to be registered with the request handle
- * @data: The data pointer refers to memory that is not used by the kernel
- * crypto API, but provided to the callback function for it to use. Here,
- * the caller can provide a reference to memory the callback function can
- * operate on. As the callback function is invoked asynchronously to the
- * related functionality, it may need to access data structures of the
- * related functionality which can be referenced using this pointer. The
- * callback function can access the memory via the "data" field in the
- * crypto_async_request data structure provided to the callback function.
- *
- * This function allows setting the callback function that is triggered once the
- * cipher operation completes.
- *
- * The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template::
- *
- * void callback_function(struct crypto_async_request *req, int error)
- */
-static inline void ablkcipher_request_set_callback(
- struct ablkcipher_request *req,
- u32 flags, crypto_completion_t compl, void *data)
-{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
-}
-
-/**
- * ablkcipher_request_set_crypt() - set data buffers
- * @req: request handle
- * @src: source scatter / gather list
- * @dst: destination scatter / gather list
- * @nbytes: number of bytes to process from @src
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_ablkcipher_ivsize
- *
- * This function allows setting of the source data and destination data
- * scatter / gather lists.
- *
- * For encryption, the source is treated as the plaintext and the
- * destination is the ciphertext. For a decryption operation, the use is
- * reversed - the source is the ciphertext and the destination is the plaintext.
- */
-static inline void ablkcipher_request_set_crypt(
- struct ablkcipher_request *req,
- struct scatterlist *src, struct scatterlist *dst,
- unsigned int nbytes, void *iv)
+static inline void crypto_request_set_tfm(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
{
- req->src = src;
- req->dst = dst;
- req->nbytes = nbytes;
- req->info = iv;
+ req->tfm = tfm;
+ req->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
}
-/**
- * DOC: Synchronous Block Cipher API
- *
- * The synchronous block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
- *
- * Synchronous calls, have a context in the tfm. But since a single tfm can be
- * used in multiple calls and in parallel, this info should not be changeable
- * (unless a lock is used). This applies, for example, to the symmetric key.
- * However, the IV is changeable, so there is an iv field in blkcipher_tfm
- * structure for synchronous blkcipher api. So, its the only state info that can
- * be kept for synchronous calls without using a big lock across a tfm.
- *
- * The block cipher API allows the use of a complete cipher, i.e. a cipher
- * consisting of a template (a block chaining mode) and a single block cipher
- * primitive (e.g. AES).
- *
- * The plaintext data buffer and the ciphertext data buffer are pointed to
- * by using scatter/gather lists. The cipher operation is performed
- * on all segments of the provided scatter/gather lists.
- *
- * The kernel crypto API supports a cipher operation "in-place" which means that
- * the caller may provide the same scatter/gather list for the plaintext and
- * cipher text. After the completion of the cipher operation, the plaintext
- * data is replaced with the ciphertext data in case of an encryption and vice
- * versa for a decryption. The caller must ensure that the scatter/gather lists
- * for the output data point to sufficiently large buffers, i.e. multiples of
- * the block size of the cipher.
- */
-
-static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
- struct crypto_tfm *tfm)
-{
- return (struct crypto_blkcipher *)tfm;
-}
-
-static inline struct crypto_blkcipher *crypto_blkcipher_cast(
- struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
- return __crypto_blkcipher_cast(tfm);
-}
-
-/**
- * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * blkcipher cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a block cipher. The returned struct
- * crypto_blkcipher is the cipher handle that is required for any subsequent
- * API invocation for that block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
- const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_blkcipher_tfm(
- struct crypto_blkcipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_blkcipher() - zeroize and free the block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
-{
- crypto_free_tfm(crypto_blkcipher_tfm(tfm));
-}
-
-/**
- * crypto_has_blkcipher() - Search for the availability of a block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the block cipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-/**
- * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
- * @tfm: cipher handle
- *
- * Return: The character string holding the name of the cipher
- */
-static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
-}
-
-static inline struct blkcipher_tfm *crypto_blkcipher_crt(
- struct crypto_blkcipher *tfm)
-{
- return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
-}
-
-static inline struct blkcipher_alg *crypto_blkcipher_alg(
- struct crypto_blkcipher *tfm)
-{
- return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
-}
-
-/**
- * crypto_blkcipher_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the block cipher referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
-{
- return crypto_blkcipher_alg(tfm)->ivsize;
-}
-
-/**
- * crypto_blkcipher_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the block cipher referenced with the cipher handle is
- * returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation.
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_blkcipher_blocksize(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_blkcipher_alignmask(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
-}
-
-static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
-}
-
-static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_blkcipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the block cipher referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
- key, keylen);
-}
-
-/**
- * crypto_blkcipher_encrypt() - encrypt plaintext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.info is filled with the IV to be used for
- * the current operation; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_decrypt() - decrypt ciphertext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- *
- */
-static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt_iv call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_set_iv() - set IV for cipher
- * @tfm: cipher handle
- * @src: buffer holding the IV
- * @len: length of the IV in bytes
- *
- * The caller provided IV is set for the block cipher referenced by the cipher
- * handle.
- */
-static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
- const u8 *src, unsigned int len)
-{
- memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
-}
-
-/**
- * crypto_blkcipher_get_iv() - obtain IV from cipher
- * @tfm: cipher handle
- * @dst: buffer filled with the IV
- * @len: length of the buffer dst
- *
- * The caller can obtain the IV set for the block cipher referenced by the
- * cipher handle and store it into the user-provided buffer. If the buffer
- * has an insufficient space, the IV is truncated to fit the buffer.
- */
-static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
- u8 *dst, unsigned int len)
-{
- memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
-}
-
-/**
- * DOC: Single Block Cipher API
- *
- * The single block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
- *
- * Using the single block cipher API calls, operations with the basic cipher
- * primitive can be implemented. These cipher primitives exclude any block
- * chaining operations including IV handling.
- *
- * The purpose of this single block cipher API is to support the implementation
- * of templates or other concepts that only need to perform the cipher operation
- * on one block at a time. Templates invoke the underlying cipher primitive
- * block-wise and process either the input or the output data of these cipher
- * operations.
- */
-
-static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_cipher *)tfm;
-}
-
-static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return __crypto_cipher_cast(tfm);
-}
-
-/**
- * crypto_alloc_cipher() - allocate single block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a single block cipher. The returned struct
- * crypto_cipher is the cipher handle that is required for any subsequent API
- * invocation for that single block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_cipher() - zeroize and free the single block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_cipher(struct crypto_cipher *tfm)
-{
- crypto_free_tfm(crypto_cipher_tfm(tfm));
-}
-
-/**
- * crypto_has_cipher() - Search for the availability of a single block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the single block cipher is known to the kernel crypto API;
- * false otherwise
- */
-static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
-{
- return &crypto_cipher_tfm(tfm)->crt_cipher;
-}
-
-/**
- * crypto_cipher_blocksize() - obtain block size for cipher
- * @tfm: cipher handle
- *
- * The block size for the single block cipher referenced with the cipher handle
- * tfm is returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
-}
-
-static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
-}
-
-static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_cipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the single block cipher referenced by the
- * cipher handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
- key, keylen);
-}
-
-/**
- * crypto_cipher_encrypt_one() - encrypt one block of plaintext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the ciphertext
- * @src: buffer holding the plaintext to be encrypted
- *
- * Invoke the encryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
-
-/**
- * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the plaintext
- * @src: buffer holding the ciphertext to be decrypted
- *
- * Invoke the decryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
-
-static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_comp *)tfm;
-}
-
-static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
-{
- BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
- CRYPTO_ALG_TYPE_MASK);
- return __crypto_comp_cast(tfm);
-}
-
-static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_COMPRESS;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
-{
- return &tfm->base;
-}
-
-static inline void crypto_free_comp(struct crypto_comp *tfm)
-{
- crypto_free_tfm(crypto_comp_tfm(tfm));
-}
-
-static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_COMPRESS;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-static inline const char *crypto_comp_name(struct crypto_comp *tfm)
-{
- return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
-}
-
-static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
-{
- return &crypto_comp_tfm(tfm)->crt_compress;
-}
-
-static inline int crypto_comp_compress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
-}
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp);
-static inline int crypto_comp_decompress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+static inline void crypto_stack_request_init(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
{
- return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
+ req->flags = 0;
+ crypto_request_set_tfm(req, tfm);
+ req->flags |= CRYPTO_TFM_REQ_ON_STACK;
}
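crypto_stack_request_init() composes crypto_request_set_tfm(), which clears the on-stack bit, with explicitly setting CRYPTO_TFM_REQ_ON_STACK again, marking the request as caller-owned memory that the API must not free. A hedged sketch (real users would normally reach this through type-specific on-stack request helpers):

	/* Illustrative only: mark a caller-owned request as on-stack. */
	struct crypto_async_request req;

	crypto_stack_request_init(&req, tfm);
	/* req.flags now includes CRYPTO_TFM_REQ_ON_STACK. */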
#endif /* _LINUX_CRYPTO_H */
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
deleted file mode 100644
index df4d3e943d28..000000000000
--- a/include/linux/cryptohash.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __CRYPTOHASH_H
-#define __CRYPTOHASH_H
-
-#include <uapi/linux/types.h>
-
-#define SHA_DIGEST_WORDS 5
-#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
-#define SHA_WORKSPACE_WORDS 16
-
-void sha_init(__u32 *buf);
-void sha_transform(__u32 *digest, const char *data, __u32 *W);
-
-#endif
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
index cfe83239d7f0..2be1120174eb 100644
--- a/include/linux/cs5535.h
+++ b/include/linux/cs5535.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AMD CS5535/CS5536 definitions
* Copyright (C) 2006 Advanced Micro Devices, Inc.
* Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
*/
#ifndef _CS5535_H
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index f13e4ff6835a..bc95aef2219c 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CTYPE_H
#define _LINUX_CTYPE_H
+#include <linux/compiler.h>
+
/*
* NOTE! This ctype does not handle EOF like the standard C
* library is required to.
@@ -22,10 +25,6 @@ extern const unsigned char _ctype[];
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
-static inline int isdigit(int c)
-{
- return '0' <= c && c <= '9';
-}
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
@@ -38,6 +37,15 @@ static inline int isdigit(int c)
#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
+#if __has_builtin(__builtin_isdigit)
+#define isdigit(c) __builtin_isdigit(c)
+#else
+static inline int isdigit(int c)
+{
+ return '0' <= c && c <= '9';
+}
+#endif
+
static inline unsigned char __tolower(unsigned char c)
{
if (isupper(c))
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
index b72332823807..daf3e6f98444 100644
--- a/include/linux/cuda.h
+++ b/include/linux/cuda.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for talking to the CUDA. The CUDA is a microcontroller
* which controls the ADB, system power, RTC, and various other things.
@@ -7,12 +8,16 @@
#ifndef _LINUX_CUDA_H
#define _LINUX_CUDA_H
+#include <linux/rtc.h>
#include <uapi/linux/cuda.h>
-extern int find_via_cuda(void);
+extern int __init find_via_cuda(void);
extern int cuda_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
extern void cuda_poll(void);
+extern time64_t cuda_get_time(void);
+extern int cuda_set_rtc_time(struct rtc_time *tm);
+
#endif /* _LINUX_CUDA_H */
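For context, cuda_request() takes a variable-length packet; a typical call, modeled on the PowerMac platform code and assuming the usual CUDA packet constants from the uapi headers, looks like:

	/* Illustrative only: ask the CUDA to power the machine down. */
	struct adb_request req;

	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
	while (!req.complete)
		cuda_poll();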
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
deleted file mode 100644
index 19ae518f5471..000000000000
--- a/include/linux/cyclades.h
+++ /dev/null
@@ -1,360 +0,0 @@
-/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $
- * linux/include/linux/cyclades.h
- *
- * This file was initially written by
- * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by
- * Ivan Passos <ivan@cyclades.com>.
- *
- * This file contains the general definitions for the cyclades.c driver
- *$Log: cyclades.h,v $
- *Revision 3.1 2002/01/29 11:36:16 henrique
- *added throttle field on struct cyclades_port to indicate whether the
- *port is throttled or not
- *
- *Revision 3.1 2000/04/19 18:52:52 ivan
- *converted address fields to unsigned long and added fields for physical
- *addresses on cyclades_card structure;
- *
- *Revision 3.0 1998/11/02 14:20:59 ivan
- *added nports field on cyclades_card structure;
- *
- *Revision 2.5 1998/08/03 16:57:01 ivan
- *added cyclades_idle_stats structure;
- *
- *Revision 2.4 1998/06/01 12:09:53 ivan
- *removed closing_wait2 from cyclades_port structure;
- *
- *Revision 2.3 1998/03/16 18:01:12 ivan
- *changes in the cyclades_port structure to get it closer to the
- *standard serial port structure;
- *added constants for new ioctls;
- *
- *Revision 2.2 1998/02/17 16:50:00 ivan
- *changes in the cyclades_port structure (addition of shutdown_wait and
- *chip_rev variables);
- *added constants for new ioctls and for CD1400 rev. numbers.
- *
- *Revision 2.1 1997/10/24 16:03:00 ivan
- *added rflow (which allows enabling the CD1400 special flow control
- *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to
- *cyclades_port structure;
- *added Alpha support
- *
- *Revision 2.0 1997/06/30 10:30:00 ivan
- *added some new doorbell command constants related to IOCTLW and
- *UART error signaling
- *
- *Revision 1.8 1997/06/03 15:30:00 ivan
- *added constant ZFIRM_HLT
- *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin)
- *
- *Revision 1.7 1997/03/26 10:30:00 daniel
- *new entries at the end of cyclades_port struct to reallocate
- *variables illegally allocated within card memory.
- *
- *Revision 1.6 1996/09/09 18:35:30 bentson
- *fold in changes for Cyclom-Z -- including structures for
- *communicating with board as well modest changes to original
- *structures to support new features.
- *
- *Revision 1.5 1995/11/13 21:13:31 bentson
- *changes suggested by Michael Chastain <mec@duracef.shout.net>
- *to support use of this file in non-kernel applications
- *
- *
- */
-#ifndef _LINUX_CYCLADES_H
-#define _LINUX_CYCLADES_H
-
-#include <uapi/linux/cyclades.h>
-
-
-/* Per card data structure */
-struct cyclades_card {
- void __iomem *base_addr;
- union {
- void __iomem *p9050;
- struct RUNTIME_9060 __iomem *p9060;
- } ctl_addr;
- struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */
- int irq;
- unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
- unsigned int first_line; /* minor number of first channel on card */
- unsigned int nports; /* Number of ports in the card */
- int bus_index; /* address shift - 0 for ISA, 1 for PCI */
- int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
- u32 hw_ver;
- spinlock_t card_lock;
- struct cyclades_port *ports;
-};
-
-/***************************************
- * Memory access functions/macros *
- * (required to support Alpha systems) *
- ***************************************/
-
-#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0)
-#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0)
-#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0)
-
-/*
- * Statistics counters
- */
-struct cyclades_icount {
- __u32 cts, dsr, rng, dcd, tx, rx;
- __u32 frame, parity, overrun, brk;
- __u32 buf_overrun;
-};
-
-/*
- * This is our internal structure for each serial port's state.
- *
- * Many fields are paralleled by the structure used by the serial_struct
- * structure.
- *
- * For definitions of the flags field, see tty.h
- */
-
-struct cyclades_port {
- int magic;
- struct tty_port port;
- struct cyclades_card *card;
- union {
- struct {
- void __iomem *base_addr;
- } cyy;
- struct {
- struct CH_CTRL __iomem *ch_ctrl;
- struct BUF_CTRL __iomem *buf_ctrl;
- } cyz;
- } u;
- int line;
- int flags; /* defined in tty.h */
- int type; /* UART type */
- int read_status_mask;
- int ignore_status_mask;
- int timeout;
- int xmit_fifo_size;
- int cor1,cor2,cor3,cor4,cor5;
- int tbpr,tco,rbpr,rco;
- int baud;
- int rflow;
- int rtsdtr_inv;
- int chip_rev;
- int custom_divisor;
- u8 x_char; /* to be pushed out ASAP */
- int breakon;
- int breakoff;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
- int default_threshold;
- int default_timeout;
- unsigned long rflush_count;
- struct cyclades_monitor mon;
- struct cyclades_idle_stats idle_stats;
- struct cyclades_icount icount;
- struct completion shutdown_wait;
- int throttle;
-};
-
-#define CLOSING_WAIT_DELAY 30*HZ
-#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE
-#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF
-
-
-#define CyMAX_CHIPS_PER_CARD 8
-#define CyMAX_CHAR_FIFO 12
-#define CyPORTS_PER_CHIP 4
-#define CD1400_MAX_SPEED 115200
-
-#define CyISA_Ywin 0x2000
-
-#define CyPCI_Ywin 0x4000
-#define CyPCI_Yctl 0x80
-#define CyPCI_Zctl CTRL_WINDOW_SIZE
-#define CyPCI_Zwin 0x80000
-#define CyPCI_Ze_win (2 * CyPCI_Zwin)
-
-#define PCI_DEVICE_ID_MASK 0x06
-
-/**** CD1400 registers ****/
-
-#define CD1400_REV_G 0x46
-#define CD1400_REV_J 0x48
-
-#define CyRegSize 0x0400
-#define Cy_HwReset 0x1400
-#define Cy_ClrIntr 0x1800
-#define Cy_EpldRev 0x1e00
-
-/* Global Registers */
-
-#define CyGFRCR (0x40*2)
-#define CyRevE (44)
-#define CyCAR (0x68*2)
-#define CyCHAN_0 (0x00)
-#define CyCHAN_1 (0x01)
-#define CyCHAN_2 (0x02)
-#define CyCHAN_3 (0x03)
-#define CyGCR (0x4B*2)
-#define CyCH0_SERIAL (0x00)
-#define CyCH0_PARALLEL (0x80)
-#define CySVRR (0x67*2)
-#define CySRModem (0x04)
-#define CySRTransmit (0x02)
-#define CySRReceive (0x01)
-#define CyRICR (0x44*2)
-#define CyTICR (0x45*2)
-#define CyMICR (0x46*2)
-#define CyICR0 (0x00)
-#define CyICR1 (0x01)
-#define CyICR2 (0x02)
-#define CyICR3 (0x03)
-#define CyRIR (0x6B*2)
-#define CyTIR (0x6A*2)
-#define CyMIR (0x69*2)
-#define CyIRDirEq (0x80)
-#define CyIRBusy (0x40)
-#define CyIRUnfair (0x20)
-#define CyIRContext (0x1C)
-#define CyIRChannel (0x03)
-#define CyPPR (0x7E*2)
-#define CyCLOCK_20_1MS (0x27)
-#define CyCLOCK_25_1MS (0x31)
-#define CyCLOCK_25_5MS (0xf4)
-#define CyCLOCK_60_1MS (0x75)
-#define CyCLOCK_60_2MS (0xea)
-
-/* Virtual Registers */
-
-#define CyRIVR (0x43*2)
-#define CyTIVR (0x42*2)
-#define CyMIVR (0x41*2)
-#define CyIVRMask (0x07)
-#define CyIVRRxEx (0x07)
-#define CyIVRRxOK (0x03)
-#define CyIVRTxOK (0x02)
-#define CyIVRMdmOK (0x01)
-#define CyTDR (0x63*2)
-#define CyRDSR (0x62*2)
-#define CyTIMEOUT (0x80)
-#define CySPECHAR (0x70)
-#define CyBREAK (0x08)
-#define CyPARITY (0x04)
-#define CyFRAME (0x02)
-#define CyOVERRUN (0x01)
-#define CyMISR (0x4C*2)
-/* see CyMCOR_ and CyMSVR_ for bits*/
-#define CyEOSRR (0x60*2)
-
-/* Channel Registers */
-
-#define CyLIVR (0x18*2)
-#define CyMscsr (0x01)
-#define CyTdsr (0x02)
-#define CyRgdsr (0x03)
-#define CyRedsr (0x07)
-#define CyCCR (0x05*2)
-/* Format 1 */
-#define CyCHAN_RESET (0x80)
-#define CyCHIP_RESET (0x81)
-#define CyFlushTransFIFO (0x82)
-/* Format 2 */
-#define CyCOR_CHANGE (0x40)
-#define CyCOR1ch (0x02)
-#define CyCOR2ch (0x04)
-#define CyCOR3ch (0x08)
-/* Format 3 */
-#define CySEND_SPEC_1 (0x21)
-#define CySEND_SPEC_2 (0x22)
-#define CySEND_SPEC_3 (0x23)
-#define CySEND_SPEC_4 (0x24)
-/* Format 4 */
-#define CyCHAN_CTL (0x10)
-#define CyDIS_RCVR (0x01)
-#define CyENB_RCVR (0x02)
-#define CyDIS_XMTR (0x04)
-#define CyENB_XMTR (0x08)
-#define CySRER (0x06*2)
-#define CyMdmCh (0x80)
-#define CyRxData (0x10)
-#define CyTxRdy (0x04)
-#define CyTxMpty (0x02)
-#define CyNNDT (0x01)
-#define CyCOR1 (0x08*2)
-#define CyPARITY_NONE (0x00)
-#define CyPARITY_0 (0x20)
-#define CyPARITY_1 (0xA0)
-#define CyPARITY_E (0x40)
-#define CyPARITY_O (0xC0)
-#define Cy_1_STOP (0x00)
-#define Cy_1_5_STOP (0x04)
-#define Cy_2_STOP (0x08)
-#define Cy_5_BITS (0x00)
-#define Cy_6_BITS (0x01)
-#define Cy_7_BITS (0x02)
-#define Cy_8_BITS (0x03)
-#define CyCOR2 (0x09*2)
-#define CyIXM (0x80)
-#define CyTxIBE (0x40)
-#define CyETC (0x20)
-#define CyAUTO_TXFL (0x60)
-#define CyLLM (0x10)
-#define CyRLM (0x08)
-#define CyRtsAO (0x04)
-#define CyCtsAE (0x02)
-#define CyDsrAE (0x01)
-#define CyCOR3 (0x0A*2)
-#define CySPL_CH_DRANGE (0x80) /* special character detect range */
-#define CySPL_CH_DET1 (0x40) /* enable special character detection
- on SCHR4-SCHR3 */
-#define CyFL_CTRL_TRNSP (0x20) /* Flow Control Transparency */
-#define CySPL_CH_DET2 (0x10) /* Enable special character detection
- on SCHR2-SCHR1 */
-#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */
-#define CyCOR4 (0x1E*2)
-#define CyCOR5 (0x1F*2)
-#define CyCCSR (0x0B*2)
-#define CyRxEN (0x80)
-#define CyRxFloff (0x40)
-#define CyRxFlon (0x20)
-#define CyTxEN (0x08)
-#define CyTxFloff (0x04)
-#define CyTxFlon (0x02)
-#define CyRDCR (0x0E*2)
-#define CySCHR1 (0x1A*2)
-#define CySCHR2 (0x1B*2)
-#define CySCHR3 (0x1C*2)
-#define CySCHR4 (0x1D*2)
-#define CySCRL (0x22*2)
-#define CySCRH (0x23*2)
-#define CyLNC (0x24*2)
-#define CyMCOR1 (0x15*2)
-#define CyMCOR2 (0x16*2)
-#define CyRTPR (0x21*2)
-#define CyMSVR1 (0x6C*2)
-#define CyMSVR2 (0x6D*2)
-#define CyANY_DELTA (0xF0)
-#define CyDSR (0x80)
-#define CyCTS (0x40)
-#define CyRI (0x20)
-#define CyDCD (0x10)
-#define CyDTR (0x02)
-#define CyRTS (0x01)
-#define CyPVSR (0x6F*2)
-#define CyRBPR (0x78*2)
-#define CyRCOR (0x7C*2)
-#define CyTBPR (0x72*2)
-#define CyTCOR (0x76*2)
-
-/* Custom Registers */
-
-#define CyPLX_VER (0x3400)
-#define PLX_9050 0x0b
-#define PLX_9060 0x0c
-#define PLX_9080 0x0d
-
-/***************************************************************************/
-
-#endif /* _LINUX_CYCLADES_H */
diff --git a/include/linux/damon.h b/include/linux/damon.h
new file mode 100644
index 000000000000..3813373a9200
--- /dev/null
+++ b/include/linux/damon.h
@@ -0,0 +1,975 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON API
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#ifndef _DAMON_H_
+#define _DAMON_H_
+
+#include <linux/memcontrol.h>
+#include <linux/mutex.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/random.h>
+
+/* Minimal region size. Every damon_region is aligned by this. */
+#define DAMON_MIN_REGION PAGE_SIZE
+/* Max priority score for DAMON-based operation schemes */
+#define DAMOS_MAX_SCORE (99)
+
+/* Get a random number in [l, r) */
+static inline unsigned long damon_rand(unsigned long l, unsigned long r)
+{
+ return l + get_random_u32_below(r - l);
+}
+
+/**
+ * struct damon_addr_range - Represents an address region of [@start, @end).
+ * @start: Start address of the region (inclusive).
+ * @end: End address of the region (exclusive).
+ */
+struct damon_addr_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+/**
+ * struct damon_size_range - Represents size for filter to operate on [@min, @max].
+ * @min: Min size (inclusive).
+ * @max: Max size (inclusive).
+ */
+struct damon_size_range {
+ unsigned long min;
+ unsigned long max;
+};
+
+/**
+ * struct damon_region - Represents a monitoring target region.
+ * @ar: The address range of the region.
+ * @sampling_addr: Address of the sample for the next access check.
+ * @nr_accesses: Access frequency of this region.
+ * @nr_accesses_bp:	@nr_accesses in basis points (0.01%), updated for
+ *			each sampling interval.
+ * @list: List head for siblings.
+ * @age: Age of this region.
+ *
+ * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and
+ * increased for every &damon_attrs->sample_interval if an access to the region
+ * during the last sampling interval is found. This field should not be
+ * updated by direct access but via the helper function,
+ * damon_update_region_access_rate().
+ *
+ * @nr_accesses_bp is another representation of @nr_accesses in basis points
+ * (1 in 10,000) that is updated for every &damon_attrs->sample_interval in a
+ * manner similar to a moving sum. By the algorithm, this value becomes
+ * @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval. This can
+ * be used when the aggregation interval is too long to wait for before getting
+ * the access monitoring results.
+ *
+ * @age is initially zero, increased for each aggregation interval, and reset
+ * to zero again if the access frequency is significantly changed. If two
+ * regions are merged into a new region, both @nr_accesses and @age of the new
+ * region are set as region size-weighted average of those of the two regions.
+ */
+struct damon_region {
+ struct damon_addr_range ar;
+ unsigned long sampling_addr;
+ unsigned int nr_accesses;
+ unsigned int nr_accesses_bp;
+ struct list_head list;
+
+ unsigned int age;
+/* private: Internal value for age calculation. */
+ unsigned int last_nr_accesses;
+};
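
As a worked illustration of the @nr_accesses_bp semantics documented above, a
reader could derive an up-to-date access frequency estimate without waiting for
the aggregation interval. The helper below is an editorial sketch, not part of
the patch:

	static inline unsigned int
	damon_region_live_nr_accesses(struct damon_region *r)
	{
		/* @nr_accesses_bp tracks nr_accesses scaled by 10,000 */
		return r->nr_accesses_bp / 10000;
	}
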
+
+/**
+ * struct damon_target - Represents a monitoring target.
+ * @pid: The PID of the virtual address space to monitor.
+ * @nr_regions: Number of monitoring target regions of this target.
+ * @regions_list: Head of the monitoring target regions of this target.
+ * @list: List head for siblings.
+ * @obsolete: Whether the commit destination target is obsolete.
+ *
+ * Each monitoring context could have multiple targets. For example, a context
+ * for virtual memory address spaces could have multiple target processes. The
+ * @pid should be set for appropriate &struct damon_operations including the
+ * virtual address spaces monitoring operations.
+ *
+ * @obsolete is used only for damon_commit_targets() source targets, to specify
+ * that the matching destination targets are obsolete. See
+ * damon_commit_targets() for how it is handled.
+ */
+struct damon_target {
+ struct pid *pid;
+ unsigned int nr_regions;
+ struct list_head regions_list;
+ struct list_head list;
+ bool obsolete;
+};
+
+/**
+ * enum damos_action - Represents an action of a Data Access Monitoring-based
+ * Operation Scheme.
+ *
+ * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
+ * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
+ * @DAMOS_PAGEOUT: Reclaim the region.
+ * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
+ * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
+ * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists.
+ * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists.
+ * @DAMOS_MIGRATE_HOT: Migrate the regions prioritizing warmer regions.
+ * @DAMOS_MIGRATE_COLD: Migrate the regions prioritizing colder regions.
+ * @DAMOS_STAT: Do nothing but count the stat.
+ * @NR_DAMOS_ACTIONS: Total number of DAMOS actions
+ *
+ * The support of each action is up to the running &struct damon_operations.
+ * Refer to the 'Operation Action' section of Documentation/mm/damon/design.rst
+ * for the status of the support.
+ *
+ * Note that DAMOS_PAGEOUT doesn't trigger demotions.
+ */
+enum damos_action {
+ DAMOS_WILLNEED,
+ DAMOS_COLD,
+ DAMOS_PAGEOUT,
+ DAMOS_HUGEPAGE,
+ DAMOS_NOHUGEPAGE,
+ DAMOS_LRU_PRIO,
+ DAMOS_LRU_DEPRIO,
+ DAMOS_MIGRATE_HOT,
+ DAMOS_MIGRATE_COLD,
+ DAMOS_STAT, /* Do nothing but only record the stat */
+ NR_DAMOS_ACTIONS,
+};
+
+/**
+ * enum damos_quota_goal_metric - Represents the metric to be used as the goal
+ *
+ * @DAMOS_QUOTA_USER_INPUT: User-input value.
+ * @DAMOS_QUOTA_SOME_MEM_PSI_US: System level some memory PSI in us.
+ * @DAMOS_QUOTA_NODE_MEM_USED_BP: MemUsed ratio of a node.
+ * @DAMOS_QUOTA_NODE_MEM_FREE_BP: MemFree ratio of a node.
+ * @DAMOS_QUOTA_NODE_MEMCG_USED_BP: MemUsed ratio of a node for a cgroup.
+ * @DAMOS_QUOTA_NODE_MEMCG_FREE_BP: MemFree ratio of a node for a cgroup.
+ * @NR_DAMOS_QUOTA_GOAL_METRICS: Number of DAMOS quota goal metrics.
+ *
+ * Metrics equal to or larger than @NR_DAMOS_QUOTA_GOAL_METRICS are unsupported.
+ */
+enum damos_quota_goal_metric {
+ DAMOS_QUOTA_USER_INPUT,
+ DAMOS_QUOTA_SOME_MEM_PSI_US,
+ DAMOS_QUOTA_NODE_MEM_USED_BP,
+ DAMOS_QUOTA_NODE_MEM_FREE_BP,
+ DAMOS_QUOTA_NODE_MEMCG_USED_BP,
+ DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
+ NR_DAMOS_QUOTA_GOAL_METRICS,
+};
+
+/**
+ * struct damos_quota_goal - DAMOS scheme quota auto-tuning goal.
+ * @metric: Metric to be used for representing the goal.
+ * @target_value: Target value of @metric to achieve with the tuning.
+ * @current_value: Current value of @metric.
+ * @last_psi_total: Last measured total PSI
+ * @nid: Node id.
+ * @memcg_id: Memcg id.
+ * @list: List head for siblings.
+ *
+ * Data structure for getting the current score of the quota tuning goal. The
+ * score is calculated from how close @current_value and @target_value are.
+ * The score is then fed into DAMON's internal feedback loop mechanism to get
+ * the auto-tuned quota.
+ *
+ * If @metric is DAMOS_QUOTA_USER_INPUT, @current_value should be manually
+ * entered by the user, probably inside the kdamond callbacks. Otherwise,
+ * DAMON sets @current_value with self-measured value of @metric.
+ *
+ * If @metric is DAMOS_QUOTA_NODE_MEM_{USED,FREE}_BP, @nid represents the node
+ * id of the target node for which to account the used/free memory.
+ *
+ * If @metric is DAMOS_QUOTA_NODE_MEMCG_{USED,FREE}_BP, @nid and @memcg_id
+ * represent the node id and the cgroup for which to account the used memory.
+ */
+struct damos_quota_goal {
+ enum damos_quota_goal_metric metric;
+ unsigned long target_value;
+ unsigned long current_value;
+ /* metric-dependent fields */
+ union {
+ u64 last_psi_total;
+ struct {
+ int nid;
+ unsigned short memcg_id;
+ };
+ };
+ struct list_head list;
+};
+
+/**
+ * struct damos_quota - Controls the aggressiveness of the given scheme.
+ * @reset_interval: Charge reset interval in milliseconds.
+ * @ms: Maximum milliseconds that the scheme can use.
+ * @sz:			Maximum bytes of memory to which the action can be applied.
+ * @goals: Head of quota tuning goals (&damos_quota_goal) list.
+ * @esz: Effective size quota in bytes.
+ *
+ * @weight_sz: Weight of the region's size for prioritization.
+ * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
+ * @weight_age: Weight of the region's age for prioritization.
+ *
+ * To avoid consuming too much CPU time or IO resources for applying the
+ * &struct damos->action to large memory, DAMON allows users to set time and/or
+ * size quotas. The quotas can be set by writing non-zero values to &ms and
+ * &sz, respectively. If the time quota is set, DAMON tries to use only up to
+ * &ms milliseconds within &reset_interval for applying the action. If the
+ * size quota is set, DAMON tries to apply the action only up to &sz bytes
+ * within &reset_interval.
+ *
+ * To reconcile the different types of quotas and goals, DAMON internally
+ * converts them into one single size quota called the "effective quota", and
+ * uses it as the only real quota. The conversion is made as follows.
+ *
+ * The time quota is transformed to a size quota using the estimated throughput
+ * of the scheme's action. DAMON then compares it against &sz and uses the
+ * smaller one as the effective quota.
+ *
+ * If @goals is not empty, DAMON calculates yet another size quota based on the
+ * goals using its internal feedback loop algorithm, for every @reset_interval.
+ * Then, if the new size quota is smaller than the effective quota, it uses the
+ * new size quota as the effective quota.
+ *
+ * The resulting effective size quota in bytes is set to @esz.
+ *
+ * For selecting regions within the quota, DAMON prioritizes the current
+ * scheme's target memory regions using &struct damon_operations->get_scheme_score.
+ * You can customize the prioritization logic by setting &weight_sz,
+ * &weight_nr_accesses, and &weight_age, because monitoring operations are
+ * encouraged to respect those.
+ */
+struct damos_quota {
+ unsigned long reset_interval;
+ unsigned long ms;
+ unsigned long sz;
+ struct list_head goals;
+ unsigned long esz;
+
+ unsigned int weight_sz;
+ unsigned int weight_nr_accesses;
+ unsigned int weight_age;
+
+/* private: */
+ /* For throughput estimation */
+ unsigned long total_charged_sz;
+ unsigned long total_charged_ns;
+
+ /* For charging the quota */
+ unsigned long charged_sz;
+ unsigned long charged_from;
+ struct damon_target *charge_target_from;
+ unsigned long charge_addr_from;
+
+ /* For prioritization */
+ unsigned int min_score;
+
+ /* For feedback loop */
+ unsigned long esz_bp;
+};
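
The conversion rules documented above can be summarized in code. The following
is an editorial sketch of the documented behavior, not the in-tree
implementation; overflow handling is omitted and min() is assumed from
<linux/minmax.h>:

	static unsigned long damos_effective_quota_sketch(struct damos_quota *q)
	{
		unsigned long esz = q->sz ? : ULONG_MAX;

		if (q->ms && q->total_charged_ns) {
			/* estimated throughput, in bytes per millisecond */
			unsigned long tput = q->total_charged_sz * 1000000 /
					     q->total_charged_ns;
			esz = min(esz, tput * q->ms);
		}
		return esz;	/* goal feedback could further shrink this */
	}
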
+
+/**
+ * enum damos_wmark_metric - Represents the watermark metric.
+ *
+ * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme.
+ * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000].
+ * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics
+ */
+enum damos_wmark_metric {
+ DAMOS_WMARK_NONE,
+ DAMOS_WMARK_FREE_MEM_RATE,
+ NR_DAMOS_WMARK_METRICS,
+};
+
+/**
+ * struct damos_watermarks - Controls when a given scheme should be activated.
+ * @metric: Metric for the watermarks.
+ * @interval: Watermarks check time interval in microseconds.
+ * @high: High watermark.
+ * @mid: Middle watermark.
+ * @low: Low watermark.
+ *
+ * If &metric is &DAMOS_WMARK_NONE, the scheme is always active. Being active
+ * means DAMON does the monitoring and applies the action of the scheme to
+ * appropriate memory regions. Otherwise, DAMON checks &metric of the system
+ * at least once per &interval microseconds and works as below.
+ *
+ * If &metric is higher than &high, the scheme is deactivated. If &metric is
+ * between &mid and &low, the scheme is activated. If &metric is lower than
+ * &low, the scheme is deactivated.
+ */
+struct damos_watermarks {
+ enum damos_wmark_metric metric;
+ unsigned long interval;
+ unsigned long high;
+ unsigned long mid;
+ unsigned long low;
+
+/* private: */
+ bool activated;
+};
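
The rules above leave the band between &high and &mid unspecified; the in-tree
logic is assumed to keep the previous state there (hysteresis). An editorial
sketch of the decision, under that assumption:

	static bool damos_wmarks_active_sketch(struct damos_watermarks *w,
					       unsigned long metric)
	{
		if (w->metric == DAMOS_WMARK_NONE)
			return true;
		if (metric > w->high || metric < w->low)
			return false;		/* deactivate */
		if (metric <= w->mid)
			return true;		/* activate */
		return w->activated;		/* between high and mid */
	}
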
+
+/**
+ * struct damos_stat - Statistics on a given scheme.
+ * @nr_tried:	Total number of regions to which applying the scheme was tried.
+ * @sz_tried:	Total size of regions to which applying the scheme was tried.
+ * @nr_applied:	Total number of regions to which the scheme was applied.
+ * @sz_applied:	Total size of regions to which the scheme was applied.
+ * @sz_ops_filter_passed:
+ * Total bytes that passed ops layer-handled DAMOS filters.
+ * @qt_exceeds: Total number of times the quota of the scheme has been exceeded.
+ *
+ * "Tried an action to a region" in this context means the DAMOS core logic
+ * determined the region as eligible to apply the action. The access pattern
+ * (&struct damos_access_pattern), quotas (&struct damos_quota), watermarks
+ * (&struct damos_watermarks) and filters (&struct damos_filter) that handled
+ * on core logic can affect this. The core logic asks the operation set
+ * (&struct damon_operations) to apply the action to the region.
+ *
+ * "Applied an action to a region" in this context means the operation set
+ * (&struct damon_operations) successfully applied the action to the region, at
+ * least to a part of the region. The filters (&struct damos_filter) that
+ * handled on operation set layer and type of the action and pages of the
+ * region can affect this. For example, if a filter is set to exclude
+ * anonymous pages and the region has only anonymous pages, the region will be
+ * failed at applying the action. If the action is &DAMOS_PAGEOUT and all
+ * pages of the region are already paged out, the region will be failed at
+ * applying the action.
+ */
+struct damos_stat {
+ unsigned long nr_tried;
+ unsigned long sz_tried;
+ unsigned long nr_applied;
+ unsigned long sz_applied;
+ unsigned long sz_ops_filter_passed;
+ unsigned long qt_exceeds;
+};
+
+/**
+ * enum damos_filter_type - Type of memory for &struct damos_filter
+ * @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
+ * @DAMOS_FILTER_TYPE_ACTIVE: Active pages.
+ * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
+ * @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages.
+ * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: Page is part of a hugepage.
+ * @DAMOS_FILTER_TYPE_UNMAPPED: Unmapped pages.
+ * @DAMOS_FILTER_TYPE_ADDR: Address range.
+ * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target.
+ * @NR_DAMOS_FILTER_TYPES: Number of filter types.
+ *
+ * The anon pages type and memcg type filters are handled by the underlying
+ * &struct damon_operations as a part of trying the scheme's action, and are
+ * therefore accounted as 'tried'. In contrast, other types are handled by the
+ * core layer before the action is tried, and are therefore not accounted as
+ * 'tried'.
+ *
+ * Support for the filters that are handled by &struct damon_operations depends
+ * on the running &struct damon_operations.
+ * &enum DAMON_OPS_PADDR supports both anon pages type and memcg type filters,
+ * while &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR don't support either
+ * of the two types.
+ */
+enum damos_filter_type {
+ DAMOS_FILTER_TYPE_ANON,
+ DAMOS_FILTER_TYPE_ACTIVE,
+ DAMOS_FILTER_TYPE_MEMCG,
+ DAMOS_FILTER_TYPE_YOUNG,
+ DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
+ DAMOS_FILTER_TYPE_UNMAPPED,
+ DAMOS_FILTER_TYPE_ADDR,
+ DAMOS_FILTER_TYPE_TARGET,
+ NR_DAMOS_FILTER_TYPES,
+};
+
+/**
+ * struct damos_filter - DAMOS action target memory filter.
+ * @type: Type of the target memory.
+ * @matching: Whether this is for @type-matching memory.
+ * @allow: Whether to include or exclude the @matching memory.
+ * @memcg_id:		Memcg id in question if @type is DAMOS_FILTER_TYPE_MEMCG.
+ * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR.
+ * @target_idx: Index of the &struct damon_target of
+ * &damon_ctx->adaptive_targets if @type is
+ * DAMOS_FILTER_TYPE_TARGET.
+ * @sz_range: Size range if @type is DAMOS_FILTER_TYPE_HUGEPAGE_SIZE.
+ * @list: List head for siblings.
+ *
+ * Before applying the &damos->action to a memory region, DAMOS checks if each
+ * byte of the region matches the given condition, and avoids applying the
+ * action if so. Support of each filter type depends on the running &struct
+ * damon_operations and the type. Refer to &enum damos_filter_type for more
+ * details.
+ */
+struct damos_filter {
+ enum damos_filter_type type;
+ bool matching;
+ bool allow;
+ union {
+ unsigned short memcg_id;
+ struct damon_addr_range addr_range;
+ int target_idx;
+ struct damon_size_range sz_range;
+ };
+ struct list_head list;
+};
+
+struct damon_ctx;
+struct damos;
+
+/**
+ * struct damos_walk_control - Control damos_walk().
+ *
+ * @walk_fn: Function to be called back for each region.
+ * @data: Data that will be passed to walk functions.
+ *
+ * Control damos_walk(), which requests a specific kdamond to invoke the given
+ * function for each region that is eligible for actions of the kdamond's
+ * schemes. Refer to damos_walk() for more details.
+ */
+struct damos_walk_control {
+ void (*walk_fn)(void *data, struct damon_ctx *ctx,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *s, unsigned long sz_filter_passed);
+ void *data;
+/* private: internal use only */
+ /* informs if the kdamond finished handling of the walk request */
+ struct completion completion;
+ /* informs if the walk is canceled. */
+ bool canceled;
+};
+
+/**
+ * struct damos_access_pattern - Target access pattern of the given scheme.
+ * @min_sz_region: Minimum size of target regions.
+ * @max_sz_region: Maximum size of target regions.
+ * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
+ * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
+ * @min_age_region: Minimum age of target regions.
+ * @max_age_region: Maximum age of target regions.
+ */
+struct damos_access_pattern {
+ unsigned long min_sz_region;
+ unsigned long max_sz_region;
+ unsigned int min_nr_accesses;
+ unsigned int max_nr_accesses;
+ unsigned int min_age_region;
+ unsigned int max_age_region;
+};
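
For illustration, a pattern selecting cold regions could look as below; the
concrete bounds are made up for the example (sizes are in bytes, ages in
aggregation intervals):

	static struct damos_access_pattern example_cold_pattern = {
		.min_sz_region	 = 4096,
		.max_sz_region	 = ULONG_MAX,
		.min_nr_accesses = 0,
		.max_nr_accesses = 0,		/* never observed as accessed */
		.min_age_region	 = 10,		/* cold for >= 10 aggregations */
		.max_age_region	 = UINT_MAX,
	};
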
+
+/**
+ * struct damos_migrate_dests - Migration destination nodes and their weights.
+ * @node_id_arr: Array of migration destination node ids.
+ * @weight_arr: Array of migration weights for @node_id_arr.
+ * @nr_dests: Length of the @node_id_arr and @weight_arr arrays.
+ *
+ * @node_id_arr is an array of the ids of migration destination nodes.
+ * @weight_arr is an array of the weights for those. The weights in
+ * @weight_arr are for nodes in @node_id_arr of same array index.
+ */
+struct damos_migrate_dests {
+ unsigned int *node_id_arr;
+ unsigned int *weight_arr;
+ size_t nr_dests;
+};
+
+/**
+ * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * @pattern: Access pattern of target regions.
+ * @action: &damos_action to be applied to the target regions.
+ * @apply_interval_us: The time between applying the @action.
+ * @quota: Control the aggressiveness of this scheme.
+ * @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @migrate_dests: Destination nodes if @action is "migrate_{hot,cold}".
+ * @target_nid: Destination node if @action is "migrate_{hot,cold}".
+ * @core_filters: Additional set of &struct damos_filter for &action.
+ * @ops_filters: ops layer handling &struct damos_filter objects list.
+ * @last_applied: Last @action applied ops-managing entity.
+ * @stat: Statistics of this scheme.
+ * @list: List head for siblings.
+ *
+ * For each @apply_interval_us, DAMON finds regions which fit in the
+ * &pattern and applies &action to those. To avoid consuming too much
+ * CPU time or IO resources for the &action, &quota is used.
+ *
+ * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead.
+ *
+ * To do the work only when needed, schemes can be activated for specific
+ * system situations using &wmarks. If all schemes that are registered to the
+ * monitoring context are inactive, DAMON stops monitoring as well, and just
+ * repeatedly checks the watermarks.
+ *
+ * @migrate_dests specifies multiple migration target nodes with different
+ * weights for migrate_hot or migrate_cold actions. @target_nid is ignored if
+ * this is set.
+ *
+ * @target_nid is used to set the migration target node for migrate_hot or
+ * migrate_cold actions when @migrate_dests is unset.
+ *
+ * Before applying the &action to a memory region, the &struct damon_operations
+ * implementation could check the pages of the region and skip &action to
+ * respect &core_filters.
+ *
+ * The minimum entity to which @action can be applied depends on the underlying
+ * &struct damon_operations. Since it may not be aligned with the core layer
+ * abstraction, namely &struct damon_region, &struct damon_operations could
+ * apply @action to the same entity multiple times. Large folios that underlie
+ * multiple &struct damon_region objects could be such examples. The &struct
+ * damon_operations can use @last_applied to avoid that. The DAMOS core logic
+ * unsets @last_applied when each walk of the regions for applying the scheme
+ * is finished.
+ *
+ * After applying the &action to each region, &stat is updated to reflect the
+ * number of regions and the total size of regions to which the &action was
+ * applied.
+ */
+struct damos {
+ struct damos_access_pattern pattern;
+ enum damos_action action;
+ unsigned long apply_interval_us;
+/* private: internal use only */
+ /*
+ * number of sample intervals that should be passed before applying
+ * @action
+ */
+ unsigned long next_apply_sis;
+ /* informs if ongoing DAMOS walk for this scheme is finished */
+ bool walk_completed;
+	/*
+	 * Whether the current region in the filtering stage is allowed by core
+	 * layer-handled filters. If true, the operations layer allows it, too.
+	 */
+ bool core_filters_allowed;
+	/* whether to reject regions unmatched by core/ops filters */
+ bool core_filters_default_reject;
+ bool ops_filters_default_reject;
+/* public: */
+ struct damos_quota quota;
+ struct damos_watermarks wmarks;
+ union {
+ struct {
+ int target_nid;
+ struct damos_migrate_dests migrate_dests;
+ };
+ };
+ struct list_head core_filters;
+ struct list_head ops_filters;
+ void *last_applied;
+ struct damos_stat stat;
+ struct list_head list;
+};
+
+/**
+ * enum damon_ops_id - Identifier for each monitoring operations implementation
+ *
+ * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces
+ * @DAMON_OPS_FVADDR: Monitoring operations for only fixed ranges of virtual
+ * address spaces
+ * @DAMON_OPS_PADDR: Monitoring operations for the physical address space
+ * @NR_DAMON_OPS: Number of monitoring operations implementations
+ */
+enum damon_ops_id {
+ DAMON_OPS_VADDR,
+ DAMON_OPS_FVADDR,
+ DAMON_OPS_PADDR,
+ NR_DAMON_OPS,
+};
+
+/**
+ * struct damon_operations - Monitoring operations for given use cases.
+ *
+ * @id: Identifier of this operations set.
+ * @init: Initialize operations-related data structures.
+ * @update: Update operations-related data structures.
+ * @prepare_access_checks: Prepare next access check of target regions.
+ * @check_accesses: Check the accesses to target regions.
+ * @get_scheme_score: Get the score of a region for a scheme.
+ * @apply_scheme: Apply a DAMON-based operation scheme.
+ * @target_valid: Determine if the target is valid.
+ * @cleanup_target: Clean up each target before deallocation.
+ * @cleanup: Clean up the context.
+ *
+ * DAMON can be extended for various address spaces and usages. For this,
+ * users should register the low-level operations for their target address
+ * space and use case via &damon_ctx.ops. Then, the monitoring thread
+ * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
+ * the monitoring, @update after each &damon_attrs.ops_update_interval, and
+ * @check_accesses, @target_valid and @prepare_access_checks after each
+ * &damon_attrs.sample_interval.
+ *
+ * Each &struct damon_operations instance having valid @id can be registered
+ * via damon_register_ops() and selected by damon_select_ops() later.
+ * @init should initialize operations-related data structures. For example,
+ * this could be used to construct proper monitoring target regions and link
+ * those to @damon_ctx.adaptive_targets.
+ * @update should update the operations-related data structures. For example,
+ * this could be used to update monitoring target regions for current status.
+ * @prepare_access_checks should manipulate the monitoring regions to be
+ * prepared for the next access check.
+ * @check_accesses should check the accesses to each region that were made
+ * after the last preparation and update the number of observed accesses of
+ * each region. It should also return the max number of observed accesses that
+ * were made as a result of its update. The value will be used as the regions
+ * adjustment threshold.
+ * @get_scheme_score should return the priority score of a region for a scheme
+ * as an integer in [0, &DAMOS_MAX_SCORE].
+ * @apply_scheme is called from @kdamond when a region for a user-provided
+ * DAMON-based operation scheme is found. It should apply the scheme's action
+ * to the region and return the number of bytes of the region to which the
+ * action was successfully applied. It should also report how many bytes of
+ * the region have passed the filters (&struct damos_filter) that it handles
+ * itself.
+ * @target_valid should check whether the target is still valid for the
+ * monitoring.
+ * @cleanup_target is called before the target will be deallocated.
+ * @cleanup is called from @kdamond just before its termination.
+ */
+struct damon_operations {
+ enum damon_ops_id id;
+ void (*init)(struct damon_ctx *context);
+ void (*update)(struct damon_ctx *context);
+ void (*prepare_access_checks)(struct damon_ctx *context);
+ unsigned int (*check_accesses)(struct damon_ctx *context);
+ int (*get_scheme_score)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ unsigned long (*apply_scheme)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme, unsigned long *sz_filter_passed);
+ bool (*target_valid)(struct damon_target *t);
+ void (*cleanup_target)(struct damon_target *t);
+ void (*cleanup)(struct damon_ctx *context);
+};
+
+/*
+ * struct damon_call_control - Control damon_call().
+ *
+ * @fn: Function to be called back.
+ * @data: Data that will be passed to @fn.
+ * @repeat: Repeat invocations.
+ * @return_code: Return code from @fn invocation.
+ * @dealloc_on_cancel: De-allocate when canceled.
+ *
+ * Control damon_call(), which requests a specific kdamond to invoke a given
+ * function. Refer to damon_call() for more details.
+ */
+struct damon_call_control {
+ int (*fn)(void *data);
+ void *data;
+ bool repeat;
+ int return_code;
+ bool dealloc_on_cancel;
+/* private: internal use only */
+ /* informs if the kdamond finished handling of the request */
+ struct completion completion;
+	/* informs if the kdamond canceled the @fn invocation */
+ bool canceled;
+ /* List head for siblings. */
+ struct list_head list;
+};
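
A hedged usage sketch of damon_call(): the callback and its body are
illustrative only, and the private fields of the control structure are assumed
to be initialized by damon_call() itself. It uses the target iterator macro
defined later in this header:

	static int example_count_targets(void *data)
	{
		struct damon_ctx *ctx = data;
		struct damon_target *t;
		int nr = 0;

		damon_for_each_target(t, ctx)
			nr++;
		pr_info("damon: %d targets\n", nr);
		return 0;
	}

	static int example_call(struct damon_ctx *ctx)
	{
		struct damon_call_control control = {
			.fn = example_count_targets,
			.data = ctx,
		};

		return damon_call(ctx, &control);
	}
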
+
+/**
+ * struct damon_intervals_goal - Monitoring intervals auto-tuning goal.
+ *
+ * @access_bp: Access events observation ratio to achieve in bp.
+ * @aggrs: Number of aggregations to achieve @access_bp within.
+ * @min_sample_us: Minimum resulting sampling interval in microseconds.
+ * @max_sample_us: Maximum resulting sampling interval in microseconds.
+ *
+ * DAMON automatically tunes &damon_attrs->sample_interval and
+ * &damon_attrs->aggr_interval, aiming for the ratio in bp (1/10,000) of
+ * DAMON-observed access events to the theoretical maximum amount within @aggrs
+ * aggregations to be the same as @access_bp. The logic increases
+ * &damon_attrs->aggr_interval and &damon_attrs->sample_interval in the same
+ * ratio if the current access events observation ratio is lower than the
+ * target for each @aggrs aggregations, and vice versa.
+ *
+ * If @aggrs is zero, the tuning is disabled and hence this struct is ignored.
+ */
+struct damon_intervals_goal {
+ unsigned long access_bp;
+ unsigned long aggrs;
+ unsigned long min_sample_us;
+ unsigned long max_sample_us;
+};
+
+/**
+ * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
+ *
+ * @sample_interval: The time between access samplings.
+ * @aggr_interval: The time between monitor results aggregations.
+ * @ops_update_interval: The time between monitoring operations updates.
+ * @intervals_goal: Intervals auto-tuning goal.
+ * @min_nr_regions: The minimum number of adaptive monitoring
+ * regions.
+ * @max_nr_regions: The maximum number of adaptive monitoring
+ * regions.
+ *
+ * For each @sample_interval, DAMON checks whether each region is accessed or
+ * not during the last @sample_interval. If such access is found, DAMON
+ * aggregates the information by increasing &damon_region->nr_accesses for
+ * @aggr_interval time. For each @aggr_interval, the count is reset. DAMON
+ * also checks whether the target memory regions need update (e.g., by
+ * ``mmap()`` calls from the application, in case of virtual memory monitoring)
+ * and applies the changes for each @ops_update_interval. All time intervals
+ * are in microseconds. Please refer to &struct damon_operations and &struct
+ * damon_call_control for more detail.
+ */
+struct damon_attrs {
+ unsigned long sample_interval;
+ unsigned long aggr_interval;
+ unsigned long ops_update_interval;
+ struct damon_intervals_goal intervals_goal;
+ unsigned long min_nr_regions;
+ unsigned long max_nr_regions;
+/* private: internal use only */
+ /*
+ * @aggr_interval to @sample_interval ratio.
+	 * Core-external components call damon_set_attrs() with &damon_attrs
+	 * in which this field is unset; in that case, damon_set_attrs() sets
+	 * this field of the resulting &damon_attrs. Core-internal components
+	 * such as kdamond_tune_intervals() call damon_set_attrs() with
+	 * &damon_attrs in which this field is set; in that case,
+	 * damon_set_attrs() just keeps it.
+ */
+ unsigned long aggr_samples;
+};
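
For a concrete sense of scale, attributes resembling DAMON's usual defaults
might look as follows; the values are illustrative and given in microseconds:

	static struct damon_attrs example_attrs = {
		.sample_interval	= 5000,		/* 5 ms */
		.aggr_interval		= 100000,	/* 100 ms */
		.ops_update_interval	= 1000000,	/* 1 s */
		.min_nr_regions		= 10,
		.max_nr_regions		= 1000,
	};
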
+
+/**
+ * struct damon_ctx - Represents a context for each monitoring. This is the
+ * main interface that allows users to set the attributes and get the results
+ * of the monitoring.
+ *
+ * @attrs: Monitoring attributes for accuracy/overhead control.
+ * @kdamond: Kernel thread who does the monitoring.
+ * @kdamond_lock: Mutex for the synchronizations with @kdamond.
+ *
+ * For each monitoring context, one kernel thread for the monitoring is
+ * created. The pointer to the thread is stored in @kdamond.
+ *
+ * Once started, the monitoring thread runs until explicitly required to be
+ * terminated or every monitoring target is invalid. The validity of the
+ * targets is checked via the &damon_operations.target_valid of @ops. The
+ * termination can also be explicitly requested by calling damon_stop().
+ * The thread sets @kdamond to NULL when it terminates. Therefore, users can
+ * know whether the monitoring is ongoing or terminated by reading @kdamond.
+ * Reads and writes to @kdamond from outside of the monitoring thread must
+ * be protected by @kdamond_lock.
+ *
+ * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
+ * Accesses to other fields must be protected separately.
+ *
+ * @ops: Set of monitoring operations for given use cases.
+ * @addr_unit: Scale factor for core to ops address conversion.
+ * @min_sz_region: Minimum region size.
+ * @adaptive_targets: Head of monitoring targets (&damon_target) list.
+ * @schemes: Head of schemes (&damos) list.
+ */
+struct damon_ctx {
+ struct damon_attrs attrs;
+
+/* private: internal use only */
+ /* number of sample intervals that passed since this context started */
+ unsigned long passed_sample_intervals;
+ /*
+ * number of sample intervals that should be passed before next
+ * aggregation
+ */
+ unsigned long next_aggregation_sis;
+ /*
+ * number of sample intervals that should be passed before next ops
+ * update
+ */
+ unsigned long next_ops_update_sis;
+ /*
+ * number of sample intervals that should be passed before next
+ * intervals tuning
+ */
+ unsigned long next_intervals_tune_sis;
+ /* for waiting until the execution of the kdamond_fn is started */
+ struct completion kdamond_started;
+ /* for scheme quotas prioritization */
+ unsigned long *regions_score_histogram;
+
+ /* lists of &struct damon_call_control */
+ struct list_head call_controls;
+ struct mutex call_controls_lock;
+
+ struct damos_walk_control *walk_control;
+ struct mutex walk_control_lock;
+
+/* public: */
+ struct task_struct *kdamond;
+ struct mutex kdamond_lock;
+
+ struct damon_operations ops;
+ unsigned long addr_unit;
+ unsigned long min_sz_region;
+
+ struct list_head adaptive_targets;
+ struct list_head schemes;
+};
+
+static inline struct damon_region *damon_next_region(struct damon_region *r)
+{
+ return container_of(r->list.next, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_prev_region(struct damon_region *r)
+{
+ return container_of(r->list.prev, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_last_region(struct damon_target *t)
+{
+ return list_last_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_first_region(struct damon_target *t)
+{
+ return list_first_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline unsigned long damon_sz_region(struct damon_region *r)
+{
+ return r->ar.end - r->ar.start;
+}
+
+
+#define damon_for_each_region(r, t) \
+ list_for_each_entry(r, &t->regions_list, list)
+
+#define damon_for_each_region_from(r, t) \
+ list_for_each_entry_from(r, &t->regions_list, list)
+
+#define damon_for_each_region_safe(r, next, t) \
+ list_for_each_entry_safe(r, next, &t->regions_list, list)
+
+#define damon_for_each_target(t, ctx) \
+ list_for_each_entry(t, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_target_safe(t, next, ctx) \
+ list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_scheme(s, ctx) \
+ list_for_each_entry(s, &(ctx)->schemes, list)
+
+#define damon_for_each_scheme_safe(s, next, ctx) \
+ list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
+
+#define damos_for_each_quota_goal(goal, quota) \
+ list_for_each_entry(goal, &quota->goals, list)
+
+#define damos_for_each_quota_goal_safe(goal, next, quota) \
+ list_for_each_entry_safe(goal, next, &(quota)->goals, list)
+
+#define damos_for_each_core_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->core_filters, list)
+
+#define damos_for_each_core_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->core_filters, list)
+
+#define damos_for_each_ops_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->ops_filters, list)
+
+#define damos_for_each_ops_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->ops_filters, list)
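
As an editorial sketch of the iterator macros above, the total monitored size
of a context could be computed like this:

	static unsigned long example_total_monitored_sz(struct damon_ctx *ctx)
	{
		struct damon_target *t;
		struct damon_region *r;
		unsigned long sz = 0;

		damon_for_each_target(t, ctx)
			damon_for_each_region(r, t)
				sz += damon_sz_region(r);
		return sz;
	}
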
+
+#ifdef CONFIG_DAMON
+
+struct damon_region *damon_new_region(unsigned long start, unsigned long end);
+
+/*
+ * Add a region between two other regions
+ */
+static inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next,
+ struct damon_target *t)
+{
+ __list_add(&r->list, &prev->list, &next->list);
+ t->nr_regions++;
+}
+
+void damon_add_region(struct damon_region *r, struct damon_target *t);
+void damon_destroy_region(struct damon_region *r, struct damon_target *t);
+int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
+ unsigned int nr_ranges, unsigned long min_sz_region);
+void damon_update_region_access_rate(struct damon_region *r, bool accessed,
+ struct damon_attrs *attrs);
+
+struct damos_filter *damos_new_filter(enum damos_filter_type type,
+ bool matching, bool allow);
+void damos_add_filter(struct damos *s, struct damos_filter *f);
+bool damos_filter_for_ops(enum damos_filter_type type);
+void damos_destroy_filter(struct damos_filter *f);
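
A hedged sketch of the filter API just declared: install a filter that rejects
anonymous pages from a scheme's action (the function name is illustrative):

	static int example_exclude_anon(struct damos *s)
	{
		struct damos_filter *f;

		/* matching = true, allow = false: reject anon-matching memory */
		f = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
		if (!f)
			return -ENOMEM;
		damos_add_filter(s, f);
		return 0;
	}
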
+
+struct damos_quota_goal *damos_new_quota_goal(
+ enum damos_quota_goal_metric metric,
+ unsigned long target_value);
+void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g);
+void damos_destroy_quota_goal(struct damos_quota_goal *goal);
+
+struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+ enum damos_action action,
+ unsigned long apply_interval_us,
+ struct damos_quota *quota,
+ struct damos_watermarks *wmarks,
+ int target_nid);
+void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
+void damon_destroy_scheme(struct damos *s);
+int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src);
+
+struct damon_target *damon_new_target(void);
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
+bool damon_targets_empty(struct damon_ctx *ctx);
+void damon_free_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
+unsigned int damon_nr_regions(struct damon_target *t);
+
+struct damon_ctx *damon_new_ctx(void);
+void damon_destroy_ctx(struct damon_ctx *ctx);
+int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
+void damon_set_schemes(struct damon_ctx *ctx,
+ struct damos **schemes, ssize_t nr_schemes);
+int damon_commit_ctx(struct damon_ctx *old_ctx, struct damon_ctx *new_ctx);
+int damon_nr_running_ctxs(void);
+bool damon_is_registered_ops(enum damon_ops_id id);
+int damon_register_ops(struct damon_operations *ops);
+int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);
+
+static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+{
+ return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+}
+
+static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
+{
+ /* {aggr,sample}_interval are unsigned long, hence could overflow */
+ return min(attrs->aggr_interval / attrs->sample_interval,
+ (unsigned long)UINT_MAX);
+}
+
+
+bool damon_initialized(void);
+int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+bool damon_is_running(struct damon_ctx *ctx);
+
+int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
+int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
+
+int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end,
+ unsigned long min_sz_region);
+
+#endif /* CONFIG_DAMON */
+
+#endif /* _DAMON_H */
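
Putting the interface together, an editorial end-to-end sketch of starting
virtual address space monitoring for one process follows. Error handling is
abbreviated, and the use of find_get_pid() is an assumption about how a caller
would obtain the struct pid:

	static int example_start_vaddr_monitoring(pid_t pid_nr)
	{
		struct damon_ctx *ctx;
		struct damon_target *t;
		int err;

		ctx = damon_new_ctx();
		if (!ctx)
			return -ENOMEM;

		err = damon_select_ops(ctx, DAMON_OPS_VADDR);
		if (err)
			goto out;

		t = damon_new_target();
		if (!t) {
			err = -ENOMEM;
			goto out;
		}
		t->pid = find_get_pid(pid_nr);
		damon_add_target(ctx, t);

		err = damon_start(&ctx, 1, true);	/* exclusive context */
	out:
		if (err)
			damon_destroy_ctx(ctx);
		return err;
	}
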
diff --git a/include/linux/dasd_mod.h b/include/linux/dasd_mod.h
new file mode 100644
index 000000000000..14e6cf8c6267
--- /dev/null
+++ b/include/linux/dasd_mod.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DASD_MOD_H
+#define DASD_MOD_H
+
+#include <asm/dasd.h>
+
+struct gendisk;
+
+extern int dasd_biodasdinfo(struct gendisk *disk, dasd_information2_t *info);
+
+#endif
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h
index 05b97144d342..28e6cf1356da 100644
--- a/include/linux/davinci_emac.h
+++ b/include/linux/davinci_emac.h
@@ -46,5 +46,4 @@ enum {
EMAC_VERSION_2, /* DM646x */
};
-void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context);
#endif
diff --git a/include/linux/dax.h b/include/linux/dax.h
index eb0bff6f1eab..9d624f4d9df6 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -1,13 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
-#include <asm/pgtable.h>
-struct iomap_ops;
+typedef unsigned long dax_entry_t;
+
struct dax_device;
+struct gendisk;
+struct iomap_ops;
+struct iomap_iter;
+struct iomap;
+
+enum dax_access_mode {
+ DAX_ACCESS,
+ DAX_RECOVERY_WRITE,
+};
+
struct dax_operations {
/*
* direct_access: translate a device-relative
@@ -15,107 +26,289 @@ struct dax_operations {
* number of pages available for DAX at that pfn.
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
- void **, pfn_t *);
- /* copy_from_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
- /* flush: optional driver-specific cache management after writes */
- void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
+ enum dax_access_mode, void **, unsigned long *);
+ /* zero_page_range: required operation. Zero page range */
+ int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
+ /*
+ * recovery_write: recover a poisoned range by DAX device driver
+ * capable of clearing poison.
+ */
+ size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *iter);
};
-extern struct attribute_group dax_attribute_group;
+struct dax_holder_operations {
+ /*
+ * notify_failure - notify memory failure into inner holder device
+ * @dax_dev: the dax device which contains the holder
+ * @offset: offset on this dax device where memory failure occurs
+ * @len: length of this memory failure event
+ * @flags: action flags for memory failure handler
+ */
+ int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
+ u64 len, int mf_flags);
+};
#if IS_ENABLED(CONFIG_DAX)
-struct dax_device *dax_get_by_host(const char *host);
+struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
+void *dax_holder(struct dax_device *dax_dev);
void put_dax(struct dax_device *dax_dev);
+void kill_dax(struct dax_device *dax_dev);
+void dax_write_cache(struct dax_device *dax_dev, bool wc);
+bool dax_write_cache_enabled(struct dax_device *dax_dev);
+bool dax_synchronous(struct dax_device *dax_dev);
+void set_dax_nocache(struct dax_device *dax_dev);
+void set_dax_nomc(struct dax_device *dax_dev);
+void set_dax_synchronous(struct dax_device *dax_dev);
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
+/*
+ * Check if given mapping is supported by the file / underlying device.
+ */
+static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+ const struct inode *inode,
+ struct dax_device *dax_dev)
+{
+ if (!(vm_flags & VM_SYNC))
+ return true;
+ if (!IS_DAX(inode))
+ return false;
+ return dax_synchronous(dax_dev);
+}
#else
-static inline struct dax_device *dax_get_by_host(const char *host)
+static inline void *dax_holder(struct dax_device *dax_dev)
{
return NULL;
}
-
+static inline struct dax_device *alloc_dax(void *private,
+ const struct dax_operations *ops)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
static inline void put_dax(struct dax_device *dax_dev)
{
}
+static inline void kill_dax(struct dax_device *dax_dev)
+{
+}
+static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
+{
+}
+static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+ return false;
+}
+static inline bool dax_synchronous(struct dax_device *dax_dev)
+{
+ return true;
+}
+static inline void set_dax_nocache(struct dax_device *dax_dev)
+{
+}
+static inline void set_dax_nomc(struct dax_device *dax_dev)
+{
+}
+static inline void set_dax_synchronous(struct dax_device *dax_dev)
+{
+}
+static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+ const struct inode *inode,
+ struct dax_device *dax_dev)
+{
+ return !(vm_flags & VM_SYNC);
+}
+static inline size_t dax_recovery_write(struct dax_device *dax_dev,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+ return 0;
+}
#endif
-int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
-#if IS_ENABLED(CONFIG_FS_DAX)
-int __bdev_dax_supported(struct super_block *sb, int blocksize);
-static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+struct writeback_control;
+#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
+int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
+void dax_remove_host(struct gendisk *disk);
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops);
+void fs_put_dax(struct dax_device *dax_dev, void *holder);
+#else
+static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
+{
+ return 0;
+}
+static inline void dax_remove_host(struct gendisk *disk)
+{
+}
+static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
+ u64 *start_off, void *holder,
+ const struct dax_holder_operations *ops)
+{
+ return NULL;
+}
+static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
{
- return __bdev_dax_supported(sb, blocksize);
}
+#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
+
+#if IS_ENABLED(CONFIG_FS_DAX)
+int dax_writeback_mapping_range(struct address_space *mapping,
+ struct dax_device *dax_dev, struct writeback_control *wbc);
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
+struct page *dax_layout_busy_page(struct address_space *mapping);
+struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
+dax_entry_t dax_lock_folio(struct folio *folio);
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page);
+void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie);
+#else
+static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
- return dax_get_by_host(host);
+ return NULL;
}
-static inline void fs_put_dax(struct dax_device *dax_dev)
+static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages)
{
- put_dax(dax_dev);
+ return NULL;
}
-#else
-static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+static inline int dax_writeback_mapping_range(struct address_space *mapping,
+ struct dax_device *dax_dev, struct writeback_control *wbc)
{
return -EOPNOTSUPP;
}
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
+static inline dax_entry_t dax_lock_folio(struct folio *folio)
+{
+ if (IS_DAX(folio->mapping->host))
+ return ~0UL;
+ return 0;
+}
+
+static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
- return NULL;
}
-static inline void fs_put_dax(struct dax_device *dax_dev)
+static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page)
+{
+ return 0;
+}
+
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie)
{
}
#endif
+int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+ const struct iomap_ops *ops);
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ const struct iomap_ops *ops);
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ const struct iomap_ops *ops);
+
+static inline bool dax_page_is_idle(struct page *page)
+{
+ return page && page_ref_count(page) == 0;
+}
+
+#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
-struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops);
+#else
+static inline int dax_read_lock(void)
+{
+ return 0;
+}
+
+static inline void dax_read_unlock(int id)
+{
+}
+#endif /* CONFIG_DAX */
+
+#if !IS_ENABLED(CONFIG_FS_DAX)
+static inline int __must_check dax_break_layout(struct inode *inode,
+ loff_t start, loff_t end, void (cb)(struct inode *))
+{
+ return 0;
+}
+
+static inline void dax_break_layout_final(struct inode *inode)
+{
+}
+#endif
+
bool dax_alive(struct dax_device *dax_dev);
-void kill_dax(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- void **kaddr, pfn_t *pfn);
+ enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
- size_t size);
-void dax_write_cache(struct dax_device *dax_dev, bool wc);
-bool dax_write_cache_enabled(struct dax_device *dax_dev);
+size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i);
+int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len,
+ int mf_flags);
+void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
- const struct iomap_ops *ops);
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
+ unsigned long *pfnp, int *errp,
+ const struct iomap_ops *ops);
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+ unsigned int order, unsigned long pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
-
-#ifdef CONFIG_FS_DAX
-int __dax_zero_page_range(struct block_device *bdev,
- struct dax_device *dax_dev, sector_t sector,
- unsigned int offset, unsigned int length);
-#else
-static inline int __dax_zero_page_range(struct block_device *bdev,
- struct dax_device *dax_dev, sector_t sector,
- unsigned int offset, unsigned int length)
+int __must_check dax_break_layout(struct inode *inode, loff_t start,
+ loff_t end, void (cb)(struct inode *));
+static inline int __must_check dax_break_layout_inode(struct inode *inode,
+ void (cb)(struct inode *))
{
- return -ENXIO;
+ return dax_break_layout(inode, 0, LLONG_MAX, cb);
}
-#endif
-
+void dax_break_layout_final(struct inode *inode);
+int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dest, loff_t destoff,
+ loff_t len, bool *is_same,
+ const struct iomap_ops *ops);
+int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *ops);
static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
}
-struct writeback_control;
-int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc);
+/*
+ * Due to dax's dual memory and block personalities, hwpoison reporting
+ * takes into consideration which personality is presently visible.
+ * When dax acts like a block device, such as in block IO, an encounter of
+ * dax hwpoison is reported as -EIO.
+ * When dax acts like memory, such as in a page fault, a detection of hwpoison
+ * is reported as -EHWPOISON, which leads to VM_FAULT_HWPOISON.
+ */
+static inline int dax_mem2blk_err(int err)
+{
+ return (err == -EHWPOISON) ? -EIO : err;
+}
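
A hedged usage sketch of dax_mem2blk_err() in a block-I/O style path; the
wrapper is illustrative, and passing a NULL pfn pointer to dax_direct_access()
is assumed to be acceptable here:

	static inline long example_dax_map_for_io(struct dax_device *dax_dev,
						  pgoff_t pgoff, long nr_pages,
						  void **kaddr)
	{
		long rc = dax_direct_access(dax_dev, pgoff, nr_pages,
					    DAX_ACCESS, kaddr, NULL);

		/* surface hwpoison as -EIO on the block personality */
		return rc < 0 ? dax_mem2blk_err(rc) : rc;
	}
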
+
+#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
+void hmem_register_resource(int target_nid, struct resource *r);
+#else
+static inline void hmem_register_resource(int target_nid, struct resource *r)
+{
+}
+#endif
+
+typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
+ const struct resource *res);
+int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
#endif
diff --git a/include/linux/dca.h b/include/linux/dca.h
index ad956c2e07a8..d6228e334f48 100644
--- a/include/linux/dca.h
+++ b/include/linux/dca.h
@@ -1,22 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
*/
#ifndef DCA_H
#define DCA_H
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index aae1cdb76851..898c60d21c92 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_DCACHE_H
#define __LINUX_DCACHE_H
#include <linux/atomic.h>
#include <linux/list.h>
+#include <linux/math.h>
#include <linux/rculist.h>
#include <linux/rculist_bl.h>
#include <linux/spinlock.h>
@@ -14,6 +16,7 @@
#include <linux/wait.h>
struct path;
+struct file;
struct vfsmount;
/*
@@ -54,20 +57,12 @@ struct qstr {
};
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
+#define QSTR_LEN(n,l) (struct qstr)QSTR_INIT(n,l)
+#define QSTR(n) QSTR_LEN(n, strlen(n))
-extern const char empty_string[];
extern const struct qstr empty_name;
-extern const char slash_string[];
extern const struct qstr slash_name;
-
-struct dentry_stat_t {
- long nr_dentry;
- long nr_unused;
- long age_limit; /* age in seconds */
- long want_pages; /* pages requested by system */
- long dummy[2];
-};
-extern struct dentry_stat_t dentry_stat;
+extern const struct qstr dotdot_name;
/*
* Try to keep struct dentry aligned on 64 byte cachelines (this will
@@ -75,41 +70,57 @@ extern struct dentry_stat_t dentry_stat;
* large memory footprint increase).
*/
#ifdef CONFIG_64BIT
-# define DNAME_INLINE_LEN 32 /* 192 bytes */
+# define DNAME_INLINE_WORDS 5 /* 192 bytes */
#else
# ifdef CONFIG_SMP
-# define DNAME_INLINE_LEN 36 /* 128 bytes */
+# define DNAME_INLINE_WORDS 9 /* 128 bytes */
# else
-# define DNAME_INLINE_LEN 40 /* 128 bytes */
+# define DNAME_INLINE_WORDS 11 /* 128 bytes */
# endif
#endif
+#define DNAME_INLINE_LEN (DNAME_INLINE_WORDS*sizeof(unsigned long))
+
+union shortname_store {
+ unsigned char string[DNAME_INLINE_LEN];
+ unsigned long words[DNAME_INLINE_WORDS];
+};
+
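Since the inline-name length is now derived from a word count, the string and word views of union shortname_store necessarily occupy the same storage; a compile-time sketch of that invariant (assuming <linux/build_bug.h> is available):

	/* both views of a short name must have the same size;
	 * on 64-bit this works out to 5 words, i.e. 40 bytes
	 */
	static_assert(sizeof(union shortname_store) == DNAME_INLINE_LEN);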
#define d_lock d_lockref.lock
+#define d_iname d_shortname.string
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
- seqcount_t d_seq; /* per dentry seqlock */
+ seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
- struct qstr d_name;
+ union {
+ struct qstr __d_name; /* for use ONLY in fs/dcache.c */
+ const struct qstr d_name;
+ };
struct inode *d_inode; /* Where the name belongs to - NULL is
* negative */
- unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
+ union shortname_store d_shortname;
+ /* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */
/* Ref lookup also touches following */
- struct lockref d_lockref; /* per-dentry lock and refcount */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
void *d_fsdata; /* fs-specific data */
+ /* --- cacheline 2 boundary (128 bytes) --- */
+ struct lockref d_lockref; /* per-dentry lock and refcount
+ * keep separate from RCU lookup area if
+ * possible!
+ */
union {
struct list_head d_lru; /* LRU list */
wait_queue_head_t *d_wait; /* in-lookup ones only */
};
- struct list_head d_child; /* child of parent list */
- struct list_head d_subdirs; /* our children */
+ struct hlist_node d_sib; /* child of parent list */
+ struct hlist_head d_children; /* our children */
/*
* d_alias and d_rcu can share memory
*/
@@ -118,7 +129,7 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
-} __randomize_layout;
+};
/*
* dentry->d_lock spinlock nesting subclasses:
@@ -132,8 +143,14 @@ enum dentry_d_lock_class
DENTRY_D_LOCK_NESTED
};
+enum d_real_type {
+ D_REAL_DATA,
+ D_REAL_METADATA,
+};
+
struct dentry_operations {
- int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_revalidate)(struct inode *, const struct qstr *,
+ struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
int (*d_compare)(const struct dentry *,
@@ -146,118 +163,115 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
- struct dentry *(*d_real)(struct dentry *, const struct inode *,
- unsigned int);
+ struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
+ bool (*d_unalias_trylock)(const struct dentry *);
+ void (*d_unalias_unlock)(const struct dentry *);
} ____cacheline_aligned;
/*
* Locking rules for dentry_operations callbacks are to be found in
- * Documentation/filesystems/Locking. Keep it updated!
+ * Documentation/filesystems/locking.rst. Keep it updated!
*
- * FUrther descriptions are found in Documentation/filesystems/vfs.txt.
+ * Further descriptions are found in Documentation/filesystems/vfs.rst.
* Keep it updated too!
*/
/* d_flags entries */
-#define DCACHE_OP_HASH 0x00000001
-#define DCACHE_OP_COMPARE 0x00000002
-#define DCACHE_OP_REVALIDATE 0x00000004
-#define DCACHE_OP_DELETE 0x00000008
-#define DCACHE_OP_PRUNE 0x00000010
-
-#define DCACHE_DISCONNECTED 0x00000020
- /* This dentry is possibly not currently connected to the dcache tree, in
- * which case its parent will either be itself, or will have this flag as
- * well. nfsd will not use a dentry with this bit set, but will first
- * endeavour to clear the bit either by discovering that it is connected,
- * or by performing lookup operations. Any filesystem which supports
- * nfsd_operations MUST have a lookup function which, if it finds a
- * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that
- * dentry into place and return that dentry rather than the passed one,
- * typically using d_splice_alias. */
-
-#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
-#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
-
-#define DCACHE_CANT_MOUNT 0x00000100
-#define DCACHE_GENOCIDE 0x00000200
-#define DCACHE_SHRINK_LIST 0x00000400
-
-#define DCACHE_OP_WEAK_REVALIDATE 0x00000800
-
-#define DCACHE_NFSFS_RENAMED 0x00001000
- /* this dentry has been "silly renamed" and has to be deleted on the last
- * dput() */
-#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000
- /* Parent inode is watched by some fsnotify listener */
-
-#define DCACHE_DENTRY_KILLED 0x00008000
-
-#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */
+enum dentry_flags {
+ DCACHE_OP_HASH = BIT(0),
+ DCACHE_OP_COMPARE = BIT(1),
+ DCACHE_OP_REVALIDATE = BIT(2),
+ DCACHE_OP_DELETE = BIT(3),
+ DCACHE_OP_PRUNE = BIT(4),
+ /*
+ * This dentry is possibly not currently connected to the dcache tree,
+ * in which case its parent will either be itself, or will have this
+ * flag as well. nfsd will not use a dentry with this bit set, but will
+ * first endeavour to clear the bit either by discovering that it is
+ * connected, or by performing lookup operations. Any filesystem which
+ * supports nfsd_operations MUST have a lookup function which, if it
+ * finds a directory inode with a DCACHE_DISCONNECTED dentry, will
+ * d_move that dentry into place and return that dentry rather than the
+ * passed one, typically using d_splice_alias.
+ */
+ DCACHE_DISCONNECTED = BIT(5),
+ DCACHE_REFERENCED = BIT(6), /* Recently used, don't discard. */
+ DCACHE_DONTCACHE = BIT(7), /* Purge from memory on final dput() */
+ DCACHE_CANT_MOUNT = BIT(8),
+ DCACHE_SHRINK_LIST = BIT(10),
+ DCACHE_OP_WEAK_REVALIDATE = BIT(11),
+ /*
+ * this dentry has been "silly renamed" and has to be deleted on the
+ * last dput()
+ */
+ DCACHE_NFSFS_RENAMED = BIT(12),
+ DCACHE_FSNOTIFY_PARENT_WATCHED = BIT(13), /* Parent inode is watched by some fsnotify listener */
+ DCACHE_DENTRY_KILLED = BIT(14),
+ DCACHE_MOUNTED = BIT(15), /* is a mountpoint */
+ DCACHE_NEED_AUTOMOUNT = BIT(16), /* handle automount on this dir */
+ DCACHE_MANAGE_TRANSIT = BIT(17), /* manage transit from this dirent */
+ DCACHE_LRU_LIST = BIT(18),
+ DCACHE_ENTRY_TYPE = (7 << 19), /* bits 19..21 are for storing type: */
+ DCACHE_MISS_TYPE = (0 << 19), /* Negative dentry */
+ DCACHE_WHITEOUT_TYPE = (1 << 19), /* Whiteout dentry (stop pathwalk) */
+ DCACHE_DIRECTORY_TYPE = (2 << 19), /* Normal directory */
+ DCACHE_AUTODIR_TYPE = (3 << 19), /* Lookupless directory (presumed automount) */
+ DCACHE_REGULAR_TYPE = (4 << 19), /* Regular file type */
+ DCACHE_SPECIAL_TYPE = (5 << 19), /* Other file type */
+ DCACHE_SYMLINK_TYPE = (6 << 19), /* Symlink */
+ DCACHE_NOKEY_NAME = BIT(22), /* Encrypted name encoded without key */
+ DCACHE_OP_REAL = BIT(23),
+ DCACHE_PAR_LOOKUP = BIT(24), /* being looked up (with parent locked shared) */
+ DCACHE_DENTRY_CURSOR = BIT(25),
+ DCACHE_NORCU = BIT(26), /* No RCU delay for freeing */
+ DCACHE_PERSISTENT = BIT(27)
+};
+
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
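The three DCACHE_ENTRY_TYPE bits carry the cached file type; a short sketch, mirroring the d_is_*() helpers defined later in this header, of how they are read (the helper name is illustrative):

	static inline bool example_is_dir_type(const struct dentry *dentry)
	{
		unsigned int type = READ_ONCE(dentry->d_flags) & DCACHE_ENTRY_TYPE;

		/* both real and lookupless (automount) directories count */
		return type == DCACHE_DIRECTORY_TYPE || type == DCACHE_AUTODIR_TYPE;
	}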
-#define DCACHE_LRU_LIST 0x00080000
-
-#define DCACHE_ENTRY_TYPE 0x00700000
-#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
-#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */
-#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */
-#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */
-#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */
-#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */
-
-#define DCACHE_MAY_FREE 0x00800000
-#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_ENCRYPTED_WITH_KEY 0x02000000 /* dir is encrypted with a valid key */
-#define DCACHE_OP_REAL 0x04000000
-
-#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
-#define DCACHE_DENTRY_CURSOR 0x20000000
-
extern seqlock_t rename_lock;
/*
* These are the low-level FS interfaces to the dcache..
*/
extern void d_instantiate(struct dentry *, struct inode *);
-extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
-extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
+extern void d_instantiate_new(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
-extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
-extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
+/* weird procfs mess; *NOT* exported */
+extern struct dentry * d_splice_alias_ops(struct inode *, struct dentry *,
+ const struct dentry_operations *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
-extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
+extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
+ const struct qstr *name);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
-extern void shrink_dcache_for_umount(struct super_block *);
extern void d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_make_root(struct inode *);
-/* <clickety>-<click> the ramfs-type tree */
-extern void d_genocide(struct dentry *);
-
-extern void d_tmpfile(struct dentry *, struct inode *);
+extern void d_mark_tmpfile(struct file *, struct inode *);
+extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
+extern void d_dispose_if_unused(struct dentry *, struct list_head *);
+extern void shrink_dentry_list(struct list_head *);
+
+extern struct dentry *d_find_alias_rcu(struct inode *);
/* test whether we have any submounts in a subdir tree */
extern int path_has_submounts(const struct path *);
@@ -269,55 +283,69 @@ extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
-extern void dentry_update_name_case(struct dentry *, const struct qstr *);
-
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
-/* appendix may either be NULL or be used for transname suffixes */
extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
-extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
- const struct qstr *name, unsigned *seq);
static inline unsigned d_count(const struct dentry *dentry)
{
return dentry->d_lockref.count;
}
+ino_t d_parent_ino(struct dentry *dentry);
+
/*
* helper function for dentry_operations.d_dname() members
*/
-extern __printf(4, 5)
-char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-extern char *simple_dname(struct dentry *, char *, int);
+extern __printf(3, 4)
+char *dynamic_dname(char *, int, const char *, ...);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
-extern char *dentry_path_raw(struct dentry *, char *, int);
-extern char *dentry_path(struct dentry *, char *, int);
+extern char *dentry_path_raw(const struct dentry *, char *, int);
+extern char *dentry_path(const struct dentry *, char *, int);
/* Allocation counts.. */
/**
- * dget, dget_dlock - get a reference to a dentry
- * @dentry: dentry to get a reference to
+ * dget_dlock - get a reference to a dentry
+ * @dentry: dentry to get a reference to
*
- * Given a dentry or %NULL pointer increment the reference count
- * if appropriate and return the dentry. A dentry will not be
- * destroyed when it has references.
+ * Given a live dentry, increment the reference count and return the dentry.
+ * Caller must hold @dentry->d_lock. Making sure that dentry is alive is
+ * caller's responsibility. There are many conditions sufficient to guarantee
+ * that; e.g. anything with non-negative refcount is alive, so's anything
+ * hashed, anything positive, anyone's parent, etc.
*/
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
- if (dentry)
- dentry->d_lockref.count++;
+ dentry->d_lockref.count++;
return dentry;
}
+
+/**
+ * dget - get a reference to a dentry
+ * @dentry: dentry to get a reference to
+ *
+ * Given a dentry or %NULL pointer increment the reference count
+ * if appropriate and return the dentry. A dentry will not be
+ * destroyed when it has references. Conversely, a dentry with
+ * no references can disappear for any number of reasons, starting
+ * with memory pressure. In other words, that primitive is
+ * used to clone an existing reference; using it on something with
+ * zero refcount is a bug.
+ *
+ * NOTE: it will spin if @dentry->d_lock is held. From the deadlock
+ * avoidance point of view it is equivalent to spin_lock()/increment
+ * refcount/spin_unlock(), so calling it under @dentry->d_lock is
+ * always a bug; so's calling it under ->d_lock on any of its descendants.
+ *
+ */
static inline struct dentry *dget(struct dentry *dentry)
{
if (dentry)
@@ -328,12 +356,11 @@ static inline struct dentry *dget(struct dentry *dentry)
extern struct dentry *dget_parent(struct dentry *dentry);
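To make the refcount rules above concrete, a hedged sketch of the canonical clone-and-release pattern (the helper name is illustrative):

	static void example_borrow(struct dentry *dentry)
	{
		/* clone the caller's existing reference ... */
		struct dentry *held = dget(dentry);

		/* ... use 'held' across a sleep or a handoff ... */

		dput(held);	/* never under ->d_lock, per the note above */
	}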
/**
- * d_unhashed - is dentry hashed
- * @dentry: entry to check
+ * d_unhashed - is dentry hashed
+ * @dentry: entry to check
*
- * Returns true if the dentry passed is not currently hashed.
+ * Returns true if the dentry passed is not currently hashed.
*/
-
static inline int d_unhashed(const struct dentry *dentry)
{
return hlist_bl_unhashed(&dentry->d_hash);
@@ -356,20 +383,17 @@ static inline void dont_mount(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
-extern void __d_lookup_done(struct dentry *);
+extern void __d_lookup_unhash_wake(struct dentry *dentry);
-static inline int d_in_lookup(struct dentry *dentry)
+static inline int d_in_lookup(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_PAR_LOOKUP;
}
static inline void d_lookup_done(struct dentry *dentry)
{
- if (unlikely(d_in_lookup(dentry))) {
- spin_lock(&dentry->d_lock);
- __d_lookup_done(dentry);
- spin_unlock(&dentry->d_lock);
- }
+ if (unlikely(d_in_lookup(dentry)))
+ __d_lookup_unhash_wake(dentry);
}
extern void dput(struct dentry *);
@@ -443,6 +467,11 @@ static inline bool d_is_negative(const struct dentry *dentry)
return d_is_miss(dentry);
}
+static inline bool d_flags_negative(unsigned flags)
+{
+ return (flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE;
+}
+
static inline bool d_is_positive(const struct dentry *dentry)
{
return !d_is_negative(dentry);
@@ -486,25 +515,12 @@ static inline bool d_really_is_positive(const struct dentry *dentry)
return dentry->d_inode != NULL;
}
-static inline int simple_positive(struct dentry *dentry)
+static inline int simple_positive(const struct dentry *dentry)
{
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
-extern void d_set_fallthru(struct dentry *dentry);
-
-static inline bool d_is_fallthru(const struct dentry *dentry)
-{
- return dentry->d_flags & DCACHE_FALLTHRU;
-}
-
-
-extern int sysctl_vfs_cache_pressure;
-
-static inline unsigned long vfs_pressure_ratio(unsigned long val)
-{
- return mult_frac(val, sysctl_vfs_cache_pressure, 100);
-}
+unsigned long vfs_pressure_ratio(unsigned long val);
/**
* d_inode - Get the actual inode of this dentry
@@ -519,7 +535,7 @@ static inline struct inode *d_inode(const struct dentry *dentry)
}
/**
- * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE()
* @dentry: The dentry to query
*
* This is the helper normal filesystems should use to get at their own inodes
@@ -527,7 +543,7 @@ static inline struct inode *d_inode(const struct dentry *dentry)
*/
static inline struct inode *d_inode_rcu(const struct dentry *dentry)
{
- return ACCESS_ONCE(dentry->d_inode);
+ return READ_ONCE(dentry->d_inode);
}
/**
@@ -548,43 +564,25 @@ static inline struct inode *d_backing_inode(const struct dentry *upper)
}
/**
- * d_backing_dentry - Get upper or lower dentry we should be using
- * @upper: The upper layer
- *
- * This is the helper that should be used to get the dentry of the inode that
- * will be used if this dentry were opened as a file. It may be the upper
- * dentry or it may be a lower dentry pinned by the upper.
- *
- * Normal filesystems should not use this to access their own dentries.
- */
-static inline struct dentry *d_backing_dentry(struct dentry *upper)
-{
- return upper;
-}
-
-/**
* d_real - Return the real dentry
* @dentry: the dentry to query
- * @inode: inode to select the dentry from multiple layers (can be NULL)
- * @flags: open flags to control copy-up behavior
+ * @type: the type of real dentry (data or metadata)
*
* If dentry is on a union/overlay, then return the underlying, real dentry.
* Otherwise return the dentry itself.
*
- * See also: Documentation/filesystems/vfs.txt
+ * See also: Documentation/filesystems/vfs.rst
*/
-static inline struct dentry *d_real(struct dentry *dentry,
- const struct inode *inode,
- unsigned int flags)
+static inline struct dentry *d_real(struct dentry *dentry, enum d_real_type type)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
- return dentry->d_op->d_real(dentry, inode, flags);
+ return dentry->d_op->d_real(dentry, type);
else
return dentry;
}
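A sketch of the reworked two-flavour d_real() calling convention; the helper is illustrative, not lifted from fs code:

	/* On an overlay, metadata (e.g. ownership for permission checks)
	 * may live on a different layer than the file data.
	 */
	static struct dentry *example_metadata_dentry(struct dentry *dentry)
	{
		return d_real(dentry, D_REAL_METADATA);
	}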
/**
- * d_real_inode - Return the real inode
+ * d_real_inode - Return the real inode hosting the data
* @dentry: The dentry to query
*
* If dentry is on a union/overlay, then return the underlying, real inode.
@@ -593,14 +591,28 @@ static inline struct dentry *d_real(struct dentry *dentry,
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
/* This usage of d_real() results in const dentry */
- return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
+ return d_inode(d_real((struct dentry *) dentry, D_REAL_DATA));
}
struct name_snapshot {
- const unsigned char *name;
- unsigned char inline_name[DNAME_INLINE_LEN];
+ struct qstr name;
+ union shortname_store inline_name;
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
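The snapshot pair above gives callers a copy of a name that stays valid across a concurrent rename; a hedged usage sketch:

	static void example_report_name(struct dentry *dentry)
	{
		struct name_snapshot snap;

		take_dentry_name_snapshot(&snap, dentry);
		/* snap.name remains stable even if dentry is renamed now */
		pr_info("name: %s\n", snap.name.name);
		release_dentry_name_snapshot(&snap);
	}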
+static inline struct dentry *d_first_child(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_children.first, struct dentry, d_sib);
+}
+
+static inline struct dentry *d_next_sibling(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib);
+}
+
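With d_sib/d_children replacing the old list_head pair, sibling walks go through the new accessors; a sketch that assumes the caller holds the parent's ->d_lock:

	static void example_walk_children(struct dentry *parent)
	{
		struct dentry *child;

		for (child = d_first_child(parent); child;
		     child = d_next_sibling(child))
			pr_info("child: %pd\n", child);
	}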
+void set_default_d_op(struct super_block *, const struct dentry_operations *);
+struct dentry *d_make_persistent(struct dentry *, struct inode *);
+void d_make_discardable(struct dentry *dentry);
+
#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 68449293c4b6..0b61b8b996d4 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -1,79 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DCCP_H
#define _LINUX_DCCP_H
-
-#include <linux/in.h>
-#include <linux/interrupt.h>
-#include <linux/ktime.h>
-#include <linux/list.h>
-#include <linux/uio.h>
-#include <linux/workqueue.h>
-
-#include <net/inet_connection_sock.h>
-#include <net/inet_sock.h>
-#include <net/inet_timewait_sock.h>
-#include <net/tcp_states.h>
#include <uapi/linux/dccp.h>
-enum dccp_state {
- DCCP_OPEN = TCP_ESTABLISHED,
- DCCP_REQUESTING = TCP_SYN_SENT,
- DCCP_LISTEN = TCP_LISTEN,
- DCCP_RESPOND = TCP_SYN_RECV,
- /*
- * States involved in closing a DCCP connection:
- * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq.
- *
- * 2) CLOSING can have three different meanings (RFC 4340, 8.3):
- * a. Client has performed active-close, has sent a Close to the server
- * from state OPEN or PARTOPEN, and is waiting for the final Reset
- * (in this case, SOCK_DONE == 1).
- * b. Client is asked to perform passive-close, by receiving a CloseReq
- * in (PART)OPEN state. It sends a Close and waits for final Reset
- * (in this case, SOCK_DONE == 0).
- * c. Server performs an active-close as in (a), keeps TIMEWAIT state.
- *
- * 3) The following intermediate states are employed to give passively
- * closing nodes a chance to process their unread data:
- * - PASSIVE_CLOSE (from OPEN => CLOSED) and
- * - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above).
- */
- DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1,
- DCCP_PASSIVE_CLOSE = TCP_CLOSE_WAIT, /* any node receiving a Close */
- DCCP_CLOSING = TCP_CLOSING,
- DCCP_TIME_WAIT = TCP_TIME_WAIT,
- DCCP_CLOSED = TCP_CLOSE,
- DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV,
- DCCP_PARTOPEN = TCP_MAX_STATES,
- DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */
- DCCP_MAX_STATES
-};
-
-enum {
- DCCPF_OPEN = TCPF_ESTABLISHED,
- DCCPF_REQUESTING = TCPF_SYN_SENT,
- DCCPF_LISTEN = TCPF_LISTEN,
- DCCPF_RESPOND = TCPF_SYN_RECV,
- DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1,
- DCCPF_CLOSING = TCPF_CLOSING,
- DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
- DCCPF_CLOSED = TCPF_CLOSE,
- DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV,
- DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN),
-};
-
-static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
-{
- return (struct dccp_hdr *)skb_transport_header(skb);
-}
-
-static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
-{
- skb_push(skb, headlen);
- skb_reset_transport_header(skb);
- return memset(skb_transport_header(skb), 0, headlen);
-}
-
static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh)
{
return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh));
@@ -84,12 +14,6 @@ static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
return sizeof(*dh) + (dh->dccph_x ? sizeof(struct dccp_hdr_ext) : 0);
}
-static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
-{
- const struct dccp_hdr *dh = dccp_hdr(skb);
- return __dccp_basic_hdr_len(dh);
-}
-
static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
{
__u64 seq_nr = ntohs(dh->dccph_seq);
@@ -102,224 +26,10 @@ static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
return seq_nr;
}
-static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
-{
- return (struct dccp_hdr_request *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
-{
- return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
-{
- const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);
- return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low);
-}
-
-static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
-{
- return (struct dccp_hdr_response *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
-{
- return (struct dccp_hdr_reset *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh)
{
return __dccp_basic_hdr_len(dh) +
dccp_packet_hdr_len(dh->dccph_type);
}
-static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
-{
- return __dccp_hdr_len(dccp_hdr(skb));
-}
-
-/**
- * struct dccp_request_sock - represent DCCP-specific connection request
- * @dreq_inet_rsk: structure inherited from
- * @dreq_iss: initial sequence number, sent on the first Response (RFC 4340, 7.1)
- * @dreq_gss: greatest sequence number sent (for retransmitted Responses)
- * @dreq_isr: initial sequence number received in the first Request
- * @dreq_gsr: greatest sequence number received (for retransmitted Request(s))
- * @dreq_service: service code present on the Request (there is just one)
- * @dreq_featneg: feature negotiation options for this connection
- * The following two fields are analogous to the ones in dccp_sock:
- * @dreq_timestamp_echo: last received timestamp to echo (13.1)
- * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo
- */
-struct dccp_request_sock {
- struct inet_request_sock dreq_inet_rsk;
- __u64 dreq_iss;
- __u64 dreq_gss;
- __u64 dreq_isr;
- __u64 dreq_gsr;
- __be32 dreq_service;
- spinlock_t dreq_lock;
- struct list_head dreq_featneg;
- __u32 dreq_timestamp_echo;
- __u32 dreq_timestamp_time;
-};
-
-static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
-{
- return (struct dccp_request_sock *)req;
-}
-
-extern struct inet_timewait_death_row dccp_death_row;
-
-extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
- struct sk_buff *skb);
-
-struct dccp_options_received {
- u64 dccpor_ndp:48;
- u32 dccpor_timestamp;
- u32 dccpor_timestamp_echo;
- u32 dccpor_elapsed_time;
-};
-
-struct ccid;
-
-enum dccp_role {
- DCCP_ROLE_UNDEFINED,
- DCCP_ROLE_LISTEN,
- DCCP_ROLE_CLIENT,
- DCCP_ROLE_SERVER,
-};
-
-struct dccp_service_list {
- __u32 dccpsl_nr;
- __be32 dccpsl_list[0];
-};
-
-#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
-#define DCCP_SERVICE_CODE_IS_ABSENT 0
-
-static inline bool dccp_list_has_service(const struct dccp_service_list *sl,
- const __be32 service)
-{
- if (likely(sl != NULL)) {
- u32 i = sl->dccpsl_nr;
- while (i--)
- if (sl->dccpsl_list[i] == service)
- return true;
- }
- return false;
-}
-
-struct dccp_ackvec;
-
-/**
- * struct dccp_sock - DCCP socket state
- *
- * @dccps_swl - sequence number window low
- * @dccps_swh - sequence number window high
- * @dccps_awl - acknowledgement number window low
- * @dccps_awh - acknowledgement number window high
- * @dccps_iss - initial sequence number sent
- * @dccps_isr - initial sequence number received
- * @dccps_osr - first OPEN sequence number received
- * @dccps_gss - greatest sequence number sent
- * @dccps_gsr - greatest valid sequence number received
- * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss
- * @dccps_service - first (passive sock) or unique (active sock) service code
- * @dccps_service_list - second .. last service code on passive socket
- * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option
- * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
- * @dccps_l_ack_ratio - feature-local Ack Ratio
- * @dccps_r_ack_ratio - feature-remote Ack Ratio
- * @dccps_l_seq_win - local Sequence Window (influences ack number validity)
- * @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
- * @dccps_pcslen - sender partial checksum coverage (via sockopt)
- * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
- * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
- * @dccps_ndp_count - number of Non Data Packets since last data packet
- * @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
- * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
- * @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
- * @dccps_hc_rx_ackvec - rx half connection ack vector
- * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
- * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
- * @dccps_options_received - parsed set of retrieved options
- * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
- * @dccps_tx_qlen - maximum length of the TX queue
- * @dccps_role - role of this sock, one of %dccp_role
- * @dccps_hc_rx_insert_options - receiver wants to add options when acking
- * @dccps_hc_tx_insert_options - sender wants to add options when sending
- * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
- * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
- * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
- * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
- * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
- */
-struct dccp_sock {
- /* inet_connection_sock has to be the first member of dccp_sock */
- struct inet_connection_sock dccps_inet_connection;
-#define dccps_syn_rtt dccps_inet_connection.icsk_ack.lrcvtime
- __u64 dccps_swl;
- __u64 dccps_swh;
- __u64 dccps_awl;
- __u64 dccps_awh;
- __u64 dccps_iss;
- __u64 dccps_isr;
- __u64 dccps_osr;
- __u64 dccps_gss;
- __u64 dccps_gsr;
- __u64 dccps_gar;
- __be32 dccps_service;
- __u32 dccps_mss_cache;
- struct dccp_service_list *dccps_service_list;
- __u32 dccps_timestamp_echo;
- __u32 dccps_timestamp_time;
- __u16 dccps_l_ack_ratio;
- __u16 dccps_r_ack_ratio;
- __u64 dccps_l_seq_win:48;
- __u64 dccps_r_seq_win:48;
- __u8 dccps_pcslen:4;
- __u8 dccps_pcrlen:4;
- __u8 dccps_send_ndp_count:1;
- __u64 dccps_ndp_count:48;
- unsigned long dccps_rate_last;
- struct list_head dccps_featneg;
- struct dccp_ackvec *dccps_hc_rx_ackvec;
- struct ccid *dccps_hc_rx_ccid;
- struct ccid *dccps_hc_tx_ccid;
- struct dccp_options_received dccps_options_received;
- __u8 dccps_qpolicy;
- __u32 dccps_tx_qlen;
- enum dccp_role dccps_role:2;
- __u8 dccps_hc_rx_insert_options:1;
- __u8 dccps_hc_tx_insert_options:1;
- __u8 dccps_server_timewait:1;
- __u8 dccps_sync_scheduled:1;
- struct tasklet_struct dccps_xmitlet;
- struct timer_list dccps_xmit_timer;
-};
-
-static inline struct dccp_sock *dccp_sk(const struct sock *sk)
-{
- return (struct dccp_sock *)sk;
-}
-
-static inline const char *dccp_role(const struct sock *sk)
-{
- switch (dccp_sk(sk)->dccps_role) {
- case DCCP_ROLE_UNDEFINED: return "undefined";
- case DCCP_ROLE_LISTEN: return "listen";
- case DCCP_ROLE_SERVER: return "server";
- case DCCP_ROLE_CLIENT: return "client";
- }
- return NULL;
-}
-
-extern void dccp_syn_ack_timeout(const struct request_sock *req);
-
#endif /* _LINUX_DCCP_H */
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
deleted file mode 100644
index 699b6c499c4f..000000000000
--- a/include/linux/dcookies.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * dcookies.h
- *
- * Persistent cookie-path mappings
- *
- * Copyright 2002 John Levon <levon@movementarian.org>
- */
-
-#ifndef DCOOKIES_H
-#define DCOOKIES_H
-
-
-#ifdef CONFIG_PROFILING
-
-#include <linux/dcache.h>
-#include <linux/types.h>
-
-struct dcookie_user;
-struct path;
-
-/**
- * dcookie_register - register a user of dcookies
- *
- * Register as a dcookie user. Returns %NULL on failure.
- */
-struct dcookie_user * dcookie_register(void);
-
-/**
- * dcookie_unregister - unregister a user of dcookies
- *
- * Unregister as a dcookie user. This may invalidate
- * any dcookie values returned from get_dcookie().
- */
-void dcookie_unregister(struct dcookie_user * user);
-
-/**
- * get_dcookie - acquire a dcookie
- *
- * Convert the given dentry/vfsmount pair into
- * a cookie value.
- *
- * Returns -EINVAL if no living task has registered as a
- * dcookie user.
- *
- * Returns 0 on success, with *cookie filled in
- */
-int get_dcookie(const struct path *path, unsigned long *cookie);
-
-#else
-
-static inline struct dcookie_user * dcookie_register(void)
-{
- return NULL;
-}
-
-static inline void dcookie_unregister(struct dcookie_user * user)
-{
- return;
-}
-
-static inline int get_dcookie(const struct path *path, unsigned long *cookie)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_PROFILING */
-
-#endif /* DCOOKIES_H */
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 822c1354f3a6..dbb409d77d4f 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -1,17 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_DEBUG_LOCKING_H
#define __LINUX_DEBUG_LOCKING_H
-#include <linux/kernel.h>
#include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/cache.h>
struct task_struct;
-extern int debug_locks;
-extern int debug_locks_silent;
+extern int debug_locks __read_mostly;
+extern int debug_locks_silent __read_mostly;
-static inline int __debug_locks_off(void)
+static __always_inline int __debug_locks_off(void)
{
return xchg(&debug_locks, 0);
}
@@ -26,8 +26,10 @@ extern int debug_locks_off(void);
int __ret = 0; \
\
if (!oops_in_progress && unlikely(c)) { \
+ instrumentation_begin(); \
if (debug_locks_off() && !debug_locks_silent) \
WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
+ instrumentation_end(); \
__ret = 1; \
} \
__ret; \
@@ -45,8 +47,6 @@ extern int debug_locks_off(void);
# define locking_selftest() do { } while (0)
#endif
-struct task_struct;
-
#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index b93efc8feecd..7cecda29447e 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* debugfs.h - a tiny little debug file system
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Inc.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* debugfs is for people to use instead of /proc or /sys.
* See Documentation/filesystems/ for more details.
*/
@@ -23,7 +20,6 @@
struct device;
struct file_operations;
-struct srcu_struct;
struct debugfs_blob_wrapper {
void *data;
@@ -39,30 +35,17 @@ struct debugfs_regset32 {
const struct debugfs_reg32 *regs;
int nregs;
void __iomem *base;
+ struct device *dev; /* Optional device for Runtime PM */
};
-extern struct dentry *arch_debugfs_dir;
-
-extern struct srcu_struct debugfs_srcu;
+struct debugfs_u32_array {
+ u32 *array;
+ u32 n_elements;
+};
-/**
- * debugfs_real_fops - getter for the real file operation
- * @filp: a pointer to a struct file
- *
- * Must only be called under the protection established by
- * debugfs_use_file_start().
- */
-static inline const struct file_operations *debugfs_real_fops(const struct file *filp)
- __must_hold(&debugfs_srcu)
-{
- /*
- * Neither the pointer to the struct file_operations, nor its
- * contents ever change -- srcu_dereference() is not needed here.
- */
- return filp->f_path.dentry->d_fsdata;
-}
+extern struct dentry *arch_debugfs_dir;
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -73,97 +56,171 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
- .write = debugfs_attr_write, \
- .llseek = no_llseek, \
+ .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
}
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
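A minimal sketch of the attribute macro in use; the backing variable and accessors are hypothetical:

	static u64 example_threshold;

	static int example_get(void *data, u64 *val)
	{
		*val = *(u64 *)data;
		return 0;
	}

	static int example_set(void *data, u64 val)
	{
		*(u64 *)data = val;
		return 0;
	}

	/* generates example_fops; intended for debugfs_create_file_unsafe(),
	 * since debugfs_attr_read()/debugfs_attr_write() take the
	 * debugfs_file_get()/debugfs_file_put() references themselves
	 */
	DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");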
+typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+
+struct debugfs_short_fops {
+ ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
+ loff_t (*llseek) (struct file *, loff_t, int);
+};
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
-struct dentry *debugfs_create_file(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops);
+struct dentry *debugfs_create_file_full(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct file_operations *fops);
+struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct debugfs_short_fops *fops);
+
+/**
+ * debugfs_create_file - create a file in the debugfs filesystem
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ * on. The inode.i_private pointer will point to this value on
+ * the open() call.
+ * @fops: a pointer to a struct file_operations or struct debugfs_short_fops that
+ * should be used for this file.
+ *
+ * This is the basic "create a file" function for debugfs. It allows for a
+ * wide range of flexibility in creating a file, or a directory (if you want
+ * to create a directory, the debugfs_create_dir() function is
+ * recommended to be used instead.)
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
+ * returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ *
+ * If fops points to a struct debugfs_short_fops, then simple_open() will be
+ * used for the open, and only read/write/llseek are supported and are proxied,
+ * so no module reference or release are needed.
+ *
+ * NOTE: it's expected that most callers should _ignore_ the errors returned
+ * by this function. Other debugfs functions handle the fact that the "dentry"
+ * passed to them could be an error and they don't crash in that case.
+ * Drivers should generally work fine even if debugfs fails to init anyway.
+ */
+#define debugfs_create_file(name, mode, parent, data, fops) \
+ _Generic(fops, \
+ const struct file_operations *: debugfs_create_file_full, \
+ const struct debugfs_short_fops *: debugfs_create_file_short, \
+ struct file_operations *: debugfs_create_file_full, \
+ struct debugfs_short_fops *: debugfs_create_file_short) \
+ (name, mode, parent, data, NULL, fops)
+
+#define debugfs_create_file_aux(name, mode, parent, data, aux, fops) \
+ _Generic(fops, \
+ const struct file_operations *: debugfs_create_file_full, \
+ const struct debugfs_short_fops *: debugfs_create_file_short, \
+ struct file_operations *: debugfs_create_file_full, \
+ struct debugfs_short_fops *: debugfs_create_file_short) \
+ (name, mode, parent, data, aux, fops)
+
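How the _Generic dispatch above plays out at a call site; a hedged sketch using the short-fops form (all names hypothetical):

	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
	{
		static const char msg[] = "hello\n";

		return simple_read_from_buffer(buf, count, ppos, msg,
					       sizeof(msg) - 1);
	}

	static const struct debugfs_short_fops example_fops = {
		.read = example_read,
	};

	static void example_init(void)
	{
		/* resolves to debugfs_create_file_short() at compile time */
		debugfs_create_file("example", 0444, NULL, NULL, &example_fops);
	}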
struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
-struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops,
- loff_t file_size);
+void debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size);
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
const char *dest);
-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
debugfs_automount_t f,
void *data);
void debugfs_remove(struct dentry *dentry);
-void debugfs_remove_recursive(struct dentry *dentry);
+#define debugfs_remove_recursive debugfs_remove
+
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
-int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
- __acquires(&debugfs_srcu);
+void *debugfs_get_aux(const struct file *file);
-void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
+int debugfs_file_get(struct dentry *dentry);
+void debugfs_file_put(struct dentry *dentry);
ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
-struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, const char *new_name);
-
-struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_u32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value);
-struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent, size_t *value);
-struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value);
-struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, bool *value);
+int debugfs_change_name(struct dentry *dentry, const char *fmt, ...) __printf(2, 3);
+
+void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value);
+void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value);
+void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
+ u32 *value);
+void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value);
+void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent,
+ unsigned long *value);
+void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value);
+void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value);
+void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent,
+ u32 *value);
+void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value);
+void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value);
+void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value);
+void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
+ bool *value);
+void debugfs_create_str(const char *name, umode_t mode,
+ struct dentry *parent, char **value);
struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
-struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
- struct dentry *parent,
- struct debugfs_regset32 *regset);
+void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
-struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *array, u32 elements);
+void debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_u32_array *array);
-struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
- struct dentry *parent,
- int (*read_fn)(struct seq_file *s,
- void *data));
+void debugfs_create_devm_seqfile(struct device *dev, const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s, void *data));
bool debugfs_initialized(void);
@@ -173,6 +230,28 @@ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos);
+ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+/**
+ * struct debugfs_cancellation - cancellation data
+ * @list: internal, for keeping track
+ * @cancel: callback to call
+ * @cancel_data: extra data for the callback to call
+ */
+struct debugfs_cancellation {
+ struct list_head list;
+ void (*cancel)(struct dentry *, void *);
+ void *cancel_data;
+};
+
+void __acquires(cancellation)
+debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+void __releases(cancellation)
+debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+
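A sketch of the cancellation bracket around a blocking read; the completion-based wait and the cancel hook are illustrative:

	static void example_cancel(struct dentry *dentry, void *data)
	{
		complete(data);		/* wake the blocked reader */
	}

	static ssize_t example_blocking_read(struct file *file, char __user *buf,
					     size_t count, loff_t *ppos)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct debugfs_cancellation c = {
			.cancel		= example_cancel,
			.cancel_data	= &done,
		};
		int ret;

		debugfs_enter_cancellation(file, &c);
		ret = wait_for_completion_interruptible(&done);
		debugfs_leave_cancellation(file, &c);
		return ret ? ret : 0;
	}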
#else
#include <linux/err.h>
@@ -189,9 +268,17 @@ static inline struct dentry *debugfs_lookup(const char *name,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_file_aux(const char *name,
+ umode_t mode, struct dentry *parent,
+ void *data, void *aux,
+ const void *fops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
- const struct file_operations *fops)
+ const void *fops)
{
return ERR_PTR(-ENODEV);
}
@@ -204,13 +291,11 @@ static inline struct dentry *debugfs_create_file_unsafe(const char *name,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops,
- loff_t file_size)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size)
+{ }
static inline struct dentry *debugfs_create_dir(const char *name,
struct dentry *parent)
@@ -227,7 +312,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
static inline struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
- struct vfsmount *(*f)(void *),
+ debugfs_automount_t f,
void *data)
{
return ERR_PTR(-ENODEV);
@@ -239,15 +324,18 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
-static inline int debugfs_use_file_start(const struct dentry *dentry,
- int *srcu_idx)
- __acquires(&debugfs_srcu)
+static inline void debugfs_lookup_and_remove(const char *name,
+ struct dentry *parent)
+{ }
+
+void *debugfs_get_aux(const struct file *file);
+
+static inline int debugfs_file_get(struct dentry *dentry)
{
return 0;
}
-static inline void debugfs_use_file_finish(int srcu_idx)
- __releases(&debugfs_srcu)
+static inline void debugfs_file_put(struct dentry *dentry)
{ }
static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
@@ -263,95 +351,63 @@ static inline ssize_t debugfs_attr_write(struct file *file,
return -ENODEV;
}
-static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, char *new_name)
+static inline ssize_t debugfs_attr_write_signed(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
-static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent,
- u8 *value)
+static inline int __printf(2, 3) debugfs_change_name(struct dentry *dentry,
+ const char *fmt, ...)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
-static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent,
- u16 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value) { }
-static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value) { }
-static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent,
- u64 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value) { }
-static inline struct dentry *debugfs_create_ulong(const char *name,
- umode_t mode,
- struct dentry *parent,
- unsigned long *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent,
- u8 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value) { }
-static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent,
- u16 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value) { }
-static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value) { }
-static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent,
- u64 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value) { }
-static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent,
- size_t *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value)
+{ }
-static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent,
- bool *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent,
+ atomic_t *value)
+{ }
+
+static inline void debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent, bool *value) { }
+
+static inline void debugfs_create_str(const char *name, umode_t mode,
+ struct dentry *parent,
+ char **value)
+{ }
static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
@@ -360,11 +416,10 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_regset32(const char *name,
- umode_t mode, struct dentry *parent,
- struct debugfs_regset32 *regset)
+static inline void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset)
{
- return ERR_PTR(-ENODEV);
}
static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
@@ -377,20 +432,18 @@ static inline bool debugfs_initialized(void)
return false;
}
-static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *array, u32 elements)
+static inline void debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_u32_array *array)
{
- return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
- const char *name,
- struct dentry *parent,
- int (*read_fn)(struct seq_file *s,
- void *data))
+static inline void debugfs_create_devm_seqfile(struct device *dev,
+ const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data))
{
- return ERR_PTR(-ENODEV);
}
static inline ssize_t debugfs_read_file_bool(struct file *file,
@@ -407,6 +460,39 @@ static inline ssize_t debugfs_write_file_bool(struct file *file,
return -ENODEV;
}
+static inline ssize_t debugfs_read_file_str(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
#endif
+#define debugfs_create_file_aux_num(name, mode, parent, data, n, fops) \
+ debugfs_create_file_aux(name, mode, parent, data, \
+ (void *)(unsigned long)n, fops)
+#define debugfs_get_aux_num(f) (unsigned long)debugfs_get_aux(f)
+
+/**
+ * debugfs_create_xul - create a debugfs file that is used to read and write an
+ * unsigned long value, formatted in hexadecimal
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @value: a pointer to the variable that the file should read to and write
+ * from.
+ */
+static inline void debugfs_create_xul(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
+{
+ if (sizeof(*value) == sizeof(u32))
+ debugfs_create_x32(name, mode, parent, (u32 *)value);
+ else
+ debugfs_create_x64(name, mode, parent, (u64 *)value);
+}
+
#endif
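
The aux-pointer macros and debugfs_create_xul() above are easiest to see
in use. A minimal sketch, not part of the patch itself: the directory,
fops, and variable names are hypothetical, and debugfs_create_file_aux()
plus debugfs_get_aux() are assumed to be declared elsewhere in this
header.

	static unsigned long my_flags;

	static void my_debugfs_init(struct dentry *dir,
				    const struct file_operations *fops)
	{
		/* stash channel index 3 in the aux pointer; no allocation needed */
		debugfs_create_file_aux_num("chan3", 0600, dir, NULL, 3, fops);

		/* hex file; sizeof() picks the x32 or x64 variant at compile time */
		debugfs_create_xul("flags", 0644, dir, &my_flags);
	}

Inside the fops callbacks, debugfs_get_aux_num(file) recovers the 3.
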
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index d82bf1994485..8b95545e7924 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DEBUGOBJECTS_H
#define _LINUX_DEBUGOBJECTS_H
@@ -17,19 +18,23 @@ enum debug_obj_state {
struct debug_obj_descr;
/**
- * struct debug_obj - representaion of an tracked object
+ * struct debug_obj - representation of a tracked object
* @node: hlist node to link the object into the tracker list
* @state: tracked object state
* @astate: current active state
* @object: pointer to the real object
+ * @batch_last: pointer to the last hlist node in a batch
* @descr: pointer to an object type specific debug description structure
*/
struct debug_obj {
- struct hlist_node node;
- enum debug_obj_state state;
- unsigned int astate;
- void *object;
- struct debug_obj_descr *descr;
+ struct hlist_node node;
+ enum debug_obj_state state;
+ unsigned int astate;
+ union {
+ void *object;
+ struct hlist_node *batch_last;
+ };
+ const struct debug_obj_descr *descr;
};
/**
@@ -63,14 +68,14 @@ struct debug_obj_descr {
};
#ifdef CONFIG_DEBUG_OBJECTS
-extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_init (void *addr, const struct debug_obj_descr *descr);
extern void
-debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
-extern int debug_object_activate (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
-extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
+debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr);
+extern int debug_object_activate (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_destroy (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_free (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr);
/*
* Active state:
@@ -78,26 +83,26 @@ extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
* - Must return to 0 before deactivation.
*/
extern void
-debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next);
extern void debug_objects_early_init(void);
extern void debug_objects_mem_init(void);
#else
static inline void
-debug_object_init (void *addr, struct debug_obj_descr *descr) { }
+debug_object_init (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
+debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) { }
static inline int
-debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; }
+debug_object_activate (void *addr, const struct debug_obj_descr *descr) { return 0; }
static inline void
-debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
+debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
+debug_object_destroy (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_free (void *addr, struct debug_obj_descr *descr) { }
+debug_object_free (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
+debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) { }
static inline void debug_objects_early_init(void) { }
static inline void debug_objects_mem_init(void) { }
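
With the constification above, a descriptor can now live in rodata. A
minimal sketch under stated assumptions: the object type and its helpers
are hypothetical, and the fixup callbacks of struct debug_obj_descr are
elided.

	static const struct debug_obj_descr my_work_debug_descr = {
		.name = "my_work",
		/* fixup callbacks elided */
	};

	static void my_work_init(struct my_work *w)
	{
		debug_object_init(w, &my_work_debug_descr);
	}

	static void my_work_run(struct my_work *w)
	{
		debug_object_activate(w, &my_work_debug_descr);
		/* ... do the work ... */
		debug_object_deactivate(w, &my_work_debug_descr);
	}
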
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
index 4d683df898e6..5860163942a4 100644
--- a/include/linux/decompress/bunzip2.h
+++ b/include/linux/decompress/bunzip2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_BUNZIP2_H
#define DECOMPRESS_BUNZIP2_H
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
index 1fcfd64b5076..207d80138db5 100644
--- a/include/linux/decompress/generic.h
+++ b/include/linux/decompress/generic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_GENERIC_H
#define DECOMPRESS_GENERIC_H
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
index e4f411fdbd24..b65f24e7d442 100644
--- a/include/linux/decompress/inflate.h
+++ b/include/linux/decompress/inflate.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_DECOMPRESS_INFLATE_H
#define LINUX_DECOMPRESS_INFLATE_H
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 7925bf0ee836..ac862422df15 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/compr_mm.h
*
@@ -24,13 +25,21 @@
#define STATIC_RW_DATA static
#endif
+/*
+ * When an architecture needs to share the malloc()/free() implementation
+ * between compilation units, it needs to have non-local visibility.
+ */
+#ifndef MALLOC_VISIBLE
+#define MALLOC_VISIBLE static
+#endif
+
/* A trivial malloc implementation, adapted from
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
*/
STATIC_RW_DATA unsigned long malloc_ptr;
STATIC_RW_DATA int malloc_count;
-static void *malloc(int size)
+MALLOC_VISIBLE void *malloc(int size)
{
void *p;
@@ -39,7 +48,7 @@ static void *malloc(int size)
if (!malloc_ptr)
malloc_ptr = free_mem_ptr;
- malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+ malloc_ptr = (malloc_ptr + 7) & ~7; /* Align */
p = (void *)malloc_ptr;
malloc_ptr += size;
@@ -51,7 +60,7 @@ static void *malloc(int size)
return p;
}
-static void free(void *where)
+MALLOC_VISIBLE void free(void *where)
{
malloc_count--;
if (!malloc_count)
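
How an architecture would use the new MALLOC_VISIBLE hook, as a sketch
(the file path is hypothetical): defining the macro empty before the
include gives malloc()/free() external linkage, so one implementation can
be shared between the decompressor's compilation units.

	/* arch/xxx/boot/decompress_misc.c (hypothetical) */
	#define MALLOC_VISIBLE	/* empty: external linkage for malloc()/free() */
	#include <linux/decompress/mm.h>
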
diff --git a/include/linux/decompress/unlz4.h b/include/linux/decompress/unlz4.h
index 3273c2f36496..5a235f605d5f 100644
--- a/include/linux/decompress/unlz4.h
+++ b/include/linux/decompress/unlz4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_UNLZ4_H
#define DECOMPRESS_UNLZ4_H
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
index 8a891a193840..1c930f125182 100644
--- a/include/linux/decompress/unlzma.h
+++ b/include/linux/decompress/unlzma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_UNLZMA_H
#define DECOMPRESS_UNLZMA_H
diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h
index af18f95d6570..550ae8783d1b 100644
--- a/include/linux/decompress/unlzo.h
+++ b/include/linux/decompress/unlzo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_UNLZO_H
#define DECOMPRESS_UNLZO_H
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
index f764e2a7201e..3dd2658a9dab 100644
--- a/include/linux/decompress/unxz.h
+++ b/include/linux/decompress/unxz.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef DECOMPRESS_UNXZ_H
diff --git a/include/linux/decompress/unzstd.h b/include/linux/decompress/unzstd.h
new file mode 100644
index 000000000000..56d539ae880f
--- /dev/null
+++ b/include/linux/decompress/unzstd.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_DECOMPRESS_UNZSTD_H
+#define LINUX_DECOMPRESS_UNZSTD_H
+
+int unzstd(unsigned char *inbuf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *pos,
+ void (*error_fn)(char *x));
+#endif
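
A calling sketch for the new unzstd() prototype, assuming the generic
decompress_fn conventions: fill/flush/pos may be NULL when complete input
and output buffers are supplied, and 0 means success. The buffer names
and the error handler are hypothetical.

	static void zstd_error(char *msg)
	{
		/* report the failure; recovery is platform specific */
	}

	/* inbuf/len hold the whole compressed image; outbuf must be large enough */
	if (unzstd(inbuf, len, NULL, NULL, outbuf, NULL, zstd_error))
		/* decompression failed */;
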
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 2ecb3c46b20a..46412c00033a 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DELAY_H
#define _LINUX_DELAY_H
@@ -5,20 +6,12 @@
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines, using a pre-computed "loops_per_jiffy" value.
- *
- * Please note that ndelay(), udelay() and mdelay() may return early for
- * several reasons:
- * 1. computed loops_per_jiffy too low (due to the time taken to
- * execute the timer interrupt.)
- * 2. cache behaviour affecting the time it takes to execute the
- * loop function.
- * 3. CPU clock rate changes.
- *
- * Please see this thread:
- * http://lists.openwall.net/linux-kernel/2011/01/09/56
+ * Sleep routines using timer list timers or hrtimers.
*/
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
extern unsigned long loops_per_jiffy;
@@ -33,12 +26,21 @@ extern unsigned long loops_per_jiffy;
* The 2nd mdelay() definition ensures GCC will optimize away the
* while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G.
*/
-
#ifndef MAX_UDELAY_MS
#define MAX_UDELAY_MS 5
#endif
#ifndef mdelay
+/**
+ * mdelay - Inserting a delay based on milliseconds with busy waiting
+ * @n: requested delay in milliseconds
+ *
+ * See udelay() for basic information about mdelay() and its variants.
+ *
+ * Please double-check whether mdelay() is the right way to go, or whether
+ * refactoring the code so that msleep() can be used instead is the better
+ * option.
+ */
#define mdelay(n) (\
(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \
({unsigned long __ms=(n); while (__ms--) udelay(1000);}))
@@ -54,13 +56,82 @@ static inline void ndelay(unsigned long x)
extern unsigned long lpj_fine;
void calibrate_delay(void);
+unsigned long calibrate_delay_is_known(void);
+void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+ unsigned int state);
+
+/**
+ * usleep_range - Sleep for an approximate time
+ * @min: Minimum time in microseconds to sleep
+ * @max: Maximum time in microseconds to sleep
+ *
+ * For basic information please refer to usleep_range_state().
+ *
+ * The task will be in the state TASK_UNINTERRUPTIBLE during the sleep.
+ */
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+/**
+ * usleep_range_idle - Sleep for an approximate time with idle time accounting
+ * @min: Minimum time in microseconds to sleep
+ * @max: Maximum time in microseconds to sleep
+ *
+ * For basic information please refer to usleep_range_state().
+ *
+ * The sleeping task has the state TASK_IDLE during the sleep to prevent
+ * contribution to the load average.
+ */
+static inline void usleep_range_idle(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_IDLE);
+}
+/**
+ * ssleep - wrapper for seconds around msleep
+ * @seconds: Requested sleep duration in seconds
+ *
+ * Please refer to msleep() for detailed information.
+ */
static inline void ssleep(unsigned int seconds)
{
msleep(seconds * 1000);
}
+static const unsigned int max_slack_shift = 2;
+#define USLEEP_RANGE_UPPER_BOUND ((TICK_NSEC << max_slack_shift) / NSEC_PER_USEC)
+
+/**
+ * fsleep - flexible sleep which autoselects the best mechanism
+ * @usecs: requested sleep duration in microseconds
+ *
+ * fsleep() selects the best mechanism that will provide at most 25% slack
+ * on top of the requested sleep duration. Therefore it uses:
+ *
+ * * udelay() loop for sleep durations <= 10 microseconds to avoid hrtimer
+ * overhead for really short sleep durations.
+ * * usleep_range() for sleep durations where msleep() would incur a slack
+ * larger than 25%. This depends on the granularity of jiffies.
+ * * msleep() for all other sleep durations.
+ *
+ * Note: When %CONFIG_HIGH_RES_TIMERS is not set, all sleeps are processed with
+ * the granularity of jiffies and the slack might exceed 25% especially for
+ * short sleep durations.
+ */
+static inline void fsleep(unsigned long usecs)
+{
+ if (usecs <= 10)
+ udelay(usecs);
+ else if (usecs < USLEEP_RANGE_UPPER_BOUND)
+ usleep_range(usecs, usecs + (usecs >> max_slack_shift));
+ else
+ msleep(DIV_ROUND_UP(usecs, USEC_PER_MSEC));
+}
+
#endif /* defined(_LINUX_DELAY_H) */
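
To make the selection logic concrete, a worked example assuming HZ=1000,
so TICK_NSEC is 1,000,000 and USLEEP_RANGE_UPPER_BOUND works out to
(1,000,000 << 2) / 1,000 = 4,000 microseconds:

	fsleep(8);	/* <= 10 us: busy-waits via udelay(8) */
	fsleep(100);	/* < 4,000 us: usleep_range(100, 125), i.e. 25% slack */
	fsleep(10000);	/* >= 4,000 us: msleep(DIV_ROUND_UP(10000, 1000)) == msleep(10) */
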
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 4178d2493547..800dcc360db2 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* delayacct.h - per-task delay accounting
*
* Copyright (C) Shailabh Nagar, IBM Corp. 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
*/
#ifndef _LINUX_DELAYACCT_H
@@ -19,18 +9,9 @@
#include <uapi/linux/taskstats.h>
-/*
- * Per-task flags relevant to delay accounting
- * maintained privately to avoid exhausting similar flags in sched.h:PF_*
- * Used to set current->delays->flags
- */
-#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
-#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */
-
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
- spinlock_t lock;
- unsigned int flags; /* Private per-task flags */
+ raw_spinlock_t lock;
/* For each stat XXX, add following, aligned appropriately
*
@@ -47,55 +28,77 @@ struct task_delay_info {
* associated with the operation is added to XXX_delay.
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
- u64 blkio_start; /* Shared by blkio, swapin */
+ u64 blkio_start;
+ u64 blkio_delay_max;
+ u64 blkio_delay_min;
u64 blkio_delay; /* wait for sync block io completion */
- u64 swapin_delay; /* wait for swapin block io completion */
+ u64 swapin_start;
+ u64 swapin_delay_max;
+ u64 swapin_delay_min;
+ u64 swapin_delay; /* wait for swapin */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
- u32 swapin_count; /* total count of the number of swapin block */
- /* io operations performed */
+ u32 swapin_count; /* total count of swapin */
u64 freepages_start;
+ u64 freepages_delay_max;
+ u64 freepages_delay_min;
u64 freepages_delay; /* wait for memory reclaim */
+
+ u64 thrashing_start;
+ u64 thrashing_delay_max;
+ u64 thrashing_delay_min;
+ u64 thrashing_delay; /* wait for thrashing page */
+
+ u64 compact_start;
+ u64 compact_delay_max;
+ u64 compact_delay_min;
+ u64 compact_delay; /* wait for memory compact */
+
+ u64 wpcopy_start;
+ u64 wpcopy_delay_max;
+ u64 wpcopy_delay_min;
+ u64 wpcopy_delay; /* wait for write-protect copy */
+
+ u64 irq_delay_max;
+ u64 irq_delay_min;
+ u64 irq_delay; /* wait for IRQ/SOFTIRQ */
+
u32 freepages_count; /* total count of memory reclaim */
+ u32 thrashing_count; /* total count of thrash waits */
+ u32 compact_count; /* total count of memory compact */
+ u32 wpcopy_count; /* total count of write-protect copy */
+ u32 irq_count; /* total count of IRQ/SOFTIRQ */
};
#endif
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_TASK_DELAY_ACCT
+DECLARE_STATIC_KEY_FALSE(delayacct_key);
extern int delayacct_on; /* Delay accounting turned on/off */
extern struct kmem_cache *delayacct_cache;
extern void delayacct_init(void);
+
extern void __delayacct_tsk_init(struct task_struct *);
extern void __delayacct_tsk_exit(struct task_struct *);
extern void __delayacct_blkio_start(void);
-extern void __delayacct_blkio_end(void);
-extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
+extern void __delayacct_blkio_end(struct task_struct *);
+extern int delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
-
-static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
-{
- if (p->delays)
- return (p->delays->flags & DELAYACCT_PF_BLKIO);
- else
- return 0;
-}
-
-static inline void delayacct_set_flag(int flag)
-{
- if (current->delays)
- current->delays->flags |= flag;
-}
-
-static inline void delayacct_clear_flag(int flag)
-{
- if (current->delays)
- current->delays->flags &= ~flag;
-}
+extern void __delayacct_thrashing_start(bool *in_thrashing);
+extern void __delayacct_thrashing_end(bool *in_thrashing);
+extern void __delayacct_swapin_start(void);
+extern void __delayacct_swapin_end(void);
+extern void __delayacct_compact_start(void);
+extern void __delayacct_compact_end(void);
+extern void __delayacct_wpcopy_start(void);
+extern void __delayacct_wpcopy_end(void);
+extern void __delayacct_irq(struct task_struct *task, u32 delta);
static inline void delayacct_tsk_init(struct task_struct *tsk)
{
@@ -117,24 +120,20 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
static inline void delayacct_blkio_start(void)
{
- delayacct_set_flag(DELAYACCT_PF_BLKIO);
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_blkio_start();
}
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
{
- if (current->delays)
- __delayacct_blkio_end();
- delayacct_clear_flag(DELAYACCT_PF_BLKIO);
-}
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
-static inline int delayacct_add_tsk(struct taskstats *d,
- struct task_struct *tsk)
-{
- if (!delayacct_on || !tsk->delays)
- return 0;
- return __delayacct_add_tsk(d, tsk);
+ if (p->delays)
+ __delayacct_blkio_end(p);
}
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
@@ -146,21 +145,104 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
static inline void delayacct_freepages_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_start();
}
static inline void delayacct_freepages_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_end();
}
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_start(in_thrashing);
+}
+
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_end(in_thrashing);
+}
+
+static inline void delayacct_swapin_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_start();
+}
+
+static inline void delayacct_swapin_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_end();
+}
+
+static inline void delayacct_compact_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_start();
+}
+
+static inline void delayacct_compact_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_end();
+}
+
+static inline void delayacct_wpcopy_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_wpcopy_start();
+}
+
+static inline void delayacct_wpcopy_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_wpcopy_end();
+}
+
+static inline void delayacct_irq(struct task_struct *task, u32 delta)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (task->delays)
+ __delayacct_irq(task, delta);
+}
+
#else
-static inline void delayacct_set_flag(int flag)
-{}
-static inline void delayacct_clear_flag(int flag)
-{}
static inline void delayacct_init(void)
{}
static inline void delayacct_tsk_init(struct task_struct *tsk)
@@ -169,7 +251,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
{}
static inline void delayacct_blkio_start(void)
{}
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
{}
static inline int delayacct_add_tsk(struct taskstats *d,
struct task_struct *tsk)
@@ -182,6 +264,24 @@ static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{}
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{}
+static inline void delayacct_swapin_start(void)
+{}
+static inline void delayacct_swapin_end(void)
+{}
+static inline void delayacct_compact_start(void)
+{}
+static inline void delayacct_compact_end(void)
+{}
+static inline void delayacct_wpcopy_start(void)
+{}
+static inline void delayacct_wpcopy_end(void)
+{}
+static inline void delayacct_irq(struct task_struct *task, u32 delta)
+{}
#endif /* CONFIG_TASK_DELAY_ACCT */
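
The call pattern for the new accounting hooks, as a sketch (the
surrounding function is hypothetical): each start/end pair brackets the
wait being measured, and the thrashing variant threads a bool token
through both calls, presumably so an already-thrashing task is not
double-accounted.

	static void wait_on_thrashing_page(void)
	{
		bool in_thrashing;

		delayacct_thrashing_start(&in_thrashing);
		/* ... block until the page becomes usable ... */
		delayacct_thrashing_end(&in_thrashing);
	}
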
diff --git a/include/linux/delayed_call.h b/include/linux/delayed_call.h
index f7fa76ae1a9b..a26c3b95b5cf 100644
--- a/include/linux/delayed_call.h
+++ b/include/linux/delayed_call.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DELAYED_CALL_H
#define _DELAYED_CALL_H
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h
deleted file mode 100644
index 3f033c48071e..000000000000
--- a/include/linux/dell-led.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __DELL_LED_H__
-#define __DELL_LED_H__
-
-int dell_micmute_led_set(int on);
-
-#endif
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
new file mode 100644
index 000000000000..eb2094e43050
--- /dev/null
+++ b/include/linux/dev_printk.h
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dev_printk.h - printk messages helpers for devices
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ *
+ */
+
+#ifndef _DEVICE_PRINTK_H_
+#define _DEVICE_PRINTK_H_
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+
+#ifndef dev_fmt
+#define dev_fmt(fmt) fmt
+#endif
+
+struct device;
+
+#define PRINTK_INFO_SUBSYSTEM_LEN 16
+#define PRINTK_INFO_DEVICE_LEN 48
+
+struct dev_printk_info {
+ char subsystem[PRINTK_INFO_SUBSYSTEM_LEN];
+ char device[PRINTK_INFO_DEVICE_LEN];
+};
+
+#ifdef CONFIG_PRINTK
+
+__printf(3, 0) __cold
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args);
+__printf(3, 4) __cold
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
+
+__printf(3, 4) __cold
+void _dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_emerg(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_alert(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_crit(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_err(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_warn(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_notice(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_info(const struct device *dev, const char *fmt, ...);
+
+#else
+
+static inline __printf(3, 0)
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args)
+{ return 0; }
+static inline __printf(3, 4)
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
+{ return 0; }
+
+static inline void __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+{}
+static inline __printf(3, 4)
+void _dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...)
+{}
+
+static inline __printf(2, 3)
+void _dev_emerg(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_crit(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_alert(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_err(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_warn(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_notice(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_info(const struct device *dev, const char *fmt, ...)
+{}
+
+#endif
+
+/*
+ * Need to take variadic arguments even though we don't use them, as dev_fmt()
+ * may only just have been expanded and may result in multiple arguments.
+ */
+#define dev_printk_index_emit(level, fmt, ...) \
+ printk_index_subsys_emit("%s %s: ", level, fmt)
+
+#define dev_printk_index_wrap(_p_func, level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _p_func(dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
+ * Some callsites directly call dev_printk rather than going through the
+ * dev_<level> infrastructure, so we need to emit here as well as inside those
+ * level-specific macros. Only one index entry will be produced, either way,
+ * since dev_printk's `fmt` isn't known at compile time if going through the
+ * dev_<level> macros.
+ *
+ * dev_fmt() isn't called for dev_printk when used directly, as it's used by
+ * the dev_<level> macros internally which already have dev_fmt() processed.
+ *
+ * We also can't use dev_printk_index_wrap directly, because we have a separate
+ * level to process.
+ */
+#define dev_printk(level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
+ * Dummy dev_printk for disabled debugging statements to use whilst maintaining
+ * gcc's format checking.
+ */
+#define dev_no_printk(level, dev, fmt, ...) \
+ ({ \
+ if (0) \
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
+ * #defines for all the dev_<level> macros to prefix with whatever
+ * possible use of #define dev_fmt(fmt) ...
+ */
+
+#define dev_emerg(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_emerg, KERN_EMERG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_crit(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_crit, KERN_CRIT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_alert(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_alert, KERN_ALERT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_err(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_warn(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_warn, KERN_WARNING, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_notice(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_notice, KERN_NOTICE, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_info(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+#define dev_dbg(dev, fmt, ...) \
+ dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#elif defined(DEBUG)
+#define dev_dbg(dev, fmt, ...) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#else
+#define dev_dbg(dev, fmt, ...) \
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+#ifdef CONFIG_PRINTK
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ static bool __print_once __read_mostly; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+#else
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ if (0) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define dev_emerg_once(dev, fmt, ...) \
+ dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_once(dev, fmt, ...) \
+ dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_once(dev, fmt, ...) \
+ dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_once(dev, fmt, ...) \
+ dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_once(dev, fmt, ...) \
+ dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_once(dev, fmt, ...) \
+ dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_once(dev, fmt, ...) \
+ dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+#define dev_dbg_once(dev, fmt, ...) \
+ dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
+
+#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define dev_emerg_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor) && \
+ __ratelimit(&_rs)) \
+ __dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \
+ ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+} while (0)
+#else
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+#ifdef VERBOSE_DEBUG
+#define dev_vdbg dev_dbg
+#else
+#define dev_vdbg(dev, fmt, ...) \
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+/*
+ * dev_WARN*() acts like dev_printk(), but with the key difference of
+ * using WARN/WARN_ONCE to include file/line information and a backtrace.
+ */
+#define dev_WARN(dev, format, arg...) \
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
+
+#define dev_WARN_ONCE(dev, condition, format, arg...) \
+ WARN_ONCE(condition, "%s %s: " format, \
+ dev_driver_string(dev), dev_name(dev), ## arg)
+
+__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+__printf(3, 4) int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...);
+
+/* Simple helper for dev_err_probe() when ERR_PTR() is to be returned. */
+#define dev_err_ptr_probe(dev, ___err, fmt, ...) \
+ ERR_PTR(dev_err_probe(dev, ___err, fmt, ##__VA_ARGS__))
+
+/* Simple helper for dev_err_probe() when ERR_CAST() is to be returned. */
+#define dev_err_cast_probe(dev, ___err_ptr, fmt, ...) \
+ ERR_PTR(dev_err_probe(dev, PTR_ERR(___err_ptr), fmt, ##__VA_ARGS__))
+
+#endif /* _DEVICE_PRINTK_H_ */
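
The probe helpers at the bottom are the most widely used additions. A
sketch of the intended idiom (the clock lookup and message are
hypothetical): dev_err_probe() logs the message, records it as the
deferral reason when err is -EPROBE_DEFER, and returns err, so a
resource-acquisition failure collapses to one line.

	static int my_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct clk *clk;

		clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk),
					     "failed to get clock\n");
		/* ... */
		return 0;
	}
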
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
index 269521f143ac..377892604ff4 100644
--- a/include/linux/devcoredump.h
+++ b/include/linux/devcoredump.h
@@ -1,21 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This file is provided under the GPLv2 license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2015 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
*/
#ifndef __DEVCOREDUMP_H
#define __DEVCOREDUMP_H
@@ -27,6 +12,9 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
+/* if data isn't read by userspace after 5 minutes then delete it */
+#define DEVCD_TIMEOUT (HZ * 60 * 5)
+
/*
* _devcd_free_sgtable - free all the memory of the given scatterlist table
* (i.e. both pages and scatterlist instances)
@@ -65,19 +53,22 @@ static inline void _devcd_free_sgtable(struct scatterlist *table)
kfree(delete_iter);
}
-
#ifdef CONFIG_DEV_COREDUMP
void dev_coredumpv(struct device *dev, void *data, size_t datalen,
gfp_t gfp);
-void dev_coredumpm(struct device *dev, struct module *owner,
- void *data, size_t datalen, gfp_t gfp,
- ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen),
- void (*free)(void *data));
+void dev_coredumpm_timeout(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset,
+ size_t count, void *data,
+ size_t datalen),
+ void (*free)(void *data),
+ unsigned long timeout);
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
size_t datalen, gfp_t gfp);
+
+void dev_coredump_put(struct device *dev);
#else
static inline void dev_coredumpv(struct device *dev, void *data,
size_t datalen, gfp_t gfp)
@@ -86,11 +77,13 @@ static inline void dev_coredumpv(struct device *dev, void *data,
}
static inline void
-dev_coredumpm(struct device *dev, struct module *owner,
- void *data, size_t datalen, gfp_t gfp,
- ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen),
- void (*free)(void *data))
+dev_coredumpm_timeout(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset,
+ size_t count, void *data,
+ size_t datalen),
+ void (*free)(void *data),
+ unsigned long timeout)
{
free(data);
}
@@ -100,6 +93,34 @@ static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table,
{
_devcd_free_sgtable(table);
}
+static inline void dev_coredump_put(struct device *dev)
+{
+}
#endif /* CONFIG_DEV_COREDUMP */
+/**
+ * dev_coredumpm - create device coredump with read/free methods
+ * @dev: the struct device for the crashed device
+ * @owner: the module that contains the read/free functions, use %THIS_MODULE
+ * @data: data cookie for the @read/@free functions
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ * @read: function to read from the given buffer
+ * @free: function to free the given buffer
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed the @free
+ * function will be called to free the data.
+ */
+static inline void dev_coredumpm(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen),
+ void (*free)(void *data))
+{
+ dev_coredumpm_timeout(dev, owner, data, datalen, gfp, read, free,
+ DEVCD_TIMEOUT);
+}
+
#endif /* __DEVCOREDUMP_H */
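
With the refactor above, dev_coredumpm() is a thin wrapper that forwards
to dev_coredumpm_timeout() with the default five-minute DEVCD_TIMEOUT. A
caller sketch (buffer names hypothetical; per the existing API,
dev_coredumpv() takes ownership of the buffer):

	/* hand the crash dump to the framework; it is freed automatically
	 * if userspace has not read it within DEVCD_TIMEOUT */
	dev_coredumpv(dev, dump_buf, dump_len, GFP_KERNEL);
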
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index 4db00b02ca3f..4a50a5c71a5f 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* devfreq-event: a framework to provide raw data and events of devfreq devices
*
* Copyright (C) 2014 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_DEVFREQ_EVENT_H__
@@ -81,14 +78,20 @@ struct devfreq_event_ops {
* struct devfreq_event_desc - the descriptor of devfreq-event device
*
* @name : the name of devfreq-event device.
+ * @event_type : the type of the event determined and used by driver
* @driver_data : the private data for devfreq-event driver.
* @ops : the operation to control devfreq-event device.
*
 * Each devfreq-event device is described with this structure.
 * This structure contains the various data for a devfreq-event device.
 * The event_type describes what is going to be counted in the register.
 * A driver might choose to count e.g. read requests, write data in bytes, etc.
 * The full list of supported types is present in a specific header in:
 * include/dt-bindings/pmu/.
*/
struct devfreq_event_desc {
const char *name;
+ u32 event_type;
void *driver_data;
const struct devfreq_event_ops *ops;
@@ -103,8 +106,11 @@ extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata);
extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
- struct device *dev, int index);
-extern int devfreq_event_get_edev_count(struct device *dev);
+ struct device *dev,
+ const char *phandle_name,
+ int index);
+extern int devfreq_event_get_edev_count(struct device *dev,
+ const char *phandle_name);
extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc);
extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
@@ -149,12 +155,15 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
}
static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
- struct device *dev, int index)
+ struct device *dev,
+ const char *phandle_name,
+ int index)
{
return ERR_PTR(-EINVAL);
}
-static inline int devfreq_event_get_edev_count(struct device *dev)
+static inline int devfreq_event_get_edev_count(struct device *dev,
+ const char *phandle_name)
{
return -EINVAL;
}
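
A consumer sketch for the new phandle_name parameter ("devfreq-events" is
the conventional device-tree property name here, but confirm it against
the caller's binding):

	struct devfreq_event_dev *edev;

	edev = devfreq_event_get_edev_by_phandle(dev, "devfreq-events", 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);
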
diff --git a/include/linux/devfreq-governor.h b/include/linux/devfreq-governor.h
new file mode 100644
index 000000000000..dfdd0160a29f
--- /dev/null
+++ b/include/linux/devfreq-governor.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * governor.h - internal header for devfreq governors.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This header is for devfreq governors
+ */
+
+#ifndef __LINUX_DEVFREQ_DEVFREQ_H__
+#define __LINUX_DEVFREQ_DEVFREQ_H__
+
+#include <linux/devfreq.h>
+
+#define DEVFREQ_NAME_LEN 16
+
+#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
+
+/* Devfreq events */
+#define DEVFREQ_GOV_START 0x1
+#define DEVFREQ_GOV_STOP 0x2
+#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3
+#define DEVFREQ_GOV_SUSPEND 0x4
+#define DEVFREQ_GOV_RESUME 0x5
+
+#define DEVFREQ_MIN_FREQ 0
+#define DEVFREQ_MAX_FREQ ULONG_MAX
+
+/*
+ * Definition of the governor feature flags
+ * - DEVFREQ_GOV_FLAG_IMMUTABLE
+ * : This governor can never be replaced by another governor.
+ * - DEVFREQ_GOV_FLAG_IRQ_DRIVEN
+ * : The devfreq won't schedule the work for this governor.
+ */
+#define DEVFREQ_GOV_FLAG_IMMUTABLE BIT(0)
+#define DEVFREQ_GOV_FLAG_IRQ_DRIVEN BIT(1)
+
+/*
+ * Definition of governor attribute flags except for common sysfs attributes
+ * - DEVFREQ_GOV_ATTR_POLLING_INTERVAL
+ * : Indicate polling_interval sysfs attribute
+ * - DEVFREQ_GOV_ATTR_TIMER
+ * : Indicate timer sysfs attribute
+ */
+#define DEVFREQ_GOV_ATTR_POLLING_INTERVAL BIT(0)
+#define DEVFREQ_GOV_ATTR_TIMER BIT(1)
+
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @node: list node - contains registered devfreq governors
+ * @name: Governor's name
+ * @attrs: Governor's sysfs attribute flags
+ * @flags: Governor's feature flags
+ * @get_target_freq: Returns desired operating frequency for the device.
+ * Basically, get_target_freq will run
+ * devfreq_dev_profile.get_dev_status() to get the
+ * status of the device (load = busy_time / total_time).
+ * @event_handler: Callback for devfreq core framework to notify events
+ * to governors. Events include per device governor
+ * init and exit, opp changes out of devfreq, suspend
+ * and resume of per device devfreq during device idle.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+ struct list_head node;
+
+ const char name[DEVFREQ_NAME_LEN];
+ const u64 attrs;
+ const u64 flags;
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ int (*event_handler)(struct devfreq *devfreq,
+ unsigned int event, void *data);
+};
+
+void devfreq_monitor_start(struct devfreq *devfreq);
+void devfreq_monitor_stop(struct devfreq *devfreq);
+void devfreq_monitor_suspend(struct devfreq *devfreq);
+void devfreq_monitor_resume(struct devfreq *devfreq);
+void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay);
+
+int devfreq_add_governor(struct devfreq_governor *governor);
+int devfreq_remove_governor(struct devfreq_governor *governor);
+
+int devm_devfreq_add_governor(struct device *dev,
+ struct devfreq_governor *governor);
+
+int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+int devfreq_update_target(struct devfreq *devfreq, unsigned long freq);
+void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq,
+ unsigned long *max_freq);
+
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+ if (!df->profile->get_dev_status)
+ return -EINVAL;
+
+ return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
+#endif /* __LINUX_DEVFREQ_DEVFREQ_H__ */
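
A minimal governor sketch built only from the declarations above (all
names hypothetical; a real governor handles more events and errors):

	static int dummy_get_target_freq(struct devfreq *df, unsigned long *freq)
	{
		*freq = DEVFREQ_MAX_FREQ;	/* always request the ceiling */
		return 0;
	}

	static int dummy_event_handler(struct devfreq *devfreq,
				       unsigned int event, void *data)
	{
		switch (event) {
		case DEVFREQ_GOV_START:
			devfreq_monitor_start(devfreq);
			break;
		case DEVFREQ_GOV_STOP:
			devfreq_monitor_stop(devfreq);
			break;
		}
		return 0;
	}

	static struct devfreq_governor dummy_governor = {
		.name		 = "dummy",
		.get_target_freq = dummy_get_target_freq,
		.event_handler	 = dummy_event_handler,
	};

	/* registered from module init: devfreq_add_governor(&dummy_governor); */
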
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 597294e0cc40..dc1075dc3446 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
* for Non-CPU Devices.
*
* Copyright (C) 2011 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_DEVFREQ_H__
@@ -16,8 +13,14 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
-#define DEVFREQ_NAME_LEN 16
+/* DEVFREQ governor name */
+#define DEVFREQ_GOV_SIMPLE_ONDEMAND "simple_ondemand"
+#define DEVFREQ_GOV_PERFORMANCE "performance"
+#define DEVFREQ_GOV_POWERSAVE "powersave"
+#define DEVFREQ_GOV_USERSPACE "userspace"
+#define DEVFREQ_GOV_PASSIVE "passive"
/* DEVFREQ notifier interface */
#define DEVFREQ_TRANSITION_NOTIFIER (0)
@@ -26,8 +29,17 @@
#define DEVFREQ_PRECHANGE (0)
#define DEVFREQ_POSTCHANGE (1)
+/* DEVFREQ work timers */
+enum devfreq_timer {
+ DEVFREQ_TIMER_DEFERRABLE = 0,
+ DEVFREQ_TIMER_DELAYED,
+ DEVFREQ_TIMER_NUM,
+};
+
struct devfreq;
struct devfreq_governor;
+struct devfreq_cpu_data;
+struct thermal_cooling_device;
/**
* struct devfreq_dev_status - Data given from devfreq user device to
@@ -65,6 +77,7 @@ struct devfreq_dev_status {
* @initial_freq: The operating frequency when devfreq_add_device() is
* called.
* @polling_ms: The polling interval in ms. 0 disables polling.
+ * @timer: Timer type is either deferrable or delayed timer.
* @target: The device should set its operating frequency at
* freq or lowest-upper-than-freq value. If freq is
* higher than any operable frequency, set maximum.
@@ -84,12 +97,19 @@ struct devfreq_dev_status {
* from devfreq_remove_device() call. If the user
* has registered devfreq->nb at a notifier-head,
* this is the time to unregister it.
- * @freq_table: Optional list of frequencies to support statistics.
- * @max_state: The size of freq_table.
+ * @freq_table: Optional list of frequencies to support statistics;
+ * it must be generated in ascending order.
+ * @max_state: The size of freq_table.
+ *
+ * @is_cooling_device: A boolean flag marking the device as capable of
+ * acting as a cooling device.
+ * @dev_groups: Optional device-specific sysfs attribute groups to be
+ * attached to the devfreq device.
*/
struct devfreq_dev_profile {
unsigned long initial_freq;
unsigned int polling_ms;
+ enum devfreq_timer timer;
int (*target)(struct device *dev, unsigned long *freq, u32 flags);
int (*get_dev_status)(struct device *dev,
@@ -99,6 +119,24 @@ struct devfreq_dev_profile {
unsigned long *freq_table;
unsigned int max_state;
+
+ bool is_cooling_device;
+
+ const struct attribute_group **dev_groups;
+};
+
+/**
+ * struct devfreq_stats - Statistics of devfreq device behavior
+ * @total_trans: Number of devfreq transitions.
+ * @trans_table: Statistics of devfreq transitions.
+ * @time_in_state: Statistics of devfreq states.
+ * @last_update: The last time stats were updated.
+ */
+struct devfreq_stats {
+ unsigned int total_trans;
+ unsigned int *trans_table;
+ u64 *time_in_state;
+ u64 last_update;
};
/**
@@ -110,30 +148,38 @@ struct devfreq_dev_profile {
* using devfreq.
* @profile: device-specific devfreq profile
* @governor: method how to choose frequency based on the usage.
- * @governor_name: devfreq governor name for use with this devfreq
+ * @opp_table: Reference to OPP table of dev.parent, if one exists.
* @nb: notifier block used to notify devfreq object that it should
 * reevaluate operable frequencies. Devfreq users may register
 * devfreq.nb with the corresponding notifier call chain.
* @work: delayed work for load monitoring.
+ * @freq_table: current frequency table used by the devfreq driver.
+ * @max_state: count of entry present in the frequency table.
* @previous_freq: previously configured frequency value.
- * @data: Private data of the governor. The devfreq framework does not
- * touch this.
- * @min_freq: Limit minimum frequency requested by user (0: none)
- * @max_freq: Limit maximum frequency requested by user (0: none)
+ * @last_status: devfreq user device info, performance statistics
+ * @data: private data the devfreq driver passes to governors; governors
+ * should not change it.
+ * @governor_data: private data for governors, devfreq core doesn't touch it.
+ * @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs)
+ * @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs)
+ * @scaling_min_freq: Limit minimum frequency requested by OPP interface
+ * @scaling_max_freq: Limit maximum frequency requested by OPP interface
* @stop_polling: devfreq polling status of a device.
- * @total_trans: Number of devfreq transitions
- * @trans_table: Statistics of devfreq transitions
- * @time_in_state: Statistics of devfreq states
- * @last_stat_updated: The last time stat updated
+ * @suspend_freq: frequency of a device set during suspend phase.
+ * @resume_freq: frequency of a device set in resume phase.
+ * @suspend_count: suspend requests counter for a device.
+ * @stats: Statistics of devfreq device behavior
* @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
+ * @cdev: Cooling device pointer if the devfreq has cooling property
+ * @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY
+ * @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY
*
- * This structure stores the devfreq information for a give device.
+ * This structure stores the devfreq information for a given device.
*
* Note that when a governor accesses entries in struct devfreq in its
* functions except for the context of callbacks defined in struct
* devfreq_governor, the governor should protect its access with the
* struct mutex lock in struct devfreq. A governor may use this mutex
- * to protect its own private data in void *data as well.
+ * to protect its own private data in ``void *data`` as well.
*/
struct devfreq {
struct list_head node;
@@ -142,26 +188,39 @@ struct devfreq {
struct device dev;
struct devfreq_dev_profile *profile;
const struct devfreq_governor *governor;
- char governor_name[DEVFREQ_NAME_LEN];
+ struct opp_table *opp_table;
struct notifier_block nb;
struct delayed_work work;
+ unsigned long *freq_table;
+ unsigned int max_state;
+
unsigned long previous_freq;
struct devfreq_dev_status last_status;
- void *data; /* private data for governors */
+ void *data;
+ void *governor_data;
- unsigned long min_freq;
- unsigned long max_freq;
+ struct dev_pm_qos_request user_min_freq_req;
+ struct dev_pm_qos_request user_max_freq_req;
+ unsigned long scaling_min_freq;
+ unsigned long scaling_max_freq;
bool stop_polling;
- /* information for device frequency transition */
- unsigned int total_trans;
- unsigned int *trans_table;
- unsigned long *time_in_state;
- unsigned long last_stat_updated;
+ unsigned long suspend_freq;
+ unsigned long resume_freq;
+ atomic_t suspend_count;
+
+ /* information for device frequency transitions */
+ struct devfreq_stats stats;
struct srcu_notifier_head transition_notifier_list;
+
+ /* Pointer to the cooling device if used for thermal mitigation */
+ struct thermal_cooling_device *cdev;
+
+ struct notifier_block nb_min;
+ struct notifier_block nb_max;
};
struct devfreq_freqs {
@@ -170,53 +229,59 @@ struct devfreq_freqs {
};
#if defined(CONFIG_PM_DEVFREQ)
-extern struct devfreq *devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data);
-extern int devfreq_remove_device(struct devfreq *devfreq);
-extern struct devfreq *devm_devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data);
-extern void devm_devfreq_remove_device(struct device *dev,
- struct devfreq *devfreq);
+struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+int devfreq_remove_device(struct devfreq *devfreq);
+struct devfreq *devm_devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq);
/* Supposed to be called by PM callbacks */
-extern int devfreq_suspend_device(struct devfreq *devfreq);
-extern int devfreq_resume_device(struct devfreq *devfreq);
+int devfreq_suspend_device(struct devfreq *devfreq);
+int devfreq_resume_device(struct devfreq *devfreq);
+
+void devfreq_suspend(void);
+void devfreq_resume(void);
+
+/* update_devfreq() - Reevaluate the device and configure frequency */
+int update_devfreq(struct devfreq *devfreq);
/* Helper functions for devfreq user device driver with OPP. */
-extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
- unsigned long *freq, u32 flags);
-extern int devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devm_devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devfreq_register_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list);
-extern int devfreq_unregister_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list);
-extern int devm_devfreq_register_notifier(struct device *dev,
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq, u32 flags);
+int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devm_devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+void devm_devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+int devm_devfreq_register_notifier(struct device *dev,
struct devfreq *devfreq,
struct notifier_block *nb,
unsigned int list);
-extern void devm_devfreq_unregister_notifier(struct device *dev,
+void devm_devfreq_unregister_notifier(struct device *dev,
struct devfreq *devfreq,
struct notifier_block *nb,
unsigned int list);
-extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
- int index);
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node);
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ const char *phandle_name, int index);
+#endif /* CONFIG_PM_DEVFREQ */
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
- * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
+ * struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
* @upthreshold: If the load is over this value, the frequency jumps.
* Specify 0 to use the default. Valid value = 0 to 100.
@@ -232,11 +297,14 @@ struct devfreq_simple_ondemand_data {
unsigned int upthreshold;
unsigned int downdifferential;
};
-#endif
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+enum devfreq_parent_dev_type {
+ DEVFREQ_PARENT_DEV,
+ CPUFREQ_PARENT_DEV,
+};
+
/**
- * struct devfreq_passive_data - void *data fed to struct devfreq
+ * struct devfreq_passive_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
* @parent: the devfreq instance of parent device.
* @get_target_freq: Optional callback, Returns desired operating frequency
@@ -246,8 +314,11 @@ struct devfreq_simple_ondemand_data {
* using governors except for passive governor.
* If the devfreq device has the specific method to decide
* the next frequency, should use this callback.
- * @this: the devfreq instance of own device.
- * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list
+ * @parent_type: the parent type of the device.
+ * @this: the devfreq instance of own device.
+ * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER or
+ * CPUFREQ_TRANSITION_NOTIFIER list.
+ * @cpu_data_list: the list of cpu frequency data for all cpufreq_policy.
*
* The devfreq_passive_data have to set the devfreq instance of parent
* device with governors except for the passive governor. But, don't need to
@@ -261,17 +332,20 @@ struct devfreq_passive_data {
 /* Optional callback to decide the next frequency of passive device */
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ /* Should set the type of parent device */
+ enum devfreq_parent_dev_type parent_type;
+
/* For passive governor's internal use. Don't need to set them */
struct devfreq *this;
struct notifier_block nb;
+ struct list_head cpu_data_list;
};
-#endif
-#else /* !CONFIG_PM_DEVFREQ */
+#if !defined(CONFIG_PM_DEVFREQ)
static inline struct devfreq *devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data)
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data)
{
return ERR_PTR(-ENOSYS);
}
@@ -304,32 +378,35 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
return 0;
}
+static inline void devfreq_suspend(void) {}
+static inline void devfreq_resume(void) {}
+
static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
- unsigned long *freq, u32 flags)
+ unsigned long *freq, u32 flags)
{
return ERR_PTR(-EINVAL);
}
static inline int devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline int devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline int devm_devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
}
@@ -348,22 +425,27 @@ static inline int devfreq_unregister_notifier(struct devfreq *devfreq,
}
static inline int devm_devfreq_register_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
return 0;
}
static inline void devm_devfreq_unregister_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+}
+
+static inline struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
{
+ return ERR_PTR(-ENODEV);
}
static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
- int index)
+ const char *phandle_name, int index)
{
return ERR_PTR(-ENODEV);
}
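To illustrate the parent_type field introduced above: a device that should passively follow CPU frequency sets CPUFREQ_PARENT_DEV and leaves @parent NULL. A minimal sketch, assuming the DEVFREQ_GOV_PASSIVE governor-name constant from this header and a hypothetical foo driver:

        #include <linux/devfreq.h>

        static struct devfreq_passive_data foo_passive_data = {
                .parent_type = CPUFREQ_PARENT_DEV,
                /* .parent stays NULL; cpufreq notifiers drive the scaling. */
                /* this/nb/cpu_data_list are managed by the passive governor. */
        };

        /* In probe:
         *      df = devm_devfreq_add_device(dev, &foo_profile,
         *                                   DEVFREQ_GOV_PASSIVE,
         *                                   &foo_passive_data);
         */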
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 4635f95000a4..14baa73fc2de 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -1,17 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* devfreq_cooling: Thermal cooling device implementation for devices using
* devfreq
*
* Copyright (C) 2014-2015 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DEVFREQ_COOLING_H__
@@ -23,17 +16,6 @@
/**
* struct devfreq_cooling_power - Devfreq cooling power ops
- * @get_static_power: Take voltage, in mV, and return the static power
- * in mW. If NULL, the static power is assumed
- * to be 0.
- * @get_dynamic_power: Take voltage, in mV, and frequency, in HZ, and
- * return the dynamic power draw in mW. If NULL,
- * a simple power model is used.
- * @dyn_power_coeff: Coefficient for the simple dynamic power model in
- * mW/(MHz mV mV).
- * If get_dynamic_power() is NULL, then the
- * dynamic power is calculated as
- * @dyn_power_coeff * frequency * voltage^2
* @get_real_power: When this is set, the framework uses it to ask the
* device driver for the actual power.
* Some devices have more sophisticated methods
@@ -53,14 +35,8 @@
* max total (static + dynamic) power value for each OPP.
*/
struct devfreq_cooling_power {
- unsigned long (*get_static_power)(struct devfreq *devfreq,
- unsigned long voltage);
- unsigned long (*get_dynamic_power)(struct devfreq *devfreq,
- unsigned long freq,
- unsigned long voltage);
int (*get_real_power)(struct devfreq *df, u32 *power,
unsigned long freq, unsigned long voltage);
- unsigned long dyn_power_coeff;
};
#ifdef CONFIG_DEVFREQ_THERMAL
@@ -72,10 +48,13 @@ struct thermal_cooling_device *
of_devfreq_cooling_register(struct device_node *np, struct devfreq *df);
struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df);
void devfreq_cooling_unregister(struct thermal_cooling_device *dfc);
+struct thermal_cooling_device *
+devfreq_cooling_em_register(struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power);
#else /* !CONFIG_DEVFREQ_THERMAL */
-struct thermal_cooling_device *
+static inline struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct devfreq_cooling_power *dfc_power)
{
@@ -94,6 +73,13 @@ devfreq_cooling_register(struct devfreq *df)
return ERR_PTR(-EINVAL);
}
+static inline struct thermal_cooling_device *
+devfreq_cooling_em_register(struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline void
devfreq_cooling_unregister(struct thermal_cooling_device *dfc)
{
}
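As a usage sketch for the devfreq_cooling_em_register() interface added above (foo_ names are hypothetical, error handling trimmed):

        #include <linux/devfreq_cooling.h>

        static int foo_get_real_power(struct devfreq *df, u32 *power,
                                      unsigned long freq, unsigned long voltage)
        {
                *power = foo_read_power_counter();      /* hypothetical helper */
                return 0;
        }

        static struct devfreq_cooling_power foo_cooling_power = {
                .get_real_power = foo_get_real_power,
        };

        /* In probe, after devfreq_add_device() returned df:
         *      cdev = devfreq_cooling_em_register(df, &foo_cooling_power);
         *      if (IS_ERR(cdev))
         *              return PTR_ERR(cdev);
         */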
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 4f2b3b2076c4..38f625af6ab4 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -10,14 +11,17 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
struct dm_dev;
struct dm_target;
struct dm_table;
+struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
+enum dax_access_mode;
/*
* Type of table, mapped_device's mempool and request_queue
@@ -26,11 +30,10 @@ enum dm_queue_mode {
DM_TYPE_NONE = 0,
DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2,
- DM_TYPE_MQ_REQUEST_BASED = 3,
- DM_TYPE_DAX_BIO_BASED = 4,
+ DM_TYPE_DAX_BIO_BASED = 3,
};
-typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
+typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;
union map_info {
void *ptr;
@@ -61,7 +64,8 @@ typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
struct request *rq,
union map_info *map_context,
struct request **clone);
-typedef void (*dm_release_clone_request_fn) (struct request *clone);
+typedef void (*dm_release_clone_request_fn) (struct request *clone,
+ union map_info *map_context);
/*
* Returns:
@@ -84,12 +88,32 @@ typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
- unsigned status_flags, char *result, unsigned maxlen);
+ unsigned int status_flags, char *result, unsigned int maxlen);
-typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
+typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen);
-typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
- struct block_device **bdev, fmode_t *mode);
+/*
+ * Called with *forward == true. If it remains true, the ioctl should be
+ * forwarded to bdev. If it is reset to false, the target already fully handled
+ * the ioctl and the return value is the return value for the whole ioctl.
+ */
+typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+typedef int (*dm_report_zones_fn) (struct dm_target *ti,
+ struct dm_report_zones_args *args,
+ unsigned int nr_zones);
+#else
+/*
+ * Define dm_report_zones_fn so that targets can assign to NULL if
+ * CONFIG_BLK_DEV_ZONED disabled. Otherwise each target needs to do
+ * awkward #ifdefs in their target_type, etc.
+ */
+typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
+#endif
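The *forward contract described above can be illustrated by a pass-through target; a minimal sketch, assuming a hypothetical foo_target context that holds the single underlying dm_dev:

        struct foo_target {
                struct dm_dev *dev;     /* hypothetical per-target context */
        };

        static int foo_prepare_ioctl(struct dm_target *ti,
                                     struct block_device **bdev,
                                     unsigned int cmd, unsigned long arg,
                                     bool *forward)
        {
                struct foo_target *ft = ti->private;

                *bdev = ft->dev->bdev;  /* dm core forwards the ioctl here */
                return 0;               /* *forward is left true */
        }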
/*
* These iteration functions are typically used to check (and combine)
@@ -131,33 +155,43 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
* >= 0 : the number of bytes accessible at the address
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ unsigned long *pfn);
+typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
+ size_t nr_pages);
+
+/*
+ * Returns:
+ * != 0 : number of bytes transferred
+ * 0 : recovery write failed
+ */
+typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
-typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
- size_t size);
-#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct file *bdev_file;
struct dax_device *dax_dev;
- fmode_t mode;
+ blk_mode_t mode;
char name[16];
};
-dev_t dm_get_dev_t(const char *path);
-
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
*/
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
/*
+ * Helper function for getting devices
+ */
+int dm_devt_from_path(const char *path, dev_t *dev_p);
+
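A typical constructor/destructor pair built on dm_get_device()/dm_put_device(), continuing the hypothetical foo_target sketch above (kzalloc from <linux/slab.h>; ti->error strings elided):

        static int foo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        {
                struct foo_target *ft;
                int r;

                ft = kzalloc(sizeof(*ft), GFP_KERNEL);
                if (!ft)
                        return -ENOMEM;

                /* argv[0] is the backing device path from the table line */
                r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                                  &ft->dev);
                if (r) {
                        kfree(ft);
                        return r;
                }
                ti->private = ft;
                return 0;
        }

        static void foo_dtr(struct dm_target *ti)
        {
                struct foo_target *ft = ti->private;

                dm_put_device(ti, ft->dev);
                kfree(ft);
        }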
+/*
* Information about a target type
*/
@@ -165,7 +199,7 @@ struct target_type {
uint64_t features;
const char *name;
struct module *module;
- unsigned version[3];
+ unsigned int version[3];
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
@@ -181,12 +215,13 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_prepare_ioctl_fn prepare_ioctl;
+ dm_report_zones_fn report_zones;
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
- dm_dax_copy_from_iter_fn dax_copy_from_iter;
- dm_dax_flush_fn dax_flush;
+ dm_dax_zero_page_range_fn dax_zero_page_range;
+ dm_dax_recovery_write_fn dax_recovery_write;
/* For internal device-mapper use. */
struct list_head list;
@@ -224,14 +259,6 @@ struct target_type {
#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
/*
- * Some targets need to be sent the same WRITE bio severals times so
- * that they can send copies of it to different devices. This function
- * examines any supplied bio and returns the number of copies of it the
- * target requires.
- */
-typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
-
-/*
* A target implements own bio data integrity.
*/
#define DM_TARGET_INTEGRITY 0x00000010
@@ -244,10 +271,43 @@ typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio)
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
/*
- * Indicates that a target supports host-managed zoned block devices.
+ * Indicates support for zoned block devices:
+ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
+ * block devices but does not support combining different zoned models.
+ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
+ * devices with different zoned models.
*/
+#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM 0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
+#else
+#define DM_TARGET_ZONED_HM 0x00000000
+#define dm_target_supports_zoned_hm(type) (false)
+#endif
+
+/*
+ * A target handles REQ_NOWAIT
+ */
+#define DM_TARGET_NOWAIT 0x00000080
+#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
+
+/*
+ * A target supports passing through inline crypto support.
+ */
+#define DM_TARGET_PASSES_CRYPTO 0x00000100
+#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
+
+#ifdef CONFIG_BLK_DEV_ZONED
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000200
+#define dm_target_supports_mixed_zoned_model(type) \
+ ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
+#else
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000000
+#define dm_target_supports_mixed_zoned_model(type) (false)
+#endif
+
+#define DM_TARGET_ATOMIC_WRITES 0x00000400
+#define dm_target_supports_atomic_writes(type) ((type)->features & DM_TARGET_ATOMIC_WRITES)
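These feature bits are advertised in a target's target_type. For example, a hypothetical pass-through target that handles REQ_NOWAIT and passes through inline crypto might declare (the foo_* handlers are the ones sketched around these hunks):

        static struct target_type foo_target = {
                .name     = "foo",
                .version  = {1, 0, 0},
                .features = DM_TARGET_NOWAIT | DM_TARGET_PASSES_CRYPTO,
                .module   = THIS_MODULE,
                .ctr      = foo_ctr,
                .dtr      = foo_dtr,
                .map      = foo_map,
        };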
struct dm_target {
struct dm_table *table;
@@ -268,38 +328,31 @@ struct dm_target {
* It is a responsibility of the target driver to remap these bios
* to the real underlying devices.
*/
- unsigned num_flush_bios;
+ unsigned int num_flush_bios;
/*
* The number of discard bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_discard_bios;
+ unsigned int num_discard_bios;
/*
- * The number of WRITE SAME bios that will be submitted to the target.
+ * The number of secure erase bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_write_same_bios;
+ unsigned int num_secure_erase_bios;
/*
* The number of WRITE ZEROES bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_write_zeroes_bios;
+ unsigned int num_write_zeroes_bios;
/*
* The minimum number of extra bytes allocated in each io for the
* target to use.
*/
- unsigned per_io_data_size;
-
- /*
- * If defined, this function is called to find out how many
- * duplicate bios should be sent to the target when writing
- * data.
- */
- dm_num_write_bios_fn num_write_bios;
+ unsigned int per_io_data_size;
/* target specific data */
void *private;
@@ -320,47 +373,68 @@ struct dm_target {
bool discards_supported:1;
/*
- * Set if the target required discard bios to be split
- * on max_io_len boundary.
+ * Automatically set by dm-core if this target supports
+ * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated
+ * using REQ_OP_ZONE_RESET. Target drivers must not set this manually.
*/
- bool split_discard_bios:1;
-};
+ bool zone_reset_all_supported:1;
-/* Each target can link one of these into the table */
-struct dm_target_callbacks {
- struct list_head list;
- int (*congested_fn) (struct dm_target_callbacks *, int);
-};
+ /*
+ * Set if this target requires that discards be split on
+ * 'max_discard_sectors' boundaries.
+ */
+ bool max_discard_granularity:1;
-/*
- * For bio-based dm.
- * One of these is allocated for each bio.
- * This structure shouldn't be touched directly by target drivers.
- * It is here so that we can inline dm_per_bio_data and
- * dm_bio_from_per_bio_data
- */
-struct dm_target_io {
- struct dm_io *io;
- struct dm_target *ti;
- unsigned target_bio_nr;
- unsigned *len_ptr;
- struct bio clone;
-};
+ /*
+ * Set if we need to limit the number of in-flight bios when swapping.
+ */
+ bool limit_swap_bios:1;
-static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
-{
- return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
-}
+ /*
+ * Set if this target implements a zoned device and needs emulation of
+ * zone append operations using regular writes.
+ */
+ bool emulate_zone_append:1;
-static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
-{
- return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
-}
+ /*
+ * Set if the target will submit IO using dm_submit_bio_remap()
+ * after returning DM_MAPIO_SUBMITTED from its map function.
+ */
+ bool accounts_remapped_io:1;
-static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
-{
- return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
-}
+ /*
+ * Set if the target will submit the DM bio without first calling
+ * bio_set_dev(). NOTE: ideally a target should _not_ need this.
+ */
+ bool needs_bio_set_dev:1;
+
+ /*
+ * Set if the target supports flush optimization. If all the targets in
+ * a table have flush_bypasses_map set, the dm core will not send
+ * flushes to the targets via a ->map method. It will iterate over
+ * dm_table->devices and send flushes to the devices directly. This
+ * optimization reduces the number of flushes being sent when multiple
+ * targets in a table use the same underlying device.
+ *
+ * This optimization may be enabled on targets that just pass the
+ * flushes to the underlying devices without performing any other
+ * actions on the flush request. Currently, dm-linear and dm-stripe
+ * support it.
+ */
+ bool flush_bypasses_map:1;
+
+ /*
+ * Set if the target calls bio_integrity_alloc on bios received
+ * in the map method.
+ */
+ bool mempool_needs_integrity:1;
+};
+
+void *dm_per_bio_data(struct bio *bio, size_t data_size);
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
+unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
+
+u64 dm_start_time_ns_from_clone(struct bio *bio);
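dm_per_bio_data() returns the per_io_data_size bytes a target reserved for each clone bio (set in the constructor, e.g. ti->per_io_data_size = sizeof(struct foo_io)); a minimal sketch with a hypothetical foo_io:

        struct foo_io {
                sector_t orig_sector;   /* hypothetical per-bio bookkeeping */
        };

        static int foo_map(struct dm_target *ti, struct bio *bio)
        {
                struct foo_io *io = dm_per_bio_data(bio, sizeof(struct foo_io));

                io->orig_sector = bio->bi_iter.bi_sector;
                /* ... remap bio onto the backing device ... */
                return DM_MAPIO_REMAPPED;
        }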
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -369,7 +443,7 @@ void dm_unregister_target(struct target_type *t);
* Target argument parsing.
*/
struct dm_arg_set {
- unsigned argc;
+ unsigned int argc;
char **argv;
};
@@ -378,8 +452,8 @@ struct dm_arg_set {
* the error message to use if the number is found to be outside that range.
*/
struct dm_arg {
- unsigned min;
- unsigned max;
+ unsigned int min;
+ unsigned int max;
char *error;
};
@@ -387,16 +461,16 @@ struct dm_arg {
* Validate the next argument, either returning it as *value or, if invalid,
* returning -EINVAL and setting *error.
*/
-int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *value, char **error);
+int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned int *value, char **error);
/*
* Process the next argument as the start of a group containing between
* arg->min and arg->max further arguments. Either return the size as
* *num_args or, if invalid, return -EINVAL and set *error.
*/
-int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *num_args, char **error);
+int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned int *num_args, char **error);
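Argument parsing with these helpers usually follows the pattern below; a sketch with hypothetical bounds and error text:

        static int foo_parse_features(struct dm_arg_set *as, struct dm_target *ti)
        {
                static const struct dm_arg _args[] = {
                        {0, 4, "Invalid number of feature arguments"},
                };
                unsigned int argc;
                int r;

                /* reads the count argument and validates it against min/max */
                r = dm_read_arg_group(_args, as, &argc, &ti->error);
                if (r)
                        return r;

                while (argc--) {
                        const char *arg = dm_shift_arg(as);
                        /* ... match and apply each feature string ... */
                }
                return 0;
        }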
/*
* Return the current argument and shift to the next.
@@ -406,12 +480,14 @@ const char *dm_shift_arg(struct dm_arg_set *as);
/*
* Move through num_args arguments.
*/
-void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
+void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Functions for creating and manipulating mapped devices.
* Drop the reference with dm_put when you finish with the object.
- *---------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
/*
* DM_ANY_MINOR chooses the next available minor number.
@@ -436,7 +512,7 @@ void *dm_get_mdptr(struct mapped_device *md);
/*
* A device can still be used while suspended, but I/O is deferred.
*/
-int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
+int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);
/*
@@ -454,13 +530,40 @@ const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
+int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
- sector_t start);
-union map_info *dm_get_rq_mapinfo(struct request *rq);
+void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+struct dm_report_zones_args {
+ struct dm_target *tgt;
+ struct gendisk *disk;
+ sector_t next_sector;
+
+ unsigned int zone_idx;
+
+ /* for block layer ->report_zones */
+ struct blk_report_zones_args *rep_args;
+
+ /* for internal users */
+ report_zones_cb cb;
+ void *data;
+
+ /* must be filled by ->report_zones before calling dm_report_zones_cb */
+ sector_t start;
+};
+int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
+ struct dm_report_zones_args *args, unsigned int nr_zones);
+#endif /* CONFIG_BLK_DEV_ZONED */
-struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+/*
+ * Device mapper functions to parse and create devices specified by the
+ * parameter "dm-mod.create="
+ */
+int __init dm_early_create(struct dm_ioctl *dmi,
+ struct dm_target_spec **spec_array,
+ char **target_params_array);
/*
* Geometry functions.
@@ -468,15 +571,17 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Functions for manipulating device-mapper tables.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* First create an empty table.
*/
-int dm_table_create(struct dm_table **result, fmode_t mode,
- unsigned num_targets, struct mapped_device *md);
+int dm_table_create(struct dm_table **result, blk_mode_t mode,
+ unsigned int num_targets, struct mapped_device *md);
/*
* Then call this once for each target.
@@ -485,11 +590,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
sector_t start, sector_t len, char *params);
/*
- * Target_ctr should call this if it needs to add any callbacks.
- */
-void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
-
-/*
* Target can use this to set the table's type.
* Can only ever be called from a target's ctr.
* Useful for "hybrid" target (supports both bio-based
@@ -503,6 +603,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
int dm_table_complete(struct dm_table *t);
/*
+ * Destroy the table when finished.
+ */
+void dm_table_destroy(struct dm_table *t);
+
+/*
* Target may require that it is never sent I/O larger than len.
*/
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
@@ -518,9 +623,9 @@ void dm_sync_table(struct mapped_device *md);
* Queries
*/
sector_t dm_table_get_size(struct dm_table *t);
-unsigned int dm_table_get_num_targets(struct dm_table *t);
-fmode_t dm_table_get_mode(struct dm_table *t);
+blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
+const char *dm_table_device_name(struct dm_table *t);
/*
* Trigger an event.
@@ -540,47 +645,56 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
struct dm_table *t);
/*
- * A wrapper around vmalloc.
+ * Table blk_crypto_profile functions
*/
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Macros.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
#define DM_NAME "device-mapper"
-#define DM_RATELIMIT(pr_func, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- \
- if (__ratelimit(&rs)) \
- pr_func(DM_FMT(fmt), ##__VA_ARGS__); \
-} while (0)
-
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
+#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
+#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
+#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#ifdef CONFIG_DM_DEBUG
-#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
-#else
-#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
+#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
+#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
+
+#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))
-#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
- 0 : scnprintf(result + sz, maxlen - sz, x))
+#define DMEMIT_TARGET_NAME_VERSION(y) \
+ DMEMIT("target_name=%s,target_version=%u.%u.%u", \
+ (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
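A status method emits its text through DMEMIT(), which expects local variables named sz and maxlen to be in scope; a sketch reusing the hypothetical foo_target context:

        static void foo_status(struct dm_target *ti, status_type_t type,
                               unsigned int status_flags, char *result,
                               unsigned int maxlen)
        {
                struct foo_target *ft = ti->private;
                unsigned int sz = 0;    /* consumed by DMEMIT() */

                switch (type) {
                case STATUSTYPE_INFO:
                case STATUSTYPE_TABLE:
                        DMEMIT("%s", ft->dev->name);
                        break;
                case STATUSTYPE_IMA:
                        DMEMIT_TARGET_NAME_VERSION(ti->type);
                        break;
                }
        }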
-#define SECTOR_SHIFT 9
+/**
+ * module_dm() - Helper macro for DM targets that don't do anything
+ * special in their module_init and module_exit.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * @name: DM target's name
+ */
+#define module_dm(name) \
+static int __init dm_##name##_init(void) \
+{ \
+ return dm_register_target(&(name##_target)); \
+} \
+module_init(dm_##name##_init) \
+static void __exit dm_##name##_exit(void) \
+{ \
+ dm_unregister_target(&(name##_target)); \
+} \
+module_exit(dm_##name##_exit)
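So a target whose target_type variable is named foo_target (as in the sketch above) registers itself with a single line:

        module_dm(foo); /* expands to module_init/module_exit handlers
                         * that register and unregister &foo_target */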
/*
* Definitions of return values from target end_io function.
@@ -588,6 +702,7 @@ do { \
#define DM_ENDIO_DONE 0
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2
+#define DM_ENDIO_DELAY_REQUEUE 3
/*
* Definitions of return values from target map function.
@@ -595,7 +710,7 @@ do { \
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
-#define DM_MAPIO_DELAY_REQUEUE 3
+#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL 4
#define dm_sector_div64(x, y)( \
@@ -624,16 +739,13 @@ do { \
*/
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
-#define dm_array_too_big(fixed, obj, num) \
- ((num) > (UINT_MAX - (fixed)) / (obj))
-
/*
* Sector offset taken relative to the start of the target instead of
* relative to the start of the device.
*/
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
-static inline sector_t to_sector(unsigned long n)
+static inline sector_t to_sector(unsigned long long n)
{
return (n >> SECTOR_SHIFT);
}
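dm_target_offset() is the usual remapping primitive in map methods: it rebases a sector from the start of the target onto the underlying device, while to_sector() converts bytes to 512-byte sectors (to_sector(1 << 20) == 2048). A sketch of the remap step, with ft->start as a hypothetical table-supplied offset into the backing device:

        static sector_t foo_map_sector(struct dm_target *ti, sector_t bi_sector)
        {
                struct foo_target *ft = ti->private;

                /* sector relative to the target start, rebased onto the device */
                return ft->start + dm_target_offset(ti, bi_sector);
        }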
diff --git a/include/linux/device.h b/include/linux/device.h
index c6f27207dbe8..0be95294b6e6 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* device.h - generic, centralized driver model
*
@@ -5,14 +6,14 @@
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2009 Novell Inc.
*
- * This file is released under the GPLv2
- *
- * See Documentation/driver-model/ for more information.
+ * See Documentation/driver-api/driver-model/ for more information.
*/
#ifndef _DEVICE_H_
#define _DEVICE_H_
+#include <linux/dev_printk.h>
+#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
@@ -21,12 +22,15 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
-#include <linux/pinctrl/devinfo.h>
#include <linux/pm.h>
#include <linux/atomic.h>
-#include <linux/ratelimit.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
+#include <linux/device/bus.h>
+#include <linux/device/class.h>
+#include <linux/device/devres.h>
+#include <linux/device/driver.h>
+#include <linux/cleanup.h>
#include <asm/device.h>
struct device;
@@ -36,304 +40,17 @@ struct driver_private;
struct module;
struct class;
struct subsys_private;
-struct bus_type;
struct device_node;
struct fwnode_handle;
-struct iommu_ops;
struct iommu_group;
-struct iommu_fwspec;
-
-struct bus_attribute {
- struct attribute attr;
- ssize_t (*show)(struct bus_type *bus, char *buf);
- ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
-};
-
-#define BUS_ATTR(_name, _mode, _show, _store) \
- struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
-#define BUS_ATTR_RW(_name) \
- struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
-#define BUS_ATTR_RO(_name) \
- struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
-
-extern int __must_check bus_create_file(struct bus_type *,
- struct bus_attribute *);
-extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
-
-/**
- * struct bus_type - The bus type of the device
- *
- * @name: The name of the bus.
- * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
- * @dev_root: Default device to use as the parent.
- * @bus_groups: Default attributes of the bus.
- * @dev_groups: Default attributes of the devices on the bus.
- * @drv_groups: Default attributes of the device drivers on the bus.
- * @match: Called, perhaps multiple times, whenever a new device or driver
- * is added for this bus. It should return a positive value if the
- * given device can be handled by the given driver and zero
- * otherwise. It may also return error code if determining that
- * the driver supports the device is not possible. In case of
- * -EPROBE_DEFER it will queue the device for deferred probing.
- * @uevent: Called when a device is added, removed, or a few other things
- * that generate uevents to add the environment variables.
- * @probe: Called when a new device or driver add to this bus, and callback
- * the specific driver's probe to initial the matched device.
- * @remove: Called when a device removed from this bus.
- * @shutdown: Called at shut-down time to quiesce the device.
- *
- * @online: Called to put the device back online (after offlining it).
- * @offline: Called to put the device offline for hot-removal. May fail.
- *
- * @suspend: Called when a device on this bus wants to go to sleep mode.
- * @resume: Called to bring a device on this bus out of sleep mode.
- * @num_vf: Called to find out how many virtual functions a device on this
- * bus supports.
- * @pm: Power management operations of this bus, callback the specific
- * device driver's pm-ops.
- * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
- * driver implementations to a bus and allow the driver to do
- * bus-specific setup
- * @p: The private data of the driver core, only the driver core can
- * touch this.
- * @lock_key: Lock class key for use by the lock validator
- *
- * A bus is a channel between the processor and one or more devices. For the
- * purposes of the device model, all devices are connected via a bus, even if
- * it is an internal, virtual, "platform" bus. Buses can plug into each other.
- * A USB controller is usually a PCI device, for example. The device model
- * represents the actual connections between buses and the devices they control.
- * A bus is represented by the bus_type structure. It contains the name, the
- * default attributes, the bus' methods, PM operations, and the driver core's
- * private data.
- */
-struct bus_type {
- const char *name;
- const char *dev_name;
- struct device *dev_root;
- const struct attribute_group **bus_groups;
- const struct attribute_group **dev_groups;
- const struct attribute_group **drv_groups;
-
- int (*match)(struct device *dev, struct device_driver *drv);
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
- int (*probe)(struct device *dev);
- int (*remove)(struct device *dev);
- void (*shutdown)(struct device *dev);
-
- int (*online)(struct device *dev);
- int (*offline)(struct device *dev);
-
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
-
- int (*num_vf)(struct device *dev);
-
- const struct dev_pm_ops *pm;
-
- const struct iommu_ops *iommu_ops;
-
- struct subsys_private *p;
- struct lock_class_key lock_key;
-};
-
-extern int __must_check bus_register(struct bus_type *bus);
-
-extern void bus_unregister(struct bus_type *bus);
-
-extern int __must_check bus_rescan_devices(struct bus_type *bus);
-
-/* iterator helpers for buses */
-struct subsys_dev_iter {
- struct klist_iter ki;
- const struct device_type *type;
-};
-void subsys_dev_iter_init(struct subsys_dev_iter *iter,
- struct bus_type *subsys,
- struct device *start,
- const struct device_type *type);
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
-
-int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
- int (*fn)(struct device *dev, void *data));
-struct device *bus_find_device(struct bus_type *bus, struct device *start,
- void *data,
- int (*match)(struct device *dev, void *data));
-struct device *bus_find_device_by_name(struct bus_type *bus,
- struct device *start,
- const char *name);
-struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
- struct device *hint);
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
- void *data, int (*fn)(struct device_driver *, void *));
-void bus_sort_breadthfirst(struct bus_type *bus,
- int (*compare)(const struct device *a,
- const struct device *b));
-/*
- * Bus notifiers: Get notified of addition/removal of devices
- * and binding/unbinding of drivers to devices.
- * In the long run, it should be a replacement for the platform
- * notify hooks.
- */
-struct notifier_block;
-
-extern int bus_register_notifier(struct bus_type *bus,
- struct notifier_block *nb);
-extern int bus_unregister_notifier(struct bus_type *bus,
- struct notifier_block *nb);
-
-/* All 4 notifers below get called with the target struct device *
- * as an argument. Note that those functions are likely to be called
- * with the device lock held in the core, so be careful.
- */
-#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
-#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
- bound */
-#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
- unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
- from the device */
-#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */
-
-extern struct kset *bus_get_kset(struct bus_type *bus);
-extern struct klist *bus_get_device_klist(struct bus_type *bus);
-
-/**
- * enum probe_type - device driver probe type to try
- * Device drivers may opt in for special handling of their
- * respective probe routines. This tells the core what to
- * expect and prefer.
- *
- * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
- * whether probed synchronously or asynchronously.
- * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which
- * probing order is not essential for booting the system may
- * opt into executing their probes asynchronously.
- * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
- * their probe routines to run synchronously with driver and
- * device registration (with the exception of -EPROBE_DEFER
- * handling - re-probing always ends up being done asynchronously).
- *
- * Note that the end goal is to switch the kernel to use asynchronous
- * probing by default, so annotating drivers with
- * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
- * to speed up boot process while we are validating the rest of the
- * drivers.
- */
-enum probe_type {
- PROBE_DEFAULT_STRATEGY,
- PROBE_PREFER_ASYNCHRONOUS,
- PROBE_FORCE_SYNCHRONOUS,
-};
-
-/**
- * struct device_driver - The basic device driver structure
- * @name: Name of the device driver.
- * @bus: The bus which the device of this driver belongs to.
- * @owner: The module owner.
- * @mod_name: Used for built-in modules.
- * @suppress_bind_attrs: Disables bind/unbind via sysfs.
- * @probe_type: Type of the probe (synchronous or asynchronous) to use.
- * @of_match_table: The open firmware table.
- * @acpi_match_table: The ACPI match table.
- * @probe: Called to query the existence of a specific device,
- * whether this driver can work with it, and bind the driver
- * to a specific device.
- * @remove: Called when the device is removed from the system to
- * unbind a device from this driver.
- * @shutdown: Called at shut-down time to quiesce the device.
- * @suspend: Called to put the device to sleep mode. Usually to a
- * low power state.
- * @resume: Called to bring a device from sleep mode.
- * @groups: Default attributes that get created by the driver core
- * automatically.
- * @pm: Power management operations of the device which matched
- * this driver.
- * @p: Driver core's private data, no one other than the driver
- * core can touch this.
- *
- * The device driver-model tracks all of the drivers known to the system.
- * The main reason for this tracking is to enable the driver core to match
- * up drivers with new devices. Once drivers are known objects within the
- * system, however, a number of other things become possible. Device drivers
- * can export information and configuration variables that are independent
- * of any specific device.
- */
-struct device_driver {
- const char *name;
- struct bus_type *bus;
-
- struct module *owner;
- const char *mod_name; /* used for built-in modules */
-
- bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
- enum probe_type probe_type;
-
- const struct of_device_id *of_match_table;
- const struct acpi_device_id *acpi_match_table;
-
- int (*probe) (struct device *dev);
- int (*remove) (struct device *dev);
- void (*shutdown) (struct device *dev);
- int (*suspend) (struct device *dev, pm_message_t state);
- int (*resume) (struct device *dev);
- const struct attribute_group **groups;
-
- const struct dev_pm_ops *pm;
-
- struct driver_private *p;
-};
-
-
-extern int __must_check driver_register(struct device_driver *drv);
-extern void driver_unregister(struct device_driver *drv);
-
-extern struct device_driver *driver_find(const char *name,
- struct bus_type *bus);
-extern int driver_probe_done(void);
-extern void wait_for_device_probe(void);
-
-
-/* sysfs interface for exporting driver attributes */
-
-struct driver_attribute {
- struct attribute attr;
- ssize_t (*show)(struct device_driver *driver, char *buf);
- ssize_t (*store)(struct device_driver *driver, const char *buf,
- size_t count);
-};
-
-#define DRIVER_ATTR(_name, _mode, _show, _store) \
- struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
-#define DRIVER_ATTR_RW(_name) \
- struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
-#define DRIVER_ATTR_RO(_name) \
- struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
-#define DRIVER_ATTR_WO(_name) \
- struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check driver_create_file(struct device_driver *driver,
- const struct driver_attribute *attr);
-extern void driver_remove_file(struct device_driver *driver,
- const struct driver_attribute *attr);
-
-extern int __must_check driver_for_each_device(struct device_driver *drv,
- struct device *start,
- void *data,
- int (*fn)(struct device *dev,
- void *));
-struct device *driver_find_device(struct device_driver *drv,
- struct device *start, void *data,
- int (*match)(struct device *dev, void *data));
+struct dev_pin_info;
+struct dev_iommu;
+struct msi_device_data;
/**
* struct subsys_interface - interfaces to device functions
* @name: name of the device function
- * @subsys: subsytem of the devices to attach to
+ * @subsys: subsystem of the devices to attach to
* @node: the list of functions registered at the subsystem
* @add_dev: device hookup to device function handler
* @remove_dev: device hookup to device function handler
@@ -345,7 +62,7 @@ struct device *driver_find_device(struct device_driver *drv,
*/
struct subsys_interface {
const char *name;
- struct bus_type *subsys;
+ const struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
@@ -354,181 +71,11 @@ struct subsys_interface {
int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);
-int subsys_system_register(struct bus_type *subsys,
+int subsys_system_register(const struct bus_type *subsys,
const struct attribute_group **groups);
-int subsys_virtual_register(struct bus_type *subsys,
+int subsys_virtual_register(const struct bus_type *subsys,
const struct attribute_group **groups);
-/**
- * struct class - device classes
- * @name: Name of the class.
- * @owner: The module owner.
- * @class_groups: Default attributes of this class.
- * @dev_groups: Default attributes of the devices that belong to the class.
- * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
- * @dev_uevent: Called when a device is added, removed from this class, or a
- * few other things that generate uevents to add the environment
- * variables.
- * @devnode: Callback to provide the devtmpfs.
- * @class_release: Called to release this class.
- * @dev_release: Called to release the device.
- * @suspend: Used to put the device to sleep mode, usually to a low power
- * state.
- * @resume: Used to bring the device from the sleep mode.
- * @shutdown_pre: Called at shut-down time before driver shutdown.
- * @ns_type: Callbacks so sysfs can detemine namespaces.
- * @namespace: Namespace of the device belongs to this class.
- * @pm: The default device power management operations of this class.
- * @p: The private data of the driver core, no one other than the
- * driver core can touch this.
- *
- * A class is a higher-level view of a device that abstracts out low-level
- * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
- * at the class level, they are all simply disks. Classes allow user space
- * to work with devices based on what they do, rather than how they are
- * connected or how they work.
- */
-struct class {
- const char *name;
- struct module *owner;
-
- const struct attribute_group **class_groups;
- const struct attribute_group **dev_groups;
- struct kobject *dev_kobj;
-
- int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode);
-
- void (*class_release)(struct class *class);
- void (*dev_release)(struct device *dev);
-
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
- int (*shutdown_pre)(struct device *dev);
-
- const struct kobj_ns_type_operations *ns_type;
- const void *(*namespace)(struct device *dev);
-
- const struct dev_pm_ops *pm;
-
- struct subsys_private *p;
-};
-
-struct class_dev_iter {
- struct klist_iter ki;
- const struct device_type *type;
-};
-
-extern struct kobject *sysfs_dev_block_kobj;
-extern struct kobject *sysfs_dev_char_kobj;
-extern int __must_check __class_register(struct class *class,
- struct lock_class_key *key);
-extern void class_unregister(struct class *class);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_register(class) \
-({ \
- static struct lock_class_key __key; \
- __class_register(class, &__key); \
-})
-
-struct class_compat;
-struct class_compat *class_compat_register(const char *name);
-void class_compat_unregister(struct class_compat *cls);
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-
-extern void class_dev_iter_init(struct class_dev_iter *iter,
- struct class *class,
- struct device *start,
- const struct device_type *type);
-extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
-extern void class_dev_iter_exit(struct class_dev_iter *iter);
-
-extern int class_for_each_device(struct class *class, struct device *start,
- void *data,
- int (*fn)(struct device *dev, void *data));
-extern struct device *class_find_device(struct class *class,
- struct device *start, const void *data,
- int (*match)(struct device *, const void *));
-
-struct class_attribute {
- struct attribute attr;
- ssize_t (*show)(struct class *class, struct class_attribute *attr,
- char *buf);
- ssize_t (*store)(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count);
-};
-
-#define CLASS_ATTR_RW(_name) \
- struct class_attribute class_attr_##_name = __ATTR_RW(_name)
-#define CLASS_ATTR_RO(_name) \
- struct class_attribute class_attr_##_name = __ATTR_RO(_name)
-#define CLASS_ATTR_WO(_name) \
- struct class_attribute class_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check class_create_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
-extern void class_remove_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
-
-static inline int __must_check class_create_file(struct class *class,
- const struct class_attribute *attr)
-{
- return class_create_file_ns(class, attr, NULL);
-}
-
-static inline void class_remove_file(struct class *class,
- const struct class_attribute *attr)
-{
- return class_remove_file_ns(class, attr, NULL);
-}
-
-/* Simple class attribute that is just a static string */
-struct class_attribute_string {
- struct class_attribute attr;
- char *str;
-};
-
-/* Currently read-only only */
-#define _CLASS_ATTR_STRING(_name, _mode, _str) \
- { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
-#define CLASS_ATTR_STRING(_name, _mode, _str) \
- struct class_attribute_string class_attr_##_name = \
- _CLASS_ATTR_STRING(_name, _mode, _str)
-
-extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
- char *buf);
-
-struct class_interface {
- struct list_head node;
- struct class *class;
-
- int (*add_dev) (struct device *, struct class_interface *);
- void (*remove_dev) (struct device *, struct class_interface *);
-};
-
-extern int __must_check class_interface_register(struct class_interface *);
-extern void class_interface_unregister(struct class_interface *);
-
-extern struct class * __must_check __class_create(struct module *owner,
- const char *name,
- struct lock_class_key *key);
-extern void class_destroy(struct class *cls);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_create(owner, name) \
-({ \
- static struct lock_class_key __key; \
- __class_create(owner, name, &__key); \
-})
-
/*
* The type of device, "struct device" is embedded in. A class
* or bus can contain devices of different types
@@ -541,15 +88,20 @@ extern void class_destroy(struct class *cls);
struct device_type {
const char *name;
const struct attribute_group **groups;
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode,
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid);
void (*release)(struct device *dev);
const struct dev_pm_ops *pm;
};
-/* interface for exporting device attributes */
+/**
+ * struct device_attribute - Interface for exporting device attributes.
+ * @attr: sysfs attribute definition.
+ * @show: Show handler.
+ * @store: Store handler.
+ */
struct device_attribute {
struct attribute attr;
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
@@ -558,6 +110,11 @@ struct device_attribute {
const char *buf, size_t count);
};
+/**
+ * struct dev_ext_attribute - Exported device attribute with extra context.
+ * @attr: Exported device attribute.
+ * @var: Pointer to context.
+ */
struct dev_ext_attribute {
struct device_attribute attr;
void *var;
@@ -575,151 +132,154 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
+ssize_t device_show_string(struct device *dev, struct device_attribute *attr,
+ char *buf);
+/**
+ * DEVICE_ATTR - Define a device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Convenience macro for defining a struct device_attribute.
+ *
+ * For example, ``DEVICE_ATTR(foo, 0644, foo_show, foo_store);`` expands to:
+ *
+ * .. code-block:: c
+ *
+ * struct device_attribute dev_attr_foo = {
+ * .attr = { .name = "foo", .mode = 0644 },
+ * .show = foo_show,
+ * .store = foo_store,
+ * };
+ */
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_PREALLOC - Define a preallocated device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Like DEVICE_ATTR(), but ``SYSFS_PREALLOC`` is set on @_mode.
+ */
+#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = \
+ __ATTR_PREALLOC(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_RW - Define a read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0644, @_show is <_name>_show,
+ * and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RW - Define an admin-only read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RW(), but @_mode is 0600.
+ */
+#define DEVICE_ATTR_ADMIN_RW(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
+
+/**
+ * DEVICE_ATTR_RO - Define a readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0444 and @_show is <_name>_show.
+ */
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RO - Define an admin-only readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RO(), but @_mode is 0400.
+ */
+#define DEVICE_ATTR_ADMIN_RO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
+
+/**
+ * DEVICE_ATTR_WO - Define an admin-only writable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0200 and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
+
+/**
+ * DEVICE_ULONG_ATTR - Define a device attribute backed by an unsigned long.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of unsigned long.
+ *
+ * Like DEVICE_ATTR(), but @_show and @_store are automatically provided
+ * such that reads and writes to the attribute from userspace affect @_var.
+ */
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+
+/**
+ * DEVICE_INT_ATTR - Define a device attribute backed by an int.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of int.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is an int.
+ */
#define DEVICE_INT_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+
+/**
+ * DEVICE_BOOL_ATTR - Define a device attribute backed by a bool.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of bool.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is a bool.
+ */
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
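As a concrete instance of the <_name>_show/<_name>_store naming convention these helpers rely on (sketch; foo_priv and the drvdata layout are hypothetical):

        static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                                char *buf)
        {
                struct foo_priv *p = dev_get_drvdata(dev);

                return sysfs_emit(buf, "%d\n", p->foo);
        }

        static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
                                 const char *buf, size_t count)
        {
                struct foo_priv *p = dev_get_drvdata(dev);
                int ret;

                ret = kstrtoint(buf, 0, &p->foo);
                return ret ? ret : count;
        }
        static DEVICE_ATTR_RW(foo);     /* uses foo_show and foo_store above */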
-#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
- struct device_attribute dev_attr_##_name = \
- __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
-
-extern int device_create_file(struct device *device,
- const struct device_attribute *entry);
-extern void device_remove_file(struct device *dev,
- const struct device_attribute *attr);
-extern bool device_remove_file_self(struct device *dev,
- const struct device_attribute *attr);
-extern int __must_check device_create_bin_file(struct device *dev,
- const struct bin_attribute *attr);
-extern void device_remove_bin_file(struct device *dev,
- const struct bin_attribute *attr);
-
-/* device resource management */
-typedef void (*dr_release_t)(struct device *dev, void *res);
-typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
-
-#ifdef CONFIG_DEBUG_DEVRES
-extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid, const char *name) __malloc;
-#define devres_alloc(release, size, gfp) \
- __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
-#define devres_alloc_node(release, size, gfp, nid) \
- __devres_alloc_node(release, size, gfp, nid, #release)
-#else
-extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid) __malloc;
-static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
-{
- return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
-}
-#endif
-
-extern void devres_for_each_res(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data,
- void (*fn)(struct device *, void *, void *),
- void *data);
-extern void devres_free(void *res);
-extern void devres_add(struct device *dev, void *res);
-extern void *devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern void *devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data);
-extern void *devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern int devres_destroy(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern int devres_release(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-
-/* devres group */
-extern void * __must_check devres_open_group(struct device *dev, void *id,
- gfp_t gfp);
-extern void devres_close_group(struct device *dev, void *id);
-extern void devres_remove_group(struct device *dev, void *id);
-extern int devres_release_group(struct device *dev, void *id);
-
-/* managed devm_k.alloc/kfree for device drivers */
-extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
-extern __printf(3, 0)
-char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
- va_list ap) __malloc;
-extern __printf(3, 4)
-char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
-static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
-{
- return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
-}
-static inline void *devm_kmalloc_array(struct device *dev,
- size_t n, size_t size, gfp_t flags)
-{
- if (size != 0 && n > SIZE_MAX / size)
- return NULL;
- return devm_kmalloc(dev, n * size, flags);
-}
-static inline void *devm_kcalloc(struct device *dev,
- size_t n, size_t size, gfp_t flags)
-{
- return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
-}
-extern void devm_kfree(struct device *dev, void *p);
-extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
-extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
- gfp_t gfp);
-
-extern unsigned long devm_get_free_pages(struct device *dev,
- gfp_t gfp_mask, unsigned int order);
-extern void devm_free_pages(struct device *dev, unsigned long addr);
-
-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
-
-/* allows to add/remove a custom action to devres stack */
-int devm_add_action(struct device *dev, void (*action)(void *), void *data);
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
-
-static inline int devm_add_action_or_reset(struct device *dev,
- void (*action)(void *), void *data)
-{
- int ret;
-
- ret = devm_add_action(dev, action, data);
- if (ret)
- action(data);
-
- return ret;
-}
/**
- * devm_alloc_percpu - Resource-managed alloc_percpu
- * @dev: Device to allocate per-cpu memory for
- * @type: Type to allocate per-cpu memory for
+ * DEVICE_STRING_ATTR_RO - Define a device attribute backed by a r/o string.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of string.
*
- * Managed alloc_percpu. Per-cpu memory allocated with this function is
- * automatically freed on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
+ * Like DEVICE_ULONG_ATTR(), but @_var is a string. Because the length of the
+ * string allocation is unknown, the attribute must be read-only.
*/
-#define devm_alloc_percpu(dev, type) \
- ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
- __alignof__(type)))
+#define DEVICE_STRING_ATTR_RO(_name, _mode, _var) \
+ struct dev_ext_attribute dev_attr_##_name = \
+ { __ATTR(_name, (_mode) & ~0222, device_show_string, NULL), (_var) }
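[Editor's note: a minimal usage sketch, with hypothetical driver and attribute names ("fw_rev"); not part of the patch.]

	/* Hypothetical example: expose a firmware revision string as a
	 * read-only sysfs attribute named "fw_rev". */
	static char *fw_rev = "1.2.3";
	static DEVICE_STRING_ATTR_RO(fw_rev, 0444, fw_rev);

	static int foo_probe(struct device *dev)
	{
		/* &dev_attr_fw_rev.attr is the struct device_attribute
		 * embedded in the dev_ext_attribute the macro defines. */
		return device_create_file(dev, &dev_attr_fw_rev.attr);
	}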
+
+#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = \
+ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
-void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
- size_t align);
-void devm_free_percpu(struct device *dev, void __percpu *pdata);
+int device_create_file(struct device *device,
+ const struct device_attribute *entry);
+void device_remove_file(struct device *dev,
+ const struct device_attribute *attr);
+bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr);
+int __must_check device_create_bin_file(struct device *dev,
+ const struct bin_attribute *attr);
+void device_remove_bin_file(struct device *dev,
+ const struct bin_attribute *attr);
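[Editor's note: continuing the hypothetical sketch above, attributes added in probe are typically removed on the teardown path.]

	static void foo_remove(struct device *dev)
	{
		device_remove_file(dev, &dev_attr_fw_rev.attr);
	}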
struct device_dma_parameters {
/*
@@ -727,6 +287,7 @@ struct device_dma_parameters {
* sg limitations.
*/
unsigned int max_segment_size;
+ unsigned int min_align_mask;
unsigned long segment_boundary_mask;
};
@@ -751,39 +312,26 @@ enum device_link_state {
/*
* Device link flags.
*
- * STATELESS: The core won't track the presence of supplier/consumer drivers.
- * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * STATELESS: The core will not remove this link automatically.
+ * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
* PM_RUNTIME: If set, the runtime PM framework will use this link.
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
+ * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
+ * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
+ * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
+ * INFERRED: Inferred from data (e.g. firmware) and not from driver actions.
+ * CYCLE: The link is part of a dependency cycle (tracked by the driver core).
*/
-#define DL_FLAG_STATELESS BIT(0)
-#define DL_FLAG_AUTOREMOVE BIT(1)
-#define DL_FLAG_PM_RUNTIME BIT(2)
-#define DL_FLAG_RPM_ACTIVE BIT(3)
-
-/**
- * struct device_link - Device link representation.
- * @supplier: The device on the supplier end of the link.
- * @s_node: Hook to the supplier device's list of links to consumers.
- * @consumer: The device on the consumer end of the link.
- * @c_node: Hook to the consumer device's list of links to suppliers.
- * @status: The state of the link (with respect to the presence of drivers).
- * @flags: Link flags.
- * @rpm_active: Whether or not the consumer device is runtime-PM-active.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
- */
-struct device_link {
- struct device *supplier;
- struct list_head s_node;
- struct device *consumer;
- struct list_head c_node;
- enum device_link_state status;
- u32 flags;
- bool rpm_active;
-#ifdef CONFIG_SRCU
- struct rcu_head rcu_head;
-#endif
-};
+#define DL_FLAG_STATELESS BIT(0)
+#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
+#define DL_FLAG_PM_RUNTIME BIT(2)
+#define DL_FLAG_RPM_ACTIVE BIT(3)
+#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
+#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
+#define DL_FLAG_MANAGED BIT(6)
+#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
+#define DL_FLAG_INFERRED BIT(8)
+#define DL_FLAG_CYCLE BIT(9)
/**
* enum dl_dev_state - Device driver presence tracking information.
@@ -800,18 +348,117 @@ enum dl_dev_state {
};
/**
+ * enum device_removable - Whether the device is removable. The criteria for a
+ * device to be classified as removable are determined by its subsystem or bus.
+ * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
+ * device (default).
+ * @DEVICE_REMOVABLE_UNKNOWN: Device location is unknown.
+ * @DEVICE_FIXED: Device is not removable by the user.
+ * @DEVICE_REMOVABLE: Device is removable by the user.
+ */
+enum device_removable {
+ DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
+ DEVICE_REMOVABLE_UNKNOWN,
+ DEVICE_FIXED,
+ DEVICE_REMOVABLE,
+};
+
+/**
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
+ * @defer_sync: Hook to global list of devices that have deferred sync_state.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
+ struct list_head defer_sync;
enum dl_dev_state status;
};
/**
+ * struct dev_msi_info - Device data related to MSI
+ * @domain: The MSI interrupt domain associated to the device
+ * @data: Pointer to MSI device data
+ */
+struct dev_msi_info {
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct irq_domain *domain;
+ struct msi_device_data *data;
+#endif
+};
+
+/**
+ * enum device_physical_location_panel - Describes which panel surface of the
+ * system's housing the device connection point resides on.
+ * @DEVICE_PANEL_TOP: Device connection point is on the top panel.
+ * @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel.
+ * @DEVICE_PANEL_LEFT: Device connection point is on the left panel.
+ * @DEVICE_PANEL_RIGHT: Device connection point is on the right panel.
+ * @DEVICE_PANEL_FRONT: Device connection point is on the front panel.
+ * @DEVICE_PANEL_BACK: Device connection point is on the back panel.
+ * @DEVICE_PANEL_UNKNOWN: The panel with device connection point is unknown.
+ */
+enum device_physical_location_panel {
+ DEVICE_PANEL_TOP,
+ DEVICE_PANEL_BOTTOM,
+ DEVICE_PANEL_LEFT,
+ DEVICE_PANEL_RIGHT,
+ DEVICE_PANEL_FRONT,
+ DEVICE_PANEL_BACK,
+ DEVICE_PANEL_UNKNOWN,
+};
+
+/**
+ * enum device_physical_location_vertical_position - Describes vertical
+ * position of the device connection point on the panel surface.
+ * @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel.
+ * @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel.
+ */
+enum device_physical_location_vertical_position {
+ DEVICE_VERT_POS_UPPER,
+ DEVICE_VERT_POS_CENTER,
+ DEVICE_VERT_POS_LOWER,
+};
+
+/**
+ * enum device_physical_location_horizontal_position - Describes horizontal
+ * position of the device connection point on the panel surface.
+ * @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel.
+ * @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel.
+ */
+enum device_physical_location_horizontal_position {
+ DEVICE_HORI_POS_LEFT,
+ DEVICE_HORI_POS_CENTER,
+ DEVICE_HORI_POS_RIGHT,
+};
+
+/**
+ * struct device_physical_location - Device data related to physical location
+ * of the device connection point.
+ * @panel: Panel surface of the system's housing that the device connection
+ * point resides on.
+ * @vertical_position: Vertical position of the device connection point within
+ * the panel.
+ * @horizontal_position: Horizontal position of the device connection point
+ * within the panel.
+ * @dock: Set if the device connection point resides in a docking station or
+ * port replicator.
+ * @lid: Set if this device connection point resides on the lid of a laptop
+ * system.
+ */
+struct device_physical_location {
+ enum device_physical_location_panel panel;
+ enum device_physical_location_vertical_position vertical_position;
+ enum device_physical_location_horizontal_position horizontal_position;
+ bool dock;
+ bool lid;
+};
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -838,26 +485,32 @@ struct dev_links_info {
* @driver_data: Private pointer for driver specific info.
* @links: Links to suppliers and consumers of this device.
* @power: For device power management.
- * See Documentation/power/admin-guide/devices.rst for details.
+ * See Documentation/driver-api/pm/devices.rst for details.
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
+ * @em_pd: device's energy model performance domain
* @pins: For device pin management.
- * See Documentation/driver-api/pinctl.rst for details.
- * @msi_list: Hosts MSI descriptors
- * @msi_domain: The generic MSI domain this device is using.
+ * See Documentation/driver-api/pin-control.rst for details.
+ * @msi: MSI related data
* @numa_node: NUMA node this device is close to.
* @dma_ops: DMA mapping operations for this device.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
 * such as descriptors.
- * @dma_pfn_offset: offset of DMA memory range relatively of RAM
+ * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
+ * DMA limit than the device itself supports.
+ * @dma_range_map: map for DMA memory ranges relative to that of RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
+ * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
+ * @dma_io_tlb_pools: List of transient swiotlb memory pools.
+ * @dma_io_tlb_lock: Protects changes to the list of active pools.
+ * @dma_uses_io_tlb: %true if device has used the software IO TLB.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -865,19 +518,39 @@ struct dev_links_info {
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
* @devres_head: The resources list of the device.
- * @knode_class: The node used to add the device to the class list.
* @class: The class of the device.
* @groups: Optional attribute groups.
* @release: Callback to free the device after all references have
* gone away. This should be set by the allocator of the
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
- * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
+ * @iommu: Per device generic IOMMU runtime data
+ * @physical_location: Describes physical location of the device connection
+ * point in the system housing.
+ * @removable: Whether the device can be removed from the system. This
+ * should be set by the subsystem / bus driver that discovered
+ * the device.
*
* @offline_disabled: If set, the device is permanently online.
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
+ * @state_synced: The hardware state of this device has been synced to match
+ * the software state of this device by calling the driver/bus
+ * sync_state() callback.
+ * @can_match: The device has matched with a driver at least once or it is in
+ * a bus (like AMBA) which can't check for matching drivers until
+ * other devices probe successfully.
+ * @dma_coherent: this particular device is dma coherent, even if the
+ * architecture supports non-coherent devices.
+ * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
+ * streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
+ * and optionally (if the coherent mask is large enough) also
+ * for dma allocations. This flag is managed by the dma ops
+ * instance from ->dma_supported.
+ * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
+ * @dma_iommu: Device is using default IOMMU implementation for DMA and
+ * doesn't rely on dma_ops structure.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -888,94 +561,164 @@ struct dev_links_info {
* a higher-level representation of the device.
*/
struct device {
+ struct kobject kobj;
struct device *parent;
struct device_private *p;
- struct kobject kobj;
const char *init_name; /* initial name of the device */
const struct device_type *type;
- struct mutex mutex; /* mutex to synchronize calls to
- * its driver.
- */
-
- struct bus_type *bus; /* type of bus device is on */
+ const struct bus_type *bus; /* type of bus device is on */
struct device_driver *driver; /* which driver has allocated this
device */
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
- dev_set/get_drvdata */
+ dev_set_drvdata/dev_get_drvdata */
+ struct mutex mutex; /* mutex to synchronize calls to
+ * its driver.
+ */
+
struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *msi_domain;
+#ifdef CONFIG_ENERGY_MODEL
+ struct em_perf_domain *em_pd;
#endif
+
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
-#ifdef CONFIG_GENERIC_MSI_IRQ
- struct list_head msi_list;
-#endif
-
-#ifdef CONFIG_NUMA
- int numa_node; /* NUMA node this device is close to */
-#endif
+ struct dev_msi_info msi;
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
const struct dma_map_ops *dma_ops;
+#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as
not all hardware supports
64 bit addresses for consistent
 					     allocations such as descriptors. */
- unsigned long dma_pfn_offset;
+ u64 bus_dma_limit; /* upstream dma constraint */
+ const struct bus_dma_region *dma_range_map;
struct device_dma_parameters *dma_parms;
struct list_head dma_pools; /* dma pools (if dma'ble) */
+#ifdef CONFIG_DMA_DECLARE_COHERENT
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
+#endif
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
+#ifdef CONFIG_SWIOTLB
+ struct io_tlb_mem *dma_io_tlb_mem;
+#endif
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head dma_io_tlb_pools;
+ spinlock_t dma_io_tlb_lock;
+ bool dma_uses_io_tlb;
+#endif
/* arch specific additions */
struct dev_archdata archdata;
struct device_node *of_node; /* associated device tree node */
struct fwnode_handle *fwnode; /* firmware device node */
+#ifdef CONFIG_NUMA
+ int numa_node; /* NUMA node this device is close to */
+#endif
dev_t devt; /* dev_t, creates the sysfs "dev" */
u32 id; /* device instance */
spinlock_t devres_lock;
struct list_head devres_head;
- struct klist_node knode_class;
- struct class *class;
+ const struct class *class;
const struct attribute_group **groups; /* optional groups */
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
- struct iommu_fwspec *iommu_fwspec;
+ struct dev_iommu *iommu;
+
+ struct device_physical_location *physical_location;
+
+ enum device_removable removable;
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
+ bool state_synced:1;
+ bool can_match:1;
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ bool dma_coherent:1;
+#endif
+#ifdef CONFIG_DMA_OPS_BYPASS
+ bool dma_ops_bypass : 1;
+#endif
+#ifdef CONFIG_DMA_NEED_SYNC
+ bool dma_skip_sync:1;
+#endif
+#ifdef CONFIG_IOMMU_DMA
+ bool dma_iommu:1;
+#endif
};
-static inline struct device *kobj_to_dev(struct kobject *kobj)
+/**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
+ * @s_node: Hook to the supplier device's list of links to consumers.
+ * @consumer: The device on the consumer end of the link.
+ * @c_node: Hook to the consumer device's list of links to suppliers.
+ * @link_dev: Device used to expose link details in sysfs.
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @kref: Count repeated addition of the same link.
+ * @rm_work: Work structure used for removing the link.
+ * @supplier_preactivated: Supplier has been made active before consumer probe.
+ */
+struct device_link {
+ struct device *supplier;
+ struct list_head s_node;
+ struct device *consumer;
+ struct list_head c_node;
+ struct device link_dev;
+ enum device_link_state status;
+ u32 flags;
+ refcount_t rpm_active;
+ struct kref kref;
+ struct work_struct rm_work;
+ bool supplier_preactivated; /* Owned by consumer probe. */
+};
+
+#define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj)
+
+/**
+ * device_iommu_mapped - Returns true when the device DMA is translated
+ * by an IOMMU
+ * @dev: Device to perform the check on
+ */
+static inline bool device_iommu_mapped(struct device *dev)
{
- return container_of(kobj, struct device, kobj);
+ return (dev->iommu_group != NULL);
}
/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>
+/**
+ * dev_name - Return a device's name.
+ * @dev: Device with name to get.
+ * Return: The kobject name of the device, or its initial name if the kobject
+ * name is not yet available.
+ */
static inline const char *dev_name(const struct device *dev)
{
/* Use the init name until the kobject becomes available */
@@ -985,8 +728,19 @@ static inline const char *dev_name(const struct device *dev)
return kobject_name(&dev->kobj);
}
-extern __printf(2, 3)
-int dev_set_name(struct device *dev, const char *name, ...);
+/**
+ * dev_bus_name - Return a device's bus/class name, if at all possible
+ * @dev: struct device to get the bus/class name of
+ *
+ * Will return the name of the bus/class the device is attached to. If it is
+ * not attached to a bus/class, an empty string will be returned.
+ */
+static inline const char *dev_bus_name(const struct device *dev)
+{
+ return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
+}
+
+__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
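[Editor's note: dev_set_name() takes printf-style arguments; a minimal naming sketch, where the "foo%d" scheme and the foo structure are hypothetical.]

	device_initialize(&foo->dev);
	err = dev_set_name(&foo->dev, "foo%d", foo->id);
	if (err)
		return err;
	dev_dbg(&foo->dev, "named on bus %s\n", dev_bus_name(&foo->dev));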
#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
@@ -1000,7 +754,7 @@ static inline void set_dev_node(struct device *dev, int node)
#else
static inline int dev_to_node(struct device *dev)
{
- return -1;
+ return NUMA_NO_NODE;
}
static inline void set_dev_node(struct device *dev, int node)
{
@@ -1009,8 +763,8 @@ static inline void set_dev_node(struct device *dev, int node)
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- return dev->msi_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ return dev->msi.domain;
#else
return NULL;
#endif
@@ -1018,8 +772,8 @@ static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- dev->msi_domain = d;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ dev->msi.domain = d;
#endif
}
@@ -1070,6 +824,19 @@ static inline bool device_async_suspend_enabled(struct device *dev)
return !!dev->power.async_suspend;
}
+static inline bool device_pm_not_required(struct device *dev)
+{
+ return dev->power.no_pm;
+}
+
+static inline void device_set_pm_not_required(struct device *dev)
+{
+ dev->power.no_pm = true;
+#ifdef CONFIG_PM
+ dev->power.no_callbacks = true;
+#endif
+}
+
static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
@@ -1077,6 +844,52 @@ static inline void dev_pm_syscore_device(struct device *dev, bool val)
#endif
}
+static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
+{
+ dev->power.driver_flags = flags;
+}
+
+static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
+{
+ return !!(dev->power.driver_flags & flags);
+}
+
+static inline bool dev_pm_smart_suspend(struct device *dev)
+{
+#ifdef CONFIG_PM_SLEEP
+ return dev->power.smart_suspend;
+#else
+ return false;
+#endif
+}
+
+/*
+ * dev_pm_set_strict_midlayer - Update the device's power.strict_midlayer flag
+ * @dev: Target device.
+ * @val: New flag value.
+ *
+ * When set, power.strict_midlayer means that the middle layer power management
+ * code (typically, a bus type or a PM domain) does not expect its runtime PM
+ * suspend callback to be invoked at all during system-wide PM transitions and
+ * it does not expect its runtime PM resume callback to be invoked at any point
+ * when runtime PM is disabled for the device during system-wide PM transitions.
+ */
+static inline void dev_pm_set_strict_midlayer(struct device *dev, bool val)
+{
+#ifdef CONFIG_PM_SLEEP
+ dev->power.strict_midlayer = val;
+#endif
+}
+
+static inline bool dev_pm_strict_midlayer_is_set(struct device *dev)
+{
+#ifdef CONFIG_PM_SLEEP
+ return dev->power.strict_midlayer;
+#else
+ return false;
+#endif
+}
+
static inline void device_lock(struct device *dev)
{
mutex_lock(&dev->mutex);
@@ -1097,54 +910,175 @@ static inline void device_unlock(struct device *dev)
mutex_unlock(&dev->mutex);
}
+DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
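[Editor's note: with the guard class defined above, the device lock can be held for a scope via the linux/cleanup.h helpers; a minimal sketch, function name hypothetical.]

	static int foo_do_locked(struct device *dev)
	{
		guard(device)(dev);	/* device_lock(dev), released at scope exit */

		if (!dev->driver)
			return -ENODEV;	/* unlock happens automatically */
		return 0;
	}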
+
static inline void device_lock_assert(struct device *dev)
{
lockdep_assert_held(&dev->mutex);
}
-static inline struct device_node *dev_of_node(struct device *dev)
+static inline bool dev_has_sync_state(struct device *dev)
{
- if (!IS_ENABLED(CONFIG_OF))
- return NULL;
- return dev->of_node;
+ if (!dev)
+ return false;
+ if (dev->driver && dev->driver->sync_state)
+ return true;
+ if (dev->bus && dev->bus->sync_state)
+ return true;
+ return false;
}
-void driver_init(void);
+static inline int dev_set_drv_sync_state(struct device *dev,
+ void (*fn)(struct device *dev))
+{
+ if (!dev || !dev->driver)
+ return 0;
+ if (dev->driver->sync_state && dev->driver->sync_state != fn)
+ return -EBUSY;
+ if (!dev->driver->sync_state)
+ dev->driver->sync_state = fn;
+ return 0;
+}
+
+static inline void dev_set_removable(struct device *dev,
+ enum device_removable removable)
+{
+ dev->removable = removable;
+}
+
+static inline bool dev_is_removable(struct device *dev)
+{
+ return dev->removable == DEVICE_REMOVABLE;
+}
+
+static inline bool dev_removable_is_valid(struct device *dev)
+{
+ return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
+}
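[Editor's note: a hedged sketch of how a bus that knows a port's nature could record removability during enumeration; port_is_hotpluggable() is a hypothetical helper.]

	if (port_is_hotpluggable(port))
		dev_set_removable(&port->dev, DEVICE_REMOVABLE);
	else
		dev_set_removable(&port->dev, DEVICE_FIXED);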
/*
* High level routines for use by the bus drivers
*/
-extern int __must_check device_register(struct device *dev);
-extern void device_unregister(struct device *dev);
-extern void device_initialize(struct device *dev);
-extern int __must_check device_add(struct device *dev);
-extern void device_del(struct device *dev);
-extern int device_for_each_child(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-extern int device_for_each_child_reverse(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-extern struct device *device_find_child(struct device *dev, void *data,
- int (*match)(struct device *dev, void *data));
-extern int device_rename(struct device *dev, const char *new_name);
-extern int device_move(struct device *dev, struct device *new_parent,
- enum dpm_order dpm_order);
-extern const char *device_get_devnode(struct device *dev,
- umode_t *mode, kuid_t *uid, kgid_t *gid,
- const char **tmp);
+int __must_check device_register(struct device *dev);
+void device_unregister(struct device *dev);
+void device_initialize(struct device *dev);
+int __must_check device_add(struct device *dev);
+void device_del(struct device *dev);
+
+DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
+
+int device_for_each_child(struct device *parent, void *data,
+ device_iter_t fn);
+int device_for_each_child_reverse(struct device *parent, void *data,
+ device_iter_t fn);
+int device_for_each_child_reverse_from(struct device *parent,
+ struct device *from, void *data,
+ device_iter_t fn);
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match);
+/**
+ * device_find_child_by_name - device iterator for locating a child device.
+ * @parent: parent struct device
+ * @name: name of the child device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a device that has the name @name.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+static inline struct device *device_find_child_by_name(struct device *parent,
+ const char *name)
+{
+ return device_find_child(parent, name, device_match_name);
+}
+
+/**
+ * device_find_any_child - device iterator for locating a child device, if any.
+ * @parent: parent struct device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a child device, if any.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+static inline struct device *device_find_any_child(struct device *parent)
+{
+ return device_find_child(parent, NULL, device_match_any);
+}
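[Editor's note: lookups in this family return a referenced device; a sketch of locating a named child ("port0" is hypothetical) and dropping the reference.]

	struct device *child;

	child = device_find_child_by_name(parent, "port0");
	if (child) {
		/* ... use child ... */
		put_device(child);
	}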
+
+int device_rename(struct device *dev, const char *new_name);
+int device_move(struct device *dev, struct device *new_parent,
+ enum dpm_order dpm_order);
+int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
static inline bool device_supports_offline(struct device *dev)
{
return dev->bus && dev->bus->offline && dev->bus->online;
}
-extern void lock_device_hotplug(void);
-extern void unlock_device_hotplug(void);
-extern int lock_device_hotplug_sysfs(void);
-extern int device_offline(struct device *dev);
-extern int device_online(struct device *dev);
-extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
-extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+#define __device_lock_set_class(dev, name, key) \
+do { \
+ struct device *__d2 __maybe_unused = dev; \
+ lock_set_class(&__d2->mutex.dep_map, name, key, 0, _THIS_IP_); \
+} while (0)
+
+/**
+ * device_lock_set_class - Specify a temporary lock class while a device
+ * is attached to a driver
+ * @dev: device to modify
+ * @key: lock class key data
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->probe(). Take care to only override the default
+ * lockdep_no_validate class.
+ */
+#ifdef CONFIG_LOCKDEP
+#define device_lock_set_class(dev, key) \
+do { \
+ struct device *__d = dev; \
+ dev_WARN_ONCE(__d, !lockdep_match_class(&__d->mutex, \
+ &__lockdep_no_validate__), \
+ "overriding existing custom lock class\n"); \
+ __device_lock_set_class(__d, #key, key); \
+} while (0)
+#else
+#define device_lock_set_class(dev, key) __device_lock_set_class(dev, #key, key)
+#endif
+
+/**
+ * device_lock_reset_class - Return a device to the default lockdep novalidate state
+ * @dev: device to modify
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->remove().
+ */
+#define device_lock_reset_class(dev) \
+do { \
+ struct device *__d __maybe_unused = dev; \
+ lock_set_novalidate_class(&__d->mutex.dep_map, "&dev->mutex", \
+ _THIS_IP_); \
+} while (0)
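[Editor's note: a hedged sketch of the intended pairing, with a driver-local lock class key; the core already holds device_lock() across ->probe()/->remove(), as the comments above require. Names hypothetical.]

	static struct lock_class_key foo_mutex_key;

	static int foo_probe(struct device *dev)
	{
		device_lock_set_class(dev, &foo_mutex_key);
		return 0;
	}

	static void foo_remove(struct device *dev)
	{
		device_lock_reset_class(dev);	/* back to the novalidate class */
	}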
+
+void lock_device_hotplug(void);
+void unlock_device_hotplug(void);
+int lock_device_hotplug_sysfs(void);
+int device_offline(struct device *dev);
+int device_online(struct device *dev);
+
+void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
+int device_add_of_node(struct device *dev, struct device_node *of_node);
+void device_remove_of_node(struct device *dev);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
+struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode);
+
+static inline struct device_node *dev_of_node(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_OF) || !dev)
+ return NULL;
+ return dev->of_node;
+}
static inline int dev_num_vf(struct device *dev)
{
@@ -1156,14 +1090,13 @@ static inline int dev_num_vf(struct device *dev)
/*
* Root device objects for grouping under /sys/devices
*/
-extern struct device *__root_device_register(const char *name,
- struct module *owner);
+struct device *__root_device_register(const char *name, struct module *owner);
/* This is a macro to avoid include problems with THIS_MODULE */
#define root_device_register(name) \
__root_device_register(name, THIS_MODULE)
-extern void root_device_unregister(struct device *root);
+void root_device_unregister(struct device *root);
static inline void *dev_get_platdata(const struct device *dev)
{
@@ -1174,37 +1107,33 @@ static inline void *dev_get_platdata(const struct device *dev)
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
-extern int __must_check device_bind_driver(struct device *dev);
-extern void device_release_driver(struct device *dev);
-extern int __must_check device_attach(struct device *dev);
-extern int __must_check driver_attach(struct device_driver *drv);
-extern void device_initial_probe(struct device *dev);
-extern int __must_check device_reprobe(struct device *dev);
+int __must_check device_driver_attach(const struct device_driver *drv,
+ struct device *dev);
+int __must_check device_bind_driver(struct device *dev);
+void device_release_driver(struct device *dev);
+int __must_check device_attach(struct device *dev);
+int __must_check driver_attach(const struct device_driver *drv);
+void device_initial_probe(struct device *dev);
+int __must_check device_reprobe(struct device *dev);
-extern bool device_is_bound(struct device *dev);
+bool device_is_bound(struct device *dev);
/*
* Easy functions for dynamically creating devices on the fly
*/
-extern __printf(5, 0)
-struct device *device_create_vargs(struct class *cls, struct device *parent,
- dev_t devt, void *drvdata,
- const char *fmt, va_list vargs);
-extern __printf(5, 6)
-struct device *device_create(struct class *cls, struct device *parent,
- dev_t devt, void *drvdata,
- const char *fmt, ...);
-extern __printf(6, 7)
-struct device *device_create_with_groups(struct class *cls,
- struct device *parent, dev_t devt, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...);
-extern void device_destroy(struct class *cls, dev_t devt);
-
-extern int __must_check device_add_groups(struct device *dev,
- const struct attribute_group **groups);
-extern void device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
+__printf(5, 6) struct device *
+device_create(const struct class *cls, struct device *parent, dev_t devt,
+ void *drvdata, const char *fmt, ...);
+__printf(6, 7) struct device *
+device_create_with_groups(const struct class *cls, struct device *parent, dev_t devt,
+ void *drvdata, const struct attribute_group **groups,
+ const char *fmt, ...);
+void device_destroy(const struct class *cls, dev_t devt);
+
+int __must_check device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
static inline int __must_check device_add_group(struct device *dev,
const struct attribute_group *grp)
@@ -1219,258 +1148,48 @@ static inline void device_remove_group(struct device *dev,
{
const struct attribute_group *groups[] = { grp, NULL };
- return device_remove_groups(dev, groups);
+ device_remove_groups(dev, groups);
}
-extern int __must_check devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups);
-extern void devm_device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
-extern int __must_check devm_device_add_group(struct device *dev,
- const struct attribute_group *grp);
-extern void devm_device_remove_group(struct device *dev,
- const struct attribute_group *grp);
-
-/*
- * Platform "fixup" functions - allow the platform to have their say
- * about devices and actions that the general device layer doesn't
- * know about.
- */
-/* Notify platform of device discovery */
-extern int (*platform_notify)(struct device *dev);
-
-extern int (*platform_notify_remove)(struct device *dev);
-
+int __must_check devm_device_add_group(struct device *dev,
+ const struct attribute_group *grp);
/*
* get_device - atomically increment the reference count for the device.
*
*/
-extern struct device *get_device(struct device *dev);
-extern void put_device(struct device *dev);
+struct device *get_device(struct device *dev);
+void put_device(struct device *dev);
+
+DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
+
+bool kill_device(struct device *dev);
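[Editor's note: the put_device free hook above enables scope-based reference management; a minimal sketch, function name hypothetical.]

	static int foo_check(const struct bus_type *bus, const char *name)
	{
		struct device *dev __free(put_device) =
			bus_find_device_by_name(bus, NULL, name);

		if (!dev)
			return -ENODEV;
		return 0;	/* reference dropped automatically on return */
	}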
#ifdef CONFIG_DEVTMPFS
-extern int devtmpfs_create_node(struct device *dev);
-extern int devtmpfs_delete_node(struct device *dev);
-extern int devtmpfs_mount(const char *mntdir);
+int devtmpfs_mount(void);
#else
-static inline int devtmpfs_create_node(struct device *dev) { return 0; }
-static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
-static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
+static inline int devtmpfs_mount(void) { return 0; }
#endif
/* drivers/base/power/shutdown.c */
-extern void device_shutdown(void);
+void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
-extern const char *dev_driver_string(const struct device *dev);
+const char *dev_driver_string(const struct device *dev);
/* Device links interface. */
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
+void device_link_remove(void *consumer, struct device *supplier);
+void device_links_supplier_sync_state_pause(void);
+void device_links_supplier_sync_state_resume(void);
+void device_link_wait_removal(void);
-#ifdef CONFIG_PRINTK
-
-extern __printf(3, 0)
-int dev_vprintk_emit(int level, const struct device *dev,
- const char *fmt, va_list args);
-extern __printf(3, 4)
-int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
-
-extern __printf(3, 4)
-void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...);
-extern __printf(2, 3)
-void dev_emerg(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_alert(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_crit(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_err(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_warn(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_notice(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void _dev_info(const struct device *dev, const char *fmt, ...);
-
-#else
-
-static inline __printf(3, 0)
-int dev_vprintk_emit(int level, const struct device *dev,
- const char *fmt, va_list args)
-{ return 0; }
-static inline __printf(3, 4)
-int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
-{ return 0; }
-
-static inline void __dev_printk(const char *level, const struct device *dev,
- struct va_format *vaf)
-{}
-static inline __printf(3, 4)
-void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...)
-{}
-
-static inline __printf(2, 3)
-void dev_emerg(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_crit(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_alert(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_err(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_warn(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_notice(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_info(const struct device *dev, const char *fmt, ...)
-{}
-
-#endif
-
-/*
- * Stupid hackaround for existing uses of non-printk uses dev_info
- *
- * Note that the definition of dev_info below is actually _dev_info
- * and a macro is used to avoid redefining dev_info
- */
-
-#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
-
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define dev_dbg(dev, format, ...) \
-do { \
- dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
-} while (0)
-#elif defined(DEBUG)
-#define dev_dbg(dev, format, arg...) \
- dev_printk(KERN_DEBUG, dev, format, ##arg)
-#else
-#define dev_dbg(dev, format, arg...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, format, ##arg); \
-})
-#endif
-
-#ifdef CONFIG_PRINTK
-#define dev_level_once(dev_level, dev, fmt, ...) \
-do { \
- static bool __print_once __read_mostly; \
- \
- if (!__print_once) { \
- __print_once = true; \
- dev_level(dev, fmt, ##__VA_ARGS__); \
- } \
-} while (0)
-#else
-#define dev_level_once(dev_level, dev, fmt, ...) \
-do { \
- if (0) \
- dev_level(dev, fmt, ##__VA_ARGS__); \
-} while (0)
-#endif
-
-#define dev_emerg_once(dev, fmt, ...) \
- dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
-#define dev_alert_once(dev, fmt, ...) \
- dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
-#define dev_crit_once(dev, fmt, ...) \
- dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
-#define dev_err_once(dev, fmt, ...) \
- dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
-#define dev_warn_once(dev, fmt, ...) \
- dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
-#define dev_notice_once(dev, fmt, ...) \
- dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
-#define dev_info_once(dev, fmt, ...) \
- dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
-#define dev_dbg_once(dev, fmt, ...) \
- dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
-
-#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- dev_level(dev, fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define dev_emerg_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
-#define dev_alert_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
-#define dev_crit_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
-#define dev_err_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
-#define dev_warn_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
-#define dev_notice_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
-#define dev_info_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
-/* descriptor check is first to prevent flooding with "callbacks suppressed" */
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
- __ratelimit(&_rs)) \
- __dynamic_dev_dbg(&descriptor, dev, fmt, \
- ##__VA_ARGS__); \
-} while (0)
-#elif defined(DEBUG)
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
-} while (0)
-#else
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- if (0) \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
-} while (0)
-#endif
-
-#ifdef VERBOSE_DEBUG
-#define dev_vdbg dev_dbg
-#else
-#define dev_vdbg(dev, format, arg...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, format, ##arg); \
-})
-#endif
-
-/*
- * dev_WARN*() acts like dev_printk(), but with the key difference of
- * using WARN/WARN_ONCE to include file/line information and a backtrace.
- */
-#define dev_WARN(dev, format, arg...) \
- WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
-
-#define dev_WARN_ONCE(dev, condition, format, arg...) \
- WARN_ONCE(condition, "%s %s: " format, \
- dev_driver_string(dev), dev_name(dev), ## arg)
+static inline bool device_link_test(const struct device_link *link, u32 flags)
+{
+ return !!(link->flags & flags);
+}
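[Editor's note: taken together, a consumer might create a managed link and later inspect its flags; a hedged sketch with error handling condensed.]

	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -EINVAL;

	if (device_link_test(link, DL_FLAG_PM_RUNTIME))
		dev_dbg(consumer, "link manages runtime PM\n");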
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
@@ -1478,58 +1197,4 @@ do { \
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
MODULE_ALIAS("char-major-" __stringify(major) "-*")
-#ifdef CONFIG_SYSFS_DEPRECATED
-extern long sysfs_deprecated;
-#else
-#define sysfs_deprecated 0
-#endif
-
-/**
- * module_driver() - Helper macro for drivers that don't do anything
- * special in module init/exit. This eliminates a lot of boilerplate.
- * Each module may only use this macro once, and calling it replaces
- * module_init() and module_exit().
- *
- * @__driver: driver name
- * @__register: register function for this driver type
- * @__unregister: unregister function for this driver type
- * @...: Additional arguments to be passed to __register and __unregister.
- *
- * Use this macro to construct bus specific macros for registering
- * drivers, and do not use it on its own.
- */
-#define module_driver(__driver, __register, __unregister, ...) \
-static int __init __driver##_init(void) \
-{ \
- return __register(&(__driver) , ##__VA_ARGS__); \
-} \
-module_init(__driver##_init); \
-static void __exit __driver##_exit(void) \
-{ \
- __unregister(&(__driver) , ##__VA_ARGS__); \
-} \
-module_exit(__driver##_exit);
-
-/**
- * builtin_driver() - Helper macro for drivers that don't do anything
- * special in init and have no exit. This eliminates some boilerplate.
- * Each driver may only use this macro once, and calling it replaces
- * device_initcall (or in some cases, the legacy __initcall). This is
- * meant to be a direct parallel of module_driver() above but without
- * the __exit stuff that is not used for builtin cases.
- *
- * @__driver: driver name
- * @__register: register function for this driver type
- * @...: Additional arguments to be passed to __register
- *
- * Use this macro to construct bus specific macros for registering
- * drivers, and do not use it on its own.
- */
-#define builtin_driver(__driver, __register, ...) \
-static int __init __driver##_init(void) \
-{ \
- return __register(&(__driver) , ##__VA_ARGS__); \
-} \
-device_initcall(__driver##_init);
-
#endif /* _DEVICE_H_ */
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
new file mode 100644
index 000000000000..99b1002b3e31
--- /dev/null
+++ b/include/linux/device/bus.h
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bus.h - the bus-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_BUS_H_
+#define _DEVICE_BUS_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+
+struct device_driver;
+struct fwnode_handle;
+
+/**
+ * struct bus_type - The bus type of the device
+ *
+ * @name: The name of the bus.
+ * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
+ * @bus_groups: Default attributes of the bus.
+ * @dev_groups: Default attributes of the devices on the bus.
+ * @drv_groups: Default attributes of the device drivers on the bus.
+ * @match: Called, perhaps multiple times, whenever a new device or driver
+ * is added for this bus. It should return a positive value if the
+ * given device can be handled by the given driver and zero
+ * otherwise. It may also return error code if determining that
+ * the driver supports the device is not possible. In case of
+ * -EPROBE_DEFER it will queue the device for deferred probing.
+ * @uevent: Called when a device is added or removed, or on other events
+ * that generate uevents, in order to add environment variables.
+ * @probe: Called when a new device or driver is added to this bus; invokes
+ * the matched driver's probe callback to initialize the device.
+ * @sync_state: Called to sync device state to software state after all the
+ * state tracking consumers linked to this device (present at
+ * the time of late_initcall) have successfully bound to a
+ * driver. If the device has no consumers, this function will
+ * be called at late_initcall_sync level. If the device has
+ * consumers that are never bound to a driver, this function
+ * will never get called until they do.
+ * @remove: Called when a device is removed from this bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @irq_get_affinity: Get IRQ affinity mask for the device on this bus.
+ *
+ * @online: Called to put the device back online (after offlining it).
+ * @offline: Called to put the device offline for hot-removal. May fail.
+ *
+ * @suspend: Called when a device on this bus wants to go to sleep mode.
+ * @resume: Called to bring a device on this bus out of sleep mode.
+ * @num_vf: Called to find out how many virtual functions a device on this
+ * bus supports.
+ * @dma_configure: Called to setup DMA configuration on a device on
+ * this bus.
+ * @dma_cleanup: Called to cleanup DMA configuration on a device on
+ * this bus.
+ * @pm: Power management operations of this bus, which call into the
+ * specific device driver's pm-ops.
+ * @need_parent_lock: When probing or removing a device on this bus, the
+ * device core should lock the device's parent.
+ *
+ * A bus is a channel between the processor and one or more devices. For the
+ * purposes of the device model, all devices are connected via a bus, even if
+ * it is an internal, virtual, "platform" bus. Buses can plug into each other.
+ * A USB controller is usually a PCI device, for example. The device model
+ * represents the actual connections between buses and the devices they control.
+ * A bus is represented by the bus_type structure. It contains the name, the
+ * default attributes, the bus' methods, PM operations, and the driver core's
+ * private data.
+ */
+struct bus_type {
+ const char *name;
+ const char *dev_name;
+ const struct attribute_group **bus_groups;
+ const struct attribute_group **dev_groups;
+ const struct attribute_group **drv_groups;
+
+ int (*match)(struct device *dev, const struct device_driver *drv);
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ int (*probe)(struct device *dev);
+ void (*sync_state)(struct device *dev);
+ void (*remove)(struct device *dev);
+ void (*shutdown)(struct device *dev);
+ const struct cpumask *(*irq_get_affinity)(struct device *dev,
+ unsigned int irq_vec);
+
+ int (*online)(struct device *dev);
+ int (*offline)(struct device *dev);
+
+ int (*suspend)(struct device *dev, pm_message_t state);
+ int (*resume)(struct device *dev);
+
+ int (*num_vf)(struct device *dev);
+
+ int (*dma_configure)(struct device *dev);
+ void (*dma_cleanup)(struct device *dev);
+
+ const struct dev_pm_ops *pm;
+
+ bool need_parent_lock;
+};
+
+int __must_check bus_register(const struct bus_type *bus);
+
+void bus_unregister(const struct bus_type *bus);
+
+int __must_check bus_rescan_devices(const struct bus_type *bus);
+
+struct bus_attribute {
+ struct attribute attr;
+ ssize_t (*show)(const struct bus_type *bus, char *buf);
+ ssize_t (*store)(const struct bus_type *bus, const char *buf, size_t count);
+};
+
+#define BUS_ATTR_RW(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
+#define BUS_ATTR_RO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
+#define BUS_ATTR_WO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
+
+int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr);
+void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr);
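[Editor's note: a sketch of a bus-level attribute registered after a successful bus_register(); the foo_bus instance and the event counter are hypothetical.]

	static atomic_t foo_hotplug_events = ATOMIC_INIT(0);

	static ssize_t hotplug_events_show(const struct bus_type *bus, char *buf)
	{
		return sysfs_emit(buf, "%d\n", atomic_read(&foo_hotplug_events));
	}
	static BUS_ATTR_RO(hotplug_events);

	/* after bus_register(&foo_bus) succeeds: */
	ret = bus_create_file(&foo_bus, &bus_attr_hotplug_events);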
+
+/* Matching function type for drivers/base APIs to find a specific device */
+typedef int (*device_match_t)(struct device *dev, const void *data);
+
+/* Generic device matching functions that all busses can use to match with */
+int device_match_name(struct device *dev, const void *name);
+int device_match_type(struct device *dev, const void *type);
+int device_match_of_node(struct device *dev, const void *np);
+int device_match_fwnode(struct device *dev, const void *fwnode);
+int device_match_devt(struct device *dev, const void *pdevt);
+int device_match_acpi_dev(struct device *dev, const void *adev);
+int device_match_acpi_handle(struct device *dev, const void *handle);
+int device_match_any(struct device *dev, const void *unused);
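[Editor's note: custom matchers follow the same device_match_t shape; a hedged sketch matching on driver data, with foo_bus and ctx hypothetical.]

	/* Find the device whose driver_data equals @data. */
	static int foo_match_drvdata(struct device *dev, const void *data)
	{
		return dev_get_drvdata(dev) == data;
	}

	dev = bus_find_device(&foo_bus, NULL, ctx, foo_match_drvdata);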
+
+/* Device iterating function type for various driver core for_each APIs */
+typedef int (*device_iter_t)(struct device *dev, void *data);
+
+/* iterator helpers for buses */
+int bus_for_each_dev(const struct bus_type *bus, struct device *start,
+ void *data, device_iter_t fn);
+struct device *bus_find_device(const struct bus_type *bus, struct device *start,
+ const void *data, device_match_t match);
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+ struct device *start, const void *data,
+ device_match_t match);
+/**
+ * bus_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @bus: bus type
+ * @start: Device to begin with
+ * @name: name of the device to match
+ */
+static inline struct device *bus_find_device_by_name(const struct bus_type *bus,
+ struct device *start,
+ const char *name)
+{
+ return bus_find_device(bus, start, name, device_match_name);
+}
+
+/**
+ * bus_find_device_by_of_node : device iterator for locating a particular device
+ * matching the of_node.
+ * @bus: bus type
+ * @np: of_node of the device to match.
+ */
+static inline struct device *
+bus_find_device_by_of_node(const struct bus_type *bus, const struct device_node *np)
+{
+ return bus_find_device(bus, NULL, np, device_match_of_node);
+}
+
+/**
+ * bus_find_device_by_fwnode : device iterator for locating a particular device
+ * matching the fwnode.
+ * @bus: bus type
+ * @fwnode: fwnode of the device to match.
+ */
+static inline struct device *
+bus_find_device_by_fwnode(const struct bus_type *bus, const struct fwnode_handle *fwnode)
+{
+ return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * bus_find_device_by_devt : device iterator for locating a particular device
+ * matching the device type.
+ * @bus: bus type
+ * @devt: device type of the device to match.
+ */
+static inline struct device *bus_find_device_by_devt(const struct bus_type *bus,
+ dev_t devt)
+{
+ return bus_find_device(bus, NULL, &devt, device_match_devt);
+}
+
+/**
+ * bus_find_next_device - Find the next device after a given device in a
+ * given bus.
+ * @bus: bus type
+ * @cur: device to begin the search with.
+ */
+static inline struct device *
+bus_find_next_device(const struct bus_type *bus, struct device *cur)
+{
+ return bus_find_device(bus, cur, NULL, device_match_any);
+}
+
+#ifdef CONFIG_ACPI
+struct acpi_device;
+
+/**
+ * bus_find_device_by_acpi_dev : device iterator for locating a particular device
+ * matching the ACPI COMPANION device.
+ * @bus: bus type
+ * @adev: ACPI COMPANION device to match.
+ */
+static inline struct device *
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device *adev)
+{
+ return bus_find_device(bus, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
+{
+ return NULL;
+}
+#endif
+
+int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
+ void *data, int (*fn)(struct device_driver *, void *));
+void bus_sort_breadthfirst(const struct bus_type *bus,
+ int (*compare)(const struct device *a,
+ const struct device *b));
+/*
+ * Bus notifiers: Get notified of addition/removal of devices
+ * and binding/unbinding of drivers to devices.
+ * In the long run, it should be a replacement for the platform
+ * notify hooks.
+ */
+struct notifier_block;
+
+int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb);
+int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb);
+
+/**
+ * enum bus_notifier_event - Bus Notifier events that have happened
+ * @BUS_NOTIFY_ADD_DEVICE: device is added to this bus
+ * @BUS_NOTIFY_DEL_DEVICE: device is about to be removed from this bus
+ * @BUS_NOTIFY_REMOVED_DEVICE: device is successfully removed from this bus
+ * @BUS_NOTIFY_BIND_DRIVER: a driver is about to be bound to this device on this bus
+ * @BUS_NOTIFY_BOUND_DRIVER: a driver is successfully bound to this device on this bus
+ * @BUS_NOTIFY_UNBIND_DRIVER: a driver is about to be unbound from this device on this bus
+ * @BUS_NOTIFY_UNBOUND_DRIVER: a driver is successfully unbound from this device on this bus
+ * @BUS_NOTIFY_DRIVER_NOT_BOUND: a driver failed to be bound to this device on this bus
+ *
+ * These are the value passed to a bus notifier when a specific event happens.
+ *
+ * Note that bus notifiers are likely to be called with the device lock already
+ * held by the driver core, so be careful in any notifier callback as to what
+ * you do with the device structure.
+ *
+ * All bus notifiers are called with the target struct device * as an argument.
+ */
+enum bus_notifier_event {
+ BUS_NOTIFY_ADD_DEVICE,
+ BUS_NOTIFY_DEL_DEVICE,
+ BUS_NOTIFY_REMOVED_DEVICE,
+ BUS_NOTIFY_BIND_DRIVER,
+ BUS_NOTIFY_BOUND_DRIVER,
+ BUS_NOTIFY_UNBIND_DRIVER,
+ BUS_NOTIFY_UNBOUND_DRIVER,
+ BUS_NOTIFY_DRIVER_NOT_BOUND,
+};
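[Editor's note: a hedged notifier sketch reacting to driver binding on a bus; the foo_bus instance is hypothetical.]

	static int foo_bus_notify(struct notifier_block *nb, unsigned long action,
				  void *data)
	{
		struct device *dev = data;

		if (action == BUS_NOTIFY_BOUND_DRIVER)
			dev_info(dev, "bound to %s\n", dev_driver_string(dev));
		return NOTIFY_OK;
	}

	static struct notifier_block foo_bus_nb = {
		.notifier_call = foo_bus_notify,
	};

	/* registration, e.g. from module init: */
	bus_register_notifier(&foo_bus, &foo_bus_nb);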
+
+struct kset *bus_get_kset(const struct bus_type *bus);
+struct device *bus_get_dev_root(const struct bus_type *bus);
+
+#endif
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
new file mode 100644
index 000000000000..65880e60c720
--- /dev/null
+++ b/include/linux/device/class.h
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The class-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_CLASS_H_
+#define _DEVICE_CLASS_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+#include <linux/device/bus.h>
+
+struct device;
+struct fwnode_handle;
+
+/**
+ * struct class - device classes
+ * @name: Name of the class.
+ * @class_groups: Default attributes of this class.
+ * @dev_groups: Default attributes of the devices that belong to the class.
+ * @dev_uevent: Called when a device is added to or removed from this class,
+ * or on other events that generate uevents, in order to add
+ * environment variables.
+ * @devnode: Callback to provide the devtmpfs node name for the device.
+ * @class_release: Called to release this class.
+ * @dev_release: Called to release the device.
+ * @shutdown_pre: Called at shut-down time before driver shutdown.
+ * @ns_type: Callbacks so sysfs can determine namespaces.
+ * @namespace: Namespace of the device that belongs to this class.
+ * @get_ownership: Allows class to specify uid/gid of the sysfs directories
+ * for the devices belonging to the class. Usually tied to
+ * device's namespace.
+ * @pm: The default device power management operations of this class.
+ *
+ * A class is a higher-level view of a device that abstracts out low-level
+ * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
+ * at the class level, they are all simply disks. Classes allow user space
+ * to work with devices based on what they do, rather than how they are
+ * connected or how they work.
+ */
+struct class {
+ const char *name;
+
+ const struct attribute_group **class_groups;
+ const struct attribute_group **dev_groups;
+
+ int (*dev_uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(const struct device *dev, umode_t *mode);
+
+ void (*class_release)(const struct class *class);
+ void (*dev_release)(struct device *dev);
+
+ int (*shutdown_pre)(struct device *dev);
+
+ const struct kobj_ns_type_operations *ns_type;
+ const void *(*namespace)(const struct device *dev);
+
+ void (*get_ownership)(const struct device *dev, kuid_t *uid, kgid_t *gid);
+
+ const struct dev_pm_ops *pm;
+};
+
+struct class_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+ struct subsys_private *sp;
+};
+
+int __must_check class_register(const struct class *class);
+void class_unregister(const struct class *class);
+bool class_is_registered(const struct class *class);
+
+struct class_compat;
+struct class_compat *class_compat_register(const char *name);
+void class_compat_unregister(struct class_compat *cls);
+int class_compat_create_link(struct class_compat *cls, struct device *dev);
+void class_compat_remove_link(struct class_compat *cls, struct device *dev);
+
+void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
+ const struct device *start, const struct device_type *type);
+struct device *class_dev_iter_next(struct class_dev_iter *iter);
+void class_dev_iter_exit(struct class_dev_iter *iter);
+
+int class_for_each_device(const struct class *class, const struct device *start,
+ void *data, device_iter_t fn);
+struct device *class_find_device(const struct class *class, const struct device *start,
+ const void *data, device_match_t match);
+
+/**
+ * class_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @class: class type
+ * @name: name of the device to match
+ */
+static inline struct device *class_find_device_by_name(const struct class *class,
+ const char *name)
+{
+ return class_find_device(class, NULL, name, device_match_name);
+}
+
+/**
+ * class_find_device_by_of_node - device iterator for locating a particular device
+ * matching the of_node.
+ * @class: class type
+ * @np: of_node of the device to match.
+ */
+static inline struct device *class_find_device_by_of_node(const struct class *class,
+ const struct device_node *np)
+{
+ return class_find_device(class, NULL, np, device_match_of_node);
+}
+
+/**
+ * class_find_device_by_fwnode - device iterator for locating a particular device
+ * matching the fwnode.
+ * @class: class type
+ * @fwnode: fwnode of the device to match.
+ */
+static inline struct device *class_find_device_by_fwnode(const struct class *class,
+ const struct fwnode_handle *fwnode)
+{
+ return class_find_device(class, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * class_find_device_by_devt - device iterator for locating a particular device
+ * matching the device number.
+ * @class: class type
+ * @devt: device number (dev_t) of the device to match.
+ */
+static inline struct device *class_find_device_by_devt(const struct class *class,
+ dev_t devt)
+{
+ return class_find_device(class, NULL, &devt, device_match_devt);
+}
+
+#ifdef CONFIG_ACPI
+struct acpi_device;
+/**
+ * class_find_device_by_acpi_dev - device iterator for locating a particular
+ * device matching the ACPI_COMPANION device.
+ * @class: class type
+ * @adev: ACPI_COMPANION device to match.
+ */
+static inline struct device *class_find_device_by_acpi_dev(const struct class *class,
+ const struct acpi_device *adev)
+{
+ return class_find_device(class, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *class_find_device_by_acpi_dev(const struct class *class,
+ const void *adev)
+{
+ return NULL;
+}
+#endif
+
+struct class_attribute {
+ struct attribute attr;
+ ssize_t (*show)(const struct class *class, const struct class_attribute *attr,
+ char *buf);
+ ssize_t (*store)(const struct class *class, const struct class_attribute *attr,
+ const char *buf, size_t count);
+};
+
+#define CLASS_ATTR_RW(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_RW(_name)
+#define CLASS_ATTR_RO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+#define CLASS_ATTR_WO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_WO(_name)
+
+int __must_check class_create_file_ns(const struct class *class, const struct class_attribute *attr,
+ const void *ns);
+void class_remove_file_ns(const struct class *class, const struct class_attribute *attr,
+ const void *ns);
+
+static inline int __must_check class_create_file(const struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_create_file_ns(class, attr, NULL);
+}
+
+static inline void class_remove_file(const struct class *class,
+ const struct class_attribute *attr)
+{
+ class_remove_file_ns(class, attr, NULL);
+}
+
+/* Simple class attribute that is just a static string */
+struct class_attribute_string {
+ struct class_attribute attr;
+ char *str;
+};
+
+/* Currently, only read-only attributes are supported */
+#define _CLASS_ATTR_STRING(_name, _mode, _str) \
+ { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
+#define CLASS_ATTR_STRING(_name, _mode, _str) \
+ struct class_attribute_string class_attr_##_name = \
+ _CLASS_ATTR_STRING(_name, _mode, _str)
+
+ssize_t show_class_attr_string(const struct class *class, const struct class_attribute *attr,
+ char *buf);
+
+struct class_interface {
+ struct list_head node;
+ const struct class *class;
+
+ int (*add_dev) (struct device *dev);
+ void (*remove_dev) (struct device *dev);
+};
+
+int __must_check class_interface_register(struct class_interface *);
+void class_interface_unregister(struct class_interface *);
+
+struct class * __must_check class_create(const char *name);
+void class_destroy(const struct class *cls);
+
+#endif /* _DEVICE_CLASS_H_ */
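
A short usage sketch for the class API above (illustrative, not part of the patch; my_class and the version attribute are hypothetical):

#include <linux/device/class.h>
#include <linux/err.h>
#include <linux/sysfs.h>

static ssize_t version_show(const struct class *class,
			    const struct class_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "1.0\n");
}
static CLASS_ATTR_RO(version);	/* defines class_attr_version */

static struct class *my_class;

static int __init my_class_init(void)
{
	int ret;

	my_class = class_create("my_class");	/* /sys/class/my_class */
	if (IS_ERR(my_class))
		return PTR_ERR(my_class);

	ret = class_create_file(my_class, &class_attr_version);
	if (ret)
		class_destroy(my_class);
	return ret;
}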
diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
new file mode 100644
index 000000000000..9c1e3d643d69
--- /dev/null
+++ b/include/linux/device/devres.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DEVICE_DEVRES_H_
+#define _DEVICE_DEVRES_H_
+
+#include <linux/err.h>
+#include <linux/gfp_types.h>
+#include <linux/numa.h>
+#include <linux/overflow.h>
+#include <linux/stdarg.h>
+#include <linux/types.h>
+#include <asm/bug.h>
+#include <asm/percpu.h>
+
+struct device;
+struct device_node;
+struct resource;
+
+/* device resource management */
+typedef void (*dr_release_t)(struct device *dev, void *res);
+typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
+
+void * __malloc
+__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name);
+#define devres_alloc(release, size, gfp) \
+ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
+#define devres_alloc_node(release, size, gfp, nid) \
+ __devres_alloc_node(release, size, gfp, nid, #release)
+
+void devres_for_each_res(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data,
+ void (*fn)(struct device *, void *, void *),
+ void *data);
+void devres_free(void *res);
+void devres_add(struct device *dev, void *res);
+void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+void *devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data);
+void *devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+
+/* devres group */
+void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
+void devres_close_group(struct device *dev, void *id);
+void devres_remove_group(struct device *dev, void *id);
+int devres_release_group(struct device *dev, void *id);
+
+/* managed devm_k.alloc/kfree for device drivers */
+void * __alloc_size(2)
+devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
+void * __must_check __realloc_size(3)
+devm_krealloc(struct device *dev, void *ptr, size_t size, gfp_t gfp);
+static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+ return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
+}
+static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ return devm_kmalloc(dev, bytes, flags);
+}
+static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags)
+{
+ return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+}
+static inline __realloc_size(3, 4) void * __must_check
+devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return devm_krealloc(dev, p, bytes, flags);
+}
+
+void devm_kfree(struct device *dev, const void *p);
+
+void * __realloc_size(3)
+devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp);
+static inline void *devm_kmemdup_array(struct device *dev, const void *src,
+ size_t n, size_t size, gfp_t flags)
+{
+ return devm_kmemdup(dev, src, size_mul(size, n), flags);
+}
+
+char * __malloc
+devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
+char * __printf(3, 0) __malloc
+devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap);
+char * __printf(3, 4) __malloc
+devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
+
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, size_t align);
+
+unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order);
+void devm_free_pages(struct device *dev, unsigned long addr);
+
+#ifdef CONFIG_HAS_IOMEM
+
+void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res);
+void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res);
+
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size);
+#else
+
+static inline
+void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+#endif
+
+/* allows to add/remove a custom action to devres stack */
+int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data);
+
+/**
+ * devm_remove_action() - removes previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Removes instance of @action previously added by devm_add_action().
+ * Both action and data should match one of the existing entries.
+ */
+static inline
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+{
+ WARN_ON(devm_remove_action_nowarn(dev, action, data));
+}
+
+void devm_release_action(struct device *dev, void (*action)(void *), void *data);
+
+int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
+#define devm_add_action(dev, action, data) \
+ __devm_add_action(dev, action, data, #action)
+
+static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
+ void *data, const char *name)
+{
+ int ret;
+
+ ret = __devm_add_action(dev, action, data, name);
+ if (ret)
+ action(data);
+
+ return ret;
+}
+#define devm_add_action_or_reset(dev, action, data) \
+ __devm_add_action_or_reset(dev, action, data, #action)
+
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data);
+
+#endif /* _DEVICE_DEVRES_H_ */
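
A hedged sketch of typical devres usage in a probe routine (my_ctx, my_hw_disable and my_probe are hypothetical; devm_kzalloc() and devm_add_action_or_reset() are the APIs declared above):

#include <linux/device.h>
#include <linux/gfp.h>

struct my_ctx {
	int dummy;		/* hypothetical driver state */
};

static void my_hw_disable(void *data)
{
	/* undo whatever probe enabled */
}

static int my_probe(struct device *dev)
{
	struct my_ctx *ctx;

	/* freed automatically on driver detach, in reverse order of
	 * allocation */
	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* ... enable hardware ... */

	/* my_hw_disable(ctx) runs on detach; if registration itself fails,
	 * the action is invoked immediately and the error is returned */
	return devm_add_action_or_reset(dev, my_hw_disable, ctx);
}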
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
new file mode 100644
index 000000000000..cd8e0f0a634b
--- /dev/null
+++ b/include/linux/device/driver.h
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The driver-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_DRIVER_H_
+#define _DEVICE_DRIVER_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+#include <linux/device/bus.h>
+#include <linux/module.h>
+
+/**
+ * enum probe_type - device driver probe type to try
+ * Device drivers may opt in for special handling of their
+ * respective probe routines. This tells the core what to
+ * expect and prefer.
+ *
+ * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
+ * whether probed synchronously or asynchronously.
+ * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which
+ * probing order is not essential for booting the system may
+ * opt into executing their probes asynchronously.
+ * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
+ * their probe routines to run synchronously with driver and
+ * device registration (with the exception of -EPROBE_DEFER
+ * handling - re-probing always ends up being done asynchronously).
+ *
+ * Note that the end goal is to switch the kernel to use asynchronous
+ * probing by default, so annotating drivers with
+ * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
+ * to speed up boot process while we are validating the rest of the
+ * drivers.
+ */
+enum probe_type {
+ PROBE_DEFAULT_STRATEGY,
+ PROBE_PREFER_ASYNCHRONOUS,
+ PROBE_FORCE_SYNCHRONOUS,
+};
+
+/**
+ * struct device_driver - The basic device driver structure
+ * @name: Name of the device driver.
+ * @bus: The bus which the device of this driver belongs to.
+ * @owner: The module owner.
+ * @mod_name: Used for built-in modules.
+ * @suppress_bind_attrs: Disables bind/unbind via sysfs.
+ * @probe_type: Type of the probe (synchronous or asynchronous) to use.
+ * @of_match_table: The open firmware table.
+ * @acpi_match_table: The ACPI match table.
+ * @probe: Called to query the existence of a specific device,
+ * whether this driver can work with it, and bind the driver
+ * to a specific device.
+ * @sync_state: Called to sync device state to software state after all the
+ * state tracking consumers linked to this device (present at
+ * the time of late_initcall) have successfully bound to a
+ * driver. If the device has no consumers, this function will
+ * be called at late_initcall_sync level. If the device has
+ * consumers that are never bound to a driver, this function
+ * will never get called until they do.
+ * @remove: Called when the device is removed from the system to
+ * unbind a device from this driver.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device to sleep mode. Usually to a
+ * low power state.
+ * @resume: Called to bring a device from sleep mode.
+ * @groups: Default attributes that get created by the driver core
+ * automatically.
+ * @dev_groups: Additional attributes attached to device instance once
+ * it is bound to the driver.
+ * @pm: Power management operations of the device which matched
+ * this driver.
+ * @coredump: Called when sysfs entry is written to. The device driver
+ * is expected to call the dev_coredump API resulting in a
+ * uevent.
+ * @p: Driver core's private data, no one other than the driver
+ * core can touch this.
+ *
+ * The device driver-model tracks all of the drivers known to the system.
+ * The main reason for this tracking is to enable the driver core to match
+ * up drivers with new devices. Once drivers are known objects within the
+ * system, however, a number of other things become possible. Device drivers
+ * can export information and configuration variables that are independent
+ * of any specific device.
+ */
+struct device_driver {
+ const char *name;
+ const struct bus_type *bus;
+
+ struct module *owner;
+ const char *mod_name; /* used for built-in modules */
+
+ bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
+ enum probe_type probe_type;
+
+ const struct of_device_id *of_match_table;
+ const struct acpi_device_id *acpi_match_table;
+
+ int (*probe) (struct device *dev);
+ void (*sync_state)(struct device *dev);
+ int (*remove) (struct device *dev);
+ void (*shutdown) (struct device *dev);
+ int (*suspend) (struct device *dev, pm_message_t state);
+ int (*resume) (struct device *dev);
+ const struct attribute_group **groups;
+ const struct attribute_group **dev_groups;
+
+ const struct dev_pm_ops *pm;
+ void (*coredump) (struct device *dev);
+
+ struct driver_private *p;
+};
+
+
+int __must_check driver_register(struct device_driver *drv);
+void driver_unregister(struct device_driver *drv);
+
+struct device_driver *driver_find(const char *name, const struct bus_type *bus);
+bool __init driver_probe_done(void);
+void wait_for_device_probe(void);
+void __init wait_for_init_devices_probe(void);
+
+/* sysfs interface for exporting driver attributes */
+
+struct driver_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device_driver *driver, char *buf);
+ ssize_t (*store)(struct device_driver *driver, const char *buf,
+ size_t count);
+};
+
+#define DRIVER_ATTR_RW(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
+#define DRIVER_ATTR_RO(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
+#define DRIVER_ATTR_WO(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
+
+int __must_check driver_create_file(const struct device_driver *driver,
+ const struct driver_attribute *attr);
+void driver_remove_file(const struct device_driver *driver,
+ const struct driver_attribute *attr);
+
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len);
+int __must_check driver_for_each_device(struct device_driver *drv, struct device *start,
+ void *data, device_iter_t fn);
+struct device *driver_find_device(const struct device_driver *drv,
+ struct device *start, const void *data,
+ device_match_t match);
+
+/**
+ * driver_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @drv: the driver we're iterating
+ * @name: name of the device to match
+ */
+static inline struct device *driver_find_device_by_name(const struct device_driver *drv,
+ const char *name)
+{
+ return driver_find_device(drv, NULL, name, device_match_name);
+}
+
+/**
+ * driver_find_device_by_of_node - device iterator for locating a particular device
+ * by of_node pointer.
+ * @drv: the driver we're iterating
+ * @np: of_node pointer to match.
+ */
+static inline struct device *
+driver_find_device_by_of_node(const struct device_driver *drv,
+ const struct device_node *np)
+{
+ return driver_find_device(drv, NULL, np, device_match_of_node);
+}
+
+/**
+ * driver_find_device_by_fwnode - device iterator for locating a particular device
+ * by fwnode pointer.
+ * @drv: the driver we're iterating
+ * @fwnode: fwnode pointer to match.
+ */
+static inline struct device *
+driver_find_device_by_fwnode(struct device_driver *drv,
+ const struct fwnode_handle *fwnode)
+{
+ return driver_find_device(drv, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * driver_find_device_by_devt - device iterator for locating a particular device
+ * by devt.
+ * @drv: the driver we're iterating
+ * @devt: device number (dev_t) to match.
+ */
+static inline struct device *driver_find_device_by_devt(const struct device_driver *drv,
+ dev_t devt)
+{
+ return driver_find_device(drv, NULL, &devt, device_match_devt);
+}
+
+static inline struct device *driver_find_next_device(const struct device_driver *drv,
+ struct device *start)
+{
+ return driver_find_device(drv, start, NULL, device_match_any);
+}
+
+#ifdef CONFIG_ACPI
+/**
+ * driver_find_device_by_acpi_dev - device iterator for locating a particular
+ * device matching the ACPI_COMPANION device.
+ * @drv: the driver we're iterating
+ * @adev: ACPI_COMPANION device to match.
+ */
+static inline struct device *
+driver_find_device_by_acpi_dev(const struct device_driver *drv,
+ const struct acpi_device *adev)
+{
+ return driver_find_device(drv, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+driver_find_device_by_acpi_dev(const struct device_driver *drv, const void *adev)
+{
+ return NULL;
+}
+#endif
+
+void driver_deferred_probe_add(struct device *dev);
+int driver_deferred_probe_check_state(struct device *dev);
+void driver_init(void);
+
+/**
+ * module_driver() - Helper macro for drivers that don't do anything
+ * special in module init/exit. This eliminates a lot of boilerplate.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @__unregister: unregister function for this driver type
+ * @...: Additional arguments to be passed to __register and __unregister.
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define module_driver(__driver, __register, __unregister, ...) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+module_init(__driver##_init); \
+static void __exit __driver##_exit(void) \
+{ \
+ __unregister(&(__driver) , ##__VA_ARGS__); \
+} \
+module_exit(__driver##_exit);
+
+/**
+ * builtin_driver() - Helper macro for drivers that don't do anything
+ * special in init and have no exit. This eliminates some boilerplate.
+ * Each driver may only use this macro once, and calling it replaces
+ * device_initcall (or in some cases, the legacy __initcall). This is
+ * meant to be a direct parallel of module_driver() above but without
+ * the __exit stuff that is not used for builtin cases.
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @...: Additional arguments to be passed to __register
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define builtin_driver(__driver, __register, ...) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+device_initcall(__driver##_init);
+
+#endif /* _DEVICE_DRIVER_H_ */
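
The module_driver() macro above is meant to be wrapped per bus. A sketch of the pattern (the "foo" bus is hypothetical; module_platform_driver() in <linux/platform_device.h> is a real analogue):

/* A bus subsystem builds its one-line registration helper: */
#define module_foo_driver(__foo_driver) \
	module_driver(__foo_driver, foo_driver_register, \
		      foo_driver_unregister)

/* A driver module then needs no explicit init/exit functions at all:
 *
 *	static struct foo_driver my_driver = { ... };
 *	module_foo_driver(my_driver);
 */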
diff --git a/include/linux/device/faux.h b/include/linux/device/faux.h
new file mode 100644
index 000000000000..9f43c0e46aa4
--- /dev/null
+++ b/include/linux/device/faux.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2025 The Linux Foundation
+ *
+ * A "simple" faux bus that allows devices to be created and added
+ * automatically to it. This is to be used whenever you need to create a
+ * device that is not associated with any "real" system resources, and do
+ * not want to have to deal with bus/driver binding logic. It is
+ * intended to be very simple, with only a create and a destroy function
+ * available.
+ */
+#ifndef _FAUX_DEVICE_H_
+#define _FAUX_DEVICE_H_
+
+#include <linux/container_of.h>
+#include <linux/device.h>
+
+/**
+ * struct faux_device - a "faux" device
+ * @dev: internal struct device of the object
+ *
+ * A simple faux device that can be created/destroyed. To be used when a
+ * driver only needs to have a device to "hang" something off. This can be
+ * used for downloading firmware or other basic tasks. Use this instead of
+ * a struct platform_device if the device has no resources assigned to
+ * it at all.
+ */
+struct faux_device {
+ struct device dev;
+};
+#define to_faux_device(x) container_of_const((x), struct faux_device, dev)
+
+/**
+ * struct faux_device_ops - a set of callbacks for a struct faux_device
+ * @probe: called when a faux device is probed by the driver core
+ * before the device is fully bound to the internal faux bus
+ * code. If probe succeeds, return 0, otherwise return a
+ * negative error number to stop the probe sequence from
+ * succeeding.
+ * @remove: called when a faux device is removed from the system
+ *
+ * Both @probe and @remove are optional; if not needed, set them to NULL.
+ */
+struct faux_device_ops {
+ int (*probe)(struct faux_device *faux_dev);
+ void (*remove)(struct faux_device *faux_dev);
+};
+
+struct faux_device *faux_device_create(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops);
+struct faux_device *faux_device_create_with_groups(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops,
+ const struct attribute_group **groups);
+void faux_device_destroy(struct faux_device *faux_dev);
+
+static inline void *faux_device_get_drvdata(const struct faux_device *faux_dev)
+{
+ return dev_get_drvdata(&faux_dev->dev);
+}
+
+static inline void faux_device_set_drvdata(struct faux_device *faux_dev, void *data)
+{
+ dev_set_drvdata(&faux_dev->dev, data);
+}
+
+#endif /* _FAUX_DEVICE_H_ */
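
A minimal usage sketch for the faux device API above (illustrative; my_faux_probe and the device name are hypothetical):

#include <linux/device/faux.h>

static int my_faux_probe(struct faux_device *fdev)
{
	dev_info(&fdev->dev, "faux device ready\n");
	return 0;
}

static const struct faux_device_ops my_faux_ops = {
	.probe	= my_faux_probe,
	/* .remove is optional and left NULL here */
};

static struct faux_device *my_fdev;

static int __init my_faux_init(void)
{
	/* NULL parent: the device is placed at the default location */
	my_fdev = faux_device_create("my_faux", NULL, &my_faux_ops);
	if (!my_fdev)
		return -ENODEV;
	return 0;
}

static void __exit my_faux_exit(void)
{
	faux_device_destroy(my_fdev);
}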
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 8b64221b432b..0864773a57e8 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -1,17 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/fs.h>
-#ifdef CONFIG_CGROUP_DEVICE
-extern int __devcgroup_inode_permission(struct inode *inode, int mask);
-extern int devcgroup_inode_mknod(int mode, dev_t dev);
+#define DEVCG_ACC_MKNOD 1
+#define DEVCG_ACC_READ 2
+#define DEVCG_ACC_WRITE 4
+#define DEVCG_ACC_MASK (DEVCG_ACC_MKNOD | DEVCG_ACC_READ | DEVCG_ACC_WRITE)
+
+#define DEVCG_DEV_BLOCK 1
+#define DEVCG_DEV_CHAR 2
+#define DEVCG_DEV_ALL 4 /* this represents all devices */
+
+
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access);
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
+ short type, access = 0;
+
+ if (likely(!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)))
+ return 0;
+
if (likely(!inode->i_rdev))
return 0;
- if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
+
+ if (S_ISBLK(inode->i_mode))
+ type = DEVCG_DEV_BLOCK;
+ else /* S_ISCHR by the test above */
+ type = DEVCG_DEV_CHAR;
+
+ if (mask & MAY_WRITE)
+ access |= DEVCG_ACC_WRITE;
+ if (mask & MAY_READ)
+ access |= DEVCG_ACC_READ;
+
+ return devcgroup_check_permission(type, imajor(inode), iminor(inode),
+ access);
+}
+
+static inline int devcgroup_inode_mknod(int mode, dev_t dev)
+{
+ short type;
+
+ if (!S_ISBLK(mode) && !S_ISCHR(mode))
+ return 0;
+
+ if (S_ISCHR(mode) && dev == WHITEOUT_DEV)
return 0;
- return __devcgroup_inode_permission(inode, mask);
+
+ if (S_ISBLK(mode))
+ type = DEVCG_DEV_BLOCK;
+ else
+ type = DEVCG_DEV_CHAR;
+
+ return devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
+ DEVCG_ACC_MKNOD);
}
+
#else
+static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
+{ return 0; }
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{ return 0; }
static inline int devcgroup_inode_mknod(int mode, dev_t dev)
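
A hedged sketch of how a caller consults the helper above before granting access to a device node (my_open_check is hypothetical; MAY_READ/MAY_WRITE come from <linux/fs.h>):

static int my_open_check(struct inode *inode, int mask)
{
	/* trivially 0 for non-device inodes; otherwise the caller's device
	 * cgroup rules decide */
	return devcgroup_inode_permission(inode, mask & (MAY_READ | MAY_WRITE));
}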
diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h
new file mode 100644
index 000000000000..708ca9131402
--- /dev/null
+++ b/include/linux/devm-helpers.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_DEVM_HELPERS_H
+#define __LINUX_DEVM_HELPERS_H
+
+/*
+ * Functions which automatically cancel operations or release resources upon
+ * driver detach.
+ *
+ * These should be helpful to avoid mixing manual and devm-based resource
+ * management, which can be a source of annoying, rarely occurring,
+ * hard-to-reproduce bugs.
+ *
+ * Please take into account that devm-based cancellation may be performed some
+ * time after remove() has run.
+ *
+ * Thus mixing devm and manual resource management can easily cause problems
+ * when unwinding operations with dependencies. An IRQ handler scheduling a
+ * work item is a typical example where IRQs are often devm-managed and
+ * workqueues are manually cleaned up in remove(). If IRQs are not manually
+ * freed in remove() (and this is often the case when we use devm for IRQs),
+ * there is a period of time after remove() - and before the devm-managed
+ * IRQs are freed - where a new IRQ may fire and schedule a work item that
+ * won't be cancelled because remove() has already run.
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+static inline void devm_delayed_work_drop(void *res)
+{
+ cancel_delayed_work_sync(res);
+}
+
+/**
+ * devm_delayed_work_autocancel - Resource-managed delayed work allocation
+ * @dev: Device which lifetime work is bound to
+ * @w: Work item to be queued
+ * @worker: Worker function
+ *
+ * Initialize delayed work which is automatically cancelled when driver is
+ * detached. A few drivers need delayed work which must be cancelled before
+ * driver is detached to avoid accessing removed resources.
+ * devm_delayed_work_autocancel() can be used to omit the explicit
+ * cancellation when driver is detached.
+ */
+static inline int devm_delayed_work_autocancel(struct device *dev,
+ struct delayed_work *w,
+ work_func_t worker)
+{
+ INIT_DELAYED_WORK(w, worker);
+ return devm_add_action(dev, devm_delayed_work_drop, w);
+}
+
+static inline void devm_work_drop(void *res)
+{
+ cancel_work_sync(res);
+}
+
+/**
+ * devm_work_autocancel - Resource-managed work allocation
+ * @dev: Device which lifetime work is bound to
+ * @w: Work to be added (and automatically cancelled)
+ * @worker: Worker function
+ *
+ * Initialize work which is automatically cancelled when driver is detached.
+ * A few drivers need to queue work which must be cancelled before driver
+ * is detached to avoid accessing removed resources.
+ * devm_work_autocancel() can be used to omit the explicit
+ * cancellation when driver is detached.
+ */
+static inline int devm_work_autocancel(struct device *dev,
+ struct work_struct *w,
+ work_func_t worker)
+{
+ INIT_WORK(w, worker);
+ return devm_add_action(dev, devm_work_drop, w);
+}
+
+#endif
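
A sketch of the probe-side usage (my_priv, my_worker and my_probe are hypothetical; devm_delayed_work_autocancel() is the helper defined above):

#include <linux/devm-helpers.h>

struct my_priv {
	struct delayed_work dwork;	/* hypothetical driver state */
};

static void my_worker(struct work_struct *work)
{
	/* to_delayed_work(work) + container_of() to reach struct my_priv */
}

static int my_probe(struct device *dev)
{
	struct my_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* cancel_delayed_work_sync() runs automatically on driver detach,
	 * so remove() needs no explicit cleanup for this work item */
	ret = devm_delayed_work_autocancel(dev, &priv->dwork, my_worker);
	if (ret)
		return ret;

	schedule_delayed_work(&priv->dwork, msecs_to_jiffies(100));
	return 0;
}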
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 100cb4343763..45f746a48dcd 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* -*- linux-c -*- --------------------------------------------------------- *
*
* linux/include/linux/devpts_fs.h
*
* Copyright 1998-2004 H. Peter Anvin -- All Rights Reserved
*
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
* ------------------------------------------------------------------------- */
#ifndef _LINUX_DEVPTS_FS_H
diff --git a/include/linux/dfl.h b/include/linux/dfl.h
new file mode 100644
index 000000000000..1f02db0c1897
--- /dev/null
+++ b/include/linux/dfl.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for DFL driver and device API
+ *
+ * Copyright (C) 2020 Intel Corporation, Inc.
+ */
+
+#ifndef __LINUX_DFL_H
+#define __LINUX_DFL_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * enum dfl_id_type - define the DFL FIU types
+ * @FME_ID: FPGA Management Engine
+ * @PORT_ID: FPGA port that an Accelerated Function Unit (AFU) connects to
+ * @DFL_ID_MAX: number of DFL FIU types
+ */
+enum dfl_id_type {
+ FME_ID = 0,
+ PORT_ID = 1,
+ DFL_ID_MAX,
+};
+
+/**
+ * struct dfl_device - represents a dfl device on the dfl bus
+ *
+ * @dev: generic device interface.
+ * @id: id of the dfl device.
+ * @type: type of DFL FIU of the device. See enum dfl_id_type.
+ * @feature_id: feature identifier local to its DFL FIU type.
+ * @revision: revision of this dfl device feature.
+ * @mmio_res: mmio resource of this dfl device.
+ * @irqs: list of Linux IRQ numbers of this dfl device.
+ * @num_irqs: number of IRQs supported by this dfl device.
+ * @cdev: pointer to DFL FPGA container device this dfl device belongs to.
+ * @id_entry: matched id entry in dfl driver's id table.
+ * @dfh_version: version of DFH for the device
+ * @param_size: size of the block parameters in bytes
+ * @params: pointer to block of parameters copied memory
+ */
+struct dfl_device {
+ struct device dev;
+ int id;
+ u16 type;
+ u16 feature_id;
+ u8 revision;
+ struct resource mmio_res;
+ int *irqs;
+ unsigned int num_irqs;
+ struct dfl_fpga_cdev *cdev;
+ const struct dfl_device_id *id_entry;
+ u8 dfh_version;
+ unsigned int param_size;
+ void *params;
+};
+
+/**
+ * struct dfl_driver - represents a dfl device driver
+ *
+ * @drv: driver model structure.
+ * @id_table: pointer to table of device IDs the driver is interested in.
+ * { } member terminated.
+ * @probe: mandatory callback for device binding.
+ * @remove: callback for device unbinding.
+ */
+struct dfl_driver {
+ struct device_driver drv;
+ const struct dfl_device_id *id_table;
+
+ int (*probe)(struct dfl_device *dfl_dev);
+ void (*remove)(struct dfl_device *dfl_dev);
+};
+
+#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
+#define to_dfl_drv(d) container_of_const(d, struct dfl_driver, drv)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE.
+ */
+#define dfl_driver_register(drv) \
+ __dfl_driver_register(drv, THIS_MODULE)
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
+void dfl_driver_unregister(struct dfl_driver *dfl_drv);
+
+/*
+ * module_dfl_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dfl_driver(__dfl_driver) \
+ module_driver(__dfl_driver, dfl_driver_register, \
+ dfl_driver_unregister)
+
+void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *pcount);
+#endif /* __LINUX_DFL_H */
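
A skeleton dfl driver built on the API above (MY_FEATURE_ID, my_dfl_probe and the driver name are hypothetical; the id-table layout follows struct dfl_device_id from <linux/mod_devicetable.h>):

#include <linux/dfl.h>
#include <linux/mod_devicetable.h>

#define MY_FEATURE_ID	0x23	/* hypothetical feature id */

static int my_dfl_probe(struct dfl_device *dfl_dev)
{
	dev_info(&dfl_dev->dev, "feature 0x%x, revision %u\n",
		 dfl_dev->feature_id, dfl_dev->revision);
	return 0;
}

static const struct dfl_device_id my_dfl_ids[] = {
	{ FME_ID, MY_FEATURE_ID },
	{ }
};
MODULE_DEVICE_TABLE(dfl, my_dfl_ids);

static struct dfl_driver my_dfl_driver = {
	.drv		= { .name = "my-dfl-feature" },
	.id_table	= my_dfl_ids,
	.probe		= my_dfl_probe,
};
module_dfl_driver(my_dfl_driver);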
diff --git a/include/linux/dibs.h b/include/linux/dibs.h
new file mode 100644
index 000000000000..c75607f8a5cf
--- /dev/null
+++ b/include/linux/dibs.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Direct Internal Buffer Sharing
+ *
+ * Definitions for the DIBS module
+ *
+ * Copyright IBM Corp. 2025
+ */
+#ifndef _DIBS_H
+#define _DIBS_H
+
+#include <linux/device.h>
+#include <linux/uuid.h>
+
+/* DIBS - Direct Internal Buffer Sharing - concept
+ * -----------------------------------------------
+ * When multiple systems share the same hardware, dibs fabrics can provide
+ * dibs devices to these systems. The systems use dibs devices of the same
+ * fabric to communicate via dmbs (Direct Memory Buffers). Each dmb has
+ * exactly one owning local dibs device and one remote using dibs device that
+ * is authorized to write into this dmb. This access control is provided by
+ * the dibs fabric.
+ *
+ * Because access to the dmb is based on access to physical memory, it is
+ * lossless and synchronous. The remote devices can directly access any offset
+ * of the dmb.
+ *
+ * Dibs fabrics, dibs devices and dmbs are identified by tokens and ids.
+ * A dibs fabric id is unique within the same hardware (with the exception of
+ * the dibs loopback fabric), a dmb token is unique within the same fabric,
+ * and dibs device gids are guaranteed to be unique within the same fabric and
+ * statistically likely to be globally unique. The exchange of these tokens
+ * and ids between the systems is not part of the dibs concept.
+ *
+ * The dibs layer provides an abstraction between dibs device drivers and dibs
+ * clients.
+ */
+
+/* DMB - Direct Memory Buffer
+ * --------------------------
+ * A dibs client provides a dmb as input buffer for a local receiving
+ * dibs device for exactly one (remote) sending dibs device. Only this
+ * sending device can send data into this dmb using move_data(). Sender
+ * and receiver can be the same device. A dmb belongs to exactly one client.
+ */
+struct dibs_dmb {
+ /* tok - Token for this dmb
+ * Used by remote and local devices and clients to address this dmb.
+ * Provided by dibs fabric. Unique per dibs fabric.
+ */
+ u64 dmb_tok;
+ /* rgid - GID of designated remote sending device */
+ uuid_t rgid;
+ /* cpu_addr - buffer address */
+ void *cpu_addr;
+ /* len - buffer length */
+ u32 dmb_len;
+ /* idx - Index of this DMB on this receiving device */
+ u32 idx;
+ /* VLAN support (deprecated)
+ * In order to write into a vlan-tagged dmb, the remote device needs
+	 * to belong to this vlan
+ */
+ u32 vlan_valid;
+ u32 vlan_id;
+ /* optional, used by device driver */
+ dma_addr_t dma_addr;
+};
+
+/* DIBS events
+ * -----------
+ * Dibs devices can optionally notify dibs clients about events that happened
+ * in the fabric or at the remote device or remote dmb.
+ */
+enum dibs_event_type {
+ /* Buffer event, e.g. a remote dmb was unregistered */
+ DIBS_BUF_EVENT,
+ /* Device event, e.g. a remote dibs device was disabled */
+ DIBS_DEV_EVENT,
+ /* Software event, a dibs client can send an event signal to a
+ * remote dibs device.
+ */
+ DIBS_SW_EVENT,
+	DIBS_OTHER_TYPE
+};
+
+enum dibs_event_subtype {
+ DIBS_BUF_UNREGISTERED,
+ DIBS_DEV_DISABLED,
+ DIBS_DEV_ERR_STATE,
+ DIBS_OTHER_SUBTYPE
+};
+
+struct dibs_event {
+ u32 type;
+ u32 subtype;
+ /* uuid_null if invalid */
+ uuid_t gid;
+ /* zero if invalid */
+ u64 buffer_tok;
+ u64 time;
+ /* additional data or zero */
+ u64 data;
+};
+
+struct dibs_dev;
+
+/* DIBS client
+ * -----------
+ */
+#define MAX_DIBS_CLIENTS 8
+#define NO_DIBS_CLIENT 0xff
+/* All dibs clients have access to all dibs devices.
+ * A dibs client provides the following functions to be called by dibs layer or
+ * dibs device drivers:
+ */
+struct dibs_client_ops {
+ /**
+ * add_dev() - add a dibs device
+ * @dev: device that was added
+ *
+ * Will be called during dibs_register_client() for all existing
+ * dibs devices and whenever a new dibs device is registered.
+	 * dev is usable until dibs_client_ops.del_dev() is called.
+ * *dev is protected by device refcounting.
+ */
+ void (*add_dev)(struct dibs_dev *dev);
+ /**
+ * del_dev() - remove a dibs device
+ * @dev: device to be removed
+ *
+	 * Will be called during dibs_unregister_client() for all existing
+	 * dibs devices and whenever a dibs device is unregistered.
+ * The device has already stopped initiative for this client:
+ * No new handlers will be started.
+ * The device is no longer usable by this client after this call.
+ */
+ void (*del_dev)(struct dibs_dev *dev);
+ /**
+ * handle_irq() - Handle signaling for a DMB
+ * @dev: device that owns the dmb
+ * @idx: Index of the dmb that got signalled
+ * @dmbemask: signaling mask of the dmb
+ *
+ * Handle signaling for a dmb that was registered by this client
+ * for this device.
+ * The dibs device can coalesce multiple signaling triggers into a
+ * single call of handle_irq(). dmbemask can be used to indicate
+ * different kinds of triggers.
+ *
+ * Context: Called in IRQ context by dibs device driver
+ */
+ void (*handle_irq)(struct dibs_dev *dev, unsigned int idx,
+ u16 dmbemask);
+ /**
+ * handle_event() - Handle control information sent by device
+ * @dev: device reporting the event
+	 * @event: dibs event structure
+	 *
+	 * Context: Called in IRQ context by dibs device driver
+ */
+ void (*handle_event)(struct dibs_dev *dev,
+ const struct dibs_event *event);
+};
+
+struct dibs_client {
+ /* client name for logging and debugging purposes */
+ const char *name;
+ const struct dibs_client_ops *ops;
+ /* client index - provided and used by dibs layer */
+ u8 id;
+};
+
+/* Functions to be called by dibs clients:
+ */
+/**
+ * dibs_register_client() - register a client with dibs layer
+ * @client: this client
+ *
+ * Will call client->ops->add_dev() for all existing dibs devices.
+ * Return: zero on success.
+ */
+int dibs_register_client(struct dibs_client *client);
+/**
+ * dibs_unregister_client() - unregister a client with dibs layer
+ * @client: this client
+ *
+ * Will call client->ops->del_dev() for all existing dibs devices.
+ * Return: zero on success.
+ */
+int dibs_unregister_client(struct dibs_client *client);
+
+/* dibs clients can call dibs device ops. */
+
+/* DIBS devices
+ * ------------
+ */
+
+/* Defined fabric id / CHID for all loopback devices:
+ * All dibs loopback devices report this fabric id. In this case devices with
+ * the same fabric id can NOT communicate via dibs. Only loopback devices with
+ * the same dibs device gid can communicate (=same device with itself).
+ */
+#define DIBS_LOOPBACK_FABRIC 0xFFFF
+
+/* A dibs device provides the following functions to be called by dibs clients.
+ * They are mandatory, unless marked 'optional'.
+ */
+struct dibs_dev_ops {
+ /**
+ * get_fabric_id()
+ * @dev: local dibs device
+ *
+ * Only devices on the same dibs fabric can communicate. Fabric_id is
+ * unique inside the same HW system. Use fabric_id for fast negative
+ * checks, but only query_remote_gid() can give a reliable positive
+ * answer:
+ * Different fabric_id: dibs is not possible
+ * Same fabric_id: dibs may be possible or not
+ * (e.g. different HW systems)
+	 * EXCEPTION: DIBS_LOOPBACK_FABRIC denotes a dibs loopback device
+	 *	      that can only communicate with itself. Use dibs_dev.gid
+	 *	      or query_remote_gid() to determine whether sender and
+	 *	      receiver use the same dibs loopback device.
+ * Return: 2 byte dibs fabric id
+ */
+ u16 (*get_fabric_id)(struct dibs_dev *dev);
+ /**
+ * query_remote_gid()
+ * @dev: local dibs device
+ * @rgid: gid of remote dibs device
+ * @vid_valid: if zero, vid will be ignored;
+ * deprecated, ignored if device does not support vlan
+ * @vid: VLAN id; deprecated, ignored if device does not support vlan
+ *
+ * Query whether a remote dibs device is reachable via this local device
+ * and this vlan id.
+ * Return: 0 if remote gid is reachable.
+ */
+ int (*query_remote_gid)(struct dibs_dev *dev, const uuid_t *rgid,
+ u32 vid_valid, u32 vid);
+ /**
+ * max_dmbs()
+ * Return: Max number of DMBs that can be registered for this kind of
+ * dibs_dev
+ */
+ int (*max_dmbs)(void);
+ /**
+ * register_dmb() - allocate and register a dmb
+ * @dev: dibs device
+ * @dmb: dmb struct to be registered
+ * @client: dibs client
+	 *
+	 * The following fields of dmb must provide valid input:
+	 * @rgid: gid of the designated remote sending device
+	 * @dmb_len: buffer length
+	 * @idx: optionally, the requested idx (if non-zero)
+ * @vlan_valid: if zero, vlan_id will be ignored;
+ * deprecated, ignored if device does not support vlan
+ * @vlan_id: deprecated, ignored if device does not support vlan
+ * Upon return in addition the following fields will be valid:
+ * @dmb_tok: for usage by remote and local devices and clients
+ * @cpu_addr: allocated buffer
+ * @idx: dmb index, unique per dibs device
+ * @dma_addr: to be used by device driver,if applicable
+ *
+ * Allocate a dmb buffer and register it with this device and for this
+ * client.
+ * Return: zero on success
+ */
+ int (*register_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb,
+ struct dibs_client *client);
+ /**
+ * unregister_dmb() - unregister and free a dmb
+ * @dev: dibs device
+ * @dmb: dmb struct to be unregistered
+ * The following fields of dmb must provide valid input:
+ * @dmb_tok
+ * @cpu_addr
+ * @idx
+ *
+ * Free dmb.cpu_addr and unregister the dmb from this device.
+ * Return: zero on success
+ */
+ int (*unregister_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb);
+ /**
+ * move_data() - write into a remote dmb
+ * @dev: Local sending dibs device
+ * @dmb_tok: Token of the remote dmb
+ * @idx: signaling index in dmbemask
+ * @sf: signaling flag;
+ * if true, idx will be turned on at target dmbemask mask
+ * and target device will be signaled.
+ * @offset: offset within target dmb
+ * @data: pointer to data to be sent
+ * @size: length of data to be sent, can be zero.
+ *
+ * Use dev to write data of size at offset into a remote dmb
+ * identified by dmb_tok. Data is moved synchronously, *data can
+ * be freed when this function returns.
+ *
+ * If signaling flag (sf) is true, bit number idx bit will be turned
+ * on in the dmbemask mask when handle_irq() is called at the remote
+	 * dibs client that owns the target dmb. The target device may choose
+ * to coalesce the signaling triggers of multiple move_data() calls
+ * to the same target dmb into a single handle_irq() call.
+ * Return: zero on success
+ */
+ int (*move_data)(struct dibs_dev *dev, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size);
+ /**
+ * add_vlan_id() - add dibs device to vlan (optional, deprecated)
+ * @dev: dibs device
+ * @vlan_id: vlan id
+ *
+ * In order to write into a vlan-tagged dmb, the remote device needs
+	 * to belong to this vlan. A device can belong to more than one vlan.
+ * Any device can access an untagged dmb.
+ * Deprecated, only supported for backwards compatibility.
+ * Return: zero on success
+ */
+ int (*add_vlan_id)(struct dibs_dev *dev, u64 vlan_id);
+ /**
+ * del_vlan_id() - remove dibs device from vlan (optional, deprecated)
+ * @dev: dibs device
+ * @vlan_id: vlan id
+ * Return: zero on success
+ */
+ int (*del_vlan_id)(struct dibs_dev *dev, u64 vlan_id);
+ /**
+ * signal_event() - trigger an event at a remote dibs device (optional)
+ * @dev: local dibs device
+ * @rgid: gid of remote dibs device
+	 * @trigger_irq: zero: notification may be coalesced with other events
+	 *		non-zero: notify immediately
+	 * @event_code: 4 byte event code, meaning is defined by dibs client
+	 * @info: 8 bytes of additional information,
+	 *	  meaning is defined by dibs client
+ *
+ * dibs devices can offer support for sending a control event of type
+	 * DIBS_SW_EVENT to a remote dibs device.
+ * NOTE: handle_event() will be called for all registered dibs clients
+ * at the remote device.
+ * Return: zero on success
+ */
+ int (*signal_event)(struct dibs_dev *dev, const uuid_t *rgid,
+ u32 trigger_irq, u32 event_code, u64 info);
+ /**
+ * support_mmapped_rdmb() - can this device provide memory mapped
+ * remote dmbs? (optional)
+ * @dev: dibs device
+ *
+ * A dibs device can provide a kernel address + length, that represent
+	 * a remote target dmb (like MMIO). As an alternative to calling
+	 * move_data(), a dibs client can write into such a ghost send buffer
+ * (= to this kernel address) and the data will automatically
+ * immediately appear in the target dmb, even without calling
+ * move_data().
+ *
+	 * Either all 3 function pointers for support_mmapped_rdmb(),
+ * attach_dmb() and detach_dmb() are defined, or all of them must
+ * be NULL.
+ *
+ * Return: non-zero, if memory mapped remote dmbs are supported.
+ */
+ int (*support_mmapped_rdmb)(struct dibs_dev *dev);
+ /**
+ * attach_dmb() - attach local memory to a remote dmb
+	 * @dev: Local sending dibs device
+ * @dmb: all other parameters are passed in the form of a
+ * dmb struct
+ * TODO: (THIS IS CONFUSING, should be changed)
+ * dmb_tok: (in) Token of the remote dmb, we want to attach to
+ * cpu_addr: (out) MMIO address
+ * dma_addr: (out) MMIO address (if applicable, invalid otherwise)
+ * dmb_len: (out) length of local MMIO region,
+ * equal to length of remote DMB.
+	 * idx:	 (out) index of remote dmb (NOT HELPFUL, should be removed)
+ *
+ * Provides a memory address to the sender that can be used to
+ * directly write into the remote dmb.
+ * Memory is available until detach_dmb is called
+ *
+ * Return: Zero upon success, Error code otherwise
+ */
+ int (*attach_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb);
+ /**
+ * detach_dmb() - Detach the ghost buffer from a remote dmb
+ * @dev: ism device
+ * @token: dmb token of the remote dmb
+ *
+ * No need to free cpu_addr.
+ *
+ * Return: Zero upon success, Error code otherwise
+ */
+ int (*detach_dmb)(struct dibs_dev *dev, u64 token);
+};
+
+struct dibs_dev {
+ struct list_head list;
+ struct device dev;
+ /* To be filled by device driver, before calling dibs_dev_add(): */
+ const struct dibs_dev_ops *ops;
+ uuid_t gid;
+ /* priv pointer for device driver */
+ void *drv_priv;
+
+ /* priv pointer per client; for client usage only */
+ void *priv[MAX_DIBS_CLIENTS];
+
+ /* get this lock before accessing any of the fields below */
+ spinlock_t lock;
+ /* array of client ids indexed by dmb idx;
+ * can be used as indices into priv and subs arrays
+ */
+ u8 *dmb_clientid_arr;
+	/* Sparse array of all dibs clients */
+ struct dibs_client *subs[MAX_DIBS_CLIENTS];
+};
+
+static inline void dibs_set_priv(struct dibs_dev *dev,
+ struct dibs_client *client, void *priv)
+{
+ dev->priv[client->id] = priv;
+}
+
+static inline void *dibs_get_priv(struct dibs_dev *dev,
+ struct dibs_client *client)
+{
+ return dev->priv[client->id];
+}
+
+/* ------- End of client-only functions ----------- */
+
+/* Functions to be called by dibs device drivers:
+ */
+/**
+ * dibs_dev_alloc() - allocate and reference device structure
+ *
+ * The following fields will be valid upon successful return: dev
+ * NOTE: Use put_device(dibs_get_dev(@dibs)) to give up your reference instead
+ * of freeing @dibs directly once you have successfully called this
+ * function.
+ * Return: Pointer to dibs device structure
+ */
+struct dibs_dev *dibs_dev_alloc(void);
+/**
+ * dibs_dev_add() - register with dibs layer and all clients
+ * @dibs: dibs device
+ *
+ * The following fields must be valid upon entry: dev, ops, drv_priv
+ * All fields will be valid upon successful return.
+ * Return: zero on success
+ */
+int dibs_dev_add(struct dibs_dev *dibs);
+/**
+ * dibs_dev_del() - unregister from dibs layer and all clients
+ * @dibs: dibs device
+ */
+void dibs_dev_del(struct dibs_dev *dibs);
+
+#endif /* _DIBS_H */
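
A skeleton dibs client using the ops table above (illustrative; my_client and its callbacks are hypothetical, and the optional handle_event() callback is omitted for brevity):

#include <linux/dibs.h>

static struct dibs_client my_client;	/* forward declaration */

static void my_add_dev(struct dibs_dev *dev)
{
	/* dev stays usable until del_dev(); stash per-device client state */
	dibs_set_priv(dev, &my_client, NULL);
}

static void my_del_dev(struct dibs_dev *dev)
{
}

static void my_handle_irq(struct dibs_dev *dev, unsigned int idx, u16 dmbemask)
{
	/* IRQ context: data arrived in dmb 'idx'; triggers are in dmbemask */
}

static const struct dibs_client_ops my_client_ops = {
	.add_dev	= my_add_dev,
	.del_dev	= my_del_dev,
	.handle_irq	= my_handle_irq,
};

static struct dibs_client my_client = {
	.name	= "my_dibs_client",
	.ops	= &my_client_ops,
};

/* dibs_register_client(&my_client) then calls add_dev() for every
 * already-registered dibs device. */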
diff --git a/include/linux/digsig.h b/include/linux/digsig.h
index 6f85a070bb45..2ace69e41088 100644
--- a/include/linux/digsig.h
+++ b/include/linux/digsig.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Nokia Corporation
* Copyright (C) 2011 Intel Corporation
@@ -5,11 +6,6 @@
* Author:
* Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
* <dmitry.kasatkin@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
*/
#ifndef _DIGSIG_H
@@ -33,7 +29,7 @@ struct pubkey_hdr {
uint32_t timestamp; /* key made, always 0 for now */
uint8_t algo;
uint8_t nmpi;
- char mpi[0];
+ char mpi[];
} __packed;
struct signature_hdr {
@@ -43,7 +39,7 @@ struct signature_hdr {
uint8_t hash;
uint8_t keyid[8];
uint8_t nmpi;
- char mpi[0];
+ char mpi[];
} __packed;
#if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE)
diff --git a/include/linux/dim.h b/include/linux/dim.h
new file mode 100644
index 000000000000..06543fd40fcc
--- /dev/null
+++ b/include/linux/dim.h
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef DIM_H
+#define DIM_H
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct net_device;
+
+/* Number of DIM profiles and period mode. */
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+/*
+ * Number of events between DIM iterations.
+ * This moderates how often the algorithm runs.
+ */
+#define DIM_NEVENTS 64
+
+/*
+ * Does the difference between two values justify taking an action?
+ * We consider a 10% difference significant.
+ */
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+ ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10))
+
+/*
+ * Calculate the gap between two values.
+ * Take wrap-around and variable size into consideration.
+ */
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) \
+ & (BIT_ULL(bits) - 1))
+
+/**
+ * struct dim_cq_moder - Structure for CQ moderation values.
+ * Used for communications between DIM and its consumer.
+ *
+ * @usec: CQ timer suggestion (by DIM)
+ * @pkts: CQ packet counter suggestion (by DIM)
+ * @comps: Completion counter
+ * @cq_period_mode: CQ period count mode (from CQE/EQE)
+ * @rcu: for asynchronous kfree_rcu
+ */
+struct dim_cq_moder {
+ u16 usec;
+ u16 pkts;
+ u16 comps;
+ u8 cq_period_mode;
+ struct rcu_head rcu;
+};
+
+#define DIM_PROFILE_RX BIT(0) /* support rx profile modification */
+#define DIM_PROFILE_TX BIT(1) /* support tx profile modification */
+
+#define DIM_COALESCE_USEC BIT(0) /* support usec field modification */
+#define DIM_COALESCE_PKTS BIT(1) /* support pkts field modification */
+#define DIM_COALESCE_COMPS BIT(2) /* support comps field modification */
+
+/**
+ * struct dim_irq_moder - Structure for irq moderation information.
+ * Used to collect irq moderation related information.
+ *
+ * @profile_flags: DIM_PROFILE_*
+ * @coal_flags: DIM_COALESCE_* for Rx and Tx
+ * @dim_rx_mode: Rx DIM period count mode: CQE or EQE
+ * @dim_tx_mode: Tx DIM period count mode: CQE or EQE
+ * @rx_profile: DIM profile list for Rx
+ * @tx_profile: DIM profile list for Tx
+ * @rx_dim_work: Rx DIM worker scheduled by net_dim()
+ * @tx_dim_work: Tx DIM worker scheduled by net_dim()
+ */
+struct dim_irq_moder {
+ u8 profile_flags;
+ u8 coal_flags;
+ u8 dim_rx_mode;
+ u8 dim_tx_mode;
+ struct dim_cq_moder __rcu *rx_profile;
+ struct dim_cq_moder __rcu *tx_profile;
+ void (*rx_dim_work)(struct work_struct *work);
+ void (*tx_dim_work)(struct work_struct *work);
+};
+
+/**
+ * struct dim_sample - Structure for DIM sample data.
+ * Used for communications between DIM and its consumer.
+ *
+ * @time: Sample timestamp
+ * @pkt_ctr: Number of packets
+ * @byte_ctr: Number of bytes
+ * @event_ctr: Number of events
+ * @comp_ctr: Current completion counter
+ */
+struct dim_sample {
+ ktime_t time;
+ u32 pkt_ctr;
+ u32 byte_ctr;
+ u16 event_ctr;
+ u32 comp_ctr;
+};
+
+/**
+ * struct dim_stats - Structure for DIM stats.
+ * Used for holding current measured rates.
+ *
+ * @ppms: Packets per msec
+ * @bpms: Bytes per msec
+ * @epms: Events per msec
+ * @cpms: Completions per msec
+ * @cpe_ratio: Ratio of completions to events
+ */
+struct dim_stats {
+ int ppms; /* packets per msec */
+ int bpms; /* bytes per msec */
+ int epms; /* events per msec */
+ int cpms; /* completions per msec */
+ int cpe_ratio; /* ratio of completions to events */
+};
+
+/**
+ * struct dim - Main structure for dynamic interrupt moderation (DIM).
+ * Used for holding all information about a specific DIM instance.
+ *
+ * @state: Algorithm state (see below)
+ * @prev_stats: Measured rates from previous iteration (for comparison)
+ * @start_sample: Sampled data at start of current iteration
+ * @measuring_sample: A &dim_sample that is used to update the current events
+ * @work: Work to perform on action required
+ * @priv: A pointer back to the structure that embeds this dim instance
+ * @profile_ix: Current moderation profile
+ * @mode: CQ period count mode
+ * @tune_state: Algorithm tuning state (see below)
+ * @steps_right: Number of steps taken towards higher moderation
+ * @steps_left: Number of steps taken towards lower moderation
+ * @tired: Parking depth counter
+ */
+struct dim {
+ u8 state;
+ struct dim_stats prev_stats;
+ struct dim_sample start_sample;
+ struct dim_sample measuring_sample;
+ struct work_struct work;
+ void *priv;
+ u8 profile_ix;
+ u8 mode;
+ u8 tune_state;
+ u8 steps_right;
+ u8 steps_left;
+ u8 tired;
+};
+
+/**
+ * enum dim_cq_period_mode - Modes for CQ period count
+ *
+ * @DIM_CQ_PERIOD_MODE_START_FROM_EQE: Start counting from EQE
+ * @DIM_CQ_PERIOD_MODE_START_FROM_CQE: Start counting from CQE (implies timer reset)
+ * @DIM_CQ_PERIOD_NUM_MODES: Number of modes
+ */
+enum dim_cq_period_mode {
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+ DIM_CQ_PERIOD_NUM_MODES
+};
+
+/**
+ * enum dim_state - DIM algorithm states
+ *
+ * These will determine if the algorithm is in a valid state to start an iteration.
+ *
+ * @DIM_START_MEASURE: This is the first iteration (also after applying a new profile)
+ * @DIM_MEASURE_IN_PROGRESS: Algorithm is already in progress - check if
+ * an action needs to be performed
+ * @DIM_APPLY_NEW_PROFILE: DIM consumer is currently applying a profile - no need to measure
+ */
+enum dim_state {
+ DIM_START_MEASURE,
+ DIM_MEASURE_IN_PROGRESS,
+ DIM_APPLY_NEW_PROFILE,
+};
+
+/**
+ * enum dim_tune_state - DIM algorithm tune states
+ *
+ * These will determine which action the algorithm should perform.
+ *
+ * @DIM_PARKING_ON_TOP: Algorithm found a local top point - exit on significant difference
+ * @DIM_PARKING_TIRED: Algorithm found a deep top point - don't exit if tired > 0
+ * @DIM_GOING_RIGHT: Algorithm is currently trying higher moderation levels
+ * @DIM_GOING_LEFT: Algorithm is currently trying lower moderation levels
+ */
+enum dim_tune_state {
+ DIM_PARKING_ON_TOP,
+ DIM_PARKING_TIRED,
+ DIM_GOING_RIGHT,
+ DIM_GOING_LEFT,
+};
+
+/**
+ * enum dim_stats_state - DIM algorithm statistics states
+ *
+ * These will determine the verdict of current iteration.
+ *
+ * @DIM_STATS_WORSE: Current iteration shows worse performance than before
+ * @DIM_STATS_SAME: Current iteration shows the same performance as before
+ * @DIM_STATS_BETTER: Current iteration shows better performance than before
+ */
+enum dim_stats_state {
+ DIM_STATS_WORSE,
+ DIM_STATS_SAME,
+ DIM_STATS_BETTER,
+};
+
+/**
+ * enum dim_step_result - DIM algorithm step results
+ *
+ * These describe the result of a step.
+ *
+ * @DIM_STEPPED: Performed a regular step
+ * @DIM_TOO_TIRED: Same kind of step was done multiple times - should go to
+ * tired parking
+ * @DIM_ON_EDGE: Stepped to the leftmost/rightmost profile
+ */
+enum dim_step_result {
+ DIM_STEPPED,
+ DIM_TOO_TIRED,
+ DIM_ON_EDGE,
+};
+
+/**
+ * net_dim_init_irq_moder - collect information to initialize irq moderation
+ * @dev: target network device
+ * @profile_flags: Rx or Tx profile modification capability
+ * @coal_flags: irq moderation params flags
+ * @rx_mode: CQ period mode for Rx
+ * @tx_mode: CQ period mode for Tx
+ * @rx_dim_work: Rx worker called after dim decision
+ * @tx_dim_work: Tx worker called after dim decision
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags,
+ u8 coal_flags, u8 rx_mode, u8 tx_mode,
+ void (*rx_dim_work)(struct work_struct *work),
+ void (*tx_dim_work)(struct work_struct *work));
+
+/**
+ * net_dim_free_irq_moder - free fields for irq moderation
+ * @dev: target network device
+ */
+void net_dim_free_irq_moder(struct net_device *dev);
+
+/**
+ * net_dim_setting - initialize DIM's cq mode and schedule worker
+ * @dev: target network device
+ * @dim: DIM context
+ * @is_tx: true indicates the tx direction, false indicates the rx direction
+ */
+void net_dim_setting(struct net_device *dev, struct dim *dim, bool is_tx);
+
+/**
+ * net_dim_work_cancel - synchronously cancel dim's worker
+ * @dim: DIM context
+ */
+void net_dim_work_cancel(struct dim *dim);
+
+/**
+ * net_dim_get_rx_irq_moder - get DIM rx results based on profile_ix
+ * @dev: target network device
+ * @dim: DIM context
+ *
+ * Return: DIM irq moderation
+ */
+struct dim_cq_moder
+net_dim_get_rx_irq_moder(struct net_device *dev, struct dim *dim);
+
+/**
+ * net_dim_get_tx_irq_moder - get DIM tx results based on profile_ix
+ * @dev: target network device
+ * @dim: DIM context
+ *
+ * Return: DIM irq moderation
+ */
+struct dim_cq_moder
+net_dim_get_tx_irq_moder(struct net_device *dev, struct dim *dim);
+
+/**
+ * net_dim_set_rx_mode - set DIM rx cq mode
+ * @dev: target network device
+ * @rx_mode: target rx cq mode
+ */
+void net_dim_set_rx_mode(struct net_device *dev, u8 rx_mode);
+
+/**
+ * net_dim_set_tx_mode - set DIM tx cq mode
+ * @dev: target network device
+ * @tx_mode: target tx cq mode
+ */
+void net_dim_set_tx_mode(struct net_device *dev, u8 tx_mode);
+
+/**
+ * dim_on_top - check if current state is a good place to stop (top location)
+ * @dim: DIM context
+ *
+ * Check if current profile is a good place to park at.
+ * This will reduce the frequency of DIM checks, as we assume we probably
+ * shouldn't change profiles unless the traffic pattern changes.
+ */
+bool dim_on_top(struct dim *dim);
+
+/**
+ * dim_turn - change profile altering direction
+ * @dim: DIM context
+ *
+ * Go left if we were going right and vice-versa.
+ * Do nothing if currently parking.
+ */
+void dim_turn(struct dim *dim);
+
+/**
+ * dim_park_on_top - enter a parking state on a top location
+ * @dim: DIM context
+ *
+ * Enter parking state.
+ * Clear all movement history.
+ */
+void dim_park_on_top(struct dim *dim);
+
+/**
+ * dim_park_tired - enter a tired parking state
+ * @dim: DIM context
+ *
+ * Enter parking state.
+ * Clear all movement history and reduce the frequency of DIM checks.
+ */
+void dim_park_tired(struct dim *dim);
+
+/**
+ * dim_calc_stats - calculate the difference between two samples
+ * @start: start sample
+ * @end: end sample
+ * @curr_stats: delta between samples
+ *
+ * Calculate the delta between two samples (in data rates).
+ * Takes into consideration counter wrap-around.
+ * Returned boolean indicates whether curr_stats are reliable.
+ */
+bool dim_calc_stats(const struct dim_sample *start,
+ const struct dim_sample *end,
+ struct dim_stats *curr_stats);
+
+/**
+ * dim_update_sample - set a sample's fields with given values
+ * @event_ctr: number of events to set
+ * @packets: number of packets to set
+ * @bytes: number of bytes to set
+ * @s: DIM sample
+ */
+static inline void
+dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s)
+{
+ s->time = ktime_get();
+ s->pkt_ctr = packets;
+ s->byte_ctr = bytes;
+ s->event_ctr = event_ctr;
+}
+
+/**
+ * dim_update_sample_with_comps - set a sample's fields with given
+ * values including the completion parameter
+ * @event_ctr: number of events to set
+ * @packets: number of packets to set
+ * @bytes: number of bytes to set
+ * @comps: number of completions to set
+ * @s: DIM sample
+ */
+static inline void
+dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps,
+ struct dim_sample *s)
+{
+ dim_update_sample(event_ctr, packets, bytes, s);
+ s->comp_ctr = comps;
+}
+
+/* Net DIM */
+
+/**
+ * net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile
+ * @cq_period_mode: CQ period mode
+ * @ix: Profile index
+ */
+struct dim_cq_moder net_dim_get_rx_moderation(u8 cq_period_mode, int ix);
+
+/**
+ * net_dim_get_def_rx_moderation - provide the default RX moderation
+ * @cq_period_mode: CQ period mode
+ */
+struct dim_cq_moder net_dim_get_def_rx_moderation(u8 cq_period_mode);
+
+/**
+ * net_dim_get_tx_moderation - provide a CQ moderation object for the given TX profile
+ * @cq_period_mode: CQ period mode
+ * @ix: Profile index
+ */
+struct dim_cq_moder net_dim_get_tx_moderation(u8 cq_period_mode, int ix);
+
+/**
+ * net_dim_get_def_tx_moderation - provide the default TX moderation
+ * @cq_period_mode: CQ period mode
+ */
+struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
+
+/**
+ * net_dim - main DIM algorithm entry point
+ * @dim: DIM instance information
+ * @end_sample: Current data measurement
+ *
+ * Called by the consumer.
+ * This is the main logic of the algorithm, where data is processed in order
+ * to decide on next required action.
+ */
+void net_dim(struct dim *dim, const struct dim_sample *end_sample);
+
+/* RDMA DIM */
+
+/*
+ * RDMA DIM profile:
+ * the profile array must have RDMA_DIM_PARAMS_NUM_PROFILES entries.
+ */
+#define RDMA_DIM_PARAMS_NUM_PROFILES 9
+#define RDMA_DIM_START_PROFILE 0
+
+/**
+ * rdma_dim - Runs the adaptive moderation.
+ * @dim: The moderation struct.
+ * @completions: The number of completions collected in this round.
+ *
+ * Each call to rdma_dim takes the latest amount of completions that
+ * have been collected and counts them as a new event.
+ * Once enough events have been collected the algorithm decides a new
+ * moderation level.
+ */
+void rdma_dim(struct dim *dim, u64 completions);
+
+#endif /* DIM_H */
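As a usage illustration for the API above (a sketch, not part of the header):
a NAPI driver typically feeds net_dim() one sample per poll cycle and applies
the verdict from the work handler it registered on dim->work. Here my_ring,
its counters and my_apply_moderation() are hypothetical; only the dim_*(),
net_dim_*() types and calls come from dim.h.

/* Hypothetical consumer; error paths and locking elided. */
static void my_rx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct my_ring *ring = dim->priv;	/* assumed to point at the ring */
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	my_apply_moderation(ring, moder.usec, moder.pkts);
	dim->state = DIM_START_MEASURE;
}

static void my_napi_poll_done(struct my_ring *ring)
{
	struct dim_sample sample;

	dim_update_sample(ring->events, ring->packets, ring->bytes, &sample);
	net_dim(&ring->dim, &sample);
}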
diff --git a/include/linux/dio.h b/include/linux/dio.h
index 2cc0fd00463f..464331c4c4a7 100644
--- a/include/linux/dio.h
+++ b/include/linux/dio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* header file for DIO boards for the HP300 architecture.
* Maybe this should handle DIO-II later?
* The general structure of this is vaguely based on how
@@ -67,7 +68,7 @@ struct dio_bus {
};
extern struct dio_bus dio_bus; /* Single DIO bus */
-extern struct bus_type dio_bus_type;
+extern const struct bus_type dio_bus_type;
/*
* DIO device IDs
@@ -92,7 +93,7 @@ struct dio_driver {
struct device_driver driver;
};
-#define to_dio_driver(drv) container_of(drv, struct dio_driver, driver)
+#define to_dio_driver(drv) container_of_const(drv, struct dio_driver, driver)
/* DIO/DIO-II boards all have the following 8bit registers.
* These are offsets from the base of the device.
@@ -246,11 +247,6 @@ extern int dio_create_sysfs_dev_files(struct dio_dev *);
/* New-style probing */
extern int dio_register_driver(struct dio_driver *);
extern void dio_unregister_driver(struct dio_driver *);
-extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z);
-static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d)
-{
- return d->driver;
-}
#define dio_resource_start(d) ((d)->resource.start)
#define dio_resource_end(d) ((d)->resource.end)
diff --git a/include/linux/dirent.h b/include/linux/dirent.h
index f072fb8d10a3..99002220cd45 100644
--- a/include/linux/dirent.h
+++ b/include/linux/dirent.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DIRENT_H
#define _LINUX_DIRENT_H
@@ -6,7 +7,7 @@ struct linux_dirent64 {
s64 d_off;
unsigned short d_reclen;
unsigned char d_type;
- char d_name[0];
+ char d_name[];
};
#endif
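For context on the d_name change above: the zero-length array is replaced by
a C99 flexible array member, so allocations must size the trailing name
explicitly. A minimal sketch, using struct_size() from <linux/overflow.h>;
the surrounding code is hypothetical:

/* Sketch: allocate a linux_dirent64 with room for "name" plus its NUL. */
size_t namelen = strlen(name) + 1;
struct linux_dirent64 *d = kmalloc(struct_size(d, d_name, namelen),
				   GFP_KERNEL);

if (d)
	memcpy(d->d_name, name, namelen);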
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index d02da2c6fc1a..7e7b45b0d097 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -1,12 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
-** This copyrighted material is made available to anyone wishing to use,
-** modify, copy, or redistribute it subject to the terms and conditions
-** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
@@ -37,6 +35,9 @@ struct dlm_lockspace_ops {
int num_slots, int our_slot, uint32_t generation);
};
+/* only relevant for kernel lockspaces, will be removed in future */
+#define DLM_LSFL_SOFTIRQ __DLM_LSFL_RESERVED0
+
/*
* dlm_new_lockspace
*
@@ -55,14 +56,13 @@ struct dlm_lockspace_ops {
* The dlm should not use a resource directory, but statically assign
* resource mastery to nodes based on the name hash that is otherwise
* used to select the directory node. Must be the same on all nodes.
- * DLM_LSFL_TIMEWARN
- * The dlm should emit netlink messages if locks have been waiting
- * for a configurable amount of time. (Unused.)
- * DLM_LSFL_FS
- * The lockspace user is in the kernel (i.e. filesystem). Enables
- * direct bast/cast callbacks.
* DLM_LSFL_NEWEXCL
* dlm_new_lockspace() should return -EEXIST if the lockspace exists.
+ * DLM_LSFL_SOFTIRQ
+ * dlm request callbacks (ast, bast) are softirq safe. This flag should
+ * be preferred by users and will become the default in the future. If
+ * set, the strongest context for the ast and bast callbacks is softirq,
+ * which avoids an additional context switch.
*
* lvblen: length of lvb in bytes. Must be multiple of 8.
* dlm_new_lockspace() returns an error if this does not match
@@ -88,12 +88,43 @@ int dlm_new_lockspace(const char *name, const char *cluster,
int *ops_result, dlm_lockspace_t **lockspace);
/*
+ * dlm_release_lockspace() release_option values:
+ *
+ * DLM_RELEASE_NO_LOCKS returns -EBUSY if any locks (lkb's)
+ * exist in the local lockspace.
+ *
+ * DLM_RELEASE_UNUSED previous value that is no longer used.
+ *
+ * DLM_RELEASE_NORMAL releases the lockspace regardless of any
+ * locks managed in the local lockspace.
+ *
+ * DLM_RELEASE_NO_EVENT releases the lockspace regardless of any
+ * locks managed in the local lockspace, and does not submit
+ * a leave event to the cluster manager, so other nodes will
+ * not be notified that the node should be removed from the
+ * list of lockspace members.
+ *
+ * DLM_RELEASE_RECOVER like DLM_RELEASE_NORMAL, but the remaining
+ * nodes will handle the removal of the node as if the node
+ * had failed, e.g. the recover_slot() callback would be used.
+ */
+#define DLM_RELEASE_NO_LOCKS 0
+#define DLM_RELEASE_UNUSED 1
+#define DLM_RELEASE_NORMAL 2
+#define DLM_RELEASE_NO_EVENT 3
+#define DLM_RELEASE_RECOVER 4
+#define __DLM_RELEASE_MAX DLM_RELEASE_RECOVER
+
+/*
* dlm_release_lockspace
*
* Stop a lockspace.
+ *
+ * release_option: see DLM_RELEASE values above.
*/
-int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
+int dlm_release_lockspace(dlm_lockspace_t *lockspace,
+ unsigned int release_option);
/*
* dlm_lock
@@ -129,14 +160,21 @@ int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
* call.
*
* AST routines should not block (at least not for long), but may make
- * any locking calls they please.
+ * any locking calls they please. If kernel users pass DLM_LSFL_SOFTIRQ
+ * to dlm_new_lockspace(), the ast and bast callbacks can be processed
+ * in softirq context. Note that some callbacks run in the same context
+ * as the DLM lock request API: a user must not hold a lock while
+ * calling the lock request API and then try to acquire that same lock
+ * in the callback, as this ends in lock recursion. Newer
+ * implementations should use DLM_LSFL_SOFTIRQ.
*/
int dlm_lock(dlm_lockspace_t *lockspace,
int mode,
struct dlm_lksb *lksb,
uint32_t flags,
- void *name,
+ const void *name,
unsigned int namelen,
uint32_t parent_lkid,
void (*lockast) (void *astarg),
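Tying the new flag and release options together (a sketch under assumptions,
not from the header): a kernel user might create a softirq-safe lockspace and
later release it without forcing. The lockspace and cluster names are made
up, and ops/ops_arg/ops_result are left NULL for brevity.

/* Sketch: 64-byte LVBs (a multiple of 8), no lockspace ops. */
static int my_lockspace_demo(void)
{
	dlm_lockspace_t *ls;
	int error;

	error = dlm_new_lockspace("myfs", "mycluster", DLM_LSFL_SOFTIRQ,
				  64, NULL, NULL, NULL, &ls);
	if (error)
		return error;

	/* dlm_lock()/dlm_unlock() calls would go here */

	return dlm_release_lockspace(ls, DLM_RELEASE_NORMAL);
}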
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
index 95ad387a7769..15fc856d198c 100644
--- a/include/linux/dlm_plock.h
+++ b/include/linux/dlm_plock.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License v.2.
*/
#ifndef __DLM_PLOCK_DOT_H__
#define __DLM_PLOCK_DOT_H__
@@ -14,6 +11,8 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
int cmd, struct file_lock *fl);
int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
+int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl);
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
#endif
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
new file mode 100644
index 000000000000..d1503b815a78
--- /dev/null
+++ b/include/linux/dm-bufio.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009-2011 Red Hat, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_BUFIO_H
+#define _LINUX_DM_BUFIO_H
+
+#include <linux/blkdev.h>
+#include <linux/types.h>
+
+/*----------------------------------------------------------------*/
+
+struct dm_bufio_client;
+struct dm_buffer;
+
+/*
+ * Flags for dm_bufio_client_create
+ */
+#define DM_BUFIO_CLIENT_NO_SLEEP 0x1
+
+/*
+ * Create a buffered IO cache on a given device
+ */
+struct dm_bufio_client *
+dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+ unsigned int reserved_buffers, unsigned int aux_size,
+ void (*alloc_callback)(struct dm_buffer *),
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags);
+
+/*
+ * Release a buffered IO cache.
+ */
+void dm_bufio_client_destroy(struct dm_bufio_client *c);
+
+void dm_bufio_client_reset(struct dm_bufio_client *c);
+
+/*
+ * Set the sector range.
+ * When this function is called, there must be no I/O in progress on the bufio
+ * client.
+ */
+void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
+
+/*
+ * WARNING: to avoid deadlocks, these conditions are observed:
+ *
+ * - At most one thread can hold at most "reserved_buffers" simultaneously.
+ * - Each other thread can hold at most one buffer.
+ * - Threads which call only dm_bufio_get can hold an unlimited number of
+ * buffers.
+ */
+
+/*
+ * Read a given block from disk. Returns a pointer to the data, and sets
+ * *bp to a dm_buffer that can be used to release the buffer or to make
+ * it dirty.
+ */
+void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp, unsigned short ioprio);
+
+/*
+ * Like dm_bufio_read, but return buffer from cache, don't read
+ * it. If the buffer is not in the cache, return NULL.
+ */
+void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Like dm_bufio_read, but don't read anything from the disk. It is
+ * expected that the caller initializes the buffer and marks it dirty.
+ */
+void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Prefetch the specified blocks to the cache.
+ * The function starts to read the blocks and returns without waiting for
+ * I/O to finish.
+ */
+void dm_bufio_prefetch(struct dm_bufio_client *c,
+ sector_t block, unsigned int n_blocks);
+
+void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c,
+ sector_t block, unsigned int n_blocks,
+ unsigned short ioprio);
+
+/*
+ * Release a reference obtained with dm_bufio_{read,get,new}. The data
+ * pointer and dm_buffer pointer are no longer valid after this call.
+ */
+void dm_bufio_release(struct dm_buffer *b);
+
+/*
+ * Mark a buffer dirty. It should be called after the buffer is modified.
+ *
+ * In case of memory pressure, the buffer may be written after
+ * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So
+ * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but
+ * the actual writing may occur earlier.
+ */
+void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
+
+/*
+ * Mark a part of the buffer dirty.
+ *
+ * The specified part of the buffer is scheduled to be written. dm-bufio may
+ * write the specified part of the buffer or it may write a larger superset.
+ */
+void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+ unsigned int start, unsigned int end);
+
+/*
+ * Initiate writing of dirty buffers, without waiting for completion.
+ */
+void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
+
+/*
+ * Write all dirty buffers. Guarantees that all dirty buffers created prior
+ * to this call are on disk when this call exits.
+ */
+int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
+
+/*
+ * Send an empty write barrier to the device to flush hardware disk cache.
+ */
+int dm_bufio_issue_flush(struct dm_bufio_client *c);
+
+/*
+ * Send a discard request to the underlying device.
+ */
+int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);
+
+/*
+ * Free the given buffer.
+ * This is just a hint; if the buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
+
+/*
+ * Free the given range of buffers.
+ * This is just a hint; if the buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks);
+
+/*
+ * Set the minimum number of buffers before cleanup happens.
+ */
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n);
+
+unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c);
+sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
+sector_t dm_bufio_get_block_number(struct dm_buffer *b);
+void *dm_bufio_get_block_data(struct dm_buffer *b);
+void *dm_bufio_get_aux_data(struct dm_buffer *b);
+struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);
+
+/*----------------------------------------------------------------*/
+
+#endif
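As an illustration of the intended call pattern (a sketch: the block size,
reserved-buffer count and zero-fill use case are invented, only the
dm_bufio_*() calls come from this header):

/* Sketch: zero one 4 KiB block through a bufio client, then flush. */
static int my_zero_block(struct block_device *bdev, sector_t block)
{
	struct dm_bufio_client *c;
	struct dm_buffer *b;
	void *data;
	int r = 0;

	c = dm_bufio_client_create(bdev, 4096, 4, 0, NULL, NULL, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	data = dm_bufio_new(c, block, &b);	/* no read; caller fills */
	if (IS_ERR(data)) {
		r = PTR_ERR(data);
	} else {
		memset(data, 0, 4096);
		dm_bufio_mark_buffer_dirty(b);
		dm_bufio_release(b);
		r = dm_bufio_write_dirty_buffers(c);
	}

	dm_bufio_client_destroy(c);
	return r;
}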
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 7084503c3405..0b10faedb26a 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -33,7 +34,7 @@ struct dm_dirty_log_type {
struct list_head list;
int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void (*dtr)(struct dm_dirty_log *log);
/*
@@ -96,7 +97,7 @@ struct dm_dirty_log_type {
* Do not confuse this function with 'in_sync()', one
* tells you if an area is synchronised, the other
* assigns recovery work.
- */
+ */
int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
/*
@@ -116,7 +117,7 @@ struct dm_dirty_log_type {
* Support function for mirror status requests.
*/
int (*status)(struct dm_dirty_log *log, status_type_t status_type,
- char *result, unsigned maxlen);
+ char *result, unsigned int maxlen);
/*
* is_remote_recovering is necessary for cluster mirroring. It provides
@@ -139,7 +140,7 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
struct dm_target *ti,
int (*flush_callback_fn)(struct dm_target *ti),
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index a52c6580cc9a..7b2968612b7e 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -13,6 +14,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
struct dm_io_region {
struct block_device *bdev;
@@ -25,7 +27,7 @@ struct page_list {
struct page *page;
};
typedef void (*io_notify_fn)(unsigned long error, void *context);
enum dm_io_mem_type {
DM_IO_PAGE_LIST,/* Page list */
@@ -37,7 +39,7 @@ enum dm_io_mem_type {
struct dm_io_memory {
enum dm_io_mem_type type;
- unsigned offset;
+ unsigned int offset;
union {
struct page_list *pl;
@@ -57,8 +59,7 @@ struct dm_io_notify {
*/
struct dm_io_client;
struct dm_io_request {
- int bi_op; /* REQ_OP */
- int bi_op_flags; /* req_flag_bits */
+ blk_opf_t bi_opf; /* Request type and flags */
struct dm_io_memory mem; /* Memory to use for io */
struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
struct dm_io_client *client; /* Client memory handler */
@@ -78,8 +79,9 @@ void dm_io_client_destroy(struct dm_io_client *client);
* Each bit in the optional 'sync_error_bits' bitset indicates whether an
* error occurred doing io to the corresponding region.
*/
-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
- struct dm_io_region *region, unsigned long *sync_error_bits);
+int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+ struct dm_io_region *region, unsigned long *sync_error_bits,
+ unsigned short ioprio);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_IO_H */
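To show how the new blk_opf_t field and ioprio argument fit together, a
sketch under assumptions: DM_IO_KMEM and the ptr.addr union member come from
the full header, and IOPRIO_DEFAULT from <linux/ioprio.h>.

/* Sketch: synchronous 4 KiB read; notify.fn == NULL makes dm_io() block. */
static int my_sync_read(struct dm_io_client *client,
			struct block_device *bdev, void *buf)
{
	struct dm_io_region region = {
		.bdev = bdev,
		.sector = 0,
		.count = 8,		/* 4 KiB in 512-byte sectors */
	};
	struct dm_io_request req = {
		.bi_opf = REQ_OP_READ,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.notify.fn = NULL,	/* synchronous */
		.client = client,
	};

	return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
}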
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index cfac8588ed56..51fb1af0b63e 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 - 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -23,11 +24,11 @@
#define DM_KCOPYD_WRITE_SEQ 2
struct dm_kcopyd_throttle {
- unsigned throttle;
- unsigned num_io_jobs;
- unsigned io_period;
- unsigned total_period;
- unsigned last_jiffies;
+ unsigned int throttle;
+ unsigned int num_io_jobs;
+ unsigned int io_period;
+ unsigned int total_period;
+ unsigned int last_jiffies;
};
/*
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(name, description)
struct dm_kcopyd_client;
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
+void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc);
/*
* Submit a copy job to kcopyd. This is built on top of the
@@ -59,12 +61,12 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
* read_err is a boolean,
* write_err is a bitset, with 1 bit for each destination region
*/
typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
void *context);
-int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
/*
* Prepare a callback and submit it via the kcopyd thread.
@@ -79,11 +81,11 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
*/
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
dm_kcopyd_notify_fn fn, void *context);
void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
-int dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_KCOPYD_H */
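A brief usage sketch of the now void-returning copy API (the caller structure
and synchronous wrapper are assumptions; completion is reported only through
the notify callback):

/* Sketch: copy one region to one destination and wait for the callback. */
static void my_copy_done(int read_err, unsigned long write_err, void *context)
{
	complete(context);	/* struct completion supplied by the caller */
}

static void my_copy_sync(struct dm_kcopyd_client *kc,
			 struct dm_io_region *from, struct dm_io_region *to)
{
	DECLARE_COMPLETION_ONSTACK(done);

	dm_kcopyd_copy(kc, from, 1, to, 0, my_copy_done, &done);
	wait_for_completion(&done);
}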
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index 9e2a7a401df5..3079ed93dd2d 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -12,9 +13,11 @@
#include <linux/dm-dirty-log.h>
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Region hash
- *----------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
struct dm_region_hash;
struct dm_region;
@@ -37,7 +40,7 @@ struct dm_region_hash *dm_region_hash_create(
struct bio_list *bios),
void (*wakeup_workers)(void *context),
void (*wakeup_all_recovery_waiters)(void *context),
- sector_t target_begin, unsigned max_recovery,
+ sector_t target_begin, unsigned int max_recovery,
struct dm_dirty_log *log, uint32_t region_size,
region_t nr_regions);
void dm_region_hash_destroy(struct dm_region_hash *rh);
diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h
new file mode 100644
index 000000000000..3ac6dbaeaa37
--- /dev/null
+++ b/include/linux/dm-verity-loadpin.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_DM_VERITY_LOADPIN_H
+#define __LINUX_DM_VERITY_LOADPIN_H
+
+#include <linux/list.h>
+
+struct block_device;
+
+extern struct list_head dm_verity_loadpin_trusted_root_digests;
+
+struct dm_verity_loadpin_trusted_root_digest {
+ struct list_head node;
+ unsigned int len;
+ u8 data[] __counted_by(len);
+};
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY)
+bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev);
+#else
+static inline bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
+{
+ return false;
+}
+#endif
+
+#endif /* __LINUX_DM_VERITY_LOADPIN_H */
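As a hint at the intended consumer (LoadPin), a minimal hedged sketch; the
file-to-bdev plumbing shown here is simplified and hypothetical:

/* Sketch: allow reads only from a dm-verity device with a trusted root. */
static bool my_backing_dev_trusted(struct file *file)
{
	struct super_block *sb = file_inode(file)->i_sb;

	return sb->s_bdev && dm_verity_loadpin_is_bdev_trusted(sb->s_bdev);
}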
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
index 841925fbfe8a..df0341dbb451 100644
--- a/include/linux/dm9000.h
+++ b/include/linux/dm9000.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* include/linux/dm9000.h
*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* Header file for dm9000 platform data
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __DM9000_PLATFORM_DATA
diff --git a/include/linux/dma-buf-mapping.h b/include/linux/dma-buf-mapping.h
new file mode 100644
index 000000000000..a3c0ce2d3a42
--- /dev/null
+++ b/include/linux/dma-buf-mapping.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * DMA BUF Mapping Helpers
+ *
+ */
+#ifndef __DMA_BUF_MAPPING_H__
+#define __DMA_BUF_MAPPING_H__
+#include <linux/dma-buf.h>
+
+struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
+ struct p2pdma_provider *provider,
+ struct dma_buf_phys_vec *phys_vec,
+ size_t nr_ranges, size_t size,
+ enum dma_data_direction dir);
+void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
+ enum dma_data_direction dir);
+#endif
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 79f27d60ec66..0bc492090237 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Header file for dma buffer sharing framework.
*
@@ -8,22 +9,11 @@
* Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
* Daniel Vetter <daniel@ffwll.ch> for their support in creation and
* refining of this idea.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
+#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
@@ -32,6 +22,7 @@
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>
+#include <linux/pci-p2pdma.h>
struct device;
struct dma_buf;
@@ -39,13 +30,6 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: maps a page from the buffer into kernel address
- * space, users may not block until the subsequent unmap call.
- * This callback must not sleep.
- * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
- * This Callback must not sleep.
- * @map: maps a page from the buffer into kernel address space.
- * @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer
@@ -55,14 +39,14 @@ struct dma_buf_ops {
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
- * &device can access the provided &dma_buf. Exporters which support
- * buffer objects in special locations like VRAM or device-specific
- * carveout areas should check whether the buffer could be move to
- * system memory (or directly accessed by the provided device), and
- * otherwise need to fail the attach operation.
+ * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+ * which support buffer objects in special locations like VRAM or
+ * device-specific carveout areas should check whether the buffer could
+ * be moved to system memory (or directly accessed by the provided
+ * device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
- * allocation fullfills the DMA constraints of the new device. If this
+ * allocation fulfills the DMA constraints of the new device. If this
* is not the case, and the allocation cannot be moved, it should also
* fail the attach operation.
*
@@ -77,8 +61,7 @@ struct dma_buf_ops {
* to signal that backing storage is already allocated and incompatible
* with the requirements of requesting device.
*/
- int (*attach)(struct dma_buf *, struct device *,
- struct dma_buf_attachment *);
+ int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
/**
* @detach:
@@ -92,13 +75,50 @@ struct dma_buf_ops {
void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
/**
+ * @pin:
+ *
+ * This is called by dma_buf_pin() and lets the exporter know that the
+ * DMA-buf can't be moved any more. Ideally, the exporter should
+ * pin the buffer so that it is generally accessible by all
+ * devices.
+ *
+ * This is called with the &dmabuf.resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This is called automatically for non-dynamic importers from
+ * dma_buf_attach().
+ *
+ * Note that similar to non-dynamic exporters in their @map_dma_buf
+ * callback the driver must guarantee that the memory is available for
+ * use and cleared of any old data by the time this function returns.
+ * Drivers which pipeline their buffer moves internally must wait for
+ * all moves and clears to complete.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+ int (*pin)(struct dma_buf_attachment *attach);
+
+ /**
+ * @unpin:
+ *
+ * This is called by dma_buf_unpin() and lets the exporter know that the
+ * DMA-buf can be moved again.
+ *
+ * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct dma_buf_attachment *attach);
+
+ /**
* @map_dma_buf:
*
* This is called by dma_buf_map_attachment() and is used to map a
* shared &dma_buf into device address space, and it is mandatory. It
- * can only be called if @attach has been called successfully. This
- * essentially pins the DMA buffer into place, and it cannot be moved
- * any more
+ * can only be called if @attach has been called successfully.
*
* This call may sleep, e.g. when the backing storage first needs to be
* allocated, or moved to a location suitable for all currently attached
@@ -119,15 +139,34 @@ struct dma_buf_ops {
* any other kind of sharing that the exporter might wish to make
* available to buffer-users.
*
+ * This is always called with the dmabuf->resv object locked when
+ * the dynamic_mapping flag is true.
+ *
+ * Note that for non-dynamic exporters the driver must guarantee that
+ * the memory is available for use and cleared of any old data by
+ * the time this function returns. Drivers which pipeline their buffer
+ * moves internally must wait for all moves and clears to complete.
+ * Dynamic exporters do not need to follow this rule: For non-dynamic
+ * importers the buffer is already pinned through @pin, which has the
+ * same requirements. Dynamic importers, on the other hand, are
+ * required to obey the dma_resv fences.
+ *
* Returns:
*
- * A &sg_table scatter list of or the backing storage of the DMA buffer,
+ * A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
- * with the provided &dma_buf_attachment.
+ * with the provided &dma_buf_attachment. The addresses and lengths in
+ * the scatter list are PAGE_SIZE aligned.
*
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
* blocked.
+ *
+ * Note that exporters should not try to cache the scatter list, or
+ * return the same one for multiple calls. Caching is done either by the
+ * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+ * of the scatter list is transferred to the caller, and returned by
+ * @unmap_dma_buf.
*/
struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
enum dma_data_direction);
@@ -136,9 +175,8 @@ struct dma_buf_ops {
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
- * It should also unpin the backing storage if this is the last mapping
- * of the DMA buffer, it the exporter supports backing storage
- * migration.
+ * For static dma_buf handling this might also unpin the backing
+ * storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
struct sg_table *,
@@ -160,24 +198,19 @@ struct dma_buf_ops {
* @begin_cpu_access:
*
* This is called from dma_buf_begin_cpu_access() and allows the
- * exporter to ensure that the memory is actually available for cpu
- * access - the exporter might need to allocate or swap-in and pin the
- * backing storage. The exporter also needs to ensure that cpu access is
- * coherent for the access direction. The direction can be used by the
- * exporter to optimize the cache flushing, i.e. access with a different
+ * exporter to ensure that the memory is actually coherent for cpu
+ * access. The exporter also needs to ensure that cpu access is coherent
+ * for the access direction. The direction can be used by the exporter
+ * to optimize the cache flushing, i.e. access with a different
* direction (read instead of write) might return stale or even bogus
* data (e.g. when the exporter needs to copy the data to temporary
* storage).
*
- * This callback is optional.
+ * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
+ * command for userspace mappings established through @mmap, and also
+ * for kernel mappings established with @vmap.
*
- * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
- * from userspace (where storage shouldn't be pinned to avoid handing
- * de-factor mlock rights to userspace) and for the kernel-internal
- * users of the various kmap interfaces, where the backing storage must
- * be pinned to guarantee that the atomic kmap calls can succeed. Since
- * there's no in-kernel users of the kmap interfaces yet this isn't a
- * real problem.
+ * This callback is optional.
*
* Returns:
*
@@ -193,9 +226,7 @@ struct dma_buf_ops {
*
* This is called from dma_buf_end_cpu_access() when the importer is
* done accessing the CPU. The exporter can use this to flush caches and
- * unpin any resources pinned in @begin_cpu_access.
- * The result of any dma_buf kmap calls after end_cpu_access is
- * undefined.
+ * undo anything else done in @begin_cpu_access.
*
* This callback is optional.
*
@@ -206,10 +237,6 @@ struct dma_buf_ops {
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*map_atomic)(struct dma_buf *, unsigned long);
- void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
- void *(*map)(struct dma_buf *, unsigned long);
- void (*unmap)(struct dma_buf *, unsigned long, void *);
/**
* @mmap:
@@ -217,7 +244,7 @@ struct dma_buf_ops {
* This callback is used by the dma_buf_mmap() function
*
* Note that the mapping needs to be incoherent, userspace is expected
- * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface.
+ * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
*
* Because dma-buf buffers have invariant size over their lifetime, the
* dma-buf core checks whether a vma is too large and rejects such
@@ -248,28 +275,12 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- void *(*vmap)(struct dma_buf *);
- void (*vunmap)(struct dma_buf *, void *vaddr);
+ int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
+ void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
};
/**
* struct dma_buf - shared buffer object
- * @size: size of the buffer
- * @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached.
- * @ops: dma_buf_ops associated with this buffer object.
- * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
- * @vmapping_counter: used internally to refcnt the vmaps
- * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
- * @exp_name: name of the exporter; useful for debugging.
- * @owner: pointer to exporter module; used for refcounting when exporter is a
- * kernel module.
- * @list_node: node for dma_buf accounting and debugging.
- * @priv: exporter specific private data for this buffer object.
- * @resv: reservation object linked to this dma-buf
- * @poll: for userspace poll support
- * @cb_excl: for userspace poll support
- * @cb_shared: for userspace poll support
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@@ -281,36 +292,202 @@ struct dma_buf_ops {
* Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
+ /**
+ * @size:
+ *
+ * Size of the buffer; invariant over the lifetime of the buffer.
+ */
size_t size;
+
+ /**
+ * @file:
+ *
+ * File pointer used for sharing buffers across, and for refcounting.
+ * See dma_buf_get() and dma_buf_put().
+ */
struct file *file;
+
+ /**
+ * @attachments:
+ *
+ * List of dma_buf_attachment that denotes all devices attached,
+ * protected by &dma_resv lock @resv.
+ */
struct list_head attachments;
+
+ /** @ops: dma_buf_ops associated with this buffer object. */
const struct dma_buf_ops *ops;
- struct mutex lock;
+
+ /**
+ * @vmapping_counter:
+ *
+ * Used internally to refcnt the vmaps returned by dma_buf_vmap().
+ * Protected by @lock.
+ */
unsigned vmapping_counter;
- void *vmap_ptr;
+
+ /**
+ * @vmap_ptr:
+ * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
+ */
+ struct iosys_map vmap_ptr;
+
+ /**
+ * @exp_name:
+ *
+ * Name of the exporter; useful for debugging. Must not be NULL.
+ */
const char *exp_name;
+
+ /**
+ * @name:
+ *
+ * Userspace-provided name. Default value is NULL. If not NULL,
+ * length cannot be longer than DMA_BUF_NAME_LEN, including the NUL
+ * char. Useful for accounting and debugging. Read/Write accesses
+ * are protected by @name_lock
+ *
+ * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B
+ */
+ const char *name;
+
+ /** @name_lock: Spinlock to protect name access for read access. */
+ spinlock_t name_lock;
+
+ /**
+ * @owner:
+ *
+ * Pointer to exporter module; used for refcounting when exporter is a
+ * kernel module.
+ */
struct module *owner;
+
+ /** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
+
+ /** @priv: exporter specific private data for this buffer object. */
void *priv;
- struct reservation_object *resv;
- /* poll support */
+ /**
+ * @resv:
+ *
+ * Reservation object linked to this dma-buf.
+ *
+ * IMPLICIT SYNCHRONIZATION RULES:
+ *
+ * Drivers which support implicit synchronization of buffer access as
+ * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
+ * below rules.
+ *
+ * - Drivers must add a read fence through dma_resv_add_fence() with the
+ * DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+ * read access. This highly depends upon the API and window system.
+ *
+ * - Similarly drivers must add a write fence through
+ * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
+ * anything the userspace API considers write access.
+ *
+ * - Drivers may just always add a write fence, since that only
+ * causes unnecessary synchronization, but no correctness issues.
+ *
+ * - Some drivers only expose a synchronous userspace API with no
+ * pipelining across drivers. These do not set any fences for their
+ * access. An example here is v4l.
+ *
+ * - Driver should use dma_resv_usage_rw() when retrieving fences as
+ * dependency for implicit synchronization.
+ *
+ * DYNAMIC IMPORTER RULES:
+ *
+ * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
+ * additional constraints on how they set up fences:
+ *
+ * - Dynamic importers must obey the write fences and wait for them to
+ * signal before allowing access to the buffer's underlying storage
+ * through the device.
+ *
+ * - Dynamic importers should set fences for any access that they can't
+ * disable immediately from their &dma_buf_attach_ops.move_notify
+ * callback.
+ *
+ * IMPORTANT:
+ *
+ * All drivers and memory management related functions must obey the
+ * struct dma_resv rules, specifically the rules for updating and
+ * obeying fences. See enum dma_resv_usage for further descriptions.
+ */
+ struct dma_resv *resv;
+
+ /** @poll: for userspace poll support */
wait_queue_head_t poll;
+ /** @cb_in: for userspace poll support */
+ /** @cb_out: for userspace poll support */
struct dma_buf_poll_cb_t {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
- unsigned long active;
- } cb_excl, cb_shared;
+ __poll_t active;
+ } cb_in, cb_out;
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+ /**
+ * @sysfs_entry:
+ *
+ * For exposing information about this buffer in sysfs. See also
+ * `DMA-BUF statistics`_ for the uapi this enables.
+ */
+ struct dma_buf_sysfs_entry {
+ struct kobject kobj;
+ struct dma_buf *dmabuf;
+ } *sysfs_entry;
+#endif
+};
+
+/**
+ * struct dma_buf_attach_ops - importer operations for an attachment
+ *
+ * Attachment operations implemented by the importer.
+ */
+struct dma_buf_attach_ops {
+ /**
+ * @allow_peer2peer:
+ *
+ * If this is set to true the importer must be able to handle peer
+ * resources without struct pages.
+ */
+ bool allow_peer2peer;
+
+ /**
+ * @move_notify: [optional] notification that the DMA-buf is moving
+ *
+ * If this callback is provided the framework can avoid pinning the
+ * backing store while mappings exist.
+ *
+ * This callback is called with the lock of the reservation object
+ * associated with the dma_buf held and the mapping function must be
+ * called with this lock held as well. This makes sure that no mapping
+ * is created concurrently with an ongoing move operation.
+ *
+ * Mappings stay valid and are not directly affected by this callback.
+ * But the DMA-buf can now be in a different physical location, so all
+ * mappings should be destroyed and re-created as soon as possible.
+ *
+ * New mappings can be created after this callback returns, and will
+ * point to the new location of the DMA-buf.
+ */
+ void (*move_notify)(struct dma_buf_attachment *attach);
};
/**
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
- * @node: list of dma_buf_attachment.
+ * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
+ * @peer2peer: true if the importer can handle peer resources without pages.
* @priv: exporter specific attachment data.
+ * @importer_ops: importer operations for this attachment, if provided
+ * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
+ * @importer_priv: importer specific attachment data.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -325,6 +502,9 @@ struct dma_buf_attachment {
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
+ bool peer2peer;
+ const struct dma_buf_attach_ops *importer_ops;
+ void *importer_priv;
void *priv;
};
@@ -333,7 +513,7 @@ struct dma_buf_attachment {
* @exp_name: name of the exporter - useful for debugging.
* @owner: pointer to exporter module - used for refcounting kernel module
* @ops: Attach allocator-defined dma buf ops to the new buffer
- * @size: Size of the buffer
+ * @size: Size of the buffer - invariant over the lifetime of the buffer
* @flags: mode flags for the file
* @resv: reservation-object, NULL to allocate default one
* @priv: Attach private data of allocator to this buffer
@@ -347,11 +527,21 @@ struct dma_buf_export_info {
const struct dma_buf_ops *ops;
size_t size;
int flags;
- struct reservation_object *resv;
+ struct dma_resv *resv;
void *priv;
};
/**
+ * struct dma_buf_phys_vec - describe a contiguous chunk of memory
+ * @paddr: physical address of this chunk
+ * @len: Length of this chunk
+ */
+struct dma_buf_phys_vec {
+ phys_addr_t paddr;
+ size_t len;
+};
+
+/**
* DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name
*
@@ -376,10 +566,29 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
get_file(dmabuf->file);
}
+/**
+ * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
+ * @dmabuf: the DMA-buf to check
+ *
+ * Returns true if a DMA-buf exporter wants to be called with the dma_resv
+ * locked for the map/unmap callbacks, false if it doesn't want to be called
+ * with the lock held.
+ */
+static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
+{
+ return !!dmabuf->ops->pin;
+}
+
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev);
+ struct device *dev);
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *dmabuf_attach);
+ struct dma_buf_attachment *attach);
+int dma_buf_pin(struct dma_buf_attachment *attach);
+void dma_buf_unpin(struct dma_buf_attachment *attach);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
@@ -391,17 +600,24 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
+void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
-void *dma_buf_kmap(struct dma_buf *, unsigned long);
-void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction);
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction);
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
-void *dma_buf_vmap(struct dma_buf *);
-void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
+int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+struct dma_buf *dma_buf_iter_begin(void);
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmbuf);
#endif /* __DMA_BUF_H__ */
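Pulling the dynamic-importer pieces together, a sketch under assumptions:
my_invalidate_mappings() is hypothetical, dma_resv_lock()'s return value is
ignored for brevity, and DMA_BIDIRECTIONAL comes from <linux/dma-direction.h>.

/* Sketch of a dynamic importer using move_notify. */
static void my_move_notify(struct dma_buf_attachment *attach)
{
	/* Mappings must be torn down and re-created after this. */
	my_invalidate_mappings(attach->importer_priv);
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = my_move_notify,
};

static struct sg_table *my_import(struct dma_buf *dmabuf, struct device *dev,
				  void *priv)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, priv);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Dynamic importers map with the reservation lock held. */
	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);

	return sgt;
}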
diff --git a/include/linux/dma-buf/heaps/cma.h b/include/linux/dma-buf/heaps/cma.h
new file mode 100644
index 000000000000..e751479e21e7
--- /dev/null
+++ b/include/linux/dma-buf/heaps/cma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DMA_BUF_HEAP_CMA_H_
+#define DMA_BUF_HEAP_CMA_H_
+
+struct cma;
+
+#ifdef CONFIG_DMABUF_HEAPS_CMA
+int dma_heap_cma_register_heap(struct cma *cma);
+#else
+static inline int dma_heap_cma_register_heap(struct cma *cma)
+{
+ return 0;
+}
+#endif // CONFIG_DMABUF_HEAPS_CMA
+
+#endif // DMA_BUF_HEAP_CMA_H_
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
deleted file mode 100644
index b67bf6ac907d..000000000000
--- a/include/linux/dma-contiguous.h
+++ /dev/null
@@ -1,164 +0,0 @@
-#ifndef __LINUX_CMA_H
-#define __LINUX_CMA_H
-
-/*
- * Contiguous Memory Allocator for DMA mapping framework
- * Copyright (c) 2010-2011 by Samsung Electronics.
- * Written by:
- * Marek Szyprowski <m.szyprowski@samsung.com>
- * Michal Nazarewicz <mina86@mina86.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License or (at your optional) any later version of the license.
- */
-
-/*
- * Contiguous Memory Allocator
- *
- * The Contiguous Memory Allocator (CMA) makes it possible to
- * allocate big contiguous chunks of memory after the system has
- * booted.
- *
- * Why is it needed?
- *
- * Various devices on embedded systems have no scatter-getter and/or
- * IO map support and require contiguous blocks of memory to
- * operate. They include devices such as cameras, hardware video
- * coders, etc.
- *
- * Such devices often require big memory buffers (a full HD frame
- * is, for instance, more then 2 mega pixels large, i.e. more than 6
- * MB of memory), which makes mechanisms such as kmalloc() or
- * alloc_page() ineffective.
- *
- * At the same time, a solution where a big memory region is
- * reserved for a device is suboptimal since often more memory is
- * reserved then strictly required and, moreover, the memory is
- * inaccessible to page system even if device drivers don't use it.
- *
- * CMA tries to solve this issue by operating on memory regions
- * where only movable pages can be allocated from. This way, kernel
- * can use the memory for pagecache and when device driver requests
- * it, allocated pages can be migrated.
- *
- * Driver usage
- *
- * CMA should not be used by the device drivers directly. It is
- * only a helper framework for dma-mapping subsystem.
- *
- * For more information, see kernel-docs in drivers/base/dma-contiguous.c
- */
-
-#ifdef __KERNEL__
-
-#include <linux/device.h>
-
-struct cma;
-struct page;
-
-#ifdef CONFIG_DMA_CMA
-
-extern struct cma *dma_contiguous_default_area;
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- if (dev && dev->cma_area)
- return dev->cma_area;
- return dma_contiguous_default_area;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
-{
- if (dev)
- dev->cma_area = cma;
-}
-
-static inline void dma_contiguous_set_default(struct cma *cma)
-{
- dma_contiguous_default_area = cma;
-}
-
-void dma_contiguous_reserve(phys_addr_t addr_limit);
-
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
- phys_addr_t limit, struct cma **res_cma,
- bool fixed);
-
-/**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- * for particular device
- * @dev: Pointer to device structure.
- * @size: Size of the reserved memory.
- * @base: Start address of the reserved memory (optional, 0 for any).
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
- */
-
-static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
-{
- struct cma *cma;
- int ret;
- ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
- if (ret == 0)
- dev_set_cma_area(dev, cma);
-
- return ret;
-}
-
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, gfp_t gfp_mask);
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
- int count);
-
-#else
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- return NULL;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
-
-static inline void dma_contiguous_set_default(struct cma *cma) { }
-
-static inline void dma_contiguous_reserve(phys_addr_t limit) { }
-
-static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
- phys_addr_t limit, struct cma **res_cma,
- bool fixed)
-{
- return -ENOSYS;
-}
-
-static inline
-int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
-{
- return -ENOSYS;
-}
-
-static inline
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, gfp_t gfp_mask)
-{
- return NULL;
-}
-
-static inline
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
- int count)
-{
- return false;
-}
-
-#endif
-
-#endif
-
-#endif
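
For context, a minimal sketch of how early board code used the interface being removed above; the device, the SZ_* sizes and the function name are illustrative assumptions, not part of this patch:

	/* Hypothetical early-boot code against the removed dma-contiguous.h API. */
	#include <linux/dma-contiguous.h>
	#include <linux/sizes.h>

	static struct device camera_dev;	/* assumed platform device */

	static void __init board_reserve(void)
	{
		/* Carve out the default CMA area while memblock is still active. */
		dma_contiguous_reserve(SZ_512M);

		/* Give the camera its own 64 MiB area, placed anywhere (base/limit = 0). */
		if (dma_declare_contiguous(&camera_dev, SZ_64M, 0, 0))
			pr_warn("camera: CMA reservation failed\n");
	}
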
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
deleted file mode 100644
index c7d844f09c3a..000000000000
--- a/include/linux/dma-debug.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) 2008 Advanced Micro Devices, Inc.
- *
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __DMA_DEBUG_H
-#define __DMA_DEBUG_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct bus_type;
-
-#ifdef CONFIG_DMA_API_DEBUG
-
-extern void dma_debug_add_bus(struct bus_type *bus);
-
-extern void dma_debug_init(u32 num_entries);
-
-extern int dma_debug_resize_entries(u32 num_entries);
-
-extern void debug_dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- int direction, dma_addr_t dma_addr,
- bool map_single);
-
-extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
-extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction, bool map_single);
-
-extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction);
-
-extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir);
-
-extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt);
-
-extern void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr);
-
-extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
- size_t size, int direction,
- dma_addr_t dma_addr);
-
-extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction);
-
-extern void debug_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- int direction);
-
-extern void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction);
-
-extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction);
-
-extern void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction);
-
-extern void debug_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction);
-
-extern void debug_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction);
-
-extern void debug_dma_dump_mappings(struct device *dev);
-
-extern void debug_dma_assert_idle(struct page *page);
-
-#else /* CONFIG_DMA_API_DEBUG */
-
-static inline void dma_debug_add_bus(struct bus_type *bus)
-{
-}
-
-static inline void dma_debug_init(u32 num_entries)
-{
-}
-
-static inline int dma_debug_resize_entries(u32 num_entries)
-{
- return 0;
-}
-
-static inline void debug_dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- int direction, dma_addr_t dma_addr,
- bool map_single)
-{
-}
-
-static inline void debug_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction,
- bool map_single)
-{
-}
-
-static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction)
-{
-}
-
-static inline void debug_dma_unmap_sg(struct device *dev,
- struct scatterlist *sglist,
- int nelems, int dir)
-{
-}
-
-static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt)
-{
-}
-
-static inline void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr)
-{
-}
-
-static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
- size_t size, int direction,
- dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_unmap_resource(struct device *dev,
- dma_addr_t dma_addr, size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
-}
-
-static inline void debug_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
-}
-
-static inline void debug_dma_dump_mappings(struct device *dev)
-{
-}
-
-static inline void debug_dma_assert_idle(struct page *page)
-{
-}
-
-#endif /* CONFIG_DMA_API_DEBUG */
-
-#endif /* __DMA_DEBUG_H */
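
A sketch of how architecture init code drove the interface deleted above; the entry count and the initcall level are assumptions based on typical usage, not taken from this patch:

	#include <linux/dma-debug.h>

	#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)	/* assumed value */

	static int __init example_dma_debug_init(void)
	{
		/* Preallocate tracking entries, one per concurrent mapping. */
		dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
		return 0;
	}
	fs_initcall(example_dma_debug_init);
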
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
new file mode 100644
index 000000000000..c249912456f9
--- /dev/null
+++ b/include/linux/dma-direct.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internals of the DMA direct mapping implementation. Only for use by the
+ * DMA mapping code and IOMMU drivers.
+ */
+#ifndef _LINUX_DMA_DIRECT_H
+#define _LINUX_DMA_DIRECT_H 1
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
+#include <linux/memblock.h> /* for min_low_pfn */
+#include <linux/mem_encrypt.h>
+#include <linux/swiotlb.h>
+
+extern u64 zone_dma_limit;
+
+/*
+ * Record the mapping of CPU physical to DMA addresses for a given region.
+ */
+struct bus_dma_region {
+ phys_addr_t cpu_start;
+ dma_addr_t dma_start;
+ u64 size;
+};
+
+static inline dma_addr_t translate_phys_to_dma(struct device *dev,
+ phys_addr_t paddr)
+{
+ const struct bus_dma_region *m;
+
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = paddr - m->cpu_start;
+
+ if (paddr >= m->cpu_start && offset < m->size)
+ return m->dma_start + offset;
+ }
+
+ /* make sure dma_capable fails when no translation is available */
+ return DMA_MAPPING_ERROR;
+}
+
+static inline phys_addr_t translate_dma_to_phys(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ const struct bus_dma_region *m;
+
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = dma_addr - m->dma_start;
+
+ if (dma_addr >= m->dma_start && offset < m->size)
+ return m->cpu_start + offset;
+ }
+
+ return (phys_addr_t)-1;
+}
+
+static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map)
+{
+ dma_addr_t ret = (dma_addr_t)U64_MAX;
+
+ for (; map->size; map++)
+ ret = min(ret, map->dma_start);
+ return ret;
+}
+
+static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
+{
+ dma_addr_t ret = 0;
+
+ for (; map->size; map++)
+ ret = max(ret, map->dma_start + map->size - 1);
+ return ret;
+}
+
+#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
+#include <asm/dma-direct.h>
+#ifndef phys_to_dma_unencrypted
+#define phys_to_dma_unencrypted phys_to_dma
+#endif
+#else
+static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ if (dev->dma_range_map)
+ return translate_phys_to_dma(dev, paddr);
+ return paddr;
+}
+
+static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
+ phys_addr_t paddr)
+{
+ return dma_addr_unencrypted(__phys_to_dma(dev, paddr));
+}
+/*
+ * If memory encryption is supported, phys_to_dma will set the memory encryption
+ * bit in the DMA address, and dma_to_phys will clear it.
+ * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
+ * buffers.
+ */
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return dma_addr_encrypted(__phys_to_dma(dev, paddr));
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ phys_addr_t paddr;
+
+ dma_addr = dma_addr_canonical(dma_addr);
+ if (dev->dma_range_map)
+ paddr = translate_dma_to_phys(dev, dma_addr);
+ else
+ paddr = dma_addr;
+
+ return paddr;
+}
+#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+
+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
+ bool is_ram)
+{
+ dma_addr_t end = addr + size - 1;
+
+ if (addr == DMA_MAPPING_ERROR)
+ return false;
+ if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
+ return false;
+
+ return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
+}
+
+u64 dma_direct_get_required_mask(struct device *dev);
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_direct_free_pages(struct device *dev, size_t size,
+ struct page *page, dma_addr_t dma_addr,
+ enum dma_data_direction dir);
+int dma_direct_supported(struct device *dev, u64 mask);
+
+#endif /* _LINUX_DMA_DIRECT_H */
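
To illustrate the bus_dma_region walk added above, a hedged sketch of a range map for a hypothetical bus whose RAM at CPU address 0x80000000 appears to devices at bus address 0; all values are invented for illustration:

	static const struct bus_dma_region example_map[] = {
		{
			.cpu_start = 0x80000000,
			.dma_start = 0x00000000,
			.size      = 0x10000000,	/* 256 MiB window */
		},
		{ }	/* sentinel: size == 0 terminates the walk */
	};

	/* With dev->dma_range_map == example_map:
	 *   translate_phys_to_dma(dev, 0x80001000) returns 0x00001000,
	 *   translate_dma_to_phys(dev, 0x00001000) returns 0x80001000,
	 * and addresses outside the window yield DMA_MAPPING_ERROR or
	 * (phys_addr_t)-1 respectively, which dma_capable() then rejects.
	 */
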
diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h
index 95b6a82f5951..a2fe4571bc92 100644
--- a/include/linux/dma-direction.h
+++ b/include/linux/dma-direction.h
@@ -1,13 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECTION_H
#define _LINUX_DMA_DIRECTION_H
-/*
- * These definitions mirror those in pci.h, so they can be used
- * interchangeably with their PCI_ counterparts.
- */
+
enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
-#endif
+
+static inline int valid_dma_direction(enum dma_data_direction dir)
+{
+ return dir == DMA_BIDIRECTIONAL || dir == DMA_TO_DEVICE ||
+ dir == DMA_FROM_DEVICE;
+}
+
+#endif /* _LINUX_DMA_DIRECTION_H */
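
The new valid_dma_direction() helper is meant as a sanity check before mapping; a small sketch (the wrapper name is hypothetical):

	static dma_addr_t example_map(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
	{
		/* DMA_NONE is the only direction that is invalid for a mapping. */
		if (WARN_ON_ONCE(!valid_dma_direction(dir)))
			return DMA_MAPPING_ERROR;
		return dma_map_page(dev, page, offset, size, dir);
	}
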
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 332a5420243c..079b3dec0a16 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* fence-array: aggregates fence to be waited together
*
@@ -6,21 +7,13 @@
* Authors:
* Gustavo Padovan <gustavo@padovan.org>
* Christian König <christian.koenig@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __LINUX_DMA_FENCE_ARRAY_H
#define __LINUX_DMA_FENCE_ARRAY_H
#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
/**
* struct dma_fence_array_cb - callback helper for fence array
@@ -39,6 +32,8 @@ struct dma_fence_array_cb {
* @num_fences: number of fences in the array
* @num_pending: fences in the array still pending
* @fences: array of the fences
+ * @work: internal irq_work function
+ * @callbacks: array of callback helpers
*/
struct dma_fence_array {
struct dma_fence base;
@@ -47,20 +42,11 @@ struct dma_fence_array {
unsigned num_fences;
atomic_t num_pending;
struct dma_fence **fences;
-};
-extern const struct dma_fence_ops dma_fence_array_ops;
+ struct irq_work work;
-/**
- * dma_fence_is_array - check if a fence is from the array subclass
- * @fence: fence to test
- *
- * Return true if it is a dma_fence_array and false otherwise.
- */
-static inline bool dma_fence_is_array(struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_array_ops;
-}
+ struct dma_fence_array_cb callbacks[] __counted_by(num_fences);
+};
/**
* to_dma_fence_array - cast a fence to a dma_fence_array
@@ -72,12 +58,33 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
- if (fence->ops != &dma_fence_array_ops)
+ if (!fence || !dma_fence_is_array(fence))
return NULL;
return container_of(fence, struct dma_fence_array, base);
}
+/**
+ * dma_fence_array_for_each - iterate over all fences in array
+ * @fence: current fence
+ * @index: index into the array
+ * @head: potential dma_fence_array object
+ *
+ * Test if @head is a dma_fence_array object and if so iterate over all fences
+ * in the array. If not, just iterate over the fence in @head itself.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_array_for_each(fence, index, head) \
+ for (index = 0, fence = dma_fence_array_first(head); fence; \
+ ++(index), fence = dma_fence_array_next(head, index))
+
+struct dma_fence_array *dma_fence_array_alloc(int num_fences);
+void dma_fence_array_init(struct dma_fence_array *array,
+ int num_fences, struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
@@ -85,4 +92,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
bool dma_fence_match_context(struct dma_fence *fence, u64 context);
+struct dma_fence *dma_fence_array_first(struct dma_fence *head);
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+ unsigned int index);
+
#endif /* __LINUX_DMA_FENCE_ARRAY_H */
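
A hedged sketch of the dma_fence_array_for_each() iterator added above; if head is not an array container, the loop visits just head itself:

	static void example_wait_all(struct dma_fence *head)
	{
		struct dma_fence *fence;
		unsigned int index;

		dma_fence_array_for_each(fence, index, head)
			dma_fence_wait(fence, false);	/* uninterruptible wait */
	}
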
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
new file mode 100644
index 000000000000..68c3c1e41014
--- /dev/null
+++ b/include/linux/dma-fence-chain.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_CHAIN_H
+#define __LINUX_DMA_FENCE_CHAIN_H
+
+#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
+#include <linux/slab.h>
+
+/**
+ * struct dma_fence_chain - fence to represent a node of a fence chain
+ * @base: fence base class
+ * @prev: previous fence of the chain
+ * @prev_seqno: original previous seqno before garbage collection
+ * @fence: encapsulated fence
+ * @lock: spinlock for fence handling
+ */
+struct dma_fence_chain {
+ struct dma_fence base;
+ struct dma_fence __rcu *prev;
+ u64 prev_seqno;
+ struct dma_fence *fence;
+ union {
+ /**
+ * @cb: callback for signaling
+ *
+ * This is used to add the callback for signaling the
+		 * completion of the fence chain. Never used at the same time
+ * as the irq work.
+ */
+ struct dma_fence_cb cb;
+
+ /**
+ * @work: irq work item for signaling
+ *
+ * Irq work structure to allow us to add the callback without
+ * running into lock inversion. Never used at the same time as
+ * the callback.
+ */
+ struct irq_work work;
+ };
+ spinlock_t lock;
+};
+
+/**
+ * to_dma_fence_chain - cast a fence to a dma_fence_chain
+ * @fence: fence to cast to a dma_fence_array
+ *
+ * Returns NULL if the fence is not a dma_fence_chain,
+ * or the dma_fence_chain otherwise.
+ */
+static inline struct dma_fence_chain *
+to_dma_fence_chain(struct dma_fence *fence)
+{
+ if (!fence || !dma_fence_is_chain(fence))
+ return NULL;
+
+ return container_of(fence, struct dma_fence_chain, base);
+}
+
+/**
+ * dma_fence_chain_contained - return the contained fence
+ * @fence: the fence to test
+ *
+ * If the fence is a dma_fence_chain the function returns the fence contained
+ * inside the chain object, otherwise it returns the fence itself.
+ */
+static inline struct dma_fence *
+dma_fence_chain_contained(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+ return chain ? chain->fence : fence;
+}
+
+/**
+ * dma_fence_chain_alloc
+ *
+ * Returns a new struct dma_fence_chain object or NULL on failure.
+ *
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce type safety.
+ */
+#define dma_fence_chain_alloc() \
+ ((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL))
+
+/**
+ * dma_fence_chain_free
+ * @chain: chain node to free
+ *
+ * Frees up an allocated but not used struct dma_fence_chain object. This
+ * doesn't need an RCU grace period since the fence was never initialized nor
+ * published. After dma_fence_chain_init() has been called the fence must be
+ * released by calling dma_fence_put(), and not through this function.
+ */
+static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
+{
+ kfree(chain);
+}
+
+/**
+ * dma_fence_chain_for_each - iterate over all fences in chain
+ * @iter: current fence
+ * @head: starting point
+ *
+ * Iterate over all fences in the chain. We keep a reference to the current
+ * fence while inside the loop which must be dropped when breaking out.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_chain_for_each(iter, head) \
+ for (iter = dma_fence_get(head); iter; \
+ iter = dma_fence_chain_walk(iter))
+
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence);
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno);
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+ struct dma_fence *prev,
+ struct dma_fence *fence,
+ uint64_t seqno);
+
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
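
A sketch of walking the new chain container; note the iterator keeps a reference to the current node, so breaking out early must drop it, as the kernel-doc above requires:

	static void example_walk(struct dma_fence *head)
	{
		struct dma_fence *iter;

		dma_fence_chain_for_each(iter, head) {
			/* Look at the fence wrapped by each chain node. */
			if (!dma_fence_is_signaled(dma_fence_chain_contained(iter))) {
				dma_fence_put(iter);	/* drop the loop reference */
				break;
			}
		}
	}
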
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
new file mode 100644
index 000000000000..62df222fe0f1
--- /dev/null
+++ b/include/linux/dma-fence-unwrap.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_UNWRAP_H
+#define __LINUX_DMA_FENCE_UNWRAP_H
+
+struct dma_fence;
+
+/**
+ * struct dma_fence_unwrap - cursor into the container structure
+ *
+ * Should be used with dma_fence_unwrap_for_each() iterator macro.
+ */
+struct dma_fence_unwrap {
+ /**
+ * @chain: potential dma_fence_chain, but can be other fence as well
+ */
+ struct dma_fence *chain;
+ /**
+ * @array: potential dma_fence_array, but can be other fence as well
+ */
+ struct dma_fence *array;
+ /**
+ * @index: last returned index if @array is really a dma_fence_array
+ */
+ unsigned int index;
+};
+
+struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
+ struct dma_fence_unwrap *cursor);
+struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor);
+
+/**
+ * dma_fence_unwrap_for_each - iterate over all fences in containers
+ * @fence: current fence
+ * @cursor: current position inside the containers
+ * @head: starting point for the iterator
+ *
+ * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
+ * potential fences in them. If @head is just a normal fence only that one is
+ * returned.
+ */
+#define dma_fence_unwrap_for_each(fence, cursor, head) \
+ for (fence = dma_fence_unwrap_first(head, cursor); fence; \
+ fence = dma_fence_unwrap_next(cursor))
+
+struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ struct dma_fence **fences,
+ struct dma_fence_unwrap *cursors);
+
+int dma_fence_dedup_array(struct dma_fence **array, int num_fences);
+
+/**
+ * dma_fence_unwrap_merge - unwrap and merge fences
+ *
+ * All fences given as parameters are unwrapped and merged back together as a
+ * flat dma_fence_array. Useful if multiple containers need to be merged together.
+ *
+ * Implemented as a macro to allocate the necessary arrays on the stack and
+ * account the stack frame size to the caller.
+ *
+ * Returns NULL on memory allocation failure, a dma_fence object representing
+ * all the given fences otherwise.
+ */
+#define dma_fence_unwrap_merge(...) \
+ ({ \
+ struct dma_fence *__f[] = { __VA_ARGS__ }; \
+ struct dma_fence_unwrap __c[ARRAY_SIZE(__f)]; \
+ \
+ __dma_fence_unwrap_merge(ARRAY_SIZE(__f), __f, __c); \
+ })
+
+#endif
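
A sketch of the deep-dive iterator added above; counting leaf fences this way works for plain fences, arrays, chains, and nestings of the two:

	static unsigned int example_count_leaves(struct dma_fence *head)
	{
		struct dma_fence_unwrap cursor;
		struct dma_fence *fence;
		unsigned int count = 0;

		dma_fence_unwrap_for_each(fence, &cursor, head)
			count++;
		return count;
	}

Merging two containers into one flat array is then a one-liner, e.g. merged = dma_fence_unwrap_merge(f1, f2), which may return NULL on allocation failure.
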
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 171895072435..64639e104110 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Fence mechanism for dma-buf to allow for asynchronous dma access
*
@@ -7,15 +8,6 @@
* Authors:
* Rob Clark <robdclark@gmail.com>
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __LINUX_DMA_FENCE_H
@@ -29,10 +21,12 @@
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
+#include <linux/timekeeping.h>
struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;
+struct seq_file;
/**
* struct dma_fence - software synchronization primitive
@@ -71,19 +65,40 @@ struct dma_fence_cb;
* been completed, or never called at all.
*/
struct dma_fence {
- struct kref refcount;
- const struct dma_fence_ops *ops;
- struct rcu_head rcu;
- struct list_head cb_list;
spinlock_t *lock;
+ const struct dma_fence_ops *ops;
+ /*
+ * We clear the callback list on kref_put so that by the time we
+ * release the fence it is unused. No one should be adding to the
+ * cb_list that they don't themselves hold a reference for.
+ *
+ * The lifetime of the timestamp is similarly tied to both the
+ * rcu freelist and the cb_list. The timestamp is only set upon
+ * signaling while simultaneously notifying the cb_list. Ergo, we
+ * only use either the cb_list of timestamp. Upon destruction,
+	 * only use either the cb_list or the timestamp. Upon destruction,
+ * that the cb_list is *only* valid until the signal bit is set,
+ * and to read either you *must* hold a reference to the fence,
+ * and not just the rcu_read_lock.
+ *
+ * Listed in chronological order.
+ */
+ union {
+ struct list_head cb_list;
+ /* @cb_list replaced by @timestamp on dma_fence_signal() */
+ ktime_t timestamp;
+ /* @timestamp replaced by @rcu on dma_fence_release() */
+ struct rcu_head rcu;
+ };
u64 context;
- unsigned seqno;
+ u64 seqno;
unsigned long flags;
- ktime_t timestamp;
+ struct kref refcount;
int error;
};
enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_SEQNO64_BIT,
DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT,
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@@ -94,11 +109,11 @@ typedef void (*dma_fence_func_t)(struct dma_fence *fence,
struct dma_fence_cb *cb);
/**
- * struct dma_fence_cb - callback for dma_fence_add_callback
- * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
+ * struct dma_fence_cb - callback for dma_fence_add_callback()
+ * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
* @func: dma_fence_func_t to call
*
- * This struct will be initialized by dma_fence_add_callback, additional
+ * This struct will be initialized by dma_fence_add_callback(), additional
* data can be passed along by embedding dma_fence_cb in another struct.
*/
struct dma_fence_cb {
@@ -108,88 +123,149 @@ struct dma_fence_cb {
/**
* struct dma_fence_ops - operations implemented for fence
- * @get_driver_name: returns the driver name.
- * @get_timeline_name: return the name of the context this fence belongs to.
- * @enable_signaling: enable software signaling of fence.
- * @signaled: [optional] peek whether the fence is signaled, can be null.
- * @wait: custom wait implementation, or dma_fence_default_wait.
- * @release: [optional] called on destruction of fence, can be null
- * @fill_driver_data: [optional] callback to fill in free-form debug info
- * Returns amount of bytes filled, or -errno.
- * @fence_value_str: [optional] fills in the value of the fence as a string
- * @timeline_value_str: [optional] fills in the current value of the timeline
- * as a string
- *
- * Notes on enable_signaling:
- * For fence implementations that have the capability for hw->hw
- * signaling, they can implement this op to enable the necessary
- * irqs, or insert commands into cmdstream, etc. This is called
- * in the first wait() or add_callback() path to let the fence
- * implementation know that there is another driver waiting on
- * the signal (ie. hw->sw case).
- *
- * This function can be called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
- *
- * A return value of false indicates the fence already passed,
- * or some failure occurred that made it impossible to enable
- * signaling. True indicates successful enabling.
- *
- * fence->error may be set in enable_signaling, but only when false is
- * returned.
- *
- * Calling dma_fence_signal before enable_signaling is called allows
- * for a tiny race window in which enable_signaling is called during,
- * before, or after dma_fence_signal. To fight this, it is recommended
- * that before enable_signaling returns true an extra reference is
- * taken on the fence, to be released when the fence is signaled.
- * This will mean dma_fence_signal will still be called twice, but
- * the second time will be a noop since it was already signaled.
- *
- * Notes on signaled:
- * May set fence->error if returning true.
- *
- * Notes on wait:
- * Must not be NULL, set to dma_fence_default_wait for default implementation.
- * the dma_fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
- *
- * Must return -ERESTARTSYS if the wait is intr = true and the wait was
- * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
- * timed out. Can also return other error values on custom implementations,
- * which should be treated as if the fence is signaled. For example a hardware
- * lockup could be reported like that.
- *
- * Notes on release:
- * Can be NULL, this function allows additional commands to run on
- * destruction of the fence. Can be called from irq context.
- * If pointer is set to NULL, kfree will get called instead.
+ *
*/
-
struct dma_fence_ops {
+ /**
+ * @get_driver_name:
+ *
+ * Returns the driver name. This is a callback to allow drivers to
+ * compute the name at runtime, without having it to store permanently
+ * for each fence, or build a cache of some sort.
+ *
+ * This callback is mandatory.
+ */
const char * (*get_driver_name)(struct dma_fence *fence);
+
+ /**
+ * @get_timeline_name:
+ *
+ * Return the name of the context this fence belongs to. This is a
+ * callback to allow drivers to compute the name at runtime, without
+ * having it to store permanently for each fence, or build a cache of
+ * some sort.
+ *
+ * This callback is mandatory.
+ */
const char * (*get_timeline_name)(struct dma_fence *fence);
+
+ /**
+ * @enable_signaling:
+ *
+ * Enable software signaling of fence.
+ *
+ * For fence implementations that have the capability for hw->hw
+ * signaling, they can implement this op to enable the necessary
+ * interrupts, or insert commands into cmdstream, etc, to avoid these
+ * costly operations for the common case where only hw->hw
+ * synchronization is required. This is called in the first
+ * dma_fence_wait() or dma_fence_add_callback() path to let the fence
+ * implementation know that there is another driver waiting on the
+ * signal (ie. hw->sw case).
+ *
+ * This is called with irq's disabled, so only spinlocks which disable
+ * IRQ's can be used in the code outside of this callback.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * &dma_fence.error may be set in enable_signaling, but only when false
+ * is returned.
+ *
+	 * Since many implementations can call dma_fence_signal() even before
+	 * @enable_signaling has been called, there is a race window where the
+ * dma_fence_signal() might result in the final fence reference being
+ * released and its memory freed. To avoid this, implementations of this
+ * callback should grab their own reference using dma_fence_get(), to be
+ * released when the fence is signalled (through e.g. the interrupt
+ * handler).
+ *
+ * This callback is optional. If this callback is not present, then the
+ * driver must always have signaling enabled.
+ */
bool (*enable_signaling)(struct dma_fence *fence);
+
+ /**
+ * @signaled:
+ *
+ * Peek whether the fence is signaled, as a fastpath optimization for
+ * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
+	 * callback does not need to make any guarantee beyond that a fence
+	 * which once reported as signaled must always return true from this
+	 * callback afterwards. This callback may return false even if the
+	 * fence has completed already; in that case the information hasn't
+	 * propagated through the system yet. See also dma_fence_is_signaled().
+ *
+ * May set &dma_fence.error if returning true.
+ *
+ * This callback is optional.
+ */
bool (*signaled)(struct dma_fence *fence);
+
+ /**
+ * @wait:
+ *
+ * Custom wait implementation, defaults to dma_fence_default_wait() if
+ * not set.
+ *
+ * Deprecated and should not be used by new implementations. Only used
+ * by existing implementations which need special handling for their
+ * hardware reset procedure.
+ *
+	 * Must return -ERESTARTSYS if intr is true and the wait was
+	 * interrupted, the remaining jiffies if the fence has signaled, or 0
+	 * if the wait timed out. Custom implementations can also return other
+	 * error values, which should be treated as if the fence is signaled.
+	 * For example, a hardware lockup could be reported like that.
+ */
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
+
+ /**
+ * @release:
+ *
+ * Called on destruction of fence to release additional resources.
+ * Can be called from irq context. This callback is optional. If it is
+ * NULL, then dma_fence_free() is instead called as the default
+ * implementation.
+ */
void (*release)(struct dma_fence *fence);
- int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
- void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
- void (*timeline_value_str)(struct dma_fence *fence,
- char *str, int size);
+ /**
+ * @set_deadline:
+ *
+ * Callback to allow a fence waiter to inform the fence signaler of
+	 * an upcoming deadline, such as vblank, by which point the waiter
+	 * would prefer the fence to be signaled. This is intended to
+ * give feedback to the fence signaler to aid in power management
+ * decisions, such as boosting GPU frequency.
+ *
+ * This is called without &dma_fence.lock held, it can be called
+ * multiple times and from any context. Locking is up to the callee
+ * if it has some state to manage. If multiple deadlines are set,
+ * the expectation is to track the soonest one. If the deadline is
+ * before the current time, it should be interpreted as an immediate
+ * deadline.
+ *
+ * This callback is optional.
+ */
+ void (*set_deadline)(struct dma_fence *fence, ktime_t deadline);
};
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno);
+ spinlock_t *lock, u64 context, u64 seqno);
+
+void dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno);
void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
+void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
/**
* dma_fence_put - decreases refcount of the fence
- * @fence: [in] fence to reduce refcount of
+ * @fence: fence to reduce refcount of
*/
static inline void dma_fence_put(struct dma_fence *fence)
{
@@ -199,7 +275,7 @@ static inline void dma_fence_put(struct dma_fence *fence)
/**
* dma_fence_get - increases refcount of the fence
- * @fence: [in] fence to increase refcount of
+ * @fence: fence to increase refcount of
*
* Returns the same fence, with refcount increased by 1.
*/
@@ -211,9 +287,9 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
}
/**
- * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * dma_fence_get_rcu - get a fence from a dma_resv_list with
* rcu read lock
- * @fence: [in] fence to increase refcount of
+ * @fence: fence to increase refcount of
*
* Function returns NULL if no refcount could be obtained, or the fence.
*/
@@ -227,7 +303,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
/**
* dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
- * @fencep: [in] pointer to fence to increase refcount of
+ * @fencep: pointer to fence to increase refcount of
*
* Function returns NULL if no refcount could be obtained, or the fence.
* This function handles acquiring a reference to a fence that may be
@@ -235,22 +311,25 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
* so long as the caller is using RCU on the pointer to the fence.
*
* An alternative mechanism is to employ a seqlock to protect a bunch of
- * fences, such as used by struct reservation_object. When using a seqlock,
+ * fences, such as used by struct dma_resv. When using a seqlock,
* the seqlock must be taken before and checked after a reference to the
* fence is acquired (as shown here).
*
* The caller is required to hold the RCU read lock.
*/
static inline struct dma_fence *
-dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
+dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
{
do {
struct dma_fence *fence;
fence = rcu_dereference(*fencep);
- if (!fence || !dma_fence_get_rcu(fence))
+ if (!fence)
return NULL;
+ if (!dma_fence_get_rcu(fence))
+ continue;
+
/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
* provides a full memory barrier upon success (such as now).
* This is paired with the write barrier from assigning
@@ -272,8 +351,24 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
} while (1);
}
+#ifdef CONFIG_LOCKDEP
+bool dma_fence_begin_signalling(void);
+void dma_fence_end_signalling(bool cookie);
+void __dma_fence_might_wait(void);
+#else
+static inline bool dma_fence_begin_signalling(void)
+{
+ return true;
+}
+static inline void dma_fence_end_signalling(bool cookie) {}
+static inline void __dma_fence_might_wait(void) {}
+#endif
+
int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+ ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
@@ -284,16 +379,41 @@ bool dma_fence_remove_callback(struct dma_fence *fence,
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
/**
+ * DOC: Safe external access to driver provided object members
+ *
+ * All data not stored directly in the dma-fence object, such as the
+ * &dma_fence.lock and memory potentially accessed by functions in the
+ * &dma_fence.ops table, MUST NOT be accessed after the fence has been signalled
+ * because after that point drivers are allowed to free it.
+ *
+ * All code accessing that data via the dma-fence API (or directly, which is
+ * discouraged), MUST make sure to contain the complete access within a
+ * &rcu_read_lock and &rcu_read_unlock pair.
+ *
+ * Some of the dma-fence API handles this automatically, while other parts,
+ * for example &dma_fence_driver_name and &dma_fence_timeline_name, leave that
+ * responsibility to the caller.
+ *
+ * To enable this scheme to work, drivers MUST ensure an RCU grace period
+ * elapses between signalling the fence and freeing said data.
+ *
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence);
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
+
+/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
- * @fence: [in] the fence to check
+ * @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
* function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
+ *
+ * This function requires &dma_fence.lock to be held.
*
- * This function requires fence->lock to be held.
+ * See also dma_fence_is_signaled().
*/
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
@@ -311,17 +431,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
/**
* dma_fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
+ * @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
* function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
*
* It's recommended for seqno fences to call dma_fence_signal when the
* operation is complete, it makes it possible to prevent issues from
* wraparound between time of issue and time of use by checking the return
* value of this function before calling hardware-specific wait instructions.
+ *
+ * See also dma_fence_is_signaled_locked().
*/
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
@@ -339,21 +461,29 @@ dma_fence_is_signaled(struct dma_fence *fence)
/**
* __dma_fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence's seqno
- * @f2: [in] the second fence's seqno from the same context
+ * @fence: fence in whose context to do the comparison
+ * @f1: the first fence's seqno
+ * @f2: the second fence's seqno from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not common across contexts.
*/
-static inline bool __dma_fence_is_later(u32 f1, u32 f2)
+static inline bool __dma_fence_is_later(struct dma_fence *fence, u64 f1, u64 f2)
{
- return (int)(f1 - f2) > 0;
+ /* This is for backward compatibility with drivers which can only handle
+ * 32bit sequence numbers. Use a 64bit compare when the driver says to
+ * do so.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SEQNO64_BIT, &fence->flags))
+ return f1 > f2;
+
+ return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
}
/**
* dma_fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not re-used across contexts.
@@ -364,13 +494,28 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return __dma_fence_is_later(f1->seqno, f2->seqno);
+ return __dma_fence_is_later(f1, f1->seqno, f2->seqno);
+}
+
+/**
+ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2 or the same fence. Both
+ * fences must be from the same context, since a seqno is not re-used across
+ * contexts.
+ */
+static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ return f1 == f2 || dma_fence_is_later(f1, f2);
}
/**
* dma_fence_later - return the chronologically later fence
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
*
* Returns NULL if both fences are signaled, otherwise the fence that would be
* signaled last. Both fences must be from the same context, since a seqno is
@@ -395,7 +540,7 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
/**
* dma_fence_get_status_locked - returns the status upon completion
- * @fence: [in] the dma_fence to query
+ * @fence: the dma_fence to query
*
* Drivers can supply an optional error status condition before they signal
* the fence (to indicate whether the fence was completed due to an error
@@ -419,14 +564,20 @@ int dma_fence_get_status(struct dma_fence *fence);
/**
* dma_fence_set_error - flag an error condition on the fence
- * @fence: [in] the dma_fence
- * @error: [in] the error to store
+ * @fence: the dma_fence
+ * @error: the error to store
*
* Drivers can supply an optional error status condition before they signal
* the fence, to indicate that the fence was completed due to an error
* rather than success. This must be set before signaling (so that the value
* is visible before any waiters on the signal callback are woken). This
* helper exists to help catching erroneous setting of #dma_fence.error.
+ *
+ * Examples of error codes which drivers should use:
+ *
+ * * %-ENODATA This operation produced no data, no other operation affected.
+ * * %-ECANCELED All operations from the same context have been canceled.
+ * * %-ETIME Operation caused a timeout and potentially device reset.
*/
static inline void dma_fence_set_error(struct dma_fence *fence,
int error)
@@ -437,6 +588,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
fence->error = error;
}
+/**
+ * dma_fence_timestamp - helper to get the completion timestamp of a fence
+ * @fence: fence to get the timestamp from.
+ *
+ * After a fence is signaled the timestamp is updated with the signaling time,
+ * but setting the timestamp can race with tasks waiting for the signaling. This
+ * helper busy waits for the correct timestamp to appear.
+ */
+static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
+{
+ if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+ return ktime_get();
+
+ while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+ cpu_relax();
+
+ return fence->timestamp;
+}
+
signed long dma_fence_wait_timeout(struct dma_fence *,
bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
@@ -446,8 +616,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
/**
* dma_fence_wait - sleep until the fence gets signaled
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
*
* This function will return -ERESTARTSYS if interrupted by a signal,
* or 0 if the fence was signaled. Other error values may be
@@ -456,6 +626,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
* Performs a synchronous wait on this fence. It is assumed the caller
* directly or indirectly holds a reference to the fence, otherwise the
* fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
*/
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{
@@ -470,28 +642,48 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
+void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);
+
+struct dma_fence *dma_fence_get_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
u64 dma_fence_context_alloc(unsigned num);
-#define DMA_FENCE_TRACE(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
- pr_info("f %llu#%u: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define DMA_FENCE_WARN(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#define DMA_FENCE_ERR(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
+extern const struct dma_fence_ops dma_fence_array_ops;
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * dma_fence_is_chain - check if a fence is from the chain subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_chain and false otherwise.
+ */
+static inline bool dma_fence_is_chain(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+/**
+ * dma_fence_is_container - check if a fence is a container for other fences
+ * @fence: the fence to test
+ *
+ * Return true if this fence is a container for other fences, false otherwise.
+ * This is important since we can't build up large fence structures, as
+ * otherwise we would run into recursion when operating on those fences.
+ */
+static inline bool dma_fence_is_container(struct dma_fence *fence)
+{
+ return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+}
#endif /* __LINUX_DMA_FENCE_H */
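
One point worth a concrete sketch is the ordering dma_fence_set_error() demands: the error must be stored before the fence is signalled so waiters woken by the signal observe it. A hypothetical timeout handler:

	static void example_timeout(struct dma_fence *fence)
	{
		/* Store the error first; it must be visible before waiters wake. */
		dma_fence_set_error(fence, -ETIME);
		dma_fence_signal(fence);
		/* dma_fence_get_status(fence) now reports -ETIME. */
	}
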
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
new file mode 100644
index 000000000000..27d15f60950a
--- /dev/null
+++ b/include/linux/dma-heap.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps Allocation Infrastructure
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _DMA_HEAPS_H
+#define _DMA_HEAPS_H
+
+#include <linux/types.h>
+
+struct dma_heap;
+
+/**
+ * struct dma_heap_ops - ops to operate on a given heap
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
+ *
+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
+ */
+struct dma_heap_ops {
+ struct dma_buf *(*allocate)(struct dma_heap *heap,
+ unsigned long len,
+ u32 fd_flags,
+ u64 heap_flags);
+};
+
+/**
+ * struct dma_heap_export_info - information needed to export a new dmabuf heap
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @priv: heap exporter private data
+ *
+ * Information needed to export a new dmabuf heap.
+ */
+struct dma_heap_export_info {
+ const char *name;
+ const struct dma_heap_ops *ops;
+ void *priv;
+};
+
+void *dma_heap_get_drvdata(struct dma_heap *heap);
+
+const char *dma_heap_get_name(struct dma_heap *heap);
+
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
+
+#endif /* _DMA_HEAPS_H */
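
A sketch of registering a heap through the new interface; the ops body is a stub and all names are illustrative assumptions:

	static struct dma_buf *example_allocate(struct dma_heap *heap,
						unsigned long len,
						u32 fd_flags, u64 heap_flags)
	{
		return ERR_PTR(-ENOMEM);	/* a real heap builds a dma_buf here */
	}

	static const struct dma_heap_ops example_ops = {
		.allocate = example_allocate,
	};

	static int __init example_heap_init(void)
	{
		struct dma_heap_export_info exp_info = {
			.name = "example",
			.ops  = &example_ops,
			.priv = NULL,
		};

		return PTR_ERR_OR_ZERO(dma_heap_add(&exp_info));
	}
	module_init(example_heap_init);
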
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
deleted file mode 100644
index 92f20832fd28..000000000000
--- a/include/linux/dma-iommu.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2014-2015 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __DMA_IOMMU_H
-#define __DMA_IOMMU_H
-
-#ifdef __KERNEL__
-#include <asm/errno.h>
-
-#ifdef CONFIG_IOMMU_DMA
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-#include <linux/msi.h>
-
-int iommu_dma_init(void);
-
-/* Domain management interface for IOMMU drivers */
-int iommu_get_dma_cookie(struct iommu_domain *domain);
-int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
-void iommu_put_dma_cookie(struct iommu_domain *domain);
-
-/* Setup call for arch DMA mapping code */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
- u64 size, struct device *dev);
-
-/* General helpers for DMA-API <-> IOMMU-API interaction */
-int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
- unsigned long attrs);
-
-/*
- * These implement the bulk of the relevant DMA mapping callbacks, but require
- * the arch code to take care of attributes and cache maintenance
- */
-struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- unsigned long attrs, int prot, dma_addr_t *handle,
- void (*flush_page)(struct device *, const void *, phys_addr_t));
-void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
- dma_addr_t *handle);
-
-int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);
-
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, int prot);
-int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int prot);
-
-/*
- * Arch code with no special attribute handling may use these
- * directly as DMA mapping callbacks for simplicity
- */
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs);
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
-/* The DMA API isn't _quite_ the whole story, though... */
-void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
-void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
-
-#else
-
-struct iommu_domain;
-struct msi_msg;
-struct device;
-
-static inline int iommu_dma_init(void)
-{
- return 0;
-}
-
-static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
-{
- return -ENODEV;
-}
-
-static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
-{
- return -ENODEV;
-}
-
-static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
-{
-}
-
-static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
-{
-}
-
-static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
-{
-}
-
-#endif /* CONFIG_IOMMU_DMA */
-#endif /* __KERNEL__ */
-#endif /* __DMA_IOMMU_H */
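
For reference, a sketch of how an IOMMU driver drove the cookie interface deleted above; the base/size values and the function name are assumptions:

	static int example_init_domain(struct iommu_domain *domain,
				       struct device *dev)
	{
		int ret = iommu_get_dma_cookie(domain);

		if (ret)
			return ret;
		/* IOVA window base and size here are illustrative only. */
		ret = iommu_dma_init_domain(domain, 0, SZ_4G, dev);
		if (ret)
			iommu_put_dma_cookie(domain);
		return ret;
	}
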
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
new file mode 100644
index 000000000000..4809204c674c
--- /dev/null
+++ b/include/linux/dma-map-ops.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header is for implementations of dma_map_ops and related code.
+ * It should not be included in drivers just using the DMA API.
+ */
+#ifndef _LINUX_DMA_MAP_OPS_H
+#define _LINUX_DMA_MAP_OPS_H
+
+#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
+#include <linux/slab.h>
+
+struct cma;
+struct iommu_ops;
+
+struct dma_map_ops {
+ void *(*alloc)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs);
+ void (*free)(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs);
+ struct page *(*alloc_pages_op)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir,
+ gfp_t gfp);
+ void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+ int (*mmap)(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t, unsigned long attrs);
+
+ int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+
+ dma_addr_t (*map_phys)(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_phys)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ /*
+ * map_sg should return a negative error code on error. See
+ * dma_map_sgtable() for a list of appropriate error codes
+ * and their meanings.
+ */
+ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ u64 (*get_required_mask)(struct device *dev);
+ size_t (*max_mapping_size)(struct device *dev);
+ size_t (*opt_mapping_size)(void);
+ unsigned long (*get_merge_boundary)(struct device *dev);
+};
+
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
+#include <asm/dma-mapping.h>
+
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev->dma_ops)
+ return dev->dma_ops;
+ return get_arch_dma_ops();
+}
+
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+ dev->dma_ops = dma_ops;
+}
+#else /* CONFIG_ARCH_HAS_DMA_OPS */
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return NULL;
+}
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_OPS */
+
+#ifdef CONFIG_DMA_CMA
+extern struct cma *dma_contiguous_default_area;
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma, bool fixed);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+ unsigned int order, bool no_warn);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count);
+struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
+void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+#else /* CONFIG_DMA_CMA */
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ return NULL;
+}
+static inline void dma_contiguous_reserve(phys_addr_t limit)
+{
+}
+static inline int dma_contiguous_reserve_area(phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
+ bool fixed)
+{
+ return -ENOSYS;
+}
+static inline struct page *dma_alloc_from_contiguous(struct device *dev,
+ size_t count, unsigned int order, bool no_warn)
+{
+ return NULL;
+}
+static inline bool dma_release_from_contiguous(struct device *dev,
+ struct page *pages, int count)
+{
+ return false;
+}
+/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
+static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
+ gfp_t gfp)
+{
+ return NULL;
+}
+static inline void dma_free_contiguous(struct device *dev, struct page *page,
+ size_t size)
+{
+ __free_pages(page, get_order(size));
+}
+static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+}
+#endif /* CONFIG_DMA_CMA */
+
+#ifdef CONFIG_DMA_DECLARE_COHERENT
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size);
+void dma_release_coherent_memory(struct device *dev);
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret);
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
+#else
+static inline int dma_declare_coherent_memory(struct device *dev,
+ phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
+{
+ return -ENOSYS;
+}
+
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+static inline void dma_release_coherent_memory(struct device *dev) { }
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
+
+#ifdef CONFIG_DMA_GLOBAL_POOL
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+ size_t size, int *ret);
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
+#else
+static inline void *dma_alloc_from_global_coherent(struct device *dev,
+ ssize_t size, dma_addr_t *dma_handle)
+{
+ return NULL;
+}
+static inline int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ return 0;
+}
+static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret)
+{
+ return 0;
+}
+#endif /* CONFIG_DMA_GLOBAL_POOL */
+
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+struct page *dma_common_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+
+struct page **dma_common_find_pages(void *cpu_addr);
+void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
+ const void *caller);
+void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
+ const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
+
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+ void **cpu_addr, gfp_t flags,
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
+bool dma_free_from_pool(struct device *dev, void *start, size_t size);
+
+int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
+ dma_addr_t dma_start, u64 size);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+extern bool dma_default_coherent;
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return dev->dma_coherent;
+}
+#else
+#define dma_default_coherent true
+
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return true;
+}
+#endif
+
+static inline void dma_reset_need_sync(struct device *dev)
+{
+#ifdef CONFIG_DMA_NEED_SYNC
+	/* Reset it only once so that the function can be called on the hot path */
+ if (unlikely(dev->dma_skip_sync))
+ dev->dma_skip_sync = false;
+#endif
+}
+
+/*
+ * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_safe(struct device *dev,
+ enum dma_data_direction dir)
+{
+ /*
+ * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
+ * caches have already been aligned to a DMA-safe size.
+ */
+ if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+ return true;
+
+ /*
+ * kmalloc() buffers are DMA-safe irrespective of size if the device
+ * is coherent or the direction is DMA_TO_DEVICE (non-destructive
+ * cache maintenance and benign cache line evictions).
+ */
+ if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
+ * sufficiently aligned for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_size_aligned(size_t size)
+{
+ /*
+ * Larger kmalloc() sizes are guaranteed to be aligned to
+ * ARCH_DMA_MINALIGN.
+ */
+ if (size >= 2 * ARCH_DMA_MINALIGN ||
+ IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
+ return true;
+
+ return false;
+}
+
+/*
+ * Check whether the given object size may have originated from a kmalloc()
+ * buffer with a slab alignment below the DMA-safe alignment and needs
+ * bouncing for non-coherent DMA. The pointer alignment is not considered and
+ * in-structure DMA-safe offsets are the responsibility of the caller. Such
+ * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
+ *
+ * The heuristics can have false positives, bouncing unnecessarily, though the
+ * buffers would be small. False negatives are theoretically possible if, for
+ * example, multiple small kmalloc() buffers are coalesced into a larger
+ * buffer that passes the alignment check. There are no such known constructs
+ * in the kernel.
+ */
+static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
+ enum dma_data_direction dir)
+{
+ return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
+}
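+
+/*
+ * Example (simplified sketch; real arch code handles more cases): a
+ * non-coherent streaming-map path can use the helper above to decide
+ * whether a kmalloc()ed buffer must be bounced before device access.
+ *
+ *	if (dma_kmalloc_needs_bounce(dev, size, dir))
+ *		return swiotlb_map(dev, phys, size, dir, attrs);
+ */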
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+
+#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
+void arch_dma_set_mask(struct device *dev, u64 mask);
+#else
+#define arch_dma_set_mask(dev, mask) do { } while (0)
+#endif
+
+#ifdef CONFIG_MMU
+/*
+ * Page protection so that devices that can't snoop CPU caches can use the
+ * memory coherently. We default to pgprot_noncached which is usually used
+ * for ioremap as a safe bet, but architectures can override this with less
+ * strict semantics if possible.
+ */
+#ifndef pgprot_dmacoherent
+#define pgprot_dmacoherent(prot) pgprot_noncached(prot)
+#endif
+
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
+#else
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
+ return prot; /* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(void);
+#else
+static inline void arch_sync_dma_for_cpu_all(void)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
+#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
+void arch_dma_prep_coherent(struct page *page, size_t size);
+#else
+static inline void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
+
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
+#else
+static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
+{
+}
+#endif /* ARCH_HAS_DMA_MARK_CLEAN */
+
+void *arch_dma_set_uncached(void *addr, size_t size);
+void arch_dma_clear_uncached(void *addr, size_t size);
+
+#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
+bool arch_dma_map_phys_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_phys_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents);
+bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents);
+#else
+#define arch_dma_map_phys_direct(d, a) (false)
+#define arch_dma_unmap_phys_direct(d, a) (false)
+#define arch_dma_map_sg_direct(d, s, n) (false)
+#define arch_dma_unmap_sg_direct(d, s, n) (false)
+#endif
+
+#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
+void arch_setup_dma_ops(struct device *dev, bool coherent);
+#else
+static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
+
+#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
+void arch_teardown_dma_ops(struct device *dev);
+#else
+static inline void arch_teardown_dma_ops(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
+
+#ifdef CONFIG_DMA_API_DEBUG
+void dma_debug_add_bus(const struct bus_type *bus);
+void debug_dma_dump_mappings(struct device *dev);
+#else
+static inline void dma_debug_add_bus(const struct bus_type *bus)
+{
+}
+static inline void debug_dma_dump_mappings(struct device *dev)
+{
+}
+#endif /* CONFIG_DMA_API_DEBUG */
+
+extern const struct dma_map_ops dma_dummy_ops;
+#endif /* _LINUX_DMA_MAP_OPS_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 2189c79cde5d..2ceda49c609f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -1,25 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
-#include <linux/sizes.h>
-#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
-#include <linux/kmemcheck.h>
#include <linux/bug.h>
-#include <linux/mem_encrypt.h>
/**
* List of possible attributes associated with a DMA mapping. The semantics
- * of each attribute should be defined in Documentation/DMA-attributes.txt.
- *
- * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
- * forces all pending DMA writes to complete.
+ * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
*/
-#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
+
/*
* DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
* may be weakly ordered, that is that reads and writes may pass each other.
@@ -31,11 +24,6 @@
*/
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
- * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
- * consistent or non-consistent memory as it sees fit.
- */
-#define DMA_ATTR_NON_CONSISTENT (1UL << 3)
-/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
* virtual mapping for the allocated buffer.
*/
@@ -71,559 +59,553 @@
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
- * A dma_addr_t can hold any valid DMA or bus address for the platform.
- * It can be given to a device to use as a DMA source or target. A CPU cannot
- * reference a dma_addr_t directly because there may be translation between
- * its physical address space and the bus address space.
+ * DMA_ATTR_MMIO - Indicates memory-mapped I/O (MMIO) region for DMA mapping
+ *
+ * This attribute indicates that the physical address is not normal system
+ * memory: it may not be used with the kmap*()/phys_to_virt()/phys_to_page()
+ * functions, it may not be cacheable, and access using CPU load/store
+ * instructions may not be allowed.
+ *
+ * Usually this will be used to describe MMIO addresses, or other non-cacheable
+ * register addresses. When DMA mapping this sort of address we call the
+ * operation Peer to Peer, as one device is DMA'ing to another device.
+ * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
+ * is appropriate.
+ *
+ * For architectures that require cache flushing for DMA coherence,
+ * DMA_ATTR_MMIO will not perform any cache flushing. The address
+ * provided must never be mapped cacheable into the CPU.
*/
-struct dma_map_ops {
- void* (*alloc)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs);
- void (*free)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs);
- int (*mmap)(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t,
- unsigned long attrs);
-
- int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
- dma_addr_t, size_t, unsigned long attrs);
-
- dma_addr_t (*map_page)(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- /*
- * map_sg returns 0 on error and a value > 0 on success.
- * It should never return a value < 0.
- */
- int (*map_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_sg)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir,
- unsigned long attrs);
- dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*sync_single_for_cpu)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir);
- void (*sync_single_for_device)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir);
- void (*sync_sg_for_cpu)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
- void (*sync_sg_for_device)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
- int (*dma_supported)(struct device *dev, u64 mask);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
- u64 (*get_required_mask)(struct device *dev);
-#endif
- int is_phys;
-};
+#define DMA_ATTR_MMIO (1UL << 10)
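+
+/*
+ * Example (sketch; "bar_phys" is a hypothetical MMIO physical address, e.g.
+ * one validated through the PCI p2pdma APIs): map it with DMA_ATTR_MMIO so
+ * that no struct page or CPU cache maintenance is assumed.
+ *
+ *	dma_addr_t addr = dma_map_phys(dev, bar_phys, len,
+ *				       DMA_BIDIRECTIONAL, DMA_ATTR_MMIO);
+ *	if (dma_mapping_error(dev, addr))
+ *		return -ENOMEM;
+ */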
-extern const struct dma_map_ops dma_noop_ops;
-extern const struct dma_map_ops dma_virt_ops;
+/*
+ * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
+ * be given to a device to use as a DMA source or target. It is specific to a
+ * given device and there may be a translation between the CPU physical address
+ * space and the bus address space.
+ *
+ * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
+ * be used directly in drivers, but checked for using dma_mapping_error()
+ * instead.
+ */
+#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
-#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+#define DMA_BIT_MASK(n)	GENMASK_ULL((n) - 1, 0)
-#define DMA_MASK_NONE 0x0ULL
+struct dma_iova_state {
+ dma_addr_t addr;
+ u64 __size;
+};
-static inline int valid_dma_direction(int dma_direction)
+/*
+ * Use the high bit to mark if we used swiotlb for one or more ranges.
+ */
+#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)
+
+static inline size_t dma_iova_size(struct dma_iova_state *state)
{
- return ((dma_direction == DMA_BIDIRECTIONAL) ||
- (dma_direction == DMA_TO_DEVICE) ||
- (dma_direction == DMA_FROM_DEVICE));
+	/* Casting is needed on 32-bit systems */
+ return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
}
-static inline int is_device_dma_capable(struct device *dev)
+#ifdef CONFIG_DMA_API_DEBUG
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len);
+#else
+static inline void debug_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
{
- return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
+static inline void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len)
+{
+}
+#endif /* CONFIG_DMA_API_DEBUG */
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-/*
- * These three functions are only for dma allocator.
- * Don't use them in device drivers.
- */
-int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret);
-int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
-
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, size_t size, int *ret);
-
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
-int dma_release_from_global_coherent(int order, void *vaddr);
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
- size_t size, int *ret);
+#ifdef CONFIG_HAS_DMA
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ debug_dma_mapping_error(dev, dma_addr);
-#else
-#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+ return -ENOMEM;
+ return 0;
+}
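+
+/*
+ * Example (minimal sketch; "buf" and "len" are illustrative): every
+ * streaming mapping must be checked with dma_mapping_error() before use.
+ *
+ *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ *
+ *	if (dma_mapping_error(dev, addr))
+ *		return -ENOMEM;
+ */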
-static inline void *dma_alloc_from_global_coherent(ssize_t size,
- dma_addr_t *dma_handle)
+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs);
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+bool dma_can_mmap(struct device *dev);
+bool dma_pci_p2pdma_supported(struct device *dev);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+u64 dma_get_required_mask(struct device *dev);
+bool dma_addressing_limited(struct device *dev);
+size_t dma_max_mapping_size(struct device *dev);
+size_t dma_opt_mapping_size(struct device *dev);
+unsigned long dma_get_merge_boundary(struct device *dev);
+struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
+int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
+#else /* CONFIG_HAS_DMA */
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+ struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline unsigned int dma_map_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return 0;
+}
+static inline void dma_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
+static inline dma_addr_t dma_map_resource(struct device *dev,
+ phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return -ENOMEM;
+}
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
return NULL;
}
-
-static inline int dma_release_from_global_coherent(int order, void *vaddr)
+static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+}
+static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ return NULL;
+}
+static inline void dmam_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+}
+static inline int dma_get_sgtable_attrs(struct device *dev,
+ struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, unsigned long attrs)
+{
+ return -ENXIO;
+}
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ return -ENXIO;
+}
+static inline bool dma_can_mmap(struct device *dev)
+{
+ return false;
+}
+static inline bool dma_pci_p2pdma_supported(struct device *dev)
+{
+ return false;
+}
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ return -EIO;
+}
+static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ return -EIO;
+}
+static inline u64 dma_get_required_mask(struct device *dev)
{
return 0;
}
-
-static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
- void *cpu_addr, size_t size,
- int *ret)
+static inline bool dma_addressing_limited(struct device *dev)
+{
+ return false;
+}
+static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-
-#ifdef CONFIG_HAS_DMA
-#include <asm/dma-mapping.h>
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline size_t dma_opt_mapping_size(struct device *dev)
{
- if (dev && dev->dma_ops)
- return dev->dma_ops;
- return get_arch_dma_ops(dev ? dev->bus : NULL);
+ return 0;
}
-
-static inline void set_dma_ops(struct device *dev,
- const struct dma_map_ops *dma_ops)
+static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
- dev->dma_ops = dma_ops;
+ return 0;
}
-#else
-/*
- * Define the dma api to allow compilation but not linking of
- * dma dependent code. Code that depends on the dma-mapping
- * API needs to set 'depends on HAS_DMA' in its Kconfig
- */
-extern const struct dma_map_ops bad_dma_ops;
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
+ size_t size, enum dma_data_direction dir, gfp_t gfp,
+ unsigned long attrs)
+{
+ return NULL;
+}
+static inline void dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir)
{
- return &bad_dma_ops;
}
-#endif
-
-static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- kmemcheck_mark_initialized(ptr, size);
- BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size,
- dir, attrs);
- debug_dma_map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size,
- dir, addr, true);
- return addr;
+ return NULL;
}
-
-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir, true);
}
+static inline int dma_mmap_noncontiguous(struct device *dev,
+ struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_HAS_DMA */
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
+#ifdef CONFIG_IOMMU_DMA
+/**
+ * dma_use_iova - check if the IOVA API is used for this state
+ * @state: IOVA state
+ *
+ * Return %true if the DMA transfer uses the dma_iova_*() calls, or %false if
+ * they can't be used.
*/
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
+static inline bool dma_use_iova(struct dma_iova_state *state)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- int i, ents;
- struct scatterlist *s;
-
- for_each_sg(sg, s, nents, i)
- kmemcheck_mark_initialized(sg_virt(s), s->length);
- BUG_ON(!valid_dma_direction(dir));
- ents = ops->map_sg(dev, sg, nents, dir, attrs);
- BUG_ON(ents < 0);
- debug_dma_map_sg(dev, sg, nents, ents, dir);
-
- return ents;
+ return state->__size != 0;
}
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size);
+void dma_iova_free(struct device *dev, struct dma_iova_state *state);
+void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+ size_t mapped_len, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size);
+int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
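+
+/*
+ * Example (sketch; error unwinding elided, "phys" and "size" illustrative):
+ * allocate an IOVA range, link physical memory into it, make the mapping
+ * visible, and later tear the whole mapping down in one call.
+ *
+ *	struct dma_iova_state state = {};
+ *	int ret;
+ *
+ *	if (!dma_iova_try_alloc(dev, &state, phys, size))
+ *		return -ENOMEM;		// fall back to dma_map_*()
+ *	ret = dma_iova_link(dev, &state, phys, 0, size, dir, attrs);
+ *	if (!ret)
+ *		ret = dma_iova_sync(dev, &state, 0, size);
+ *	...
+ *	dma_iova_destroy(dev, &state, size, dir, attrs);
+ */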
+#else /* CONFIG_IOMMU_DMA */
+static inline bool dma_use_iova(struct dma_iova_state *state)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- debug_dma_unmap_sg(dev, sg, nents, dir);
- if (ops->unmap_sg)
- ops->unmap_sg(dev, sg, nents, dir, attrs);
+ return false;
}
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
- struct page *page,
- size_t offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline bool dma_iova_try_alloc(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t size)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- kmemcheck_mark_initialized(page_address(page) + offset, size);
- BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, attrs);
- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
- return addr;
+ return false;
}
-
-static inline void dma_unmap_page_attrs(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline void dma_iova_free(struct device *dev,
+ struct dma_iova_state *state)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir, false);
}
-
-static inline dma_addr_t dma_map_resource(struct device *dev,
- phys_addr_t phys_addr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline void dma_iova_destroy(struct device *dev,
+ struct dma_iova_state *state, size_t mapped_len,
+ enum dma_data_direction dir, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- BUG_ON(!valid_dma_direction(dir));
-
- /* Don't allow RAM to be mapped */
- BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
-
- addr = phys_addr;
- if (ops->map_resource)
- addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
-
- debug_dma_map_resource(dev, phys_addr, size, dir, addr);
-
- return addr;
}
-
-static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+static inline int dma_iova_sync(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+static inline int dma_iova_link(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t offset,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
+static inline void dma_iova_unlink(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
+}
+#endif /* CONFIG_IOMMU_DMA */
+
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
+void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir);
+void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir);
+void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_resource)
- ops->unmap_resource(dev, addr, size, dir, attrs);
- debug_dma_unmap_resource(dev, addr, size, dir);
+static inline bool dma_dev_need_sync(const struct device *dev)
+{
+ /* Always call DMA sync operations when debugging is enabled */
+ return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size,
- enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr, size, dir);
- debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+ if (dma_dev_need_sync(dev))
+ __dma_sync_single_for_cpu(dev, addr, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir)
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr, size, dir);
- debug_dma_sync_single_for_device(dev, addr, size, dir);
+ if (dma_dev_need_sync(dev))
+ __dma_sync_single_for_device(dev, addr, size, dir);
}
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+ if (dma_dev_need_sync(dev))
+ __dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+ if (dma_dev_need_sync(dev))
+ __dma_sync_sg_for_device(dev, sg, nelems, dir);
}
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_cpu)
- ops->sync_sg_for_cpu(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+ return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
+bool dma_need_unmap(struct device *dev);
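+
+/*
+ * Example (sketch; a single RX buffer, names illustrative): ownership of a
+ * streaming mapping must be handed back and forth around each CPU access.
+ *
+ *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
+ *	// the CPU may now read the buffer
+ *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
+ */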
+#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+static inline bool dma_dev_need_sync(const struct device *dev)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_device)
- ops->sync_sg_for_device(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
+ return false;
}
-
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
-#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
-#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
-
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
- unsigned long vm_flags,
- pgprot_t prot, const void *caller);
-
-void *dma_common_pages_remap(struct page **pages, size_t size,
- unsigned long vm_flags, pgprot_t prot,
- const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
-
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs)
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->mmap)
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
-
-int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->get_sgtable)
- return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
-
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
-
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev, flag) (true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- void *cpu_addr;
-
- BUG_ON(!ops);
-
- if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
- return cpu_addr;
-
- if (!arch_dma_alloc_attrs(&dev, &flag))
- return NULL;
- if (!ops->alloc)
- return NULL;
-
- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- return cpu_addr;
}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- unsigned long attrs)
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!ops);
- WARN_ON(irqs_disabled());
-
- if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
- return;
-
- if (!ops->free || !cpu_addr)
- return;
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
- return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
+ return false;
}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+static inline bool dma_need_unmap(struct device *dev)
{
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
+ return false;
}
+#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+
+struct page *dma_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_free_pages(struct device *dev, size_t size, struct page *page,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct page *page);
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
- return dma_alloc_attrs(dev, size, dma_handle, gfp,
- DMA_ATTR_NON_CONSISTENT);
+ struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+ return page ? page_address(page) : NULL;
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
- dma_free_attrs(dev, size, cpu_addr, dma_handle,
- DMA_ATTR_NON_CONSISTENT);
+ dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
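+
+/*
+ * Example (sketch; "size" illustrative): non-coherent allocations require
+ * explicit ownership transfers around each device or CPU access.
+ *
+ *	dma_addr_t addr;
+ *	void *buf = dma_alloc_noncoherent(dev, size, &addr,
+ *					  DMA_FROM_DEVICE, GFP_KERNEL);
+ *	...
+ *	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
+ *	// CPU reads buf here
+ *	dma_free_noncoherent(dev, size, buf, addr, DMA_FROM_DEVICE);
+ */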
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- debug_dma_mapping_error(dev, dma_addr);
-
- if (get_dma_ops(dev)->mapping_error)
- return get_dma_ops(dev)->mapping_error(dev, dma_addr);
- return 0;
+ /* DMA must never operate on areas that might be remapped. */
+ if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
+ "rejecting DMA map of vmalloc memory\n"))
+ return DMA_MAPPING_ERROR;
+ debug_dma_map_single(dev, ptr, size);
+ return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
+ size, dir, attrs);
}
-static inline void dma_check_mask(struct device *dev, u64 mask)
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
- dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
+ return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
-static inline int dma_supported(struct device *dev, u64 mask)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
+ return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+}
- if (!ops)
- return 0;
- if (!ops->dma_supported)
- return 1;
- return ops->dma_supported(dev, mask);
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
+/**
+ * dma_unmap_sgtable - Unmap the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ * @attrs: Optional DMA attributes for the unmap operation
+ *
+ * Unmaps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After this function
+ * the ownership of the buffer is transferred back to the CPU domain.
+ */
+static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
+ dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+}
- dma_check_mask(dev, mask);
+/**
+ * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the CPU domain, so it is safe for the CPU to perform any
+ * access to it. Before doing any further DMA operations, one has to transfer
+ * the ownership of the buffer back to the DMA domain by calling
+ * dma_sync_sgtable_for_device().
+ */
+static inline void dma_sync_sgtable_for_cpu(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
+}
- *dev->dma_mask = mask;
- return 0;
+/**
+ * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the DMA domain, so it is safe to perform the DMA operation.
+ * Once finished, one has to call dma_sync_sgtable_for_cpu() or
+ * dma_unmap_sgtable().
+ */
+static inline void dma_sync_sgtable_for_device(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
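+
+/*
+ * Example (sketch; sg_table construction elided): typical lifecycle pairing
+ * the sgtable map, sync and unmap helpers above.
+ *
+ *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);
+ *	// CPU access
+ *	dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
+ *	...
+ *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
+ */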
-#endif
-static inline u64 dma_get_mask(struct device *dev)
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
+#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
+#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
+
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
{
- if (dev && dev->dma_mask && *dev->dma_mask)
- return *dev->dma_mask;
- return DMA_BIT_MASK(32);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp,
+ (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask);
-#else
-static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
{
- if (!dma_supported(dev, mask))
- return -EIO;
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
+}
- dma_check_mask(dev, mask);
- dev->coherent_dma_mask = mask;
- return 0;
+static inline u64 dma_get_mask(struct device *dev)
+{
+ if (dev->dma_mask && *dev->dma_mask)
+ return *dev->dma_mask;
+ return DMA_BIT_MASK(32);
}
-#endif
/*
* Set both the DMA mask and the coherent DMA mask to the same thing.
@@ -649,18 +631,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
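+
+/*
+ * Example (sketch): a driver's probe() typically asks for the widest mask
+ * its hardware supports and falls back to 32 bits.
+ *
+ *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ *	if (ret)
+ *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ *	if (ret)
+ *		return ret;
+ */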
-extern u64 dma_get_required_mask(struct device *dev);
-
-#ifndef arch_setup_dma_ops
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu,
- bool coherent) { }
-#endif
-
-#ifndef arch_teardown_dma_ops
-static inline void arch_teardown_dma_ops(struct device *dev) { }
-#endif
-
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->max_segment_size)
@@ -668,140 +638,88 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline unsigned int dma_set_max_seg_size(struct device *dev,
- unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
- if (dev->dma_parms) {
- dev->dma_parms->max_segment_size = size;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->max_segment_size = size;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
return dev->dma_parms->segment_boundary_mask;
- return DMA_BIT_MASK(32);
-}
-
-static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
-{
- if (dev->dma_parms) {
- dev->dma_parms->segment_boundary_mask = mask;
- return 0;
- }
- return -EIO;
-}
-
-#ifndef dma_max_pfn
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
- return *dev->dma_mask >> PAGE_SHIFT;
+ return ULONG_MAX;
}
-#endif
-static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+/**
+ * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
+ * @dev: device to query the boundary for
+ * @page_shift: ilog2() of the IOMMU page size
+ *
+ * Return the segment boundary in IOMMU page units (which may be different from
+ * the CPU page size) for the passed in device.
+ *
+ * If @dev is NULL, a boundary of U32_MAX is assumed; this case is just for
+ * non-DMA API callers.
+ */
+static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
+ unsigned int page_shift)
{
- void *ret = dma_alloc_coherent(dev, size, dma_handle,
- flag | __GFP_ZERO);
- return ret;
+ if (!dev)
+ return (U32_MAX >> page_shift) + 1;
+ return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
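+
+/*
+ * Example: with a segment_boundary_mask of 0xffffffff (4 GiB - 1) and 4 KiB
+ * IOMMU pages (page_shift == 12), this returns (0xffffffff >> 12) + 1 ==
+ * 0x100000 pages.
+ */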
-#ifdef CONFIG_HAS_DMA
-static inline int dma_get_cache_alignment(void)
+static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
-#ifdef ARCH_DMA_MINALIGN
- return ARCH_DMA_MINALIGN;
-#endif
- return 1;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->segment_boundary_mask = mask;
}
-#endif
-/* flags for the coherent memory api */
-#define DMA_MEMORY_MAP 0x01
-#define DMA_MEMORY_IO 0x02
-#define DMA_MEMORY_INCLUDES_CHILDREN 0x04
-#define DMA_MEMORY_EXCLUSIVE 0x08
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags);
-void dma_release_declared_memory(struct device *dev);
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
-#else
-static inline int
-dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags)
+static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
+ if (dev->dma_parms)
+ return dev->dma_parms->min_align_mask;
return 0;
}
-static inline void
-dma_release_declared_memory(struct device *dev)
+static inline void dma_set_min_align_mask(struct device *dev,
+ unsigned int min_align_mask)
{
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->min_align_mask = min_align_mask;
}
-static inline void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
- return ERR_PTR(-EBUSY);
-}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-
-#ifdef CONFIG_HAS_DMA
-int dma_configure(struct device *dev);
-void dma_deconfigure(struct device *dev);
-#else
-static inline int dma_configure(struct device *dev)
+#ifndef dma_get_cache_alignment
+static inline int dma_get_cache_alignment(void)
{
- return 0;
-}
-
-static inline void dma_deconfigure(struct device *dev) {}
+#ifdef ARCH_HAS_DMA_MINALIGN
+ return ARCH_DMA_MINALIGN;
#endif
-
-/*
- * Managed DMA API
- */
-extern void *dmam_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-extern void *dmam_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs);
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-extern int dmam_declare_coherent_memory(struct device *dev,
- phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size,
- int flags);
-extern void dmam_release_declared_memory(struct device *dev);
-#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-static inline int dmam_declare_coherent_memory(struct device *dev,
- phys_addr_t phys_addr, dma_addr_t device_addr,
- size_t size, gfp_t gfp)
-{
- return 0;
+ return 1;
}
+#endif
-static inline void dmam_release_declared_memory(struct device *dev)
+static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
{
+ return dmam_alloc_attrs(dev, size, dma_handle, gfp,
+ (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
- return dma_alloc_attrs(dev, size, dma_addr, gfp,
- DMA_ATTR_WRITE_COMBINE);
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+
+ if (gfp & __GFP_NOWARN)
+ attrs |= DMA_ATTR_NO_WARN;
+
+ return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
-#ifndef dma_alloc_writecombine
-#define dma_alloc_writecombine dma_alloc_wc
-#endif
static inline void dma_free_wc(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
@@ -809,9 +727,6 @@ static inline void dma_free_wc(struct device *dev, size_t size,
return dma_free_attrs(dev, size, cpu_addr, dma_addr,
DMA_ATTR_WRITE_COMBINE);
}
-#ifndef dma_free_writecombine
-#define dma_free_writecombine dma_free_wc
-#endif
static inline int dma_mmap_wc(struct device *dev,
struct vm_area_struct *vma,
@@ -821,11 +736,8 @@ static inline int dma_mmap_wc(struct device *dev,
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
DMA_ATTR_WRITE_COMBINE);
}
-#ifndef dma_mmap_writecombine
-#define dma_mmap_writecombine dma_mmap_wc
-#endif
-#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
+#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
@@ -835,10 +747,14 @@ static inline int dma_mmap_wc(struct device *dev,
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
-#define dma_unmap_addr(PTR, ADDR_NAME) (0)
-#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define dma_unmap_len(PTR, LEN_NAME) (0)
-#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+#define dma_unmap_addr(PTR, ADDR_NAME) \
+ ({ typeof(PTR) __p __maybe_unused = PTR; 0; })
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
+#define dma_unmap_len(PTR, LEN_NAME) \
+ ({ typeof(PTR) __p __maybe_unused = PTR; 0; })
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \
+ do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif
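+
+/*
+ * Example (sketch; struct and variable names are illustrative): embed the
+ * unmap bookkeeping in driver state so it compiles away when
+ * CONFIG_NEED_DMA_MAP_STATE is not set.
+ *
+ *	struct ring_entry {
+ *		DEFINE_DMA_UNMAP_ADDR(addr);
+ *		DEFINE_DMA_UNMAP_LEN(len);
+ *	};
+ *
+ *	dma_unmap_addr_set(e, addr, mapping);
+ *	dma_unmap_len_set(e, len, size);
+ *	...
+ *	dma_unmap_single(dev, dma_unmap_addr(e, addr),
+ *			 dma_unmap_len(e, len), DMA_TO_DEVICE);
+ */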
-#endif
+#endif /* _LINUX_DMA_MAPPING_H */
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
new file mode 100644
index 000000000000..c5ab6fd9ebe8
--- /dev/null
+++ b/include/linux/dma-resv.h
@@ -0,0 +1,487 @@
+/*
+ * Header file for reservations for dma-buf and ttm
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Copyright (C) 2012-2013 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _LINUX_RESERVATION_H
+#define _LINUX_RESERVATION_H
+
+#include <linux/ww_mutex.h>
+#include <linux/dma-fence.h>
+#include <linux/slab.h>
+#include <linux/seqlock.h>
+#include <linux/rcupdate.h>
+
+extern struct ww_class reservation_ww_class;
+
+struct dma_resv_list;
+
+/**
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that the usages are ordered KERNEL < WRITE < READ <
+ * BOOKKEEP, and when the dma_resv object is asked for the fences of one use
+ * case the fences of the lower use cases are returned as well.
+ *
+ * For example, when asking for WRITE fences the KERNEL fences are returned as
+ * well. Similarly, when asked for READ fences both the WRITE and KERNEL
+ * fences are returned.
+ *
+ * Fences already in the object can be promoted, in the sense that a fence
+ * with DMA_RESV_USAGE_BOOKKEEP can become DMA_RESV_USAGE_READ by adding it
+ * again with that usage. But fences can never be degraded; a fence added with
+ * DMA_RESV_USAGE_WRITE never becomes DMA_RESV_USAGE_READ.
+ */
+enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+ * Drivers must *always* wait for those fences before accessing the
+ * resource protected by the dma_resv object. The only exception is
+ * when the resource is known to be locked down in place because it
+ * was pinned previously.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
+ /**
+ * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit write dependency.
+ */
+ DMA_RESV_USAGE_WRITE,
+
+ /**
+ * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit read dependency.
+ */
+ DMA_RESV_USAGE_READ,
+
+ /**
+ * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+ *
+ * This should be used by submissions which don't want to participate in
+ * any implicit synchronization.
+ *
+ * The most common cases are preemption fences, page table updates, TLB
+ * flushes as well as explicitly synced user submissions.
+ *
+ * Explicitly synced user submissions can be promoted to
+ * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
+ * dma_buf_import_sync_file() when implicit synchronization should
+ * become necessary after the fence was initially added.
+ */
+ DMA_RESV_USAGE_BOOKKEEP
+};
+
+/**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+ /* This looks confusing at first sight, but is indeed correct.
+ *
+ * The rationale is that new write operations need to wait for the
+ * existing read and write operations to finish.
+ * But a new read operation only needs to wait for the existing write
+ * operations to finish.
+ */
+ return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
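To illustrate the helper (object and flag names are hypothetical): a new writer must wait for all READ-and-below fences, while a new reader only waits for WRITE-and-below, which is exactly what the inversion above produces:

    /* Wait for everything a new access of the given kind must order after. */
    long ret = dma_resv_wait_timeout(obj, dma_resv_usage_rw(is_write),
    				     true, MAX_SCHEDULE_TIMEOUT);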
+
+/**
+ * struct dma_resv - a reservation object manages fences for a buffer
+ *
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
+ */
+struct dma_resv {
+ /**
+ * @lock:
+ *
+ * Update side lock. Don't use directly, instead use the wrapper
+ * functions like dma_resv_lock() and dma_resv_unlock().
+ *
+ * Drivers which use the reservation object to manage memory dynamically
+ * also use this lock to protect buffer object state like placement,
+ * allocation policies or throughout command submission.
+ */
+ struct ww_mutex lock;
+
+ /**
+ * @fences:
+ *
+ * Array of fences which were added to the dma_resv object.
+ *
+ * A new fence is added by calling dma_resv_add_fence(). Since this
+ * often needs to be done past the point of no return in command
+ * submission it cannot fail, and therefore sufficient slots need to be
+ * reserved by calling dma_resv_reserve_fences().
+ */
+ struct dma_resv_list __rcu *fences;
+};
+
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver, use the accessor function instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() or
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
+ * Code which accumulates statistics or similar needs to check for this with
+ * dma_resv_iter_is_restarted().
+ */
+struct dma_resv_iter {
+ /** @obj: The dma_resv object we iterate over */
+ struct dma_resv *obj;
+
+ /** @usage: Return fences with this usage or lower. */
+ enum dma_resv_usage usage;
+
+ /** @fence: the currently handled fence */
+ struct dma_fence *fence;
+
+ /** @fence_usage: the usage of the current fence */
+ enum dma_resv_usage fence_usage;
+
+ /** @index: index into the shared fences */
+ unsigned int index;
+
+ /** @fences: the shared fences; private, *MUST* not dereference */
+ struct dma_resv_list *fences;
+
+ /** @num_fences: number of fences */
+ unsigned int num_fences;
+
+ /** @is_restarted: true if this is the first returned fence */
+ bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
+
+/**
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+ struct dma_resv *obj,
+ enum dma_resv_usage usage)
+{
+ cursor->obj = obj;
+ cursor->usage = usage;
+ cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+ dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_usage - Return the usage of the current fence
+ * @cursor: the cursor of the current position
+ *
+ * Returns the usage of the currently processed fence.
+ */
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
+{
+ return cursor->fence_usage;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
+ *
+ * Return true if this is the first fence in an iteration after a restart.
+ */
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
+{
+ return cursor->is_restarted;
+}
+
+/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
+ * locked iterator dma_resv_for_each_fence() whenever possible.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence) \
+ for (fence = dma_resv_iter_first_unlocked(cursor); \
+ fence; fence = dma_resv_iter_next_unlocked(cursor))
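A sketch of the restart-aware pattern the comment above calls for (the signal counting is purely illustrative):

    struct dma_resv_iter cursor;
    struct dma_fence *fence;
    unsigned int signaled = 0;

    dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
    dma_resv_for_each_fence_unlocked(&cursor, fence) {
    	/* The object was modified; discard partial results. */
    	if (dma_resv_iter_is_restarted(&cursor))
    		signaled = 0;
    	if (dma_fence_is_signaled(fence))
    		signaled++;
    }
    dma_resv_iter_end(&cursor);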
+
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @usage: controls which fences to return
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @usage controls which fences are returned, see enum
+ * dma_resv_usage. The cursor initialisation is part of the iterator and the
+ * fence stays valid as long as the lock is held, so no extra reference to
+ * the fence is taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, usage, fence) \
+ for (dma_resv_iter_begin(cursor, obj, usage), \
+ fence = dma_resv_iter_first(cursor); fence; \
+ fence = dma_resv_iter_next(cursor))
+
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_max_fences(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
+#endif
+
+/**
+ * dma_resv_lock - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object for exclusive access and modification. Note
+ * that the lock is only held against other writers; readers will run
+ * concurrently with a writer under RCU. The seqlock is used to notify
+ * readers if they overlap with a writer.
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
+ */
+static inline int dma_resv_lock(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ return ww_mutex_lock(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_interruptible - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object interruptibly for exclusive access and
+ * modification. Note that the lock is only held against other writers;
+ * readers will run concurrently with a writer under RCU. The seqlock is
+ * used to notify readers if they overlap with a writer.
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ */
+static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ return ww_mutex_lock_interruptible(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_slow - slowpath lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object after a die case. This function
+ * will sleep until the lock becomes available. See dma_resv_lock() as
+ * well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
+ */
+static inline void dma_resv_lock_slow(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ ww_mutex_lock_slow(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_slow_interruptible - slowpath lock the reservation
+ * object, interruptible
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object interruptibly after a die case. This function
+ * will sleep until the lock becomes available. See
+ * dma_resv_lock_interruptible() as well.
+ */
+static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_trylock - trylock the reservation object
+ * @obj: the reservation object
+ *
+ * Tries to lock the reservation object for exclusive access and modification.
+ * Note that the lock is only held against other writers; readers will run
+ * concurrently with a writer under RCU. The seqlock is used to notify readers
+ * if they overlap with a writer.
+ *
+ * Also note that since no context is provided, no deadlock protection is
+ * possible, which is also not needed for a trylock.
+ *
+ * Returns true if the lock was acquired, false otherwise.
+ */
+static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
+{
+ return ww_mutex_trylock(&obj->lock, NULL);
+}
+
+/**
+ * dma_resv_is_locked - is the reservation object locked
+ * @obj: the reservation object
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool dma_resv_is_locked(struct dma_resv *obj)
+{
+ return ww_mutex_is_locked(&obj->lock);
+}
+
+/**
+ * dma_resv_locking_ctx - returns the context used to lock the object
+ * @obj: the reservation object
+ *
+ * Returns the context used to lock a reservation object or NULL if no context
+ * was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
+ * Everyone else just uses it to check whether they're holding a reservation or
+ * not.
+ */
+static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
+{
+ return READ_ONCE(obj->lock.ctx);
+}
+
+/**
+ * dma_resv_unlock - unlock the reservation object
+ * @obj: the reservation object
+ *
+ * Unlocks the reservation object following exclusive access.
+ */
+static inline void dma_resv_unlock(struct dma_resv *obj)
+{
+ dma_resv_reset_max_fences(obj);
+ ww_mutex_unlock(&obj->lock);
+}
+
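Putting the pieces above together, the wait/die dance over two buffers looks roughly like this (a sketch with hypothetical objects; a fresh context holding no locks will not be backed off, so only the second lock can return -EDEADLK):

    struct ww_acquire_ctx ctx;
    int ret;

    ww_acquire_init(&ctx, &reservation_ww_class);

    dma_resv_lock(a->resv, &ctx);	/* first lock of the ctx: no -EDEADLK */
    while ((ret = dma_resv_lock(b->resv, &ctx)) == -EDEADLK) {
    	/* Back off: drop what we hold, sleep on the contended lock, retry. */
    	dma_resv_unlock(a->resv);
    	dma_resv_lock_slow(b->resv, &ctx);
    	swap(a, b);
    }
    ww_acquire_done(&ctx);

    /* ... both reservations held ... */

    dma_resv_unlock(a->resv);
    dma_resv_unlock(b->resv);
    ww_acquire_fini(&ctx);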
+void dma_resv_init(struct dma_resv *obj);
+void dma_resv_fini(struct dma_resv *obj);
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+ unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+ struct dma_fence **fence);
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout);
+void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
+ ktime_t deadline);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
+
+#endif /* _LINUX_RESERVATION_H */
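Since dma_resv_add_fence() must not fail past the point of no return, a slot is reserved up front while failure can still be handled. A hedged sketch of a command-submission path (bo, job_fence and is_write are hypothetical):

    dma_resv_lock(bo->resv, NULL);

    /* Reserve the slot while erroring out is still possible ... */
    ret = dma_resv_reserve_fences(bo->resv, 1);
    if (ret)
    	goto unlock;

    /* ... so that publishing the job's fence cannot fail. */
    dma_resv_add_fence(bo->resv, job_fence,
    		       is_write ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
    unlock:
    	dma_resv_unlock(bo->resv);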
diff --git a/include/linux/dma/amd_xdma.h b/include/linux/dma/amd_xdma.h
new file mode 100644
index 000000000000..ceba69ed7cb4
--- /dev/null
+++ b/include/linux/dma/amd_xdma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _DMAENGINE_AMD_XDMA_H
+#define _DMAENGINE_AMD_XDMA_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num);
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num);
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index);
+
+#endif /* _DMAENGINE_AMD_XDMA_H */
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index e166cac8e870..9752f3745f76 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
* Copyright (C) 2014 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DMA_DW_H
#define _DMA_DW_H
@@ -45,9 +42,13 @@ struct dw_dma_chip {
#if IS_ENABLED(CONFIG_DW_DMAC_CORE)
int dw_dma_probe(struct dw_dma_chip *chip);
int dw_dma_remove(struct dw_dma_chip *chip);
+int idma32_dma_probe(struct dw_dma_chip *chip);
+int idma32_dma_remove(struct dw_dma_chip *chip);
#else
static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
+static inline int idma32_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
+static inline int idma32_dma_remove(struct dw_dma_chip *chip) { return 0; }
#endif /* CONFIG_DW_DMAC_CORE */
#endif /* _DMA_DW_H */
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
new file mode 100644
index 000000000000..3080747689f6
--- /dev/null
+++ b/include/linux/dma/edma.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_H
+#define _DW_EDMA_H
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+#define EDMA_MAX_WR_CH 8
+#define EDMA_MAX_RD_CH 8
+
+struct dw_edma;
+
+struct dw_edma_region {
+ u64 paddr;
+ union {
+ void *mem;
+ void __iomem *io;
+ } vaddr;
+ size_t sz;
+};
+
+/**
+ * struct dw_edma_plat_ops - platform-specific eDMA methods
+ * @irq_vector: Get IRQ number of the passed eDMA channel. Note the
+ * method accepts the channel id in the end-to-end
+ * numbering with the eDMA write channels being placed
+ * first in the row.
+ * @pci_address: Get PCIe bus address corresponding to the passed CPU
+ * address. Note there is no need to specify this
+ * function if the address translation is performed by
+ * the DW PCIe RP/EP controller with the DW eDMA device in
+ * subject and DMA_BYPASS isn't set for all the outbound
+ * iATU windows. That will be done by the controller
+ * automatically.
+ */
+struct dw_edma_plat_ops {
+ int (*irq_vector)(struct device *dev, unsigned int nr);
+ u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
+};
+
+enum dw_edma_map_format {
+ EDMA_MF_EDMA_LEGACY = 0x0,
+ EDMA_MF_EDMA_UNROLL = 0x1,
+ EDMA_MF_HDMA_COMPAT = 0x5,
+ EDMA_MF_HDMA_NATIVE = 0x7,
+};
+
+/**
+ * enum dw_edma_chip_flags - Flags specific to an eDMA chip
+ * @DW_EDMA_CHIP_LOCAL: eDMA is used locally by an endpoint
+ */
+enum dw_edma_chip_flags {
+ DW_EDMA_CHIP_LOCAL = BIT(0),
+};
+
+/**
+ * struct dw_edma_chip - representation of DesignWare eDMA controller hardware
+ * @dev: struct device of the eDMA controller
+ * @nr_irqs: total number of DMA IRQs
+ * @ops: DMA channel to IRQ number mapping
+ * @flags: dw_edma_chip_flags
+ * @reg_base: DMA register base address
+ * @ll_wr_cnt: DMA write link list count
+ * @ll_rd_cnt: DMA read link list count
+ * @ll_region_wr: DMA descriptor link list memory for write channels
+ * @ll_region_rd: DMA descriptor link list memory for read channels
+ * @dt_region_wr: DMA data memory for write channels
+ * @dt_region_rd: DMA data memory for read channels
+ * @mf: DMA register map format
+ * @dw: struct dw_edma that is filled by dw_edma_probe()
+ */
+struct dw_edma_chip {
+ struct device *dev;
+ int nr_irqs;
+ const struct dw_edma_plat_ops *ops;
+ u32 flags;
+
+ void __iomem *reg_base;
+
+ u16 ll_wr_cnt;
+ u16 ll_rd_cnt;
+ /* link list address */
+ struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
+
+ /* data region */
+ struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
+
+ enum dw_edma_map_format mf;
+
+ struct dw_edma *dw;
+};
+
+/* Export to the platform drivers */
+#if IS_REACHABLE(CONFIG_DW_EDMA)
+int dw_edma_probe(struct dw_edma_chip *chip);
+int dw_edma_remove(struct dw_edma_chip *chip);
+#else
+static inline int dw_edma_probe(struct dw_edma_chip *chip)
+{
+ return -ENODEV;
+}
+
+static inline int dw_edma_remove(struct dw_edma_chip *chip)
+{
+ return 0;
+}
+#endif /* CONFIG_DW_EDMA */
+
+#endif /* _DW_EDMA_H */
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index 197eec63e501..77ea602c287c 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -1,21 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Driver for the High Speed UART DMA
*
* Copyright (C) 2015 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DMA_HSU_H
#define _DMA_HSU_H
-#include <linux/device.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+#include <linux/types.h>
#include <linux/platform_data/dma-hsu.h>
+struct device;
struct hsu_dma;
/**
diff --git a/include/linux/dma/idma64.h b/include/linux/dma/idma64.h
new file mode 100644
index 000000000000..621cfae60554
--- /dev/null
+++ b/include/linux/dma/idma64.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2019 Intel Corporation
+ */
+
+#ifndef __LINUX_DMA_IDMA64_H__
+#define __LINUX_DMA_IDMA64_H__
+
+/* Platform driver name */
+#define LPSS_IDMA64_DRIVER_NAME "idma64"
+
+#endif /* __LINUX_DMA_IDMA64_H__ */
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/dma/imx-dma.h
index 7d964e787299..76a8de9ae151 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/dma/imx-dma.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#ifndef __ASM_ARCH_MXC_DMA_H__
-#define __ASM_ARCH_MXC_DMA_H__
+#ifndef __LINUX_DMA_IMX_H
+#define __LINUX_DMA_IMX_H
#include <linux/scatterlist.h>
#include <linux/device.h>
@@ -42,6 +39,9 @@ enum sdma_peripheral_type {
IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
IMX_DMATYPE_SAI, /* SAI */
+ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
+ IMX_DMATYPE_HDMI, /* HDMI Audio */
+ IMX_DMATYPE_I2C, /* I2C */
};
enum imx_dma_prio {
@@ -68,4 +68,36 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
!strcmp(chan->device->dev->driver->name, "imx-dma");
}
-#endif
+/**
+ * struct sdma_peripheral_config - SDMA config for audio
+ * @n_fifos_src: Number of FIFOs for recording
+ * @n_fifos_dst: Number of FIFOs for playback
+ * @stride_fifos_src: FIFO address stride for recording; 0 means all FIFOs are
+ * contiguous, 1 means one word of stride between FIFOs. The
+ * stride must be the same between all FIFOs.
+ * @stride_fifos_dst: FIFO address stride for playback
+ * @words_per_fifo: number of words per FIFO fetch/fill; 1 means one
+ * channel per FIFO, 2 means two channels per FIFO, and
+ * so on. If 'n_fifos_src = 4' and 'words_per_fifo = 2',
+ * the first two words (channels) are fetched from FIFO0,
+ * the next two from FIFO1, and so on; after the last
+ * FIFO3 is fetched, it rolls back to FIFO0.
+ * @sw_done: Use software done. Needed for PDM (micfil)
+ *
+ * Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO
+ * registers. For multichannel recording/playback the SAI/micfil have
+ * one FIFO register per channel and the SDMA engine has to read/write
+ * the next channel from/to the next register and wrap around to the
+ * first register when all channels are handled. The number of active
+ * channels must be communicated to the SDMA engine using this struct.
+ */
+struct sdma_peripheral_config {
+ int n_fifos_src;
+ int n_fifos_dst;
+ int stride_fifos_src;
+ int stride_fifos_dst;
+ int words_per_fifo;
+ bool sw_done;
+};
+
+#endif /* __LINUX_DMA_IMX_H */
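This struct is typically handed to the SDMA driver through the generic dmaengine peripheral-config hook; a sketch for a playback stream (channel, FIFO address and counts are made up):

    struct sdma_peripheral_config sdmacfg = {
    	.n_fifos_dst = 4,		/* hypothetical: 4 playback FIFOs */
    	.words_per_fifo = 1,
    };
    struct dma_slave_config cfg = {
    	.direction = DMA_MEM_TO_DEV,
    	.dst_addr = fifo_phys_addr,	/* placeholder FIFO address */
    	.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    	.peripheral_config = &sdmacfg,
    	.peripheral_size = sizeof(sdmacfg),
    };

    ret = dmaengine_slave_config(chan, &cfg);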
diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h
deleted file mode 100644
index 18031115c668..000000000000
--- a/include/linux/dma/ipu-dma.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_DMA_IPU_DMA_H
-#define __LINUX_DMA_IPU_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-/* IPU DMA Controller channel definitions. */
-enum ipu_channel {
- IDMAC_IC_0 = 0, /* IC (encoding task) to memory */
- IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */
- IDMAC_ADC_0 = 1,
- IDMAC_IC_2 = 2,
- IDMAC_ADC_1 = 2,
- IDMAC_IC_3 = 3,
- IDMAC_IC_4 = 4,
- IDMAC_IC_5 = 5,
- IDMAC_IC_6 = 6,
- IDMAC_IC_7 = 7, /* IC (sensor data) to memory */
- IDMAC_IC_8 = 8,
- IDMAC_IC_9 = 9,
- IDMAC_IC_10 = 10,
- IDMAC_IC_11 = 11,
- IDMAC_IC_12 = 12,
- IDMAC_IC_13 = 13,
- IDMAC_SDC_0 = 14, /* Background synchronous display data */
- IDMAC_SDC_1 = 15, /* Foreground data (overlay) */
- IDMAC_SDC_2 = 16,
- IDMAC_SDC_3 = 17,
- IDMAC_ADC_2 = 18,
- IDMAC_ADC_3 = 19,
- IDMAC_ADC_4 = 20,
- IDMAC_ADC_5 = 21,
- IDMAC_ADC_6 = 22,
- IDMAC_ADC_7 = 23,
- IDMAC_PF_0 = 24,
- IDMAC_PF_1 = 25,
- IDMAC_PF_2 = 26,
- IDMAC_PF_3 = 27,
- IDMAC_PF_4 = 28,
- IDMAC_PF_5 = 29,
- IDMAC_PF_6 = 30,
- IDMAC_PF_7 = 31,
-};
-
-/* Order significant! */
-enum ipu_channel_status {
- IPU_CHANNEL_FREE,
- IPU_CHANNEL_INITIALIZED,
- IPU_CHANNEL_READY,
- IPU_CHANNEL_ENABLED,
-};
-
-#define IPU_CHANNELS_NUM 32
-
-enum pixel_fmt {
- /* 1 byte */
- IPU_PIX_FMT_GENERIC,
- IPU_PIX_FMT_RGB332,
- IPU_PIX_FMT_YUV420P,
- IPU_PIX_FMT_YUV422P,
- IPU_PIX_FMT_YUV420P2,
- IPU_PIX_FMT_YVU422P,
- /* 2 bytes */
- IPU_PIX_FMT_RGB565,
- IPU_PIX_FMT_RGB666,
- IPU_PIX_FMT_BGR666,
- IPU_PIX_FMT_YUYV,
- IPU_PIX_FMT_UYVY,
- /* 3 bytes */
- IPU_PIX_FMT_RGB24,
- IPU_PIX_FMT_BGR24,
- /* 4 bytes */
- IPU_PIX_FMT_GENERIC_32,
- IPU_PIX_FMT_RGB32,
- IPU_PIX_FMT_BGR32,
- IPU_PIX_FMT_ABGR32,
- IPU_PIX_FMT_BGRA32,
- IPU_PIX_FMT_RGBA32,
-};
-
-enum ipu_color_space {
- IPU_COLORSPACE_RGB,
- IPU_COLORSPACE_YCBCR,
- IPU_COLORSPACE_YUV
-};
-
-/*
- * Enumeration of IPU rotation modes
- */
-enum ipu_rotate_mode {
- /* Note the enum values correspond to BAM value */
- IPU_ROTATE_NONE = 0,
- IPU_ROTATE_VERT_FLIP = 1,
- IPU_ROTATE_HORIZ_FLIP = 2,
- IPU_ROTATE_180 = 3,
- IPU_ROTATE_90_RIGHT = 4,
- IPU_ROTATE_90_RIGHT_VFLIP = 5,
- IPU_ROTATE_90_RIGHT_HFLIP = 6,
- IPU_ROTATE_90_LEFT = 7,
-};
-
-/*
- * Enumeration of DI ports for ADC.
- */
-enum display_port {
- DISP0,
- DISP1,
- DISP2,
- DISP3
-};
-
-struct idmac_video_param {
- unsigned short in_width;
- unsigned short in_height;
- uint32_t in_pixel_fmt;
- unsigned short out_width;
- unsigned short out_height;
- uint32_t out_pixel_fmt;
- unsigned short out_stride;
- bool graphics_combine_en;
- bool global_alpha_en;
- bool key_color_en;
- enum display_port disp;
- unsigned short out_left;
- unsigned short out_top;
-};
-
-/*
- * Union of initialization parameters for a logical channel. So far only video
- * parameters are used.
- */
-union ipu_channel_param {
- struct idmac_video_param video;
-};
-
-struct idmac_tx_desc {
- struct dma_async_tx_descriptor txd;
- struct scatterlist *sg; /* scatterlist for this */
- unsigned int sg_len; /* tx-descriptor. */
- struct list_head list;
-};
-
-struct idmac_channel {
- struct dma_chan dma_chan;
- dma_cookie_t completed; /* last completed cookie */
- union ipu_channel_param params;
- enum ipu_channel link; /* input channel, linked to the output */
- enum ipu_channel_status status;
- void *client; /* Only one client per channel */
- unsigned int n_tx_desc;
- struct idmac_tx_desc *desc; /* allocated tx-descriptors */
- struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */
- struct list_head free_list; /* free tx-descriptors */
- struct list_head queue; /* queued tx-descriptors */
- spinlock_t lock; /* protects sg[0,1], queue */
- struct mutex chan_mutex; /* protects status, cookie, free_list */
- bool sec_chan_en;
- int active_buffer;
- unsigned int eof_irq;
- char eof_name[16]; /* EOF IRQ name for request_irq() */
-};
-
-#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd)
-#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan)
-
-#endif /* __LINUX_DMA_IPU_DMA_H */
diff --git a/include/linux/dma/k3-event-router.h b/include/linux/dma/k3-event-router.h
new file mode 100644
index 000000000000..e3f88b2f87be
--- /dev/null
+++ b/include/linux/dma/k3-event-router.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef K3_EVENT_ROUTER_
+#define K3_EVENT_ROUTER_
+
+#include <linux/types.h>
+
+struct k3_event_route_data {
+ void *priv;
+ int (*set_event)(void *priv, u32 event);
+};
+
+#endif /* K3_EVENT_ROUTER_ */
diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
new file mode 100644
index 000000000000..5f106d852f1c
--- /dev/null
+++ b/include/linux/dma/k3-psil.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef K3_PSIL_H_
+#define K3_PSIL_H_
+
+#include <linux/types.h>
+
+#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000
+
+struct device;
+
+/**
+ * enum udma_tp_level - Channel Throughput Levels
+ * @UDMA_TP_NORMAL: Normal channel
+ * @UDMA_TP_HIGH: High Throughput channel
+ * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel
+ */
+enum udma_tp_level {
+ UDMA_TP_NORMAL = 0,
+ UDMA_TP_HIGH,
+ UDMA_TP_ULTRAHIGH,
+ UDMA_TP_LAST,
+};
+
+/**
+ * enum psil_endpoint_type - PSI-L Endpoint type
+ * @PSIL_EP_NATIVE: Normal channel
+ * @PSIL_EP_PDMA_XY: XY mode PDMA
+ * @PSIL_EP_PDMA_MCAN: MCAN mode PDMA
+ * @PSIL_EP_PDMA_AASRC: AASRC mode PDMA
+ */
+enum psil_endpoint_type {
+ PSIL_EP_NATIVE = 0,
+ PSIL_EP_PDMA_XY,
+ PSIL_EP_PDMA_MCAN,
+ PSIL_EP_PDMA_AASRC,
+};
+
+/**
+ * struct psil_endpoint_config - PSI-L Endpoint configuration
+ * @ep_type: PSI-L endpoint type
+ * @channel_tpl: Desired throughput level for the channel
+ * @pkt_mode: If set, the channel must be in Packet mode, otherwise in
+ * TR mode
+ * @notdpkt: TDCM must be suppressed on the TX channel
+ * @needs_epib: Endpoint needs EPIB
+ * @pdma_acc32: ACC32 must be enabled on the PDMA side
+ * @pdma_burst: BURST must be enabled on the PDMA side
+ * @psd_size: If set, PSdata is used by the endpoint
+ * @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels.
+ * The thread must be serviced by the specified channel if
+ * mapped_channel_id is >= 0 in case of PKTDMA
+ * @flow_start: PKTDMA flow range start of mapped channel. Unmapped
+ * channels use flow_id == chan_id
+ * @flow_num: PKTDMA flow count of mapped channel. Unmapped channels
+ * use flow_id == chan_id
+ * @default_flow_id: PKTDMA default (r)flow index of mapped channel.
+ * Must be within the flow range of the mapped channel.
+ */
+struct psil_endpoint_config {
+ enum psil_endpoint_type ep_type;
+ enum udma_tp_level channel_tpl;
+
+ unsigned pkt_mode:1;
+ unsigned notdpkt:1;
+ unsigned needs_epib:1;
+ /* PDMA properties, valid for PSIL_EP_PDMA_* */
+ unsigned pdma_acc32:1;
+ unsigned pdma_burst:1;
+
+ u32 psd_size;
+ /* PKTDMA mapped channel */
+ s16 mapped_channel_id;
+ /* PKTDMA tflow and rflow ranges for mapped channel */
+ u16 flow_start;
+ u16 flow_num;
+ s16 default_flow_id;
+};
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+ struct psil_endpoint_config *ep_config);
+
+#endif /* K3_PSIL_H_ */
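psil_set_new_ep_config() lets glue or board code override the static endpoint table before a channel is requested; a sketch (the thread name and settings are invented):

    struct psil_endpoint_config ep_config = {
    	.ep_type = PSIL_EP_PDMA_XY,
    	.pkt_mode = 1,
    	.needs_epib = 1,
    	.psd_size = 16,
    };

    /* "my-thread" stands in for a PSI-L thread name from the device tree. */
    ret = psil_set_new_ep_config(dev, "my-thread", &ep_config);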
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
new file mode 100644
index 000000000000..5d43881e6fb7
--- /dev/null
+++ b/include/linux/dma/k3-udma-glue.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef K3_UDMA_GLUE_H_
+#define K3_UDMA_GLUE_H_
+
+#include <linux/types.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+
+struct k3_udma_glue_tx_channel_cfg {
+ struct k3_ring_cfg tx_cfg;
+ struct k3_ring_cfg txcq_cfg;
+
+ bool tx_pause_on_err;
+ bool tx_filt_einfo;
+ bool tx_filt_pswords;
+ bool tx_supr_tdpkt;
+ u32 swdata_size;
+};
+
+struct k3_udma_glue_tx_channel;
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+ const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
+
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_tx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma);
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma);
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ bool sync);
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ void *data, void (*cleanup)(void *data, dma_addr_t desc_dma));
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+struct device *
+ k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr);
+void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr);
+
+enum {
+ K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4,
+};
+
+/**
+ * k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg
+ *
+ * @rx_cfg: RX ring configuration
+ * @rxfdq_cfg: RX free Host PD ring configuration
+ * @ring_rxq_id: RX ring id (or -1 for any)
+ * @ring_rxfdq0_id: RX free Host PD ring (FDQ) id (or -1 for any)
+ * @rx_error_handling: Rx Error Handling Mode (0 - drop, 1 - re-try)
+ * @src_tag_lo_sel: Rx Source Tag Low Byte Selector in Host PD
+ */
+struct k3_udma_glue_rx_flow_cfg {
+ struct k3_ring_cfg rx_cfg;
+ struct k3_ring_cfg rxfdq_cfg;
+ int ring_rxq_id;
+ int ring_rxfdq0_id;
+ bool rx_error_handling;
+ int src_tag_lo_sel;
+};
+
+/**
+ * k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg
+ *
+ * @swdata_size: SW Data is present in the Host PD of @swdata_size bytes
+ * @flow_id_base: first flow_id used by channel.
+ * if @flow_id_base = -1 - range of GP rflows will be
+ * allocated dynamically.
+ * @flow_id_num: number of RX flows used by channel
+ * @flow_id_use_rxchan_id: use RX channel id as flow id,
+ * used only if @flow_id_num = 1
+ * @remote: indication that the RX channel is remote - some remote CPU
+ * core owns and controls the RX channel. The Linux host is
+ * only allowed to attach and configure RX flows within the
+ * RX channel. If set, no RX channel operations will be
+ * performed by the K3 NAVSS DMA glue interface.
+ * @def_flow_cfg: default RX flow configuration,
+ * used only if @flow_id_num = 1
+ */
+struct k3_udma_glue_rx_channel_cfg {
+ u32 swdata_size;
+ int flow_id_base;
+ int flow_id_num;
+ bool flow_id_use_rxchan_id;
+ bool remote;
+
+ struct k3_udma_glue_rx_flow_cfg *def_flow_cfg;
+};
+
+struct k3_udma_glue_rx_channel;
+
+struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
+ struct device *dev,
+ const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg);
+
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id);
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ bool sync);
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma);
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma);
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num);
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma));
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+struct device *
+ k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr);
+void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr);
+
+#endif /* K3_UDMA_GLUE_H_ */
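A rough tour of the TX-channel lifecycle using the API above (error handling elided; names, ring parameters and the completion helper are hypothetical):

    struct k3_udma_glue_tx_channel_cfg cfg = {
    	.swdata_size = 16,
    	/* tx_cfg/txcq_cfg ring parameters omitted in this sketch */
    };
    struct k3_udma_glue_tx_channel *tx_chn;

    tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
    k3_udma_glue_enable_tx_chn(tx_chn);

    /* Queue a prepared CPPI5 host descriptor ... */
    k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);

    /* ... and reap completed descriptors from the completion ring. */
    while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
    	complete_one(desc_dma);		/* hypothetical helper */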
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
deleted file mode 100644
index 2dc9b2bc18fc..000000000000
--- a/include/linux/dma/mmp-pdma.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _MMP_PDMA_H_
-#define _MMP_PDMA_H_
-
-struct dma_chan;
-
-#ifdef CONFIG_MMP_PDMA
-bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
-#else
-static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
-#endif /* _MMP_PDMA_H_ */
diff --git a/include/linux/dma/mxs-dma.h b/include/linux/dma/mxs-dma.h
new file mode 100644
index 000000000000..069d9f5a609e
--- /dev/null
+++ b/include/linux/dma/mxs-dma.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MXS_DMA_H_
+#define _MXS_DMA_H_
+
+#include <linux/dmaengine.h>
+
+#define MXS_DMA_CTRL_WAIT4END BIT(31)
+#define MXS_DMA_CTRL_WAIT4RDY BIT(30)
+
+/*
+ * The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words
+ * in the second argument to dmaengine_prep_slave_sg when the direction is
+ * set to DMA_TRANS_NONE. To make this clear and to prevent users from doing
+ * the error-prone casting themselves, we have this wrapper function.
+ */
+static inline struct dma_async_tx_descriptor *mxs_dmaengine_prep_pio(
+ struct dma_chan *chan, u32 *pio, unsigned int npio,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ return dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, npio,
+ dir, flags);
+}
+
+#endif /* _MXS_DMA_H_ */
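For example, a GPMI-NAND-style user might issue a PIO sequence like this (the register words and channel are placeholders):

    u32 pio[2] = { ctrl0_value, cmd0_value };	/* placeholder PIO words */
    struct dma_async_tx_descriptor *desc;

    desc = mxs_dmaengine_prep_pio(chan, pio, ARRAY_SIZE(pio),
    				  DMA_TRANS_NONE, MXS_DMA_CTRL_WAIT4END);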
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
index 3edc99294bf6..fceb5df07097 100644
--- a/include/linux/dma/pxa-dma.h
+++ b/include/linux/dma/pxa-dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PXA_DMA_H_
#define _PXA_DMA_H_
@@ -8,20 +9,18 @@ enum pxad_chan_prio {
PXAD_PRIO_LOWEST,
};
+/**
+ * struct pxad_param - dma channel request parameters
+ * @drcmr: requestor line number
+ * @prio: minimal mandatory priority of the channel
+ *
+ * If a requested channel is granted, its priority will be at least @prio,
+ * i.e. if PXAD_PRIO_LOW is required, the requested channel will be either
+ * PXAD_PRIO_LOW, PXAD_PRIO_NORMAL or PXAD_PRIO_HIGHEST.
+ */
struct pxad_param {
unsigned int drcmr;
enum pxad_chan_prio prio;
};
-struct dma_chan;
-
-#ifdef CONFIG_PXA_DMA
-bool pxad_filter_fn(struct dma_chan *chan, void *param);
-#else
-static inline bool pxad_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
#endif /* _PXA_DMA_H_ */
diff --git a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h
new file mode 100644
index 000000000000..6680dd1a43c6
--- /dev/null
+++ b/include/linux/dma/qcom-gpi-dma.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+#ifndef QCOM_GPI_DMA_H
+#define QCOM_GPI_DMA_H
+
+/**
+ * enum spi_transfer_cmd - spi transfer commands
+ * @SPI_TX: transmit only
+ * @SPI_RX: receive only
+ * @SPI_DUPLEX: full-duplex transfer
+ */
+enum spi_transfer_cmd {
+ SPI_TX = 1,
+ SPI_RX,
+ SPI_DUPLEX,
+};
+
+/**
+ * struct gpi_spi_config - spi config for peripheral
+ *
+ * @loopback_en: spi loopback enable when set
+ * @clock_pol_high: clock polarity
+ * @data_pol_high: data polarity
+ * @pack_en: process tx/rx buffers as packed
+ * @word_len: spi word length
+ * @clk_div: source clock divider
+ * @clk_src: serial clock
+ * @cmd: spi cmd
+ * @fragmentation: keep CS asserted at end of sequence
+ * @cs: chip select toggle
+ * @set_config: set peripheral config
+ * @rx_len: receive length for buffer
+ */
+struct gpi_spi_config {
+ u8 set_config;
+ u8 loopback_en;
+ u8 clock_pol_high;
+ u8 data_pol_high;
+ u8 pack_en;
+ u8 word_len;
+ u8 fragmentation;
+ u8 cs;
+ u32 clk_div;
+ u32 clk_src;
+ enum spi_transfer_cmd cmd;
+ u32 rx_len;
+};
+
+enum i2c_op {
+ I2C_WRITE = 1,
+ I2C_READ,
+};
+
+/**
+ * struct gpi_i2c_config - i2c config for peripheral
+ *
+ * @pack_enable: process tx/rx buffers as packed
+ * @cycle_count: clock cycles to be sent
+ * @high_count: high period of clock
+ * @low_count: low period of clock
+ * @clk_div: source clock divider
+ * @addr: i2c bus address
+ * @stretch: stretch the clock at eot
+ * @set_config: set peripheral config
+ * @rx_len: receive length for buffer
+ * @op: i2c cmd
+ * @multi_msg: is part of multi i2c r-w msgs
+ */
+struct gpi_i2c_config {
+ u8 set_config;
+ u8 pack_enable;
+ u8 cycle_count;
+ u8 high_count;
+ u8 low_count;
+ u8 addr;
+ u8 stretch;
+ u16 clk_div;
+ u32 rx_len;
+ enum i2c_op op;
+ bool multi_msg;
+};
+
+#endif /* QCOM_GPI_DMA_H */
diff --git a/include/linux/dma/qcom_adm.h b/include/linux/dma/qcom_adm.h
new file mode 100644
index 000000000000..af20df674f0c
--- /dev/null
+++ b/include/linux/dma/qcom_adm.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_DMA_QCOM_ADM_H
+#define __LINUX_DMA_QCOM_ADM_H
+
+#include <linux/types.h>
+
+struct qcom_adm_peripheral_config {
+ u32 crci;
+ u32 mux;
+};
+
+#endif /* __LINUX_DMA_QCOM_ADM_H */
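Like the other peripheral-specific structs in this directory, this one travels through dma_slave_config's peripheral_config pointer; a sketch with made-up CRCI/mux values:

    struct qcom_adm_peripheral_config periph = {
    	.crci = 1,	/* hypothetical CRCI assignment */
    	.mux = 0,
    };
    struct dma_slave_config cfg = {
    	.direction = DMA_DEV_TO_MEM,
    	.peripheral_config = &periph,
    	.peripheral_size = sizeof(periph),
    };

    ret = dmaengine_slave_config(chan, &cfg);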
diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
new file mode 100644
index 000000000000..68fc0e643b1b
--- /dev/null
+++ b/include/linux/dma/qcom_bam_dma.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _QCOM_BAM_DMA_H
+#define _QCOM_BAM_DMA_H
+
+#include <asm/byteorder.h>
+
+/*
+ * This data type corresponds to the native Command Element
+ * supported by BAM DMA Engine.
+ *
+ * @cmd_and_addr - upper 8 bits command and lower 24 bits register address.
+ * @data - for write command: content to be written into peripheral register.
+ * for read command: dest addr to write peripheral register value.
+ * @mask - register mask.
+ * @reserved - for future usage.
+ *
+ */
+struct bam_cmd_element {
+ __le32 cmd_and_addr;
+ __le32 data;
+ __le32 mask;
+ __le32 reserved;
+};
+
+/*
+ * This enum indicates the command type in a command element
+ */
+enum bam_command_type {
+ BAM_WRITE_COMMAND = 0,
+ BAM_READ_COMMAND,
+};
+
+/*
+ * bam_prep_ce_le32 - Wrapper function to prepare a single BAM command
+ * element with the data already in le32 format.
+ *
+ * @bam_ce: bam command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read in le32
+ */
+static inline void
+bam_prep_ce_le32(struct bam_cmd_element *bam_ce, u32 addr,
+ enum bam_command_type cmd, __le32 data)
+{
+ bam_ce->cmd_and_addr =
+ cpu_to_le32((addr & 0xffffff) | ((cmd & 0xff) << 24));
+ bam_ce->data = data;
+ bam_ce->mask = cpu_to_le32(0xffffffff);
+}
+
+/*
+ * bam_prep_ce - Wrapper function to prepare a single BAM command element
+ * with the data.
+ *
+ * @bam_ce: BAM command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read
+ */
+static inline void
+bam_prep_ce(struct bam_cmd_element *bam_ce, u32 addr,
+ enum bam_command_type cmd, u32 data)
+{
+ bam_prep_ce_le32(bam_ce, addr, cmd, cpu_to_le32(data));
+}
+#endif
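A sketch of building a one-element command list with the helpers above (register address and value are placeholders):

    struct bam_cmd_element ce;

    /* Write 0x1 to the (hypothetical) peripheral register at 0x00f000. */
    bam_prep_ce(&ce, 0x00f000, BAM_WRITE_COMMAND, 0x1);
    /* 'ce' can now be mapped and handed to the BAM channel as command data. */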
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
new file mode 100644
index 000000000000..d09c6f6f6da5
--- /dev/null
+++ b/include/linux/dma/sprd-dma.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _SPRD_DMA_H_
+#define _SPRD_DMA_H_
+
+#define SPRD_DMA_REQ_SHIFT 8
+#define SPRD_DMA_TRG_MODE_SHIFT 16
+#define SPRD_DMA_CHN_MODE_SHIFT 24
+#define SPRD_DMA_FLAGS(chn_mode, trg_mode, req_mode, int_type) \
+ ((chn_mode) << SPRD_DMA_CHN_MODE_SHIFT | \
+ (trg_mode) << SPRD_DMA_TRG_MODE_SHIFT | \
+ (req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
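The macro packs four independent fields into the dmaengine flags word; for instance, a plain fragment-request transfer raising an interrupt per fragment could be encoded as follows (a sketch using the enums defined further down in this header):

    unsigned long flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE,
    					 SPRD_DMA_NO_TRG,
    					 SPRD_DMA_FRAG_REQ,
    					 SPRD_DMA_FRAG_INT);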
+
+/*
+ * The Spreadtrum DMA controller supports channel 2-stage transfer, which means
+ * we can request 2 DMA channels, one as the source channel and another one as
+ * the destination channel. Each channel is independent and has its own
+ * configurations. Once the source channel's transaction is done, it will
+ * trigger the destination channel's transaction automatically by hardware
+ * signal.
+ *
+ * To support 2-stage transfer, we must configure the channel mode and trigger
+ * mode as below definition.
+ */
+
+/*
+ * enum sprd_dma_chn_mode: define the DMA channel mode for 2-stage transfer
+ * @SPRD_DMA_CHN_MODE_NONE: No channel mode setting which means channel doesn't
+ * support the 2-stage transfer.
+ * @SPRD_DMA_SRC_CHN0: Channel used as source channel 0.
+ * @SPRD_DMA_SRC_CHN1: Channel used as source channel 1.
+ * @SPRD_DMA_DST_CHN0: Channel used as destination channel 0.
+ * @SPRD_DMA_DST_CHN1: Channel used as destination channel 1.
+ *
+ * The DMA controller currently supports 2 groups of 2-stage transfer.
+ */
+enum sprd_dma_chn_mode {
+ SPRD_DMA_CHN_MODE_NONE,
+ SPRD_DMA_SRC_CHN0,
+ SPRD_DMA_SRC_CHN1,
+ SPRD_DMA_DST_CHN0,
+ SPRD_DMA_DST_CHN1,
+};
+
+/*
+ * enum sprd_dma_trg_mode: define the DMA channel trigger mode for 2-stage
+ * transfer
+ * @SPRD_DMA_NO_TRG: No trigger setting.
+ * @SPRD_DMA_FRAG_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's fragment request is done.
+ * @SPRD_DMA_BLOCK_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's block request is done.
+ * @SPRD_DMA_TRANS_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's transfer request is done.
+ * @SPRD_DMA_LIST_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's link-list request is done.
+ */
+enum sprd_dma_trg_mode {
+ SPRD_DMA_NO_TRG,
+ SPRD_DMA_FRAG_DONE_TRG,
+ SPRD_DMA_BLOCK_DONE_TRG,
+ SPRD_DMA_TRANS_DONE_TRG,
+ SPRD_DMA_LIST_DONE_TRG,
+};
+
+/*
+ * enum sprd_dma_req_mode: define the DMA request mode
+ * @SPRD_DMA_FRAG_REQ: fragment request mode
+ * @SPRD_DMA_BLK_REQ: block request mode
+ * @SPRD_DMA_TRANS_REQ: transaction request mode
+ * @SPRD_DMA_LIST_REQ: link-list request mode
+ *
+ * We have 4 types request mode: fragment mode, block mode, transaction mode
+ * and linklist mode. One transaction can contain several blocks, one block can
+ * contain several fragments. Link-list mode means we can save several DMA
+ * configuration into one reserved memory, then DMA can fetch each DMA
+ * configuration automatically to start transfer.
+ */
+enum sprd_dma_req_mode {
+ SPRD_DMA_FRAG_REQ,
+ SPRD_DMA_BLK_REQ,
+ SPRD_DMA_TRANS_REQ,
+ SPRD_DMA_LIST_REQ,
+};
+
+/*
+ * enum sprd_dma_int_type: define the DMA interrupt type
+ * @SPRD_DMA_NO_INT: do not need generate DMA interrupts.
+ * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
+ * is done.
+ * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
+ * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
+ * or one block request is done.
+ * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
+ * request is done.
+ * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
+ * transaction request or fragment request is done.
+ * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
+ * transaction request or block request is done.
+ * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
+ * is done.
+ * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is
+ * incorrect.
+ */
+enum sprd_dma_int_type {
+ SPRD_DMA_NO_INT,
+ SPRD_DMA_FRAG_INT,
+ SPRD_DMA_BLK_INT,
+ SPRD_DMA_BLK_FRAG_INT,
+ SPRD_DMA_TRANS_INT,
+ SPRD_DMA_TRANS_FRAG_INT,
+ SPRD_DMA_TRANS_BLK_INT,
+ SPRD_DMA_LIST_INT,
+ SPRD_DMA_CFGERR_INT,
+};
+
+/*
+ * struct sprd_dma_linklist - DMA link-list address structure
+ * @virt_addr: link-list virtual address to configure link-list node
+ * @phy_addr: link-list physical address to link DMA transfer
+ * @wrap_addr: the wrap address for link-list mode, which means once the
+ * transfer address reaches the wrap address, the next transfer address
+ * will jump to the address specified by wrap_to register.
+ *
+ * The Spreadtrum DMA controller supports the link-list mode, which means
+ * slaves can supply several groups of configurations (each configuration
+ * represents one DMA transfer) saved in memory, and the DMA controller will
+ * link these groups of configurations by writing the physical address of
+ * each configuration into the link-list register.
+ *
+ * As shown below, the link-list pointer register points to the physical
+ * address of 'configuration 1', the 'configuration 1' link-list pointer
+ * points to 'configuration 2', and so on.
+ * Once the DMA transfer is triggered, the DMA controller loads 'configuration
+ * 1' into its registers automatically; after the 'configuration 1'
+ * transaction is done, the DMA controller loads 'configuration 2'
+ * automatically, and so on until all DMA transactions are done.
+ *
+ * Note: The last link-list pointer should point back to the physical address
+ * of 'configuration 1', which prevents the DMA controller from loading an
+ * incorrect configuration after the last configuration's transaction is done.
+ *
+ *      DMA controller                  linklist memory
+ *  ----------------------           --------------------
+ * | linklist pointer reg |------->| configuration 1    |<--+
+ *  ----------------------         | linklist pointer   |   |
+ *                                  ---------+----------    |
+ *                                           v              |
+ *                                  --------------------    |
+ *                                 | configuration 2    |   |
+ *                                 | linklist pointer   |   |
+ *                                  ---------+----------    |
+ *                                           v              |
+ *                                          ...             |
+ *                                           v              |
+ *                                  --------------------    |
+ *                                 | configuration n    |   |
+ *                                 | linklist pointer   |---+
+ *                                  --------------------
+ *
+ * To support the link-list mode, DMA slaves should allocate one segment of
+ * memory from always-on IRAM or DMA coherent memory to store these groups of
+ * DMA configurations, and pass the virtual and physical address to the DMA
+ * controller.
+ */
+struct sprd_dma_linklist {
+ unsigned long virt_addr;
+ phys_addr_t phy_addr;
+ phys_addr_t wrap_addr;
+};
+
+#endif
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
new file mode 100644
index 000000000000..c53c0f6e3b1a
--- /dev/null
+++ b/include/linux/dma/ti-cppi5.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPPI5 descriptors interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __TI_CPPI5_H__
+#define __TI_CPPI5_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+/**
+ * struct cppi5_desc_hdr_t - Descriptor header, present in all types of
+ * descriptors
+ * @pkt_info0: Packet info word 0 (n/a in Buffer desc)
+ * @pkt_info1: Packet info word 1 (n/a in Buffer desc)
+ * @pkt_info2: Packet info word 2 (n/a in Buffer desc)
+ * @src_dst_tag: Packet info word 3 (n/a in Buffer desc)
+ */
+struct cppi5_desc_hdr_t {
+ u32 pkt_info0;
+ u32 pkt_info1;
+ u32 pkt_info2;
+ u32 src_dst_tag;
+} __packed;
+
+/**
+ * struct cppi5_host_desc_t - Host-mode packet and buffer descriptor definition
+ * @hdr: Descriptor header
+ * @next_desc: word 4/5: Linking word
+ * @buf_ptr: word 6/7: Buffer pointer
+ * @buf_info1: word 8: Buffer valid data length
+ * @org_buf_len: word 9: Original buffer length
+ * @org_buf_ptr: word 10/11: Original buffer pointer
+ * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or
+ * Protocol Specific Data (optional, 0-128 bytes in
+ * multiples of 4), and/or
+ * Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_host_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u64 next_desc;
+ u64 buf_ptr;
+ u32 buf_info1;
+ u32 org_buf_len;
+ u64 org_buf_ptr;
+ u32 epib[];
+} __packed;
+
+#define CPPI5_DESC_MIN_ALIGN (16U)
+
+#define CPPI5_INFO0_HDESC_EPIB_SIZE (16U)
+#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE (128U)
+
+#define CPPI5_INFO0_HDESC_TYPE_SHIFT (30U)
+#define CPPI5_INFO0_HDESC_TYPE_MASK GENMASK(31, 30)
+#define CPPI5_INFO0_DESC_TYPE_VAL_HOST (1U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_MONO (2U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_TR (3U)
+#define CPPI5_INFO0_HDESC_EPIB_PRESENT BIT(29)
+/*
+ * Protocol Specific Words location:
+ * 0 - located in the descriptor,
+ * 1 = located in the SOP Buffer immediately prior to the data.
+ */
+#define CPPI5_INFO0_HDESC_PSINFO_LOCATION BIT(28)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT (22U)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK GENMASK(27, 22)
+#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT (0)
+#define CPPI5_INFO0_HDESC_PKTLEN_MASK GENMASK(21, 0)
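As a sketch of how these shift/mask pairs are meant to be combined (illustrative, not code from this header), initializing word 0 of a host descriptor might look like:

    desc->hdr.pkt_info0 =
    	(CPPI5_INFO0_DESC_TYPE_VAL_HOST << CPPI5_INFO0_HDESC_TYPE_SHIFT) |
    	CPPI5_INFO0_HDESC_EPIB_PRESENT |
    	(pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK);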
+
+#define CPPI5_INFO1_DESC_PKTERROR_SHIFT (28U)
+#define CPPI5_INFO1_DESC_PKTERROR_MASK GENMASK(31, 28)
+#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT (24U)
+#define CPPI5_INFO1_HDESC_PSFLGS_MASK GENMASK(27, 24)
+#define CPPI5_INFO1_DESC_PKTID_SHIFT (14U)
+#define CPPI5_INFO1_DESC_PKTID_MASK GENMASK(23, 14)
+#define CPPI5_INFO1_DESC_FLOWID_SHIFT (0)
+#define CPPI5_INFO1_DESC_FLOWID_MASK GENMASK(13, 0)
+#define CPPI5_INFO1_DESC_FLOWID_DEFAULT CPPI5_INFO1_DESC_FLOWID_MASK
+
+#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT (27U)
+#define CPPI5_INFO2_HDESC_PKTTYPE_MASK GENMASK(31, 27)
+/* Return Policy: 0 = Entire packet, 1 = Each buffer */
+#define CPPI5_INFO2_HDESC_RETPOLICY BIT(18)
+/*
+ * Early Return:
+ * 0 = desc pointers should be returned after all reads have been completed
+ * 1 = desc pointers should be returned immediately upon fetching
+ * the descriptor and beginning to transfer data.
+ */
+#define CPPI5_INFO2_HDESC_EARLYRET BIT(17)
+/*
+ * Return Push Policy:
+ * 0 = Descriptor must be returned to tail of queue
+ * 1 = Descriptor must be returned to head of queue
+ */
+#define CPPI5_INFO2_DESC_RETPUSHPOLICY BIT(16)
+#define CPPI5_INFO2_DESC_RETP_MASK GENMASK(18, 16)
+
+#define CPPI5_INFO2_DESC_RETQ_SHIFT (0)
+#define CPPI5_INFO2_DESC_RETQ_MASK GENMASK(15, 0)
+
+#define CPPI5_INFO3_DESC_SRCTAG_SHIFT (16U)
+#define CPPI5_INFO3_DESC_SRCTAG_MASK GENMASK(31, 16)
+#define CPPI5_INFO3_DESC_DSTTAG_SHIFT (0)
+#define CPPI5_INFO3_DESC_DSTTAG_MASK GENMASK(15, 0)
+
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT (0)
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK GENMASK(27, 0)
+
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT (0)
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK GENMASK(27, 0)
+
+/**
+ * struct cppi5_desc_epib_t - Host Packet Descriptor Extended Packet Info Block
+ * @timestamp: word 0: application specific timestamp
+ * @sw_info0: word 1: Software Info 0
+ * @sw_info1: word 2: Software Info 1
+ * @sw_info2: word 3: Software Info 2
+ */
+struct cppi5_desc_epib_t {
+ u32 timestamp; /* w0: application specific timestamp */
+ u32 sw_info0; /* w1: Software Info 0 */
+ u32 sw_info1; /* w2: Software Info 1 */
+ u32 sw_info2; /* w3: Software Info 2 */
+};
+
+/**
+ * struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor
+ * @hdr: Descriptor header
+ * @epib: Extended Packet Info Data (optional, 4 words), and/or
+ * Protocol Specific Data (optional, 0-128 bytes in
+ * multiples of 4), and/or
+ * Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_monolithic_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u32 epib[];
+};
+
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U)
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK GENMASK(26, 18)
+
+/*
+ * Reload Count:
+ * 0 = Finish the packet and place the descriptor back on the return queue
+ * 1-0x1ff = Vector to the Reload Index and resume processing
+ * 0x1ff indicates perpetual loop, infinite reload until the channel is stopped
+ */
+#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT (20U)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MASK GENMASK(28, 20)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MAX (0x1ff)
+#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE CPPI5_INFO0_TRDESC_RLDCNT_MAX
+#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT (14U)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MASK GENMASK(19, 14)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MAX (0x3f)
+#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT (0)
+#define CPPI5_INFO0_TRDESC_LASTIDX_MASK GENMASK(13, 0)
+
+#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT (24U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_MASK GENMASK(26, 24)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B (0)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B (1U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B (2U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B (3U)
+
+static inline void cppi5_desc_dump(void *desc, u32 size)
+{
+ print_hex_dump(KERN_ERR, "dump udmap_desc: ", DUMP_PREFIX_NONE,
+ 32, 4, desc, size, false);
+}
+
+#define CPPI5_TDCM_MARKER (0x1)
+/**
+ * cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message
+ * @paddr: Physical address of the packet popped from the ring
+ *
+ * Returns true if the address indicates TDCM
+ */
+static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
+{
+ return (paddr & CPPI5_TDCM_MARKER) ? true : false;
+}
+
+/**
+ * cppi5_desc_get_type - get descriptor type
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns descriptor type:
+ * CPPI5_INFO0_DESC_TYPE_VAL_HOST
+ * CPPI5_INFO0_DESC_TYPE_VAL_MONO
+ * CPPI5_INFO0_DESC_TYPE_VAL_TR
+ */
+static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >>
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+}
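
A hedged sketch of how the two helpers above combine on the completion path;
paddr_to_virt(), complete_teardown() and handle_rx_packet() are hypothetical
stand-ins for driver code:

/* paddr was popped from a completion ring by the driver. */
if (cppi5_desc_is_tdcm(paddr)) {
	/* Teardown Complete Message: the channel finished shutting down. */
	complete_teardown();				/* hypothetical */
} else {
	struct cppi5_host_desc_t *desc = paddr_to_virt(paddr); /* hypothetical */

	if (cppi5_desc_get_type(&desc->hdr) == CPPI5_INFO0_DESC_TYPE_VAL_HOST)
		handle_rx_packet(desc);			/* hypothetical */
}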
+
+/**
+ * cppi5_desc_get_errflags - get Error Flags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ *
+ * Returns Error Flags from Packet/TR Descriptor
+ */
+static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >>
+ CPPI5_INFO1_DESC_PKTERROR_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_pktids - get Packet and Flow ids from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ *
+ * Returns Packet and Flow ids from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *pkt_id, u32 *flow_id)
+{
+ *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >>
+ CPPI5_INFO1_DESC_PKTID_SHIFT;
+ *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >>
+ CPPI5_INFO1_DESC_FLOWID_SHIFT;
+}
+
+/**
+ * cppi5_desc_set_pktids - set Packet and Flow ids in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ */
+static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 pkt_id, u32 flow_id)
+{
+ desc_hdr->pkt_info1 &= ~(CPPI5_INFO1_DESC_PKTID_MASK |
+ CPPI5_INFO1_DESC_FLOWID_MASK);
+ desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) &
+ CPPI5_INFO1_DESC_PKTID_MASK;
+ desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) &
+ CPPI5_INFO1_DESC_FLOWID_MASK;
+}
+
+/**
+ * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @flags: flags, supported values
+ * CPPI5_INFO2_HDESC_RETPOLICY
+ * CPPI5_INFO2_HDESC_EARLYRET
+ * CPPI5_INFO2_DESC_RETPUSHPOLICY
+ * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved
+ */
+static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 flags, u32 return_ring_id)
+{
+ desc_hdr->pkt_info2 &= ~(CPPI5_INFO2_DESC_RETP_MASK |
+ CPPI5_INFO2_DESC_RETQ_MASK);
+ desc_hdr->pkt_info2 |= flags & CPPI5_INFO2_DESC_RETP_MASK;
+ desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK;
+}
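
For example, a driver that wants the entire packet returned to the tail of
return ring 0 on completion (both values are illustrative) could do:

/* Entire packet returned to the tail of ring 0 once the transfer is done. */
cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);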
+
+/**
+ * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Returns Packet Src/Dst Tags from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *src_tag_id, u32 *dst_tag_id)
+{
+ if (src_tag_id)
+ *src_tag_id = (desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_SRCTAG_MASK) >>
+ CPPI5_INFO3_DESC_SRCTAG_SHIFT;
+ if (dst_tag_id)
+ *dst_tag_id = desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Sets Packet Src/Dst Tags in packet/TR descriptor
+ */
+static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 src_tag_id, u32 dst_tag_id)
+{
+ desc_hdr->src_dst_tag = (src_tag_id << CPPI5_INFO3_DESC_SRCTAG_SHIFT) &
+ CPPI5_INFO3_DESC_SRCTAG_MASK;
+ desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size
+ * @epib: is EPIB present
+ * @psdata_size: PSDATA size
+ * @sw_data_size: SWDATA size
+ *
+ * Returns required Host Packet Descriptor size
+ * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
+ */
+static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size,
+ u32 sw_data_size)
+{
+ u32 desc_size;
+
+ if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE)
+ return 0;
+
+ desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size +
+ sw_data_size;
+
+ if (epib)
+ desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN);
+}
+
+/**
+ * cppi5_hdesc_init - Init Host Packet Descriptor
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ * @psdata_size: PSDATA size
+ *
+ * Initializes the Host Packet Descriptor header with the given flags and
+ * PSDATA size.
+ */
+static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags,
+ u32 psdata_size)
+{
+ desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags);
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->next_desc = 0;
+}
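
Taken together, the two helpers suggest the following allocation flow;
kzalloc() stands in for whatever descriptor pool a real driver uses, and the
EPIB/PSDATA sizes here are illustrative assumptions:

static struct cppi5_host_desc_t *alloc_hdesc(void)
{
	u32 desc_size = cppi5_hdesc_calc_size(true, 16, 0);
	struct cppi5_host_desc_t *desc;

	if (!desc_size)		/* PSDATA exceeded the 128 byte maximum */
		return NULL;

	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc)
		return NULL;

	cppi5_hdesc_init(desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, 16);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	return desc;
}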
+
+/**
+ * cppi5_hdesc_update_flags - Replace descriptor flags
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ */
+static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc,
+ u32 flags)
+{
+ desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT |
+ CPPI5_INFO0_HDESC_PSINFO_LOCATION);
+ desc->hdr.pkt_info0 |= flags;
+}
+
+/**
+ * cppi5_hdesc_update_psdata_size - Replace PSdata size
+ * @desc: Host packet descriptor
+ * @psdata_size: PSDATA size
+ */
+static inline void
+cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 psdata_size)
+{
+ desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_psdata_size - get PSdata size in bytes
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ return (psdata_size << 2);
+}
+
+/**
+ * cppi5_hdesc_get_pktlen - get Packet Length from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Packet Length from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_set_pktlen - set Packet Length in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_len: Packet Length
+ */
+static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
+ u32 pkt_len)
+{
+ desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PKTLEN_MASK;
+ desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Protocol Specific Flags from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >>
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc
+ * @desc: Host packet descriptor
+ * @ps_flags: Protocol Specific Flags
+ */
+static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
+ u32 ps_flags)
+{
+ desc->hdr.pkt_info1 &= ~CPPI5_INFO1_HDESC_PSFLGS_MASK;
+ desc->hdr.pkt_info1 |= (ps_flags <<
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT) &
+ CPPI5_INFO1_HDESC_PSFLGS_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_pkttype - get Packet Type from HDesc
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >>
+ CPPI5_INFO2_HDESC_PKTTYPE_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_pkttype - set Packet Type in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_type: Packet Type
+ */
+static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc,
+ u32 pkt_type)
+{
+ desc->hdr.pkt_info2 &= ~CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+ desc->hdr.pkt_info2 |=
+ (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) &
+ CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+}
+
+/**
+ * cppi5_hdesc_attach_buf - attach buffer to HDesc
+ * @desc: Host packet descriptor
+ * @buf: Buffer physical address
+ * @buf_data_len: Buffer length
+ * @obuf: Original Buffer physical address
+ * @obuf_len: Original Buffer length
+ *
+ * Attaches buffer to Host Packet Descriptor
+ */
+static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc,
+ dma_addr_t buf, u32 buf_data_len,
+ dma_addr_t obuf, u32 obuf_len)
+{
+ desc->buf_ptr = buf;
+ desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK;
+ desc->org_buf_ptr = obuf;
+ desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
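
A minimal attach sketch, assuming dev, desc and a CPU buffer data of len
bytes are already in hand:

dma_addr_t buf = dma_map_single(dev, data, len, DMA_TO_DEVICE);

if (dma_mapping_error(dev, buf))
	return -ENOMEM;

/* For a freshly mapped buffer, valid data and original buffer coincide. */
cppi5_hdesc_attach_buf(desc, buf, len, buf, len);
cppi5_hdesc_set_pktlen(desc, len);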
+
+static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc,
+ dma_addr_t *obuf, u32 *obuf_len)
+{
+ *obuf = desc->org_buf_ptr;
+ *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
+
+static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc)
+{
+ desc->buf_ptr = desc->org_buf_ptr;
+ desc->buf_info1 = desc->org_buf_len;
+}
+
+/**
+ * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc
+ * @desc: Host Packet Descriptor
+ * @hbuf_desc: Host Buffer Descriptor physical address
+ *
+ * Add and link a Host Buffer Descriptor to the HDesc
+ */
+static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc,
+ dma_addr_t hbuf_desc)
+{
+ desc->next_desc = hbuf_desc;
+}
+
+static inline dma_addr_t
+cppi5_hdesc_get_next_hbdesc(struct cppi5_host_desc_t *desc)
+{
+ return (dma_addr_t)desc->next_desc;
+}
+
+static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc)
+{
+ desc->hdr = (struct cppi5_desc_hdr_t) { 0 };
+ desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_epib_present - check if EPIB present
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns true if EPIB present in the packet
+ */
+static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT);
+}
+
+/**
+ * cppi5_hdesc_get_psdata - Get pointer to PSDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to PSDATA in the HDesc, or
+ * NULL if the PSDATA is placed at the start of the data buffer.
+ */
+static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size;
+ void *psdata;
+
+ if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)
+ return NULL;
+
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ if (!psdata_size)
+ return NULL;
+
+ psdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ psdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return psdata;
+}
+
+/**
+ * cppi5_hdesc_get_swdata - Get pointer to SWDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to SWDATA in the HDesc.
+ * NOTE: it is the caller's responsibility to make sure the HDesc actually
+ * has SWDATA.
+ */
+static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+ void *swdata;
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ swdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ swdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ swdata += (psdata_size << 2);
+
+ return swdata;
+}
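
One plausible use of SWDATA is stashing a driver token at submit time and
recovering it on completion; the layout below is an assumption, not something
this header defines:

struct my_swdata {		/* hypothetical driver-private layout */
	void *token;
};

struct my_swdata *sw = cppi5_hdesc_get_swdata(desc);

sw->token = req;	/* at submit: remember the originating request */
/* On completion, read it back the same way and complete the request. */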
+
+/* ================================== TR ================================== */
+
+#define CPPI5_TR_TYPE_SHIFT (0U)
+#define CPPI5_TR_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_STATIC BIT(4)
+#define CPPI5_TR_WAIT BIT(5)
+#define CPPI5_TR_EVENT_SIZE_SHIFT (6U)
+#define CPPI5_TR_EVENT_SIZE_MASK GENMASK(7, 6)
+#define CPPI5_TR_TRIGGER0_SHIFT (8U)
+#define CPPI5_TR_TRIGGER0_MASK GENMASK(9, 8)
+#define CPPI5_TR_TRIGGER0_TYPE_SHIFT (10U)
+#define CPPI5_TR_TRIGGER0_TYPE_MASK GENMASK(11, 10)
+#define CPPI5_TR_TRIGGER1_SHIFT (12U)
+#define CPPI5_TR_TRIGGER1_MASK GENMASK(13, 12)
+#define CPPI5_TR_TRIGGER1_TYPE_SHIFT (14U)
+#define CPPI5_TR_TRIGGER1_TYPE_MASK GENMASK(15, 14)
+#define CPPI5_TR_CMD_ID_SHIFT (16U)
+#define CPPI5_TR_CMD_ID_MASK GENMASK(23, 16)
+#define CPPI5_TR_CSF_FLAGS_SHIFT (24U)
+#define CPPI5_TR_CSF_FLAGS_MASK GENMASK(31, 24)
+#define CPPI5_TR_CSF_SA_INDIRECT BIT(0)
+#define CPPI5_TR_CSF_DA_INDIRECT BIT(1)
+#define CPPI5_TR_CSF_SUPR_EVT BIT(2)
+#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U)
+#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4)
+#define CPPI5_TR_CSF_EOL_ICNT0 BIT(4)
+#define CPPI5_TR_CSF_EOP BIT(7)
+
+/**
+ * enum cppi5_tr_types - TR types
+ * @CPPI5_TR_TYPE0: One dimensional data move
+ * @CPPI5_TR_TYPE1: Two dimensional data move
+ * @CPPI5_TR_TYPE2: Three dimensional data move
+ * @CPPI5_TR_TYPE3: Four dimensional data move
+ * @CPPI5_TR_TYPE4: Four dimensional data move with data formatting
+ * @CPPI5_TR_TYPE5: Four dimensional Cache Warm
+ * @CPPI5_TR_TYPE8: Four Dimensional Block Move
+ * @CPPI5_TR_TYPE9: Four Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE10: Two Dimensional Block Move
+ * @CPPI5_TR_TYPE11: Two Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE15: Four Dimensional Block Move with Repacking and
+ * Indirection
+ */
+enum cppi5_tr_types {
+ CPPI5_TR_TYPE0 = 0,
+ CPPI5_TR_TYPE1,
+ CPPI5_TR_TYPE2,
+ CPPI5_TR_TYPE3,
+ CPPI5_TR_TYPE4,
+ CPPI5_TR_TYPE5,
+ /* type6-7: Reserved */
+ CPPI5_TR_TYPE8 = 8,
+ CPPI5_TR_TYPE9,
+ CPPI5_TR_TYPE10,
+ CPPI5_TR_TYPE11,
+ /* type12-14: Reserved */
+ CPPI5_TR_TYPE15 = 15,
+ CPPI5_TR_TYPE_MAX
+};
+
+/**
+ * enum cppi5_tr_event_size - TR Flags EVENT_SIZE field specifies when an event
+ * is generated for each TR.
+ * @CPPI5_TR_EVENT_SIZE_COMPLETION: When TR is complete and all status for
+ * the TR has been received
+ * @CPPI5_TR_EVENT_SIZE_ICNT1_DEC: Type 0: when the last data transaction
+ * is sent for the TR
+ * Type 1-11: when ICNT1 is decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT2_DEC: Type 0-1,10-11: when the last data
+ * transaction is sent for the TR
+ * All other types: when ICNT2 is
+ * decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT3_DEC: Type 0-2,10-11: when the last data
+ * transaction is sent for the TR
+ * All other types: when ICNT3 is
+ * decremented
+ */
+enum cppi5_tr_event_size {
+ CPPI5_TR_EVENT_SIZE_COMPLETION,
+ CPPI5_TR_EVENT_SIZE_ICNT1_DEC,
+ CPPI5_TR_EVENT_SIZE_ICNT2_DEC,
+ CPPI5_TR_EVENT_SIZE_ICNT3_DEC,
+ CPPI5_TR_EVENT_SIZE_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger - TR Flags TRIGGERx field specifies the type of trigger
+ * used to enable the TR to transfer data as specified
+ * by TRIGGERx_TYPE field.
+ * @CPPI5_TR_TRIGGER_NONE: No trigger
+ * @CPPI5_TR_TRIGGER_GLOBAL0: Global trigger 0
+ * @CPPI5_TR_TRIGGER_GLOBAL1: Global trigger 1
+ * @CPPI5_TR_TRIGGER_LOCAL_EVENT: Local Event
+ */
+enum cppi5_tr_trigger {
+ CPPI5_TR_TRIGGER_NONE,
+ CPPI5_TR_TRIGGER_GLOBAL0,
+ CPPI5_TR_TRIGGER_GLOBAL1,
+ CPPI5_TR_TRIGGER_LOCAL_EVENT,
+ CPPI5_TR_TRIGGER_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger_type - TR Flags TRIGGERx_TYPE field specifies the type
+ * of data transfer that will be enabled by
+ * receiving a trigger as specified by TRIGGERx.
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC: The second inner most loop (ICNT1) will
+ * be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC: The third inner most loop (ICNT2) will
+ * be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC: The outer most loop (ICNT3) will be
+ * decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ALL: The entire TR will be allowed to
+ * complete
+ */
+enum cppi5_tr_trigger_type {
+ CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ALL,
+ CPPI5_TR_TRIGGER_TYPE_MAX
+};
+
+typedef u32 cppi5_tr_flags_t;
+
+/**
+ * struct cppi5_tr_type0_t - Type 0 (One dimensional data move) TR (16 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @_reserved: Not used
+ * @addr: Starting address for the source data or destination data
+ */
+struct cppi5_tr_type0_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 _reserved;
+ u64 addr;
+} __aligned(16) __packed;
+
+/**
+ * struct cppi5_tr_type1_t - Type 1 (Two dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ */
+struct cppi5_tr_type1_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type2_t - Type 2 (Three dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ * @icnt2: Total loop iteration count for level 2
+ * @_reserved: Not used
+ * @dim2: Signed dimension for loop level 2
+ */
+struct cppi5_tr_type2_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 _reserved;
+ s32 dim2;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type3_t - Type 3 (Four dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ * @icnt2: Total loop iteration count for level 2
+ * @icnt3: Total loop iteration count for level 3 (outermost)
+ * @dim2: Signed dimension for loop level 2
+ * @dim3: Signed dimension for loop level 3
+ */
+struct cppi5_tr_type3_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type15_t - Type 15 (Four Dimensional Block Copy with
+ * Repacking and Indirection Support) TR (64 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost) for
+ * source
+ * @icnt1: Total loop iteration count for level 1 for source
+ * @addr: Starting address for the source data
+ * @dim1: Signed dimension for loop level 1 for source
+ * @icnt2: Total loop iteration count for level 2 for source
+ * @icnt3: Total loop iteration count for level 3 (outermost) for
+ * source
+ * @dim2: Signed dimension for loop level 2 for source
+ * @dim3: Signed dimension for loop level 3 for source
+ * @_reserved: Not used
+ * @ddim1: Signed dimension for loop level 1 for destination
+ * @daddr: Starting address for the destination data
+ * @ddim2: Signed dimension for loop level 2 for destination
+ * @ddim3: Signed dimension for loop level 3 for destination
+ * @dicnt0: Total loop iteration count for level 0 (innermost) for
+ * destination
+ * @dicnt1: Total loop iteration count for level 1 for destination
+ * @dicnt2: Total loop iteration count for level 2 for destination
+ * @dicnt3:		Total loop iteration count for level 3 (outermost) for
+ *			destination
+ */
+struct cppi5_tr_type15_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+ u32 _reserved;
+ s32 ddim1;
+ u64 daddr;
+ s32 ddim2;
+ s32 ddim3;
+ u16 dicnt0;
+ u16 dicnt1;
+ u16 dicnt2;
+ u16 dicnt3;
+} __aligned(64) __packed;
+
+/**
+ * struct cppi5_tr_resp_t - TR response record
+ * @status: Status type and info
+ * @_reserved: Not used
+ * @cmd_id: Command ID for the TR for TR identification
+ * @flags: Configuration Specific Flags
+ */
+struct cppi5_tr_resp_t {
+ u8 status;
+ u8 _reserved;
+ u8 cmd_id;
+ u8 flags;
+} __packed;
+
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT (0U)
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT (4U)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK GENMASK(7, 4)
+#define CPPI5_TR_RESPONSE_CMDID_SHIFT (16U)
+#define CPPI5_TR_RESPONSE_CMDID_MASK GENMASK(23, 16)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT (24U)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK GENMASK(31, 24)
+
+/**
+ * enum cppi5_tr_resp_status_type - TR Response Status Type field is used to
+ * determine what type of status is being
+ * returned.
+ * @CPPI5_TR_RESPONSE_STATUS_NONE: No error, completion: completed
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR: Transfer Error, completion: none
+ * or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR: Aborted Error, completion: none
+ * or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR: Submission Error, completion:
+ * none
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR: Unsupported Error, completion:
+ * none
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION: Transfer Exception, completion:
+ * partially completed
+ * @CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH: Teardown Flush, completion: none
+ */
+enum cppi5_tr_resp_status_type {
+ CPPI5_TR_RESPONSE_STATUS_NONE,
+ CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR,
+ CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR,
+ CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION,
+ CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH,
+ CPPI5_TR_RESPONSE_STATUS_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_submission - TR Response Status field values which
+ * correspond to Submission Error
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0: ICNT0 was 0
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL: Channel FIFO was full when TR
+ * received
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN: Channel is not owned by the
+ * submitter
+ */
+enum cppi5_tr_resp_status_submission {
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_unsupported - TR Response Status field values which
+ * correspond to Unsupported Error
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE: TR Type not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC: STATIC not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL: EOL not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC: CONFIGURATION SPECIFIC
+ * not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE: AMODE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE: ELTYPE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT: DFMT not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR: SECTR not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC: AMODE SPECIFIC field
+ * not supported
+ */
+enum cppi5_tr_resp_status_unsupported {
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX
+};
+
+/**
+ * cppi5_trdesc_calc_size - Calculate TR Descriptor size
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ *
+ * Returns required TR Descriptor size
+ */
+static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
+{
+ /*
+ * The Size of a TR descriptor is:
+ * 1 x tr_size : the first 16 bytes is used by the packet info block +
+ * tr_count x tr_size : Transfer Request Records +
+ * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records
+ */
+ return tr_size * (tr_count + 1) +
+ sizeof(struct cppi5_tr_resp_t) * tr_count;
+}
+
+/**
+ * cppi5_trdesc_init - Init TR Descriptor
+ * @desc_hdr: TR Descriptor header
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ * @reload_idx: Absolute index to jump to on the 2nd and following passes
+ * through the TR packet.
+ * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff
+ * indicates infinite looping.
+ *
+ * Init TR Descriptor
+ */
+static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 tr_count, u32 tr_size, u32 reload_idx,
+ u32 reload_count)
+{
+ desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+ desc_hdr->pkt_info0 |=
+ (reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDCNT_MASK;
+ desc_hdr->pkt_info0 |=
+ (reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDIDX_MASK;
+ desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK;
+
+ desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) <<
+ CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) &
+ CPPI5_INFO1_TRDESC_RECSIZE_MASK;
+}
+
+/**
+ * cppi5_tr_init - Init TR record
+ * @flags: Pointer to the TR's flags
+ * @type: TR type
+ * @static_tr: TR is static
+ * @wait: Wait for TR completion before allowing the next TR to start
+ * @event_size: output event generation cfg
+ * @cmd_id: TR identifier (application specifics)
+ *
+ * Init TR record
+ */
+static inline void cppi5_tr_init(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_types type, bool static_tr,
+ bool wait, enum cppi5_tr_event_size event_size,
+ u32 cmd_id)
+{
+ *flags = type;
+ *flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) &
+ CPPI5_TR_EVENT_SIZE_MASK;
+
+ *flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) &
+ CPPI5_TR_CMD_ID_MASK;
+
+ if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9))
+ *flags |= CPPI5_TR_STATIC;
+
+ if (wait)
+ *flags |= CPPI5_TR_WAIT;
+}
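
Putting the TR-mode helpers together, a sketch that builds a descriptor
holding a single Type 1 record. pool_alloc() and src_paddr are hypothetical,
the memory is assumed zero-initialized, and the first tr_size bytes are the
packet info block as described in cppi5_trdesc_calc_size():

size_t size = cppi5_trdesc_calc_size(1, sizeof(struct cppi5_tr_type1_t));
struct cppi5_desc_hdr_t *desc_hdr = pool_alloc(size);	/* hypothetical pool */
struct cppi5_tr_type1_t *tr;

/* The first TR record starts right after the tr_size-byte info block. */
tr = (void *)desc_hdr + sizeof(struct cppi5_tr_type1_t);

cppi5_trdesc_init(desc_hdr, 1, sizeof(*tr), 0, 0);
cppi5_tr_init(&tr->flags, CPPI5_TR_TYPE1, false, false,
	      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
tr->icnt0 = 64;			/* 64 bytes per inner loop */
tr->icnt1 = 8;			/* 8 iterations of loop level 1 */
tr->addr = src_paddr;		/* assumed source physical address */
tr->dim1 = 64;			/* rows packed back to back */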
+
+/**
+ * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type
+ * @flags: Pointer to the TR's flags
+ * @trigger0: trigger0 selection
+ * @trigger0_type: type of data transfer that will be enabled by trigger0
+ * @trigger1: trigger1 selection
+ * @trigger1_type: type of data transfer that will be enabled by trigger1
+ *
+ * Configure the triggers for the TR
+ */
+static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_trigger trigger0,
+ enum cppi5_tr_trigger_type trigger0_type,
+ enum cppi5_tr_trigger trigger1,
+ enum cppi5_tr_trigger_type trigger1_type)
+{
+ *flags &= ~(CPPI5_TR_TRIGGER0_MASK | CPPI5_TR_TRIGGER0_TYPE_MASK |
+ CPPI5_TR_TRIGGER1_MASK | CPPI5_TR_TRIGGER1_TYPE_MASK);
+ *flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) &
+ CPPI5_TR_TRIGGER0_MASK;
+ *flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER0_TYPE_MASK;
+
+ *flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) &
+ CPPI5_TR_TRIGGER1_MASK;
+ *flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER1_TYPE_MASK;
+}
+
+/**
+ * cppi5_tr_csf_set - Update the Configuration specific flags
+ * @flags: Pointer to the TR's flags
+ * @csf: Configuration specific flags
+ *
+ * Set a bit in Configuration Specific Flags section of the TR flags.
+ */
+static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf)
+{
+ *flags &= ~CPPI5_TR_CSF_FLAGS_MASK;
+ *flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) &
+ CPPI5_TR_CSF_FLAGS_MASK;
+}
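
For instance, a TR that should start on global trigger 0, transfer in full
per trigger, and raise no completion event might be flagged as follows (a
sketch continuing the tr variable from the previous example):

cppi5_tr_set_trigger(&tr->flags, CPPI5_TR_TRIGGER_GLOBAL0,
		     CPPI5_TR_TRIGGER_TYPE_ALL,
		     CPPI5_TR_TRIGGER_NONE, CPPI5_TR_TRIGGER_TYPE_ALL);
cppi5_tr_csf_set(&tr->flags, CPPI5_TR_CSF_SUPR_EVT);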
+
+#endif /* __TI_CPPI5_H__ */
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 3ae300052553..0dde1a46ab75 100644
--- a/include/linux/dma/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Xilinx DMA Engine drivers support header file
*
* Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DMA_XILINX_DMA_H
@@ -27,6 +23,7 @@
* @delay: Delay counter
* @reset: Reset Channel
* @ext_fsync: External Frame Sync source
+ * @vflip_en: Vertical Flip enable
*/
struct xilinx_vdma_config {
int frm_dly;
@@ -39,20 +36,7 @@ struct xilinx_vdma_config {
int delay;
int reset;
int ext_fsync;
-};
-
-/**
- * enum xdma_ip_type: DMA IP type.
- *
- * XDMA_TYPE_AXIDMA: Axi dma ip.
- * XDMA_TYPE_CDMA: Axi cdma ip.
- * XDMA_TYPE_VDMA: Axi vdma ip.
- *
- */
-enum xdma_ip_type {
- XDMA_TYPE_AXIDMA = 0,
- XDMA_TYPE_CDMA,
- XDMA_TYPE_VDMA,
+ bool vflip_en;
};
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
diff --git a/include/linux/dma/xilinx_dpdma.h b/include/linux/dma/xilinx_dpdma.h
new file mode 100644
index 000000000000..02a4adf8921b
--- /dev/null
+++ b/include/linux/dma/xilinx_dpdma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_DMA_XILINX_DPDMA_H
+#define __LINUX_DMA_XILINX_DPDMA_H
+
+#include <linux/types.h>
+
+struct xilinx_dpdma_peripheral_config {
+ bool video_group;
+};
+
+#endif /* __LINUX_DMA_XILINX_DPDMA_H */
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
deleted file mode 100644
index 90884072fa73..000000000000
--- a/include/linux/dma_remapping.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _DMA_REMAPPING_H
-#define _DMA_REMAPPING_H
-
-/*
- * VT-d hardware uses 4KiB page size regardless of host page size.
- */
-#define VTD_PAGE_SHIFT (12)
-#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
-#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
-#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
-
-#define VTD_STRIDE_SHIFT (9)
-#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
-
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
-
-#define CONTEXT_TT_MULTI_LEVEL 0
-#define CONTEXT_TT_DEV_IOTLB 1
-#define CONTEXT_TT_PASS_THROUGH 2
-/* Extended context entry types */
-#define CONTEXT_TT_PT_PASID 4
-#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
-#define CONTEXT_TT_MASK (7ULL << 2)
-
-#define CONTEXT_DINVE (1ULL << 8)
-#define CONTEXT_PRS (1ULL << 9)
-#define CONTEXT_PASIDE (1ULL << 11)
-
-struct intel_iommu;
-struct dmar_domain;
-struct root_entry;
-
-
-#ifdef CONFIG_INTEL_IOMMU
-extern int iommu_calculate_agaw(struct intel_iommu *iommu);
-extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
-extern int dmar_disabled;
-extern int intel_iommu_enabled;
-extern int intel_iommu_tboot_noforce;
-#else
-static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-#define dmar_disabled (1)
-#define intel_iommu_enabled (0)
-#endif
-
-
-#endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 533680860865..99efe2b9b4ea 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
*/
#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H
@@ -51,6 +39,7 @@ enum dma_status {
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
+ DMA_OUT_OF_ORDER,
};
/**
@@ -68,12 +57,14 @@ enum dma_transaction_type {
DMA_MEMSET,
DMA_MEMSET_SG,
DMA_INTERRUPT,
- DMA_SG,
DMA_PRIVATE,
DMA_ASYNC_TX,
DMA_SLAVE,
DMA_CYCLIC,
DMA_INTERLEAVE,
+ DMA_COMPLETION_NO_ORDER,
+ DMA_REPEAT,
+ DMA_LOAD_EOT,
/* last transaction type for creation of the capabilities mask */
DMA_TX_TYPE_END,
};
@@ -93,12 +84,12 @@ enum dma_transfer_direction {
DMA_TRANS_NONE,
};
-/**
+/*
* Interleaved Transfer Request
* ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is a collection of contiguous bytes to be transferred.
* The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
* A FRAME is the smallest series of contiguous {chunk,icg} pairs,
* that when repeated an integral number of times, specifies the transfer.
* A transfer template is specification of a Frame, the number of times
@@ -166,7 +157,17 @@ struct dma_interleaved_template {
bool dst_sgl;
size_t numf;
size_t frame_size;
- struct data_chunk sgl[0];
+ struct data_chunk sgl[];
+};
+
+/**
+ * struct dma_vec - DMA vector
+ * @addr: Bus address of the start of the vector
+ * @len: Length in bytes of the DMA vector
+ */
+struct dma_vec {
+ dma_addr_t addr;
+ size_t len;
};
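
A sketch of how a client might describe two already-mapped segments with this
vector type (the segment addresses are assumed to come from prior DMA mapping
calls); the descriptor is then prepared with
dmaengine_prep_peripheral_dma_vec(), defined further down in this header:

struct dma_async_tx_descriptor *desc;
struct dma_vec vecs[] = {
	{ .addr = seg0_dma, .len = 256 },	/* assumed mapped segment */
	{ .addr = seg1_dma, .len = 512 },	/* assumed mapped segment */
};

desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);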
/**
@@ -175,7 +176,7 @@ struct dma_interleaved_template {
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
* this transaction
* @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
- * acknowledges receipt, i.e. has has a chance to establish any dependency
+ * acknowledges receipt, i.e. has a chance to establish any dependency
* chains
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
@@ -186,6 +187,19 @@ struct dma_interleaved_template {
* on the result of this operation
* @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
* cleared or freed
+ * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
+ * data and the descriptor should be in different format from normal
+ * data descriptors.
+ * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
+ * repeated when it ends until a transaction is issued on the same channel
+ * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
+ * interleaved transactions and is ignored for all other transaction types.
+ * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
+ * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
+ * repeated transaction ends. Not setting this flag when the previously queued
+ * transaction is marked with DMA_PREP_REPEAT will cause the new transaction
+ * to never be processed and stay in the issued queue forever. The flag is
+ * ignored if the previous transaction is not a repeated transaction.
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -195,6 +209,9 @@ enum dma_ctrl_flags {
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
DMA_CTRL_REUSE = (1 << 6),
+ DMA_PREP_CMD = (1 << 7),
+ DMA_PREP_REPEAT = (1 << 8),
+ DMA_PREP_LOAD_EOT = (1 << 9),
};
/**
@@ -206,7 +223,7 @@ enum sum_check_bits {
};
/**
- * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
* @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
* @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
*/
@@ -223,11 +240,66 @@ enum sum_check_flags {
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ * client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ * helper) to the descriptor.
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * construct the metadata in the client's buffer
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * 4. when the transfer is completed, the metadata should be available in the
+ * attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ * driver. The client driver can ask for the pointer, maximum size and the
+ * currently used size of the metadata and can directly update or read it.
+ * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
+ * provided as helper functions.
+ *
+ * Note: the metadata area for the descriptor is no longer valid after the
+ * transfer has been completed (valid up to the point when the completion
+ * callback returns if used).
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ * metadata area
+ * 3. update the metadata at the pointer
+ * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ * of data the client has placed into the metadata buffer
+ * 5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. submit the transfer
+ * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ * pointer to the engine's metadata area
+ * 4. Read out the metadata from the pointer
+ *
+ * Warning: the two modes are not compatible and clients must use one mode for a
+ * descriptor.
+ */
+enum dma_desc_metadata_mode {
+ DESC_METADATA_NONE = 0,
+ DESC_METADATA_CLIENT = BIT(0),
+ DESC_METADATA_ENGINE = BIT(1),
+};
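
A condensed sketch of the DESC_METADATA_CLIENT TX flow listed above (the
channel, mapped buffer and error handling are assumed;
dmaengine_desc_attach_metadata() is the helper referred to in the
description):

struct dma_async_tx_descriptor *desc;
u8 md[8] = { };				/* client-built metadata */
int ret;

desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
				   DMA_PREP_INTERRUPT);
if (!desc)
	return -EINVAL;

ret = dmaengine_desc_attach_metadata(desc, md, sizeof(md));
if (ret)
	return ret;

dmaengine_submit(desc);
dma_async_issue_pending(chan);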
+
+/**
* struct dma_chan_percpu - the per-CPU part of struct dma_chan
* @memcpy_count: transaction counter
* @bytes_transferred: byte counter
*/
-
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
@@ -247,10 +319,14 @@ struct dma_router {
/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
* @cookie: last cookie value returned to client
* @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
+ * @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
@@ -261,12 +337,17 @@ struct dma_router {
*/
struct dma_chan {
struct dma_device *device;
+ struct device *slave;
dma_cookie_t cookie;
dma_cookie_t completed_cookie;
/* sysfs */
int chan_id;
struct dma_chan_dev *dev;
+ const char *name;
+#ifdef CONFIG_DEBUG_FS
+ char *dbg_client_name;
+#endif
struct list_head device_node;
struct dma_chan_percpu __percpu *local;
@@ -285,13 +366,14 @@ struct dma_chan {
* @chan: driver channel device
* @device: sysfs device
* @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
+ * @chan_dma_dev: The channel is using custom/different dma-mapping
+ * compared to the parent dma_device
*/
struct dma_chan_dev {
struct dma_chan *chan;
struct device device;
int dev_id;
- atomic_t *idr_ref;
+ bool chan_dma_dev;
};
/**
@@ -308,6 +390,7 @@ enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+ DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};
/**
@@ -321,12 +404,12 @@ enum dma_slave_buswidth {
* should be read (RX), if the source is memory this argument is
* ignored.
* @dst_addr: this is the physical address where DMA slave data
- * should be written (TX), if the source is memory this argument
+ * should be written (TX), if the destination is memory this argument
* is ignored.
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
- * Legal values: 1, 2, 4, 8.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in
@@ -345,9 +428,9 @@ enum dma_slave_buswidth {
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
- * @slave_id: Slave requester id. Only valid for slave channels. The dma
- * slave peripheral will have unique id as dma requester which need to be
- * pass as slave config.
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
*
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
@@ -372,7 +455,8 @@ struct dma_slave_config {
u32 src_port_window_size;
u32 dst_port_window_size;
bool device_fc;
- unsigned int slave_id;
+ void *peripheral_config;
+ size_t peripheral_size;
};
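
As an illustration, a peripheral driver feeding a 4-byte device FIFO might
fill the structure like this (the FIFO address and burst size are
assumptions):

struct dma_slave_config cfg = {
	.direction	= DMA_MEM_TO_DEV,
	.dst_addr	= fifo_phys,	/* assumed device FIFO address */
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_maxburst	= 8,		/* 8 words per burst */
};

ret = dmaengine_slave_config(chan, &cfg);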
/**
@@ -401,16 +485,24 @@ enum dma_residue_granularity {
DMA_RESIDUE_GRANULARITY_BURST = 2,
};
-/* struct dma_slave_caps - expose capabilities of a slave channel only
- *
- * @src_addr_widths: bit mask of src addr widths the channel supports
- * @dst_addr_widths: bit mask of dstn addr widths the channel supports
- * @directions: bit mask of slave direction the channel supported
- * since the enum dma_transfer_direction is not defined as bits for each
- * type of direction, the dma controller should fill (1 << <TYPE>) and same
- * should be checked by controller as well
+/**
+ * struct dma_slave_caps - expose capabilities of a slave channel only
+ * @src_addr_widths: bit mask of src addr widths the channel supports.
+ * Width is specified in bytes, e.g. for a channel supporting
+ * a width of 4 the mask should have BIT(4) set.
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
- * @cmd_pause: true, if pause and thereby resume is supported
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
+ * @cmd_pause: true, if pause is supported (i.e. for reading residue or
+ * for resume later)
+ * @cmd_resume: true, if resume is supported
* @cmd_terminate: true, if terminate cmd is supported
* @residue_granularity: granularity of the reported transfer residue
* @descriptor_reuse: if a descriptor can be reused by client and
@@ -420,8 +512,11 @@ struct dma_slave_caps {
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool cmd_pause;
+ bool cmd_resume;
bool cmd_terminate;
enum dma_residue_granularity residue_granularity;
bool descriptor_reuse;
@@ -432,8 +527,6 @@ static inline const char *dma_chan_name(struct dma_chan *chan)
return dev_name(&chan->dev->device);
}
-void dma_chan_cleanup(struct kref *kref);
-
/**
* typedef dma_filter_fn - callback filter for dma_request_channel
* @chan: channel to be reviewed
@@ -465,14 +558,30 @@ typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
const struct dmaengine_result *result);
struct dmaengine_unmap_data {
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ u16 map_cnt;
+#else
u8 map_cnt;
+#endif
u8 to_cnt;
u8 from_cnt;
u8 bidi_cnt;
struct device *dev;
struct kref kref;
size_t len;
- dma_addr_t addr[0];
+ dma_addr_t addr[];
+};
+
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+ int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+ size_t len);
+
+ void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+ int (*set_len)(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
};
/**
@@ -481,13 +590,22 @@ struct dmaengine_unmap_data {
* @cookie: tracking cookie for this transaction, set to -EBUSY if
* this tx is sitting on a dependency list
* @flags: flags to augment operation preparation, control completion, and
- * communicate status
+ * communicate status
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
- * descriptor pending. To be pushed on .issue_pending() call
+ * descriptor pending. To be pushed on .issue_pending() call
+ * @desc_free: driver's callback function to free a reusable descriptor
+ * after completion
* @callback: routine to call after this operation is complete
+ * @callback_result: error result from a DMA transaction
* @callback_param: general parameter to pass to the callback routine
+ * @unmap: hook for generic DMA unmap data
+ * @desc_metadata_mode: core managed metadata mode to protect mixed use of
+ * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
+ * DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
+ * DMA driver if metadata mode is supported with the descriptor
* ---async_tx api specific fields---
* @next: at completion submit this descriptor
* @parent: pointer to the next level up in the dependency chain
@@ -504,6 +622,8 @@ struct dma_async_tx_descriptor {
dma_async_tx_callback_result callback_result;
void *callback_param;
struct dmaengine_unmap_data *unmap;
+ enum dma_desc_metadata_mode desc_metadata_mode;
+ struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
@@ -539,10 +659,11 @@ static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
- if (tx->unmap) {
- dmaengine_unmap_put(tx->unmap);
- tx->unmap = NULL;
- }
+ if (!tx->unmap)
+ return;
+
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
}
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -611,11 +732,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
* @residue: the remaining number of bytes left to transmit
* on the selected transfer for states DMA_IN_PROGRESS and
* DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA.
*/
struct dma_tx_state {
dma_cookie_t last;
dma_cookie_t used;
u32 residue;
+ u32 in_flight_bytes;
};
/**
@@ -630,6 +753,8 @@ enum dmaengine_alignment {
DMAENGINE_ALIGN_16_BYTES = 4,
DMAENGINE_ALIGN_32_BYTES = 5,
DMAENGINE_ALIGN_64_BYTES = 6,
+ DMAENGINE_ALIGN_128_BYTES = 7,
+ DMAENGINE_ALIGN_256_BYTES = 8,
};
/**
@@ -660,12 +785,14 @@ struct dma_filter {
/**
* struct dma_device - info on the entity supplying DMA services
+ * @ref: reference is taken and put every time a channel is allocated or freed
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
* @global_node: list_head for global dma_device_list
* @filter: information for device/slave to filter function/param mapping
* @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
* @max_xor: maximum number of xor sources, 0 if no capability
* @max_pq: maximum number of PQ sources and PQ-continue capability
* @copy_align: alignment shift for memcpy operations
@@ -674,17 +801,27 @@ struct dma_filter {
* @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
+ * @owner: owner module (automatically set based on the provided dev)
+ * @chan_ida: unique channel ID
* @src_addr_widths: bit mask of src addr widths the device supports
+ * Width is specified in bytes, e.g. for a device supporting
+ * a width of 4 the mask should have BIT(4) set.
* @dst_addr_widths: bit mask of dst addr widths the device supports
- * @directions: bit mask of slave direction the device supports since
- * the enum dma_transfer_direction is not defined as bits for
- * each type of direction, the dma controller should fill (1 <<
- * <TYPE>) and same should be checked by controller as well
+ * @directions: bit mask of slave directions the device supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
* number of allocated descriptors
+ * @device_router_config: optional callback for DMA router configuration
* @device_free_chan_resources: release DMA channel's resources
* @device_prep_dma_memcpy: prepares a memcpy operation
* @device_prep_dma_xor: prepares a xor operation
@@ -694,12 +831,16 @@ struct dma_filter {
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer,
+ * where the address and size of each segment is located in one entry of
+ * the dma_vec array.
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
- * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_caps: May be used to override the generic DMA slave capabilities
+ * with per-channel specific ones
* @device_config: Pushes a new configuration to a channel, return 0 or an error
* code
* @device_pause: Pauses any transfer happening on a channel. Returns
@@ -715,16 +856,24 @@ struct dma_filter {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
- * @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @device_release: called sometime after dma_async_device_unregister() is
+ * called and there are no further references to this structure. This
+ * must be implemented to free resources however many existing drivers
+ * do not and are therefore not safe to unbind while in use.
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra,
+ * controller specific information.
+ * @dbg_dev_root: the root folder in debugfs for this device
*/
struct dma_device {
-
+ struct kref ref;
unsigned int chancnt;
unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
- dma_cap_mask_t cap_mask;
+ dma_cap_mask_t cap_mask;
+ enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
enum dmaengine_alignment copy_align;
@@ -735,15 +884,20 @@ struct dma_device {
int dev_id;
struct device *dev;
+ struct module *owner;
+ struct ida chan_ida;
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool descriptor_reuse;
enum dma_residue_granularity residue_granularity;
int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ int (*device_router_config)(struct dma_chan *chan);
void (*device_free_chan_resources)(struct dma_chan *chan);
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -771,12 +925,11 @@ struct dma_device {
unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_peripheral_dma_vec)(
+ struct dma_chan *chan, const struct dma_vec *vecs,
+ size_t nents, enum dma_transfer_direction direction,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -788,12 +941,9 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
- struct dma_chan *chan, dma_addr_t dst, u64 data,
- unsigned long flags);
- int (*device_config)(struct dma_chan *chan,
- struct dma_slave_config *config);
+ void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
+ int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
int (*device_resume)(struct dma_chan *chan);
int (*device_terminate_all)(struct dma_chan *chan);
@@ -803,6 +953,10 @@ struct dma_device {
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ void (*device_release)(struct dma_device *dev);
+ /* debugfs support */
+ void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+ struct dentry *dbg_dev_root;
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -816,7 +970,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
- return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
+ (direction == DMA_DEV_TO_DEV);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
@@ -835,6 +990,25 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
dir, flags, NULL);
}
+/**
+ * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor
+ * @chan: The channel to be used for this descriptor
+ * @vecs: The array of DMA vectors that should be transferred
+ * @nents: The number of DMA vectors in the array
+ * @dir: Specifies the direction of the data transfer
+ * @flags: DMA engine flags
+ */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
+ struct dma_chan *chan, const struct dma_vec *vecs, size_t nents,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_peripheral_dma_vec)
+ return NULL;
+
+ return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents,
+ dir, flags);
+}
+
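As a rough client-side sketch of the new vector-based prepare call (the buffer address, channel and completion callback are assumed for illustration, not taken from an in-tree driver):

	struct dma_vec vecs[2] = {
		{ .addr = buf_phys,        .len = 512 },
		{ .addr = buf_phys + 4096, .len = 512 },
	};
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;		/* no channel or no prep callback */

	desc->callback = my_xfer_done;	/* hypothetical completion handler */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
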
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction dir, unsigned long flags)
@@ -879,10 +1053,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
{
if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
return NULL;
+ if (flags & DMA_PREP_REPEAT &&
+ !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
+ return NULL;
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags)
@@ -905,18 +1090,40 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
-static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags)
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+ enum dma_desc_metadata_mode mode)
{
- if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
- return NULL;
+ if (!chan)
+ return false;
- return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
- src_sg, src_nents, flags);
+ return !!(chan->device->desc_metadata_modes & mode);
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+ struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+ return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+ struct dma_async_tx_descriptor *desc, size_t *payload_len,
+ size_t *max_len)
+{
+ return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+ struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+ return -EINVAL;
}
+#endif /* CONFIG_DMA_ENGINE */
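A minimal sketch of the client-side metadata flow enabled by these helpers, assuming a channel whose driver advertises DESC_METADATA_CLIENT; the descriptor and buffer names are illustrative:

	/* Illustrative: md_buf/md_len come from the client driver. */
	if (dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT)) {
		ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
		if (ret)
			return ret;
	}
	dmaengine_submit(desc);
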
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
@@ -946,7 +1153,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
* dmaengine_synchronize() needs to be called before it is safe to free
* any memory that is accessed by previously submitted descriptors or before
* freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
*
* This function can be called from atomic context as well as from within a
* complete callback of a descriptor submitted on the same channel.
@@ -968,7 +1175,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
*
* Synchronizes to the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
@@ -1045,14 +1252,7 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
static inline bool dmaengine_check_align(enum dmaengine_alignment align,
size_t off1, size_t off2, size_t len)
{
- size_t mask;
-
- if (!align)
- return true;
- mask = (1 << align) - 1;
- if (mask & (off1 | off2 | len))
- return false;
- return true;
+ return !(((1 << align) - 1) & (off1 | off2 | len));
}
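The simplified form still covers the "no alignment constraint" case: for align == 0 the mask is (1 << 0) - 1 == 0, so the check passes for any offsets. A quick worked example with illustrative values:

	/* DMAENGINE_ALIGN_4_BYTES == 2, so mask == (1 << 2) - 1 == 0x3 */
	dmaengine_check_align(DMAENGINE_ALIGN_4_BYTES, 0x1000, 0x2004, 64);
						/* true: all multiples of 4 */
	dmaengine_check_align(DMAENGINE_ALIGN_4_BYTES, 0x1001, 0x2004, 64);
						/* false: off1 & 0x3 != 0 */
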
static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
@@ -1126,9 +1326,9 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
return dma_dev_to_maxpq(dma);
- else if (dmaf_p_disabled_continue(flags))
+ if (dmaf_p_disabled_continue(flags))
return dma_dev_to_maxpq(dma) - 1;
- else if (dmaf_continue(flags))
+ if (dmaf_continue(flags))
return dma_dev_to_maxpq(dma) - 3;
BUG();
}
@@ -1139,7 +1339,7 @@ static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
if (inc) {
if (dir_icg)
return dir_icg;
- else if (sgl)
+ if (sgl)
return icg;
}
@@ -1305,11 +1505,12 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
- if (st) {
- st->last = last;
- st->used = used;
- st->residue = residue;
- }
+ if (!st)
+ return;
+
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
}
#ifdef CONFIG_DMA_ENGINE
@@ -1318,11 +1519,12 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+ dma_filter_fn fn, void *fn_param,
+ struct device_node *np);
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
@@ -1343,12 +1545,9 @@ static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param)
-{
- return NULL;
-}
-static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
- const char *name)
+ dma_filter_fn fn,
+ void *fn_param,
+ struct device_node *np)
{
return NULL;
}
@@ -1362,6 +1561,12 @@ static inline struct dma_chan *dma_request_chan_by_mask(
{
return ERR_PTR(-ENODEV);
}
+
+static inline struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void dma_release_channel(struct dma_chan *chan)
{
}
@@ -1372,20 +1577,20 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
}
#endif
-#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
-
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
+ int ret;
- dma_get_slave_caps(tx->chan, &caps);
+ ret = dma_get_slave_caps(tx->chan, &caps);
+ if (ret)
+ return ret;
- if (caps.descriptor_reuse) {
- tx->flags |= DMA_CTRL_REUSE;
- return 0;
- } else {
+ if (!caps.descriptor_reuse)
return -EPERM;
- }
+
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
}
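With the error from dma_get_slave_caps() now propagated, a caller's reuse flow looks roughly like this (names illustrative):

	ret = dmaengine_desc_set_reuse(desc);
	if (ret)	/* caps lookup failed or reuse unsupported */
		return ret;

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	/* After completion the same descriptor may be submitted again;
	 * it must eventually be released with dmaengine_desc_free(). */
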
static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
@@ -1401,37 +1606,75 @@ static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
/* this is supported for reusable desc, so check that */
- if (dmaengine_desc_test_reuse(desc))
- return desc->desc_free(desc);
- else
+ if (!dmaengine_desc_test_reuse(desc))
return -EPERM;
+
+ return desc->desc_free(desc);
}
/* --- DMA device --- */
int dma_async_device_register(struct dma_device *device);
+int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan,
+ const char *name);
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
-struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
-#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
- __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+#define dma_request_channel(mask, x, y) \
+ __dma_request_channel(&(mask), x, y, NULL)
+
+/* Deprecated, please use dma_request_chan() directly */
+static inline struct dma_chan * __deprecated
+dma_request_slave_channel(struct device *dev, const char *name)
+{
+ struct dma_chan *ch = dma_request_chan(dev, name);
+
+ return IS_ERR(ch) ? NULL : ch;
+}
static inline struct dma_chan
-*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
dma_filter_fn fn, void *fn_param,
struct device *dev, const char *name)
{
struct dma_chan *chan;
- chan = dma_request_slave_channel(dev, name);
- if (chan)
+ chan = dma_request_chan(dev, name);
+ if (!IS_ERR(chan))
return chan;
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(mask, fn, fn_param);
+ return dma_request_channel(mask, fn, fn_param);
+}
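For reference, a typical call site of the reworked compat helper now passes the capability mask by value; the filter function and its parameter are hypothetical:

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Tries dma_request_chan(dev, "rx") first, then falls back to
	 * the filter-based lookup. */
	chan = dma_request_slave_channel_compat(mask, my_filter, my_param,
						dev, "rx");
	if (!chan)
		return -ENODEV;
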
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
+ return "DEV_TO_MEM";
+ case DMA_MEM_TO_DEV:
+ return "MEM_TO_DEV";
+ case DMA_MEM_TO_MEM:
+ return "MEM_TO_MEM";
+ case DMA_DEV_TO_DEV:
+ return "DEV_TO_DEV";
+ default:
+ return "invalid";
+ }
+}
+
+static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
+{
+ if (chan->dev->chan_dma_dev)
+ return &chan->dev->device;
+
+ return chan->device->dev;
}
+
#endif /* DMAENGINE_H */
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 53ba737505df..7d40b51933d1 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,25 +11,21 @@
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
+#include <linux/nodemask_types.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
struct device;
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation);
+#ifdef CONFIG_HAS_DMA
+
+struct dma_pool *dma_pool_create_node(const char *name, struct device *dev,
+ size_t size, size_t align, size_t boundary, int node);
void dma_pool_destroy(struct dma_pool *pool);
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
-
-static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
- dma_addr_t *handle)
-{
- return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
-}
-
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
/*
@@ -39,5 +35,44 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
void dmam_pool_destroy(struct dma_pool *pool);
+#else /* !CONFIG_HAS_DMA */
+static inline struct dma_pool *dma_pool_create_node(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary,
+ int node)
+{
+ return NULL;
+}
+static inline void dma_pool_destroy(struct dma_pool *pool) { }
+static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle) { return NULL; }
+static inline void dma_pool_free(struct dma_pool *pool, void *vaddr,
+ dma_addr_t addr) { }
+static inline struct dma_pool *dmam_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t allocation)
+{ return NULL; }
+static inline void dmam_pool_destroy(struct dma_pool *pool) { }
+#endif /* !CONFIG_HAS_DMA */
+
+static inline struct dma_pool *dma_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary)
+{
+ return dma_pool_create_node(name, dev, size, align, boundary,
+ NUMA_NO_NODE);
+}
+
+/**
+ * dma_pool_zalloc - Get a zero-initialized block of DMA coherent memory.
+ * @pool: dma pool that will produce the block
+ * @mem_flags: GFP_* bitmask
+ * @handle: pointer to dma address of block
+ *
+ * Same as dma_pool_alloc(), but the returned memory is zeroed.
+ */
+static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
+{
+ return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
+}
+
#endif
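A minimal usage sketch of the reshuffled dmapool API; pool name, block size and alignment are illustrative:

	/* A pool of 64-byte, 16-byte-aligned blocks, no boundary limit. */
	struct dma_pool *pool;
	dma_addr_t handle;
	void *vaddr;

	pool = dma_pool_create("my_descs", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &handle);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand "handle" to the device, use "vaddr" from the CPU ... */

	dma_pool_free(pool, vaddr, handle);
	dma_pool_destroy(pool);
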
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e8ffba1052d3..692b2b445761 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2006, Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
* Copyright (C) Ashok Raj <ashok.raj@intel.com>
* Copyright (C) Shaohua Li <shaohua.li@intel.com>
*/
@@ -30,15 +18,12 @@
struct acpi_dmar_header;
-#ifdef CONFIG_X86
-# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define DMAR_UNITS_SUPPORTED 64
-#endif
+#define DMAR_UNITS_SUPPORTED 1024
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
#define DMAR_X2APIC_OPT_OUT 0x2
+#define DMAR_PLATFORM_OPT_IN 0x4
struct intel_iommu;
@@ -54,11 +39,13 @@ struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 reg_base_addr; /* register base address*/
+ unsigned long reg_size; /* size of register set */
struct dmar_dev_scope *devices;/* target device array */
int devices_cnt; /* target device count */
u16 segment; /* PCI domain */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
+ u8 gfx_dedicated:1; /* graphic dedicated */
struct intel_iommu *iommu;
};
@@ -80,19 +67,23 @@ struct dmar_pci_notify_info {
extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
-#define for_each_drhd_unit(drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
+#define for_each_drhd_unit(drhd) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check())
#define for_each_active_drhd_unit(drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check()) \
if (drhd->ignored) {} else
#define for_each_active_iommu(i, drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check()) \
if (i=drhd->iommu, drhd->ignored) {} else
#define for_each_iommu(i, drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check()) \
if (i=drhd->iommu, 0) {} else
static inline bool dmar_rcu_check(void)
@@ -103,17 +94,18 @@ static inline bool dmar_rcu_check(void)
#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
-#define for_each_dev_scope(a, c, p, d) \
- for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
- NULL, (p) < (c)); (p)++)
+#define for_each_dev_scope(devs, cnt, i, tmp) \
+ for ((i) = 0; ((tmp) = (i) < (cnt) ? \
+ dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \
+ (i)++)
-#define for_each_active_dev_scope(a, c, p, d) \
- for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else
+#define for_each_active_dev_scope(devs, cnt, i, tmp) \
+ for_each_dev_scope((devs), (cnt), (i), (tmp)) \
+ if (!(tmp)) { continue; } else
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct dmar_dev_scope **devices, u16 segment);
+extern void dmar_register_bus_notifier(void);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
@@ -124,8 +116,8 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
u16 segment, struct dmar_dev_scope *devices,
int count);
/* Intel IOMMU detection */
-extern int detect_intel_iommu(void);
-extern int enable_drhd_fault_handling(void);
+void detect_intel_iommu(void);
+extern int enable_drhd_fault_handling(unsigned int cpu);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);
@@ -134,22 +126,34 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+#ifdef CONFIG_DMAR_DEBUG
+void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid);
+#else
+static inline void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid) {}
+#endif
+
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
+extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
+static inline void intel_iommu_shutdown(void) { }
#define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop
#define dmar_check_one_atsr dmar_res_noop
#define dmar_release_one_atsr dmar_res_noop
+#define dmar_parse_one_satc dmar_res_noop
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
@@ -169,6 +173,8 @@ static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
#endif /* CONFIG_IRQ_REMAP */
+extern bool dmar_platform_optin(void);
+
#else /* CONFIG_DMAR_TABLE */
static inline int dmar_device_add(void *handle)
@@ -181,71 +187,87 @@ static inline int dmar_device_remove(void *handle)
return 0;
}
-#endif /* CONFIG_DMAR_TABLE */
-
-struct irte {
- union {
- /* Shared between remapped and posted mode*/
- struct {
- __u64 present : 1, /* 0 */
- fpd : 1, /* 1 */
- __res0 : 6, /* 2 - 6 */
- avail : 4, /* 8 - 11 */
- __res1 : 3, /* 12 - 14 */
- pst : 1, /* 15 */
- vector : 8, /* 16 - 23 */
- __res2 : 40; /* 24 - 63 */
- };
+static inline bool dmar_platform_optin(void)
+{
+ return false;
+}
- /* Remapped mode */
- struct {
- __u64 r_present : 1, /* 0 */
- r_fpd : 1, /* 1 */
- dst_mode : 1, /* 2 */
- redir_hint : 1, /* 3 */
- trigger_mode : 1, /* 4 */
- dlvry_mode : 3, /* 5 - 7 */
- r_avail : 4, /* 8 - 11 */
- r_res0 : 4, /* 12 - 15 */
- r_vector : 8, /* 16 - 23 */
- r_res1 : 8, /* 24 - 31 */
- dest_id : 32; /* 32 - 63 */
- };
+static inline void detect_intel_iommu(void)
+{
+}
- /* Posted mode */
- struct {
- __u64 p_present : 1, /* 0 */
- p_fpd : 1, /* 1 */
- p_res0 : 6, /* 2 - 7 */
- p_avail : 4, /* 8 - 11 */
- p_res1 : 2, /* 12 - 13 */
- p_urgent : 1, /* 14 */
- p_pst : 1, /* 15 */
- p_vector : 8, /* 16 - 23 */
- p_res2 : 14, /* 24 - 37 */
- pda_l : 26; /* 38 - 63 */
- };
- __u64 low;
- };
+#endif /* CONFIG_DMAR_TABLE */
+struct irte {
union {
- /* Shared between remapped and posted mode*/
- struct {
- __u64 sid : 16, /* 64 - 79 */
- sq : 2, /* 80 - 81 */
- svt : 2, /* 82 - 83 */
- __res3 : 44; /* 84 - 127 */
- };
-
- /* Posted mode*/
struct {
- __u64 p_sid : 16, /* 64 - 79 */
- p_sq : 2, /* 80 - 81 */
- p_svt : 2, /* 82 - 83 */
- p_res3 : 12, /* 84 - 95 */
- pda_h : 32; /* 96 - 127 */
+ union {
+ /* Shared between remapped and posted mode*/
+ struct {
+ __u64 present : 1, /* 0 */
+ fpd : 1, /* 1 */
+ __res0 : 6, /* 2 - 6 */
+ avail : 4, /* 8 - 11 */
+ __res1 : 3, /* 12 - 14 */
+ pst : 1, /* 15 */
+ vector : 8, /* 16 - 23 */
+ __res2 : 40; /* 24 - 63 */
+ };
+
+ /* Remapped mode */
+ struct {
+ __u64 r_present : 1, /* 0 */
+ r_fpd : 1, /* 1 */
+ dst_mode : 1, /* 2 */
+ redir_hint : 1, /* 3 */
+ trigger_mode : 1, /* 4 */
+ dlvry_mode : 3, /* 5 - 7 */
+ r_avail : 4, /* 8 - 11 */
+ r_res0 : 4, /* 12 - 15 */
+ r_vector : 8, /* 16 - 23 */
+ r_res1 : 8, /* 24 - 31 */
+ dest_id : 32; /* 32 - 63 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_present : 1, /* 0 */
+ p_fpd : 1, /* 1 */
+ p_res0 : 6, /* 2 - 7 */
+ p_avail : 4, /* 8 - 11 */
+ p_res1 : 2, /* 12 - 13 */
+ p_urgent : 1, /* 14 */
+ p_pst : 1, /* 15 */
+ p_vector : 8, /* 16 - 23 */
+ p_res2 : 14, /* 24 - 37 */
+ pda_l : 26; /* 38 - 63 */
+ };
+ __u64 low;
+ };
+
+ union {
+ /* Shared between remapped and posted mode*/
+ struct {
+ __u64 sid : 16, /* 64 - 79 */
+ sq : 2, /* 80 - 81 */
+ svt : 2, /* 82 - 83 */
+ __res3 : 44; /* 84 - 127 */
+ };
+
+ /* Posted mode*/
+ struct {
+ __u64 p_sid : 16, /* 64 - 79 */
+ p_sq : 2, /* 80 - 81 */
+ p_svt : 2, /* 82 - 83 */
+ p_res3 : 12, /* 84 - 95 */
+ pda_h : 32; /* 96 - 127 */
+ };
+ __u64 high;
+ };
};
- __u64 high;
+#ifdef CONFIG_IRQ_REMAP
+ __u128 irte;
+#endif
};
};
@@ -264,18 +286,12 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
#define PDA_LOW_BIT 26
#define PDA_HIGH_BIT 32
-enum {
- IRQ_REMAP_XAPIC_MODE,
- IRQ_REMAP_X2APIC_MODE,
-};
-
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
struct irq_data;
extern void dmar_msi_unmask(struct irq_data *data);
extern void dmar_msi_mask(struct irq_data *data);
-extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 9bbf21a516e4..927f8a8b7a1d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DMI_H__
#define __DMI_H__
@@ -101,10 +102,9 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
extern const char * dmi_get_system_info(int field);
extern const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from);
-extern void dmi_scan_machine(void);
-extern void dmi_memdev_walk(void);
-extern void dmi_set_dump_stack_arch_desc(void);
+extern void dmi_setup(void);
extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
+extern int dmi_get_bios_year(void);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
@@ -112,6 +112,9 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
+extern u64 dmi_memdev_size(u16 handle);
+extern u8 dmi_memdev_type(u16 handle);
+extern u16 dmi_memdev_handle(int slot);
#else
@@ -119,9 +122,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0;
static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
-static inline void dmi_scan_machine(void) { return; }
-static inline void dmi_memdev_walk(void) { }
-static inline void dmi_set_dump_stack_arch_desc(void) { }
+static inline void dmi_setup(void) { }
static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
if (yearp)
@@ -132,6 +133,7 @@ static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
*dayp = 0;
return false;
}
+static inline int dmi_get_bios_year(void) { return -ENXIO; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
@@ -141,6 +143,9 @@ static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
static inline void dmi_memdev_name(u16 handle, const char **bank,
const char **device) { }
+static inline u64 dmi_memdev_size(u16 handle) { return ~0ul; }
+static inline u8 dmi_memdev_type(u16 handle) { return 0x0; }
+static inline u16 dmi_memdev_handle(int slot) { return 0xffff; }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 3290555a52ee..9f183a679277 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DNOTIFY_H
#define _LINUX_DNOTIFY_H
/*
@@ -25,12 +26,11 @@ struct dnotify_struct {
FS_MODIFY | FS_MODIFY_CHILD |\
FS_ACCESS | FS_ACCESS_CHILD |\
FS_ATTRIB | FS_ATTRIB_CHILD |\
- FS_CREATE | FS_DN_RENAME |\
+ FS_CREATE | FS_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
-extern int dir_notify_enable;
extern void dnotify_flush(struct file *, fl_owner_t);
-extern int fcntl_dirnotify(int, struct file *, unsigned long);
+extern int fcntl_dirnotify(int, struct file *, unsigned int);
#else
@@ -38,7 +38,7 @@ static inline void dnotify_flush(struct file *filp, fl_owner_t id)
{
}
-static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
+static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
{
return -EINVAL;
}
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index 6ac3cad9aef1..976cbbdb2832 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -24,11 +24,11 @@
#ifndef _LINUX_DNS_RESOLVER_H
#define _LINUX_DNS_RESOLVER_H
-#ifdef __KERNEL__
+#include <uapi/linux/dns_resolver.h>
-extern int dns_query(const char *type, const char *name, size_t namelen,
- const char *options, char **_result, time64_t *_expiry);
-
-#endif /* KERNEL */
+struct net;
+extern int dns_query(struct net *net, const char *type, const char *name, size_t namelen,
+ const char *options, char **_result, time64_t *_expiry,
+ bool invalidate);
#endif /* _LINUX_DNS_RESOLVER_H */
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
new file mode 100644
index 000000000000..562f520b23c2
--- /dev/null
+++ b/include/linux/dpll.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel and affiliates
+ */
+
+#ifndef __DPLL_H__
+#define __DPLL_H__
+
+#include <uapi/linux/dpll.h>
+#include <linux/device.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+struct dpll_device;
+struct dpll_pin;
+struct dpll_pin_esync;
+
+struct dpll_device_ops {
+ int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack);
+ int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack);
+ int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
+ s32 *temp, struct netlink_ext_ack *extack);
+ int (*clock_quality_level_get)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ unsigned long *qls,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_set)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_get)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_avg_factor_set)(const struct dpll_device *dpll,
+ void *dpll_priv, u32 factor,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_avg_factor_get)(const struct dpll_device *dpll,
+ void *dpll_priv, u32 *factor,
+ struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_ops {
+ int (*frequency_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u64 frequency,
+ struct netlink_ext_ack *extack);
+ int (*frequency_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack);
+ int (*direction_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack);
+ int (*direction_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*prio_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack);
+ int (*prio_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u32 prio, struct netlink_ext_ack *extack);
+ int (*phase_offset_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *phase_offset,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const s32 phase_adjust,
+ struct netlink_ext_ack *extack);
+ int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack);
+ int (*esync_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack);
+ int (*esync_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack);
+ int (*ref_sync_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_sync_pin,
+ void *ref_sync_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*ref_sync_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_sync_pin,
+ void *ref_sync_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_frequency {
+ u64 min;
+ u64 max;
+};
+
+#define DPLL_PIN_FREQUENCY_RANGE(_min, _max) \
+ { \
+ .min = _min, \
+ .max = _max, \
+ }
+
+#define DPLL_PIN_FREQUENCY(_val) DPLL_PIN_FREQUENCY_RANGE(_val, _val)
+#define DPLL_PIN_FREQUENCY_1PPS \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_1_HZ)
+#define DPLL_PIN_FREQUENCY_10MHZ \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_MHZ)
+#define DPLL_PIN_FREQUENCY_IRIG_B \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_KHZ)
+#define DPLL_PIN_FREQUENCY_DCF77 \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_77_5_KHZ)
+
+struct dpll_pin_phase_adjust_range {
+ s32 min;
+ s32 max;
+};
+
+struct dpll_pin_esync {
+ u64 freq;
+ const struct dpll_pin_frequency *range;
+ u8 range_num;
+ u8 pulse;
+};
+
+struct dpll_pin_properties {
+ const char *board_label;
+ const char *panel_label;
+ const char *package_label;
+ enum dpll_pin_type type;
+ unsigned long capabilities;
+ u32 freq_supported_num;
+ struct dpll_pin_frequency *freq_supported;
+ struct dpll_pin_phase_adjust_range phase_range;
+ u32 phase_gran;
+};
+
+#if IS_ENABLED(CONFIG_DPLL)
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
+void dpll_netdev_pin_clear(struct net_device *dev);
+
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev);
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev);
+#else
+static inline void
+dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) { }
+static inline void dpll_netdev_pin_clear(struct net_device *dev) { }
+
+static inline size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline int
+dpll_netdev_add_pin_handle(struct sk_buff *msg, const struct net_device *dev)
+{
+ return 0;
+}
+#endif
+
+struct dpll_device *
+dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module);
+
+void dpll_device_put(struct dpll_device *dpll);
+
+int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
+ const struct dpll_device_ops *ops, void *priv);
+
+void dpll_device_unregister(struct dpll_device *dpll,
+ const struct dpll_device_ops *ops, void *priv);
+
+struct dpll_pin *
+dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module,
+ const struct dpll_pin_properties *prop);
+
+int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_put(struct dpll_pin *pin);
+
+int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin,
+ struct dpll_pin *ref_sync_pin);
+
+int dpll_device_change_ntf(struct dpll_device *dpll);
+
+int dpll_pin_change_ntf(struct dpll_pin *pin);
+
+#endif
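To give a feel for the registration flow this header exposes, a driver-side sketch under assumed names (clock id, callbacks and priv are illustrative, and only part of the ops is filled in):

	static const struct dpll_device_ops my_dpll_ops = {
		.mode_get        = my_mode_get,		/* hypothetical */
		.lock_status_get = my_lock_status_get,	/* hypothetical */
	};

	struct dpll_device *dpll;
	int err;

	dpll = dpll_device_get(my_clock_id, 0, THIS_MODULE);
	if (IS_ERR(dpll))
		return PTR_ERR(dpll);

	err = dpll_device_register(dpll, DPLL_TYPE_EEC, &my_dpll_ops, priv);
	if (err) {
		dpll_device_put(dpll);
		return err;
	}
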
diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h
index 0de21e935976..100d22a46b82 100644
--- a/include/linux/dqblk_qtree.h
+++ b/include/linux/dqblk_qtree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions of structures and functions for quota formats using trie
*/
diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h
index c0d4d1e2a45c..85d837a14838 100644
--- a/include/linux/dqblk_v1.h
+++ b/include/linux/dqblk_v1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* File with in-memory structures of old quota format
*/
diff --git a/include/linux/dqblk_v2.h b/include/linux/dqblk_v2.h
index 18000a542677..da95932ad9e7 100644
--- a/include/linux/dqblk_v2.h
+++ b/include/linux/dqblk_v2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for vfsv0 quota format
*/
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 002611c85318..5468a2399d48 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
drbd.h
Kernel module for 2.6.x Kernels
@@ -8,19 +9,6 @@
Copyright (C) 2001-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2001-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
- drbd is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- drbd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with drbd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef DRBD_H
@@ -50,13 +38,6 @@
#endif
-extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.7"
-#define API_VERSION 1
-#define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 101
-
-
enum drbd_io_error_p {
	EP_PASS_ON, /* FIXME should this better be named "Ignore"? */
EP_CALL_HELPER,
diff --git a/include/linux/drbd_config.h b/include/linux/drbd_config.h
new file mode 100644
index 000000000000..d215365c6bb1
--- /dev/null
+++ b/include/linux/drbd_config.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * drbd_config.h
+ * DRBD's compile time configuration.
+ */
+
+#ifndef DRBD_CONFIG_H
+#define DRBD_CONFIG_H
+
+extern const char *drbd_buildtag(void);
+
+#define REL_VERSION "8.4.11"
+#define PRO_VERSION_MIN 86
+#define PRO_VERSION_MAX 101
+
+#endif
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index 2896f93808ae..53f44b8cd75f 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* General overview:
* full generic netlink message:
@@ -132,7 +133,8 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
__flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
__flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
__flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF)
- __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED)
+ __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF)
+ __flg_field_def(26, 0 /* OPTIONAL */, disable_write_same, DRBD_DISABLE_WRITE_SAME_DEF)
)
GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,
diff --git a/include/linux/drbd_genl_api.h b/include/linux/drbd_genl_api.h
index 9ef50d51e34e..70682c058027 100644
--- a/include/linux/drbd_genl_api.h
+++ b/include/linux/drbd_genl_api.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DRBD_GENL_STRUCT_H
#define DRBD_GENL_STRUCT_H
@@ -46,7 +47,7 @@ enum drbd_state_info_bcast_reason {
#undef linux
#include <linux/drbd.h>
-#define GENL_MAGIC_VERSION API_VERSION
+#define GENL_MAGIC_VERSION 1
#define GENL_MAGIC_FAMILY drbd
#define GENL_MAGIC_FAMILY_HDRSZ sizeof(struct drbd_genlmsghdr)
#define GENL_MAGIC_INCLUDE_FILE <linux/drbd_genl.h>
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index ddac68422a96..5b042fb427e9 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
drbd_limits.h
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
@@ -15,123 +16,123 @@
#define DEBUG_RANGE_CHECK 0
-#define DRBD_MINOR_COUNT_MIN 1
-#define DRBD_MINOR_COUNT_MAX 255
-#define DRBD_MINOR_COUNT_DEF 32
+#define DRBD_MINOR_COUNT_MIN 1U
+#define DRBD_MINOR_COUNT_MAX 255U
+#define DRBD_MINOR_COUNT_DEF 32U
#define DRBD_MINOR_COUNT_SCALE '1'
-#define DRBD_VOLUME_MAX 65535
+#define DRBD_VOLUME_MAX 65534U
-#define DRBD_DIALOG_REFRESH_MIN 0
-#define DRBD_DIALOG_REFRESH_MAX 600
+#define DRBD_DIALOG_REFRESH_MIN 0U
+#define DRBD_DIALOG_REFRESH_MAX 600U
#define DRBD_DIALOG_REFRESH_SCALE '1'
/* valid port number */
-#define DRBD_PORT_MIN 1
-#define DRBD_PORT_MAX 0xffff
+#define DRBD_PORT_MIN 1U
+#define DRBD_PORT_MAX 0xffffU
#define DRBD_PORT_SCALE '1'
/* startup { */
/* if you want more than 3.4 days, disable */
-#define DRBD_WFC_TIMEOUT_MIN 0
-#define DRBD_WFC_TIMEOUT_MAX 300000
-#define DRBD_WFC_TIMEOUT_DEF 0
+#define DRBD_WFC_TIMEOUT_MIN 0U
+#define DRBD_WFC_TIMEOUT_MAX 300000U
+#define DRBD_WFC_TIMEOUT_DEF 0U
#define DRBD_WFC_TIMEOUT_SCALE '1'
-#define DRBD_DEGR_WFC_TIMEOUT_MIN 0
-#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000
-#define DRBD_DEGR_WFC_TIMEOUT_DEF 0
+#define DRBD_DEGR_WFC_TIMEOUT_MIN 0U
+#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000U
+#define DRBD_DEGR_WFC_TIMEOUT_DEF 0U
#define DRBD_DEGR_WFC_TIMEOUT_SCALE '1'
-#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0
-#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000
-#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0
+#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0U
+#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000U
+#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0U
#define DRBD_OUTDATED_WFC_TIMEOUT_SCALE '1'
/* }*/
/* net { */
/* timeout, unit centi seconds
* more than one minute timeout is not useful */
-#define DRBD_TIMEOUT_MIN 1
-#define DRBD_TIMEOUT_MAX 600
-#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
+#define DRBD_TIMEOUT_MIN 1U
+#define DRBD_TIMEOUT_MAX 600U
+#define DRBD_TIMEOUT_DEF 60U /* 6 seconds */
#define DRBD_TIMEOUT_SCALE '1'
/* If backing disk takes longer than disk_timeout, mark the disk as failed */
-#define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */
-#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
-#define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */
+#define DRBD_DISK_TIMEOUT_MIN 0U /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000U /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0U /* disabled */
#define DRBD_DISK_TIMEOUT_SCALE '1'
/* active connection retries when C_WF_CONNECTION */
-#define DRBD_CONNECT_INT_MIN 1
-#define DRBD_CONNECT_INT_MAX 120
-#define DRBD_CONNECT_INT_DEF 10 /* seconds */
+#define DRBD_CONNECT_INT_MIN 1U
+#define DRBD_CONNECT_INT_MAX 120U
+#define DRBD_CONNECT_INT_DEF 10U /* seconds */
#define DRBD_CONNECT_INT_SCALE '1'
/* keep-alive probes when idle */
-#define DRBD_PING_INT_MIN 1
-#define DRBD_PING_INT_MAX 120
-#define DRBD_PING_INT_DEF 10
+#define DRBD_PING_INT_MIN 1U
+#define DRBD_PING_INT_MAX 120U
+#define DRBD_PING_INT_DEF 10U
#define DRBD_PING_INT_SCALE '1'
/* timeout for the ping packets.*/
-#define DRBD_PING_TIMEO_MIN 1
-#define DRBD_PING_TIMEO_MAX 300
-#define DRBD_PING_TIMEO_DEF 5
+#define DRBD_PING_TIMEO_MIN 1U
+#define DRBD_PING_TIMEO_MAX 300U
+#define DRBD_PING_TIMEO_DEF 5U
#define DRBD_PING_TIMEO_SCALE '1'
/* max number of write requests between write barriers */
-#define DRBD_MAX_EPOCH_SIZE_MIN 1
-#define DRBD_MAX_EPOCH_SIZE_MAX 20000
-#define DRBD_MAX_EPOCH_SIZE_DEF 2048
+#define DRBD_MAX_EPOCH_SIZE_MIN 1U
+#define DRBD_MAX_EPOCH_SIZE_MAX 20000U
+#define DRBD_MAX_EPOCH_SIZE_DEF 2048U
#define DRBD_MAX_EPOCH_SIZE_SCALE '1'
/* I don't think that a tcp send buffer of more than 10M is useful */
-#define DRBD_SNDBUF_SIZE_MIN 0
-#define DRBD_SNDBUF_SIZE_MAX (10<<20)
-#define DRBD_SNDBUF_SIZE_DEF 0
+#define DRBD_SNDBUF_SIZE_MIN 0U
+#define DRBD_SNDBUF_SIZE_MAX (10U<<20)
+#define DRBD_SNDBUF_SIZE_DEF 0U
#define DRBD_SNDBUF_SIZE_SCALE '1'
-#define DRBD_RCVBUF_SIZE_MIN 0
-#define DRBD_RCVBUF_SIZE_MAX (10<<20)
-#define DRBD_RCVBUF_SIZE_DEF 0
+#define DRBD_RCVBUF_SIZE_MIN 0U
+#define DRBD_RCVBUF_SIZE_MAX (10U<<20)
+#define DRBD_RCVBUF_SIZE_DEF 0U
#define DRBD_RCVBUF_SIZE_SCALE '1'
/* @4k PageSize -> 128kB - 512MB */
-#define DRBD_MAX_BUFFERS_MIN 32
-#define DRBD_MAX_BUFFERS_MAX 131072
-#define DRBD_MAX_BUFFERS_DEF 2048
+#define DRBD_MAX_BUFFERS_MIN 32U
+#define DRBD_MAX_BUFFERS_MAX 131072U
+#define DRBD_MAX_BUFFERS_DEF 2048U
#define DRBD_MAX_BUFFERS_SCALE '1'
/* @4k PageSize -> 4kB - 512MB */
-#define DRBD_UNPLUG_WATERMARK_MIN 1
-#define DRBD_UNPLUG_WATERMARK_MAX 131072
+#define DRBD_UNPLUG_WATERMARK_MIN 1U
+#define DRBD_UNPLUG_WATERMARK_MAX 131072U
#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16)
#define DRBD_UNPLUG_WATERMARK_SCALE '1'
/* 0 is disabled.
* 200 should be more than enough even for very short timeouts */
-#define DRBD_KO_COUNT_MIN 0
-#define DRBD_KO_COUNT_MAX 200
-#define DRBD_KO_COUNT_DEF 7
+#define DRBD_KO_COUNT_MIN 0U
+#define DRBD_KO_COUNT_MAX 200U
+#define DRBD_KO_COUNT_DEF 7U
#define DRBD_KO_COUNT_SCALE '1'
/* } */
/* syncer { */
/* FIXME allow rate to be zero? */
-#define DRBD_RESYNC_RATE_MIN 1
+#define DRBD_RESYNC_RATE_MIN 1U
/* channel bonding 10 GbE, or other hardware */
#define DRBD_RESYNC_RATE_MAX (4 << 20)
-#define DRBD_RESYNC_RATE_DEF 250
+#define DRBD_RESYNC_RATE_DEF 250U
#define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_AL_EXTENTS_MIN 67
+#define DRBD_AL_EXTENTS_MIN 67U
/* we use u16 as "slot number", (u16)~0 is "FREE".
* If you use >= 292 kB on-disk ring buffer,
* this is the maximum you can use: */
-#define DRBD_AL_EXTENTS_MAX 0xfffe
-#define DRBD_AL_EXTENTS_DEF 1237
+#define DRBD_AL_EXTENTS_MAX 0xfffeU
+#define DRBD_AL_EXTENTS_DEF 1237U
#define DRBD_AL_EXTENTS_SCALE '1'
#define DRBD_MINOR_NUMBER_MIN -1
@@ -146,9 +147,9 @@
* the upper limit with 64bit kernel, enough ram and flexible meta data
* is 1 PiB, currently. */
/* DRBD_MAX_SECTORS */
-#define DRBD_DISK_SIZE_MIN 0
-#define DRBD_DISK_SIZE_MAX (1 * (2LLU << 40))
-#define DRBD_DISK_SIZE_DEF 0 /* = disabled = no user size... */
+#define DRBD_DISK_SIZE_MIN 0LLU
+#define DRBD_DISK_SIZE_MAX (1LLU * (2LLU << 40))
+#define DRBD_DISK_SIZE_DEF 0LLU /* = disabled = no user size... */
#define DRBD_DISK_SIZE_SCALE 's' /* sectors */
#define DRBD_ON_IO_ERROR_DEF EP_DETACH
@@ -161,39 +162,39 @@
#define DRBD_ON_CONGESTION_DEF OC_BLOCK
#define DRBD_READ_BALANCING_DEF RB_PREFER_LOCAL
-#define DRBD_MAX_BIO_BVECS_MIN 0
-#define DRBD_MAX_BIO_BVECS_MAX 128
-#define DRBD_MAX_BIO_BVECS_DEF 0
+#define DRBD_MAX_BIO_BVECS_MIN 0U
+#define DRBD_MAX_BIO_BVECS_MAX 128U
+#define DRBD_MAX_BIO_BVECS_DEF 0U
#define DRBD_MAX_BIO_BVECS_SCALE '1'
-#define DRBD_C_PLAN_AHEAD_MIN 0
-#define DRBD_C_PLAN_AHEAD_MAX 300
-#define DRBD_C_PLAN_AHEAD_DEF 20
+#define DRBD_C_PLAN_AHEAD_MIN 0U
+#define DRBD_C_PLAN_AHEAD_MAX 300U
+#define DRBD_C_PLAN_AHEAD_DEF 20U
#define DRBD_C_PLAN_AHEAD_SCALE '1'
-#define DRBD_C_DELAY_TARGET_MIN 1
-#define DRBD_C_DELAY_TARGET_MAX 100
-#define DRBD_C_DELAY_TARGET_DEF 10
+#define DRBD_C_DELAY_TARGET_MIN 1U
+#define DRBD_C_DELAY_TARGET_MAX 100U
+#define DRBD_C_DELAY_TARGET_DEF 10U
#define DRBD_C_DELAY_TARGET_SCALE '1'
-#define DRBD_C_FILL_TARGET_MIN 0
-#define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */
-#define DRBD_C_FILL_TARGET_DEF 100 /* Try to place 50KiB in socket send buffer during resync */
+#define DRBD_C_FILL_TARGET_MIN 0U
+#define DRBD_C_FILL_TARGET_MAX (1U<<20) /* 500MByte in sec */
+#define DRBD_C_FILL_TARGET_DEF 100U /* Try to place 50KiB in socket send buffer during resync */
#define DRBD_C_FILL_TARGET_SCALE 's' /* sectors */
-#define DRBD_C_MAX_RATE_MIN 250
-#define DRBD_C_MAX_RATE_MAX (4 << 20)
-#define DRBD_C_MAX_RATE_DEF 102400
+#define DRBD_C_MAX_RATE_MIN 250U
+#define DRBD_C_MAX_RATE_MAX (4U << 20)
+#define DRBD_C_MAX_RATE_DEF 102400U
#define DRBD_C_MAX_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_C_MIN_RATE_MIN 0
-#define DRBD_C_MIN_RATE_MAX (4 << 20)
-#define DRBD_C_MIN_RATE_DEF 250
+#define DRBD_C_MIN_RATE_MIN 0U
+#define DRBD_C_MIN_RATE_MAX (4U << 20)
+#define DRBD_C_MIN_RATE_DEF 250U
#define DRBD_C_MIN_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_CONG_FILL_MIN 0
-#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */
-#define DRBD_CONG_FILL_DEF 0
+#define DRBD_CONG_FILL_MIN 0U
+#define DRBD_CONG_FILL_MAX (10U<<21) /* 10GByte in sectors */
+#define DRBD_CONG_FILL_DEF 0U
#define DRBD_CONG_FILL_SCALE 's' /* sectors */
#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN
@@ -203,42 +204,48 @@
#define DRBD_PROTOCOL_DEF DRBD_PROT_C
-#define DRBD_DISK_BARRIER_DEF 0
-#define DRBD_DISK_FLUSHES_DEF 1
-#define DRBD_DISK_DRAIN_DEF 1
-#define DRBD_MD_FLUSHES_DEF 1
-#define DRBD_TCP_CORK_DEF 1
-#define DRBD_AL_UPDATES_DEF 1
+#define DRBD_DISK_BARRIER_DEF 0U
+#define DRBD_DISK_FLUSHES_DEF 1U
+#define DRBD_DISK_DRAIN_DEF 1U
+#define DRBD_MD_FLUSHES_DEF 1U
+#define DRBD_TCP_CORK_DEF 1U
+#define DRBD_AL_UPDATES_DEF 1U
+
/* We used to ignore the discard_zeroes_data setting.
* To not change established (and expected) behaviour,
* by default assume that, for discard_zeroes_data=0,
* we can make that an effective discard_zeroes_data=1,
* if we only explicitly zero-out unaligned partial chunks. */
-#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1
+#define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1U
+
+/* Some backends pretend to support WRITE SAME,
+ * but fail such requests when they are actually submitted.
+ * This is to tell DRBD to not even try. */
+#define DRBD_DISABLE_WRITE_SAME_DEF 0U
-#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
-#define DRBD_ALWAYS_ASBP_DEF 0
-#define DRBD_USE_RLE_DEF 1
-#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0
+#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0U
+#define DRBD_ALWAYS_ASBP_DEF 0U
+#define DRBD_USE_RLE_DEF 1U
+#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0U
-#define DRBD_AL_STRIPES_MIN 1
-#define DRBD_AL_STRIPES_MAX 1024
-#define DRBD_AL_STRIPES_DEF 1
+#define DRBD_AL_STRIPES_MIN 1U
+#define DRBD_AL_STRIPES_MAX 1024U
+#define DRBD_AL_STRIPES_DEF 1U
#define DRBD_AL_STRIPES_SCALE '1'
-#define DRBD_AL_STRIPE_SIZE_MIN 4
-#define DRBD_AL_STRIPE_SIZE_MAX 16777216
-#define DRBD_AL_STRIPE_SIZE_DEF 32
+#define DRBD_AL_STRIPE_SIZE_MIN 4U
+#define DRBD_AL_STRIPE_SIZE_MAX 16777216U
+#define DRBD_AL_STRIPE_SIZE_DEF 32U
#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
-#define DRBD_SOCKET_CHECK_TIMEO_MIN 0
+#define DRBD_SOCKET_CHECK_TIMEO_MIN 0U
#define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX
-#define DRBD_SOCKET_CHECK_TIMEO_DEF 0
+#define DRBD_SOCKET_CHECK_TIMEO_DEF 0U
#define DRBD_SOCKET_CHECK_TIMEO_SCALE '1'
-#define DRBD_RS_DISCARD_GRANULARITY_MIN 0
-#define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */
-#define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */
+#define DRBD_RS_DISCARD_GRANULARITY_MIN 0U
+#define DRBD_RS_DISCARD_GRANULARITY_MAX (1U<<20) /* 1MiByte */
+#define DRBD_RS_DISCARD_GRANULARITY_DEF 0U /* disabled by default */
#define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */
#endif
diff --git a/include/linux/ds2782_battery.h b/include/linux/ds2782_battery.h
index b4e281f65c15..fb6c97e10956 100644
--- a/include/linux/ds2782_battery.h
+++ b/include/linux/ds2782_battery.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_DS2782_BATTERY_H
#define __LINUX_DS2782_BATTERY_H
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
new file mode 100644
index 000000000000..d13aabdeb4b2
--- /dev/null
+++ b/include/linux/dsa/8021q.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+#ifndef _NET_DSA_8021Q_H
+#define _NET_DSA_8021Q_H
+
+#include <net/dsa.h>
+#include <linux/types.h>
+
+/* VBID is limited to three bits only and zero is reserved.
+ * Only 7 bridges can be enumerated.
+ */
+#define DSA_TAG_8021Q_MAX_NUM_BRIDGES 7
+
+int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);
+
+void dsa_tag_8021q_unregister(struct dsa_switch *ds);
+
+int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+
+void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num);
+
+u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp);
+
+int dsa_8021q_rx_switch_id(u16 vid);
+
+int dsa_8021q_rx_source_port(u16 vid);
+
+bool vid_is_dsa_8021q(u16 vid);
+
+#endif /* _NET_DSA_8021Q_H */
diff --git a/include/linux/dsa/brcm.h b/include/linux/dsa/brcm.h
new file mode 100644
index 000000000000..47545a948784
--- /dev/null
+++ b/include/linux/dsa/brcm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2014 Broadcom Corporation
+ */
+
+/* Included by drivers/net/ethernet/broadcom/bcmsysport.c and
+ * net/dsa/tag_brcm.c
+ */
+#ifndef _NET_DSA_BRCM_H
+#define _NET_DSA_BRCM_H
+
+/* Broadcom tag specific helpers to insert and extract queue/port number */
+#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q)
+#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
+#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+
+#endif
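A quick worked example of the tag helpers with illustrative values: port 5, queue 2 packs into 0x0502, and the getters recover both fields:

	u16 v = BRCM_TAG_SET_PORT_QUEUE(5, 2);	/* (5 << 8) | 2 == 0x0502 */
	int port  = BRCM_TAG_GET_PORT(v);	/* 5 */
	int queue = BRCM_TAG_GET_QUEUE(v);	/* 2 */
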
diff --git a/include/linux/dsa/ksz_common.h b/include/linux/dsa/ksz_common.h
new file mode 100644
index 000000000000..576a99ca698d
--- /dev/null
+++ b/include/linux/dsa/ksz_common.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch tag common header
+ *
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_KSZ_COMMON_H_
+#define _NET_DSA_KSZ_COMMON_H_
+
+#include <net/dsa.h>
+
+/* All time stamps from the KSZ consist of 2 bits for seconds and 30 bits for
+ * nanoseconds. This is NOT the same as 32 bits for nanoseconds.
+ */
+#define KSZ_TSTAMP_SEC_MASK GENMASK(31, 30)
+#define KSZ_TSTAMP_NSEC_MASK GENMASK(29, 0)
+
+static inline ktime_t ksz_decode_tstamp(u32 tstamp)
+{
+ u64 ns = FIELD_GET(KSZ_TSTAMP_SEC_MASK, tstamp) * NSEC_PER_SEC +
+ FIELD_GET(KSZ_TSTAMP_NSEC_MASK, tstamp);
+
+ return ns_to_ktime(ns);
+}
+
+struct ksz_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ksz_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*hwtstamp_set_state)(struct dsa_switch *ds, bool on);
+};
+
+struct ksz_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_type;
+ bool update_correction;
+ u32 tstamp;
+};
+
+#define KSZ_SKB_CB(skb) \
+ ((struct ksz_skb_cb *)((skb)->cb))
+
+static inline struct ksz_tagger_data *
+ksz_tagger_data(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
+#endif /* _NET_DSA_KSZ_COMMON_H_ */
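
A worked example of the 2+30 bit split described above (illustrative, not
part of this diff; the raw register value is made up):

	/* 2 seconds in bits 31:30, 500000000 ns in bits 29:0 */
	u32 raw = (2U << 30) | 500000000;
	ktime_t ts = ksz_decode_tstamp(raw);

	/* ts == 2 * NSEC_PER_SEC + 500000000 == 2500000000 ns */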
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
new file mode 100644
index 000000000000..3ce7cbcc37a3
--- /dev/null
+++ b/include/linux/dsa/lan9303.h
@@ -0,0 +1,39 @@
+/* Included by drivers/net/dsa/lan9303.h and net/dsa/tag_lan9303.c */
+#include <linux/if_ether.h>
+
+struct lan9303;
+
+struct lan9303_phy_ops {
+ /* PHY 1 and 2 access */
+ int (*phy_read)(struct lan9303 *chip, int addr, int regnum);
+ int (*phy_write)(struct lan9303 *chip, int addr,
+ int regnum, u16 val);
+};
+
+#define LAN9303_NUM_ALR_RECORDS 512
+struct lan9303_alr_cache_entry {
+ u8 mac_addr[ETH_ALEN];
+ u8 port_map; /* Bitmap of ports. Zero if unused entry */
+ u8 stp_override; /* non-zero if LAN9303_ALR_DAT1_AGE_OVERRID is set */
+};
+
+struct lan9303 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ struct gpio_desc *reset_gpio;
+ u32 reset_duration; /* in [ms] */
+ int phy_addr_base;
+ struct dsa_switch *ds;
+ struct mutex indirect_mutex; /* protect indexed register access */
+ struct mutex alr_mutex; /* protect ALR access */
+ const struct lan9303_phy_ops *ops;
+ bool is_bridged; /* true if port 1 and 2 are bridged */
+
+ /* remember LAN9303_SWE_PORT_STATE while not bridged */
+ u32 swe_port_state;
+ /* The LAN9303 does not offer reading a specific ALR entry. Cache all
+ * static entries in a flat table.
+ */
+ struct lan9303_alr_cache_entry alr_cache[LAN9303_NUM_ALR_RECORDS];
+};
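
A hedged sketch of a phy_ops backend (illustrative, not part of this diff;
the bodies are placeholders for real MDIO or I2C accessors):

	static int example_phy_read(struct lan9303 *chip, int addr, int regnum)
	{
		dev_dbg(chip->dev, "phy read: addr %d reg %d\n", addr, regnum);
		return 0xffff;	/* placeholder; a real backend reads the bus */
	}

	static int example_phy_write(struct lan9303 *chip, int addr,
				     int regnum, u16 val)
	{
		dev_dbg(chip->dev, "phy write: addr %d reg %d val 0x%04x\n",
			addr, regnum, val);
		return 0;
	}

	static const struct lan9303_phy_ops example_phy_ops = {
		.phy_read = example_phy_read,
		.phy_write = example_phy_write,
	};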
diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h
new file mode 100644
index 000000000000..b8fef35591aa
--- /dev/null
+++ b/include/linux/dsa/loop.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DSA_LOOP_H
+#define DSA_LOOP_H
+
+#include <linux/if_vlan.h>
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <net/dsa.h>
+
+struct dsa_loop_vlan {
+ u16 members;
+ u16 untagged;
+};
+
+struct dsa_loop_mib_entry {
+ char name[ETH_GSTRING_LEN];
+ unsigned long val;
+};
+
+enum dsa_loop_mib_counters {
+ DSA_LOOP_PHY_READ_OK,
+ DSA_LOOP_PHY_READ_ERR,
+ DSA_LOOP_PHY_WRITE_OK,
+ DSA_LOOP_PHY_WRITE_ERR,
+ __DSA_LOOP_CNT_MAX,
+};
+
+struct dsa_loop_port {
+ struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX];
+ u16 pvid;
+ int mtu;
+};
+
+struct dsa_loop_priv {
+ struct mii_bus *bus;
+ unsigned int port_base;
+ struct dsa_loop_vlan vlans[VLAN_N_VID];
+ struct net_device *netdev;
+ struct dsa_loop_port ports[DSA_MAX_PORTS];
+};
+
+#endif /* DSA_LOOP_H */
diff --git a/include/linux/dsa/mv88e6xxx.h b/include/linux/dsa/mv88e6xxx.h
new file mode 100644
index 000000000000..8c3d45eca46b
--- /dev/null
+++ b/include/linux/dsa/mv88e6xxx.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_MV88E6XXX_H
+#define _NET_DSA_TAG_MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+
+#define MV88E6XXX_VID_STANDALONE 0
+#define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1)
+
+#endif
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
new file mode 100644
index 000000000000..620a3260fc08
--- /dev/null
+++ b/include/linux/dsa/ocelot.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2019-2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_OCELOT_H
+#define _NET_DSA_TAG_OCELOT_H
+
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
+#include <linux/packing.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+struct ocelot_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_class; /* valid only for clones */
+ unsigned long ptp_tx_time; /* valid only for clones */
+ u32 tstamp_lo;
+ u8 ptp_cmd;
+ u8 ts_id;
+};
+
+#define OCELOT_SKB_CB(skb) \
+ ((struct ocelot_skb_cb *)((skb)->cb))
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
+#define OCELOT_TAG_LEN 16
+#define OCELOT_SHORT_PREFIX_LEN 4
+#define OCELOT_LONG_PREFIX_LEN 16
+#define OCELOT_TOTAL_TAG_LEN (OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN)
+
+/* The CPU injection header and the CPU extraction header can have 3 types of
+ * prefixes: long, short and no prefix. The format of the header itself is the
+ * same in all 3 cases.
+ *
+ * Extraction with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | ff:ff:ff:ff:ff:ff | fe:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Extraction with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | extraction | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Extraction with no prefix:
+ *
+ * +------------+-------+
+ * | extraction | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ *
+ * Injection with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | any dmac | any smac | 8880 | 000a | injection | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Injection with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | injection | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Injection with no prefix:
+ *
+ * +------------+-------+
+ * | injection | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ * The injection header looks like this (network byte order, bit 127
+ * is part of the lowest-address byte in memory, bit 0 is part of the
+ * highest-address byte):
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP|
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | RSV | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER|
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | TFRM_TIMER | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 | RSV | DP | POP_CNT | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ *
+ * And the extraction header looks like this:
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 | RSV | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL | LLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | LLEN | WLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | WLEN | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | ACL_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | ACL_ID | RSV | SFLOW_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ */
+
+struct felix_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ocelot_8021q_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+};
+
+static inline struct ocelot_8021q_tagger_data *
+ocelot_8021q_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_OCELOT_8021Q);
+
+ return ds->tagger_data;
+}
+
+static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
+{
+ packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_len(void *extraction, u64 *len)
+{
+ u64 llen, wlen;
+
+ packing(extraction, &llen, 84, 79, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &wlen, 78, 71, OCELOT_TAG_LEN, UNPACK, 0);
+
+ *len = 60 * wlen + llen - 80;
+}
+
+static inline void ocelot_xfh_get_src_port(void *extraction, u64 *src_port)
+{
+ packing(extraction, src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_qos_class(void *extraction, u64 *qos_class)
+{
+ packing(extraction, qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_tag_type(void *extraction, u64 *tag_type)
+{
+ packing(extraction, tag_type, 16, 16, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_vlan_tci(void *extraction, u64 *vlan_tci)
+{
+ packing(extraction, vlan_tci, 15, 0, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_ifh_set_bypass(void *injection, u64 bypass)
+{
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_rew_op(void *injection, u64 rew_op)
+{
+ packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 56, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_qos_class(void *injection, u64 qos_class)
+{
+ packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void seville_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_src(void *injection, u64 src)
+{
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type)
+{
+ packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_vlan_tci(void *injection, u64 vlan_tci)
+{
+ packing(injection, &vlan_tci, 15, 0, OCELOT_TAG_LEN, PACK, 0);
+}
+
+/* Determine the PTP REW_OP to use for injecting the given skb */
+static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+ u32 rew_op = 0;
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+ rew_op = ptp_cmd;
+ rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+ } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ rew_op = ptp_cmd;
+ }
+
+ return rew_op;
+}
+
+/**
+ * ocelot_xmit_get_vlan_info() - Determine VLAN_TCI and TAG_TYPE for injected frame
+ * @skb: Pointer to socket buffer
+ * @br: Pointer to bridge device that the port is under, if any
+ * @vlan_tci: pointer to the VLAN TCI to set in the injection header
+ * @tag_type: pointer to the IFH tag type (IFH_TAG_TYPE_C or IFH_TAG_TYPE_S)
+ *
+ * If the port is under a VLAN-aware bridge, remove the VLAN header from the
+ * payload and move it into the DSA tag, which will make the switch classify
+ * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
+ * which is the pvid of standalone ports (OCELOT_STANDALONE_PVID), although not
+ * of VLAN-unaware bridge ports (that would be ocelot_vlan_unaware_pvid()).
+ * Anyway, VID 0 is fine because it is stripped on egress for these port modes,
+ * and source address learning is not performed for packets injected from the
+ * CPU anyway, so it doesn't matter that the VID is "wrong".
+ */
+static inline void ocelot_xmit_get_vlan_info(struct sk_buff *skb,
+ struct net_device *br,
+ u64 *vlan_tci, u64 *tag_type)
+{
+ struct vlan_ethhdr *hdr;
+ u16 proto, tci;
+
+ if (!br || !br_vlan_enabled(br)) {
+ *vlan_tci = 0;
+ *tag_type = IFH_TAG_TYPE_C;
+ return;
+ }
+
+ hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
+ br_vlan_get_proto(br, &proto);
+
+ if (ntohs(hdr->h_vlan_proto) == proto) {
+ vlan_remove_tag(skb, &tci);
+ *vlan_tci = tci;
+ } else {
+ rcu_read_lock();
+ br_vlan_get_pvid_rcu(br, &tci);
+ rcu_read_unlock();
+ *vlan_tci = tci;
+ }
+
+ *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
+}
+
+#endif
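
A sketch of building an injection header with the setters above
(illustrative, not part of this diff; DEST is a destination port mask, and
the port, QoS class and VID values are arbitrary):

	u8 ifh[OCELOT_TAG_LEN] = {};

	ocelot_ifh_set_bypass(ifh, 1);			/* bypass the analyzer */
	ocelot_ifh_set_dest(ifh, BIT_ULL(3));		/* inject to port 3 */
	ocelot_ifh_set_qos_class(ifh, 7);
	ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
	ocelot_ifh_set_vlan_tci(ifh, 100);		/* classified VID 100 */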
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
new file mode 100644
index 000000000000..b9dd35d4b8f5
--- /dev/null
+++ b/include/linux/dsa/sja1105.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+/* Included by drivers/net/dsa/sja1105/sja1105.h and net/dsa/tag_sja1105.c */
+
+#ifndef _NET_DSA_SJA1105_H
+#define _NET_DSA_SJA1105_H
+
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/dsa/8021q.h>
+#include <net/dsa.h>
+
+#define ETH_P_SJA1105 ETH_P_DSA_8021Q
+#define ETH_P_SJA1105_META 0x0008
+#define ETH_P_SJA1110 0xdadc
+
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
+
+/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
+#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
+#define SJA1105_LINKLOCAL_FILTER_A_MASK 0xFFFFFF000000ull
+/* IEEE 1588 Annex F: Transport of PTP over Ethernet (01:1B:19:xx:xx:xx) */
+#define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull
+#define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull
+
+/* Source and Destination MAC of follow-up meta frames.
+ * Whereas the choice of SMAC only affects the unique identification of the
+ * switch as sender of meta frames, the DMAC must be an address that is present
+ * in the DSA conduit port's multicast MAC filter.
+ * 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588
+ * over L2 use this address for some purpose already.
+ */
+#define SJA1105_META_SMAC 0x222222222222ull
+#define SJA1105_META_DMAC 0x0180C200000Eull
+
+enum sja1110_meta_tstamp {
+ SJA1110_META_TSTAMP_TX = 0,
+ SJA1110_META_TSTAMP_RX = 1,
+};
+
+struct sja1105_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+/* Global tagger data */
+struct sja1105_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*meta_tstamp_handler)(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
+};
+
+struct sja1105_skb_cb {
+ struct sk_buff *clone;
+ u64 tstamp;
+ /* Only valid for packets cloned for 2-step TX timestamping */
+ u8 ts_id;
+};
+
+#define SJA1105_SKB_CB(skb) \
+ ((struct sja1105_skb_cb *)((skb)->cb))
+
+static inline struct sja1105_tagger_data *
+sja1105_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1105 &&
+ ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1110);
+
+ return ds->tagger_data;
+}
+
+#endif /* _NET_DSA_SJA1105_H */
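
A sketch of a switch driver publishing its deferred-xmit handler through
sja1105_tagger_data() (illustrative, not part of this diff; the handler
body is a placeholder that only frees the work item):

	static void example_deferred_xmit(struct kthread_work *work)
	{
		struct sja1105_deferred_xmit_work *xmit_work =
			container_of(work, struct sja1105_deferred_xmit_work, work);

		/* a real handler would transmit xmit_work->skb over a
		 * management route for xmit_work->dp here
		 */
		kfree_skb(xmit_work->skb);
		kfree(xmit_work);
	}

	static void example_setup_tagger(struct dsa_switch *ds)
	{
		struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);

		tagger_data->xmit_work_fn = example_deferred_xmit;
	}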
diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h
new file mode 100644
index 000000000000..ee657452f122
--- /dev/null
+++ b/include/linux/dsa/tag_qca.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TAG_QCA_H
+#define __TAG_QCA_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct dsa_switch;
+struct sk_buff;
+
+#define QCA_HDR_LEN 2
+#define QCA_HDR_VERSION 0x2
+
+#define QCA_HDR_RECV_VERSION GENMASK(15, 14)
+#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_RECV_TYPE GENMASK(10, 6)
+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0)
+
+/* Packet type for recv */
+#define QCA_HDR_RECV_TYPE_NORMAL 0x0
+#define QCA_HDR_RECV_TYPE_MIB 0x1
+#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2
+
+#define QCA_HDR_XMIT_VERSION GENMASK(15, 14)
+#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8)
+#define QCA_HDR_XMIT_FROM_CPU BIT(7)
+#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0)
+
+/* Packet type for xmit */
+#define QCA_HDR_XMIT_TYPE_NORMAL 0x0
+#define QCA_HDR_XMIT_TYPE_RW_REG 0x1
+
+/* Check code for a valid mgmt packet. The switch will ignore the packet
+ * if this value is wrong.
+ */
+#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5
+
+/* Specific define for in-band MDIO read/write with Ethernet packet */
+#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 bytes for the seq number */
+#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 bytes for the command */
+#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 bytes of mdio data */
+#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \
+ QCA_HDR_MGMT_COMMAND_LEN + \
+ QCA_HDR_MGMT_DATA1_LEN)
+
+#define QCA_HDR_MGMT_DATA2_LEN 28 /* Remaining 28 bytes of mdio data */
+#define QCA_HDR_MGMT_PADDING_LEN 18 /* Padding to reach the minimum Ethernet frame size */
+
+#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \
+ QCA_HDR_LEN + \
+ QCA_HDR_MGMT_DATA2_LEN + \
+ QCA_HDR_MGMT_PADDING_LEN)
+
+#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */
+#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */
+#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */
+#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */
+#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */
+
+/* Special struct emulating an Ethernet header */
+struct qca_mgmt_ethhdr {
+ __le32 command; /* command bit 31:0 */
+ __le32 seq; /* seq 63:32 */
+ __le32 mdio_data; /* first 4 bytes of mdio data */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+enum mdio_cmd {
+ MDIO_WRITE = 0x0,
+ MDIO_READ
+};
+
+struct mib_ethhdr {
+ __le32 data[3]; /* first 3 MIB counters */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+struct qca_tagger_data {
+ void (*rw_reg_ack_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+ void (*mib_autocast_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+};
+
+#endif /* __TAG_QCA_H */
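
Parsing a received header with the masks above (illustrative, not part of
this diff; FIELD_GET() comes from <linux/bitfield.h> and the hdr value is
fabricated):

	u16 hdr = 0x8082;	/* made-up 2-byte QCA header */

	if (FIELD_GET(QCA_HDR_RECV_VERSION, hdr) == QCA_HDR_VERSION &&
	    FIELD_GET(QCA_HDR_RECV_TYPE, hdr) == QCA_HDR_RECV_TYPE_RW_REG_ACK) {
		/* hand the skb to the driver's rw_reg_ack_handler */
	}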
diff --git a/include/linux/dtlk.h b/include/linux/dtlk.h
index 22a7b9a5f5d1..27b95e70bde3 100644
--- a/include/linux/dtlk.h
+++ b/include/linux/dtlk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define DTLK_MINOR 0
#define DTLK_IO_EXTENT 0x02
diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h
new file mode 100644
index 000000000000..a4a13514b730
--- /dev/null
+++ b/include/linux/dtpm.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#ifndef ___DTPM_H__
+#define ___DTPM_H__
+
+#include <linux/powercap.h>
+
+#define MAX_DTPM_DESCR 8
+#define MAX_DTPM_CONSTRAINTS 1
+
+struct dtpm {
+ struct powercap_zone zone;
+ struct dtpm *parent;
+ struct list_head sibling;
+ struct list_head children;
+ struct dtpm_ops *ops;
+ unsigned long flags;
+ u64 power_limit;
+ u64 power_max;
+ u64 power_min;
+ int weight;
+};
+
+struct dtpm_ops {
+ u64 (*set_power_uw)(struct dtpm *, u64);
+ u64 (*get_power_uw)(struct dtpm *);
+ int (*update_power_uw)(struct dtpm *);
+ void (*release)(struct dtpm *);
+};
+
+struct device_node;
+
+struct dtpm_subsys_ops {
+ const char *name;
+ int (*init)(void);
+ void (*exit)(void);
+ int (*setup)(struct dtpm *, struct device_node *);
+};
+
+enum DTPM_NODE_TYPE {
+ DTPM_NODE_VIRTUAL = 0,
+ DTPM_NODE_DT,
+};
+
+struct dtpm_node {
+ enum DTPM_NODE_TYPE type;
+ const char *name;
+ struct dtpm_node *parent;
+};
+
+static inline struct dtpm *to_dtpm(struct powercap_zone *zone)
+{
+ return container_of(zone, struct dtpm, zone);
+}
+
+int dtpm_update_power(struct dtpm *dtpm);
+
+int dtpm_release_zone(struct powercap_zone *pcz);
+
+void dtpm_init(struct dtpm *dtpm, struct dtpm_ops *ops);
+
+void dtpm_unregister(struct dtpm *dtpm);
+
+int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent);
+
+int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table);
+
+void dtpm_destroy_hierarchy(void);
+#endif
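
A hedged sketch of registering a virtual DTPM zone under the root
(illustrative, not part of this diff; names and the power value are made
up, and a real backend would also provide set_power_uw/update_power_uw):

	static u64 example_get_power_uw(struct dtpm *dtpm)
	{
		return 1000000;	/* 1 W, placeholder */
	}

	static void example_release(struct dtpm *dtpm)
	{
		kfree(dtpm);
	}

	static struct dtpm_ops example_dtpm_ops = {
		.get_power_uw = example_get_power_uw,
		.release = example_release,
	};

	static int example_dtpm_register(void)
	{
		struct dtpm *zone = kzalloc(sizeof(*zone), GFP_KERNEL);

		if (!zone)
			return -ENOMEM;

		dtpm_init(zone, &example_dtpm_ops);
		return dtpm_register("example", zone, NULL);	/* under root */
	}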
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 4334106f44c3..f8811c46b89e 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* (C) Copyright 2009 Intel Corporation
* Author: Jacob Pan (jacob.jun.pan@intel.com)
*
* Shared with ARM platforms, Jamie Iles, Picochip 2011
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Support for the Synopsys DesignWare APB Timers.
*/
#ifndef __DW_APB_TIMER_H__
@@ -28,7 +25,6 @@ struct dw_apb_timer {
struct dw_apb_clock_event_device {
struct clock_event_device ced;
struct dw_apb_timer timer;
- struct irqaction irqaction;
void (*eoi)(struct dw_apb_timer *);
};
@@ -38,9 +34,6 @@ struct dw_apb_clocksource {
};
void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced);
struct dw_apb_clock_event_device *
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 546d68057e3b..05743900a116 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,10 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#if defined(CONFIG_JUMP_LABEL)
#include <linux/jump_label.h>
#endif
+#include <linux/build_bug.h>
+
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -20,6 +23,9 @@ struct _ddebug {
const char *filename;
const char *format;
unsigned int lineno:18;
+#define CLS_BITS 6
+ unsigned int class_id:CLS_BITS;
+#define _DPRINTK_CLASS_DFLT ((1 << CLS_BITS) - 1)
/*
* The flags field controls the behaviour at the callsite.
* The bits here are changed dynamically when the user
@@ -31,13 +37,21 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+#define _DPRINTK_FLAGS_INCL_SOURCENAME (1<<5)
+#define _DPRINTK_FLAGS_INCL_STACK (1<<6)
+
+#define _DPRINTK_FLAGS_INCL_ANY \
+ (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID |\
+ _DPRINTK_FLAGS_INCL_SOURCENAME | _DPRINTK_FLAGS_INCL_STACK)
+
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
#else
#define _DPRINTK_FLAGS_DEFAULT 0
#endif
unsigned int flags:8;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
union {
struct static_key_true dd_key_true;
struct static_key_false dd_key_false;
@@ -45,18 +59,88 @@ struct _ddebug {
#endif
} __attribute__((aligned(8)));
+enum class_map_type {
+ DD_CLASS_TYPE_DISJOINT_BITS,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_BITS: classes are independent, one per bit.
+ * expecting hex input. Built for drm.debug, basis for other types.
+ */
+ DD_CLASS_TYPE_LEVEL_NUM,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NUM: input is numeric level, 0-N.
+ * N turns on just bits N-1 .. 0, so N=0 turns all bits off.
+ */
+ DD_CLASS_TYPE_DISJOINT_NAMES,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * classes are independent, like _DISJOINT_BITS.
+ */
+ DD_CLASS_TYPE_LEVEL_NAMES,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * intended for names like INFO,DEBUG,TRACE, with a module prefix.
+ * Avoid EMERG,ALERT,CRIT,ERR,WARNING: they're not debug levels.
+ */
+};
-int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname);
+struct ddebug_class_map {
+ struct list_head link;
+ struct module *mod;
+ const char *mod_name; /* needed for builtins */
+ const char **class_names;
+ const int length;
+ const int base; /* index of 1st .class_id, allows split/shared space */
+ enum class_map_type map_type;
+};
+
+/**
+ * DECLARE_DYNDBG_CLASSMAP - declare classnames known by a module
+ * @_var: a struct ddebug_class_map, passed to module_param_cb
+ * @_maptype: enum class_map_type, chooses bits/verbose, numeric/symbolic
+ * @_base: offset of 1st class-name. splits .class_id space
+ * @classes: class-names used to control class'd prdbgs
+ */
+#define DECLARE_DYNDBG_CLASSMAP(_var, _maptype, _base, ...) \
+ static const char *_var##_classnames[] = { __VA_ARGS__ }; \
+ static struct ddebug_class_map __aligned(8) __used \
+ __section("__dyndbg_classes") _var = { \
+ .mod = THIS_MODULE, \
+ .mod_name = KBUILD_MODNAME, \
+ .base = _base, \
+ .map_type = _maptype, \
+ .length = NUM_TYPE_ARGS(char*, __VA_ARGS__), \
+ .class_names = _var##_classnames, \
+ }
+#define NUM_TYPE_ARGS(eltype, ...) \
+ (sizeof((eltype[]){__VA_ARGS__}) / sizeof(eltype))
+
+/* encapsulate linker provided built-in (or module) dyndbg data */
+struct _ddebug_info {
+ struct _ddebug *descs;
+ struct ddebug_class_map *classes;
+ unsigned int num_descs;
+ unsigned int num_classes;
+};
+
+struct ddebug_class_param {
+ union {
+ unsigned long *bits;
+ unsigned int *lvl;
+ };
+ char flags[8];
+ const struct ddebug_class_map *map;
+};
+
+/*
+ * pr_debug() and friends are globally enabled or modules have selectively
+ * enabled them.
+ */
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
-#if defined(CONFIG_DYNAMIC_DEBUG)
-extern int ddebug_remove_module(const char *mod_name);
extern __printf(2, 3)
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
-extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
- const char *modname);
-
struct device;
extern __printf(3, 4)
@@ -70,44 +154,55 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
const struct net_device *dev,
const char *fmt, ...);
-#define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \
+struct ib_device;
+
+extern __printf(3, 4)
+void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
+ const struct ib_device *ibdev,
+ const char *fmt, ...);
+
+#define __dynamic_dump_stack(desc) \
+{ \
+ if (desc.flags & _DPRINTK_FLAGS_INCL_STACK) \
+ dump_stack(); \
+}
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, cls, fmt) \
static struct _ddebug __aligned(8) \
- __attribute__((section("__verbose"))) name = { \
+ __section("__dyndbg") name = { \
.modname = KBUILD_MODNAME, \
.function = __func__, \
.filename = __FILE__, \
.format = (fmt), \
.lineno = __LINE__, \
.flags = _DPRINTK_FLAGS_DEFAULT, \
- dd_key_init(key, init) \
- }
+ .class_id = cls, \
+ _DPRINTK_KEY_INIT \
+ }; \
+ BUILD_BUG_ON_MSG(cls > _DPRINTK_CLASS_DFLT, \
+ "classid value overflow")
-#ifdef HAVE_JUMP_LABEL
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, _DPRINTK_CLASS_DFLT, fmt)
-#define dd_key_init(key, init) key = (init)
+#ifdef CONFIG_JUMP_LABEL
#ifdef DEBUG
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
- DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \
- (STATIC_KEY_TRUE_INIT))
+
+#define _DPRINTK_KEY_INIT .key.dd_key_true = (STATIC_KEY_TRUE_INIT)
#define DYNAMIC_DEBUG_BRANCH(descriptor) \
static_branch_likely(&descriptor.key.dd_key_true)
#else
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
- DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \
- (STATIC_KEY_FALSE_INIT))
+#define _DPRINTK_KEY_INIT .key.dd_key_false = (STATIC_KEY_FALSE_INIT)
#define DYNAMIC_DEBUG_BRANCH(descriptor) \
static_branch_unlikely(&descriptor.key.dd_key_false)
#endif
-#else
-
-#define dd_key_init(key, init)
+#else /* !CONFIG_JUMP_LABEL */
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
- DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0)
+#define _DPRINTK_KEY_INIT
#ifdef DEBUG
#define DYNAMIC_DEBUG_BRANCH(descriptor) \
@@ -117,57 +212,137 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
#endif
-#endif
+#endif /* CONFIG_JUMP_LABEL */
-#define dynamic_pr_debug(fmt, ...) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
- __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
- ##__VA_ARGS__); \
+/*
+ * Factory macros: ($prefix)dynamic_func_call($suffix)
+ *
+ * Lower layer (with __ prefix) gets the callsite metadata, and wraps
+ * the func inside a debug-branch/static-key construct. Upper layer
+ * (with _ prefix) does the UNIQUE_ID once, so that lower can ref the
+ * name/label multiple times, and tie the elements together.
+ * Multiple flavors:
+ * (|_cls): adds in _DPRINTK_CLASS_DFLT as needed
+ * (|_no_desc): the former passes the callsite descriptor as 1st arg (for prdbgs)
+ */
+#define __dynamic_func_call_cls(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(id)) { \
+ func(&id, ##__VA_ARGS__); \
+ __dynamic_dump_stack(id); \
+ } \
} while (0)
+#define __dynamic_func_call(id, fmt, func, ...) \
+ __dynamic_func_call_cls(id, _DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
-#define dynamic_dev_dbg(dev, fmt, ...) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
- __dynamic_dev_dbg(&descriptor, dev, fmt, \
- ##__VA_ARGS__); \
+#define __dynamic_func_call_cls_no_desc(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(id)) { \
+ func(__VA_ARGS__); \
+ __dynamic_dump_stack(id); \
+ } \
} while (0)
+#define __dynamic_func_call_no_desc(id, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(id, _DPRINTK_CLASS_DFLT, \
+ fmt, func, ##__VA_ARGS__)
+
+/*
+ * "Factory macro" for generating a call to func, guarded by a
+ * DYNAMIC_DEBUG_BRANCH. The dynamic debug descriptor will be
+ * initialized using the fmt argument. The function will be called with
+ * the address of the descriptor as first argument, followed by all
+ * the varargs. Note that fmt is repeated in invocations of this
+ * macro.
+ */
+#define _dynamic_func_call_cls(cls, fmt, func, ...) \
+ __dynamic_func_call_cls(__UNIQUE_ID(ddebug), cls, fmt, func, ##__VA_ARGS__)
+#define _dynamic_func_call(fmt, func, ...) \
+ _dynamic_func_call_cls(_DPRINTK_CLASS_DFLT, fmt, func, ##__VA_ARGS__)
+
+/*
+ * A variant that does the same, except that the descriptor is not
+ * passed as the first argument to the function; it is only called
+ * with precisely the macro's varargs.
+ */
+#define _dynamic_func_call_cls_no_desc(cls, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(__UNIQUE_ID(ddebug), cls, fmt, \
+ func, ##__VA_ARGS__)
+#define _dynamic_func_call_no_desc(fmt, func, ...) \
+ _dynamic_func_call_cls_no_desc(_DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define dynamic_pr_debug_cls(cls, fmt, ...) \
+ _dynamic_func_call_cls(cls, fmt, __dynamic_pr_debug, \
+ pr_fmt(fmt), ##__VA_ARGS__)
+
+#define dynamic_pr_debug(fmt, ...) \
+ _dynamic_func_call(fmt, __dynamic_pr_debug, \
+ pr_fmt(fmt), ##__VA_ARGS__)
+
+#define dynamic_dev_dbg(dev, fmt, ...) \
+ _dynamic_func_call(fmt, __dynamic_dev_dbg, \
+ dev, fmt, ##__VA_ARGS__)
#define dynamic_netdev_dbg(dev, fmt, ...) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
- __dynamic_netdev_dbg(&descriptor, dev, fmt, \
- ##__VA_ARGS__); \
-} while (0)
+ _dynamic_func_call(fmt, __dynamic_netdev_dbg, \
+ dev, fmt, ##__VA_ARGS__)
-#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
- groupsize, buf, len, ascii) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \
- __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
- if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
- print_hex_dump(KERN_DEBUG, prefix_str, \
- prefix_type, rowsize, groupsize, \
- buf, len, ascii); \
-} while (0)
+#define dynamic_ibdev_dbg(dev, fmt, ...) \
+ _dynamic_func_call(fmt, __dynamic_ibdev_dbg, \
+ dev, fmt, ##__VA_ARGS__)
-#else
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ _dynamic_func_call_no_desc(__builtin_constant_p(prefix_str) ? prefix_str : "hexdump", \
+ print_hex_dump, \
+ KERN_DEBUG, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii)
+
+/* for test only, generally expect drm.debug style macro wrappers */
+#define __pr_debug_cls(cls, fmt, ...) do { \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(cls), \
+ "expecting constant class int/enum"); \
+ dynamic_pr_debug_cls(cls, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#else /* !(CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE)) */
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/printk.h>
-static inline int ddebug_remove_module(const char *mod)
-{
- return 0;
-}
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
+#define DYNAMIC_DEBUG_BRANCH(descriptor) false
+
+#define dynamic_pr_debug(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#define dynamic_dev_dbg(dev, fmt, ...) \
+ dev_no_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ do { if (0) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii); \
+ } while (0)
+
+#endif /* CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE) */
+
+
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+
+extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
+ const char *modname);
+struct kernel_param;
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp);
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp);
+
+#else
static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
const char *modname)
{
- if (strstr(param, "dyndbg")) {
+ if (!strcmp(param, "dyndbg")) {
/* avoid pr_warn(), which wants pr_fmt() fully defined */
printk(KERN_WARNING "dyndbg param is supported only in "
"CONFIG_DYNAMIC_DEBUG builds\n");
@@ -176,10 +351,15 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
return -EINVAL;
}
-#define dynamic_pr_debug(fmt, ...) \
- do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
-#define dynamic_dev_dbg(dev, fmt, ...) \
- do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
-#endif
+struct kernel_param;
+static inline int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
+{ return 0; }
+static inline int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
+{ return 0; }
#endif
+
+
+extern const struct kernel_param_ops param_ops_dyndbg_classes;
+
+#endif /* _DYNAMIC_DEBUG_H */
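
Tying the classmap pieces together, drm.debug-style (illustrative, not
part of this diff; all names are made up):

	DECLARE_DYNDBG_CLASSMAP(example_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
				"EXAMPLE_CORE", "EXAMPLE_KMS");

	static unsigned long example_debug;

	static struct ddebug_class_param example_debug_param = {
		.bits = &example_debug,
		.flags = "p",
		.map = &example_classes,
	};
	module_param_cb(debug, &param_ops_dyndbg_classes,
			&example_debug_param, 0600);

	/* class 0 ("EXAMPLE_CORE") gates this callsite */
	__pr_debug_cls(0, "core event: %d\n", 42);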
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index a4be70398ce1..808b1a5102e7 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Dynamic queue limits (dql) - Definitions
*
@@ -37,12 +38,25 @@
#ifdef __KERNEL__
+#include <linux/bitops.h>
+#include <asm/bug.h>
+
+#define DQL_HIST_LEN 4
+#define DQL_HIST_ENT(dql, idx) ((dql)->history[(idx) % DQL_HIST_LEN])
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
unsigned int adj_limit; /* limit + num_completed */
unsigned int last_obj_cnt; /* Count at last queuing */
+ /* Stall threshold (in jiffies), defined by user */
+ unsigned short stall_thrs;
+
+ unsigned long history_head; /* top 58 bits of jiffies */
+ /* stall entries, a bit per entry */
+ unsigned long history[DQL_HIST_LEN];
+
/* Fields accessed only by completion path (dql_completed) */
unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */
@@ -59,21 +73,61 @@ struct dql {
unsigned int max_limit; /* Max limit */
unsigned int min_limit; /* Minimum limit */
unsigned int slack_hold_time; /* Time to measure slack */
+
+ /* Longest stall detected, reported to user */
+ unsigned short stall_max;
+ unsigned long last_reap; /* Last reap (in jiffies) */
+ unsigned long stall_cnt; /* Number of stalls */
};
/* Set some static maximums */
#define DQL_MAX_OBJECT (UINT_MAX / 16)
#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
+/* Populate the bitmap to be processed later in dql_check_stall() */
+static inline void dql_queue_stall(struct dql *dql)
+{
+ unsigned long map, now, now_hi, i;
+
+ now = jiffies;
+ now_hi = now / BITS_PER_LONG;
+
+ /* The following code sets a bit in the ring buffer, where each
+ * bit tracks the time at which a packet was queued. The dql->history
+ * buffer covers DQL_HIST_LEN * BITS_PER_LONG time (jiffies) slots.
+ */
+ if (unlikely(now_hi != dql->history_head)) {
+ /* About to reuse slots, clear them */
+ for (i = 0; i < DQL_HIST_LEN; i++) {
+ /* Multiplication masks high bits */
+ if (now_hi * BITS_PER_LONG ==
+ (dql->history_head + i) * BITS_PER_LONG)
+ break;
+ DQL_HIST_ENT(dql, dql->history_head + i + 1) = 0;
+ }
+ /* pairs with smp_rmb() in dql_check_stall() */
+ smp_wmb();
+ WRITE_ONCE(dql->history_head, now_hi);
+ }
+
+ /* __set_bit() does not guarantee WRITE_ONCE() semantics */
+ map = DQL_HIST_ENT(dql, now_hi);
+
+ /* Populate the history with an entry (bit) per queued */
+ if (!(map & BIT_MASK(now)))
+ WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now));
+}
+
/*
* Record number of objects queued. Assumes that caller has already checked
* availability in the queue with dql_avail.
*/
static inline void dql_queued(struct dql *dql, unsigned int count)
{
- BUG_ON(count > DQL_MAX_OBJECT);
+ if (WARN_ON_ONCE(count > DQL_MAX_OBJECT))
+ return;
- dql->last_obj_cnt = count;
+ WRITE_ONCE(dql->last_obj_cnt, count);
/* We want to force a write first, so that cpu do not attempt
* to get cache line containing last_obj_cnt, num_queued, adj_limit
@@ -83,12 +137,16 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
barrier();
dql->num_queued += count;
+
+ /* Only populate stall information if the threshold is set */
+ if (READ_ONCE(dql->stall_thrs))
+ dql_queue_stall(dql);
}
/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
- return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
+ return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued);
}
/* Record number of completed objects and recalculate the limit. */
@@ -98,7 +156,7 @@ void dql_completed(struct dql *dql, unsigned int count);
void dql_reset(struct dql *dql);
/* Initialize dql state */
-int dql_init(struct dql *dql, unsigned hold_time);
+void dql_init(struct dql *dql, unsigned int hold_time);
#endif /* __KERNEL__ */
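
The dql contract from a driver's perspective (illustrative, not part of
this diff; txq_dql, skb and bytes_completed are assumed to exist, and real
drivers normally use the netdev_tx_*_queue() wrappers instead):

	/* enqueue path, e.g. ndo_start_xmit */
	if (dql_avail(&txq_dql) < 0)
		return NETDEV_TX_BUSY;		/* over limit, back off */
	dql_queued(&txq_dql, skb->len);		/* bytes handed to hw */

	/* completion path, e.g. the TX interrupt handler */
	dql_completed(&txq_dql, bytes_completed);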
diff --git a/include/linux/earlycpio.h b/include/linux/earlycpio.h
index 111f46d83d00..c70519267c77 100644
--- a/include/linux/earlycpio.h
+++ b/include/linux/earlycpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EARLYCPIO_H
#define _LINUX_EARLYCPIO_H
diff --git a/include/linux/ecryptfs.h b/include/linux/ecryptfs.h
index 8d5ab998a222..91e142abf7e8 100644
--- a/include/linux/ecryptfs.h
+++ b/include/linux/ecryptfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ECRYPTFS_H
#define _LINUX_ECRYPTFS_H
diff --git a/include/linux/edac.h b/include/linux/edac.h
index cd75c173fd00..fa32f2aca22f 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#include <linux/numa.h>
#define EDAC_DEVICE_NAME_LEN 31
@@ -29,15 +30,7 @@ struct device;
extern int edac_op_state;
-struct bus_type *edac_get_sysfs_subsys(void);
-int edac_get_report_status(void);
-void edac_set_report_status(int new);
-
-enum {
- EDAC_REPORTING_ENABLED,
- EDAC_REPORTING_DISABLED,
- EDAC_REPORTING_FORCE
-};
+const struct bus_type *edac_get_sysfs_subsys(void);
static inline void opstate_init(void)
{
@@ -182,10 +175,19 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
* @MEM_LRDDR3: Load-Reduced DDR3 memory.
+ * @MEM_LPDDR3: Low-Power DDR3 memory.
* @MEM_DDR4: Unbuffered DDR4 RAM
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
+ * @MEM_LPDDR4: Low-Power DDR4 memory.
+ * @MEM_DDR5: Unbuffered DDR5 RAM
+ * @MEM_RDDR5: Registered DDR5 RAM
+ * @MEM_LRDDR5: Load-Reduced DDR5 memory.
+ * @MEM_NVDIMM: Non-volatile RAM
+ * @MEM_WIO2: Wide I/O 2.
+ * @MEM_HBM2: High Bandwidth Memory Gen 2.
+ * @MEM_HBM3: High Bandwidth Memory Gen 3.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -206,9 +208,18 @@ enum mem_type {
MEM_DDR3,
MEM_RDDR3,
MEM_LRDDR3,
+ MEM_LPDDR3,
MEM_DDR4,
MEM_RDDR4,
MEM_LRDDR4,
+ MEM_LPDDR4,
+ MEM_DDR5,
+ MEM_RDDR5,
+ MEM_LRDDR5,
+ MEM_NVDIMM,
+ MEM_WIO2,
+ MEM_HBM2,
+ MEM_HBM3,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -222,18 +233,27 @@ enum mem_type {
#define MEM_FLAG_DDR BIT(MEM_DDR)
#define MEM_FLAG_RDDR BIT(MEM_RDDR)
#define MEM_FLAG_RMBS BIT(MEM_RMBS)
-#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
-#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
-#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
-#define MEM_FLAG_XDR BIT(MEM_XDR)
-#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
-#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
-#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
-#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
-#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
+#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3)
+#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
+#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
+#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
+#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
+#define MEM_FLAG_RDDR5 BIT(MEM_RDDR5)
+#define MEM_FLAG_LRDDR5 BIT(MEM_LRDDR5)
+#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
+#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
+#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
+#define MEM_FLAG_HBM3 BIT(MEM_HBM3)
/**
- * enum edac-type - Error Detection and Correction capabilities and mode
+ * enum edac_type - Error Detection and Correction capabilities and mode
* @EDAC_UNKNOWN: Unknown if ECC is available
* @EDAC_NONE: Doesn't support ECC
* @EDAC_RESERVED: Reserved ECC type
@@ -313,7 +333,7 @@ enum scrub_type {
#define OP_OFFLINE 0x300
/**
- * enum edac_mc_layer - memory controller hierarchy layer
+ * enum edac_mc_layer_type - memory controller hierarchy layer
*
* @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
* @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
@@ -358,87 +378,16 @@ struct edac_mc_layer {
*/
#define EDAC_MAX_LAYERS 3
-/**
- * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer
- * array for the element given by [layer0,layer1,layer2]
- * position
- *
- * @layers: a struct edac_mc_layer array, describing how many elements
- * were allocated for each layer
- * @nlayers: Number of layers at the @layers array
- * @layer0: layer0 position
- * @layer1: layer1 position. Unused if n_layers < 2
- * @layer2: layer2 position. Unused if n_layers < 3
- *
- * For 1 layer, this macro returns "var[layer0] - var";
- *
- * For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "var[layer0][layer1] - var";
- *
- * For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "var[layer0][layer1][layer2] - var".
- *
- * A loop could be used here to make it more generic, but, as we only have
- * 3 layers, this is a little faster.
- *
- * By design, layers can never be 0 or more than 3. If that ever happens,
- * a NULL is returned, causing an OOPS during the memory allocation routine,
- * with would point to the developer that he's doing something wrong.
- */
-#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \
- int __i; \
- if ((nlayers) == 1) \
- __i = layer0; \
- else if ((nlayers) == 2) \
- __i = (layer1) + ((layers[1]).size * (layer0)); \
- else if ((nlayers) == 3) \
- __i = (layer2) + ((layers[2]).size * ((layer1) + \
- ((layers[1]).size * (layer0)))); \
- else \
- __i = -EINVAL; \
- __i; \
-})
-
-/**
- * EDAC_DIMM_PTR - Macro responsible to get a pointer inside a pointer array
- * for the element given by [layer0,layer1,layer2] position
- *
- * @layers: a struct edac_mc_layer array, describing how many elements
- * were allocated for each layer
- * @var: name of the var where we want to get the pointer
- * (like mci->dimms)
- * @nlayers: Number of layers at the @layers array
- * @layer0: layer0 position
- * @layer1: layer1 position. Unused if n_layers < 2
- * @layer2: layer2 position. Unused if n_layers < 3
- *
- * For 1 layer, this macro returns "var[layer0]";
- *
- * For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "var[layer0][layer1]";
- *
- * For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "var[layer0][layer1][layer2]";
- */
-#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
- typeof(*var) __p; \
- int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \
- if (___i < 0) \
- __p = NULL; \
- else \
- __p = (var)[___i]; \
- __p; \
-})
-
struct dimm_info {
struct device dev;
char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
/* Memory location data */
- unsigned location[EDAC_MAX_LAYERS];
+ unsigned int location[EDAC_MAX_LAYERS];
struct mem_ctl_info *mci; /* the parent */
+ unsigned int idx; /* index within the parent dimm array */
u32 grain; /* granularity of reported error in bytes */
enum dev_type dtype; /* memory device type */
@@ -447,7 +396,12 @@ struct dimm_info {
u32 nr_pages; /* number of pages on this dimm */
- unsigned csrow, cschannel; /* Points to the old API data */
+ unsigned int csrow, cschannel; /* Points to the old API data */
+
+ u16 smbios_handle; /* Handle for SMBIOS type 17 */
+
+ u32 ce_count;
+ u32 ue_count;
};
/**
@@ -507,6 +461,7 @@ struct errcount_attribute_data {
* struct edac_raw_error_desc - Raw error report structure
* @grain: minimum granularity for an error report, in bytes
* @error_count: number of errors of the same type
+ * @type: severity of the error (CE/UE/Fatal)
* @top_layer: top layer of the error (layer[0])
* @mid_layer: middle layer of the error (layer[1])
* @low_layer: low layer of the error (layer[2])
@@ -518,20 +473,14 @@ struct errcount_attribute_data {
* @location: location of the error
* @label: label of the affected DIMM(s)
* @other_detail: other driver-specific detail about the error
- * @enable_per_layer_report: if false, the error affects all layers
- * (typically, a memory controller error)
*/
struct edac_raw_error_desc {
- /*
- * NOTE: everything before grain won't be cleaned by
- * edac_raw_error_desc_clean()
- */
char location[LOCATION_SIZE];
char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
long grain;
- /* the vars below and grain will be cleaned on every new error report */
u16 error_count;
+ enum hw_event_mc_err_type type;
int top_layer;
int mid_layer;
int low_layer;
@@ -540,14 +489,13 @@ struct edac_raw_error_desc {
unsigned long syndrome;
const char *msg;
const char *other_detail;
- bool enable_per_layer_report;
};
/* MEMORY controller information structure
*/
struct mem_ctl_info {
struct device dev;
- struct bus_type *bus;
+ const struct bus_type *bus;
struct list_head link; /* for global list of mem_ctl_info structs */
@@ -591,7 +539,7 @@ struct mem_ctl_info {
unsigned long page);
int mc_idx;
struct csrow_info **csrows;
- unsigned nr_csrows, num_cschannel;
+ unsigned int nr_csrows, num_cschannel;
/*
* Memory Controller hierarchy
@@ -602,14 +550,14 @@ struct mem_ctl_info {
* of the recent drivers enumerate memories per DIMM, instead.
* When the memory controller is per rank, csbased is true.
*/
- unsigned n_layers;
+ unsigned int n_layers;
struct edac_mc_layer *layers;
bool csbased;
/*
* DIMM info. Will eventually remove the entire csrows_info some day
*/
- unsigned tot_dimms;
+ unsigned int tot_dimms;
struct dimm_info **dimms;
/*
@@ -630,7 +578,6 @@ struct mem_ctl_info {
*/
u32 ce_noinfo_count, ue_noinfo_count;
u32 ue_mc, ce_mc;
- u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
struct completion complete;
@@ -664,9 +611,276 @@ struct mem_ctl_info {
u16 fake_inject_count;
};
-/*
- * Maximum number of memory controllers in the coherent fabric.
+#define mci_for_each_dimm(mci, dimm) \
+ for ((dimm) = (mci)->dimms[0]; \
+ (dimm); \
+ (dimm) = (dimm)->idx + 1 < (mci)->tot_dimms \
+ ? (mci)->dimms[(dimm)->idx + 1] \
+ : NULL)
+
+/**
+ * edac_get_dimm - Get DIMM info from a memory controller given by
+ * [layer0,layer1,layer2] position
+ *
+ * @mci: MC descriptor struct mem_ctl_info
+ * @layer0: layer0 position
+ * @layer1: layer1 position. Unused if n_layers < 2
+ * @layer2: layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this function returns "dimms[layer0]";
+ *
+ * For 2 layers, this function is similar to allocating a two-dimensional
+ * array and returning "dimms[layer0][layer1]";
+ *
+ * For 3 layers, this function is similar to allocating a three-dimensional
+ * array and returning "dimms[layer0][layer1][layer2]";
+ */
+static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
+ int layer0, int layer1, int layer2)
+{
+ int index;
+
+ if (layer0 < 0
+ || (mci->n_layers > 1 && layer1 < 0)
+ || (mci->n_layers > 2 && layer2 < 0))
+ return NULL;
+
+ index = layer0;
+
+ if (mci->n_layers > 1)
+ index = index * mci->layers[1].size + layer1;
+
+ if (mci->n_layers > 2)
+ index = index * mci->layers[2].size + layer2;
+
+ if (index < 0 || index >= mci->tot_dimms)
+ return NULL;
+
+ if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
+ return NULL;
+
+ return mci->dimms[index];
+}
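
Worked example of the index math above (illustrative, not part of this
diff): on a hypothetical 2-layer controller with layers[1].size == 4,
(layer0 == 2, layer1 == 3) selects index 2 * 4 + 3 == 11:

	struct dimm_info *dimm = edac_get_dimm(mci, 2, 3, 0);

	if (dimm)
		pr_debug("dimm idx %u label %s\n", dimm->idx, dimm->label);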
+
+#define EDAC_FEAT_NAME_LEN 128
+
+/* RAS feature type */
+enum edac_dev_feat {
+ RAS_FEAT_SCRUB,
+ RAS_FEAT_ECS,
+ RAS_FEAT_MEM_REPAIR,
+ RAS_FEAT_MAX
+};
+
+/**
+ * struct edac_scrub_ops - scrub device operations (all elements optional)
+ * @read_addr: read base address of scrubbing range.
+ * @read_size: read size of scrubbing range.
+ * @write_addr: set base address of the scrubbing range.
+ * @write_size: set size of the scrubbing range.
+ * @get_enabled_bg: check if currently performing background scrub.
+ * @set_enabled_bg: start or stop a bg-scrub.
+ * @get_min_cycle: get minimum supported scrub cycle duration in seconds.
+ * @get_max_cycle: get maximum supported scrub cycle duration in seconds.
+ * @get_cycle_duration: get current scrub cycle duration in seconds.
+ * @set_cycle_duration: set current scrub cycle duration in seconds.
+ */
+struct edac_scrub_ops {
+ int (*read_addr)(struct device *dev, void *drv_data, u64 *base);
+ int (*read_size)(struct device *dev, void *drv_data, u64 *size);
+ int (*write_addr)(struct device *dev, void *drv_data, u64 base);
+ int (*write_size)(struct device *dev, void *drv_data, u64 size);
+ int (*get_enabled_bg)(struct device *dev, void *drv_data, bool *enable);
+ int (*set_enabled_bg)(struct device *dev, void *drv_data, bool enable);
+ int (*get_min_cycle)(struct device *dev, void *drv_data, u32 *min);
+ int (*get_max_cycle)(struct device *dev, void *drv_data, u32 *max);
+ int (*get_cycle_duration)(struct device *dev, void *drv_data, u32 *cycle);
+ int (*set_cycle_duration)(struct device *dev, void *drv_data, u32 cycle);
+};
+
+#if IS_ENABLED(CONFIG_EDAC_SCRUB)
+int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups,
+ u8 instance);
+#else
+static inline int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_EDAC_SCRUB */
+
+/**
+ * struct edac_ecs_ops - ECS device operations (all elements optional)
+ * @get_log_entry_type: read the log entry type value.
+ * @set_log_entry_type: set the log entry type value.
+ * @get_mode: read the mode value.
+ * @set_mode: set the mode value.
+ * @reset: reset the ECS counter.
+ * @get_threshold: read the threshold count per gigabits of memory cells.
+ * @set_threshold: set the threshold count per gigabits of memory cells.
*/
-#define EDAC_MAX_MCS 16
+struct edac_ecs_ops {
+ int (*get_log_entry_type)(struct device *dev, void *drv_data, int fru_id, u32 *val);
+ int (*set_log_entry_type)(struct device *dev, void *drv_data, int fru_id, u32 val);
+ int (*get_mode)(struct device *dev, void *drv_data, int fru_id, u32 *val);
+ int (*set_mode)(struct device *dev, void *drv_data, int fru_id, u32 val);
+ int (*reset)(struct device *dev, void *drv_data, int fru_id, u32 val);
+ int (*get_threshold)(struct device *dev, void *drv_data, int fru_id, u32 *threshold);
+ int (*set_threshold)(struct device *dev, void *drv_data, int fru_id, u32 threshold);
+};
+
+struct edac_ecs_ex_info {
+ u16 num_media_frus;
+};
+
+#if IS_ENABLED(CONFIG_EDAC_ECS)
+int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups,
+ u16 num_media_frus);
+#else
+static inline int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups,
+ u16 num_media_frus)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_EDAC_ECS */
+
+enum edac_mem_repair_type {
+ EDAC_REPAIR_PPR,
+ EDAC_REPAIR_CACHELINE_SPARING,
+ EDAC_REPAIR_ROW_SPARING,
+ EDAC_REPAIR_BANK_SPARING,
+ EDAC_REPAIR_RANK_SPARING,
+ EDAC_REPAIR_MAX
+};
+
+extern const char * const edac_repair_type[];
+
+enum edac_mem_repair_cmd {
+ EDAC_DO_MEM_REPAIR = 1,
+};
+
+/**
+ * struct edac_mem_repair_ops - memory repair operations
+ * (all elements are optional except do_repair, set_hpa/set_dpa)
+ * @get_repair_type: get the memory repair type, listed in
+ * enum edac_mem_repair_type.
+ * @get_persist_mode: get the current persist mode.
+ * false - Soft repair type (temporary repair).
+ * true - Hard memory repair type (permanent repair).
+ * @set_persist_mode: set the persist mode of the memory repair instance.
+ * @get_repair_safe_when_in_use: get whether memory media is accessible and
+ * data is retained during repair operation.
+ * @get_hpa: get current host physical address (HPA) of memory to repair.
+ * @set_hpa: set host physical address (HPA) of memory to repair.
+ * @get_min_hpa: get the minimum supported host physical address (HPA).
+ * @get_max_hpa: get the maximum supported host physical address (HPA).
+ * @get_dpa: get current device physical address (DPA) of memory to repair.
+ * @set_dpa: set device physical address (DPA) of memory to repair.
+ * In some states of system configuration (e.g. before address decoders
+ * have been configured), memory devices (e.g. CXL) may not have an active
+ * mapping in the host physical address map. As such, the memory
+ * to repair must be identified by a device specific physical addressing
+ * scheme using a device physical address(DPA). The DPA and other control
+ * attributes to use for the repair operations will be presented in related
+ * error records.
+ * @get_min_dpa: get the minimum supported device physical address (DPA).
+ * @get_max_dpa: get the maximum supported device physical address (DPA).
+ * @get_nibble_mask: get current nibble mask of memory to repair.
+ * @set_nibble_mask: set nibble mask of memory to repair.
+ * @get_bank_group: get current bank group of memory to repair.
+ * @set_bank_group: set bank group of memory to repair.
+ * @get_bank: get current bank of memory to repair.
+ * @set_bank: set bank of memory to repair.
+ * @get_rank: get current rank of memory to repair.
+ * @set_rank: set rank of memory to repair.
+ * @get_row: get current row of memory to repair.
+ * @set_row: set row of memory to repair.
+ * @get_column: get current column of memory to repair.
+ * @set_column: set column of memory to repair.
+ * @get_channel: get current channel of memory to repair.
+ * @set_channel: set channel of memory to repair.
+ * @get_sub_channel: get current subchannel of memory to repair.
+ * @set_sub_channel: set subchannel of memory to repair.
+ * @do_repair: Issue memory repair operation for the HPA/DPA and
+ * other control attributes set for the memory to repair.
+ *
+ * All elements are optional except do_repair and at least one of set_hpa/set_dpa.
+ */
+struct edac_mem_repair_ops {
+ int (*get_repair_type)(struct device *dev, void *drv_data, const char **type);
+ int (*get_persist_mode)(struct device *dev, void *drv_data, bool *persist);
+ int (*set_persist_mode)(struct device *dev, void *drv_data, bool persist);
+ int (*get_repair_safe_when_in_use)(struct device *dev, void *drv_data, bool *safe);
+ int (*get_hpa)(struct device *dev, void *drv_data, u64 *hpa);
+ int (*set_hpa)(struct device *dev, void *drv_data, u64 hpa);
+ int (*get_min_hpa)(struct device *dev, void *drv_data, u64 *hpa);
+ int (*get_max_hpa)(struct device *dev, void *drv_data, u64 *hpa);
+ int (*get_dpa)(struct device *dev, void *drv_data, u64 *dpa);
+ int (*set_dpa)(struct device *dev, void *drv_data, u64 dpa);
+ int (*get_min_dpa)(struct device *dev, void *drv_data, u64 *dpa);
+ int (*get_max_dpa)(struct device *dev, void *drv_data, u64 *dpa);
+ int (*get_nibble_mask)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_nibble_mask)(struct device *dev, void *drv_data, u32 val);
+ int (*get_bank_group)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_bank_group)(struct device *dev, void *drv_data, u32 val);
+ int (*get_bank)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_bank)(struct device *dev, void *drv_data, u32 val);
+ int (*get_rank)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_rank)(struct device *dev, void *drv_data, u32 val);
+ int (*get_row)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_row)(struct device *dev, void *drv_data, u32 val);
+ int (*get_column)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_column)(struct device *dev, void *drv_data, u32 val);
+ int (*get_channel)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_channel)(struct device *dev, void *drv_data, u32 val);
+ int (*get_sub_channel)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_sub_channel)(struct device *dev, void *drv_data, u32 val);
+ int (*do_repair)(struct device *dev, void *drv_data, u32 val);
+};
+
+#if IS_ENABLED(CONFIG_EDAC_MEM_REPAIR)
+int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance);
+#else
+static inline int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_EDAC_MEM_REPAIR */
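A minimal provider, sketched under the constraint stated in the kernel-doc above (only do_repair plus one of set_hpa/set_dpa is mandatory); all my_* names are hypothetical:

struct my_repair_state {
        u64 dpa;
};

static int my_set_dpa(struct device *dev, void *drv_data, u64 dpa)
{
        struct my_repair_state *st = drv_data;

        st->dpa = dpa;
        return 0;
}

static int my_do_repair(struct device *dev, void *drv_data, u32 val)
{
        struct my_repair_state *st = drv_data;

        if (val != EDAC_DO_MEM_REPAIR)
                return -EINVAL;
        /* issue e.g. a soft PPR for st->dpa via hypothetical hardware access */
        return 0;
}

static const struct edac_mem_repair_ops my_repair_ops = {
        .set_dpa        = my_set_dpa,
        .do_repair      = my_do_repair,
};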
+
+/* EDAC device feature information structure */
+struct edac_dev_data {
+ union {
+ const struct edac_scrub_ops *scrub_ops;
+ const struct edac_ecs_ops *ecs_ops;
+ const struct edac_mem_repair_ops *mem_repair_ops;
+ };
+ u8 instance;
+ void *private;
+};
+
+struct edac_dev_feat_ctx {
+ struct device dev;
+ void *private;
+ struct edac_dev_data *scrub;
+ struct edac_dev_data ecs;
+ struct edac_dev_data *mem_repair;
+};
+
+struct edac_dev_feature {
+ enum edac_dev_feat ft_type;
+ u8 instance;
+ union {
+ const struct edac_scrub_ops *scrub_ops;
+ const struct edac_ecs_ops *ecs_ops;
+ const struct edac_mem_repair_ops *mem_repair_ops;
+ };
+ void *ctx;
+ struct edac_ecs_ex_info ecs_info;
+};
-#endif
+int edac_dev_register(struct device *parent, char *dev_name,
+ void *parent_pvt_data, int num_features,
+ const struct edac_dev_feature *ras_features);
+#endif /* _LINUX_EDAC_H_ */
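Tying the pieces together, a hedged registration sketch: the feature array refers to the hypothetical my_repair_ops above, the RAS_FEAT_MEM_REPAIR enumerator name is assumed from the feature-type enum declared earlier in this header, and passing the state through .ctx (so it comes back as drv_data) is likewise an assumption based on that field:

static int my_ras_probe(struct device *parent, struct my_repair_state *st)
{
        struct edac_dev_feature feats[] = {
                {
                        .ft_type        = RAS_FEAT_MEM_REPAIR, /* assumed name */
                        .instance       = 0,
                        .mem_repair_ops = &my_repair_ops,
                        .ctx            = st,   /* handed back as drv_data */
                },
        };

        return edac_dev_register(parent, "my_ras_dev", NULL,
                                 ARRAY_SIZE(feats), feats);
}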
diff --git a/include/linux/edd.h b/include/linux/edd.h
index 83d4371ec996..1c16fbcb81c0 100644
--- a/include/linux/edd.h
+++ b/include/linux/edd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/edd.h
* Copyright (C) 2002, 2003, 2004 Dell Inc.
@@ -16,16 +17,6 @@
* transferred into the edd structure, and in drivers/firmware/edd.c, that
* information is used to identify BIOS boot disk. The code in setup.S
* is very sensitive to the size of these structures.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _LINUX_EDD_H
#define _LINUX_EDD_H
diff --git a/include/linux/edma.h b/include/linux/edma.h
deleted file mode 100644
index a1307e7827e8..000000000000
--- a/include/linux/edma.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * TI EDMA DMA engine driver
- *
- * Copyright 2012 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __LINUX_EDMA_H
-#define __LINUX_EDMA_H
-
-struct dma_chan;
-
-#if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE)
-bool edma_filter_fn(struct dma_chan *, void *);
-#else
-static inline bool edma_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
-#endif
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index eb0b1988050a..3a485cc0e0fa 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
<http://rt2x00.serialmonkey.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
@@ -24,6 +11,8 @@
Supported chipsets: 93c46, 93c56 and 93c66.
*/
+#include <linux/bits.h>
+
/*
* EEPROM operation defines.
*/
@@ -47,6 +36,7 @@
* @register_write(struct eeprom_93cx6 *eeprom): handler to
* write to the eeprom register by using all reg_* fields.
* @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @quirks: eeprom or controller quirks
* @drive_data: Set if we're driving the data line.
* @reg_data_in: register field to indicate data input
* @reg_data_out: register field to indicate data output
@@ -63,6 +53,9 @@ struct eeprom_93cx6 {
void (*register_write)(struct eeprom_93cx6 *eeprom);
int width;
+ unsigned int quirks;
+/* Some EEPROMs require an extra clock cycle before reading */
+#define PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE BIT(0)
char drive_data;
char reg_data_in;
@@ -84,3 +77,8 @@ extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
u8 addr, u16 data);
+
+static inline bool has_quirk_extra_read_cycle(struct eeprom_93cx6 *eeprom)
+{
+ return eeprom->quirks & PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE;
+}
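A hedged consumer-side sketch: a hypothetical driver opts in to the extra clock cycle at setup time, and the library's read path can then branch on has_quirk_extra_read_cycle() before sampling data; my_register_read/my_register_write and the width choice are illustrative only.

static void my_register_read(struct eeprom_93cx6 *eeprom);      /* hypothetical */
static void my_register_write(struct eeprom_93cx6 *eeprom);     /* hypothetical */

static struct eeprom_93cx6 eeprom = {
        .register_read  = my_register_read,
        .register_write = my_register_write,
        .width          = PCI_EEPROM_WIDTH_93C66,
        .quirks         = PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE,
};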
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
deleted file mode 100644
index 915898759280..000000000000
--- a/include/linux/eeprom_93xx46.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Module: eeprom_93xx46
- * platform description for 93xx46 EEPROMs.
- */
-#include <linux/gpio/consumer.h>
-
-struct eeprom_93xx46_platform_data {
- unsigned char flags;
-#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
-#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
-#define EE_READONLY 0x08 /* forbid writing */
-
- unsigned int quirks;
-/* Single word read transfers only; no sequential read. */
-#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
-/* Instructions such as EWEN are (addrlen + 2) in length. */
-#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
-
- /*
- * optional hooks to control additional logic
- * before and after spi transfer.
- */
- void (*prepare)(void *);
- void (*finish)(void *);
- struct gpio_desc *select;
-};
diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h
index e6f624b53c3d..e6cd51005633 100644
--- a/include/linux/efi-bgrt.h
+++ b/include/linux/efi-bgrt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EFI_BGRT_H
#define _LINUX_EFI_BGRT_H
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 4102b85217d5..2a43094e23f7 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EFI_H
#define _LINUX_EFI_H
@@ -23,21 +24,24 @@
#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/uuid.h>
-#include <linux/screen_info.h>
#include <asm/page.h>
+struct screen_info;
+
#define EFI_SUCCESS 0
-#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_ACCESS_DENIED (15 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1)))
#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
@@ -47,10 +51,33 @@ typedef u16 efi_char16_t; /* UNICODE character */
typedef u64 efi_physical_addr_t;
typedef void *efi_handle_t;
-typedef uuid_le efi_guid_t;
+#if defined(CONFIG_X86_64)
+#define __efiapi __attribute__((ms_abi))
+#elif defined(CONFIG_X86_32)
+#define __efiapi __attribute__((regparm(0)))
+#else
+#define __efiapi
+#endif
+
+/*
+ * The UEFI spec and EDK2 reference implementation both define EFI_GUID as
+ * struct { u32 a; u16 b; u16 c; u8 d[8]; }; and so the implied alignment
+ * is 32 bits not 8 bits like our guid_t. In some cases (i.e., on 32-bit ARM),
+ * this means that firmware services invoked by the kernel may assume that
+ * efi_guid_t* arguments are 32-bit aligned, and use memory accessors that
+ * do not tolerate misalignment. So let's set the minimum alignment to 32 bits.
+ *
+ * Note that the UEFI spec as well as some comments in the EDK2 code base
+ * suggest that EFI_GUID should be 64-bit aligned, but this appears to be
+ * a mistake, given that no code seems to exist that actually enforces that
+ * or relies on it.
+ */
+typedef guid_t efi_guid_t __aligned(__alignof__(u32));
-#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
- UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+#define EFI_GUID(a, b, c, d...) ((efi_guid_t){ { \
+ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, d } })
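A worked example of the byte layout the new initializer produces (first three fields little-endian, trailing bytes verbatim), using the ACPI 2.0 table GUID defined later in this header:

static const efi_guid_t acpi_20_guid =
        EFI_GUID(0x8868e871, 0xe4f1, 0x11d3,
                 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81);
/* In-memory bytes: 71 e8 68 88 f1 e4 d3 11 bc 22 00 80 c7 3c 88 81 */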
/*
* Generic EFI table header
@@ -83,22 +110,26 @@ typedef struct {
#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
#define EFI_PAL_CODE 13
#define EFI_PERSISTENT_MEMORY 14
-#define EFI_MAX_MEMORY_TYPE 15
+#define EFI_UNACCEPTED_MEMORY 15
+#define EFI_MAX_MEMORY_TYPE 16
/* Attribute values: */
-#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
-#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
-#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
-#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
-#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */
-#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
-#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
-#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
-#define EFI_MEMORY_NV ((u64)0x0000000000008000ULL) /* non-volatile */
-#define EFI_MEMORY_MORE_RELIABLE \
- ((u64)0x0000000000010000ULL) /* higher reliability */
-#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
-#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
+#define EFI_MEMORY_UC BIT_ULL(0) /* uncached */
+#define EFI_MEMORY_WC BIT_ULL(1) /* write-coalescing */
+#define EFI_MEMORY_WT BIT_ULL(2) /* write-through */
+#define EFI_MEMORY_WB BIT_ULL(3) /* write-back */
+#define EFI_MEMORY_UCE BIT_ULL(4) /* uncached, exported */
+#define EFI_MEMORY_WP BIT_ULL(12) /* write-protect */
+#define EFI_MEMORY_RP BIT_ULL(13) /* read-protect */
+#define EFI_MEMORY_XP BIT_ULL(14) /* execute-protect */
+#define EFI_MEMORY_NV BIT_ULL(15) /* non-volatile */
+#define EFI_MEMORY_MORE_RELIABLE BIT_ULL(16) /* higher reliability */
+#define EFI_MEMORY_RO BIT_ULL(17) /* read-only */
+#define EFI_MEMORY_SP BIT_ULL(18) /* soft reserved */
+#define EFI_MEMORY_CPU_CRYPTO BIT_ULL(19) /* supports encryption */
+#define EFI_MEMORY_HOT_PLUGGABLE BIT_ULL(20) /* supports unplugging at runtime */
+#define EFI_MEMORY_RUNTIME BIT_ULL(63) /* range requires runtime mapping */
+
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
#define EFI_PAGE_SHIFT 12
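As a small illustration of the BIT_ULL() attribute bits (a sketch; md is any descriptor taken from the memory map):

static bool md_is_runtime_wb(const efi_memory_desc_t *md)
{
        /* needs a runtime mapping and may be mapped write-back */
        return (md->attribute & EFI_MEMORY_RUNTIME) &&
               (md->attribute & EFI_MEMORY_WB);
}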
@@ -121,13 +152,50 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
-struct efi_boot_memmap {
- efi_memory_desc_t **map;
- unsigned long *map_size;
- unsigned long *desc_size;
- u32 *desc_ver;
- unsigned long *key_ptr;
- unsigned long *buff_size;
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER */
+struct efi_manage_capsule_header {
+ u32 ver;
+ u16 emb_drv_cnt;
+ u16 payload_cnt;
+ /*
+ * Variable-size array of the size given by the sum of
+ * emb_drv_cnt and payload_cnt.
+ */
+ u64 offset_list[];
+} __packed;
+
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER */
+struct efi_manage_capsule_image_header {
+ u32 ver;
+ efi_guid_t image_type_id;
+ u8 image_index;
+ u8 reserved_bytes[3];
+ u32 image_size;
+ u32 vendor_code_size;
+ /* hw_ins was introduced in version 2 */
+ u64 hw_ins;
+ /* capsule_support was introduced in version 3 */
+ u64 capsule_support;
+} __packed;
+
+/* WIN_CERTIFICATE */
+struct win_cert {
+ u32 len;
+ u16 rev;
+ u16 cert_type;
+};
+
+/* WIN_CERTIFICATE_UEFI_GUID */
+struct win_cert_uefi_guid {
+ struct win_cert hdr;
+ efi_guid_t cert_type;
+ u8 cert_data[];
+};
+
+/* EFI_FIRMWARE_IMAGE_AUTHENTICATION */
+struct efi_image_auth {
+ u64 mon_count;
+ struct win_cert_uefi_guid auth_info;
};
/*
@@ -139,27 +207,21 @@ struct efi_boot_memmap {
struct capsule_info {
efi_capsule_header_t header;
+ efi_capsule_header_t *capsule;
int reset_type;
long index;
size_t count;
size_t total_size;
- phys_addr_t *pages;
+ struct page **pages;
+ phys_addr_t *phys;
size_t page_bytes_remain;
};
+int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ size_t hdr_bytes);
int __efi_capsule_setup_info(struct capsule_info *cap_info);
/*
- * Allocation types for calls to boottime->allocate_pages.
- */
-#define EFI_ALLOCATE_ANY_PAGES 0
-#define EFI_ALLOCATE_MAX_ADDRESS 1
-#define EFI_ALLOCATE_ADDRESS 2
-#define EFI_MAX_ALLOCATE_TYPE 3
-
-typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
-
-/*
* Types and defines for Time Services
*/
#define EFI_TIME_ADJUST_DAYLIGHT 0x1
@@ -186,291 +248,7 @@ typedef struct {
u8 sets_to_zero;
} efi_time_cap_t;
-typedef struct {
- efi_table_hdr_t hdr;
- u32 raise_tpl;
- u32 restore_tpl;
- u32 allocate_pages;
- u32 free_pages;
- u32 get_memory_map;
- u32 allocate_pool;
- u32 free_pool;
- u32 create_event;
- u32 set_timer;
- u32 wait_for_event;
- u32 signal_event;
- u32 close_event;
- u32 check_event;
- u32 install_protocol_interface;
- u32 reinstall_protocol_interface;
- u32 uninstall_protocol_interface;
- u32 handle_protocol;
- u32 __reserved;
- u32 register_protocol_notify;
- u32 locate_handle;
- u32 locate_device_path;
- u32 install_configuration_table;
- u32 load_image;
- u32 start_image;
- u32 exit;
- u32 unload_image;
- u32 exit_boot_services;
- u32 get_next_monotonic_count;
- u32 stall;
- u32 set_watchdog_timer;
- u32 connect_controller;
- u32 disconnect_controller;
- u32 open_protocol;
- u32 close_protocol;
- u32 open_protocol_information;
- u32 protocols_per_handle;
- u32 locate_handle_buffer;
- u32 locate_protocol;
- u32 install_multiple_protocol_interfaces;
- u32 uninstall_multiple_protocol_interfaces;
- u32 calculate_crc32;
- u32 copy_mem;
- u32 set_mem;
- u32 create_event_ex;
-} __packed efi_boot_services_32_t;
-
-typedef struct {
- efi_table_hdr_t hdr;
- u64 raise_tpl;
- u64 restore_tpl;
- u64 allocate_pages;
- u64 free_pages;
- u64 get_memory_map;
- u64 allocate_pool;
- u64 free_pool;
- u64 create_event;
- u64 set_timer;
- u64 wait_for_event;
- u64 signal_event;
- u64 close_event;
- u64 check_event;
- u64 install_protocol_interface;
- u64 reinstall_protocol_interface;
- u64 uninstall_protocol_interface;
- u64 handle_protocol;
- u64 __reserved;
- u64 register_protocol_notify;
- u64 locate_handle;
- u64 locate_device_path;
- u64 install_configuration_table;
- u64 load_image;
- u64 start_image;
- u64 exit;
- u64 unload_image;
- u64 exit_boot_services;
- u64 get_next_monotonic_count;
- u64 stall;
- u64 set_watchdog_timer;
- u64 connect_controller;
- u64 disconnect_controller;
- u64 open_protocol;
- u64 close_protocol;
- u64 open_protocol_information;
- u64 protocols_per_handle;
- u64 locate_handle_buffer;
- u64 locate_protocol;
- u64 install_multiple_protocol_interfaces;
- u64 uninstall_multiple_protocol_interfaces;
- u64 calculate_crc32;
- u64 copy_mem;
- u64 set_mem;
- u64 create_event_ex;
-} __packed efi_boot_services_64_t;
-
-/*
- * EFI Boot Services table
- */
-typedef struct {
- efi_table_hdr_t hdr;
- void *raise_tpl;
- void *restore_tpl;
- efi_status_t (*allocate_pages)(int, int, unsigned long,
- efi_physical_addr_t *);
- efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long);
- efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *,
- unsigned long *, u32 *);
- efi_status_t (*allocate_pool)(int, unsigned long, void **);
- efi_status_t (*free_pool)(void *);
- void *create_event;
- void *set_timer;
- void *wait_for_event;
- void *signal_event;
- void *close_event;
- void *check_event;
- void *install_protocol_interface;
- void *reinstall_protocol_interface;
- void *uninstall_protocol_interface;
- efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
- void *__reserved;
- void *register_protocol_notify;
- efi_status_t (*locate_handle)(int, efi_guid_t *, void *,
- unsigned long *, efi_handle_t *);
- void *locate_device_path;
- efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
- void *load_image;
- void *start_image;
- void *exit;
- void *unload_image;
- efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long);
- void *get_next_monotonic_count;
- void *stall;
- void *set_watchdog_timer;
- void *connect_controller;
- void *disconnect_controller;
- void *open_protocol;
- void *close_protocol;
- void *open_protocol_information;
- void *protocols_per_handle;
- void *locate_handle_buffer;
- efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
- void *install_multiple_protocol_interfaces;
- void *uninstall_multiple_protocol_interfaces;
- void *calculate_crc32;
- void *copy_mem;
- void *set_mem;
- void *create_event_ex;
-} efi_boot_services_t;
-
-typedef enum {
- EfiPciIoWidthUint8,
- EfiPciIoWidthUint16,
- EfiPciIoWidthUint32,
- EfiPciIoWidthUint64,
- EfiPciIoWidthFifoUint8,
- EfiPciIoWidthFifoUint16,
- EfiPciIoWidthFifoUint32,
- EfiPciIoWidthFifoUint64,
- EfiPciIoWidthFillUint8,
- EfiPciIoWidthFillUint16,
- EfiPciIoWidthFillUint32,
- EfiPciIoWidthFillUint64,
- EfiPciIoWidthMaximum
-} EFI_PCI_IO_PROTOCOL_WIDTH;
-
-typedef enum {
- EfiPciIoAttributeOperationGet,
- EfiPciIoAttributeOperationSet,
- EfiPciIoAttributeOperationEnable,
- EfiPciIoAttributeOperationDisable,
- EfiPciIoAttributeOperationSupported,
- EfiPciIoAttributeOperationMaximum
-} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
-
-typedef struct {
- u32 read;
- u32 write;
-} efi_pci_io_protocol_access_32_t;
-
-typedef struct {
- u64 read;
- u64 write;
-} efi_pci_io_protocol_access_64_t;
-
-typedef struct {
- void *read;
- void *write;
-} efi_pci_io_protocol_access_t;
-
-typedef struct {
- u32 poll_mem;
- u32 poll_io;
- efi_pci_io_protocol_access_32_t mem;
- efi_pci_io_protocol_access_32_t io;
- efi_pci_io_protocol_access_32_t pci;
- u32 copy_mem;
- u32 map;
- u32 unmap;
- u32 allocate_buffer;
- u32 free_buffer;
- u32 flush;
- u32 get_location;
- u32 attributes;
- u32 get_bar_attributes;
- u32 set_bar_attributes;
- uint64_t romsize;
- void *romimage;
-} efi_pci_io_protocol_32;
-
-typedef struct {
- u64 poll_mem;
- u64 poll_io;
- efi_pci_io_protocol_access_64_t mem;
- efi_pci_io_protocol_access_64_t io;
- efi_pci_io_protocol_access_64_t pci;
- u64 copy_mem;
- u64 map;
- u64 unmap;
- u64 allocate_buffer;
- u64 free_buffer;
- u64 flush;
- u64 get_location;
- u64 attributes;
- u64 get_bar_attributes;
- u64 set_bar_attributes;
- uint64_t romsize;
- void *romimage;
-} efi_pci_io_protocol_64;
-
-typedef struct {
- void *poll_mem;
- void *poll_io;
- efi_pci_io_protocol_access_t mem;
- efi_pci_io_protocol_access_t io;
- efi_pci_io_protocol_access_t pci;
- void *copy_mem;
- void *map;
- void *unmap;
- void *allocate_buffer;
- void *free_buffer;
- void *flush;
- void *get_location;
- void *attributes;
- void *get_bar_attributes;
- void *set_bar_attributes;
- uint64_t romsize;
- void *romimage;
-} efi_pci_io_protocol;
-
-#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
-#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
-#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004
-#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008
-#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010
-#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020
-#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080
-#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200
-#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000
-#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000
-#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000
-#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000
-#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000
-#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
-#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
-
-typedef struct {
- u32 version;
- u32 get;
- u32 set;
- u32 del;
- u32 get_all;
-} apple_properties_protocol_32_t;
-
-typedef struct {
- u64 version;
- u64 get;
- u64 set;
- u64 del;
- u64 get_all;
-} apple_properties_protocol_64_t;
+typedef union efi_boot_services efi_boot_services_t;
/*
* Types and defines for EFI ResetSystem
@@ -503,24 +281,6 @@ typedef struct {
u32 query_variable_info;
} efi_runtime_services_32_t;
-typedef struct {
- efi_table_hdr_t hdr;
- u64 get_time;
- u64 set_time;
- u64 get_wakeup_time;
- u64 set_wakeup_time;
- u64 set_virtual_address_map;
- u64 convert_pointer;
- u64 get_variable;
- u64 get_next_variable;
- u64 set_variable;
- u64 get_next_high_mono_count;
- u64 reset_system;
- u64 update_capsule;
- u64 query_capsule_caps;
- u64 query_variable_info;
-} efi_runtime_services_64_t;
-
typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
typedef efi_status_t efi_set_time_t (efi_time_t *tm);
typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
@@ -530,7 +290,7 @@ typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor,
unsigned long *data_size, void *data);
typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name,
efi_guid_t *vendor);
-typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
+typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data);
typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
@@ -555,22 +315,25 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
unsigned long size,
bool nonblocking);
-typedef struct {
- efi_table_hdr_t hdr;
- efi_get_time_t *get_time;
- efi_set_time_t *set_time;
- efi_get_wakeup_time_t *get_wakeup_time;
- efi_set_wakeup_time_t *set_wakeup_time;
- efi_set_virtual_address_map_t *set_virtual_address_map;
- void *convert_pointer;
- efi_get_variable_t *get_variable;
- efi_get_next_variable_t *get_next_variable;
- efi_set_variable_t *set_variable;
- efi_get_next_high_mono_count_t *get_next_high_mono_count;
- efi_reset_system_t *reset_system;
- efi_update_capsule_t *update_capsule;
- efi_query_capsule_caps_t *query_capsule_caps;
- efi_query_variable_info_t *query_variable_info;
+typedef union {
+ struct {
+ efi_table_hdr_t hdr;
+ efi_get_time_t __efiapi *get_time;
+ efi_set_time_t __efiapi *set_time;
+ efi_get_wakeup_time_t __efiapi *get_wakeup_time;
+ efi_set_wakeup_time_t __efiapi *set_wakeup_time;
+ efi_set_virtual_address_map_t __efiapi *set_virtual_address_map;
+ void *convert_pointer;
+ efi_get_variable_t __efiapi *get_variable;
+ efi_get_next_variable_t __efiapi *get_next_variable;
+ efi_set_variable_t __efiapi *set_variable;
+ efi_get_next_high_mono_count_t __efiapi *get_next_high_mono_count;
+ efi_reset_system_t __efiapi *reset_system;
+ efi_update_capsule_t __efiapi *update_capsule;
+ efi_query_capsule_caps_t __efiapi *query_capsule_caps;
+ efi_query_variable_info_t __efiapi *query_variable_info;
+ };
+ efi_runtime_services_32_t mixed_mode;
} efi_runtime_services_t;
void efi_native_runtime_setup(void);
@@ -597,43 +360,88 @@ void efi_native_runtime_setup(void);
* where the UEFI SPEC breaks the line.
*/
#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
-#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
-#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
-#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0xbc62157e, 0x3e33, 0x4fec, 0x99, 0x20, 0x2d, 0x3b, 0x36, 0xd7, 0x50, 0xdf)
+#define EFI_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0x09576e91, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c)
+#define EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID EFI_GUID(0x05c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
-#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
+#define EFI_EDID_DISCOVERED_PROTOCOL_GUID EFI_GUID(0x1c0c34f6, 0xd380, 0x41fa, 0xa0, 0x49, 0x8a, 0xd0, 0x6c, 0x1a, 0x66, 0xaa)
+#define EFI_EDID_ACTIVE_PROTOCOL_GUID EFI_GUID(0xbd8c1056, 0x9f36, 0x44ec, 0x92, 0xa8, 0xa6, 0x33, 0x7f, 0x81, 0x79, 0x86)
#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
-#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61)
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define APPLE_SET_OS_PROTOCOL_GUID EFI_GUID(0xc5c5da95, 0x7d5c, 0x45e6, 0xb2, 0xf1, 0x3f, 0xd5, 0x2b, 0xb1, 0x00, 0x77)
+#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
+#define EFI_TCG2_FINAL_EVENTS_TABLE_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
+#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d)
+#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
+#define EFI_DXE_SERVICES_TABLE_GUID EFI_GUID(0x05ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
+#define EFI_SMBIOS_PROTOCOL_GUID EFI_GUID(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
+#define EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID EFI_GUID(0xf4560cf6, 0x40ec, 0x4b4a, 0xa1, 0x92, 0xbf, 0x1d, 0x57, 0xd0, 0xb1, 0x89)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
+#define EFI_CERT_SHA256_GUID EFI_GUID(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28)
+#define EFI_CERT_X509_GUID EFI_GUID(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72)
+#define EFI_CERT_X509_SHA256_GUID EFI_GUID(0x3bd2a492, 0x96c0, 0x4079, 0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed)
+#define EFI_CC_BLOB_GUID EFI_GUID(0x067b1f5f, 0xcf26, 0x44c5, 0x85, 0x54, 0x93, 0xd7, 0x77, 0x91, 0x2d, 0x42)
+#define EFI_CC_MEASUREMENT_PROTOCOL_GUID EFI_GUID(0x96751a3d, 0x72f4, 0x41a6, 0xa7, 0x94, 0xed, 0x5d, 0x0e, 0x67, 0xae, 0x6b)
+#define EFI_CC_FINAL_EVENTS_TABLE_GUID EFI_GUID(0xdd4a4648, 0x2de7, 0x4665, 0x96, 0x4d, 0x21, 0xd9, 0xef, 0x5f, 0xb4, 0x46)
+
/*
* This GUID is used to pass to the kernel proper the struct screen_info
* structure that was populated by the stub based on the GOP protocol instance
* associated with ConOut
*/
-#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
+#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
+#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
+#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68)
+#define LINUX_EFI_MOK_VARIABLE_TABLE_GUID EFI_GUID(0xc451ed2b, 0x9694, 0x45d3, 0xba, 0xba, 0xed, 0x9f, 0x89, 0x88, 0xa3, 0x89)
+#define LINUX_EFI_COCO_SECRET_AREA_GUID EFI_GUID(0xadf956ad, 0xe98c, 0x484c, 0xae, 0x11, 0xb5, 0x1c, 0x7d, 0x33, 0x64, 0x47)
+#define LINUX_EFI_BOOT_MEMMAP_GUID EFI_GUID(0x800f683f, 0xd08b, 0x423a, 0xa2, 0x93, 0x96, 0x5c, 0x3c, 0x6f, 0xe2, 0xb4)
+#define LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID EFI_GUID(0xd5d1de3c, 0x105c, 0x44f9, 0x9e, 0xa9, 0xbc, 0xef, 0x98, 0x12, 0x00, 0x31)
+
+#define RISCV_EFI_BOOT_PROTOCOL_GUID EFI_GUID(0xccd15fec, 0x6f73, 0x4eec, 0x83, 0x95, 0x3e, 0x69, 0xe4, 0xb9, 0x40, 0xbf)
+
+/*
+ * This GUID may be installed onto the kernel image's handle as a NULL protocol
+ * to signal to the stub that the placement of the image should be respected,
+ * and moving the image in physical memory is undesirable. To ensure
+ * compatibility with 64k pages kernels with virtually mapped stacks, and to
+ * avoid defeating physical randomization, this protocol should only be
+ * installed if the image was placed at a randomized 128k aligned address in
+ * memory.
+ */
+#define LINUX_EFI_LOADED_IMAGE_FIXED_GUID EFI_GUID(0xf5a37b6d, 0x3344, 0x42a5, 0xb6, 0xbb, 0x97, 0x86, 0x48, 0xc1, 0x89, 0x0a)
+
+/* OEM GUIDs */
+#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55)
+#define AMD_SEV_MEM_ENCRYPT_GUID EFI_GUID(0x0cf29b71, 0x9e51, 0x433a, 0xa3, 0xb7, 0x81, 0xf3, 0xab, 0x16, 0xb8, 0x75)
+
+/* OVMF protocol GUIDs */
+#define OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID EFI_GUID(0xc5a010fe, 0x38a7, 0x4531, 0x8a, 0x4a, 0x05, 0x00, 0xd2, 0xfd, 0x16, 0x49)
+#define OVMF_MEMORY_LOG_TABLE_GUID EFI_GUID(0x95305139, 0xb20f, 0x4723, 0x84, 0x25, 0x62, 0x7c, 0x88, 0x8f, 0xf1, 0x21)
typedef struct {
efi_guid_t guid;
@@ -645,18 +453,22 @@ typedef struct {
u32 table;
} efi_config_table_32_t;
-typedef struct {
- efi_guid_t guid;
- unsigned long table;
+typedef union {
+ struct {
+ efi_guid_t guid;
+ void *table;
+ };
+ efi_config_table_32_t mixed_mode;
} efi_config_table_t;
typedef struct {
efi_guid_t guid;
- const char *name;
unsigned long *ptr;
+ const char name[16];
} efi_config_table_type_t;
#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
+#define EFI_DXE_SERVICES_TABLE_SIGNATURE ((u64)0x565245535f455844ULL)
#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20))
@@ -699,32 +511,56 @@ typedef struct {
u32 tables;
} efi_system_table_32_t;
-typedef struct {
- efi_table_hdr_t hdr;
- unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
- u32 fw_revision;
- unsigned long con_in_handle;
- unsigned long con_in;
- unsigned long con_out_handle;
- unsigned long con_out;
- unsigned long stderr_handle;
- unsigned long stderr;
- efi_runtime_services_t *runtime;
- efi_boot_services_t *boottime;
- unsigned long nr_tables;
- unsigned long tables;
+typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t;
+typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t;
+
+typedef union {
+ struct {
+ efi_table_hdr_t hdr;
+ unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
+ u32 fw_revision;
+ unsigned long con_in_handle;
+ efi_simple_text_input_protocol_t *con_in;
+ unsigned long con_out_handle;
+ efi_simple_text_output_protocol_t *con_out;
+ unsigned long stderr_handle;
+ unsigned long stderr;
+ efi_runtime_services_t *runtime;
+ efi_boot_services_t *boottime;
+ unsigned long nr_tables;
+ unsigned long tables;
+ };
+ efi_system_table_32_t mixed_mode;
} efi_system_table_t;
+struct efi_boot_memmap {
+ unsigned long map_size;
+ unsigned long desc_size;
+ u32 desc_ver;
+ unsigned long map_key;
+ unsigned long buff_size;
+ efi_memory_desc_t map[];
+};
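With the new inline layout, consumers derive the entry count from the two size fields; a hedged sketch (desc_size may exceed sizeof(efi_memory_desc_t), so the flexible array must not be indexed directly):

static unsigned long boot_memmap_entries(const struct efi_boot_memmap *m)
{
        return m->map_size / m->desc_size;
}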
+
+struct efi_unaccepted_memory {
+ u32 version;
+ u32 unit_size;
+ u64 phys_base;
+ u64 size;
+ unsigned long bitmap[];
+};
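The unaccepted-memory table is interpreted as one bitmap bit per unit_size bytes starting at phys_base (an assumption based on how the kernel's unaccepted-memory code consumes it); a hedged lookup sketch assuming pa falls inside the covered window:

static bool pa_is_unaccepted(const struct efi_unaccepted_memory *t, u64 pa)
{
        unsigned long bit = div64_u64(pa - t->phys_base, t->unit_size);

        return test_bit(bit, t->bitmap);
}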
+
/*
* Architecture independent structure for describing a memory map for the
- * benefit of efi_memmap_init_early(), saving us the need to pass four
- * parameters.
+ * benefit of efi_memmap_init_early(), and for passing context between
+ * efi_memmap_alloc() and efi_memmap_install().
*/
struct efi_memory_map_data {
phys_addr_t phys_map;
unsigned long size;
unsigned long desc_version;
unsigned long desc_size;
+ unsigned long flags;
};
struct efi_memory_map {
@@ -734,7 +570,10 @@ struct efi_memory_map {
int nr_map;
unsigned long desc_version;
unsigned long desc_size;
- bool late;
+#define EFI_MEMMAP_LATE (1UL << 0)
+#define EFI_MEMMAP_MEMBLOCK (1UL << 1)
+#define EFI_MEMMAP_SLAB (1UL << 2)
+ unsigned long flags;
};
struct efi_mem_range {
@@ -742,191 +581,122 @@ struct efi_mem_range {
u64 attribute;
};
-struct efi_fdt_params {
- u64 system_table;
- u64 mmap;
- u32 mmap_size;
- u32 desc_size;
- u32 desc_ver;
-};
-
-typedef struct {
- u32 revision;
- u32 parent_handle;
- u32 system_table;
- u32 device_handle;
- u32 file_path;
- u32 reserved;
- u32 load_options_size;
- u32 load_options;
- u32 image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- unsigned long unload;
-} efi_loaded_image_32_t;
-
typedef struct {
- u32 revision;
- u64 parent_handle;
- u64 system_table;
- u64 device_handle;
- u64 file_path;
- u64 reserved;
- u32 load_options_size;
- u64 load_options;
- u64 image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- unsigned long unload;
-} efi_loaded_image_64_t;
+ u16 version;
+ u16 length;
+ u32 runtime_services_supported;
+} efi_rt_properties_table_t;
-typedef struct {
- u32 revision;
- void *parent_handle;
- efi_system_table_t *system_table;
- void *device_handle;
- void *file_path;
- void *reserved;
- u32 load_options_size;
- void *load_options;
- void *image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- unsigned long unload;
-} efi_loaded_image_t;
+#define EFI_RT_PROPERTIES_TABLE_VERSION 0x1
+#define EFI_INVALID_TABLE_ADDR (~0UL)
-typedef struct {
- u64 size;
- u64 file_size;
- u64 phys_size;
- efi_time_t create_time;
- efi_time_t last_access_time;
- efi_time_t modification_time;
- __aligned_u64 attribute;
- efi_char16_t filename[1];
-} efi_file_info_t;
+// BIT0 implies that Runtime code includes the forward control flow guard
+// instruction, such as X86 CET-IBT or ARM BTI.
+#define EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD 0x1
typedef struct {
- u64 revision;
- u32 open;
- u32 close;
- u32 delete;
- u32 read;
- u32 write;
- u32 get_position;
- u32 set_position;
- u32 get_info;
- u32 set_info;
- u32 flush;
-} efi_file_handle_32_t;
+ u32 version;
+ u32 num_entries;
+ u32 desc_size;
+ u32 flags;
+ /*
+ * There are @num_entries following, each of size @desc_size bytes,
+ * including an efi_memory_desc_t header. See efi_memdesc_ptr().
+ */
+ efi_memory_desc_t entry[];
+} efi_memory_attributes_table_t;
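Walking the table honours @desc_size rather than sizeof(efi_memory_desc_t); a sketch using the efi_memdesc_ptr() helper renamed later in this patch:

static void walk_mem_attrs(efi_memory_attributes_table_t *tbl)
{
        u32 i;

        for (i = 0; i < tbl->num_entries; i++) {
                const efi_memory_desc_t *md =
                        efi_memdesc_ptr(tbl->entry, tbl->desc_size, i);
                /* inspect md->attribute, md->phys_addr, ... here */
        }
}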
typedef struct {
- u64 revision;
- u64 open;
- u64 close;
- u64 delete;
- u64 read;
- u64 write;
- u64 get_position;
- u64 set_position;
- u64 get_info;
- u64 set_info;
- u64 flush;
-} efi_file_handle_64_t;
-
-typedef struct _efi_file_handle {
- u64 revision;
- efi_status_t (*open)(struct _efi_file_handle *,
- struct _efi_file_handle **,
- efi_char16_t *, u64, u64);
- efi_status_t (*close)(struct _efi_file_handle *);
- void *delete;
- efi_status_t (*read)(struct _efi_file_handle *, unsigned long *,
- void *);
- void *write;
- void *get_position;
- void *set_position;
- efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *,
- unsigned long *, void *);
- void *set_info;
- void *flush;
-} efi_file_handle_t;
-
-typedef struct _efi_file_io_interface {
- u64 revision;
- int (*open_volume)(struct _efi_file_io_interface *,
- efi_file_handle_t **);
-} efi_file_io_interface_t;
-
-#define EFI_FILE_MODE_READ 0x0000000000000001
-#define EFI_FILE_MODE_WRITE 0x0000000000000002
-#define EFI_FILE_MODE_CREATE 0x8000000000000000
+ efi_guid_t signature_owner;
+ u8 signature_data[];
+} efi_signature_data_t;
typedef struct {
- u32 version;
- u32 length;
- u64 memory_protection_attribute;
-} efi_properties_table_t;
-
-#define EFI_PROPERTIES_TABLE_VERSION 0x00010000
-#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1
+ efi_guid_t signature_type;
+ u32 signature_list_size;
+ u32 signature_header_size;
+ u32 signature_size;
+ u8 signature_header[];
+ /* efi_signature_data_t signatures[][] */
+} efi_signature_list_t;
-#define EFI_INVALID_TABLE_ADDR (~0UL)
+typedef u8 efi_sha256_hash_t[32];
typedef struct {
- u32 version;
- u32 num_entries;
- u32 desc_size;
- u32 reserved;
- efi_memory_desc_t entry[0];
-} efi_memory_attributes_table_t;
+ efi_sha256_hash_t to_be_signed_hash;
+ efi_time_t time_of_revocation;
+} efi_cert_x509_sha256_t;
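For reference, a hedged sketch of how one such list is stepped through (this is what parse_efi_signature_list(), declared later in this patch, has to do): entries start after the header plus signature_header_size and are signature_size bytes each, owner GUID included.

static void walk_siglist(const efi_signature_list_t *l,
                         void (*fn)(const efi_signature_data_t *))
{
        const u8 *p = (const u8 *)l + sizeof(*l) + l->signature_header_size;
        const u8 *end = (const u8 *)l + l->signature_list_size;

        for (; p + l->signature_size <= end; p += l->signature_size)
                fn((const efi_signature_data_t *)p);
}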
+
+extern unsigned long __ro_after_init efi_rng_seed; /* RNG Seed table */
/*
* All runtime access to EFI goes through this structure:
*/
extern struct efi {
- efi_system_table_t *systab; /* EFI system table */
- unsigned int runtime_version; /* Runtime services version */
- unsigned long mps; /* MPS table */
- unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
- unsigned long acpi20; /* ACPI table (ACPI 2.0) */
- unsigned long smbios; /* SMBIOS table (32 bit entry point) */
- unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
- unsigned long sal_systab; /* SAL system table */
- unsigned long boot_info; /* boot info table */
- unsigned long hcdp; /* HCDP table */
- unsigned long uga; /* UGA table */
- unsigned long uv_systab; /* UV system table */
- unsigned long fw_vendor; /* fw_vendor */
- unsigned long runtime; /* runtime table */
- unsigned long config_table; /* config tables */
- unsigned long esrt; /* ESRT table */
- unsigned long properties_table; /* properties table */
- unsigned long mem_attr_table; /* memory attributes table */
- unsigned long rng_seed; /* UEFI firmware random seed */
- efi_get_time_t *get_time;
- efi_set_time_t *set_time;
- efi_get_wakeup_time_t *get_wakeup_time;
- efi_set_wakeup_time_t *set_wakeup_time;
- efi_get_variable_t *get_variable;
- efi_get_next_variable_t *get_next_variable;
- efi_set_variable_t *set_variable;
- efi_set_variable_t *set_variable_nonblocking;
- efi_query_variable_info_t *query_variable_info;
- efi_query_variable_info_t *query_variable_info_nonblocking;
- efi_update_capsule_t *update_capsule;
- efi_query_capsule_caps_t *query_capsule_caps;
- efi_get_next_high_mono_count_t *get_next_high_mono_count;
- efi_reset_system_t *reset_system;
- efi_set_virtual_address_map_t *set_virtual_address_map;
- struct efi_memory_map memmap;
- unsigned long flags;
+ const efi_runtime_services_t *runtime; /* EFI runtime services table */
+ unsigned int runtime_version; /* Runtime services version */
+ unsigned int runtime_supported_mask;
+
+ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
+ unsigned long acpi20; /* ACPI table (ACPI 2.0) */
+ unsigned long smbios; /* SMBIOS table (32 bit entry point) */
+ unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
+ unsigned long esrt; /* ESRT table */
+ unsigned long tpm_log; /* TPM2 Event Log table */
+ unsigned long tpm_final_log; /* TPM2 Final Events Log table */
+ unsigned long ovmf_debug_log;
+ unsigned long mokvar_table; /* MOK variable config table */
+ unsigned long coco_secret; /* Confidential computing secret table */
+ unsigned long unaccepted; /* Unaccepted memory table */
+
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
+ efi_set_wakeup_time_t *set_wakeup_time;
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_set_variable_t *set_variable_nonblocking;
+ efi_query_variable_info_t *query_variable_info;
+ efi_query_variable_info_t *query_variable_info_nonblocking;
+ efi_update_capsule_t *update_capsule;
+ efi_query_capsule_caps_t *query_capsule_caps;
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
+ efi_reset_system_t *reset_system;
+
+ struct efi_memory_map memmap;
+ unsigned long flags;
} efi;
+#define EFI_RT_SUPPORTED_GET_TIME 0x0001
+#define EFI_RT_SUPPORTED_SET_TIME 0x0002
+#define EFI_RT_SUPPORTED_GET_WAKEUP_TIME 0x0004
+#define EFI_RT_SUPPORTED_SET_WAKEUP_TIME 0x0008
+#define EFI_RT_SUPPORTED_GET_VARIABLE 0x0010
+#define EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME 0x0020
+#define EFI_RT_SUPPORTED_SET_VARIABLE 0x0040
+#define EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP 0x0080
+#define EFI_RT_SUPPORTED_CONVERT_POINTER 0x0100
+#define EFI_RT_SUPPORTED_GET_NEXT_HIGH_MONOTONIC_COUNT 0x0200
+#define EFI_RT_SUPPORTED_RESET_SYSTEM 0x0400
+#define EFI_RT_SUPPORTED_UPDATE_CAPSULE 0x0800
+#define EFI_RT_SUPPORTED_QUERY_CAPSULE_CAPABILITIES 0x1000
+#define EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO 0x2000
+
+#define EFI_RT_SUPPORTED_ALL 0x3fff
+
+#define EFI_RT_SUPPORTED_TIME_SERVICES 0x0003
+#define EFI_RT_SUPPORTED_WAKEUP_SERVICES 0x000c
+#define EFI_RT_SUPPORTED_VARIABLE_SERVICES 0x0070
+
+extern struct mm_struct efi_mm;
+
+static inline bool mm_is_efi(struct mm_struct *mm)
+{
+ return IS_ENABLED(CONFIG_EFI) && mm == &efi_mm;
+}
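Callers are expected to gate on this mask rather than probe individual function pointers; a hedged sketch using the efi_rt_services_supported() helper added further down:

static int my_check_efivars(void)
{
        if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
                return -EOPNOTSUPP;     /* get/set/get_next all required */
        return 0;
}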
+
static inline int
efi_guidcmp (efi_guid_t left, efi_guid_t right)
{
@@ -941,21 +711,17 @@ efi_guid_to_str(efi_guid_t *guid, char *out)
}
extern void efi_init (void);
-extern void *efi_get_pal_addr (void);
-extern void efi_map_pal_code (void);
-extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timespec64 *ts);
+extern void efi_earlycon_reprobe(void);
+#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
+#else
+static inline void efi_enter_virtual_mode (void) {}
+#endif
#ifdef CONFIG_X86
-extern void efi_late_init(void);
-extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
-extern void efi_find_mirror(void);
#else
-static inline void efi_late_init(void) {}
-static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
@@ -964,26 +730,23 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
return EFI_SUCCESS;
}
#endif
-extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
-extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
+extern int __init __efi_memmap_init(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
-extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
-extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
- struct range *range);
-extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
- void *buf, struct efi_mem_range *mem);
-extern int efi_config_init(efi_config_table_type_t *arch_tables);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
#else
static inline void efi_esrt_init(void) { }
#endif
-extern int efi_config_parse_tables(void *config_tables, int count, int sz,
- efi_config_table_type_t *arch_tables);
+extern int efi_config_parse_tables(const efi_config_table_t *config_tables,
+ int count,
+ const efi_config_table_type_t *arch_tables);
+extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr);
+extern void efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
+ unsigned long fw_vendor);
extern u64 efi_get_iobase (void);
extern int efi_mem_type(unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
@@ -991,21 +754,16 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
+extern int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
-extern void efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource, struct resource *bss_resource);
-extern void efi_reserve_boot_services(void);
-extern int efi_get_fdt_params(struct efi_fdt_params *params);
+extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
+extern u64 efi_get_fdt_params(struct efi_memory_map_data *data);
extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
extern bool efi_poweroff_required(void);
-#ifdef CONFIG_EFI_FAKE_MEMMAP
-extern void __init efi_fake_memmap(void);
-#else
-static inline void efi_fake_memmap(void) { }
-#endif
+extern unsigned long efi_mem_attr_table;
/*
* efi_memattr_perm_setter - arch specific callback function passed into
@@ -1014,14 +772,14 @@ static inline void efi_fake_memmap(void) { }
* argument in the page tables referred to by the
* first argument.
*/
-typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool);
-extern int efi_memattr_init(void);
+extern void efi_memattr_init(void);
extern int efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn);
/*
- * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor
+ * efi_memdesc_ptr - get the n-th EFI memmap descriptor
* @map: the start of efi memmap
* @desc_size: the size of space for each EFI memmap descriptor
* @n: the index of efi memmap descriptor
@@ -1039,7 +797,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
* during bootup since for_each_efi_memory_desc_xxx() is available after the
* kernel initializes the EFI subsystem to set up struct efi_memory_map.
*/
-#define efi_early_memdesc_ptr(map, desc_size, n) \
+#define efi_memdesc_ptr(map, desc_size, n) \
(efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))
/* Iterate through an efi_memory_map */
@@ -1064,6 +822,15 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
char * __init efi_md_typeattr_format(char *buf, size_t size,
const efi_memory_desc_t *md);
+
+typedef void (*efi_element_handler_t)(const char *source,
+ const void *element_data,
+ size_t element_size);
+extern int __init parse_efi_signature_list(
+ const char *source,
+ const void *data, size_t size,
+ efi_element_handler_t (*get_handler_for_guid)(const efi_guid_t *));
+
/**
* efi_range_is_wc - check the WC bit on an address range
* @start: starting kvirt address
@@ -1085,10 +852,6 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len)
return 1;
}
-#ifdef CONFIG_EFI_PCDP
-extern int __init efi_setup_pcdp_console(char *);
-#endif
-
/*
* We play games with efi_enabled so that the compiler will, if
* possible, remove EFI-related code altogether.
@@ -1101,8 +864,9 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
#define EFI_ARCH_1 7 /* First arch-specific bit */
#define EFI_DBG 8 /* Print additional debug info at runtime */
-#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
-#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
+#define EFI_MEM_ATTR 9 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
+#define EFI_MEM_NO_SOFT_RESERVE 10 /* Is the kernel configured to ignore soft reservations? */
+#define EFI_PRESERVE_BS_REGIONS 11 /* Are EFI boot-services memory segments available? */
#ifdef CONFIG_EFI
/*
@@ -1114,7 +878,19 @@ static inline bool efi_enabled(int feature)
}
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
-extern bool efi_is_table_address(unsigned long phys_addr);
+bool __pure __efi_soft_reserve_enabled(void);
+
+static inline bool __pure efi_soft_reserve_enabled(void)
+{
+ return IS_ENABLED(CONFIG_EFI_SOFT_RESERVE)
+ && __efi_soft_reserve_enabled();
+}
+
+static inline bool efi_rt_services_supported(unsigned int mask)
+{
+ return (efi.runtime_supported_mask & mask) == mask;
+}
+extern void efi_find_mirror(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1123,16 +899,17 @@ static inline bool efi_enabled(int feature)
static inline void
efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
-static inline bool
-efi_capsule_pending(int *reset_type)
+static inline bool efi_soft_reserve_enabled(void)
{
return false;
}
-static inline bool efi_is_table_address(unsigned long phys_addr)
+static inline bool efi_rt_services_supported(unsigned int mask)
{
return false;
}
+
+static inline void efi_find_mirror(void) {}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -1148,7 +925,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
-#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
+#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS | \
EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
@@ -1162,13 +939,6 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN
/*
- * The type of search to perform when calling boottime->locate_handle
- */
-#define EFI_LOCATE_ALL_HANDLES 0
-#define EFI_LOCATE_BY_REGISTER_NOTIFY 1
-#define EFI_LOCATE_BY_PROTOCOL 2
-
-/*
* EFI Device Path information
*/
#define EFI_DEV_HW 0x01
@@ -1200,6 +970,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_MEDIA_VENDOR 3
#define EFI_DEV_MEDIA_FILE 4
#define EFI_DEV_MEDIA_PROTOCOL 5
+#define EFI_DEV_MEDIA_REL_OFFSET 8
#define EFI_DEV_BIOS_BOOT 0x05
#define EFI_DEV_END_PATH 0x7F
#define EFI_DEV_END_PATH2 0xFF
@@ -1207,30 +978,60 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_END_ENTIRE 0xFF
struct efi_generic_dev_path {
- u8 type;
- u8 sub_type;
- u16 length;
-} __attribute ((packed));
+ u8 type;
+ u8 sub_type;
+ u16 length;
+} __packed;
+
+struct efi_acpi_dev_path {
+ struct efi_generic_dev_path header;
+ u32 hid;
+ u32 uid;
+} __packed;
+
+struct efi_pci_dev_path {
+ struct efi_generic_dev_path header;
+ u8 fn;
+ u8 dev;
+} __packed;
+
+struct efi_vendor_dev_path {
+ struct efi_generic_dev_path header;
+ efi_guid_t vendorguid;
+ u8 vendordata[];
+} __packed;
+
+struct efi_rel_offset_dev_path {
+ struct efi_generic_dev_path header;
+ u32 reserved;
+ u64 starting_offset;
+ u64 ending_offset;
+} __packed;
+
+struct efi_mem_mapped_dev_path {
+ struct efi_generic_dev_path header;
+ u32 memory_type;
+ u64 starting_addr;
+ u64 ending_addr;
+} __packed;
+
+struct efi_file_path_dev_path {
+ struct efi_generic_dev_path header;
+ efi_char16_t filename[];
+} __packed;
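Nodes in a device path are traversed via the generic header's length field until the end-of-path node; a minimal sketch (EFI_DEV_END_PATH and EFI_DEV_END_ENTIRE are the existing defines above):

static const struct efi_generic_dev_path *
next_dev_path_node(const struct efi_generic_dev_path *node)
{
        if (node->type == EFI_DEV_END_PATH &&
            node->sub_type == EFI_DEV_END_ENTIRE)
                return NULL;            /* end of the entire device path */
        return (const void *)node + node->length;
}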
struct efi_dev_path {
- u8 type; /* can be replaced with unnamed */
- u8 sub_type; /* struct efi_generic_dev_path; */
- u16 length; /* once we've moved to -std=c11 */
union {
- struct {
- u32 hid;
- u32 uid;
- } acpi;
- struct {
- u8 fn;
- u8 dev;
- } pci;
+ struct efi_generic_dev_path header;
+ struct efi_acpi_dev_path acpi;
+ struct efi_pci_dev_path pci;
+ struct efi_vendor_dev_path vendor;
+ struct efi_rel_offset_dev_path rel_offset;
};
-} __attribute ((packed));
+} __packed;
-#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER)
-struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len);
-#endif
+struct device *efi_get_device_by_path(const struct efi_dev_path **node,
+ size_t *len);
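For illustration, a minimal sketch of walking a raw device-path buffer with the node layout above; the buffer origin (e.g. a bootloader-supplied EFI variable) and the pr_info() reporting are placeholders, not part of this header:

static void devpath_walk_sketch(const struct efi_dev_path *node)
{
	/* Nodes are packed back to back; each one is self-describing. */
	while (node->header.type != EFI_DEV_END_PATH &&
	       node->header.type != EFI_DEV_END_PATH2) {
		pr_info("node type %#x sub_type %#x len %u\n",
			node->header.type, node->header.sub_type,
			node->header.length);
		node = (const void *)node + node->header.length;
	}
}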
static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
{
@@ -1251,193 +1052,62 @@ struct efivar_operations {
efi_set_variable_t *set_variable;
efi_set_variable_t *set_variable_nonblocking;
efi_query_variable_store_t *query_variable_store;
+ efi_query_variable_info_t *query_variable_info;
};
struct efivars {
struct kset *kset;
- struct kobject *kobject;
const struct efivar_operations *ops;
};
+#ifdef CONFIG_X86
+u64 __attribute_const__ efivar_reserved_space(void);
+#else
+static inline u64 efivar_reserved_space(void) { return 0; }
+#endif
+
/*
- * The maximum size of VariableName + Data = 1024
- * Therefore, it's reasonable to save that much
- * space in each part of the structure,
- * and we use a page for reading/writing.
+ * There is no actual upper limit specified for the variable name size.
+ *
+ * This limit exists only for practical purposes, since name conversions
+ * are bounds-checked and name data is occasionally stored in-line.
*/
-
#define EFI_VAR_NAME_LEN 1024
-struct efi_variable {
- efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
- efi_guid_t VendorGuid;
- unsigned long DataSize;
- __u8 Data[1024];
- efi_status_t Status;
- __u32 Attributes;
-} __attribute__((packed));
-
-struct efivar_entry {
- struct efi_variable var;
- struct list_head list;
- struct kobject kobj;
- bool scanning;
- bool deleting;
-};
-
-typedef struct {
- u32 reset;
- u32 output_string;
- u32 test_string;
-} efi_simple_text_output_protocol_32_t;
-
-typedef struct {
- u64 reset;
- u64 output_string;
- u64 test_string;
-} efi_simple_text_output_protocol_64_t;
-
-struct efi_simple_text_output_protocol {
- void *reset;
- efi_status_t (*output_string)(void *, void *);
- void *test_string;
-};
-
-#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
-#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
-#define PIXEL_BIT_MASK 2
-#define PIXEL_BLT_ONLY 3
-#define PIXEL_FORMAT_MAX 4
-
-struct efi_pixel_bitmask {
- u32 red_mask;
- u32 green_mask;
- u32 blue_mask;
- u32 reserved_mask;
-};
-
-struct efi_graphics_output_mode_info {
- u32 version;
- u32 horizontal_resolution;
- u32 vertical_resolution;
- int pixel_format;
- struct efi_pixel_bitmask pixel_information;
- u32 pixels_per_scan_line;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_32 {
- u32 max_mode;
- u32 mode;
- u32 info;
- u32 size_of_info;
- u64 frame_buffer_base;
- u32 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_64 {
- u32 max_mode;
- u32 mode;
- u64 info;
- u64 size_of_info;
- u64 frame_buffer_base;
- u64 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode {
- u32 max_mode;
- u32 mode;
- unsigned long info;
- unsigned long size_of_info;
- u64 frame_buffer_base;
- unsigned long frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_32 {
- u32 query_mode;
- u32 set_mode;
- u32 blt;
- u32 mode;
-};
-
-struct efi_graphics_output_protocol_64 {
- u64 query_mode;
- u64 set_mode;
- u64 blt;
- u64 mode;
-};
-
-struct efi_graphics_output_protocol {
- unsigned long query_mode;
- unsigned long set_mode;
- unsigned long blt;
- struct efi_graphics_output_protocol_mode *mode;
-};
-
-typedef efi_status_t (*efi_graphics_output_protocol_query_mode)(
- struct efi_graphics_output_protocol *, u32, unsigned long *,
- struct efi_graphics_output_mode_info **);
-
-extern struct list_head efivar_sysfs_list;
-
-static inline void
-efivar_unregister(struct efivar_entry *var)
-{
- kobject_put(&var->kobj);
-}
-
int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject);
+ const struct efivar_operations *ops);
int efivars_unregister(struct efivars *efivars);
-struct kobject *efivars_kobject(void);
-
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool duplicates, struct list_head *head);
-
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-int efivar_entry_remove(struct efivar_entry *entry);
-
-int __efivar_entry_delete(struct efivar_entry *entry);
-int efivar_entry_delete(struct efivar_entry *entry);
-int efivar_entry_size(struct efivar_entry *entry, unsigned long *size);
-int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
- unsigned long size, void *data, struct list_head *head);
-int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
- unsigned long *size, void *data, bool *set);
-int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
- bool block, unsigned long size, void *data);
+#ifdef CONFIG_EFI
+bool efivar_is_available(void);
+#else
+static inline bool efivar_is_available(void) { return false; }
+#endif
-int efivar_entry_iter_begin(void);
-void efivar_entry_iter_end(void);
+bool efivar_supports_writes(void);
-int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data,
- struct efivar_entry **prev);
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data);
+int efivar_lock(void);
+int efivar_trylock(void);
+void efivar_unlock(void);
-struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
- struct list_head *head, bool remove);
+efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *size, void *data);
-bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
- unsigned long data_size);
-bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
- size_t len);
+efi_status_t efivar_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name, efi_guid_t *vendor);
-extern struct work_struct efivar_work;
-void efivar_run_worker(void);
+efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data, bool nonblocking);
-#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
-int efivars_sysfs_init(void);
+efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data);
-#define EFIVARS_DATA_SIZE_MAX 1024
+efi_status_t efivar_query_variable_info(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size);
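A hedged usage sketch of the locked variable accessors above, assuming efivar_lock() returns 0 on success; the variable name is only an example:

static efi_status_t read_boot_order(void *buf, unsigned long *size)
{
	efi_guid_t vendor = EFI_GLOBAL_VARIABLE_GUID;
	efi_status_t status;

	if (efivar_lock())
		return EFI_ABORTED;
	status = efivar_get_variable(L"BootOrder", &vendor, NULL, size, buf);
	efivar_unlock();

	return status;
}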
-#endif /* CONFIG_EFI_VARS */
+#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER)
extern bool efi_capsule_pending(int *reset_type);
extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
@@ -1445,78 +1115,20 @@ extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
extern int efi_capsule_update(efi_capsule_header_t *capsule,
phys_addr_t *pages);
-
-#ifdef CONFIG_EFI_RUNTIME_MAP
-int efi_runtime_map_init(struct kobject *);
-int efi_get_runtime_map_size(void);
-int efi_get_runtime_map_desc_size(void);
-int efi_runtime_map_copy(void *buf, size_t bufsz);
#else
-static inline int efi_runtime_map_init(struct kobject *kobj)
-{
- return 0;
-}
-
-static inline int efi_get_runtime_map_size(void)
-{
- return 0;
-}
-
-static inline int efi_get_runtime_map_desc_size(void)
-{
- return 0;
-}
-
-static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
-{
- return 0;
-}
-
+static inline bool efi_capsule_pending(int *reset_type) { return false; }
#endif
-/* prototypes shared between arch specific and generic stub code */
-
-void efi_printk(efi_system_table_t *sys_table_arg, char *str);
-
-void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
- unsigned long addr);
-
-char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image, int *cmd_line_len);
-
-efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map);
-
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr);
-
-efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long max);
-
-efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
- unsigned long image_size,
- unsigned long alloc_size,
- unsigned long preferred_addr,
- unsigned long alignment);
-
-efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
- char *cmd_line, char *option_string,
- unsigned long max_addr,
- unsigned long *load_addr,
- unsigned long *load_size);
-
-efi_status_t efi_parse_options(char const *cmdline);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
-efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
- struct screen_info *si, efi_guid_t *proto,
- unsigned long size);
+extern void efi_call_virt_check_flags(unsigned long flags, const void *caller);
+extern unsigned long efi_call_virt_save_flags(void);
-bool efi_runtime_disabled(void);
-extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+void efi_runtime_assert_lock_held(void);
enum efi_secureboot_mode {
efi_secureboot_mode_unset,
@@ -1524,11 +1136,39 @@ enum efi_secureboot_mode {
efi_secureboot_mode_disabled,
efi_secureboot_mode_enabled,
};
-enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
+
+static inline
+enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var)
+{
+ u8 secboot, setupmode = 0;
+ efi_status_t status;
+ unsigned long size;
+
+ size = sizeof(secboot);
+ status = get_var(L"SecureBoot", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size,
+ &secboot);
+ if (status == EFI_NOT_FOUND)
+ return efi_secureboot_mode_disabled;
+ if (status != EFI_SUCCESS)
+ return efi_secureboot_mode_unknown;
+
+ size = sizeof(setupmode);
+ get_var(L"SetupMode", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size, &setupmode);
+ if (secboot == 0 || setupmode == 1)
+ return efi_secureboot_mode_disabled;
+ return efi_secureboot_mode_enabled;
+}
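At runtime the helper can be fed the firmware's GetVariable wrapper; a sketch, assuming the global efi object exposes one as efi.get_variable:

static bool secure_boot_active(void)
{
	return efi_get_secureboot_mode(efi.get_variable) ==
	       efi_secureboot_mode_enabled;
}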
+
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+void efi_check_for_embedded_firmwares(void);
+#else
+static inline void efi_check_for_embedded_firmwares(void) { }
+#endif
+
+#define arch_efi_call_virt(p, f, args...) ((p)->f(args))
/*
- * Arch code can implement the following three template macros, avoiding
- * reptition for the void/non-void return cases of {__,}efi_call_virt():
+ * Arch code must implement the following three routines:
*
* * arch_efi_call_virt_setup()
*
@@ -1537,9 +1177,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
*
* * arch_efi_call_virt()
*
- * Performs the call. The last expression in the macro must be the call
- * itself, allowing the logic to be shared by the void and non-void
- * cases.
+ * Performs the call. This routine takes a variable number of arguments so
+ * it must be implemented as a variadic preprocessor macro.
*
* * arch_efi_call_virt_teardown()
*
@@ -1548,47 +1187,180 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
#define efi_call_virt_pointer(p, f, args...) \
({ \
- efi_status_t __s; \
+ typeof((p)->f(args)) __s; \
unsigned long __flags; \
\
arch_efi_call_virt_setup(); \
\
- local_save_flags(__flags); \
+ __flags = efi_call_virt_save_flags(); \
__s = arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
+ efi_call_virt_check_flags(__flags, NULL); \
\
arch_efi_call_virt_teardown(); \
\
__s; \
})
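Since the macro evaluates to the service's return value, a wrapper only has to forward it. A sketch, with rt standing in for an arch-provided pointer to the runtime services table (an assumption, not something defined here):

static efi_status_t get_time_sketch(efi_time_t *tm, efi_time_cap_t *tc)
{
	/* rt is assumed to point at the virtual runtime services table. */
	return efi_call_virt_pointer(rt, get_time, tm, tc);
}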
-#define __efi_call_virt_pointer(p, f, args...) \
-({ \
- unsigned long __flags; \
- \
- arch_efi_call_virt_setup(); \
- \
- local_save_flags(__flags); \
- arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
- \
- arch_efi_call_virt_teardown(); \
-})
-
-typedef efi_status_t (*efi_exit_boot_map_processing)(
- efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map,
- void *priv);
-
-efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
- void *handle,
- struct efi_boot_memmap *map,
- void *priv,
- efi_exit_boot_map_processing priv_func);
+#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
u32 size;
u8 bits[];
};
+struct linux_efi_tpm_eventlog {
+ u32 size;
+ u32 final_events_preboot_size;
+ u8 version;
+ u8 log[];
+};
+
+extern int efi_tpm_eventlog_init(void);
+
+struct efi_tcg2_final_events_table {
+ u64 version;
+ u64 nr_events;
+ u8 events[];
+};
+extern int efi_tpm_final_log_size;
+
+extern unsigned long rci2_table_phys;
+
+efi_status_t
+efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
+ u64 param_buffer_addr, void *context);
+
+/*
+ * efi_runtime_service() function identifiers.
+ * "NONE" is used by efi_recover_from_page_fault() to check if the page
+ * fault happened while executing an efi runtime service.
+ */
+enum efi_rts_ids {
+ EFI_NONE,
+ EFI_GET_TIME,
+ EFI_SET_TIME,
+ EFI_GET_WAKEUP_TIME,
+ EFI_SET_WAKEUP_TIME,
+ EFI_GET_VARIABLE,
+ EFI_GET_NEXT_VARIABLE,
+ EFI_SET_VARIABLE,
+ EFI_QUERY_VARIABLE_INFO,
+ EFI_GET_NEXT_HIGH_MONO_COUNT,
+ EFI_RESET_SYSTEM,
+ EFI_UPDATE_CAPSULE,
+ EFI_QUERY_CAPSULE_CAPS,
+ EFI_ACPI_PRM_HANDLER,
+};
+
+union efi_rts_args;
+
+/*
+ * efi_runtime_work: Details of EFI Runtime Service work
+ * @args: Pointer to union describing the arguments
+ * @status: Status of executing EFI Runtime Service
+ * @efi_rts_id: EFI Runtime Service function identifier
+ * @efi_rts_comp: Struct used for handling completions
+ * @caller: The caller of the runtime service
+ */
+struct efi_runtime_work {
+ union efi_rts_args *args;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+ const void *caller;
+};
+
+extern struct efi_runtime_work efi_rts_work;
+
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
+struct linux_efi_memreserve {
+ int size; // allocated size of the array
+ atomic_t count; // number of entries used
+ phys_addr_t next; // pa of next struct instance
+ struct {
+ phys_addr_t base;
+ phys_addr_t size;
+ } entry[];
+};
+
+#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
+ / sizeof_field(struct linux_efi_memreserve, entry[0]))
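As a worked instance of the macro (a sketch; the result depends on the architecture's phys_addr_t and atomic_t sizes), the capacity of a single-page allocation is:

static inline int memreserve_entries_per_page(void)
{
	return EFI_MEMRESERVE_COUNT(PAGE_SIZE);	/* PAGE_SIZE from asm/page.h */
}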
+
+void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
+
+/*
+ * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided
+ * to the kernel by an EFI boot loader. The table contains a packed
+ * sequence of these entries, one for each named MOK variable.
+ * The sequence is terminated by an entry with a completely NULL
+ * name and 0 data size.
+ */
+struct efi_mokvar_table_entry {
+ char name[256];
+ u64 data_size;
+ u8 data[];
+} __attribute((packed));
+
+#ifdef CONFIG_LOAD_UEFI_KEYS
+extern void __init efi_mokvar_table_init(void);
+extern struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry);
+extern struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name);
+#else
+static inline void efi_mokvar_table_init(void) { }
+static inline struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry)
+{
+ return NULL;
+}
+static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
+ const char *name)
+{
+ return NULL;
+}
+#endif
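A hedged sketch of iterating the table with the cursor-style helper above, assuming a NULL cursor starts at the first entry and NULL is returned past the last:

static void mokvar_dump_names(void)
{
	struct efi_mokvar_table_entry *ent = NULL;

	while (efi_mokvar_entry_next(&ent))
		pr_info("MOK var %s: %llu bytes\n", ent->name,
			(unsigned long long)ent->data_size);
}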
+
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
+struct linux_efi_coco_secret_area {
+ u64 base_pa;
+ u64 size;
+};
+
+struct linux_efi_initrd {
+ unsigned long base;
+ unsigned long size;
+};
+
+/* Header of a populated EFI secret area */
+#define EFI_SECRET_TABLE_HEADER_GUID EFI_GUID(0x1e74f542, 0x71dd, 0x4d66, 0x96, 0x3e, 0xef, 0x42, 0x87, 0xff, 0x17, 0x3b)
+
+bool xen_efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table);
+
+static __always_inline
+bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
+{
+ if (!IS_ENABLED(CONFIG_XEN_EFI))
+ return true;
+ return xen_efi_config_table_is_usable(guid, table);
+}
+
+umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+
+int ovmf_log_probe(unsigned long ovmf_debug_log_table);
+
+/*
+ * efivar ops event type
+ */
+#define EFIVAR_OPS_RDONLY 0
+#define EFIVAR_OPS_RDWR 1
+
+extern struct blocking_notifier_head efivar_ops_nh;
+
+void efivars_generic_ops_register(void);
+void efivars_generic_ops_unregister(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h
new file mode 100644
index 000000000000..a97a12bb2c9e
--- /dev/null
+++ b/include/linux/efi_embedded_fw.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EFI_EMBEDDED_FW_H
+#define _LINUX_EFI_EMBEDDED_FW_H
+
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+
+#define EFI_EMBEDDED_FW_PREFIX_LEN 8
+
+/*
+ * This struct is private to the efi-embedded fw implementation.
+ * It is in this header for use by lib/test_firmware.c only!
+ */
+struct efi_embedded_fw {
+ struct list_head list;
+ const char *name;
+ const u8 *data;
+ size_t length;
+};
+
+/**
+ * struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw
+ * code to search for embedded firmwares.
+ *
+ * @name: Name to register the firmware with if found
+ * @prefix: First 8 bytes of the firmware
+ * @length: Length of the firmware in bytes including prefix
+ * @sha256: SHA256 of the firmware
+ */
+struct efi_embedded_fw_desc {
+ const char *name;
+ u8 prefix[EFI_EMBEDDED_FW_PREFIX_LEN];
+ u32 length;
+ u8 sha256[32];
+};
+
+extern const struct dmi_system_id touchscreen_dmi_table[];
+
+int efi_get_embedded_fw(const char *name, const u8 **dat, size_t *sz);
+
+#endif
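A sketch of a descriptor a driver might feed to the embedded-firmware lookup; every value below (name, prefix bytes, length, digest) is a placeholder:

static const struct efi_embedded_fw_desc example_fw_desc = {
	.name	= "example/fw.bin",			/* hypothetical */
	.prefix	= { 0x55, 0xaa, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 },
	.length	= 4096,
	.sha256	= { 0 /* 32-byte digest of the blob */ },
};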
diff --git a/include/linux/efs_vh.h b/include/linux/efs_vh.h
index 8a11150c61fe..206c5270f7b8 100644
--- a/include/linux/efs_vh.h
+++ b/include/linux/efs_vh.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* efs_vh.h
*
diff --git a/include/linux/ehl_pse_io_aux.h b/include/linux/ehl_pse_io_aux.h
new file mode 100644
index 000000000000..afb8587ee5fb
--- /dev/null
+++ b/include/linux/ehl_pse_io_aux.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Elkhart Lake PSE I/O Auxiliary Device
+ *
+ * Copyright (c) 2025 Intel Corporation.
+ *
+ * Author: Raag Jadav <raag.jadav@intel.com>
+ */
+
+#ifndef _EHL_PSE_IO_AUX_H_
+#define _EHL_PSE_IO_AUX_H_
+
+#include <linux/ioport.h>
+
+#define EHL_PSE_IO_NAME "ehl_pse_io"
+#define EHL_PSE_GPIO_NAME "gpio"
+#define EHL_PSE_TIO_NAME "pps_tio"
+
+struct ehl_pse_io_data {
+ struct resource mem;
+ int irq;
+};
+
+#endif /* _EHL_PSE_IO_AUX_H_ */
diff --git a/include/linux/eisa.h b/include/linux/eisa.h
index 6925249a5ac6..cf55630b595b 100644
--- a/include/linux/eisa.h
+++ b/include/linux/eisa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EISA_H
#define _LINUX_EISA_H
@@ -27,6 +28,9 @@
#define EISA_CONFIG_ENABLED 1
#define EISA_CONFIG_FORCED 2
+/* Chosen to hold the longest string in eisa.ids. */
+#define EISA_DEVICE_INFO_NAME_SIZE 74
+
/* There is not much we can say about an EISA device, apart from
* signature, slot number, and base address. dma_mask is set by
* default to parent device mask..*/
@@ -40,7 +44,7 @@ struct eisa_device {
u64 dma_mask;
struct device dev; /* generic device */
#ifdef CONFIG_EISA_NAMES
- char pretty_name[50];
+ char pretty_name[EISA_DEVICE_INFO_NAME_SIZE];
#endif
};
@@ -59,12 +63,12 @@ struct eisa_driver {
struct device_driver driver;
};
-#define to_eisa_driver(drv) container_of(drv,struct eisa_driver, driver)
+#define to_eisa_driver(drv) container_of_const(drv, struct eisa_driver, driver)
/* These external functions are only available when EISA support is enabled. */
#ifdef CONFIG_EISA
-extern struct bus_type eisa_bus_type;
+extern const struct bus_type eisa_bus_type;
int eisa_driver_register (struct eisa_driver *edrv);
void eisa_driver_unregister (struct eisa_driver *edrv);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
deleted file mode 100644
index 5bc8f8682a3e..000000000000
--- a/include/linux/elevator.h
+++ /dev/null
@@ -1,270 +0,0 @@
-#ifndef _LINUX_ELEVATOR_H
-#define _LINUX_ELEVATOR_H
-
-#include <linux/percpu.h>
-#include <linux/hashtable.h>
-
-#ifdef CONFIG_BLOCK
-
-struct io_cq;
-struct elevator_type;
-#ifdef CONFIG_BLK_DEBUG_FS
-struct blk_mq_debugfs_attr;
-#endif
-
-/*
- * Return values from elevator merger
- */
-enum elv_merge {
- ELEVATOR_NO_MERGE = 0,
- ELEVATOR_FRONT_MERGE = 1,
- ELEVATOR_BACK_MERGE = 2,
- ELEVATOR_DISCARD_MERGE = 3,
-};
-
-typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
- struct bio *);
-
-typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
-
-typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
-
-typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_allow_rq_merge_fn) (struct request_queue *,
- struct request *, struct request *);
-
-typedef void (elevator_bio_merged_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_dispatch_fn) (struct request_queue *, int);
-
-typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
-typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
-typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
-
-typedef void (elevator_init_icq_fn) (struct io_cq *);
-typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
- struct bio *, gfp_t);
-typedef void (elevator_put_req_fn) (struct request *);
-typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
-typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
-
-typedef int (elevator_init_fn) (struct request_queue *,
- struct elevator_type *e);
-typedef void (elevator_exit_fn) (struct elevator_queue *);
-typedef void (elevator_registered_fn) (struct request_queue *);
-
-struct elevator_ops
-{
- elevator_merge_fn *elevator_merge_fn;
- elevator_merged_fn *elevator_merged_fn;
- elevator_merge_req_fn *elevator_merge_req_fn;
- elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
- elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
- elevator_bio_merged_fn *elevator_bio_merged_fn;
-
- elevator_dispatch_fn *elevator_dispatch_fn;
- elevator_add_req_fn *elevator_add_req_fn;
- elevator_activate_req_fn *elevator_activate_req_fn;
- elevator_deactivate_req_fn *elevator_deactivate_req_fn;
-
- elevator_completed_req_fn *elevator_completed_req_fn;
-
- elevator_request_list_fn *elevator_former_req_fn;
- elevator_request_list_fn *elevator_latter_req_fn;
-
- elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */
- elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */
-
- elevator_set_req_fn *elevator_set_req_fn;
- elevator_put_req_fn *elevator_put_req_fn;
-
- elevator_may_queue_fn *elevator_may_queue_fn;
-
- elevator_init_fn *elevator_init_fn;
- elevator_exit_fn *elevator_exit_fn;
- elevator_registered_fn *elevator_registered_fn;
-};
-
-struct blk_mq_alloc_data;
-struct blk_mq_hw_ctx;
-
-struct elevator_mq_ops {
- int (*init_sched)(struct request_queue *, struct elevator_type *);
- void (*exit_sched)(struct elevator_queue *);
- int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
- void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
-
- bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
- bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
- int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
- void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
- void (*requests_merged)(struct request_queue *, struct request *, struct request *);
- void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
- void (*prepare_request)(struct request *, struct bio *bio);
- void (*finish_request)(struct request *);
- void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
- struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
- bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct request *);
- void (*started_request)(struct request *);
- void (*requeue_request)(struct request *);
- struct request *(*former_request)(struct request_queue *, struct request *);
- struct request *(*next_request)(struct request_queue *, struct request *);
- void (*init_icq)(struct io_cq *);
- void (*exit_icq)(struct io_cq *);
-};
-
-#define ELV_NAME_MAX (16)
-
-struct elv_fs_entry {
- struct attribute attr;
- ssize_t (*show)(struct elevator_queue *, char *);
- ssize_t (*store)(struct elevator_queue *, const char *, size_t);
-};
-
-/*
- * identifies an elevator type, such as AS or deadline
- */
-struct elevator_type
-{
- /* managed by elevator core */
- struct kmem_cache *icq_cache;
-
- /* fields provided by elevator implementation */
- union {
- struct elevator_ops sq;
- struct elevator_mq_ops mq;
- } ops;
- size_t icq_size; /* see iocontext.h */
- size_t icq_align; /* ditto */
- struct elv_fs_entry *elevator_attrs;
- char elevator_name[ELV_NAME_MAX];
- struct module *elevator_owner;
- bool uses_mq;
-#ifdef CONFIG_BLK_DEBUG_FS
- const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
- const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
-#endif
-
- /* managed by elevator core */
- char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
- struct list_head list;
-};
-
-#define ELV_HASH_BITS 6
-
-void elv_rqhash_del(struct request_queue *q, struct request *rq);
-void elv_rqhash_add(struct request_queue *q, struct request *rq);
-void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
-struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
-
-/*
- * each queue has an elevator_queue associated with it
- */
-struct elevator_queue
-{
- struct elevator_type *type;
- void *elevator_data;
- struct kobject kobj;
- struct mutex sysfs_lock;
- unsigned int registered:1;
- unsigned int uses_mq:1;
- DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
-};
-
-/*
- * block elevator interface
- */
-extern void elv_dispatch_sort(struct request_queue *, struct request *);
-extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
-extern void elv_add_request(struct request_queue *, struct request *, int);
-extern void __elv_add_request(struct request_queue *, struct request *, int);
-extern enum elv_merge elv_merge(struct request_queue *, struct request **,
- struct bio *);
-extern void elv_merge_requests(struct request_queue *, struct request *,
- struct request *);
-extern void elv_merged_request(struct request_queue *, struct request *,
- enum elv_merge);
-extern void elv_bio_merged(struct request_queue *q, struct request *,
- struct bio *);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
-extern void elv_requeue_request(struct request_queue *, struct request *);
-extern struct request *elv_former_request(struct request_queue *, struct request *);
-extern struct request *elv_latter_request(struct request_queue *, struct request *);
-extern int elv_register_queue(struct request_queue *q);
-extern void elv_unregister_queue(struct request_queue *q);
-extern int elv_may_queue(struct request_queue *, unsigned int);
-extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *q, struct request *rq,
- struct bio *bio, gfp_t gfp_mask);
-extern void elv_put_request(struct request_queue *, struct request *);
-extern void elv_drain_elevator(struct request_queue *);
-
-/*
- * io scheduler registration
- */
-extern void __init load_default_elevator_module(void);
-extern int elv_register(struct elevator_type *);
-extern void elv_unregister(struct elevator_type *);
-
-/*
- * io scheduler sysfs switching
- */
-extern ssize_t elv_iosched_show(struct request_queue *, char *);
-extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
-
-extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct request_queue *, struct elevator_queue *);
-extern bool elv_bio_merge_ok(struct request *, struct bio *);
-extern struct elevator_queue *elevator_alloc(struct request_queue *,
- struct elevator_type *);
-
-/*
- * Helper functions.
- */
-extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
-extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
-
-/*
- * rb support functions.
- */
-extern void elv_rb_add(struct rb_root *, struct request *);
-extern void elv_rb_del(struct rb_root *, struct request *);
-extern struct request *elv_rb_find(struct rb_root *, sector_t);
-
-/*
- * Insertion selection
- */
-#define ELEVATOR_INSERT_FRONT 1
-#define ELEVATOR_INSERT_BACK 2
-#define ELEVATOR_INSERT_SORT 3
-#define ELEVATOR_INSERT_REQUEUE 4
-#define ELEVATOR_INSERT_FLUSH 5
-#define ELEVATOR_INSERT_SORT_MERGE 6
-
-/*
- * return values from elevator_may_queue_fn
- */
-enum {
- ELV_MQUEUE_MAY,
- ELV_MQUEUE_NO,
- ELV_MQUEUE_MUST,
-};
-
-#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
-#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
-
-#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
-
-#else /* CONFIG_BLOCK */
-
-static inline void load_default_elevator_module(void) { }
-
-#endif /* CONFIG_BLOCK */
-#endif
diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h
index 386440317b0c..e533f4513194 100644
--- a/include/linux/elf-fdpic.h
+++ b/include/linux/elf-fdpic.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* FDPIC ELF load map
*
* Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_ELF_FDPIC_H
@@ -14,13 +10,25 @@
#include <uapi/linux/elf-fdpic.h>
+#if ELF_CLASS == ELFCLASS32
+#define Elf_Sword Elf32_Sword
+#define elf_fdpic_loadseg elf32_fdpic_loadseg
+#define elf_fdpic_loadmap elf32_fdpic_loadmap
+#define ELF_FDPIC_LOADMAP_VERSION ELF32_FDPIC_LOADMAP_VERSION
+#else
+#define Elf_Sword Elf64_Sxword
+#define elf_fdpic_loadmap elf64_fdpic_loadmap
+#define elf_fdpic_loadseg elf64_fdpic_loadseg
+#define ELF_FDPIC_LOADMAP_VERSION ELF64_FDPIC_LOADMAP_VERSION
+#endif
+
/*
* binfmt binary parameters structure
*/
struct elf_fdpic_params {
struct elfhdr hdr; /* ref copy of ELF header */
struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */
- struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
+ struct elf_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
unsigned long elfhdr_addr; /* mapped ELF header user address */
unsigned long ph_addr; /* mapped PT_PHDR user address */
unsigned long map_addr; /* mapped loadmap user address */
diff --git a/include/linux/elf-randomize.h b/include/linux/elf-randomize.h
index b5f0bda9472e..da0dbb7b6be3 100644
--- a/include/linux/elf-randomize.h
+++ b/include/linux/elf-randomize.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ELF_RANDOMIZE_H
#define _ELF_RANDOMIZE_H
diff --git a/include/linux/elf.h b/include/linux/elf.h
index ba069e8f4f78..5c402788da19 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELF_H
#define _LINUX_ELF_H
+#include <linux/types.h>
#include <asm/elf.h>
#include <uapi/linux/elf.h>
@@ -20,6 +22,19 @@
SET_PERSONALITY(ex)
#endif
+#ifndef START_THREAD
+#define START_THREAD(elf_ex, regs, elf_entry, start_stack) \
+ start_thread(regs, elf_entry, start_stack)
+#endif
+
+#if defined(ARCH_HAS_SETUP_ADDITIONAL_PAGES) && !defined(ARCH_SETUP_ADDITIONAL_PAGES)
+#define ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \
+ arch_setup_additional_pages(bprm, interpreter)
+#endif
+
+#define ELF32_GNU_PROPERTY_ALIGN 4
+#define ELF64_GNU_PROPERTY_ALIGN 8
+
#if ELF_CLASS == ELFCLASS32
extern Elf32_Dyn _DYNAMIC [];
@@ -30,6 +45,7 @@ extern Elf32_Dyn _DYNAMIC [];
#define elf_addr_t Elf32_Off
#define Elf_Half Elf32_Half
#define Elf_Word Elf32_Word
+#define ELF_GNU_PROPERTY_ALIGN ELF32_GNU_PROPERTY_ALIGN
#else
@@ -41,6 +57,7 @@ extern Elf64_Dyn _DYNAMIC [];
#define elf_addr_t Elf64_Off
#define Elf_Half Elf64_Half
#define Elf_Word Elf64_Word
+#define ELF_GNU_PROPERTY_ALIGN ELF64_GNU_PROPERTY_ALIGN
#endif
@@ -48,11 +65,48 @@ extern Elf64_Dyn _DYNAMIC [];
struct file;
struct coredump_params;
-#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
+#ifndef CONFIG_ARCH_HAVE_EXTRA_ELF_NOTES
static inline int elf_coredump_extra_notes_size(void) { return 0; }
static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; }
#else
extern int elf_coredump_extra_notes_size(void);
extern int elf_coredump_extra_notes_write(struct coredump_params *cprm);
#endif
+
+/*
+ * NT_GNU_PROPERTY_TYPE_0 header:
+ * Keep this internal until/unless there is an agreed UAPI definition.
+ * pr_type values (GNU_PROPERTY_*) are public and defined in the UAPI header.
+ */
+struct gnu_property {
+ u32 pr_type;
+ u32 pr_datasz;
+};
+
+struct arch_elf_state;
+
+#ifndef CONFIG_ARCH_USE_GNU_PROPERTY
+static inline int arch_parse_elf_property(u32 type, const void *data,
+ size_t datasz, bool compat,
+ struct arch_elf_state *arch)
+{
+ return 0;
+}
+#else
+extern int arch_parse_elf_property(u32 type, const void *data, size_t datasz,
+ bool compat, struct arch_elf_state *arch);
+#endif
+
+#ifdef CONFIG_ARCH_HAVE_ELF_PROT
+int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
+ bool has_interp, bool is_interp);
+#else
+static inline int arch_elf_adjust_prot(int prot,
+ const struct arch_elf_state *state,
+ bool has_interp, bool is_interp)
+{
+ return prot;
+}
+#endif
+
#endif /* _LINUX_ELF_H */
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index 0a90e1c3a422..54feb64e9b5d 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELFCORE_COMPAT_H
#define _LINUX_ELFCORE_COMPAT_H
@@ -16,7 +17,7 @@ struct compat_elf_siginfo
compat_int_t si_errno;
};
-struct compat_elf_prstatus
+struct compat_elf_prstatus_common
{
struct compat_elf_siginfo pr_info;
short pr_cursig;
@@ -26,16 +27,10 @@ struct compat_elf_prstatus
compat_pid_t pr_ppid;
compat_pid_t pr_pgrp;
compat_pid_t pr_sid;
- struct compat_timeval pr_utime;
- struct compat_timeval pr_stime;
- struct compat_timeval pr_cutime;
- struct compat_timeval pr_cstime;
- compat_elf_gregset_t pr_reg;
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- compat_ulong_t pr_exec_fdpic_loadmap;
- compat_ulong_t pr_interp_fdpic_loadmap;
-#endif
- compat_int_t pr_fpvalid;
+ struct old_timeval32 pr_utime;
+ struct old_timeval32 pr_stime;
+ struct old_timeval32 pr_cutime;
+ struct old_timeval32 pr_cstime;
};
struct compat_elf_prpsinfo
@@ -48,8 +43,24 @@ struct compat_elf_prpsinfo
__compat_uid_t pr_uid;
__compat_gid_t pr_gid;
compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /*
+ * The hard-coded 16 is derived from TASK_COMM_LEN, but it can't be
+ * changed as it is exposed to userspace, so it is kept hard-coded
+ * here.
+ */
char pr_fname[16];
char pr_psargs[ELF_PRARGSZ];
};
+#ifdef CONFIG_ARCH_HAS_ELFCORE_COMPAT
+#include <asm/elfcore-compat.h>
+#endif
+
+struct compat_elf_prstatus
+{
+ struct compat_elf_prstatus_common common;
+ compat_elf_gregset_t pr_reg;
+ compat_int_t pr_fpvalid;
+};
+
#endif /* _LINUX_ELFCORE_COMPAT_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index c8240a12c42d..bd5560542c79 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -1,15 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELFCORE_H
#define _LINUX_ELFCORE_H
#include <linux/user.h>
#include <linux/bug.h>
#include <linux/sched/task_stack.h>
-
-#include <asm/elf.h>
-#include <uapi/linux/elfcore.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/fs.h>
+#include <linux/elf.h>
struct coredump_params;
+struct elf_siginfo
+{
+ int si_signo; /* signal number */
+ int si_code; /* extra code */
+ int si_errno; /* errno */
+};
+
+/*
+ * Definitions to generate Intel SVR4-like core files.
+ * These mostly have the same names as the SVR4 types with "elf_"
+ * tacked on the front to prevent clashes with linux definitions,
+ * and the typedef forms have been avoided. This is mostly like
+ * the SVR4 structure, but more Linuxy, with things that Linux does
+ * not support and which gdb doesn't really use excluded.
+ */
+struct elf_prstatus_common
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned long pr_sigpend; /* Set of pending signals */
+ unsigned long pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct __kernel_old_timeval pr_utime; /* User time */
+ struct __kernel_old_timeval pr_stime; /* System time */
+ struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
+ struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
+};
+
+struct elf_prstatus
+{
+ struct elf_prstatus_common common;
+ elf_gregset_t pr_reg; /* GP registers */
+ int pr_fpvalid; /* True if math co-processor being used. */
+};
+
+#define ELF_PRARGSZ (80) /* Number of chars for args */
+
+struct elf_prpsinfo
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned long pr_flag; /* flags */
+ __kernel_uid_t pr_uid;
+ __kernel_gid_t pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+ /*
+ * The hard-coded 16 is derived from TASK_COMM_LEN, but it can't be
+ * changed as it is exposed to userspace, so it is kept hard-coded
+ * here.
+ */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
{
#ifdef ELF_CORE_COPY_REGS
@@ -20,43 +84,19 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
#endif
}
-static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
-{
-#ifdef ELF_CORE_COPY_KERNEL_REGS
- ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
-#else
- elf_core_copy_regs(elfregs, regs);
-#endif
-}
-
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#if defined (ELF_CORE_COPY_TASK_REGS)
return ELF_CORE_COPY_TASK_REGS(t, elfregs);
-#elif defined (task_pt_regs)
+#else
elf_core_copy_regs(elfregs, task_pt_regs(t));
#endif
return 0;
}
-extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
-
-static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-#ifdef ELF_CORE_COPY_FPREGS
- return ELF_CORE_COPY_FPREGS(t, fpu);
-#else
- return dump_fpu(regs, fpu);
-#endif
-}
-
-#ifdef ELF_CORE_COPY_XFPREGS
-static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
-{
- return ELF_CORE_COPY_XFPREGS(t, xfpu);
-}
-#endif
+int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
+#ifdef CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
@@ -65,11 +105,32 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the gate DSO was being used.
*/
-extern Elf_Half elf_core_extra_phdrs(void);
+extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm);
extern int
elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
extern int
elf_core_write_extra_data(struct coredump_params *cprm);
-extern size_t elf_core_extra_data_size(void);
+extern size_t elf_core_extra_data_size(struct coredump_params *cprm);
+#else
+static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
+{
+ return 0;
+}
+
+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+{
+ return 1;
+}
+
+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+{
+ return 1;
+}
+
+static inline size_t elf_core_extra_data_size(struct coredump_params *cprm)
+{
+ return 0;
+}
+#endif /* CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS */
#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/elfnote-lto.h b/include/linux/elfnote-lto.h
new file mode 100644
index 000000000000..d4635a3ecc4f
--- /dev/null
+++ b/include/linux/elfnote-lto.h
@@ -0,0 +1,14 @@
+#ifndef __ELFNOTE_LTO_H
+#define __ELFNOTE_LTO_H
+
+#include <linux/elfnote.h>
+
+#define LINUX_ELFNOTE_LTO_INFO 0x101
+
+#ifdef CONFIG_LTO
+#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 1)
+#else
+#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 0)
+#endif
+
+#endif /* __ELFNOTE_LTO_H */
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index 278e3ef05336..bb3dcded055f 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELFNOTE_H
#define _LINUX_ELFNOTE_H
/*
@@ -53,29 +54,27 @@
.popsection ;
#define ELFNOTE(name, type, desc) \
- ELFNOTE_START(name, type, "") \
+ ELFNOTE_START(name, type, "a") \
desc ; \
ELFNOTE_END
#else /* !__ASSEMBLER__ */
-#include <linux/elf.h>
+#include <uapi/linux/elf.h>
+#include <linux/compiler.h>
/*
* Use an anonymous structure which matches the shape of
* Elf{32,64}_Nhdr, but includes the name and desc data. The size and
* type of name and desc depend on the macro arguments. "name" must
- * be a literal string, and "desc" must be passed by value. You may
- * only define one note per line, since __LINE__ is used to generate
- * unique symbols.
+ * be a literal string, and "desc" must be passed by value.
*/
-#define _ELFNOTE_PASTE(a,b) a##b
-#define _ELFNOTE(size, name, unique, type, desc) \
+#define ELFNOTE(size, name, type, desc) \
static const struct { \
struct elf##size##_note _nhdr; \
unsigned char _name[sizeof(name)] \
__attribute__((aligned(sizeof(Elf##size##_Word)))); \
typeof(desc) _desc \
__attribute__((aligned(sizeof(Elf##size##_Word)))); \
- } _ELFNOTE_PASTE(_note_, unique) \
+ } __UNIQUE_ID(note) \
__used \
__attribute__((section(".note." name), \
aligned(sizeof(Elf##size##_Word)), \
@@ -88,11 +87,10 @@
name, \
desc \
}
-#define ELFNOTE(size, name, type, desc) \
- _ELFNOTE(size, name, __LINE__, type, desc)
#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
+
#endif /* __ASSEMBLER__ */
#endif /* _LINUX_ELFNOTE_H */
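A minimal sketch of the C-side macro in use; the note name, type and value are illustrative only:

#include <linux/elfnote.h>

ELFNOTE32("Example", 0x200, 7);	/* emits a .note.Example section */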
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index a4cf57cd0f75..1c630e2c2756 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Enclosure Services
*
@@ -5,18 +6,6 @@
*
**-----------------------------------------------------------------------------
**
-** This program is free software; you can redistribute it and/or
-** modify it under the terms of the GNU General Public License
-** version 2 as published by the Free Software Foundation.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
@@ -112,7 +101,7 @@ struct enclosure_device {
struct device edev;
struct enclosure_component_callbacks *cb;
int components;
- struct enclosure_component component[0];
+ struct enclosure_component component[];
};
static inline struct enclosure_device *
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
new file mode 100644
index 000000000000..43aa6153dc57
--- /dev/null
+++ b/include/linux/energy_model.h
@@ -0,0 +1,425 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ENERGY_MODEL_H
+#define _LINUX_ENERGY_MODEL_H
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/jump_label.h>
+#include <linux/kobject.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+#include <linux/sched/cpufreq.h>
+#include <linux/sched/topology.h>
+#include <linux/types.h>
+
+/**
+ * struct em_perf_state - Performance state of a performance domain
+ * @performance: CPU performance (capacity) at a given frequency
+ * @frequency: The frequency in KHz, for consistency with CPUFreq
+ * @power: The power consumed at this level (by 1 CPU or by a registered
+ * device). It can be a total power: static and dynamic.
+ * @cost: The cost coefficient associated with this level, used during
+ * energy calculation. Equal to: power * max_frequency / frequency
+ * @flags: see "em_perf_state flags" description below.
+ */
+struct em_perf_state {
+ unsigned long performance;
+ unsigned long frequency;
+ unsigned long power;
+ unsigned long cost;
+ unsigned long flags;
+};
+
+/*
+ * em_perf_state flags:
+ *
+ * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient. There is,
+ * in this em_perf_domain, another performance state with a higher frequency
+ * but a lower or equal power cost. Such inefficient states are ignored when
+ * using em_pd_get_efficient_*() functions.
+ */
+#define EM_PERF_STATE_INEFFICIENT BIT(0)
+
+/**
+ * struct em_perf_table - Performance states table
+ * @rcu: RCU used for safe access and destruction
+ * @kref: Reference counter to track the users
+ * @state: List of performance states, in ascending order
+ */
+struct em_perf_table {
+ struct rcu_head rcu;
+ struct kref kref;
+ struct em_perf_state state[];
+};
+
+/**
+ * struct em_perf_domain - Performance domain
+ * @em_table: Pointer to the runtime modifiable em_perf_table
+ * @node: node in em_pd_list (in energy_model.c)
+ * @id: A unique ID number for each performance domain
+ * @nr_perf_states: Number of performance states
+ * @min_perf_state: Minimum allowed Performance State index
+ * @max_perf_state: Maximum allowed Performance State index
+ * @flags: See "em_perf_domain flags"
+ * @cpus: Cpumask covering the CPUs of the domain. It's here
+ * for performance reasons to avoid potential cache
+ * misses during energy calculations in the scheduler
+ * and simplifies allocating/freeing that memory region.
+ *
+ * In case of CPU device, a "performance domain" represents a group of CPUs
+ * whose performance is scaled together. All CPUs of a performance domain
+ * must have the same micro-architecture. Performance domains often have
+ * a 1-to-1 mapping with CPUFreq policies. In case of other devices the @cpus
+ * field is unused.
+ */
+struct em_perf_domain {
+ struct em_perf_table __rcu *em_table;
+ struct list_head node;
+ int id;
+ int nr_perf_states;
+ int min_perf_state;
+ int max_perf_state;
+ unsigned long flags;
+ unsigned long cpus[];
+};
+
+/*
+ * em_perf_domain flags:
+ *
+ * EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts or some
+ * other scale.
+ *
+ * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
+ * energy consumption.
+ *
+ * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be
+ * created by a platform that lacks real power information.
+ */
+#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
+#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
+#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)
+
+#define em_span_cpus(em) (to_cpumask((em)->cpus))
+#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
+
+#ifdef CONFIG_ENERGY_MODEL
+/*
+ * The max power value in micro-Watts. The limit of 64 Watts is set as
+ * a safety net to not overflow multiplications on 32bit platforms. The
+ * 32bit value limit for total Perf Domain power implies a limit of
+ * maximum CPUs in such domain to 64.
+ */
+#define EM_MAX_POWER (64000000) /* 64 Watts */
+
+/*
+ * To avoid possible energy-estimation overflow on 32bit machines, limit
+ * the number of CPUs in the perf domain.
+ * 64bit machines are safe, thus the large number.
+ */
+#ifdef CONFIG_64BIT
+#define EM_MAX_NUM_CPUS 4096
+#else
+#define EM_MAX_NUM_CPUS 16
+#endif
+
+struct em_data_callback {
+ /**
+ * active_power() - Provide power at the next performance state of
+ * a device
+ * @dev : Device for which we do this operation (can be a CPU)
+ * @power : Active power at the performance state
+ * (modified)
+ * @freq : Frequency at the performance state in kHz
+ * (modified)
+ *
+ * active_power() must find the lowest performance state of 'dev' above
+ * 'freq' and update 'power' and 'freq' to the matching active power
+ * and frequency.
+ *
+ * In case of CPUs, the power is the one of a single CPU in the domain,
+ * expressed in micro-Watts or an abstract scale. It is expected to
+ * fit in the [0, EM_MAX_POWER] range.
+ *
+ * Return 0 on success.
+ */
+ int (*active_power)(struct device *dev, unsigned long *power,
+ unsigned long *freq);
+
+ /**
+ * get_cost() - Provide the cost at the given performance state of
+ * a device
+ * @dev : Device for which we do this operation (can be a CPU)
+ * @freq : Frequency at the performance state in kHz
+ * @cost : The cost value for the performance state
+ * (modified)
+ *
+ * In case of CPUs, the cost is the one of a single CPU in the domain.
+ * It is expected to fit in the [0, EM_MAX_POWER] range due to internal
+ * usage in EAS calculation.
+ *
+ * Return 0 on success, or appropriate error value in case of failure.
+ */
+ int (*get_cost)(struct device *dev, unsigned long freq,
+ unsigned long *cost);
+};
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) \
+ { .active_power = _active_power_cb, \
+ .get_cost = _cost_cb }
+#define EM_DATA_CB(_active_power_cb) \
+ EM_ADV_DATA_CB(_active_power_cb, NULL)
+
+struct em_perf_domain *em_cpu_get(int cpu);
+struct em_perf_domain *em_pd_get(struct device *dev);
+int em_dev_update_perf_domain(struct device *dev,
+ struct em_perf_table *new_table);
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts);
+int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts);
+void em_dev_unregister_perf_domain(struct device *dev);
+struct em_perf_table *em_table_alloc(struct em_perf_domain *pd);
+void em_table_free(struct em_perf_table *table);
+int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
+ int nr_states);
+int em_dev_update_chip_binning(struct device *dev);
+int em_update_performance_limits(struct em_perf_domain *pd,
+ unsigned long freq_min_khz, unsigned long freq_max_khz);
+void em_adjust_cpu_capacity(unsigned int cpu);
+void em_rebuild_sched_domains(void);
+
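A hedged registration sketch tying the callback contract above to em_dev_register_perf_domain(); the device, state count and power/frequency numbers are placeholders:

static int est_power(struct device *dev, unsigned long *power,
		     unsigned long *freq)
{
	/* Placeholder: report the lowest state at or above *freq. */
	*freq = 1000000;	/* kHz */
	*power = 50000;		/* micro-Watts, as microwatts=true below */
	return 0;
}

static int register_example_em(struct device *dev, const cpumask_t *cpus)
{
	struct em_data_callback cb = EM_DATA_CB(est_power);

	return em_dev_register_perf_domain(dev, 4, &cb, cpus, true);
}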
+/**
+ * em_pd_get_efficient_state() - Get an efficient performance state from the EM
+ * @table: List of performance states, in ascending order
+ * @pd: performance domain for which this must be done
+ * @max_util: Max utilization to map with the EM
+ *
+ * It is called from the scheduler code quite frequently and as a consequence
+ * doesn't implement any checks.
+ *
+ * Return: An efficient performance state id, high enough to meet @max_util
+ * requirement.
+ */
+static inline int
+em_pd_get_efficient_state(struct em_perf_state *table,
+ struct em_perf_domain *pd, unsigned long max_util)
+{
+ unsigned long pd_flags = pd->flags;
+ int min_ps = pd->min_perf_state;
+ int max_ps = pd->max_perf_state;
+ struct em_perf_state *ps;
+ int i;
+
+ for (i = min_ps; i <= max_ps; i++) {
+ ps = &table[i];
+ if (ps->performance >= max_util) {
+ if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
+ ps->flags & EM_PERF_STATE_INEFFICIENT)
+ continue;
+ return i;
+ }
+ }
+
+ return max_ps;
+}
+
+/**
+ * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
+ * performance domain
+ * @pd : performance domain for which energy has to be estimated
+ * @max_util : highest utilization among CPUs of the domain
+ * @sum_util : sum of the utilization of all CPUs in the domain
+ * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
+ * might reflect reduced frequency (due to thermal)
+ *
+ * This function must be used only for CPU devices. There is no validation,
+ * i.e. whether the EM is of CPU type and has its cpumask allocated. It is
+ * called from the scheduler code quite frequently, which is why it performs
+ * no checks.
+ *
+ * Return: the sum of the energy consumed by the CPUs of the domain assuming
+ * a capacity state satisfying the max utilization of the domain.
+ */
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
+{
+ struct em_perf_table *em_table;
+ struct em_perf_state *ps;
+ int i;
+
+ WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
+
+ if (!sum_util)
+ return 0;
+
+ /*
+ * In order to predict the performance state, map the utilization of
+ * the most utilized CPU of the performance domain to a requested
+ * performance, like schedutil. Take also into account that the real
+ * performance might be set lower (due to thermal capping). Thus, clamp
+ * max utilization to the allowed CPU capacity before calculating
+ * effective performance.
+ */
+ max_util = min(max_util, allowed_cpu_cap);
+
+ /*
+ * Find the lowest performance state of the Energy Model above the
+ * requested performance.
+ */
+ em_table = rcu_dereference(pd->em_table);
+ i = em_pd_get_efficient_state(em_table->state, pd, max_util);
+ ps = &em_table->state[i];
+
+ /*
+ * The performance (capacity) of a CPU in the domain at the performance
+ * state (ps) can be computed as:
+ *
+ * ps->freq * scale_cpu
+ * ps->performance = -------------------- (1)
+ * cpu_max_freq
+ *
+ * So, ignoring the costs of idle states (which are not available in
+ * the EM), the energy consumed by this CPU at that performance state
+ * is estimated as:
+ *
+ * ps->power * cpu_util
+ * cpu_nrg = -------------------- (2)
+ * ps->performance
+ *
+ * since 'cpu_util / ps->performance' represents its percentage of busy
+ * time.
+ *
+ * NOTE: Although the result of this computation actually is in
+ * units of power, it can be manipulated as an energy value
+ * over a scheduling period, since it is assumed to be
+ * constant during that interval.
+ *
+ * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
+ * of two terms:
+ *
+ * ps->power * cpu_max_freq
+ * cpu_nrg = ------------------------ * cpu_util (3)
+ * ps->freq * scale_cpu
+ *
+ * The first term is static, and is stored in the em_perf_state struct
+ * as 'ps->cost'.
+ *
+ * Since all CPUs of the domain have the same micro-architecture, they
+ * share the same 'ps->cost', and the same CPU capacity. Hence, the
+ * total energy of the domain (which is the simple sum of the energy of
+ * all of its CPUs) can be factorized as:
+ *
+ * pd_nrg = ps->cost * \Sum cpu_util (4)
+ */
+ return ps->cost * sum_util;
+}
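A worked instance of (3) and (4) with made-up numbers: for ps->freq = 1000000 kHz, cpu_max_freq = 2000000 kHz and ps->power = 400,

	            400 * 2000000
	 ps->cost = ------------- = 800
	               1000000

so with sum_util = 512 the estimate returned is 800 * 512 = 409600.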
+
+/**
+ * em_pd_nr_perf_states() - Get the number of performance states of a perf.
+ * domain
+ * @pd : performance domain for which this must be done
+ *
+ * Return: the number of performance states in the performance domain table
+ */
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
+{
+ return pd->nr_perf_states;
+}
+
+/**
+ * em_perf_state_from_pd() - Get the performance states table of perf.
+ * domain
+ * @pd : performance domain for which this must be done
+ *
+ * To use this function, rcu_read_lock() must be held. Once the usage
+ * of the performance states table is finished, rcu_read_unlock() must
+ * be called.
+ *
+ * Return: the pointer to performance states table of the performance domain
+ */
+static inline
+struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
+{
+ return rcu_dereference(pd->em_table)->state;
+}
+
+#else
+struct em_data_callback {};
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
+#define EM_DATA_CB(_active_power_cb) { }
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)
+
+static inline
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts)
+{
+ return -EINVAL;
+}
+static inline
+int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts)
+{
+ return -EINVAL;
+}
+static inline void em_dev_unregister_perf_domain(struct device *dev)
+{
+}
+static inline struct em_perf_domain *em_cpu_get(int cpu)
+{
+ return NULL;
+}
+static inline struct em_perf_domain *em_pd_get(struct device *dev)
+{
+ return NULL;
+}
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
+{
+ return 0;
+}
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
+{
+ return 0;
+}
+static inline
+struct em_perf_table *em_table_alloc(struct em_perf_domain *pd)
+{
+ return NULL;
+}
+static inline void em_table_free(struct em_perf_table *table) {}
+static inline
+int em_dev_update_perf_domain(struct device *dev,
+ struct em_perf_table *new_table)
+{
+ return -EINVAL;
+}
+static inline
+struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
+{
+ return NULL;
+}
+static inline
+int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
+ int nr_states)
+{
+ return -EINVAL;
+}
+static inline int em_dev_update_chip_binning(struct device *dev)
+{
+ return -EINVAL;
+}
+static inline
+int em_update_performance_limits(struct em_perf_domain *pd,
+ unsigned long freq_min_khz, unsigned long freq_max_khz)
+{
+ return -EINVAL;
+}
+static inline void em_adjust_cpu_capacity(unsigned int cpu) {}
+static inline void em_rebuild_sched_domains(void) {}
+#endif
+
+#endif
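
A quick standalone illustration of the arithmetic above, using made-up numbers
(the actual pre-computation of ps->cost happens in the energy model core, not
in this header):

#include <stdio.h>

int main(void)
{
	/* Made-up state: 1.2 GHz / 800 mW on a CPU with a 2.0 GHz max
	 * frequency and a capacity scale of 1024.
	 */
	unsigned long freq = 1200000, cpu_max_freq = 2000000;
	unsigned long power = 800, scale_cpu = 1024;
	unsigned long sum_util = 900;

	/* Equation (1): capacity delivered at this performance state */
	unsigned long performance = freq * scale_cpu / cpu_max_freq; /* 614 */

	/* Static first term of equation (3), pre-computed as ps->cost */
	unsigned long cost = power * cpu_max_freq / freq; /* 1333 */

	/* Equation (4): domain energy estimate for the summed utilization */
	printf("performance=%lu cost=%lu pd_nrg=%lu\n",
	       performance, cost, cost * sum_util);
	return 0;
}
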
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
new file mode 100644
index 000000000000..87efb38b7081
--- /dev/null
+++ b/include/linux/entry-common.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_ENTRYCOMMON_H
+#define __LINUX_ENTRYCOMMON_H
+
+#include <linux/irq-entry-common.h>
+#include <linux/livepatch.h>
+#include <linux/ptrace.h>
+#include <linux/resume_user_mode.h>
+#include <linux/seccomp.h>
+#include <linux/sched.h>
+
+#include <asm/entry-common.h>
+#include <asm/syscall.h>
+
+#ifndef _TIF_UPROBE
+# define _TIF_UPROBE (0)
+#endif
+
+/*
+ * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
+ */
+#ifndef ARCH_SYSCALL_WORK_ENTER
+# define ARCH_SYSCALL_WORK_ENTER (0)
+#endif
+
+/*
+ * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
+ */
+#ifndef ARCH_SYSCALL_WORK_EXIT
+# define ARCH_SYSCALL_WORK_EXIT (0)
+#endif
+
+#define SYSCALL_WORK_ENTER (SYSCALL_WORK_SECCOMP | \
+ SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_EMU | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ ARCH_SYSCALL_WORK_ENTER)
+
+#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
+ ARCH_SYSCALL_WORK_EXIT)
+
+long syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long work);
+
+/**
+ * syscall_enter_from_user_mode_work - Check and handle work before invoking
+ * a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * enabled, after enter_from_user_mode() has been invoked, interrupts have
+ * been enabled and any extra architecture specific work has been done.
+ *
+ * Returns: The original or a modified syscall number
+ *
+ * If the returned syscall number is -1 then the syscall should be
+ * skipped. In this case the caller may invoke syscall_set_error() or
+ * syscall_set_return_value() first. If neither of those are called and -1
+ * is returned, then the syscall will fail with ENOSYS.
+ *
+ * It handles the following work items:
+ *
+ * 1) syscall_work flag dependent invocations of
+ * ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
+ * 2) Invocation of audit_syscall_entry()
+ */
+static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+
+ if (work & SYSCALL_WORK_ENTER)
+ syscall = syscall_trace_enter(regs, syscall, work);
+
+ return syscall;
+}
+
+/**
+ * syscall_enter_from_user_mode - Establish state and check and handle work
+ * before invoking a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This is the combination of enter_from_user_mode() and
+ * syscall_enter_from_user_mode_work() to be used when there is no
+ * architecture specific work to be done between the two.
+ *
+ * Returns: The original or a modified syscall number. See
+ * syscall_enter_from_user_mode_work() for further explanation.
+ */
+static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+ long ret;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ local_irq_enable();
+ ret = syscall_enter_from_user_mode_work(regs, syscall);
+ instrumentation_end();
+
+ return ret;
+}
+
+/**
+ * syscall_exit_work - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ * @work: Current thread syscall work
+ *
+ * Do one-time syscall specific work.
+ */
+void syscall_exit_work(struct pt_regs *regs, unsigned long work);
+
+/**
+ * syscall_exit_to_user_mode_work - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Same as steps 1 and 2 of syscall_exit_to_user_mode() but without calling
+ * exit_to_user_mode() to perform the final transition to user mode.
+ *
+ * Calling convention is the same as for syscall_exit_to_user_mode() and it
+ * returns with all work handled and interrupts disabled. The caller must
+ * invoke exit_to_user_mode() before actually switching to user mode to
+ * make the final state transitions. Interrupts must stay disabled between
+ * return from this function and the invocation of exit_to_user_mode().
+ */
+static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+ unsigned long nr = syscall_get_nr(current, regs);
+
+ CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
+
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
+ local_irq_enable();
+ }
+
+ rseq_debug_syscall_return(regs);
+
+ /*
+ * Do one-time syscall specific work. If these work items are
+ * enabled, we want to run them exactly once per syscall exit with
+ * interrupts enabled.
+ */
+ if (unlikely(work & SYSCALL_WORK_EXIT))
+ syscall_exit_work(regs, work);
+ local_irq_disable_exit_to_user();
+ syscall_exit_to_user_mode_prepare(regs);
+}
+
+/**
+ * syscall_exit_to_user_mode - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts enabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific syscall and
+ * return-from-fork code.
+ *
+ * The call order is:
+ * 1) One-time syscall exit work:
+ * - rseq syscall exit
+ * - audit
+ * - syscall tracing
+ * - ptrace (single stepping)
+ *
+ * 2) Preparatory work
+ * - Exit to user mode loop (common TIF handling). Invokes
+ * arch_exit_to_user_mode_work() for architecture specific TIF work
+ * - Architecture specific one time work arch_exit_to_user_mode_prepare()
+ * - Address limit and lockdep checks
+ *
+ * 3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
+ * functionality in exit_to_user_mode().
+ *
+ * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
+ * exit_to_user_mode(). This function is preferred unless there is a
+ * compelling architectural reason to use the separate functions.
+ */
+static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ syscall_exit_to_user_mode_work(regs);
+ instrumentation_end();
+ exit_to_user_mode();
+}
+
+#endif
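
For orientation, an architecture's C-level syscall path built on these helpers
could look roughly like the sketch below. syscall_get_nr() and
syscall_set_return_value() come from asm/syscall.h; NR_syscalls is
arch-provided and invoke_syscall_table_entry() is a stand-in for the
architecture's dispatch, not a real symbol:

#include <linux/entry-common.h>

static void arch_do_syscall(struct pt_regs *regs)
{
	long nr = syscall_get_nr(current, regs);

	nr = syscall_enter_from_user_mode(regs, nr);
	if (nr >= 0 && nr < NR_syscalls)
		invoke_syscall_table_entry(regs, nr);	/* stand-in */
	else if (nr != -1)
		syscall_set_return_value(current, regs, -ENOSYS, 0);

	syscall_exit_to_user_mode(regs);
}
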
diff --git a/include/linux/entry-virt.h b/include/linux/entry-virt.h
new file mode 100644
index 000000000000..bfa767702d9a
--- /dev/null
+++ b/include/linux/entry-virt.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_ENTRYVIRT_H
+#define __LINUX_ENTRYVIRT_H
+
+#include <linux/static_call_types.h>
+#include <linux/resume_user_mode.h>
+#include <linux/syscalls.h>
+#include <linux/seccomp.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+
+/* Transfer to guest mode work */
+#ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
+
+#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
+# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
+#endif
+
+#define XFER_TO_GUEST_MODE_WORK \
+ (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \
+ ARCH_XFER_TO_GUEST_MODE_WORK)
+
+/**
+ * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
+ * mode work handling function.
+ * @ti_work: Cached TIF flags gathered in xfer_to_guest_mode_handle_work()
+ *
+ * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
+ * replaced by architecture specific code.
+ */
+static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work);
+
+#ifndef arch_xfer_to_guest_mode_handle_work
+static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work)
+{
+ return 0;
+}
+#endif
+
+/**
+ * xfer_to_guest_mode_handle_work - Check and handle pending work which needs
+ * to be handled before going to guest mode
+ *
+ * Returns: 0 or an error code
+ */
+int xfer_to_guest_mode_handle_work(void);
+
+/**
+ * xfer_to_guest_mode_prepare - Perform last-minute preparation work that
+ * needs to be handled while IRQs are disabled
+ * upon entering guest mode.
+ *
+ * Has to be invoked with interrupts disabled before the last call
+ * to xfer_to_guest_mode_work_pending().
+ */
+static inline void xfer_to_guest_mode_prepare(void)
+{
+ lockdep_assert_irqs_disabled();
+ tick_nohz_user_enter_prepare();
+}
+
+/**
+ * __xfer_to_guest_mode_work_pending - Check if work is pending
+ *
+ * Returns: True if work pending, False otherwise.
+ *
+ * Bare variant of xfer_to_guest_mode_work_pending(). Can be called with
+ * care from interrupt-enabled code for racy quick checks.
+ */
+static inline bool __xfer_to_guest_mode_work_pending(void)
+{
+ unsigned long ti_work = read_thread_flags();
+
+ return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
+}
+
+/**
+ * xfer_to_guest_mode_work_pending - Check if work is pending which needs to be
+ * handled before returning to guest mode
+ *
+ * Returns: True if work pending, False otherwise.
+ *
+ * Has to be invoked with interrupts disabled before the transition to
+ * guest mode.
+ */
+static inline bool xfer_to_guest_mode_work_pending(void)
+{
+ lockdep_assert_irqs_disabled();
+ return __xfer_to_guest_mode_work_pending();
+}
+#endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
+
+#endif
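
The expected calling pattern for these helpers in a hypervisor's run loop,
sketched below; enter_guest_mode_lowlevel() stands in for the architecture's
world switch and is not a real symbol:

#include <linux/entry-virt.h>

static int example_vcpu_enter(void)
{
	int ret;

	for (;;) {
		local_irq_disable();
		xfer_to_guest_mode_prepare();
		if (!xfer_to_guest_mode_work_pending())
			break;
		local_irq_enable();
		ret = xfer_to_guest_mode_handle_work();
		if (ret)
			return ret;
	}

	enter_guest_mode_lowlevel();	/* stand-in */
	local_irq_enable();
	return 0;
}
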
diff --git a/include/linux/err.h b/include/linux/err.h
index 1e3558845e4c..8c37be0620ab 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ERR_H
#define _LINUX_ERR_H
@@ -18,23 +19,74 @@
#ifndef __ASSEMBLY__
+/**
+ * IS_ERR_VALUE - Detect an error pointer.
+ * @x: The pointer to check.
+ *
+ * Like IS_ERR(), but does not generate a compiler warning if result is unused.
+ */
#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
+/**
+ * ERR_PTR - Create an error pointer.
+ * @error: A negative error code.
+ *
+ * Encodes @error into a pointer value. Users should consider the result
+ * opaque and not assume anything about how the error is encoded.
+ *
+ * Return: A pointer with @error encoded within its value.
+ */
static inline void * __must_check ERR_PTR(long error)
{
return (void *) error;
}
+/**
+ * INIT_ERR_PTR - Init a const error pointer.
+ * @error: A negative error code.
+ *
+ * Like ERR_PTR(), but usable to initialize static variables.
+ */
+#define INIT_ERR_PTR(error) ((void *)(error))
+
+/* Return the pointer in the percpu address space. */
+#define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error))
+
+/* Cast an error pointer to __iomem. */
+#define IOMEM_ERR_PTR(error) (__force void __iomem *)ERR_PTR(error)
+
+/**
+ * PTR_ERR - Extract the error code from an error pointer.
+ * @ptr: An error pointer.
+ * Return: The error code within @ptr.
+ */
static inline long __must_check PTR_ERR(__force const void *ptr)
{
return (long) ptr;
}
+/* Read an error pointer from the percpu address space. */
+#define PTR_ERR_PCPU(ptr) (PTR_ERR((const void *)(__force const unsigned long)(ptr)))
+
+/**
+ * IS_ERR - Detect an error pointer.
+ * @ptr: The pointer to check.
+ * Return: true if @ptr is an error pointer, false otherwise.
+ */
static inline bool __must_check IS_ERR(__force const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
+/* Read an error pointer from the percpu address space. */
+#define IS_ERR_PCPU(ptr) (IS_ERR((const void *)(__force const unsigned long)(ptr)))
+
+/**
+ * IS_ERR_OR_NULL - Detect an error pointer or a null pointer.
+ * @ptr: The pointer to check.
+ *
+ * Like IS_ERR(), but also returns true for a null pointer.
+ */
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
@@ -53,6 +105,23 @@ static inline void * __must_check ERR_CAST(__force const void *ptr)
return (void *) ptr;
}
+/**
+ * PTR_ERR_OR_ZERO - Extract the error code from a pointer if it has one.
+ * @ptr: A potential error pointer.
+ *
+ * Convenience function that can be used inside a function that returns
+ * an error code to propagate errors received as error pointers.
+ * For example, ``return PTR_ERR_OR_ZERO(ptr);`` replaces:
+ *
+ * .. code-block:: c
+ *
+ * if (IS_ERR(ptr))
+ * return PTR_ERR(ptr);
+ * else
+ * return 0;
+ *
+ * Return: The error code within @ptr if it is an error pointer; 0 otherwise.
+ */
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
if (IS_ERR(ptr))
@@ -61,9 +130,6 @@ static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
return 0;
}
-/* Deprecated */
-#define PTR_RET(p) PTR_ERR_OR_ZERO(p)
-
#endif
#endif /* _LINUX_ERR_H */
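
The canonical producer/consumer pattern for these helpers, sketched with a
hypothetical object and lookup function:

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int id; };		/* hypothetical object */

static struct foo *foo_lookup(int id)
{
	struct foo *f;

	if (id < 0)
		return ERR_PTR(-EINVAL);
	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);
	f->id = id;
	return f;
}

static int foo_use(int id)
{
	struct foo *f = foo_lookup(id);

	if (IS_ERR(f))
		return PTR_ERR(f);
	/* ... use f ... */
	kfree(f);
	return 0;
}
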
diff --git a/include/linux/errname.h b/include/linux/errname.h
new file mode 100644
index 000000000000..e8576ad90cb7
--- /dev/null
+++ b/include/linux/errname.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERRNAME_H
+#define _LINUX_ERRNAME_H
+
+#include <linux/stddef.h>
+
+#ifdef CONFIG_SYMBOLIC_ERRNAME
+const char *errname(int err);
+#else
+static inline const char *errname(int err)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _LINUX_ERRNAME_H */
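
With CONFIG_SYMBOLIC_ERRNAME enabled, errname() also backs printk's %pe
specifier, so error pointers print symbolically; a minimal sketch:

#include <linux/err.h>
#include <linux/printk.h>

static void report_failure(int err)
{
	/* Prints e.g. "request failed: -EINVAL" when symbolic names are
	 * built in; falls back to the raw value otherwise.
	 */
	pr_warn("request failed: %pe\n", ERR_PTR(err));
}
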
diff --git a/include/linux/errno.h b/include/linux/errno.h
index 7ce9fb1b7d28..8b0c754bab02 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ERRNO_H
#define _LINUX_ERRNO_H
@@ -17,6 +18,7 @@
#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
#define EPROBE_DEFER 517 /* Driver requests probe retry */
#define EOPENSTALE 518 /* open found a stale dentry */
+#define ENOPARAM 519 /* Parameter not supported */
/* Defined for the NFSv3 protocol */
#define EBADHANDLE 521 /* Illegal NFS file handle */
@@ -29,5 +31,6 @@
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
#define ERECALLCONFLICT 530 /* conflict with recalled state */
+#define ENOGRACE 531 /* NFS file lock reclaim refused */
#endif
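
These codes never reach user space; they steer in-kernel control flow. A
typical example is probe deferral: devm_clk_get() returns
ERR_PTR(-EPROBE_DEFER) while the clock provider is not yet ready, and
propagating it makes the driver core retry probing later:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* possibly -EPROBE_DEFER */
	return 0;
}
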
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
new file mode 100644
index 000000000000..20e738f4eae8
--- /dev/null
+++ b/include/linux/error-injection.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERROR_INJECTION_H
+#define _LINUX_ERROR_INJECTION_H
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <asm-generic/error-injection.h>
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+
+extern bool within_error_injection_list(unsigned long addr);
+extern int get_injectable_error_type(unsigned long addr);
+
+#else /* !CONFIG_FUNCTION_ERROR_INJECTION */
+
+static inline bool within_error_injection_list(unsigned long addr)
+{
+ return false;
+}
+
+static inline int get_injectable_error_type(unsigned long addr)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* _LINUX_ERROR_INJECTION_H */
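
Functions opt in with the ALLOW_ERROR_INJECTION() annotation from
asm-generic/error-injection.h, which populates the list consulted by
within_error_injection_list(); a sketch:

#include <linux/error-injection.h>

/* With CONFIG_FUNCTION_ERROR_INJECTION, tooling may force this function
 * to return an injected errno instead of running its body.
 */
static int example_reserve(int n)
{
	if (n > 128)
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(example_reserve, ERRNO);
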
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index 6fdfc884fdeb..be1cf7291d6c 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ERRQUEUE_H
#define _LINUX_ERRQUEUE_H 1
diff --git a/include/linux/errseq.h b/include/linux/errseq.h
index 9e0d444ac88d..fc2777770768 100644
--- a/include/linux/errseq.h
+++ b/include/linux/errseq.h
@@ -1,18 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * See Documentation/core-api/errseq.rst and lib/errseq.c
+ */
#ifndef _LINUX_ERRSEQ_H
#define _LINUX_ERRSEQ_H
-/* See lib/errseq.c for more info */
-
typedef u32 errseq_t;
-errseq_t __errseq_set(errseq_t *eseq, int err);
-static inline void errseq_set(errseq_t *eseq, int err)
-{
- /* Optimize for the common case of no error */
- if (unlikely(err))
- __errseq_set(eseq, err);
-}
-
+errseq_t errseq_set(errseq_t *eseq, int err);
errseq_t errseq_sample(errseq_t *eseq);
int errseq_check(errseq_t *eseq, errseq_t since);
int errseq_check_and_advance(errseq_t *eseq, errseq_t *since);
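
The intended split between a single writer recording errors and many readers
sampling them, sketched below (see Documentation/core-api/errseq.rst for the
full semantics):

#include <linux/errseq.h>

static errseq_t wb_err;			/* shared error cursor */

static void writer_hit_error(int err)
{
	errseq_set(&wb_err, err);	/* record, mark unseen */
}

static int reader_sequence(void)
{
	errseq_t since = errseq_sample(&wb_err);

	/* ... I/O happens, possibly failing ... */

	/* Returns the error if one was recorded after the sample, and
	 * advances the cursor so it is reported only once here.
	 */
	return errseq_check_and_advance(&wb_err, &since);
}
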
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2d9f80848d4b..9a1eacf35d37 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. NET is implemented using the BSD Socket
@@ -10,14 +11,8 @@
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
- * Relocated to include/linux where it belongs by Alan Cox
+ * Relocated to include/linux where it belongs by Alan Cox
* <gw4pts@gw4pts.ampr.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#ifndef _LINUX_ETHERDEVICE_H
#define _LINUX_ETHERDEVICE_H
@@ -25,14 +20,23 @@
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/random.h>
-#include <asm/unaligned.h>
+#include <linux/crc32.h>
+#include <linux/unaligned.h>
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
struct device;
+struct fwnode_handle;
+
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
-unsigned char *arch_get_platform_get_mac_address(void);
-u32 eth_get_headlen(void *data, unsigned int max_len);
+int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
+unsigned char *arch_get_platform_mac_address(void);
+int nvmem_get_mac_address(struct device *dev, void *addrbuf);
+int device_get_mac_address(struct device *dev, char *addr);
+int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
+int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
+
+u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
@@ -43,10 +47,10 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
const unsigned char *haddr);
+__be16 eth_header_parse_protocol(const struct sk_buff *skb);
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
-int eth_change_mtu(struct net_device *dev, int new_mtu);
int eth_validate_addr(struct net_device *dev);
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
@@ -59,19 +63,25 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
unsigned int rxqs);
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+#define eth_stp_addr eth_reserved_addr_base
+
+static const u8 eth_ipv4_mcast_addr_base[ETH_ALEN] __aligned(2) =
+{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+
+static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) =
+{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
/**
* is_link_local_ether_addr - Determine if given Ethernet address is link-local
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
+ * Return: true if address is link local reserved addr (01:80:c2:00:00:0X) per
* IEEE 802.1Q 8.6.3 Frame filtering.
*
* Please note: addr must be aligned to u16.
@@ -94,7 +104,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
* is_zero_ether_addr - Determine if given Ethernet address is all zeros.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is all zeroes.
+ * Return: true if the address is all zeroes.
*
* Please note: addr must be aligned to u16.
*/
@@ -113,7 +123,7 @@ static inline bool is_zero_ether_addr(const u8 *addr)
* is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a multicast address.
+ * Return: true if the address is a multicast address.
* By definition the broadcast address is also a multicast address.
*/
static inline bool is_multicast_ether_addr(const u8 *addr)
@@ -130,7 +140,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
#endif
}
-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
@@ -147,7 +157,7 @@ static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
* is_local_ether_addr - Determine if the Ethernet address is locally-assigned one (IEEE 802).
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a local address.
+ * Return: true if the address is a local address.
*/
static inline bool is_local_ether_addr(const u8 *addr)
{
@@ -158,7 +168,7 @@ static inline bool is_local_ether_addr(const u8 *addr)
* is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is the broadcast address.
+ * Return: true if the address is the broadcast address.
*
* Please note: addr must be aligned to u16.
*/
@@ -173,7 +183,7 @@ static inline bool is_broadcast_ether_addr(const u8 *addr)
* is_unicast_ether_addr - Determine if the Ethernet address is unicast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a unicast address.
+ * Return: true if the address is a unicast address.
*/
static inline bool is_unicast_ether_addr(const u8 *addr)
{
@@ -187,7 +197,7 @@ static inline bool is_unicast_ether_addr(const u8 *addr)
* Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
* a multicast address, and is not FF:FF:FF:FF:FF:FF.
*
- * Return true if the address is valid.
+ * Return: true if the address is valid.
*
* Please note: addr must be aligned to u16.
*/
@@ -204,7 +214,7 @@ static inline bool is_valid_ether_addr(const u8 *addr)
*
* Check that the value from the Ethertype/length field is a valid Ethertype.
*
- * Return true if the valid is an 802.3 supported Ethertype.
+ * Return: true if the value is an 802.3 supported Ethertype.
*/
static inline bool eth_proto_is_802_3(__be16 proto)
{
@@ -230,8 +240,6 @@ static inline void eth_random_addr(u8 *addr)
addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
}
-#define random_ether_addr(addr) eth_random_addr(addr)
-
/**
* eth_broadcast_addr - Assign broadcast address
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -265,8 +273,22 @@ static inline void eth_zero_addr(u8 *addr)
*/
static inline void eth_hw_addr_random(struct net_device *dev)
{
+ u8 addr[ETH_ALEN];
+
+ eth_random_addr(addr);
+ __dev_addr_set(dev, addr, ETH_ALEN);
dev->addr_assign_type = NET_ADDR_RANDOM;
- eth_random_addr(dev->dev_addr);
+}
+
+/**
+ * eth_hw_addr_crc - Calculate CRC from netdev_hw_addr
+ * @ha: pointer to hardware address
+ *
+ * Calculate CRC from a hardware address as basis for filter hashes.
+ */
+static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha)
+{
+ return ether_crc(ETH_ALEN, ha->addr);
}
/**
@@ -292,6 +314,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
}
/**
+ * eth_hw_addr_set - Assign Ethernet address to a net_device
+ * @dev: pointer to net_device structure
+ * @addr: address to assign
+ *
+ * Assign the given address to the net_device; addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, ETH_ALEN);
+}
+
+/**
* eth_hw_addr_inherit - Copy dev_addr from another net_device
* @dst: pointer to net_device to copy dev_addr to
* @src: pointer to net_device to copy dev_addr from
@@ -303,7 +337,7 @@ static inline void eth_hw_addr_inherit(struct net_device *dst,
struct net_device *src)
{
dst->addr_assign_type = src->addr_assign_type;
- ether_addr_copy(dst->dev_addr, src->dev_addr);
+ eth_hw_addr_set(dst, src->dev_addr);
}
/**
@@ -344,8 +378,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
* Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
*/
-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
- const u8 addr2[6+2])
+static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
@@ -401,11 +434,31 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
return true;
}
+static inline bool ether_addr_is_ipv4_mcast(const u8 *addr)
+{
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, eth_ipv4_mcast_addr_base, mask);
+}
+
+static inline bool ether_addr_is_ipv6_mcast(const u8 *addr)
+{
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, eth_ipv6_mcast_addr_base, mask);
+}
+
+static inline bool ether_addr_is_ip_mcast(const u8 *addr)
+{
+ return ether_addr_is_ipv4_mcast(addr) ||
+ ether_addr_is_ipv6_mcast(addr);
+}
+
/**
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return a u64 value of the address
+ * Return: a u64 value of the address
*/
static inline u64 ether_addr_to_u64(const u8 *addr)
{
@@ -447,6 +500,32 @@ static inline void eth_addr_dec(u8 *addr)
}
/**
+ * eth_addr_inc() - Increment the given MAC address.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_inc(u8 *addr)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u++;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
+ * eth_addr_add() - Add (or subtract) an offset to/from the given MAC address.
+ *
+ * @offset: Offset to add.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_add(u8 *addr, long offset)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u += offset;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -512,7 +591,61 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
}
/**
- * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
+ * eth_hw_addr_gen - Generate and assign Ethernet address to a port
+ * @dev: pointer to port's net_device structure
+ * @base_addr: base Ethernet address
+ * @id: offset to add to the base address
+ *
+ * Generate a MAC address using a base address and an offset and assign it
+ * to a net_device. Commonly used by switch drivers which need to compute
+ * addresses for all their ports. addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
+ unsigned int id)
+{
+ u64 u = ether_addr_to_u64(base_addr);
+ u8 addr[ETH_ALEN];
+
+ u += id;
+ u64_to_ether_addr(u, addr);
+ eth_hw_addr_set(dev, addr);
+}
+
+/**
+ * eth_skb_pkt_type - Assign packet type if destination address does not match
+ * @skb: Buffer to assign the packet type to if its address does not match @dev
+ * @dev: Network device used to compare packet address against
+ *
+ * If the destination MAC address of the packet does not match the network
+ * device address, assign an appropriate packet type.
+ */
+static inline void eth_skb_pkt_type(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
+ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else {
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ }
+}
+
+static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+ skb_pull_inline(skb, ETH_HLEN);
+ return eth;
+}
+
+/**
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
* An Ethernet frame should have a minimum size of 60 bytes. This function
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 83cc9863444b..5c9162193d26 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ethtool.h: Defines for Linux ethtool.
*
@@ -14,9 +15,15 @@
#include <linux/bitmap.h>
#include <linux/compat.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/timer_types.h>
#include <uapi/linux/ethtool.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
+#include <uapi/linux/net_tstamp.h>
-#ifdef CONFIG_COMPAT
+#define ETHTOOL_MM_MAX_VERIFY_TIME_MS 128
+#define ETHTOOL_MM_MAX_VERIFY_RETRIES 3
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
@@ -34,11 +41,9 @@ struct compat_ethtool_rxnfc {
compat_u64 data;
struct compat_ethtool_rx_flow_spec fs;
u32 rule_cnt;
- u32 rule_locs[0];
+ u32 rule_locs[];
};
-#endif /* CONFIG_COMPAT */
-
#include <linux/rculist.h>
/**
@@ -69,6 +74,51 @@ enum {
ETH_RSS_HASH_FUNCS_COUNT
};
+/**
+ * struct kernel_ethtool_ringparam - RX/TX ring configuration
+ * @rx_buf_len: Current length of buffers on the rx ring.
+ * @tcp_data_split: Scatter packet headers and data to separate buffers
+ * @tx_push: Flag indicating whether TX push mode is enabled
+ * @rx_push: Flag indicating whether RX push mode is enabled
+ * @cqe_size: Size of TX/RX completion queue event
+ * @tx_push_buf_len: Size of TX push buffer
+ * @tx_push_buf_max_len: Maximum allowed size of TX push buffer
+ * @hds_thresh: Packet size threshold for header data split (HDS)
+ * @hds_thresh_max: Maximum supported setting for @hds_thresh
+ *
+ */
+struct kernel_ethtool_ringparam {
+ u32 rx_buf_len;
+ u8 tcp_data_split;
+ u8 tx_push;
+ u8 rx_push;
+ u32 cqe_size;
+ u32 tx_push_buf_len;
+ u32 tx_push_buf_max_len;
+ u32 hds_thresh;
+ u32 hds_thresh_max;
+};
+
+/**
+ * enum ethtool_supported_ring_param - indicator caps for setting ring params
+ * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
+ * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
+ * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
+ * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
+ * @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len
+ * @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capture for setting tcp_data_split
+ * @ETHTOOL_RING_USE_HDS_THRS: capture for setting header-data-split-thresh
+ */
+enum ethtool_supported_ring_param {
+ ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
+ ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
+ ETHTOOL_RING_USE_TX_PUSH = BIT(2),
+ ETHTOOL_RING_USE_RX_PUSH = BIT(3),
+ ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4),
+ ETHTOOL_RING_USE_TCP_DATA_SPLIT = BIT(5),
+ ETHTOOL_RING_USE_HDS_THRS = BIT(6),
+};
+
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
@@ -80,10 +130,35 @@ enum {
#define ETH_RSS_HASH_NO_CHANGE 0
struct net_device;
+struct netlink_ext_ack;
-/* Some generic methods drivers may use in their ethtool_ops */
-u32 ethtool_op_get_link(struct net_device *dev);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+/* Link extended state and substate. */
+struct ethtool_link_ext_state_info {
+ enum ethtool_link_ext_state link_ext_state;
+ union {
+ enum ethtool_link_ext_substate_autoneg autoneg;
+ enum ethtool_link_ext_substate_link_training link_training;
+ enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch;
+ enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
+ enum ethtool_link_ext_substate_cable_issue cable_issue;
+ enum ethtool_link_ext_substate_module module;
+ u32 __link_ext_substate;
+ };
+};
+
+struct ethtool_link_ext_stats {
+ /* Custom Linux statistic for PHY level link down events.
+ * In a simpler world it should be equal to netdev->carrier_down_count
+ * unfortunately netdev also counts local reconfigurations which don't
+ * actually take the physical link down, not to mention NC-SI which,
+ * if present, keeps the link up regardless of host state.
+ * This statistic counts when PHY _actually_ went down, or lost link.
+ *
+ * Note that we need u64 for ethtool_stats_init() and comparisons
+ * to ETHTOOL_STAT_NOT_SET, but only u32 is exposed to the user.
+ */
+ u64 link_down_events;
+};
/**
* ethtool_rxfh_indir_default - get default value for RX flow hash indirection
@@ -97,9 +172,56 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
return index % n_rx_rings;
}
-/* number of link mode bits/ulongs handled internally by kernel */
-#define __ETHTOOL_LINK_MODE_MASK_NBITS \
- (__ETHTOOL_LINK_MODE_LAST + 1)
+/**
+ * struct ethtool_rxfh_context - a custom RSS context configuration
+ * @indir_size: Number of u32 entries in indirection table
+ * @key_size: Size of hash key, in bytes
+ * @priv_size: Size of driver private data, in bytes
+ * @hfunc: RSS hash function identifier. One of the %ETH_RSS_HASH_*
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ * @indir_configured: indir has been specified (at create time or subsequently)
+ * @key_configured: hkey has been specified (at create time or subsequently)
+ */
+struct ethtool_rxfh_context {
+ u32 indir_size;
+ u32 key_size;
+ u16 priv_size;
+ u8 hfunc;
+ u8 input_xfrm;
+ u8 indir_configured:1;
+ u8 key_configured:1;
+ /* private: driver private data, indirection table, and hash key are
+ * stored sequentially in @data area. Use below helpers to access.
+ */
+ u32 key_off;
+ u8 data[] __aligned(sizeof(void *));
+};
+
+static inline void *ethtool_rxfh_context_priv(struct ethtool_rxfh_context *ctx)
+{
+ return ctx->data;
+}
+
+static inline u32 *ethtool_rxfh_context_indir(struct ethtool_rxfh_context *ctx)
+{
+ return (u32 *)(ctx->data + ALIGN(ctx->priv_size, sizeof(u32)));
+}
+
+static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
+{
+ return &ctx->data[ctx->key_off];
+}
+
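Given the sequential layout of @data described above, the total allocation
size of one context follows directly; a sketch consistent with the accessors
(the real allocation helper lives in the ethtool core):

static size_t example_rxfh_context_size(const struct ethtool_rxfh_context *ctx)
{
	return sizeof(*ctx) +
	       ALIGN(ctx->priv_size, sizeof(u32)) +	/* driver priv */
	       ctx->indir_size * sizeof(u32) +		/* indirection table */
	       ctx->key_size;				/* hash key */
}
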
+void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id);
+
+struct link_mode_info {
+ int speed;
+ u8 lanes;
+ u8 duplex;
+};
+
+extern const struct link_mode_info link_mode_params[];
/* declare a link mode bitmap */
#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
@@ -115,6 +237,7 @@ struct ethtool_link_ksettings {
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
} link_modes;
+ u32 lanes;
};
/**
@@ -137,13 +260,24 @@ struct ethtool_link_ksettings {
__set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
/**
+ * ethtool_link_ksettings_del_link_mode - clear bit in link_ksettings
+ * link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
+ * (not atomic, no bound checking)
+ */
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \
+ __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+
+/**
* ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask
* @ptr : pointer to struct ethtool_link_ksettings
* @name : one of supported/advertising/lp_advertising
* @mode : one of the ETHTOOL_LINK_MODE_*_BIT
* (not atomic, no bound checking)
*
- * Returns true/false.
+ * Returns: true/false.
*/
#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \
test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
@@ -152,6 +286,34 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+struct ethtool_keee {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertised);
+ u32 tx_lpi_timer;
+ bool tx_lpi_enabled;
+ bool eee_active;
+ bool eee_enabled;
+};
+
+struct kernel_ethtool_coalesce {
+ u8 use_cqe_mode_tx;
+ u8 use_cqe_mode_rx;
+ u32 tx_aggr_max_bytes;
+ u32 tx_aggr_max_frames;
+ u32 tx_aggr_time_usecs;
+};
+
+/**
+ * ethtool_intersect_link_masks - Given two link masks, AND them together
+ * @dst: first mask and where result is stored
+ * @src: second mask to intersect with
+ *
+ * Given two link mode masks, AND them together and save the result in dst.
+ */
+void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+ struct ethtool_link_ksettings *src);
+
void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
u32 legacy_u32);
@@ -159,20 +321,597 @@ void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
const unsigned long *src);
+#define ETHTOOL_COALESCE_RX_USECS BIT(0)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES BIT(1)
+#define ETHTOOL_COALESCE_RX_USECS_IRQ BIT(2)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ BIT(3)
+#define ETHTOOL_COALESCE_TX_USECS BIT(4)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES BIT(5)
+#define ETHTOOL_COALESCE_TX_USECS_IRQ BIT(6)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ BIT(7)
+#define ETHTOOL_COALESCE_STATS_BLOCK_USECS BIT(8)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_RX BIT(9)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_TX BIT(10)
+#define ETHTOOL_COALESCE_PKT_RATE_LOW BIT(11)
+#define ETHTOOL_COALESCE_RX_USECS_LOW BIT(12)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW BIT(13)
+#define ETHTOOL_COALESCE_TX_USECS_LOW BIT(14)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW BIT(15)
+#define ETHTOOL_COALESCE_PKT_RATE_HIGH BIT(16)
+#define ETHTOOL_COALESCE_RX_USECS_HIGH BIT(17)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH BIT(18)
+#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
+#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
+#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
+#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25)
+#define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26)
+#define ETHTOOL_COALESCE_RX_PROFILE BIT(27)
+#define ETHTOOL_COALESCE_TX_PROFILE BIT(28)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(28, 0)
+
+#define ETHTOOL_COALESCE_USECS \
+ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
+#define ETHTOOL_COALESCE_MAX_FRAMES \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES | ETHTOOL_COALESCE_TX_MAX_FRAMES)
+#define ETHTOOL_COALESCE_USECS_IRQ \
+ (ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_TX_USECS_IRQ)
+#define ETHTOOL_COALESCE_MAX_FRAMES_IRQ \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_ADAPTIVE_TX)
+#define ETHTOOL_COALESCE_USECS_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_TX_USECS_LOW | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH)
+#define ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH)
+#define ETHTOOL_COALESCE_PKT_RATE_RX_USECS \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \
+ ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
+ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+#define ETHTOOL_COALESCE_USE_CQE \
+ (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)
+#define ETHTOOL_COALESCE_TX_AGGR \
+ (ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES | \
+ ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES | \
+ ETHTOOL_COALESCE_TX_AGGR_TIME_USECS)
+
+#define ETHTOOL_STAT_NOT_SET (~0ULL)
+
+static inline void ethtool_stats_init(u64 *stats, unsigned int n)
+{
+ while (n--)
+ stats[n] = ETHTOOL_STAT_NOT_SET;
+}
+
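Drivers are expected to pre-fill statistics with ETHTOOL_STAT_NOT_SET so the
core can tell "unsupported" apart from a genuine zero; a sketch of the
pattern, where example_rd64() is a stand-in for a hardware counter read:

static void example_fill_stats(struct net_device *dev, u64 *stats,
			       unsigned int n)
{
	ethtool_stats_init(stats, n);		/* everything "not reported" */
	stats[0] = example_rd64(dev, 0);	/* stand-in counter read */
	/* remaining slots stay ETHTOOL_STAT_NOT_SET and are skipped */
}
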
+/* Basic IEEE 802.3 MAC statistics (30.3.1.1.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_mac_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 FramesTransmittedOK;
+ u64 SingleCollisionFrames;
+ u64 MultipleCollisionFrames;
+ u64 FramesReceivedOK;
+ u64 FrameCheckSequenceErrors;
+ u64 AlignmentErrors;
+ u64 OctetsTransmittedOK;
+ u64 FramesWithDeferredXmissions;
+ u64 LateCollisions;
+ u64 FramesAbortedDueToXSColls;
+ u64 FramesLostDueToIntMACXmitError;
+ u64 CarrierSenseErrors;
+ u64 OctetsReceivedOK;
+ u64 FramesLostDueToIntMACRcvError;
+ u64 MulticastFramesXmittedOK;
+ u64 BroadcastFramesXmittedOK;
+ u64 FramesWithExcessiveDeferral;
+ u64 MulticastFramesReceivedOK;
+ u64 BroadcastFramesReceivedOK;
+ u64 InRangeLengthErrors;
+ u64 OutOfRangeLengthField;
+ u64 FrameTooLongErrors;
+ );
+};
+
+/* Basic IEEE 802.3 PHY statistics (30.3.2.1.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_phy_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 SymbolErrorDuringCarrier;
+ );
+};
+
+/**
+ * struct ethtool_phy_stats - PHY-level statistics counters
+ * @rx_packets: Total successfully received frames
+ * @rx_bytes: Total successfully received bytes
+ * @rx_errors: Total received frames with errors (e.g., CRC errors)
+ * @tx_packets: Total successfully transmitted frames
+ * @tx_bytes: Total successfully transmitted bytes
+ * @tx_errors: Total transmitted frames with errors
+ *
+ * This structure provides a standardized interface for reporting
+ * PHY-level statistics counters. It is designed to expose statistics
+ * commonly provided by PHYs but not explicitly defined in the IEEE
+ * 802.3 standard.
+ */
+struct ethtool_phy_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+};
+
+/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_ctrl_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 MACControlFramesTransmitted;
+ u64 MACControlFramesReceived;
+ u64 UnsupportedOpcodesReceived;
+ );
+};
+
+/**
+ * struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
+ * @tx_pause_frames: transmitted pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES.
+ *
+ * Equivalent to `30.3.4.2 aPAUSEMACCtrlFramesTransmitted`
+ * from the standard.
+ *
+ * @rx_pause_frames: received pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_RX_FRAMES.
+ *
+ * Equivalent to `30.3.4.3 aPAUSEMACCtrlFramesReceived`
+ * from the standard.
+ */
+struct ethtool_pause_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 tx_pause_frames;
+ u64 rx_pause_frames;
+ );
+};
+
+#define ETHTOOL_MAX_LANES 8
+/*
+ * IEEE 802.3ck/df defines 16 bins for FEC histogram plus one more for
+ * the end-of-list marker, total 17 items
+ */
+#define ETHTOOL_FEC_HIST_MAX 17
+/**
+ * struct ethtool_fec_hist_range - error bits range for FEC histogram
+ * statistics
+ * @low: low bound of the bin (inclusive)
+ * @high: high bound of the bin (inclusive)
+ */
+struct ethtool_fec_hist_range {
+ u16 low;
+ u16 high;
+};
+
+struct ethtool_fec_hist {
+ struct ethtool_fec_hist_value {
+ u64 sum;
+ u64 per_lane[ETHTOOL_MAX_LANES];
+ } values[ETHTOOL_FEC_HIST_MAX];
+ const struct ethtool_fec_hist_range *ranges;
+};
+/**
+ * struct ethtool_fec_stats - statistics for IEEE 802.3 FEC
+ * @corrected_blocks: number of received blocks corrected by FEC
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_CORRECTED.
+ *
+ * Equivalent to `30.5.1.1.17 aFECCorrectedBlocks` from the standard.
+ *
+ * @uncorrectable_blocks: number of received blocks FEC was not able to correct
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_UNCORR.
+ *
+ * Equivalent to `30.5.1.1.18 aFECUncorrectableBlocks` from the standard.
+ *
+ * @corrected_bits: number of bits corrected by FEC
+ * Similar to @corrected_blocks but counts individual bit changes,
+ * not entire FEC data blocks. This is a non-standard statistic.
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS.
+ *
+ * For each of the above fields, the two substructure members are:
+ *
+ * - @lanes: per-lane/PCS-instance counts as defined by the standard
+ * - @total: error counts for the entire port, for drivers incapable of reporting
+ * per-lane stats
+ *
+ * Drivers should fill in either only total or per-lane statistics, core
+ * will take care of adding lane values up to produce the total.
+ */
+struct ethtool_fec_stats {
+ struct ethtool_fec_stat {
+ u64 total;
+ u64 lanes[ETHTOOL_MAX_LANES];
+ } corrected_blocks, uncorrectable_blocks, corrected_bits;
+};
+
+/**
+ * struct ethtool_rmon_hist_range - byte range for histogram statistics
+ * @low: low bound of the bucket (inclusive)
+ * @high: high bound of the bucket (inclusive)
+ */
+struct ethtool_rmon_hist_range {
+ u16 low;
+ u16 high;
+};
+
+#define ETHTOOL_RMON_HIST_MAX 11
+
+/**
+ * struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
+ * @undersize_pkts: Equivalent to `etherStatsUndersizePkts` from the RFC.
+ * @oversize_pkts: Equivalent to `etherStatsOversizePkts` from the RFC.
+ * @fragments: Equivalent to `etherStatsFragments` from the RFC.
+ * @jabbers: Equivalent to `etherStatsJabbers` from the RFC.
+ * @hist: Packet counter for packet length buckets (e.g.
+ * `etherStatsPkts128to255Octets` from the RFC).
+ * @hist_tx: Tx counters in similar form to @hist, not defined in the RFC.
+ *
+ * Selection of RMON (RFC 2819) statistics which are not exposed via different
+ * APIs, primarily the packet-length-based counters.
+ * Unfortunately different designs choose different buckets beyond
+ * the 1024B mark (jumbo frame territory), so the definition of the bucket
+ * ranges is left to the driver.
+ */
+struct ethtool_rmon_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 undersize_pkts;
+ u64 oversize_pkts;
+ u64 fragments;
+ u64 jabbers;
+
+ u64 hist[ETHTOOL_RMON_HIST_MAX];
+ u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+ );
+};
+
+/**
+ * struct ethtool_ts_stats - HW timestamping statistics
+ * @pkts: Number of packets successfully timestamped by the hardware.
+ * @onestep_pkts_unconfirmed: Number of PTP packets with one-step TX
+ * timestamping that were sent, but for which the
+ * device offers no confirmation whether they made
+ * it onto the wire and the timestamp was inserted
+ * in the originTimestamp or correctionField, or
+ * not.
+ * @lost: Number of hardware timestamping requests where the timestamping
+ * information from the hardware never arrived for submission with
+ * the skb.
+ * @err: Number of arbitrary timestamp generation error events that the
+ * hardware encountered, exclusive of @lost statistics. Cases such
+ * as resource exhaustion, unavailability, firmware errors, and
+ * detected illogical timestamp values not submitted with the skb
+ * are inclusive to this counter.
+ */
+struct ethtool_ts_stats {
+ struct_group(tx_stats,
+ u64 pkts;
+ u64 onestep_pkts_unconfirmed;
+ u64 lost;
+ u64 err;
+ );
+};
+
+#define ETH_MODULE_EEPROM_PAGE_LEN 128
+#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f
+
+/**
+ * struct ethtool_module_eeprom - plug-in module EEPROM read / write parameters
+ * @offset: When @offset is 0-127, it is used as an address to the Lower Memory
+ * (@page must be 0). Otherwise, it is used as an address to the
+ * Upper Memory.
+ * @length: Number of bytes to read / write.
+ * @page: Page number.
+ * @bank: Bank number, if supported by EEPROM spec.
+ * @i2c_address: I2C address of a page. Value less than 0x7f expected. Most
+ * EEPROMs use 0x50 or 0x51.
+ * @data: Pointer to buffer with EEPROM data of @length size.
+ */
+struct ethtool_module_eeprom {
+ u32 offset;
+ u32 length;
+ u8 page;
+ u8 bank;
+ u8 i2c_address;
+ u8 *data;
+};
+
+/**
+ * struct ethtool_module_power_mode_params - module power mode parameters
+ * @policy: The power mode policy enforced by the host for the plug-in module.
+ * @mode: The operational power mode of the plug-in module. Should be filled by
+ * device drivers on get operations.
+ */
+struct ethtool_module_power_mode_params {
+ enum ethtool_module_power_mode_policy policy;
+ enum ethtool_module_power_mode mode;
+};
+
+/**
+ * struct ethtool_mm_state - 802.3 MAC merge layer state
+ * @verify_time:
+ * wait time between verification attempts in ms (according to clause
+ * 30.14.1.6 aMACMergeVerifyTime)
+ * @max_verify_time:
+ * maximum accepted value for the @verify_time variable in set requests
+ * @verify_status:
+ * state of the verification state machine of the MM layer (according to
+ * clause 30.14.1.2 aMACMergeStatusVerify)
+ * @tx_enabled:
+ * set if the MM layer is administratively enabled in the TX direction
+ * (according to clause 30.14.1.3 aMACMergeEnableTx)
+ * @tx_active:
+ * set if the MM layer is enabled in the TX direction, which makes FP
+ * possible (according to 30.14.1.5 aMACMergeStatusTx). This should be
+ * true if MM is enabled, and the verification status is either verified,
+ * or disabled.
+ * @pmac_enabled:
+ * set if the preemptible MAC is powered on and is able to receive
+ * preemptible packets and respond to verification frames.
+ * @verify_enabled:
+ * set if the Verify function of the MM layer (which sends SMD-V
+ * verification requests) is administratively enabled (regardless of
+ * whether it is currently in the ETHTOOL_MM_VERIFY_STATUS_DISABLED state
+ * or not), according to clause 30.14.1.4 aMACMergeVerifyDisableTx (but
+ * using positive rather than negative logic). The device should always
+ * respond to received SMD-V requests as long as @pmac_enabled is set.
+ * @tx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that the link partner
+ * supports receiving, expressed in octets. Compared to the definition
+ * from clause 30.14.1.7 aMACMergeAddFragSize which is expressed in the
+ * range 0 to 3 (requiring a translation to the size in octets according
+ * to the formula 64 * (1 + addFragSize) - 4), a value in a continuous and
+ * unbounded range can be specified here.
+ * @rx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that this device
+ * supports receiving, expressed in octets.
+ */
+struct ethtool_mm_state {
+ u32 verify_time;
+ u32 max_verify_time;
+ enum ethtool_mm_verify_status verify_status;
+ bool tx_enabled;
+ bool tx_active;
+ bool pmac_enabled;
+ bool verify_enabled;
+ u32 tx_min_frag_size;
+ u32 rx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_cfg - 802.3 MAC merge layer configuration
+ * @verify_time: see struct ethtool_mm_state
+ * @verify_enabled: see struct ethtool_mm_state
+ * @tx_enabled: see struct ethtool_mm_state
+ * @pmac_enabled: see struct ethtool_mm_state
+ * @tx_min_frag_size: see struct ethtool_mm_state
+ */
+struct ethtool_mm_cfg {
+ u32 verify_time;
+ bool verify_enabled;
+ bool tx_enabled;
+ bool pmac_enabled;
+ u32 tx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_stats - 802.3 MAC merge layer statistics
+ * @MACMergeFrameAssErrorCount:
+ * received MAC frames with reassembly errors
+ * @MACMergeFrameSmdErrorCount:
+ * received MAC frames/fragments rejected due to unknown or incorrect SMD
+ * @MACMergeFrameAssOkCount:
+ * received MAC frames that were successfully reassembled and passed up
+ * @MACMergeFragCountRx:
+ * number of additional correct SMD-C mPackets received due to preemption
+ * @MACMergeFragCountTx:
+ * number of additional mPackets sent due to preemption
+ * @MACMergeHoldCount:
+ * number of times the MM layer entered the HOLD state, which blocks
+ * transmission of preemptible traffic
+ */
+struct ethtool_mm_stats {
+ u64 MACMergeFrameAssErrorCount;
+ u64 MACMergeFrameSmdErrorCount;
+ u64 MACMergeFrameAssOkCount;
+ u64 MACMergeFragCountRx;
+ u64 MACMergeFragCountTx;
+ u64 MACMergeHoldCount;
+};
+
+enum ethtool_mmsv_event {
+ ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET,
+ ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET,
+ ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET,
+};
+
+/* MAC Merge verification mPacket type */
+enum ethtool_mpacket {
+ ETHTOOL_MPACKET_VERIFY,
+ ETHTOOL_MPACKET_RESPONSE,
+};
+
+struct ethtool_mmsv;
+
+/**
+ * struct ethtool_mmsv_ops - Operations for MAC Merge Software Verification
+ * @configure_tx: Driver callback for the event where the preemptible TX
+ * becomes active or inactive. Preemptible traffic
+ * classes must be committed to hardware only while
+ * preemptible TX is active.
+ * @configure_pmac: Driver callback for the event where the pMAC state
+ * changes as result of an administrative setting
+ * (ethtool) or a call to ethtool_mmsv_link_state_handle().
+ * @send_mpacket: Driver-provided method for sending a Verify or a Response
+ * mPacket.
+ */
+struct ethtool_mmsv_ops {
+ void (*configure_tx)(struct ethtool_mmsv *mmsv, bool tx_active);
+ void (*configure_pmac)(struct ethtool_mmsv *mmsv, bool pmac_enabled);
+ void (*send_mpacket)(struct ethtool_mmsv *mmsv, enum ethtool_mpacket mpacket);
+};
+
+/**
+ * struct ethtool_mmsv - MAC Merge Software Verification
+ * @ops: operations for MAC Merge Software Verification
+ * @dev: pointer to net_device structure
+ * @lock: serialize access to MAC Merge state between
+ * ethtool requests and link state updates.
+ * @status: current verification FSM state
+ * @verify_timer: timer for verification in local TX direction
+ * @verify_enabled: indicates if verification is enabled
+ * @verify_retries: number of retries for verification
+ * @pmac_enabled: indicates if the preemptible MAC is enabled
+ * @verify_time: time for verification in milliseconds
+ * @tx_enabled: indicates if transmission is enabled
+ */
+struct ethtool_mmsv {
+ const struct ethtool_mmsv_ops *ops;
+ struct net_device *dev;
+ spinlock_t lock;
+ enum ethtool_mm_verify_status status;
+ struct timer_list verify_timer;
+ bool verify_enabled;
+ int verify_retries;
+ bool pmac_enabled;
+ u32 verify_time;
+ bool tx_enabled;
+};
+
+void ethtool_mmsv_stop(struct ethtool_mmsv *mmsv);
+void ethtool_mmsv_link_state_handle(struct ethtool_mmsv *mmsv, bool up);
+void ethtool_mmsv_event_handle(struct ethtool_mmsv *mmsv,
+ enum ethtool_mmsv_event event);
+void ethtool_mmsv_get_mm(struct ethtool_mmsv *mmsv,
+ struct ethtool_mm_state *state);
+void ethtool_mmsv_set_mm(struct ethtool_mmsv *mmsv, struct ethtool_mm_cfg *cfg);
+void ethtool_mmsv_init(struct ethtool_mmsv *mmsv, struct net_device *dev,
+ const struct ethtool_mmsv_ops *ops);
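+
+/*
+ * Illustrative sketch (not part of this header): a MAC driver for a
+ * hypothetical "foo" device, with a struct ethtool_mmsv embedded in its
+ * private data, could wire up the software verification state machine
+ * like this (the foo_* helpers stand in for device-specific code):
+ *
+ *	static const struct ethtool_mmsv_ops foo_mmsv_ops = {
+ *		.configure_tx = foo_mmsv_configure_tx,
+ *		.configure_pmac = foo_mmsv_configure_pmac,
+ *		.send_mpacket = foo_mmsv_send_mpacket,
+ *	};
+ *
+ *	ethtool_mmsv_init(&priv->mmsv, dev, &foo_mmsv_ops);
+ *
+ * The driver then forwards carrier changes through
+ * ethtool_mmsv_link_state_handle(), and from its "received SMD-V"
+ * interrupt calls
+ * ethtool_mmsv_event_handle(&priv->mmsv, ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET).
+ */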
+
+/**
+ * struct ethtool_rxfh_param - RXFH (RSS) parameters
+ * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
+ * Valid values are one of the %ETH_RSS_HASH_*.
+ * @indir_size: On SET, the array size of the user buffer for the
+ * indirection table, which may be zero, or
+ * %ETH_RXFH_INDIR_NO_CHANGE. On GET (read from the driver),
+ * the array size of the hardware indirection table.
+ * @indir: The indirection table of size @indir_size entries.
+ * @key_size: On SET, the array size of the user buffer for the hash key,
+ * which may be zero. On GET (read from the driver), the size of the
+ * hardware hash key.
+ * @key: The hash key of size @key_size bytes.
+ * @rss_context: RSS context identifier. Context 0 is the default for normal
+ * traffic; other contexts can be referenced as the destination for RX flow
+ * classification rules. On SET, %ETH_RXFH_CONTEXT_ALLOC is used
+ * to allocate a new RSS context; on return this field will
+ * contain the ID of the newly allocated context.
+ * @rss_delete: Set to non-zero to remove the @rss_context context.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ */
+struct ethtool_rxfh_param {
+ u8 hfunc;
+ u32 indir_size;
+ u32 *indir;
+ u32 key_size;
+ u8 *key;
+ u32 rss_context;
+ u8 rss_delete;
+ u8 input_xfrm;
+};
+
+/**
+ * struct ethtool_rxfh_fields - Rx Flow Hashing (RXFH) header field config
+ * @data: which header fields are used for hashing, bitmask of RXH_* defines
+ * @flow_type: L2-L4 network traffic flow type
+ * @rss_context: RSS context, will only be used if rxfh_per_ctx_fields is
+ * set in struct ethtool_ops
+ */
+struct ethtool_rxfh_fields {
+ u32 data;
+ u32 flow_type;
+ u32 rss_context;
+};
+
+/**
+ * struct kernel_ethtool_ts_info - kernel copy of struct ethtool_ts_info
+ * @cmd: command number = %ETHTOOL_GET_TS_INFO
+ * @so_timestamping: bit mask of the supported SO_TIMESTAMPING flags
+ * @phc_index: device index of the associated PHC, or -1 if there is none
+ * @phc_qualifier: qualifier of the associated PHC
+ * @phc_source: source device of the associated PHC
+ * @phc_phyindex: index of PHY device source of the associated PHC
+ * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ */
+struct kernel_ethtool_ts_info {
+ u32 cmd;
+ u32 so_timestamping;
+ int phc_index;
+ enum hwtstamp_provider_qualifier phc_qualifier;
+ enum hwtstamp_source phc_source;
+ int phc_phyindex;
+ u32 tx_types;
+ u32 rx_filters;
+};
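+
+/*
+ * Illustrative sketch (not part of this header): a MAC driver with a PHC
+ * and hardware TX/RX timestamping might fill this in roughly as below;
+ * the foo_priv layout is an assumption made for the example.
+ *
+ *	static int foo_get_ts_info(struct net_device *dev,
+ *				   struct kernel_ethtool_ts_info *info)
+ *	{
+ *		struct foo_priv *priv = netdev_priv(dev);
+ *
+ *		info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ *					SOF_TIMESTAMPING_RX_HARDWARE |
+ *					SOF_TIMESTAMPING_RAW_HARDWARE;
+ *		info->phc_index = ptp_clock_index(priv->ptp_clock);
+ *		info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ *		info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ *				   BIT(HWTSTAMP_FILTER_ALL);
+ *		return 0;
+ *	}
+ */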
+
/**
* struct ethtool_ops - optional netdev operations
- * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
- * API. Get various device settings including Ethernet link
- * settings. The @cmd parameter is expected to have been cleared
- * before get_settings is called. Returns a negative error code
- * or zero.
- * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
- * API. Set various device settings including Ethernet link
- * settings. Returns a negative error code or zero.
- * @get_drvinfo: Report driver/device information. Should only set the
- * @driver, @version, @fw_version and @bus_info fields. If not
- * implemented, the @driver and @bus_info fields will be filled in
- * according to the netdev's parent device.
+ * @supported_input_xfrm: supported types of input xfrm from %RXH_XFRM_*.
+ * @cap_link_lanes_supported: indicates if the driver supports lanes
+ * parameter.
+ * @rxfh_per_ctx_fields: device supports selecting different header fields
+ * for Rx hash calculation and RSS for each additional context.
+ * @rxfh_per_ctx_key: device supports setting different RSS key for each
+ * additional context. Netlink API should report hfunc, key, and input_xfrm
+ * for every context, not just context 0.
+ * @cap_rss_rxnfc_adds: device supports nonzero ring_cookie in filters with
+ * %FLOW_RSS flag; the queue ID from the filter is added to the value from
+ * the indirection table to determine the delivery queue.
+ * @rxfh_indir_space: max size of RSS indirection tables, if indirection table
+ * size as returned by @get_rxfh_indir_size may change during lifetime
+ * of the device. Leave as 0 if the table size is constant.
+ * @rxfh_key_space: same as @rxfh_indir_space, but for the key.
+ * @rxfh_priv_size: size of the driver private data area the core should
+ * allocate for an RSS context (in &struct ethtool_rxfh_context).
+ * @rxfh_max_num_contexts: maximum (exclusive) supported RSS context ID.
+ * If this is zero then the core may choose any (nonzero) ID, otherwise
+ * the core will only use IDs strictly less than this value, as the
+ * @rss_context argument to @create_rxfh_context and friends.
+ * @supported_coalesce_params: supported types of interrupt coalescing.
+ * @supported_ring_params: supported ring params.
+ * @supported_hwtstamp_qualifiers: bitfield of supported hwtstamp qualifiers.
+ * @get_drvinfo: Report driver/device information. Modern drivers no
+ * longer have to implement this callback. Most fields are
+ * correctly filled in by the core using system information, or
+ * populated using other driver operations.
* @get_regs_len: Get buffer length required for @get_regs
* @get_regs: Get device registers
* @get_wol: Report whether Wake-on-Lan is enabled
@@ -186,6 +925,15 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_link: Report whether physical link is up. Will only be called if
* the netdev is up. Should usually be set to ethtool_op_get_link(),
* which uses netif_carrier_ok().
+ * @get_link_ext_state: Report link extended state. Should set link_ext_state and
+ * link_ext_substate (link_ext_substate of 0 means link_ext_substate is unknown,
+ * do not attach ext_substate attribute to netlink message). If link_ext_state
+ * and link_ext_substate are unknown, return -ENODATA. If not implemented,
+ * link_ext_state and link_ext_substate will not be sent to userspace.
+ * @get_link_ext_stats: Read extra link-related counters.
+ * @get_eeprom_len: Read range of EEPROM addresses for validation of
+ * @get_eeprom and @set_eeprom requests.
+ * Returns 0 if device does not support EEPROM access.
* @get_eeprom: Read data from the device EEPROM.
* Should fill in the magic field. Don't need to check len for zero
* or wraparound. Fill in the data argument with the eeprom values
@@ -197,10 +945,14 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* or zero.
* @get_coalesce: Get interrupt coalescing parameters. Returns a negative
* error code or zero.
- * @set_coalesce: Set interrupt coalescing parameters. Returns a negative
- * error code or zero.
+ * @set_coalesce: Set interrupt coalescing parameters. Supported coalescing
+ * types should be set in @supported_coalesce_params.
+ * Returns a negative error code or zero.
* @get_ringparam: Report ring sizes
* @set_ringparam: Set ring sizes. Returns a negative error code or zero.
+ * @get_pause_stats: Report pause frame statistics. Drivers must not zero
+ * statistics which they don't report. The stats structure is initialized
+ * to ETHTOOL_STAT_NOT_SET indicating driver does not report statistics.
* @get_pauseparam: Report pause parameters
* @set_pauseparam: Set pause parameters. Returns a negative error code
* or zero.
@@ -238,6 +990,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @reset: Reset (part of) the device, as specified by a bitmask of
* flags from &enum ethtool_reset_flags. Returns a negative
* error code or zero.
+ * @get_rx_ring_count: Return the number of RX rings
* @get_rxfh_key_size: Get the size of the RX flow hash key.
* Returns zero if not supported for this specific device.
* @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
@@ -250,6 +1003,34 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
+ * @get_rxfh_fields: Get header fields used for flow hashing.
+ * @set_rxfh_fields: Set header fields used for flow hashing.
+ * @create_rxfh_context: Create a new RSS context with the specified RX flow
+ * hash indirection table, hash key, and hash function.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx;
+ * note that the indir table, hkey and hfunc are not yet populated as
+ * of this call. The driver does not need to update these; the core
+ * will do so if this op succeeds.
+ * However, if @rxfh.indir is set to %NULL, the driver must update the
+ * indir table in @ctx with the (default or inherited) table actually in
+ * use; similarly, if @rxfh.key is %NULL, @rxfh.hfunc is
+ * %ETH_RSS_HASH_NO_CHANGE, or @rxfh.input_xfrm is %RXH_XFRM_NO_CHANGE,
+ * the driver should update the corresponding information in @ctx.
+ * If the driver provides this method, it must also provide
+ * @modify_rxfh_context and @remove_rxfh_context.
+ * Returns a negative error code or zero.
+ * @modify_rxfh_context: Reconfigure the specified RSS context. Allows setting
+ * the contents of the RX flow hash indirection table, hash key, and/or
+ * hash function associated with the given context.
+ * Parameters which are set to %NULL or zero will remain unchanged.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx;
+ * note that it will still contain the *old* settings. The driver does
+ * not need to update these; the core will do so if this op succeeds.
+ * Returns a negative error code or zero. An error code must be returned
+ * if at least one unsupported change was requested.
+ * @remove_rxfh_context: Remove the specified RSS context.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx.
+ * Returns a negative error code or zero.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -258,13 +1039,20 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_dump_data: Get dump data.
* @set_dump: Set dump specific flags to the device.
* @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
+ * It may be called under RCU, with the rtnl lock held, or with a
+ * reference on the device.
* Drivers supporting transmit time stamps in software should set this to
* ethtool_op_get_ts_info().
+ * @get_ts_stats: Query the device hardware timestamping statistics. Drivers
+ * must not zero statistics which they don't report. The stats structure
+ * is initialized to ETHTOOL_STAT_NOT_SET indicating driver does not
+ * report statistics.
* @get_module_info: Get the size and type of the eeprom contained within
* a plug-in module.
* @get_module_eeprom: Get the eeprom information from the plug-in module
* @get_eee: Get Energy-Efficient (EEE) supported and status.
* @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_tunable: Read the value of a driver / device tunable.
+ * @set_tunable: Set the value of a driver / device tunable.
* @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
@@ -273,21 +1061,49 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @set_per_queue_coalesce: Set interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
- * queue has this number, ignore the inapplicable fields.
+ * queue has this number, ignore the inapplicable fields. Supported
+ * coalescing types should be set in @supported_coalesce_params.
* Returns a negative error code or zero.
- * @get_link_ksettings: When defined, takes precedence over the
- * %get_settings method. Get various device settings
- * including Ethernet link settings. The %cmd and
- * %link_mode_masks_nwords fields should be ignored (use
- * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any
- * change to them will be overwritten by kernel. Returns a
- * negative error code or zero.
- * @set_link_ksettings: When defined, takes precedence over the
- * %set_settings method. Set various device settings including
- * Ethernet link settings. The %cmd and %link_mode_masks_nwords
- * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
- * instead of the latter), any change to them will be overwritten
- * by kernel. Returns a negative error code or zero.
+ * @get_link_ksettings: Get various device settings including Ethernet link
+ * settings. The %cmd and %link_mode_masks_nwords fields should be
+ * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
+ * any change to them will be overwritten by kernel. Returns a negative
+ * error code or zero.
+ * @set_link_ksettings: Set various device settings including Ethernet link
+ * settings. The %cmd and %link_mode_masks_nwords fields should be
+ * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
+ * any change to them will be overwritten by kernel. Returns a negative
+ * error code or zero.
+ * @get_fec_stats: Report FEC statistics.
+ * Core will sum up per-lane stats to get the total.
+ * Drivers must not zero statistics which they don't report. The stats
+ * structure is initialized to ETHTOOL_STAT_NOT_SET indicating driver does
+ * not report statistics.
+ * @get_fecparam: Get the network device Forward Error Correction parameters.
+ * @set_fecparam: Set the network device Forward Error Correction parameters.
+ * @get_ethtool_phy_stats: Return extended statistics about the PHY device.
+ * This is only useful if the device maintains PHY statistics and
+ * cannot use the standard PHY library helpers.
+ * @get_phy_tunable: Read the value of a PHY tunable.
+ * @set_phy_tunable: Set the value of a PHY tunable.
+ * @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from
+ * specified page. Returns a negative error code or the amount of bytes
+ * read.
+ * @set_module_eeprom_by_page: Write to a region of plug-in module EEPROM,
+ * from kernel space only. Returns a negative error code or zero.
+ * @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics.
+ * @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics.
+ * @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics.
+ * @get_rmon_stats: Query some of the RMON (RFC 2819) statistics.
+ * Set %ranges to a pointer to zero-terminated array of byte ranges.
+ * @get_module_power_mode: Get the power mode policy for the plug-in module
+ * used by the network device and its operational power mode, if
+ * plugged-in.
+ * @set_module_power_mode: Set the power mode policy for the plug-in module
+ * used by the network device.
+ * @get_mm: Query the 802.3 MAC Merge layer state.
+ * @set_mm: Set the 802.3 MAC Merge layer parameters.
+ * @get_mm_stats: Query the 802.3 MAC Merge layer statistics.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -302,8 +1118,18 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* of the generic netdev features interface.
*/
struct ethtool_ops {
- int (*get_settings)(struct net_device *, struct ethtool_cmd *);
- int (*set_settings)(struct net_device *, struct ethtool_cmd *);
+ u32 supported_input_xfrm:8;
+ u32 cap_link_lanes_supported:1;
+ u32 rxfh_per_ctx_fields:1;
+ u32 rxfh_per_ctx_key:1;
+ u32 cap_rss_rxnfc_adds:1;
+ u32 rxfh_indir_space;
+ u16 rxfh_key_space;
+ u16 rxfh_priv_size;
+ u32 rxfh_max_num_contexts;
+ u32 supported_coalesce_params;
+ u32 supported_ring_params;
+ u32 supported_hwtstamp_qualifiers;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -313,17 +1139,33 @@ struct ethtool_ops {
void (*set_msglevel)(struct net_device *, u32);
int (*nway_reset)(struct net_device *);
u32 (*get_link)(struct net_device *);
+ int (*get_link_ext_state)(struct net_device *,
+ struct ethtool_link_ext_state_info *);
+ void (*get_link_ext_stats)(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats);
int (*get_eeprom_len)(struct net_device *);
int (*get_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
int (*set_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
- int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*get_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
+ int (*set_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
void (*get_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
int (*set_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
+ void (*get_pause_stats)(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
void (*get_pauseparam)(struct net_device *,
struct ethtool_pauseparam*);
int (*set_pauseparam)(struct net_device *,
@@ -343,25 +1185,44 @@ struct ethtool_ops {
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
+ u32 (*get_rx_ring_count)(struct net_device *dev);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
- int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
- u8 *hfunc);
- int (*set_rxfh)(struct net_device *, const u32 *indir,
- const u8 *key, const u8 hfunc);
+ int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
+ int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
+ struct netlink_ext_ack *extack);
+ int (*get_rxfh_fields)(struct net_device *,
+ struct ethtool_rxfh_fields *);
+ int (*set_rxfh_fields)(struct net_device *,
+ const struct ethtool_rxfh_fields *,
+ struct netlink_ext_ack *extack);
+ int (*create_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+ int (*modify_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+ int (*remove_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
int (*get_dump_data)(struct net_device *,
struct ethtool_dump *, void *);
int (*set_dump)(struct net_device *, struct ethtool_dump *);
- int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
+ int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *);
+ void (*get_ts_stats)(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats);
int (*get_module_info)(struct net_device *,
struct ethtool_modinfo *);
int (*get_module_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_eee)(struct net_device *, struct ethtool_eee *);
- int (*set_eee)(struct net_device *, struct ethtool_eee *);
+ int (*get_eee)(struct net_device *dev, struct ethtool_keee *eee);
+ int (*set_eee)(struct net_device *dev, struct ethtool_keee *eee);
int (*get_tunable)(struct net_device *,
const struct ethtool_tunable *, void *);
int (*set_tunable)(struct net_device *,
@@ -374,5 +1235,250 @@ struct ethtool_ops {
struct ethtool_link_ksettings *);
int (*set_link_ksettings)(struct net_device *,
const struct ethtool_link_ksettings *);
+ void (*get_fec_stats)(struct net_device *dev,
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist);
+ int (*get_fecparam)(struct net_device *,
+ struct ethtool_fecparam *);
+ int (*set_fecparam)(struct net_device *,
+ struct ethtool_fecparam *);
+ void (*get_ethtool_phy_stats)(struct net_device *,
+ struct ethtool_stats *, u64 *);
+ int (*get_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, void *);
+ int (*set_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, const void *);
+ int (*get_module_eeprom_by_page)(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+ int (*set_module_eeprom_by_page)(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+ void (*get_eth_phy_stats)(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+ int (*get_module_power_mode)(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*set_module_power_mode)(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*get_mm)(struct net_device *dev, struct ethtool_mm_state *state);
+ int (*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+ void (*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats);
+};
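+
+/*
+ * Illustrative sketch (not part of this header): a minimal driver
+ * populates only the handful of operations it supports, e.g.:
+ *
+ *	static const struct ethtool_ops foo_ethtool_ops = {
+ *		.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ *		.get_link = ethtool_op_get_link,
+ *		.get_link_ksettings = foo_get_link_ksettings,
+ *		.set_link_ksettings = foo_set_link_ksettings,
+ *	};
+ *
+ * All other pointers stay NULL; the core treats every operation as
+ * optional. The foo_* callbacks are assumptions made for the example.
+ */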
+
+int ethtool_check_ops(const struct ethtool_ops *ops);
+
+struct ethtool_rx_flow_rule {
+ struct flow_rule *rule;
+ unsigned long priv[];
+};
+
+struct ethtool_rx_flow_spec_input {
+ const struct ethtool_rx_flow_spec *fs;
+ u32 rss_ctx;
+};
+
+struct ethtool_rx_flow_rule *
+ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input);
+void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule);
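+
+/*
+ * Illustrative sketch (not part of this header): drivers built on the
+ * flow_rule infrastructure can translate an ethtool flow spec and then
+ * parse the generic rule (foo_parse_flow_rule is hypothetical):
+ *
+ *	struct ethtool_rx_flow_spec_input input = {
+ *		.fs = fs,
+ *		.rss_ctx = rss_context,
+ *	};
+ *	struct ethtool_rx_flow_rule *rule;
+ *
+ *	rule = ethtool_rx_flow_rule_create(&input);
+ *	if (IS_ERR(rule))
+ *		return PTR_ERR(rule);
+ *	err = foo_parse_flow_rule(priv, rule->rule);
+ *	ethtool_rx_flow_rule_destroy(rule);
+ *
+ * where @rss_ctx is only meaningful for rules carrying the FLOW_RSS flag.
+ */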
+
+bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd);
+int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd,
+ u32 *dev_speed, u8 *dev_duplex);
+
+/**
+ * struct ethtool_netdev_state - per-netdevice state for ethtool features
+ * @rss_ctx: XArray of custom RSS contexts
+ * @rss_lock: Protects entries in @rss_ctx. May be taken from
+ * within RTNL.
+ * @wol_enabled: Wake-on-LAN is enabled
+ * @module_fw_flash_in_progress: Module firmware flashing is in progress.
+ */
+struct ethtool_netdev_state {
+ struct xarray rss_ctx;
+ struct mutex rss_lock;
+ unsigned wol_enabled:1;
+ unsigned module_fw_flash_in_progress:1;
+};
+
+struct phy_device;
+struct phy_tdr_config;
+struct phy_plca_cfg;
+struct phy_plca_status;
+
+/**
+ * struct ethtool_phy_ops - Optional PHY device options
+ * @get_sset_count: Get number of strings that @get_strings will write.
+ * @get_strings: Return a set of strings that describe the requested objects
+ * @get_stats: Return extended statistics about the PHY device.
+ * @get_plca_cfg: Return PLCA configuration.
+ * @set_plca_cfg: Set PLCA configuration.
+ * @get_plca_status: Get PLCA status.
+ * @start_cable_test: Start a cable test
+ * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
+ *
+ * All operations are optional (i.e. the function pointer may be set to %NULL)
+ * and callers must take this into account. Callers must hold the RTNL lock.
+ */
+struct ethtool_phy_ops {
+ int (*get_sset_count)(struct phy_device *dev);
+ int (*get_strings)(struct phy_device *dev, u8 *data);
+ int (*get_stats)(struct phy_device *dev,
+ struct ethtool_stats *stats, u64 *data);
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
+ int (*start_cable_test)(struct phy_device *phydev,
+ struct netlink_ext_ack *extack);
+ int (*start_cable_test_tdr)(struct phy_device *phydev,
+ struct netlink_ext_ack *extack,
+ const struct phy_tdr_config *config);
+};
+
+/**
+ * ethtool_set_ethtool_phy_ops - Set the ethtool_phy_ops singleton
+ * @ops: Ethtool PHY operations to set
+ */
+void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
+
+/**
+ * ethtool_params_from_link_mode - Derive link parameters from a given link mode
+ * @link_ksettings: Link parameters to be derived from the link mode
+ * @link_mode: Link mode
+ */
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+ enum ethtool_link_mode_bit_indices link_mode);
+
+/**
+ * ethtool_get_phc_vclocks - Derive phc vclocks information; the caller
+ * is responsible for freeing the memory of @vclock_index
+ * @dev: pointer to net_device structure
+ * @vclock_index: pointer to pointer of vclock index
+ *
+ * Return: number of phc vclocks
+ */
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+
+/* Some generic methods drivers may use in their ethtool_ops */
+u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *eti);
+
+/**
+ * ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment
+ * size expressed as multiplier into (absolute) minimum fragment size
+ * value expressed in octets
+ * @val_add: Value of addFragSize multiplier
+ */
+static inline u32 ethtool_mm_frag_size_add_to_min(u32 val_add)
+{
+ return (ETH_ZLEN + ETH_FCS_LEN) * (1 + val_add) - ETH_FCS_LEN;
+}
+
+/**
+ * ethtool_mm_frag_size_min_to_add - Translate (absolute) minimum fragment size
+ * expressed in octets into (standard) additional fragment size expressed
+ * as multiplier
+ * @val_min: Value of the minimum fragment size in octets
+ * @val_add: Pointer where the standard addFragSize value is to be returned
+ * @extack: Netlink extended ack
+ *
+ * Translate a value in octets to one of 0, 1, 2, 3 according to the reverse
+ * application of the 802.3 formula 64 * (1 + addFragSize) - 4. To be called
+ * by drivers which do not support programming the minimum fragment size to a
+ * continuous range. Returns error on other fragment length values.
+ */
+static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+ struct netlink_ext_ack *extack)
+{
+ u32 add_frag_size;
+
+ for (add_frag_size = 0; add_frag_size < 4; add_frag_size++) {
+ if (ethtool_mm_frag_size_add_to_min(add_frag_size) == val_min) {
+ *val_add = add_frag_size;
+ return 0;
+ }
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "minFragSize required to be one of 60, 124, 188 or 252");
+ return -EINVAL;
+}
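+
+/*
+ * With ETH_ZLEN = 60 and ETH_FCS_LEN = 4, the formula above maps
+ * addFragSize 0, 1, 2, 3 to minimum fragment sizes of 60, 124, 188 and
+ * 252 octets, respectively. Illustrative use in a driver's set_mm
+ * (the foo_* names are assumptions made for the example):
+ *
+ *	static int foo_set_mm(struct net_device *dev,
+ *			      struct ethtool_mm_cfg *cfg,
+ *			      struct netlink_ext_ack *extack)
+ *	{
+ *		u32 add_frag_size;
+ *		int err;
+ *
+ *		err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ *						      &add_frag_size, extack);
+ *		if (err)
+ *			return err;
+ *
+ *		return foo_hw_write_add_frag_size(dev, add_frag_size);
+ *	}
+ */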
+
+/**
+ * ethtool_get_ts_info_by_layer - Obtains time stamping capabilities from the MAC or PHY layer.
+ * @dev: pointer to net_device structure
+ * @info: buffer to hold the result
+ * Returns: zero on success, non-zero otherwise.
+ */
+int ethtool_get_ts_info_by_layer(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
+
+/**
+ * ethtool_sprintf - Write formatted string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to update
+ * @fmt: Format of string to write
+ *
+ * Write formatted string to *data. Update *data to point at start of
+ * next string.
+ */
+extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+
+/**
+ * ethtool_puts - Write string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to update
+ * @str: String to write
+ *
+ * Write string to *data without a trailing newline. Update *data
+ * to point at start of next string.
+ *
+ * Prefer this function to ethtool_sprintf() when given only
+ * two arguments or if @fmt is just "%s".
+ */
+extern void ethtool_puts(u8 **data, const char *str);
+
+/**
+ * ethtool_cpy - Write possibly-not-NUL-terminated string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to write into
+ * @str: NUL-byte padded char array of size ETH_GSTRING_LEN to copy from
+ */
+#define ethtool_cpy(data, str) do { \
+ BUILD_BUG_ON(sizeof(str) != ETH_GSTRING_LEN); \
+ memcpy(*(data), str, ETH_GSTRING_LEN); \
+ *(data) += ETH_GSTRING_LEN; \
+} while (0)
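+
+/*
+ * Illustrative sketch (not part of this header): in a get_strings
+ * implementation these helpers advance the string cursor for you
+ * (FOO_NUM_QUEUES is an assumption made for the example):
+ *
+ *	static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
+ *	{
+ *		int i;
+ *
+ *		if (sset != ETH_SS_STATS)
+ *			return;
+ *
+ *		ethtool_puts(&data, "rx_packets");
+ *		for (i = 0; i < FOO_NUM_QUEUES; i++)
+ *			ethtool_sprintf(&data, "q%d_tx_bytes", i);
+ *	}
+ */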
+
+/* Link mode to forced speed capabilities maps */
+struct ethtool_forced_speed_map {
+ u32 speed;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+ const u32 *cap_arr;
+ u32 arr_size;
};
+
+#define ETHTOOL_FORCED_SPEED_MAP(prefix, value) \
+{ \
+ .speed = SPEED_##value, \
+ .cap_arr = prefix##_##value, \
+ .arr_size = ARRAY_SIZE(prefix##_##value), \
+}
+
+void
+ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size);
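+
+/*
+ * Illustrative sketch (not part of this header): a driver describes which
+ * link modes a forced speed implies and lets the core expand them into
+ * the @caps bitmap (the foo_* names are assumptions):
+ *
+ *	static const u32 foo_forced_speed_1000[] = {
+ *		ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ *		ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ *	};
+ *
+ *	static struct ethtool_forced_speed_map foo_speed_maps[] = {
+ *		ETHTOOL_FORCED_SPEED_MAP(foo_forced_speed, 1000),
+ *	};
+ *
+ * followed by a one-time
+ *	ethtool_forced_speed_maps_init(foo_speed_maps,
+ *				       ARRAY_SIZE(foo_speed_maps));
+ * which populates each map's @caps from @cap_arr.
+ */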
#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
new file mode 100644
index 000000000000..39254b2726c0
--- /dev/null
+++ b/include/linux/ethtool_netlink.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_ETHTOOL_NETLINK_H_
+#define _LINUX_ETHTOOL_NETLINK_H_
+
+#include <uapi/linux/ethtool_netlink.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
+ DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
+
+#define ETHTOOL_PAUSE_STAT_CNT (__ETHTOOL_A_PAUSE_STAT_CNT - \
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES)
+
+enum ethtool_multicast_groups {
+ ETHNL_MCGRP_MONITOR,
+};
+
+struct phy_device;
+
+#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
+int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd);
+void ethnl_cable_test_free(struct phy_device *phydev);
+void ethnl_cable_test_finished(struct phy_device *phydev);
+int ethnl_cable_test_result_with_src(struct phy_device *phydev, u8 pair,
+ u8 result, u32 src);
+int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev, u8 pair,
+ u32 cm, u32 src);
+int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV);
+int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV);
+int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last,
+ u32 step);
+void ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+void ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+void ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
+void ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats);
+bool ethtool_dev_mm_supported(struct net_device *dev);
+
+void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notif);
+
+#else
+static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ethnl_cable_test_free(struct phy_device *phydev)
+{
+}
+
+static inline void ethnl_cable_test_finished(struct phy_device *phydev)
+{
+}
+static inline int ethnl_cable_test_result_with_src(struct phy_device *phydev,
+ u8 pair, u8 result, u32 src)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev,
+ u8 pair, u32 cm, u32 src)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_amplitude(struct phy_device *phydev,
+ u8 pair, s16 mV)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
+ u32 last, u32 step)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats)
+{
+}
+
+static inline bool ethtool_dev_mm_supported(struct net_device *dev)
+{
+ return false;
+}
+
+static inline void ethnl_pse_send_ntf(struct net_device *netdev,
+ unsigned long notif)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
+
+static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
+ u8 result)
+{
+ return ethnl_cable_test_result_with_src(phydev, pair, result,
+ ETHTOOL_A_CABLE_INF_SRC_TDR);
+}
+
+static inline int ethnl_cable_test_fault_length(struct phy_device *phydev,
+ u8 pair, u32 cm)
+{
+ return ethnl_cable_test_fault_length_with_src(phydev, pair, cm,
+ ETHTOOL_A_CABLE_INF_SRC_TDR);
+}
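+
+/*
+ * Illustrative sketch (not part of this header): once a PHY driver has
+ * finished measuring, it reports per-pair results and fault lengths,
+ * then signals completion, e.g.:
+ *
+ *	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ *				ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
+ *	ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, 1200);
+ *	ethnl_cable_test_finished(phydev);
+ *
+ * where the fault length is given in centimeters (1200 cm = 12 m here).
+ */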
+
+#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 9e4befd95bc7..e32bee4345fb 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/eventfd.h
*
@@ -8,8 +9,12 @@
#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H
-#include <linux/fcntl.h>
#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <uapi/linux/eventfd.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -18,27 +23,27 @@
* from eventfd, in order to leave a free define-space for
* shared O_* flags.
*/
-#define EFD_SEMAPHORE (1 << 0)
-#define EFD_CLOEXEC O_CLOEXEC
-#define EFD_NONBLOCK O_NONBLOCK
-
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+struct eventfd_ctx;
struct file;
#ifdef CONFIG_EVENTFD
-struct file *eventfd_file_create(unsigned int count, int flags);
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
+
+static inline bool eventfd_signal_allowed(void)
+{
+ return !current->in_eventfd;
+}
#else /* CONFIG_EVENTFD */
@@ -46,19 +51,14 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
* Ugly ugly ugly error layer to support modules that uses eventfd but
* pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
*/
-static inline struct file *eventfd_file_create(unsigned int count, int flags)
-{
- return ERR_PTR(-ENOSYS);
-}
static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
- return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
@@ -66,19 +66,28 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
}
-static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
- __u64 *cnt)
+static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
+ wait_queue_entry_t *wait, __u64 *cnt)
{
return -ENOSYS;
}
-static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
- wait_queue_entry_t *wait, __u64 *cnt)
+static inline bool eventfd_signal_allowed(void)
{
- return -ENOSYS;
+ return true;
+}
+
+static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+{
}
#endif
+static inline void eventfd_signal(struct eventfd_ctx *ctx)
+{
+ eventfd_signal_mask(ctx, 0);
+}
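+
+/*
+ * Illustrative sketch (not part of this header): a kernel-side producer
+ * typically resolves an eventfd once and signals it from its completion
+ * path (the foo_* naming is an assumption made for the example):
+ *
+ *	static int foo_arm_notifier(struct foo_dev *fdev, int fd)
+ *	{
+ *		struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
+ *
+ *		if (IS_ERR(ctx))
+ *			return PTR_ERR(ctx);
+ *		fdev->trigger = ctx;
+ *		return 0;
+ *	}
+ *
+ * and later, from the completion path:
+ *
+ *	eventfd_signal(fdev->trigger);
+ *
+ * releasing the reference with eventfd_ctx_put() on teardown.
+ */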
+
#endif /* _LINUX_EVENTFD_H */
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 2f14ac73d01d..ccb478eb174b 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -1,14 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/eventpoll.h ( Efficient event polling implementation )
* Copyright (C) 2001,...,2006 Davide Libenzi
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Davide Libenzi <davidel@xmailserver.org>
- *
*/
#ifndef _LINUX_EVENTPOLL_H
#define _LINUX_EVENTPOLL_H
@@ -23,21 +18,17 @@ struct file;
#ifdef CONFIG_EPOLL
-#ifdef CONFIG_CHECKPOINT_RESTORE
+#ifdef CONFIG_KCMP
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
#endif
-/* Used to initialize the epoll bits inside the "struct file" */
-static inline void eventpoll_init_file(struct file *file)
-{
- INIT_LIST_HEAD(&file->f_ep_links);
- INIT_LIST_HEAD(&file->f_tfile_llink);
-}
-
-
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
+/* Copy ready events to userspace */
+int epoll_sendevents(struct file *file, struct epoll_event __user *events,
+ int maxevents);
+
/*
* This is called from inside fs/file_table.c:__fput() to unlink files
* from the eventpoll interface. We need to have this facility to cleanup
@@ -55,7 +46,7 @@ static inline void eventpoll_release(struct file *file)
* because the file in on the way to be removed and nobody ( but
* eventpoll ) has still a reference to this file.
*/
- if (likely(list_empty(&file->f_ep_links)))
+ if (likely(!READ_ONCE(file->f_ep)))
return;
/*
@@ -66,11 +57,37 @@ static inline void eventpoll_release(struct file *file)
eventpoll_release_file(file);
}
+int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
+ bool nonblock);
+
+/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
+static inline int ep_op_has_event(int op)
+{
+ return op != EPOLL_CTL_DEL;
+}
+
#else
-static inline void eventpoll_init_file(struct file *file) {}
static inline void eventpoll_release(struct file *file) {}
#endif
+#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT)
+/* ARM OABI has an incompatible struct layout and needs a special handler */
+extern struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent);
+#else
+static inline struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent)
+{
+ if (__put_user(revents, &uevent->events) ||
+ __put_user(data, &uevent->data))
+ return NULL;
+
+ return uevent+1;
+}
+#endif
+
#endif /* #ifndef _LINUX_EVENTPOLL_H */
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 35ed9a8a403a..ddece4a6b25d 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* evm.h
*
@@ -11,29 +12,22 @@
#include <linux/integrity.h>
#include <linux/xattr.h>
-struct integrity_iint_cache;
-
#ifdef CONFIG_EVM
extern int evm_set_key(void *key, size_t keylen);
extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
const char *xattr_name,
void *xattr_value,
- size_t xattr_value_len,
- struct integrity_iint_cache *iint);
-extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
-extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
-extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size);
-extern void evm_inode_post_setxattr(struct dentry *dentry,
- const char *xattr_name,
- const void *xattr_value,
- size_t xattr_value_len);
-extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
-extern void evm_inode_post_removexattr(struct dentry *dentry,
- const char *xattr_name);
-extern int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm);
+ size_t xattr_value_len);
+int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count);
+extern bool evm_revalidate_status(const char *xattr_name);
+extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
+extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt);
+extern bool evm_metadata_changed(struct inode *inode,
+ struct inode *metadata_inode);
#ifdef CONFIG_FS_POSIX_ACL
extern int posix_xattr_acl(const char *xattrname);
#else
@@ -53,54 +47,41 @@ static inline int evm_set_key(void *key, size_t keylen)
static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
const char *xattr_name,
void *xattr_value,
- size_t xattr_value_len,
- struct integrity_iint_cache *iint)
+ size_t xattr_value_len)
{
return INTEGRITY_UNKNOWN;
}
#endif
-static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+static inline int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ struct xattr *xattrs,
+ int *xattr_count)
{
return 0;
}
-static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
-{
- return;
-}
-
-static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size)
+static inline bool evm_revalidate_status(const char *xattr_name)
{
- return 0;
+ return false;
}
-static inline void evm_inode_post_setxattr(struct dentry *dentry,
- const char *xattr_name,
- const void *xattr_value,
- size_t xattr_value_len)
+static inline int evm_protected_xattr_if_enabled(const char *req_xattr_name)
{
- return;
+ return false;
}
-static inline int evm_inode_removexattr(struct dentry *dentry,
- const char *xattr_name)
+static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt)
{
- return 0;
-}
-
-static inline void evm_inode_post_removexattr(struct dentry *dentry,
- const char *xattr_name)
-{
- return;
+ return -EOPNOTSUPP;
}
-static inline int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm)
+static inline bool evm_metadata_changed(struct inode *inode,
+ struct inode *metadata_inode)
{
- return 0;
+ return false;
}
#endif /* CONFIG_EVM */
diff --git a/include/linux/execmem.h b/include/linux/execmem.h
new file mode 100644
index 000000000000..7de229134e30
--- /dev/null
+++ b/include/linux/execmem.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EXECMEM_ALLOC_H
+#define _LINUX_EXECMEM_ALLOC_H
+
+#include <linux/types.h>
+#include <linux/moduleloader.h>
+#include <linux/cleanup.h>
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
+/**
+ * enum execmem_type - types of executable memory ranges
+ *
+ * There are several subsystems that allocate executable memory.
+ * Architectures define different restrictions on placement,
+ * permissions, alignment and other parameters for memory that can be used
+ * by these subsystems.
+ * Types in this enum identify subsystems that allocate executable memory
+ * and let architectures define parameters for ranges suitable for
+ * allocations by each subsystem.
+ *
+ * @EXECMEM_DEFAULT: default parameters that would be used for types that
+ * are not explicitly defined.
+ * @EXECMEM_MODULE_TEXT: parameters for module text sections
+ * @EXECMEM_KPROBES: parameters for kprobes
+ * @EXECMEM_FTRACE: parameters for ftrace
+ * @EXECMEM_BPF: parameters for BPF
+ * @EXECMEM_MODULE_DATA: parameters for module data sections
+ * @EXECMEM_TYPE_MAX: sentinel marking the number of defined types
+ */
+enum execmem_type {
+ EXECMEM_DEFAULT,
+ EXECMEM_MODULE_TEXT = EXECMEM_DEFAULT,
+ EXECMEM_KPROBES,
+ EXECMEM_FTRACE,
+ EXECMEM_BPF,
+ EXECMEM_MODULE_DATA,
+ EXECMEM_TYPE_MAX,
+};
+
+/**
+ * enum execmem_range_flags - options for executable memory allocations
+ * @EXECMEM_KASAN_SHADOW: allocate kasan shadow
+ * @EXECMEM_ROX_CACHE: allocations should use ROX cache of huge pages
+ */
+enum execmem_range_flags {
+ EXECMEM_KASAN_SHADOW = (1 << 0),
+ EXECMEM_ROX_CACHE = (1 << 1),
+};
+
+#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
+/**
+ * execmem_fill_trapping_insns - set memory to contain instructions that
+ * will trap
+ * @ptr: pointer to memory to fill
+ * @size: size of the range to fill
+ *
+ * A hook for architectures to fill execmem ranges with invalid instructions.
+ * Architectures that use EXECMEM_ROX_CACHE must implement this.
+ */
+void execmem_fill_trapping_insns(void *ptr, size_t size);
+
+/**
+ * execmem_restore_rox - restore read-only-execute permissions
+ * @ptr: address of the region to remap
+ * @size: size of the region to remap
+ *
+ * Restores read-only-execute permissions on a range [@ptr, @ptr + @size)
+ * after it was temporarily remapped as writable. Relies on architecture
+ * implementation of set_memory_rox() to restore mapping using large pages.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int execmem_restore_rox(void *ptr, size_t size);
+#else
+static inline int execmem_restore_rox(void *ptr, size_t size) { return 0; }
+#endif
+
+/**
+ * struct execmem_range - definition of an address space suitable for code and
+ * related data allocations
+ * @start: address space start
+ * @end: address space end (inclusive)
+ * @fallback_start: start of the secondary address space range for fallback
+ * allocations on architectures that require it
+ * @fallback_end: end of the secondary address space range (inclusive)
+ * @pgprot: permissions for memory in this address space
+ * @alignment: alignment required for text allocations
+ * @flags: options for memory allocations for this range
+ */
+struct execmem_range {
+ unsigned long start;
+ unsigned long end;
+ unsigned long fallback_start;
+ unsigned long fallback_end;
+ pgprot_t pgprot;
+ unsigned int alignment;
+ enum execmem_range_flags flags;
+};
+
+/**
+ * struct execmem_info - architecture parameters for code allocations
+ * @ranges: array of parameter sets defining architecture specific
+ * parameters for executable memory allocations. The ranges that are not
+ * explicitly initialized by an architecture use parameters defined for
+ * @EXECMEM_DEFAULT.
+ */
+struct execmem_info {
+ struct execmem_range ranges[EXECMEM_TYPE_MAX];
+};
+
+/**
+ * execmem_arch_setup - define parameters for allocations of executable memory
+ *
+ * A hook for architectures to define parameters for allocations of
+ * executable memory. These parameters should be filled into the
+ * @execmem_info structure.
+ *
+ * For architectures that do not implement this method, a default set of
+ * parameters will be used.
+ *
+ * Return: a structure defining architecture parameters and restrictions
+ * for allocations of executable memory
+ */
+struct execmem_info *execmem_arch_setup(void);
+
+/**
+ * execmem_alloc - allocate executable memory
+ * @type: type of the allocation
+ * @size: how many bytes of memory are required
+ *
+ * Allocates memory that will contain either executable code, generated or
+ * loaded from kernel modules, or data coupled with executable code, like
+ * data sections in kernel modules.
+ *
+ * The memory will have protections defined by architecture for executable
+ * region of the @type.
+ *
+ * Return: a pointer to the allocated memory or %NULL
+ */
+void *execmem_alloc(enum execmem_type type, size_t size);
+
+/**
+ * execmem_alloc_rw - allocate writable executable memory
+ * @type: type of the allocation
+ * @size: how many bytes of memory are required
+ *
+ * Allocates memory that will contain either executable code, generated or
+ * loaded from kernel modules, or data coupled with executable code, like
+ * data sections in kernel modules.
+ *
+ * Forces writable permissions on the allocated memory and the caller is
+ * responsible to manage the permissions afterwards.
+ *
+ * For architectures that use ROX cache the permissions will be set to R+W.
+ * For architectures that don't use ROX cache the default permissions for @type
+ * will be used as they must be writable.
+ *
+ * Return: a pointer to the allocated memory or %NULL
+ */
+void *execmem_alloc_rw(enum execmem_type type, size_t size);
+
+/**
+ * execmem_free - free executable memory
+ * @ptr: pointer to the memory that should be freed
+ */
+void execmem_free(void *ptr);
+
+DEFINE_FREE(execmem, void *, if (_T) execmem_free(_T));
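+
+/*
+ * Illustrative sketch (not part of this header): the cleanup helper above
+ * enables scope-based freeing on the error paths (the foo_* naming is an
+ * assumption made for the example):
+ *
+ *	static void *foo_load_trampoline(const void *insns, size_t size)
+ *	{
+ *		void *buf __free(execmem) = execmem_alloc_rw(EXECMEM_KPROBES,
+ *							     size);
+ *
+ *		if (!buf)
+ *			return NULL;
+ *		memcpy(buf, insns, size);
+ *		return no_free_ptr(buf);
+ *	}
+ *
+ * The caller remains responsible for the final permissions (e.g. via
+ * set_memory_rox()); on success, no_free_ptr() disarms the cleanup and
+ * transfers ownership to the caller.
+ */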
+
+#ifdef CONFIG_MMU
+/**
+ * execmem_vmap - create virtual mapping for EXECMEM_MODULE_DATA memory
+ * @size: size of the virtual mapping in bytes
+ *
+ * Maps a virtually contiguous area in the range suitable for EXECMEM_MODULE_DATA.
+ *
+ * Return: the area descriptor on success or %NULL on failure.
+ */
+struct vm_struct *execmem_vmap(size_t size);
+#endif
+
+/**
+ * execmem_is_rox - check if execmem is read-only
+ * @type: the execmem type to check
+ *
+ * Return: %true if the @type is read-only, %false if it's writable
+ */
+bool execmem_is_rox(enum execmem_type type);
+
+#if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE)
+void execmem_init(void);
+#else
+static inline void execmem_init(void) {}
+#endif
+
+#endif /* _LINUX_EXECMEM_ALLOC_H */
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
new file mode 100644
index 000000000000..d445705ac13c
--- /dev/null
+++ b/include/linux/export-internal.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Please do not include this explicitly.
+ * This is used by C files generated by modpost.
+ */
+
+#ifndef __LINUX_EXPORT_INTERNAL_H__
+#define __LINUX_EXPORT_INTERNAL_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#if defined(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)
+/*
+ * relative reference: this reduces the size by half on 64-bit architectures,
+ * and eliminates the need for absolute relocations that require runtime
+ * processing on relocatable kernels.
+ */
+#define __KSYM_ALIGN ".balign 4"
+#define __KSYM_REF(sym) ".long " #sym "- ."
+#elif defined(CONFIG_64BIT)
+#define __KSYM_ALIGN ".balign 8"
+#define __KSYM_REF(sym) ".quad " #sym
+#else
+#define __KSYM_ALIGN ".balign 4"
+#define __KSYM_REF(sym) ".long " #sym
+#endif
+
+/*
+ * For every exported symbol, do the following:
+ *
+ * - Put the name of the symbol and namespace (empty string "" for none) in
+ * __ksymtab_strings.
+ * - Place a struct kernel_symbol entry in the __ksymtab section.
+ *
+ * Note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+#define __KSYMTAB(name, sym, sec, ns) \
+ asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1" "\n" \
+ "__kstrtab_" #name ":" "\n" \
+ " .asciz \"" #name "\"" "\n" \
+ "__kstrtabns_" #name ":" "\n" \
+ " .asciz \"" ns "\"" "\n" \
+ " .previous" "\n" \
+ " .section \"___ksymtab" sec "+" #name "\", \"a\"" "\n" \
+ __KSYM_ALIGN "\n" \
+ "__ksymtab_" #name ":" "\n" \
+ __KSYM_REF(sym) "\n" \
+ __KSYM_REF(__kstrtab_ ##name) "\n" \
+ __KSYM_REF(__kstrtabns_ ##name) "\n" \
+ " .previous" "\n" \
+ )
+
+#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
+#else
+#define KSYM_FUNC(name) name
+#endif
+
+#define KSYMTAB_FUNC(name, sec, ns) __KSYMTAB(name, KSYM_FUNC(name), sec, ns)
+#define KSYMTAB_DATA(name, sec, ns) __KSYMTAB(name, name, sec, ns)
+
+#define SYMBOL_CRC(sym, crc, sec) \
+ asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
+ ".balign 4" "\n" \
+ "__crc_" #sym ":" "\n" \
+ ".long " #crc "\n" \
+ ".previous" "\n")
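+
+/*
+ * Illustrative sketch (not part of this header): the C files emitted by
+ * modpost contain entries along the lines of
+ *
+ *	KSYMTAB_FUNC(foo_init, "_gpl", "");
+ *	SYMBOL_CRC(foo_init, 0x12345678, "_gpl");
+ *
+ * (hypothetical symbol and CRC), which expand to the assembly above.
+ */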
+
+#endif /* __LINUX_EXPORT_INTERNAL_H__ */
diff --git a/include/linux/export.h b/include/linux/export.h
index 1a1dfdb2a5c6..a686fd0ba406 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,133 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+
/*
- * Export symbols from the kernel to modules. Forked from module.h
- * to reduce the amount of pointless cruft we feed to gcc when only
- * exporting a simple symbol or two.
+ * This comment block is used by fixdep. Please do not remove.
*
- * Try not to add #includes here. It slows compilation and makes kernel
- * hackers place grumpy comments in header files.
+ * When CONFIG_MODVERSIONS is changed from n to y, all source files having
+ * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
+ * side effect of the *.o build rule.
*/
-/* Some toolchains use a `_' prefix for all user symbols. */
-#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
-#define __VMLINUX_SYMBOL(x) _##x
-#define __VMLINUX_SYMBOL_STR(x) "_" #x
-#else
-#define __VMLINUX_SYMBOL(x) x
-#define __VMLINUX_SYMBOL_STR(x) #x
-#endif
-
-/* Indirect, so macros are expanded before pasting. */
-#define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x)
-#define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x)
-
-#ifndef __ASSEMBLY__
-struct kernel_symbol
-{
- unsigned long value;
- const char *name;
-};
-
-#ifdef MODULE
-extern struct module __this_module;
-#define THIS_MODULE (&__this_module)
+#ifdef CONFIG_64BIT
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 8 ASM_NL \
+ .quad sym
#else
-#define THIS_MODULE ((struct module *)0)
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 4 ASM_NL \
+ .long sym
#endif
-#ifdef CONFIG_MODULES
-
-#if defined(__KERNEL__) && !defined(__GENKSYMS__)
-#ifdef CONFIG_MODVERSIONS
-/* Mark the CRC weak since genksyms apparently decides not to
- * generate a checksums for some symbols */
-#if defined(CONFIG_MODULE_REL_CRCS)
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
- " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
- " .previous \n");
-#else
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
- " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
- " .previous \n");
-#endif
-#else
-#define __CRC_SYMBOL(sym, sec)
-#endif
-
-/* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec) \
- extern typeof(sym) sym; \
- __CRC_SYMBOL(sym, sec) \
- static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
- = VMLINUX_SYMBOL_STR(sym); \
- static const struct kernel_symbol __ksymtab_##sym \
- __used \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- = { (unsigned long)&sym, __kstrtab_##sym }
+/*
+ * The LLVM integrated assembler can merge adjacent string literals (like
+ * C and GNU-as) passed to '.ascii', but not to '.asciz', and chokes on:
+ *
+ * .asciz "MODULE_" "kvm" ;
+ */
+#define ___EXPORT_SYMBOL(sym, license, ns...) \
+ .section ".export_symbol","a" ASM_NL \
+ __export_symbol_##sym: ASM_NL \
+ .asciz license ASM_NL \
+ .ascii ns "\0" ASM_NL \
+ __EXPORT_SYMBOL_REF(sym) ASM_NL \
+ .previous
-#if defined(__KSYM_DEPS__)
+#if defined(__DISABLE_EXPORTS)
/*
- * For fine grained build dependencies, we want to tell the build system
- * about each possible exported symbol even if they're not actually exported.
- * We use a string pattern that is unlikely to be valid code that the build
- * system filters out from the preprocessor output (see ksym_dep_filter
- * in scripts/Kbuild.include).
+ * Allow symbol exports to be disabled completely so that C code may
+ * be reused in other execution contexts such as the UEFI stub or the
+ * decompressor.
*/
-#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym ===
+#define __EXPORT_SYMBOL(sym, license, ns)
+
+#elif defined(__GENKSYMS__)
-#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+#define __EXPORT_SYMBOL(sym, license, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-#include <generated/autoksyms.h>
+#elif defined(__ASSEMBLY__)
-#define __EXPORT_SYMBOL(sym, sec) \
- __cond_export_sym(sym, sec, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, sec, conf) \
- ___cond_export_sym(sym, sec, conf)
-#define ___cond_export_sym(sym, sec, enabled) \
- __cond_export_sym_##enabled(sym, sec)
-#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec)
-#define __cond_export_sym_0(sym, sec) /* nothing */
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ ___EXPORT_SYMBOL(sym, license, ns)
#else
-#define __EXPORT_SYMBOL ___EXPORT_SYMBOL
-#endif
-#define EXPORT_SYMBOL(sym) \
- __EXPORT_SYMBOL(sym, "")
+#ifdef CONFIG_GENDWARFKSYMS
+/*
+ * With CONFIG_GENDWARFKSYMS, ensure the compiler emits debugging
+ * information for all exported symbols, including those defined in
+ * different TUs, by adding a __gendwarfksyms_ptr_<symbol> pointer
+ * that's discarded during the final link.
+ */
+#define __GENDWARFKSYMS_EXPORT(sym) \
+ static typeof(sym) *__gendwarfksyms_ptr_##sym __used \
+ __section(".discard.gendwarfksyms") = &sym;
+#else
+#define __GENDWARFKSYMS_EXPORT(sym)
+#endif
-#define EXPORT_SYMBOL_GPL(sym) \
- __EXPORT_SYMBOL(sym, "_gpl")
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ extern typeof(sym) sym; \
+ __ADDRESSABLE(sym) \
+ __GENDWARFKSYMS_EXPORT(sym) \
+ asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
-#define EXPORT_SYMBOL_GPL_FUTURE(sym) \
- __EXPORT_SYMBOL(sym, "_gpl_future")
+#endif
-#ifdef CONFIG_UNUSED_SYMBOLS
-#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused")
-#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl")
+#ifdef DEFAULT_SYMBOL_NAMESPACE
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, DEFAULT_SYMBOL_NAMESPACE)
#else
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, "")
#endif
-#endif /* __GENKSYMS__ */
-
-#else /* !CONFIG_MODULES... */
-
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#define EXPORT_SYMBOL_GPL_FUTURE(sym)
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
+#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
+#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "GPL")
+#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", ns)
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", ns)
-#endif /* CONFIG_MODULES */
-#endif /* !__ASSEMBLY__ */
+#define EXPORT_SYMBOL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods)
#endif /* _LINUX_EXPORT_H */
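
A minimal sketch of driver code using the reworked macros; all mydrv_* names
and the "MYDRV_NS" namespace string are hypothetical. The string-literal
namespace follows the convention visible in EXPORT_SYMBOL_FOR_MODULES above:

    #include <linux/export.h>
    #include <linux/module.h>

    int mydrv_do_thing(int x)
    {
            return x + 1;
    }
    /* Plain export, empty license string: usable by any module. */
    EXPORT_SYMBOL(mydrv_do_thing);

    int mydrv_do_ns_thing(int x)
    {
            return 2 * x;
    }
    /* GPL-only, namespaced export; a consumer would need
     * MODULE_IMPORT_NS("MYDRV_NS") to link against it. */
    EXPORT_SYMBOL_NS_GPL(mydrv_do_ns_thing, "MYDRV_NS");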
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 5ab958cdc50b..f0cf2714ec52 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_EXPORTFS_H
#define LINUX_EXPORTFS_H 1
#include <linux/types.h>
+#include <linux/path.h>
struct dentry;
struct iattr;
@@ -98,12 +100,40 @@ enum fid_type {
FILEID_FAT_WITH_PARENT = 0x72,
/*
+ * 64 bit inode number, 32 bit generation number.
+ */
+ FILEID_INO64_GEN = 0x81,
+
+ /*
+ * 64 bit inode number, 32 bit generation number,
+ * 64 bit parent inode number, 32 bit parent generation.
+ */
+ FILEID_INO64_GEN_PARENT = 0x82,
+
+ /*
* 128 bit child FID (struct lu_fid)
* 128 bit parent FID (struct lu_fid)
*/
FILEID_LUSTRE = 0x97,
/*
+ * 64 bit inode number, 32 bit subvolume, 32 bit generation number:
+ */
+ FILEID_BCACHEFS_WITHOUT_PARENT = 0xb1,
+ FILEID_BCACHEFS_WITH_PARENT = 0xb2,
+
+ /*
+	 * 64 bit namespace identifier, 32 bit namespace type, 32 bit inode number.
+ */
+ FILEID_NSFS = 0xf1,
+
+ /*
+ * 64 bit unique kernfs id
+ */
+ FILEID_KERNFS = 0xfe,
+
+ /*
* Filesystems must not use 0xff file ID.
*/
FILEID_INVALID = 0xff,
@@ -117,7 +147,11 @@ struct fid {
u32 parent_ino;
u32 parent_gen;
} i32;
- struct {
+ struct {
+ u64 ino;
+ u32 gen;
+ } __packed i64;
+ struct {
u32 block;
u16 partref;
u16 parent_partref;
@@ -125,10 +159,38 @@ struct fid {
u32 parent_block;
u32 parent_generation;
} udf;
- __u32 raw[0];
+ DECLARE_FLEX_ARRAY(__u32, raw);
};
};
+enum handle_to_path_flags {
+ HANDLE_CHECK_PERMS = (1 << 0),
+ HANDLE_CHECK_SUBTREE = (1 << 1),
+};
+
+struct handle_to_path_ctx {
+ struct path root;
+ enum handle_to_path_flags flags;
+ unsigned int fh_flags;
+};
+
+#define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */
+#define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */
+#define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */
+
+/*
+ * Filesystems use only lower 8 bits of file_handle type for fid_type.
+ * name_to_handle_at() uses upper 16 bits of type as user flags to be
+ * interpreted by open_by_handle_at().
+ */
+#define FILEID_USER_FLAGS_MASK 0xffff0000
+#define FILEID_USER_FLAGS(type) ((type) & FILEID_USER_FLAGS_MASK)
+
+/* Flags supported in encoded handle_type that is exported to user */
+#define FILEID_IS_CONNECTABLE 0x10000
+#define FILEID_IS_DIR 0x20000
+#define FILEID_VALID_USER_FLAGS (FILEID_IS_CONNECTABLE | FILEID_IS_DIR)
+
/**
* struct export_operations - for nfsd to communicate with file systems
* @encode_fh: encode a file handle fragment from a dentry
@@ -138,13 +200,13 @@ struct fid {
* @get_parent: find the parent of a given directory
* @commit_metadata: commit metadata changes to stable storage
*
- * See Documentation/filesystems/nfs/Exporting for details on how to use
+ * See Documentation/filesystems/nfs/exporting.rst for details on how to use
* this interface correctly.
*
* encode_fh:
* @encode_fh should store in the file handle fragment @fh (using at most
* @max_len bytes) information that can be used by @decode_fh to recover the
- * file referred to by the &struct dentry @de. If the @connectable flag is
+ * file referred to by the &struct dentry @de. If @flag has the CONNECTABLE bit
* set, the encode_fh() should store sufficient information so that a good
 * attempt can be made to find not only the file but also its place in the
* filesystem. This typically means storing a reference to de->d_parent in
@@ -172,20 +234,26 @@ struct fid {
* get_name:
* @get_name should find a name for the given @child in the given @parent
* directory. The name should be stored in the @name (with the
- * understanding that it is already pointing to a a %NAME_MAX+1 sized
+ * understanding that it is already pointing to a %NAME_MAX+1 sized
* buffer. get_name() should return %0 on success, a negative error code
- * or error. @get_name will be called without @parent->i_mutex held.
+ * or error. @get_name will be called without @parent->i_rwsem held.
*
* get_parent:
* @get_parent should find the parent directory for the given @child which
* is also a directory. In the event that it cannot be found, or storage
* space cannot be allocated, a %ERR_PTR should be returned.
*
+ * permission:
+ * Allow filesystems to specify a custom permission function.
+ *
+ * open:
+ * Allow filesystems to specify a custom open function.
+ *
* commit_metadata:
* @commit_metadata should commit metadata changes to stable storage.
*
* Locking rules:
- * get_parent is called with child->d_inode->i_mutex down
+ * get_parent is called with child->d_inode->i_rwsem down
* get_name is not (which is possibly inconsistent)
*/
@@ -207,12 +275,89 @@ struct export_operations {
bool write, u32 *device_generation);
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
+ int (*permission)(struct handle_to_path_ctx *ctx, unsigned int oflags);
+ struct file * (*open)(const struct path *path, unsigned int oflags);
+#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
+#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
+#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
+#define EXPORT_OP_REMOTE_FS (0x8) /* Filesystem is remote */
+#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
+ atomic attribute updates
+ */
+#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
+#define EXPORT_OP_NOLOCKS (0x40) /* no file locking support */
+ unsigned long flags;
};
+/**
+ * exportfs_cannot_lock() - check if export implements file locking
+ * @export_ops: the nfs export operations to check
+ *
+ * Returns true if the export does not support file locking.
+ */
+static inline bool
+exportfs_cannot_lock(const struct export_operations *export_ops)
+{
+ return export_ops->flags & EXPORT_OP_NOLOCKS;
+}
+
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
- int *max_len, struct inode *parent);
+ int *max_len, struct inode *parent,
+ int flags);
extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
- int *max_len, int connectable);
+ int *max_len, int flags);
+
+static inline bool exportfs_can_encode_fid(const struct export_operations *nop)
+{
+ return !nop || nop->encode_fh;
+}
+
+static inline bool exportfs_can_decode_fh(const struct export_operations *nop)
+{
+ return nop && nop->fh_to_dentry;
+}
+
+static inline bool exportfs_can_encode_fh(const struct export_operations *nop,
+ int fh_flags)
+{
+ /*
+ * If a non-decodeable file handle was requested, we only need to make
+	 * sure that the filesystem did not opt out of encoding fid.
+ */
+ if (fh_flags & EXPORT_FH_FID)
+ return exportfs_can_encode_fid(nop);
+
+ /* Normal file handles cannot be created without export ops */
+ if (!nop)
+ return false;
+
+ /*
+	 * If a connectable file handle was requested, we need to make sure
+	 * that the filesystem can also decode connected file handles.
+ */
+ if ((fh_flags & EXPORT_FH_CONNECTABLE) && !nop->fh_to_parent)
+ return false;
+
+ /*
+	 * If a decodeable file handle was requested, we need to make sure
+	 * that the filesystem can also decode file handles.
+ */
+ return exportfs_can_decode_fh(nop);
+}
+
+static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid,
+ int *max_len)
+{
+ return exportfs_encode_inode_fh(inode, fid, max_len, NULL,
+ EXPORT_FH_FID);
+}
+
+extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt,
+ struct fid *fid, int fh_len,
+ int fileid_type,
+ unsigned int flags,
+ int (*acceptable)(void *, struct dentry *),
+ void *context);
extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
void *context);
@@ -220,10 +365,12 @@ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
/*
* Generic helpers for filesystems.
*/
-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent);
+struct dentry *generic_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+struct dentry *generic_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
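
A hedged sketch of how a simple filesystem might wire up export_operations
with the generic helpers declared above; every myfs_* name is hypothetical,
and myfs_iget() stands in for whatever inode lookup the filesystem provides:

    #include <linux/exportfs.h>

    static struct inode *myfs_nfs_get_inode(struct super_block *sb,
                                            u64 ino, u32 gen)
    {
            return myfs_iget(sb, ino, gen); /* hypothetical inode lookup */
    }

    static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
                                            struct fid *fid, int fh_len,
                                            int fh_type)
    {
            return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                        myfs_nfs_get_inode);
    }

    static const struct export_operations myfs_export_ops = {
            .encode_fh      = generic_encode_ino32_fh,
            .fh_to_dentry   = myfs_fh_to_dentry,
            .flags          = EXPORT_OP_FLUSH_ON_CLOSE,
    };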
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 2723e715f67a..1fef88569037 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/ext2_fs.h
*
diff --git a/include/linux/extable.h b/include/linux/extable.h
index 28addad0dda7..4ab9e78f313b 100644
--- a/include/linux/extable.h
+++ b/include/linux/extable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EXTABLE_H
#define _LINUX_EXTABLE_H
@@ -18,6 +19,8 @@ void trim_init_extable(struct module *m);
/* Given an address, look for it in the exception tables */
const struct exception_table_entry *search_exception_tables(unsigned long add);
+const struct exception_table_entry *
+search_kernel_exception_table(unsigned long addr);
#ifdef CONFIG_MODULES
/* For extable.c to search modules' exception tables. */
@@ -30,4 +33,14 @@ search_module_extables(unsigned long addr)
}
#endif /*CONFIG_MODULES*/
+#ifdef CONFIG_BPF_JIT
+const struct exception_table_entry *search_bpf_extables(unsigned long addr);
+#else
+static inline const struct exception_table_entry *
+search_bpf_extables(unsigned long addr)
+{
+ return NULL;
+}
+#endif
+
#endif /* _LINUX_EXTABLE_H */
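
A small sketch of a combined lookup over the tables declared above. Note that
search_exception_tables() already covers module tables internally, so only
the BPF tables need a separate query; the helper name is hypothetical:

    #include <linux/extable.h>

    static const struct exception_table_entry *
    lookup_any_extable(unsigned long addr)
    {
            const struct exception_table_entry *e;

            e = search_exception_tables(addr);      /* kernel + modules */
            if (!e)
                    e = search_bpf_extables(addr);  /* JITed BPF programs */
            return e;
    }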
diff --git a/include/linux/extcon-provider.h b/include/linux/extcon-provider.h
new file mode 100644
index 000000000000..fa70945f4e6b
--- /dev/null
+++ b/include/linux/extcon-provider.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * External Connector (extcon) framework
+ * - linux/include/linux/extcon-provider.h for extcon provider device driver.
+ *
+ * Copyright (C) 2017 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ */
+
+#ifndef __LINUX_EXTCON_PROVIDER_H__
+#define __LINUX_EXTCON_PROVIDER_H__
+
+#include <linux/extcon.h>
+
+struct extcon_dev;
+
+#if IS_ENABLED(CONFIG_EXTCON)
+
+/* Following APIs register/unregister the extcon device. */
+int extcon_dev_register(struct extcon_dev *edev);
+void extcon_dev_unregister(struct extcon_dev *edev);
+int devm_extcon_dev_register(struct device *dev,
+ struct extcon_dev *edev);
+void devm_extcon_dev_unregister(struct device *dev,
+ struct extcon_dev *edev);
+
+/* Following APIs allocate/free the memory of the extcon device. */
+struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
+void extcon_dev_free(struct extcon_dev *edev);
+struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const unsigned int *cable);
+void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+
+/* Synchronize the state and property value for each external connector. */
+int extcon_sync(struct extcon_dev *edev, unsigned int id);
+
+/*
+ * Following APIs set the connected state of each external connector.
+ * The 'id' argument indicates the defined external connector.
+ */
+int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool state);
+int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool state);
+
+/*
+ * Following APIs set the property of each external connector.
+ * The 'id' argument indicates the defined external connector
+ * and the 'prop' indicates the extcon property.
+ *
+ * And extcon_set_property_capability() sets the capability of the
+ * property for each external connector, based on the given id and
+ * property.
+ */
+int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
+int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
+int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop);
+
+#else /* CONFIG_EXTCON */
+static inline int extcon_dev_register(struct extcon_dev *edev)
+{
+ return 0;
+}
+
+static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
+
+static inline int devm_extcon_dev_register(struct device *dev,
+ struct extcon_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline void devm_extcon_dev_unregister(struct device *dev,
+ struct extcon_dev *edev) { }
+
+static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void extcon_dev_free(struct extcon_dev *edev) { }
+
+static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const unsigned int *cable)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }
+
+
+static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool state)
+{
+ return 0;
+}
+
+static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool state)
+{
+ return 0;
+}
+
+static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
+{
+ return 0;
+}
+
+static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val)
+{
+ return 0;
+}
+
+static inline int extcon_set_property_sync(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop,
+ union extcon_property_value prop_val)
+{
+ return 0;
+}
+
+static inline int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop)
+{
+ return 0;
+}
+#endif /* CONFIG_EXTCON */
+#endif /* __LINUX_EXTCON_PROVIDER_H__ */
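
A hedged sketch of the provider-side flow these declarations enable, assuming
a platform driver; all mydrv_* names are hypothetical:

    #include <linux/err.h>
    #include <linux/extcon-provider.h>
    #include <linux/platform_device.h>

    static const unsigned int mydrv_cables[] = {
            EXTCON_USB,
            EXTCON_USB_HOST,
            EXTCON_NONE,
    };

    static int mydrv_probe(struct platform_device *pdev)
    {
            struct extcon_dev *edev;
            int ret;

            edev = devm_extcon_dev_allocate(&pdev->dev, mydrv_cables);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            ret = devm_extcon_dev_register(&pdev->dev, edev);
            if (ret)
                    return ret;

            /* Report an attached USB cable and notify consumers. */
            return extcon_set_state_sync(edev, EXTCON_USB, true);
    }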
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 744d60ca80c3..e596a0abcb27 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,5 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* External Connector (extcon) framework
+ * - linux/include/linux/extcon.h for extcon consumer device driver.
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -11,15 +13,6 @@
* based on switch class driver
* Copyright (C) 2008 Google, Inc.
* Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_EXTCON_H__
@@ -83,6 +76,8 @@
#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
#define EXTCON_DISP_DP 44 /* Display Port */
#define EXTCON_DISP_HMD 45 /* Head-Mounted Display */
+#define EXTCON_DISP_CVBS 46 /* Composite Video Broadcast Signal */
+#define EXTCON_DISP_EDP 47 /* Embedded Display Port */
/* Miscellaneous external connector */
#define EXTCON_DOCK 60
@@ -170,60 +165,28 @@ union extcon_property_value {
int intval; /* type : integer (intval) */
};
-struct extcon_cable;
struct extcon_dev;
#if IS_ENABLED(CONFIG_EXTCON)
-
-/* Following APIs register/unregister the extcon device. */
-extern int extcon_dev_register(struct extcon_dev *edev);
-extern void extcon_dev_unregister(struct extcon_dev *edev);
-extern int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev);
-extern void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev);
-
-/* Following APIs allocate/free the memory of the extcon device. */
-extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
-extern void extcon_dev_free(struct extcon_dev *edev);
-extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable);
-extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
-
-/* Synchronize the state and property value for each external connector. */
-extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
-
/*
- * Following APIs get/set the connected state of each external connector.
+ * Following APIs get the connected state of each external connector.
* The 'id' argument indicates the defined external connector.
*/
-extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
-extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool state);
-extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool state);
+int extcon_get_state(struct extcon_dev *edev, unsigned int id);
/*
- * Following APIs get/set the property of each external connector.
+ * Following APIs get the property of each external connector.
* The 'id' argument indicates the defined external connector
* and the 'prop' indicates the extcon property.
*
- * And extcon_get/set_property_capability() set the capability of the property
- * for each external connector. They are used to set the capability of the
+ * And extcon_get_property_capability() gets the capability of the
+ * property for each external connector, based on the given id and
+ * property.
*/
-extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value *prop_val);
-extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val);
-extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val);
-extern int extcon_get_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop);
-extern int extcon_set_property_capability(struct extcon_dev *edev,
+int extcon_get_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
/*
@@ -235,112 +198,51 @@ extern int extcon_set_property_capability(struct extcon_dev *edev,
* extcon_register_notifier_all(*edev, *nb) : Register a notifier block
* for all supported external connectors of the extcon.
*/
-extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
+int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
+int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int devm_extcon_register_notifier(struct device *dev,
+int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern void devm_extcon_unregister_notifier(struct device *dev,
+void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int extcon_register_notifier_all(struct extcon_dev *edev,
+int extcon_register_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb);
-extern int extcon_unregister_notifier_all(struct extcon_dev *edev,
+int extcon_unregister_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb);
-extern int devm_extcon_register_notifier_all(struct device *dev,
+int devm_extcon_register_notifier_all(struct device *dev,
struct extcon_dev *edev,
struct notifier_block *nb);
-extern void devm_extcon_unregister_notifier_all(struct device *dev,
+void devm_extcon_unregister_notifier_all(struct device *dev,
struct extcon_dev *edev,
struct notifier_block *nb);
/*
 * Following APIs get the extcon_dev from the devicetree or through the extcon name.
*/
-extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
-extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
+struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+struct extcon_dev *extcon_find_edev_by_node(struct device_node *node);
+struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index);
/* Following API get the name of extcon device. */
-extern const char *extcon_get_edev_name(struct extcon_dev *edev);
+const char *extcon_get_edev_name(struct extcon_dev *edev);
#else /* CONFIG_EXTCON */
-static inline int extcon_dev_register(struct extcon_dev *edev)
-{
- return 0;
-}
-
-static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
-
-static inline int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev)
-{
- return -EINVAL;
-}
-
-static inline void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev) { }
-
-static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline void extcon_dev_free(struct extcon_dev *edev) { }
-
-static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }
-
-
static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
{
return 0;
}
-static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool state)
-{
- return 0;
-}
-
-static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool state)
-{
- return 0;
-}
-
-static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
-{
- return 0;
-}
-
static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value *prop_val)
{
return 0;
}
-static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val)
-{
- return 0;
-}
-
-static inline int extcon_set_property_sync(struct extcon_dev *edev,
- unsigned int id, unsigned int prop,
- union extcon_property_value prop_val)
-{
- return 0;
-}
static inline int extcon_get_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop)
@@ -348,12 +250,6 @@ static inline int extcon_get_property_capability(struct extcon_dev *edev,
return 0;
}
-static inline int extcon_set_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
-{
- return 0;
-}
-
static inline int extcon_register_notifier(struct extcon_dev *edev,
unsigned int id, struct notifier_block *nb)
{
@@ -377,8 +273,36 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb) { }
+
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
+ return NULL;
+}
+
+static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
+{
return ERR_PTR(-ENODEV);
}
@@ -387,6 +311,11 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
{
return ERR_PTR(-ENODEV);
}
+
+static inline const char *extcon_get_edev_name(struct extcon_dev *edev)
+{
+ return NULL;
+}
#endif /* CONFIG_EXTCON */
/*
@@ -399,16 +328,4 @@ struct extcon_specific_cable_nb {
struct extcon_dev *edev;
unsigned long previous_value;
};
-
-static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
-{
- return -EINVAL;
-}
-
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
-{
- return -EINVAL;
-}
#endif /* __LINUX_EXTCON_H__ */
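
And a matching consumer-side sketch using the notifier APIs kept in this
header; again, the mydrv_* names are hypothetical:

    #include <linux/err.h>
    #include <linux/extcon.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    static int mydrv_usb_event(struct notifier_block *nb,
                               unsigned long state, void *ptr)
    {
            /* state carries the new connected state of EXTCON_USB. */
            pr_info("USB cable %s\n", state ? "attached" : "detached");
            return NOTIFY_OK;
    }

    static struct notifier_block mydrv_usb_nb = {
            .notifier_call = mydrv_usb_event,
    };

    static int mydrv_bind_extcon(struct device *dev)
    {
            struct extcon_dev *edev;

            edev = extcon_get_edev_by_phandle(dev, 0);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            return devm_extcon_register_notifier(dev, edev, EXTCON_USB,
                                                 &mydrv_usb_nb);
    }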
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 2aa32075bca1..19b437e9c080 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/extcon/extcon-adc-jack.h
*
@@ -5,11 +6,6 @@
*
* Copyright (C) 2012 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef _EXTCON_ADC_JACK_H_
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
deleted file mode 100644
index 7cacafb78b09..000000000000
--- a/include/linux/extcon/extcon-gpio.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Single-state GPIO extcon driver based on extcon class
- *
- * Copyright (C) 2012 Samsung Electronics
- * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * based on switch class driver
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __EXTCON_GPIO_H__
-#define __EXTCON_GPIO_H__ __FILE__
-
-#include <linux/extcon.h>
-
-/**
- * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device.
- * @extcon_id: The unique id of specific external connector.
- * @gpio: Corresponding GPIO.
- * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0
- * If true, low state of gpio means active.
- * If false, high state of gpio means active.
- * @debounce: Debounce time for GPIO IRQ in ms.
- * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
- * @check_on_resume: Boolean describing whether to check the state of gpio
- * while resuming from sleep.
- */
-struct gpio_extcon_pdata {
- unsigned int extcon_id;
- unsigned gpio;
- bool gpio_active_low;
- unsigned long debounce;
- unsigned long irq_flags;
-
- bool check_on_resume;
-};
-
-#endif /* __EXTCON_GPIO_H__ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index b6feed6547ce..a7880787cad3 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* include/linux/f2fs_fs.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_F2FS_FS_H
#define _LINUX_F2FS_FS_H
@@ -16,18 +13,23 @@
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
-#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
-#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
-#define F2FS_BLKSIZE 4096 /* support only 4KB block */
-#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
+#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */
+#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
+#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
+#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
+#define F2FS_SUM_BLKSIZE 4096 /* only support 4096 byte sum block */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
+#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+#define COMPRESS_ADDR ((block_t)-2) /* used as compressed data flag */
-#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
-#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
+#define F2FS_BLKSIZE_MASK (F2FS_BLKSIZE - 1)
+#define F2FS_BYTES_TO_BLK(bytes) ((unsigned long long)(bytes) >> F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_TO_BYTES(blk) ((unsigned long long)(blk) << F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
+#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1))
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -35,16 +37,14 @@
#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
+#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid)
+
+#define F2FS_MAX_QUOTAS 3
-#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
-#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
-#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */
-#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */
-#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
+#define F2FS_ENC_UTF8_12_1 1
/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
-#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
/*
* For further optimization on multi-head logs, on-disk layout supports maximum
@@ -69,6 +69,47 @@ struct f2fs_device {
__le32 total_segments;
} __packed;
+/* reason of stop_checkpoint */
+enum stop_cp_reason {
+ STOP_CP_REASON_SHUTDOWN,
+ STOP_CP_REASON_FAULT_INJECT,
+ STOP_CP_REASON_META_PAGE,
+ STOP_CP_REASON_WRITE_FAIL,
+ STOP_CP_REASON_CORRUPTED_SUMMARY,
+ STOP_CP_REASON_UPDATE_INODE,
+ STOP_CP_REASON_FLUSH_FAIL,
+ STOP_CP_REASON_NO_SEGMENT,
+ STOP_CP_REASON_CORRUPTED_FREE_BITMAP,
+ STOP_CP_REASON_CORRUPTED_NID,
+ STOP_CP_REASON_MAX,
+};
+
+#define MAX_STOP_REASON 32
+
+/* detail reason for EFSCORRUPTED */
+enum f2fs_error {
+ ERROR_CORRUPTED_CLUSTER,
+ ERROR_FAIL_DECOMPRESSION,
+ ERROR_INVALID_BLKADDR,
+ ERROR_CORRUPTED_DIRENT,
+ ERROR_CORRUPTED_INODE,
+ ERROR_INCONSISTENT_SUMMARY,
+ ERROR_INCONSISTENT_FOOTER,
+ ERROR_INCONSISTENT_SUM_TYPE,
+ ERROR_CORRUPTED_JOURNAL,
+ ERROR_INCONSISTENT_NODE_COUNT,
+ ERROR_INCONSISTENT_BLOCK_COUNT,
+ ERROR_INVALID_CURSEG,
+ ERROR_INCONSISTENT_SIT,
+ ERROR_CORRUPTED_VERITY_XATTR,
+ ERROR_CORRUPTED_XATTR,
+ ERROR_INVALID_NODE_REFERENCE,
+ ERROR_INCONSISTENT_NAT,
+ ERROR_MAX,
+};
+
+#define MAX_F2FS_ERRORS 16
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -100,7 +141,7 @@ struct f2fs_super_block {
__u8 uuid[16]; /* 128-bit uuid for volume */
__le16 volume_name[MAX_VOLUME_NAME]; /* volume name */
__le32 extension_count; /* # of extensions below */
- __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
+ __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */
__le32 cp_payload;
__u8 version[VERSION_LEN]; /* the kernel version */
__u8 init_version[VERSION_LEN]; /* the initial kernel version */
@@ -108,12 +149,25 @@ struct f2fs_super_block {
__u8 encryption_level; /* versioning level for encryption */
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
struct f2fs_device devs[MAX_DEVICES]; /* device list */
- __u8 reserved[327]; /* valid reserved region */
+ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
+ __u8 hot_ext_count; /* # of hot file extension */
+ __le16 s_encoding; /* Filename charset encoding */
+ __le16 s_encoding_flags; /* Filename charset encoding flags */
+ __u8 s_stop_reason[MAX_STOP_REASON]; /* stop checkpoint reason */
+	__u8 s_errors[MAX_F2FS_ERRORS];		/* reasons for image corruption */
+ __u8 reserved[258]; /* valid reserved region */
+ __le32 crc; /* checksum of superblock */
} __packed;
/*
* For checkpoint
*/
+#define CP_RESIZEFS_FLAG 0x00004000
+#define CP_DISABLED_QUICK_FLAG 0x00002000
+#define CP_DISABLED_FLAG 0x00001000
+#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
+#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
+#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
#define CP_CRC_RECOVERY_FLAG 0x00000040
@@ -154,13 +208,17 @@ struct f2fs_checkpoint {
unsigned char alloc_type[MAX_ACTIVE_LOGS];
/* SIT and NAT version bitmap */
- unsigned char sit_nat_version_bitmap[1];
+ unsigned char sit_nat_version_bitmap[];
} __packed;
+#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */
+#define CP_MIN_CHKSUM_OFFSET \
+ (offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
+
/*
* For orphan inode management
*/
-#define F2FS_ORPHANS_PER_BLOCK 1020
+#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32))
#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
@@ -180,19 +238,39 @@ struct f2fs_orphan_block {
struct f2fs_extent {
__le32 fofs; /* start file offset of the extent */
__le32 blk; /* start block address of the extent */
- __le32 len; /* lengh of the extent */
+ __le32 len; /* length of the extent */
} __packed;
#define F2FS_NAME_LEN 255
-#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
-#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+/* 200 bytes for inline xattrs by default */
+#define DEFAULT_INLINE_XATTR_ADDRS 50
+
+#define OFFSET_OF_END_OF_I_EXT 360
+#define SIZE_OF_I_NID 20
+
+struct node_footer {
+ __le32 nid; /* node id */
+ __le32 ino; /* inode number */
+ __le32 flag; /* include cold/fsync/dentry marks and offset */
+ __le64 cp_ver; /* checkpoint version */
+ __le32 next_blkaddr; /* next node page block address */
+} __packed;
+
+/* Address Pointers in an Inode */
+#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \
+ - SIZE_OF_I_NID \
+ - sizeof(struct node_footer)) / sizeof(__le32))
+#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
+ get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
-#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
-#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+#define ADDRS_PER_INODE(inode) addrs_per_page(inode, true)
+/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
+#define ADDRS_PER_BLOCK(inode) addrs_per_page(inode, false)
+/* Node IDs in an Indirect Block */
+#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
-#define ADDRS_PER_PAGE(page, inode) \
- (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
+#define ADDRS_PER_PAGE(folio, inode) (addrs_per_page(inode, IS_INODE(folio)))
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -204,10 +282,10 @@ struct f2fs_extent {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
-
-#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
- F2FS_INLINE_XATTR_ADDRS - 1))
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */
+#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
+#define F2FS_PIN_FILE 0x40 /* file should not be gced */
+#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -225,7 +303,13 @@ struct f2fs_inode {
__le32 i_ctime_nsec; /* change time in nano scale */
__le32 i_mtime_nsec; /* modification time in nano scale */
__le32 i_generation; /* file version (for NFS) */
- __le32 i_current_depth; /* only for directory depth */
+ union {
+ __le32 i_current_depth; /* only for directory depth */
+ __le16 i_gc_failures; /*
+ * # of gc failures on pinned file.
+ * only for regular files.
+ */
+ };
__le32 i_xattr_nid; /* nid to save xattr */
__le32 i_flags; /* file attributes */
__le32 i_pino; /* parent inode number */
@@ -235,14 +319,31 @@ struct f2fs_inode {
struct f2fs_extent i_ext; /* caching a largest extent */
- __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
-
+ union {
+ struct {
+ __le16 i_extra_isize; /* extra inode attribute size */
+ __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */
+ __le32 i_projid; /* project id */
+ __le32 i_inode_checksum;/* inode meta checksum */
+ __le64 i_crtime; /* creation time */
+ __le32 i_crtime_nsec; /* creation time in nano scale */
+ __le64 i_compr_blocks; /* # of compressed blocks */
+ __u8 i_compress_algorithm; /* compress algorithm */
+ __u8 i_log_cluster_size; /* log of cluster size */
+ __le16 i_compress_flag; /* compress flag */
+ /* 0 bit: chksum flag
+ * [8,15] bits: compress level
+ */
+ __le32 i_extra_end[0]; /* for attribute size calculation */
+ } __packed;
+ __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
+ };
__le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
double_indirect(1) node id */
} __packed;
struct direct_node {
- __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */
+ __le32 addr[DEF_ADDRS_PER_BLOCK]; /* array of data block address */
} __packed;
struct indirect_node {
@@ -256,15 +357,7 @@ enum {
OFFSET_BIT_SHIFT
};
-#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */
-
-struct node_footer {
- __le32 nid; /* node id */
- __le32 ino; /* inode nunmber */
- __le32 flag; /* include cold/fsync/dentry marks and offset */
- __le64 cp_ver; /* checkpoint version */
- __le32 next_blkaddr; /* next node page block address */
-} __packed;
+#define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0)
struct f2fs_node {
/* can be one of three types: inode, direct, and indirect types */
@@ -279,8 +372,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
+#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -295,16 +387,18 @@ struct f2fs_nat_block {
/*
* For SIT entries
*
- * Each segment is 2MB in size by default so that a bitmap for validity of
- * there-in blocks should occupy 64 bytes, 512 bits.
+ * A validity bitmap of 64 bytes covers 512 blocks. For a 4K page size,
+ * this results in a segment size of 2MB; for 16K pages, the default
+ * segment size is 8MB.
* Not allow to change this.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
- * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments.
+ * disk is 16 TB for a 4K page size and 64 TB for a 16K page size, which
+ * equals 16 * 1024 * 1024 / 2 segments.
*/
#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2)
@@ -334,8 +428,10 @@ struct f2fs_sit_block {
/*
* For segment summary
*
- * One summary block contains exactly 512 summary entries, which represents
- * exactly 2MB segment by default. Not allow to change the basic units.
+ * One 4KB summary block contains exactly 512 summary entries, which
+ * represent exactly one 2MB segment.
+ * Similarly, a 16KB summary block represents one 8MB segment.
+ * The basic units are not allowed to change.
*
* NOTE: For initializing fields, you must use set_summary
*
@@ -346,12 +442,12 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM 512
-#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
+#define ENTRIES_IN_SUM (F2FS_SUM_BLKSIZE / 8)
+#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
-/* a summary entry for a 4KB-sized block in a segment */
+/* a summary entry for a block in a segment */
struct f2fs_summary {
__le32 nid; /* parent node id */
union {
@@ -372,7 +468,7 @@ struct summary_footer {
__le32 check_sum; /* summary checksum */
} __packed;
-#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
+#define SUM_JOURNAL_SIZE (F2FS_SUM_BLKSIZE - SUM_FOOTER_SIZE -\
SUM_ENTRY_SIZE)
#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
sizeof(struct nat_journal_entry))
@@ -435,7 +531,7 @@ struct f2fs_journal {
};
} __packed;
-/* 4KB-sized summary block structure */
+/* Block-sized summary block structure */
struct f2fs_summary_block {
struct f2fs_summary entries[ENTRIES_IN_SUM];
struct f2fs_journal journal;
@@ -462,37 +558,42 @@ typedef __le32 f2fs_hash_t;
#define MAX_DIR_HASH_DEPTH 63
/* MAX buckets in one level of dir */
-#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+#define MAX_DIR_BUCKETS BIT((MAX_DIR_HASH_DEPTH / 2) - 1)
/*
- * space utilization of regular dentry and inline dentry
- * regular dentry inline dentry
- * bitmap 1 * 27 = 27 1 * 23 = 23
- * reserved 1 * 3 = 3 1 * 7 = 7
- * dentry 11 * 214 = 2354 11 * 182 = 2002
- * filename 8 * 214 = 1712 8 * 182 = 1456
- * total 4096 3488
+ * space utilization of regular dentry and inline dentry (w/o extra reservation)
+ * when block size is 4KB.
+ * regular dentry inline dentry (def) inline dentry (min)
+ * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1
+ * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1
+ * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22
+ * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16
+ * total 4096 3488 40
*
* Note: there are more reserved space in inline dentry than in regular
* dentry, when converting inline dentry we should handle this carefully.
*/
-#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
+
+/* the number of dentry in a block */
+#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1))
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
-#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
+#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \
F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
+#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
/* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
struct f2fs_dir_entry {
__le32 hash_code; /* hash code of file name */
__le32 ino; /* inode number */
- __le16 name_len; /* lengh of file name */
+ __le16 name_len; /* length of file name */
__u8 file_type; /* file type */
} __packed;
-/* 4KB-sized directory entry block */
+/* Block-sized directory entry block */
struct f2fs_dentry_block {
/* validity bitmap for directory entries in each block */
__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
@@ -501,37 +602,6 @@ struct f2fs_dentry_block {
__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
} __packed;
-/* for inline dir */
-#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \
- ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
- BITS_PER_BYTE + 1))
-#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \
- BITS_PER_BYTE - 1) / BITS_PER_BYTE)
-#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \
- ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
- NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE))
-
-/* inline directory entry structure */
-struct f2fs_inline_dentry {
- __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE];
- __u8 reserved[INLINE_RESERVED_SIZE];
- struct f2fs_dir_entry dentry[NR_INLINE_DENTRY];
- __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN];
-} __packed;
-
-/* file types used in inode_info->flags */
-enum {
- F2FS_FT_UNKNOWN,
- F2FS_FT_REG_FILE,
- F2FS_FT_DIR,
- F2FS_FT_CHRDEV,
- F2FS_FT_BLKDEV,
- F2FS_FT_FIFO,
- F2FS_FT_SOCK,
- F2FS_FT_SYMLINK,
- F2FS_FT_MAX
-};
-
-#define S_SHIFT 12
+#define F2FS_DEF_PROJID 0 /* default project ID */
#endif /* _LINUX_F2FS_FS_H */
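
The macros above replace hard-coded 4KB-era constants with block-size-relative
arithmetic. A hedged sanity sketch, assuming a 4KB page/block configuration
(PAGE_SHIFT == 12), showing the new expressions reproduce the historical
values 923, 1018, and 214; the function name is hypothetical:

    #include <linux/build_bug.h>
    #include <linux/f2fs_fs.h>

    static inline void f2fs_layout_sanity(void)
    {
    #if PAGE_SHIFT == 12
            /* node_footer: 4 + 4 + 4 + 8 + 4 = 24 bytes, __packed */
            BUILD_BUG_ON(sizeof(struct node_footer) != 24);
            /* (4096 - 360 - 20 - 24) / 4 = 923 data pointers per inode */
            BUILD_BUG_ON(DEF_ADDRS_PER_INODE != 923);
            /* (4096 - 24) / 4 = 1018 pointers per direct/indirect block */
            BUILD_BUG_ON(DEF_ADDRS_PER_BLOCK != 1018);
            /* (8 * 4096) / ((11 + 8) * 8 + 1) = 214 dentries per block */
            BUILD_BUG_ON(NR_DENTRY_IN_BLOCK != 214);
    #endif
    }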
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 7494dc67c66f..7c38c6b76b60 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FALLOC_H_
#define _FALLOC_H_
@@ -19,13 +20,44 @@ struct space_resv {
};
#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
+#define FS_IOC_UNRESVSP _IOW('X', 41, struct space_resv)
#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
+#define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv)
+#define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv)
-#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
- FALLOC_FL_PUNCH_HOLE | \
- FALLOC_FL_COLLAPSE_RANGE | \
- FALLOC_FL_ZERO_RANGE | \
- FALLOC_FL_INSERT_RANGE | \
- FALLOC_FL_UNSHARE_RANGE)
+/*
+ * Mask of all supported fallocate modes. Only one can be set at a time.
+ *
+ * In addition to the mode bit, the mode argument can also encode flags.
+ * FALLOC_FL_KEEP_SIZE is the only supported flag so far.
+ */
+#define FALLOC_FL_MODE_MASK (FALLOC_FL_ALLOCATE_RANGE | \
+ FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | \
+ FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE | \
+ FALLOC_FL_UNSHARE_RANGE | \
+ FALLOC_FL_WRITE_ZEROES)
+
+/* on ia32 l_start is on a 32-bit boundary */
+#if defined(CONFIG_X86_64)
+struct space_resv_32 {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start __attribute__((packed));
+ /* len == 0 means until end of file */
+ __s64 l_len __attribute__((packed));
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserve area */
+};
+
+#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32)
+#define FS_IOC_UNRESVSP_32 _IOW ('X', 41, struct space_resv_32)
+#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
+#define FS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct space_resv_32)
+#define FS_IOC_ZERO_RANGE_32 _IOW ('X', 57, struct space_resv_32)
+
+#endif
#endif /* _FALLOC_H_ */
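
A condensed sketch of how a filesystem's ->fallocate() implementation might
validate the mode word against FALLOC_FL_MODE_MASK; the myfs_* names are
hypothetical and real implementations check considerably more:

    #include <linux/falloc.h>
    #include <linux/fs.h>

    static long myfs_fallocate(struct file *file, int mode,
                               loff_t offset, loff_t len)
    {
            /* Reject anything beyond one supported mode plus KEEP_SIZE. */
            if (mode & ~(FALLOC_FL_MODE_MASK | FALLOC_FL_KEEP_SIZE))
                    return -EOPNOTSUPP;

            /* This hypothetical filesystem only implements hole punching. */
            if ((mode & FALLOC_FL_MODE_MASK) != FALLOC_FL_PUNCH_HOLE)
                    return -EOPNOTSUPP;

            return myfs_punch_hole(file, offset, len); /* hypothetical */
    }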
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index cef93ddcc5a0..879cff5eccd4 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -1,8 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FANOTIFY_H
#define _LINUX_FANOTIFY_H
+#include <linux/sysctl.h>
#include <uapi/linux/fanotify.h>
-/* not valid from userspace, only kernel internal */
-#define FAN_MARK_ONDIR 0x00000100
+#define FAN_GROUP_FLAG(group, flag) \
+ ((group)->fanotify_data.flags & (flag))
+
+/*
+ * Flags allowed to be passed from/to userspace.
+ *
+ * We intentionally do not add new bits to the old FAN_ALL_* constants, because
+ * they are uapi exposed constants. If there are programs out there using
+ * these constant, the programs may break if re-compiled with new uapi headers
+ * and then run on an old kernel.
+ */
+
+/* Group classes where permission events are allowed */
+#define FANOTIFY_PERM_CLASSES (FAN_CLASS_CONTENT | \
+ FAN_CLASS_PRE_CONTENT)
+
+#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FANOTIFY_PERM_CLASSES)
+
+#define FANOTIFY_FID_BITS (FAN_REPORT_DFID_NAME_TARGET)
+
+#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD | FAN_REPORT_MNT)
+
+/*
+ * fanotify_init() flags that require CAP_SYS_ADMIN.
+ * We do not allow unprivileged groups to request permission events.
+ * We do not allow unprivileged groups to get other processes' pids in events.
+ * We do not allow unprivileged groups to use unlimited resources.
+ */
+#define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \
+ FAN_REPORT_TID | \
+ FAN_REPORT_PIDFD | \
+ FAN_REPORT_FD_ERROR | \
+ FAN_UNLIMITED_QUEUE | \
+ FAN_UNLIMITED_MARKS)
+
+/*
+ * fanotify_init() flags that are allowed for a user without CAP_SYS_ADMIN.
+ * FAN_CLASS_NOTIF is the only class we allow for an unprivileged group.
+ * We do not allow unprivileged groups to get file descriptors in events,
+ * so one of the flags for reporting file handles is required.
+ */
+#define FANOTIFY_USER_INIT_FLAGS (FAN_CLASS_NOTIF | \
+ FANOTIFY_FID_BITS | FAN_REPORT_MNT | \
+ FAN_CLOEXEC | FAN_NONBLOCK)
+
+#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \
+ FANOTIFY_USER_INIT_FLAGS)
+
+/* Internal group flags */
+#define FANOTIFY_UNPRIV 0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV)
+
+#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
+ FAN_MARK_FILESYSTEM | FAN_MARK_MNTNS)
+
+#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \
+ FAN_MARK_FLUSH)
+
+#define FANOTIFY_MARK_IGNORE_BITS (FAN_MARK_IGNORED_MASK | \
+ FAN_MARK_IGNORE)
+
+#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \
+ FANOTIFY_MARK_CMD_BITS | \
+ FANOTIFY_MARK_IGNORE_BITS | \
+ FAN_MARK_DONT_FOLLOW | \
+ FAN_MARK_ONLYDIR | \
+ FAN_MARK_IGNORED_SURV_MODIFY | \
+ FAN_MARK_EVICTABLE)
+
+/*
+ * Events that can be reported with data type FSNOTIFY_EVENT_PATH.
+ * Note that FAN_MODIFY can also be reported with data type
+ * FSNOTIFY_EVENT_INODE.
+ */
+#define FANOTIFY_PATH_EVENTS (FAN_ACCESS | FAN_MODIFY | \
+ FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC)
+
+/*
+ * Directory entry modification events - reported only to directory
+ * where entry is modified and not to a watching parent.
+ */
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
+ FAN_RENAME)
+
+/* Content events can be used to inspect file content */
+#define FANOTIFY_CONTENT_PERM_EVENTS (FAN_OPEN_PERM | FAN_OPEN_EXEC_PERM | \
+ FAN_ACCESS_PERM)
+/* Pre-content events can be used to fill file content */
+#define FANOTIFY_PRE_CONTENT_EVENTS (FAN_PRE_ACCESS)
+
+/* Events that require a permission response from user */
+#define FANOTIFY_PERM_EVENTS (FANOTIFY_CONTENT_PERM_EVENTS | \
+ FANOTIFY_PRE_CONTENT_EVENTS)
+
+/* Events that can be reported with event->fd */
+#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS)
+
+/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
+#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
+ FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF)
+
+/* Events that can only be reported with data type FSNOTIFY_EVENT_ERROR */
+#define FANOTIFY_ERROR_EVENTS (FAN_FS_ERROR)
+
+#define FANOTIFY_MOUNT_EVENTS (FAN_MNT_ATTACH | FAN_MNT_DETACH)
+
+/* Events that user can request to be notified on */
+#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \
+ FANOTIFY_INODE_EVENTS | \
+ FANOTIFY_ERROR_EVENTS | \
+ FANOTIFY_MOUNT_EVENTS)
+
+/* Extra flags that may be reported with event or control handling of events */
+#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
+/* Events that may be reported to user */
+#define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \
+ FANOTIFY_PERM_EVENTS | \
+ FAN_Q_OVERFLOW | FAN_ONDIR)
+
+/* Events and flags relevant only for directories */
+#define FANOTIFY_DIRONLY_EVENT_BITS (FANOTIFY_DIRENT_EVENTS | \
+ FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
+#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
+ FANOTIFY_EVENT_FLAGS)
+
+/* These masks check for invalid bits in permission responses. */
+#define FANOTIFY_RESPONSE_ACCESS (FAN_ALLOW | FAN_DENY)
+#define FANOTIFY_RESPONSE_FLAGS (FAN_AUDIT | FAN_INFO)
+#define FANOTIFY_RESPONSE_VALID_MASK \
+ (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS | \
+ (FAN_ERRNO_MASK << FAN_ERRNO_SHIFT))
+
+/* Do not use these old uapi constants internally */
+#undef FAN_ALL_CLASS_BITS
+#undef FAN_ALL_INIT_FLAGS
+#undef FAN_ALL_MARK_FLAGS
+#undef FAN_ALL_EVENTS
+#undef FAN_ALL_PERM_EVENTS
+#undef FAN_ALL_OUTGOING_EVENTS
+
#endif /* _LINUX_FANOTIFY_H */
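
A simplified sketch of the privilege filtering these masks enable in
fanotify_init(); this condenses logic that actually lives across
fs/notify/fanotify/, so treat it as illustrative only:

    #include <linux/errno.h>
    #include <linux/fanotify.h>

    static int check_fanotify_init_flags(unsigned int flags, bool is_admin)
    {
            /* Unknown bits are rejected outright. */
            if (flags & ~FANOTIFY_INIT_FLAGS)
                    return -EINVAL;

            /* Admin-only features require CAP_SYS_ADMIN. */
            if (!is_admin && (flags & FANOTIFY_ADMIN_INIT_FLAGS))
                    return -EPERM;

            return 0;
    }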
diff --git a/include/linux/fault-inject-usercopy.h b/include/linux/fault-inject-usercopy.h
new file mode 100644
index 000000000000..56c3a693fdd9
--- /dev/null
+++ b/include/linux/fault-inject-usercopy.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FAULT_INJECT_USERCOPY_H__
+#define __LINUX_FAULT_INJECT_USERCOPY_H__
+
+/*
+ * This header provides a wrapper for injecting failures into user space
+ * memory access functions.
+ */
+
+#include <linux/types.h>
+
+#ifdef CONFIG_FAULT_INJECTION_USERCOPY
+
+bool should_fail_usercopy(void);
+
+#else
+
+static inline bool should_fail_usercopy(void) { return false; }
+
+#endif /* CONFIG_FAULT_INJECTION_USERCOPY */
+
+#endif /* __LINUX_FAULT_INJECT_USERCOPY_H__ */
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 728d4e0292aa..58fd14c82270 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -1,16 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FAULT_INJECT_H
#define _LINUX_FAULT_INJECT_H
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct dentry;
+struct kmem_cache;
+
+enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+};
+
#ifdef CONFIG_FAULT_INJECTION
-#include <linux/types.h>
-#include <linux/debugfs.h>
-#include <linux/ratelimit.h>
#include <linux/atomic.h>
+#include <linux/configfs.h>
+#include <linux/ratelimit.h>
/*
* For explanation of the elements of this struct, see
- * Documentation/fault-injection/fault-injection.txt
+ * Documentation/fault-injection/fault-injection.rst
*/
struct fault_attr {
unsigned long probability;
@@ -42,8 +52,31 @@ struct fault_attr {
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
int setup_fault_attr(struct fault_attr *attr, char *str);
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
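+
+/*
+ * Minimal usage sketch (hypothetical fail_foo attribute, not part of
+ * this header):
+ *
+ * static DECLARE_FAULT_ATTR(fail_foo);
+ *
+ * if (should_fail(&fail_foo, size))
+ * return -EIO;
+ *
+ * The attribute is then typically exposed to userspace via
+ * fault_create_debugfs_attr("fail_foo", NULL, &fail_foo).
+ */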
+#else /* CONFIG_FAULT_INJECTION */
+
+struct fault_attr {
+};
+
+#define DECLARE_FAULT_ATTR(name) struct fault_attr name = {}
+
+static inline int setup_fault_attr(struct fault_attr *attr, char *str)
+{
+ return 0; /* Note: 0 means error for __setup() handlers! */
+}
+static inline bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
+{
+ return false;
+}
+static inline bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return false;
+}
+
+#endif /* CONFIG_FAULT_INJECTION */
+
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct dentry *fault_create_debugfs_attr(const char *name,
@@ -59,14 +92,40 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-#endif /* CONFIG_FAULT_INJECTION */
+#ifdef CONFIG_FAULT_INJECTION_CONFIGFS
-struct kmem_cache;
+struct fault_config {
+ struct fault_attr attr;
+ struct config_group group;
+};
+
+void fault_config_init(struct fault_config *config, const char *name);
+
+#else /* CONFIG_FAULT_INJECTION_CONFIGFS */
+
+struct fault_config {
+};
+
+static inline void fault_config_init(struct fault_config *config,
+ const char *name)
+{
+}
+
+#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
+
+#ifdef CONFIG_FAIL_PAGE_ALLOC
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+#else
+static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+ return false;
+}
+#endif /* CONFIG_FAIL_PAGE_ALLOC */
#ifdef CONFIG_FAILSLAB
-extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
+int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#else
-static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+static inline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
return false;
}
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a964d076b4dc..05cc251035da 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -1,26 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FB_H
#define _LINUX_FB_H
-#include <linux/kgdb.h>
#include <uapi/linux/fb.h>
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user)
-#include <linux/fs.h>
-#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/notifier.h>
-#include <linux/list.h>
-#include <linux/backlight.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-struct vm_area_struct;
-struct fb_info;
+#include <asm/video.h>
+
+struct backlight_device;
struct device;
+struct device_node;
+struct fb_info;
struct file;
+struct i2c_adapter;
+struct inode;
+struct lcd_device;
+struct module;
+struct notifier_block;
+struct page;
struct videomode;
-struct device_node;
+struct vm_area_struct;
/* Definitions below are used in the parsed monitor specs */
#define FB_DPMS_ACTIVE_OFF 1
@@ -123,54 +129,24 @@ struct fb_cursor_user {
* Register/unregister for framebuffer events
*/
-/* The resolution of the passed in fb_info about to change */
-#define FB_EVENT_MODE_CHANGE 0x01
-/* The display on this fb_info is beeing suspended, no access to the
- * framebuffer is allowed any more after that call returns
- */
-#define FB_EVENT_SUSPEND 0x02
-/* The display on this fb_info was resumed, you can restore the display
- * if you own it
- */
-#define FB_EVENT_RESUME 0x03
-/* An entry from the modelist was removed */
-#define FB_EVENT_MODE_DELETE 0x04
-/* A driver registered itself */
+#ifdef CONFIG_GUMSTIX_AM200EPD
+/* only used by mach-pxa/am200epd.c */
#define FB_EVENT_FB_REGISTERED 0x05
-/* A driver unregistered itself */
#define FB_EVENT_FB_UNREGISTERED 0x06
-/* CONSOLE-SPECIFIC: get console to framebuffer mapping */
-#define FB_EVENT_GET_CONSOLE_MAP 0x07
-/* CONSOLE-SPECIFIC: set console to framebuffer mapping */
-#define FB_EVENT_SET_CONSOLE_MAP 0x08
-/* A hardware display blank change occurred */
-#define FB_EVENT_BLANK 0x09
-/* Private modelist is to be replaced */
-#define FB_EVENT_NEW_MODELIST 0x0A
-/* The resolution of the passed in fb_info about to change and
- all vc's should be changed */
-#define FB_EVENT_MODE_CHANGE_ALL 0x0B
-/* A software display blank change occurred */
-#define FB_EVENT_CONBLANK 0x0C
-/* Get drawing requirements */
-#define FB_EVENT_GET_REQ 0x0D
-/* Unbind from the console if possible */
-#define FB_EVENT_FB_UNBIND 0x0E
-/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
-#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
-/* A hardware display blank early change occured */
-#define FB_EARLY_EVENT_BLANK 0x10
-/* A hardware display blank revert early change occured */
-#define FB_R_EARLY_EVENT_BLANK 0x11
+#endif
struct fb_event {
struct fb_info *info;
void *data;
};
+/* Enough for the VT console needs, see its max_font_width/height */
+#define FB_MAX_BLIT_WIDTH 64
+#define FB_MAX_BLIT_HEIGHT 128
+
struct fb_blit_caps {
- u32 x;
- u32 y;
+ DECLARE_BITMAP(x, FB_MAX_BLIT_WIDTH);
+ DECLARE_BITMAP(y, FB_MAX_BLIT_HEIGHT);
u32 len;
u32 flags;
};
@@ -217,23 +193,35 @@ struct fb_pixmap {
u32 scan_align; /* alignment per scanline */
u32 access_align; /* alignment per read/write (bits) */
u32 flags; /* see FB_PIXMAP_* */
- u32 blit_x; /* supported bit block dimensions (1-32)*/
- u32 blit_y; /* Format: blit_x = 1 << (width - 1) */
- /* blit_y = 1 << (height - 1) */
- /* if 0, will be set to 0xffffffff (all)*/
+ /* supported bit block dimensions */
+ /* Format: test_bit(width - 1, blit_x) */
+ /* test_bit(height - 1, blit_y) */
+ /* if zero, will be set to full (all) */
+ DECLARE_BITMAP(blit_x, FB_MAX_BLIT_WIDTH);
+ DECLARE_BITMAP(blit_y, FB_MAX_BLIT_HEIGHT);
/* access methods */
void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size);
void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size);
};
#ifdef CONFIG_FB_DEFERRED_IO
+struct fb_deferred_io_pageref {
+ struct page *page;
+ unsigned long offset;
+ /* private */
+ struct list_head list;
+};
+
struct fb_deferred_io {
/* delay between mkwrite and deferred handler */
unsigned long delay;
- struct mutex lock; /* mutex that protects the page list */
- struct list_head pagelist; /* list of touched pages */
+ bool sort_pagereflist; /* sort pagelist by offset */
+ int open_count; /* number of opened files; protected by fb_info lock */
+ struct mutex lock; /* mutex that protects the pageref list */
+ struct list_head pagereflist; /* list of pagerefs for touched pages */
+ struct address_space *mapping; /* page cache object for fb device */
/* callback */
- void (*first_io)(struct fb_info *info);
+ struct page *(*get_page)(struct fb_info *info, unsigned long offset);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
#endif
@@ -400,7 +388,6 @@ struct fb_tile_ops {
#endif /* CONFIG_FB_TILEBLITTING */
/* FBINFO_* = fb_info.flags bit flags */
-#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */
#define FBINFO_HWACCEL_DISABLED 0x0002
/* When FBINFO_HWACCEL_DISABLED is set:
* Hardware acceleration is turned off. Software implementations
@@ -427,8 +414,6 @@ struct fb_tile_ops {
#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */
#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */
-#define FBINFO_MISC_USEREVENT 0x10000 /* event request
- from userspace */
#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
/* A driver may set this flag to indicate that it does want a set_par to be
@@ -443,8 +428,6 @@ struct fb_tile_ops {
*/
#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
-/* where the fb is a firmware driver, and can be replaced with a proper one */
-#define FBINFO_MISC_FIRMWARE 0x80000
/*
* Host and GPU endianness differ.
*/
@@ -455,45 +438,65 @@ struct fb_tile_ops {
* and host endianness. Drivers should not use this flag.
*/
#define FBINFO_BE_MATH 0x100000
+/*
+ * Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM
+ * drivers to stop userspace from trying to share buffers behind the kernel's
+ * back. Instead dma-buf based buffer sharing should be used.
+ */
+#define FBINFO_HIDE_SMEM_START 0x200000
-/* report to the VT layer that this fb driver can accept forced console
- output like oopses */
-#define FBINFO_CAN_FORCE_OUTPUT 0x200000
struct fb_info {
- atomic_t count;
+ refcount_t count;
int node;
int flags;
+ /*
+ * -1 by default, set to a FB_ROTATE_* value by the driver, if it knows
+ * an LCD is not mounted upright and fbcon should rotate to compensate.
+ */
+ int fbcon_rotate_hint;
struct mutex lock; /* Lock for open/release/ioctl funcs */
struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
- struct work_struct queue; /* Framebuffer event queue */
struct fb_pixmap pixmap; /* Image hardware mapper */
struct fb_pixmap sprite; /* Cursor hardware mapper */
struct fb_cmap cmap; /* Current cmap */
struct list_head modelist; /* mode list */
struct fb_videomode *mode; /* current mode */
-#ifdef CONFIG_FB_BACKLIGHT
+ int blank; /* current blanking; see FB_BLANK_ constants */
+
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
/* assigned backlight device */
- /* set before framebuffer registration,
+ /* set before framebuffer registration,
remove after unregister */
struct backlight_device *bl_dev;
/* Backlight level curve */
- struct mutex bl_curve_mutex;
+ struct mutex bl_curve_mutex;
u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
+
+ /*
+ * Assigned LCD device; set before framebuffer
+ * registration, remove after unregister
+ */
+ struct lcd_device *lcd_dev;
+
#ifdef CONFIG_FB_DEFERRED_IO
struct delayed_work deferred_work;
+ unsigned long npagerefs;
+ struct fb_deferred_io_pageref *pagerefs;
struct fb_deferred_io *fbdefio;
#endif
- struct fb_ops *fbops;
+ const struct fb_ops *fbops;
struct device *device; /* This is the parent */
+#if defined(CONFIG_FB_DEVICE)
struct device *dev; /* This is this fb device */
+#endif
int class_flag; /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
@@ -502,47 +505,19 @@ struct fb_info {
char __iomem *screen_base; /* Virtual address */
char *screen_buffer;
};
- unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
- void *pseudo_palette; /* Fake palette of 16 colors */
+ unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
+ void *pseudo_palette; /* Fake palette of 16 colors */
#define FBINFO_STATE_RUNNING 0
#define FBINFO_STATE_SUSPENDED 1
u32 state; /* Hardware state i.e suspend */
void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
- /* we need the PCI or similar aperture base/size not
- smem_start/size as smem_start may just be an object
- allocated inside the aperture so may not actually overlap */
- struct apertures_struct {
- unsigned int count;
- struct aperture {
- resource_size_t base;
- resource_size_t size;
- } ranges[0];
- } *apertures;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
+ bool skip_panic; /* Do not write to the fb after a panic */
};
-static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
- struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct)
- + max_num * sizeof(struct aperture), GFP_KERNEL);
- if (!a)
- return NULL;
- a->count = max_num;
- return a;
-}
-
-#ifdef MODULE
-#define FBINFO_DEFAULT FBINFO_MODULE
-#else
-#define FBINFO_DEFAULT 0
-#endif
-
-// This will go away
-#define FBINFO_FLAG_MODULE FBINFO_MODULE
-#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
-
/* This will go away
* fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags
* when it wants to turn the acceleration engine on. This is
@@ -551,56 +526,6 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
*/
#define STUPID_ACCELF_TEXT_SHIT
-// This will go away
-#if defined(__sparc__)
-
-/* We map all of our framebuffers such that big-endian accesses
- * are what we want, so the following is sufficient.
- */
-
-// This will go away
-#define fb_readb sbus_readb
-#define fb_readw sbus_readw
-#define fb_readl sbus_readl
-#define fb_readq sbus_readq
-#define fb_writeb sbus_writeb
-#define fb_writew sbus_writew
-#define fb_writel sbus_writel
-#define fb_writeq sbus_writeq
-#define fb_memset sbus_memset_io
-#define fb_memcpy_fromfb sbus_memcpy_fromio
-#define fb_memcpy_tofb sbus_memcpy_toio
-
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__)
-
-#define fb_readb __raw_readb
-#define fb_readw __raw_readw
-#define fb_readl __raw_readl
-#define fb_readq __raw_readq
-#define fb_writeb __raw_writeb
-#define fb_writew __raw_writew
-#define fb_writel __raw_writel
-#define fb_writeq __raw_writeq
-#define fb_memset memset_io
-#define fb_memcpy_fromfb memcpy_fromio
-#define fb_memcpy_tofb memcpy_toio
-
-#else
-
-#define fb_readb(addr) (*(volatile u8 *) (addr))
-#define fb_readw(addr) (*(volatile u16 *) (addr))
-#define fb_readl(addr) (*(volatile u32 *) (addr))
-#define fb_readq(addr) (*(volatile u64 *) (addr))
-#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b))
-#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b))
-#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b))
-#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b))
-#define fb_memset memset
-#define fb_memcpy_fromfb memcpy
-#define fb_memcpy_tofb memcpy
-
-#endif
-
#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0)
#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \
(val) << (bits))
@@ -611,15 +536,44 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
* `Generic' versions of the frame buffer device operations
*/
-extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
-extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_blank(struct fb_info *info, int blank);
-extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+
+/*
+ * Helpers for framebuffers in I/O memory
+ */
+
+extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
+extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
+
+#define __FB_DEFAULT_IOMEM_OPS_RDWR \
+ .fb_read = fb_io_read, \
+ .fb_write = fb_io_write
+
+#define __FB_DEFAULT_IOMEM_OPS_DRAW \
+ .fb_fillrect = cfb_fillrect, \
+ .fb_copyarea = cfb_copyarea, \
+ .fb_imageblit = cfb_imageblit
+
+#define __FB_DEFAULT_IOMEM_OPS_MMAP \
+ .fb_mmap = fb_io_mmap
+
+#define FB_DEFAULT_IOMEM_OPS \
+ __FB_DEFAULT_IOMEM_OPS_RDWR, \
+ __FB_DEFAULT_IOMEM_OPS_DRAW, \
+ __FB_DEFAULT_IOMEM_OPS_MMAP
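+
+/*
+ * Sketch of a driver's fb_ops built from these helpers (hypothetical
+ * foo driver, not part of this header):
+ *
+ * static const struct fb_ops foo_fb_ops = {
+ * .owner = THIS_MODULE,
+ * FB_DEFAULT_IOMEM_OPS,
+ * .fb_set_par = foo_set_par,
+ * };
+ */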
+
/*
- * Drawing operations where framebuffer is in system RAM
+ * Helpers for framebuffers in system memory
*/
+
extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void sys_imageblit(struct fb_info *info, const struct fb_image *image);
@@ -628,14 +582,32 @@ extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
-/* drivers/video/fbmem.c */
+#define __FB_DEFAULT_SYSMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_SYSMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
+
+/*
+ * Helpers for framebuffers in DMA-able memory
+ */
+
+#define __FB_DEFAULT_DMAMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_DMAMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
+
+/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
-extern int unregister_framebuffer(struct fb_info *fb_info);
-extern int unlink_framebuffer(struct fb_info *fb_info);
-extern int remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary);
-extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
-extern int fb_show_logo(struct fb_info *fb_info, int rotate);
+extern void unregister_framebuffer(struct fb_info *fb_info);
+extern int devm_register_framebuffer(struct device *dev, struct fb_info *fb_info);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
u32 height, u32 shift_high, u32 shift_low, u32 mod);
@@ -646,11 +618,10 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern struct fb_info *registered_fb[FB_MAX];
-extern int num_registered_fb;
-extern struct class *fb_class;
-
-extern int lock_fb_info(struct fb_info *info);
+static inline void lock_fb_info(struct fb_info *info)
+{
+ mutex_lock(&info->lock);
+}
static inline void unlock_fb_info(struct fb_info *info)
{
@@ -672,16 +643,90 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
}
}
-/* drivers/video/fb_defio.c */
+/* fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
-extern void fb_deferred_io_init(struct fb_info *info);
+extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file);
+extern void fb_deferred_io_release(struct fb_info *info);
extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);
+/*
+ * Generate callbacks for deferred I/O
+ */
+
+#define __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, __mode) \
+ static ssize_t __prefix ## _defio_read(struct fb_info *info, char __user *buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ return fb_ ## __mode ## _read(info, buf, count, ppos); \
+ } \
+ static ssize_t __prefix ## _defio_write(struct fb_info *info, const char __user *buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ unsigned long offset = *ppos; \
+ ssize_t ret = fb_ ## __mode ## _write(info, buf, count, ppos); \
+ if (ret > 0) \
+ __damage_range(info, offset, ret); \
+ return ret; \
+ }
+
+#define __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, __mode) \
+ static void __prefix ## _defio_fillrect(struct fb_info *info, \
+ const struct fb_fillrect *rect) \
+ { \
+ __mode ## _fillrect(info, rect); \
+ __damage_area(info, rect->dx, rect->dy, rect->width, rect->height); \
+ } \
+ static void __prefix ## _defio_copyarea(struct fb_info *info, \
+ const struct fb_copyarea *area) \
+ { \
+ __mode ## _copyarea(info, area); \
+ __damage_area(info, area->dx, area->dy, area->width, area->height); \
+ } \
+ static void __prefix ## _defio_imageblit(struct fb_info *info, \
+ const struct fb_image *image) \
+ { \
+ __mode ## _imageblit(info, image); \
+ __damage_area(info, image->dx, image->dy, image->width, image->height); \
+ }
+
+#define FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, io) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, cfb)
+
+#define FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
+#define FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
+/*
+ * Initializes struct fb_ops for deferred I/O.
+ */
+
+#define __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix) \
+ .fb_read = __prefix ## _defio_read, \
+ .fb_write = __prefix ## _defio_write
+
+#define __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix) \
+ .fb_fillrect = __prefix ## _defio_fillrect, \
+ .fb_copyarea = __prefix ## _defio_copyarea, \
+ .fb_imageblit = __prefix ## _defio_imageblit
+
+#define __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix) \
+ .fb_mmap = fb_deferred_io_mmap
+
+#define FB_DEFAULT_DEFERRED_OPS(__prefix) \
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix), \
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix), \
+ __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix)
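+
+/*
+ * Putting the generators together (hypothetical foo driver with deferred
+ * I/O over I/O memory; a sketch, not part of this header):
+ *
+ * FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(foo, foo_damage_range, foo_damage_area)
+ *
+ * static const struct fb_ops foo_fb_ops = {
+ * .owner = THIS_MODULE,
+ * FB_DEFAULT_DEFERRED_OPS(foo),
+ * };
+ */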
+
static inline bool fb_be_math(struct fb_info *info)
{
#ifdef CONFIG_FB_FOREIGN_ENDIAN
@@ -701,14 +746,29 @@ static inline bool fb_be_math(struct fb_info *info)
#endif /* CONFIG_FB_FOREIGN_ENDIAN */
}
-/* drivers/video/fbsysfs.c */
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
-extern int fb_init_device(struct fb_info *fb_info);
-extern void fb_cleanup_device(struct fb_info *head);
extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
-/* drivers/video/fbmon.c */
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+struct backlight_device *fb_bl_device(struct fb_info *info);
+void fb_bl_notify_blank(struct fb_info *info, int old_blank);
+#else
+static inline struct backlight_device *fb_bl_device(struct fb_info *info)
+{
+ return NULL;
+}
+
+static inline void fb_bl_notify_blank(struct fb_info *info, int old_blank)
+{ }
+#endif
+
+static inline struct lcd_device *fb_lcd_device(struct fb_info *info)
+{
+ return info->lcd_dev;
+}
+
+/* fbmon.c */
#define FB_MAXTIMINGS 0
#define FB_VSYNCTIMINGS 1
#define FB_HSYNCTIMINGS 2
@@ -732,8 +792,6 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var);
extern const unsigned char *fb_firmware_edid(struct device *device);
extern void fb_edid_to_monspecs(unsigned char *edid,
struct fb_monspecs *specs);
-extern void fb_edid_add_monspecs(unsigned char *edid,
- struct fb_monspecs *specs);
extern void fb_destroy_modedb(struct fb_videomode *modedb);
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
@@ -744,7 +802,7 @@ extern int of_get_fb_videomode(struct device_node *np,
extern int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode);
-/* drivers/video/modedb.c */
+/* modedb.c */
#define VESA_MODEDB_SIZE 43
#define DMT_SIZE 0x50
@@ -770,7 +828,7 @@ extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num,
extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs,
struct list_head *head);
-/* drivers/video/fbcmap.c */
+/* fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
@@ -805,9 +863,7 @@ struct dmt_videomode {
const struct fb_videomode *mode;
};
-extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
@@ -822,7 +878,12 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
const struct fb_videomode *default_mode,
unsigned int default_bpp);
-/* Convenience logging macros */
+bool fb_modesetting_disabled(const char *drvname);
+
+/*
+ * Convenience logging macros
+ */
+
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_notice(info, fmt, ...) \
@@ -834,4 +895,12 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
#define fb_dbg(fb_info, fmt, ...) \
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_warn_once(fb_info, fmt, ...) \
+ pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
+#define fb_WARN_ONCE(fb_info, condition, fmt, ...) \
+ WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_WARN_ON_ONCE(fb_info, x) \
+ fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")")
+
#endif /* _LINUX_FB_H */
diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h
new file mode 100644
index 000000000000..f206370060e1
--- /dev/null
+++ b/include/linux/fbcon.h
@@ -0,0 +1,55 @@
+#ifndef _LINUX_FBCON_H
+#define _LINUX_FBCON_H
+
+#include <linux/compiler_types.h>
+
+struct fb_blit_caps;
+struct fb_info;
+struct fb_var_screeninfo;
+struct fb_videomode;
+
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+void __init fb_console_init(void);
+void __exit fb_console_exit(void);
+int fbcon_fb_registered(struct fb_info *info);
+void fbcon_fb_unregistered(struct fb_info *info);
+void fbcon_fb_unbind(struct fb_info *info);
+void fbcon_suspended(struct fb_info *info);
+void fbcon_resumed(struct fb_info *info);
+int fbcon_mode_deleted(struct fb_info *info,
+ struct fb_videomode *mode);
+void fbcon_delete_modelist(struct list_head *head);
+void fbcon_new_modelist(struct fb_info *info);
+void fbcon_get_requirement(struct fb_info *info,
+ struct fb_blit_caps *caps);
+void fbcon_fb_blanked(struct fb_info *info, int blank);
+int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var);
+void fbcon_update_vcs(struct fb_info *info, bool all);
+void fbcon_remap_all(struct fb_info *info);
+int fbcon_set_con2fb_map_ioctl(void __user *argp);
+int fbcon_get_con2fb_map_ioctl(void __user *argp);
+#else
+static inline void fb_console_init(void) {}
+static inline void fb_console_exit(void) {}
+static inline int fbcon_fb_registered(struct fb_info *info) { return 0; }
+static inline void fbcon_fb_unregistered(struct fb_info *info) {}
+static inline void fbcon_fb_unbind(struct fb_info *info) {}
+static inline void fbcon_suspended(struct fb_info *info) {}
+static inline void fbcon_resumed(struct fb_info *info) {}
+static inline int fbcon_mode_deleted(struct fb_info *info,
+ struct fb_videomode *mode) { return 0; }
+static inline void fbcon_delete_modelist(struct list_head *head) {}
+static inline void fbcon_new_modelist(struct fb_info *info) {}
+static inline void fbcon_get_requirement(struct fb_info *info,
+ struct fb_blit_caps *caps) {}
+static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {}
+static inline int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var) { return 0; }
+static inline void fbcon_update_vcs(struct fb_info *info, bool all) {}
+static inline void fbcon_remap_all(struct fb_info *info) {}
+static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; }
+static inline int fbcon_get_con2fb_map_ioctl(void __user *argp) { return 0; }
+#endif
+
+#endif /* _LINUX_FBCON_H */
diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h
index 5009fa16b5d8..3d14ebe59dc9 100644
--- a/include/linux/fcdevice.h
+++ b/include/linux/fcdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. NET is implemented using the BSD Socket
@@ -12,13 +13,7 @@
* Relocated to include/linux where it belongs by Alan Cox
* <gw4pts@gw4pts.ampr.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* WARNING: This move may well be temporary. This file will get merged with others RSN.
- *
*/
#ifndef _LINUX_FCDEVICE_H
#define _LINUX_FCDEVICE_H
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 1b48d9c9a561..a332e79b3207 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -1,17 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FCNTL_H
#define _LINUX_FCNTL_H
+#include <linux/stat.h>
#include <uapi/linux/fcntl.h>
-/* list of all valid flags for the open/openat flags argument: */
+/* List of all valid flags for the open/openat flags argument: */
#define VALID_OPEN_FLAGS \
(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
- O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | __O_SYNC | O_DSYNC | \
FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
+/* List of all valid flags for the how->resolve argument: */
+#define VALID_RESOLVE_FLAGS \
+ (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
+ RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED)
+
+/* List of all open_how "versions". */
+#define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */
+#define OPEN_HOW_SIZE_LATEST OPEN_HOW_SIZE_VER0
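+
+/*
+ * Userspace view (a sketch, not part of this header): openat2(2) takes a
+ * struct open_how whose ->resolve field is validated against
+ * VALID_RESOLVE_FLAGS, e.g.:
+ *
+ * struct open_how how = {
+ * .flags = O_RDONLY | O_CLOEXEC,
+ * .resolve = RESOLVE_BENEATH | RESOLVE_NO_SYMLINKS,
+ * };
+ * fd = openat2(dirfd, "app.cfg", &how, sizeof(how));
+ */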
+
#ifndef force_o_largefile
-#define force_o_largefile() (BITS_PER_LONG != 32)
+#define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T))
#endif
#if BITS_PER_LONG == 32
diff --git a/include/linux/fd.h b/include/linux/fd.h
index 69275bccc3e4..ece5ea53205b 100644
--- a/include/linux/fd.h
+++ b/include/linux/fd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FD_H
#define _LINUX_FD_H
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index 32c22cfb238b..906ee446db92 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -13,11 +14,6 @@
* Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_FDDIDEVICE_H
#define _LINUX_FDDIDEVICE_H
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 6e84b2cae6ad..c45306a9f007 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* descriptor table internals; you almost certainly want file.h instead.
*/
@@ -9,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/nospec.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>
@@ -30,16 +32,6 @@ struct fdtable {
struct rcu_head rcu;
};
-static inline bool close_on_exec(unsigned int fd, const struct fdtable *fdt)
-{
- return test_bit(fd, fdt->close_on_exec);
-}
-
-static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
-{
- return test_bit(fd, fdt->open_fds);
-}
-
/*
* Open file table structure
*/
@@ -77,46 +69,49 @@ struct dentry;
/*
* The caller must ensure that fd table isn't shared or hold rcu or file lock
*/
-static inline struct file *__fcheck_files(struct files_struct *files, unsigned int fd)
+static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
-
- if (fd < fdt->max_fds)
- return rcu_dereference_raw(fdt->fd[fd]);
- return NULL;
+ unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds);
+ struct file *needs_masking;
+
+ /*
+ * 'mask' is zero for an out-of-bounds fd, all ones for ok.
+ * 'fd&mask' is 'fd' for ok, or 0 for out of bounds.
+ *
+ * Accessing fdt->fd[0] is ok, but needs masking of the result.
+ */
+ needs_masking = rcu_dereference_raw(fdt->fd[fd&mask]);
+ return (struct file *)(mask & (unsigned long)needs_masking);
}
-static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
+static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
- !lockdep_is_held(&files->file_lock),
+ RCU_LOCKDEP_WARN(!lockdep_is_held(&files->file_lock),
"suspicious rcu_dereference_check() usage");
- return __fcheck_files(files, fd);
+ return files_lookup_fd_raw(files, fd);
}
-/*
- * Check whether the specified fd has an open file.
- */
-#define fcheck(fd) fcheck_files(current->files, fd)
+static inline bool close_on_exec(unsigned int fd, const struct files_struct *files)
+{
+ return test_bit(fd, files_fdtable(files)->close_on_exec);
+}
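+
+/*
+ * Lookup sketch (not part of this header); files_lookup_fd_locked()
+ * requires ->file_lock to be held:
+ *
+ * spin_lock(&files->file_lock);
+ * file = files_lookup_fd_locked(files, fd);
+ * ...
+ * spin_unlock(&files->file_lock);
+ */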
struct task_struct;
-struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
-void reset_files_struct(struct files_struct *);
-int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
+int unshare_files(void);
+struct fd_range {
+ unsigned int from, to;
+};
+struct files_struct *dup_fd(struct files_struct *, struct fd_range *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
const void *);
-extern int __alloc_fd(struct files_struct *files,
- unsigned start, unsigned end, unsigned flags);
-extern void __fd_install(struct files_struct *files,
- unsigned int fd, struct file *file);
-extern int __close_fd(struct files_struct *files,
- unsigned int fd);
+extern int close_fd(unsigned int fd);
+extern struct file *file_close_fd(unsigned int fd);
extern struct kmem_cache *files_cachep;
diff --git a/include/linux/fec.h b/include/linux/fec.h
index 1454a503622d..9aaf53f07269 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* include/linux/fec.h
*
* Copyright (c) 2009 Orex Computed Radiography
@@ -6,10 +7,6 @@
* Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* Header file for the FEC platform data
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_FEC_H__
#define __LINUX_FEC_H__
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
new file mode 100644
index 000000000000..966092ffa89a
--- /dev/null
+++ b/include/linux/fiemap.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FIEMAP_H
+#define _LINUX_FIEMAP_H 1
+
+#include <uapi/linux/fiemap.h>
+#include <linux/fs.h>
+
+/**
+ * struct fiemap_extent_info - fiemap request to a filesystem
+ * @fi_flags: Flags as passed from user
+ * @fi_extents_mapped: Number of mapped extents
+ * @fi_extents_max: Size of fiemap_extent array
+ * @fi_extents_start: Start of fiemap_extent array
+ */
+struct fiemap_extent_info {
+ unsigned int fi_flags;
+ unsigned int fi_extents_mapped;
+ unsigned int fi_extents_max;
+ struct fiemap_extent __user *fi_extents_start;
+};
+
+int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 *len, u32 supported_flags);
+int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
+ u64 phys, u64 len, u32 flags);
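+
+/*
+ * Typical shape of a filesystem's ->fiemap (a sketch with hypothetical
+ * foo_* names and extent values, not part of this header):
+ *
+ * static int foo_fiemap(struct inode *inode,
+ * struct fiemap_extent_info *fieinfo,
+ * u64 start, u64 len)
+ * {
+ * int ret = fiemap_prep(inode, fieinfo, start, &len, 0);
+ *
+ * if (ret)
+ * return ret;
+ * ...and then, for each mapped extent:
+ * return fiemap_fill_next_extent(fieinfo, logical, phys, elen,
+ * FIEMAP_EXTENT_LAST);
+ * }
+ */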
+
+#endif /* _LINUX_FIEMAP_H 1 */
diff --git a/include/linux/file.h b/include/linux/file.h
index 61eb82cbafba..cf389fde9bc2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Wrapper functions for accessing the file_struct fd array.
*/
@@ -8,82 +9,248 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/posix_types.h>
+#include <linux/errno.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
struct file;
extern void fput(struct file *);
struct file_operations;
+struct task_struct;
struct vfsmount;
struct dentry;
+struct inode;
struct path;
-extern struct file *alloc_file(const struct path *, fmode_t mode,
- const struct file_operations *fop);
-
-static inline void fput_light(struct file *file, int fput_needed)
-{
- if (fput_needed)
- fput(file);
-}
+extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
+ const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_pseudo_noaccount(struct inode *, struct vfsmount *,
+ const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_clone(struct file *, int flags,
+ const struct file_operations *);
+/* either a reference to struct file + flags
+ * (cloned vs. borrowed, pos locked), with
+ * flags stored in lower bits of value,
+ * or empty (represented by 0).
+ */
struct fd {
- struct file *file;
- unsigned int flags;
+ unsigned long word;
};
#define FDPUT_FPUT 1
#define FDPUT_POS_UNLOCK 2
-static inline void fdput(struct fd fd)
+#define fd_file(f) ((struct file *)((f).word & ~(FDPUT_FPUT|FDPUT_POS_UNLOCK)))
+static inline bool fd_empty(struct fd f)
{
- if (fd.flags & FDPUT_FPUT)
- fput(fd.file);
+ return unlikely(!f.word);
}
-extern struct file *fget(unsigned int fd);
-extern struct file *fget_raw(unsigned int fd);
-extern unsigned long __fdget(unsigned int fd);
-extern unsigned long __fdget_raw(unsigned int fd);
-extern unsigned long __fdget_pos(unsigned int fd);
-extern void __f_unlock_pos(struct file *);
-
-static inline struct fd __to_fd(unsigned long v)
+#define EMPTY_FD (struct fd){0}
+static inline struct fd BORROWED_FD(struct file *f)
{
- return (struct fd){(struct file *)(v & ~3),v & 3};
+ return (struct fd){(unsigned long)f};
}
-
-static inline struct fd fdget(unsigned int fd)
+static inline struct fd CLONED_FD(struct file *f)
{
- return __to_fd(__fdget(fd));
+ return (struct fd){(unsigned long)f | FDPUT_FPUT};
}
-static inline struct fd fdget_raw(unsigned int fd)
+static inline void fdput(struct fd fd)
{
- return __to_fd(__fdget_raw(fd));
+ if (unlikely(fd.word & FDPUT_FPUT))
+ fput(fd_file(fd));
}
-static inline struct fd fdget_pos(int fd)
-{
- return __to_fd(__fdget_pos(fd));
-}
+extern struct file *fget(unsigned int fd);
+extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
+extern struct file *fget_task_next(struct task_struct *task, unsigned int *fd);
+extern void __f_unlock_pos(struct file *);
+
+struct fd fdget(unsigned int fd);
+struct fd fdget_raw(unsigned int fd);
+struct fd fdget_pos(unsigned int fd);
static inline void fdput_pos(struct fd f)
{
- if (f.flags & FDPUT_POS_UNLOCK)
- __f_unlock_pos(f.file);
+ if (f.word & FDPUT_POS_UNLOCK)
+ __f_unlock_pos(fd_file(f));
fdput(f);
}
+DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
+DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd)
+DEFINE_CLASS(fd_pos, struct fd, fdput_pos(_T), fdget_pos(fd), int fd)
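+
+/*
+ * Typical guarded lookup (a sketch, not part of this header); fdput()
+ * runs automatically when 'f' goes out of scope:
+ *
+ * CLASS(fd, f)(fd);
+ * if (fd_empty(f))
+ * return -EBADF;
+ * return do_something(fd_file(f));
+ */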
+
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
-extern void put_filp(struct file *);
+extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
+DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
+ get_unused_fd_flags(flags), unsigned flags)
+DEFINE_FREE(fput, struct file *, if (!IS_ERR_OR_NULL(_T)) fput(_T))
+
+/*
+ * take_fd() will take care to set @fd to -EBADF ensuring that
+ * CLASS(get_unused_fd) won't call put_unused_fd(). This makes it
+ * easier to rely on CLASS(get_unused_fd):
+ *
+ * struct file *f;
+ *
+ * CLASS(get_unused_fd, fd)(O_CLOEXEC);
+ * if (fd < 0)
+ * return fd;
+ *
+ * f = dentry_open(&path, O_RDONLY, current_cred());
+ * if (IS_ERR(f))
+ * return PTR_ERR(f);
+ *
+ * fd_install(fd, f);
+ * return take_fd(fd);
+ */
+#define take_fd(fd) __get_and_null(fd, -EBADF)
+
extern void fd_install(unsigned int fd, struct file *file);
+int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags);
+
+int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
+
extern void flush_delayed_fput(void);
extern void __fput_sync(struct file *);
+extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max;
+
+/*
+ * fd_prepare: Combined fd + file allocation cleanup class.
+ * @err: Error code to indicate if allocation succeeded.
+ * @__fd: Allocated fd (may not be accessed directly)
+ * @__file: Allocated struct file pointer (may not be accessed directly)
+ *
+ * Allocates an fd and a file together. On error paths, automatically cleans
+ * up whichever resource was successfully allocated. Allows flexible file
+ * allocation with different functions per usage.
+ *
+ * Do not use directly.
+ */
+struct fd_prepare {
+ s32 err;
+ s32 __fd; /* do not access directly */
+ struct file *__file; /* do not access directly */
+};
+
+/* Typedef for fd_prepare cleanup guards. */
+typedef struct fd_prepare class_fd_prepare_t;
+
+/*
+ * Accessors for fd_prepare class members.
+ * _Generic() is used for zero-cost type safety.
+ */
+#define fd_prepare_fd(_fdf) \
+ (_Generic((_fdf), struct fd_prepare: (_fdf).__fd))
+
+#define fd_prepare_file(_fdf) \
+ (_Generic((_fdf), struct fd_prepare: (_fdf).__file))
+
+/* Do not use directly. */
+static inline void class_fd_prepare_destructor(const struct fd_prepare *fdf)
+{
+ if (unlikely(fdf->err)) {
+ if (likely(fdf->__fd >= 0))
+ put_unused_fd(fdf->__fd);
+ if (unlikely(!IS_ERR_OR_NULL(fdf->__file)))
+ fput(fdf->__file);
+ }
+}
+
+/* Do not use directly. */
+static inline int class_fd_prepare_lock_err(const struct fd_prepare *fdf)
+{
+ if (unlikely(fdf->err))
+ return fdf->err;
+ if (unlikely(fdf->__fd < 0))
+ return fdf->__fd;
+ if (unlikely(IS_ERR(fdf->__file)))
+ return PTR_ERR(fdf->__file);
+ if (unlikely(!fdf->__file))
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * __FD_PREPARE_INIT - Helper to initialize fd_prepare class.
+ * @_fd_flags: flags for get_unused_fd_flags()
+ * @_file_owned: expression that returns struct file *
+ *
+ * Returns a struct fd_prepare with fd, file, and err set.
+ * If fd allocation fails, fd will be negative and err will be set. If
+ * fd succeeds but the _file_owned expression fails, file will be an
+ * ERR_PTR and err will be set. The err field is the single source of truth for error
+ * checking.
+ */
+#define __FD_PREPARE_INIT(_fd_flags, _file_owned) \
+ ({ \
+ struct fd_prepare fdf = { \
+ .__fd = get_unused_fd_flags((_fd_flags)), \
+ }; \
+ if (likely(fdf.__fd >= 0)) \
+ fdf.__file = (_file_owned); \
+ fdf.err = ACQUIRE_ERR(fd_prepare, &fdf); \
+ fdf; \
+ })
+
+/*
+ * FD_PREPARE - Macro to declare and initialize an fd_prepare variable.
+ *
+ * Declares and initializes an fd_prepare variable with automatic
+ * cleanup. No separate scope required - cleanup happens when variable
+ * goes out of scope.
+ *
+ * @_fdf: name of struct fd_prepare variable to define
+ * @_fd_flags: flags for get_unused_fd_flags()
+ * @_file_owned: struct file to take ownership of (can be expression)
+ */
+#define FD_PREPARE(_fdf, _fd_flags, _file_owned) \
+ CLASS_INIT(fd_prepare, _fdf, __FD_PREPARE_INIT(_fd_flags, _file_owned))
+
+/*
+ * fd_publish - Publish prepared fd and file to the fd table.
+ * @_fdf: struct fd_prepare variable
+ */
+#define fd_publish(_fdf) \
+ ({ \
+ struct fd_prepare *fdp = &(_fdf); \
+ VFS_WARN_ON_ONCE(fdp->err); \
+ VFS_WARN_ON_ONCE(fdp->__fd < 0); \
+ VFS_WARN_ON_ONCE(IS_ERR_OR_NULL(fdp->__file)); \
+ fd_install(fdp->__fd, fdp->__file); \
+ fdp->__fd; \
+ })
+
+/* Do not use directly. */
+#define __FD_ADD(_fdf, _fd_flags, _file_owned) \
+ ({ \
+ FD_PREPARE(_fdf, _fd_flags, _file_owned); \
+ s32 ret = _fdf.err; \
+ if (likely(!ret)) \
+ ret = fd_publish(_fdf); \
+ ret; \
+ })
+
+/*
+ * FD_ADD - Allocate and install an fd and file in one step.
+ * @_fd_flags: flags for get_unused_fd_flags()
+ * @_file_owned: struct file to take ownership of
+ *
+ * Returns the allocated fd number, or negative error code on failure.
+ */
+#define FD_ADD(_fd_flags, _file_owned) \
+ __FD_ADD(__UNIQUE_ID(fd_prepare), _fd_flags, _file_owned)
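+
+/*
+ * Illustrative FD_ADD() use (a sketch reusing the dentry_open() example
+ * above, not part of this header):
+ *
+ * return FD_ADD(O_CLOEXEC, dentry_open(&path, O_RDONLY, current_cred()));
+ *
+ * On failure of either step the partially acquired fd/file pair is
+ * released by the cleanup class and a negative error is returned.
+ */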
+
#endif /* __LINUX_FILE_H */
diff --git a/include/linux/file_ref.h b/include/linux/file_ref.h
new file mode 100644
index 000000000000..31551e4cb8f3
--- /dev/null
+++ b/include/linux/file_ref.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_FILE_REF_H
+#define _LINUX_FILE_REF_H
+
+#include <linux/atomic.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+/*
+ * file_ref is a reference count implementation specifically for use by
+ * files. It takes inspiration from rcuref but differs in key aspects
+ * such as support for SLAB_TYPESAFE_BY_RCU type caches.
+ *
+ * FILE_REF_ONEREF FILE_REF_MAXREF
+ * 0x0000000000000000UL 0x7FFFFFFFFFFFFFFFUL
+ * <-------------------valid ------------------->
+ *
+ * FILE_REF_SATURATED
+ * 0x8000000000000000UL 0xA000000000000000UL 0xBFFFFFFFFFFFFFFFUL
+ * <-----------------------saturation zone---------------------->
+ *
+ * FILE_REF_RELEASED FILE_REF_DEAD
+ * 0xC000000000000000UL 0xE000000000000000UL
+ * <-------------------dead zone------------------->
+ *
+ * FILE_REF_NOREF
+ * 0xFFFFFFFFFFFFFFFFUL
+ */
+
+#ifdef CONFIG_64BIT
+#define FILE_REF_ONEREF 0x0000000000000000UL
+#define FILE_REF_MAXREF 0x7FFFFFFFFFFFFFFFUL
+#define FILE_REF_SATURATED 0xA000000000000000UL
+#define FILE_REF_RELEASED 0xC000000000000000UL
+#define FILE_REF_DEAD 0xE000000000000000UL
+#define FILE_REF_NOREF 0xFFFFFFFFFFFFFFFFUL
+#else
+#define FILE_REF_ONEREF 0x00000000U
+#define FILE_REF_MAXREF 0x7FFFFFFFU
+#define FILE_REF_SATURATED 0xA0000000U
+#define FILE_REF_RELEASED 0xC0000000U
+#define FILE_REF_DEAD 0xE0000000U
+#define FILE_REF_NOREF 0xFFFFFFFFU
+#endif
+
+typedef struct {
+#ifdef CONFIG_64BIT
+ atomic64_t refcnt;
+#else
+ atomic_t refcnt;
+#endif
+} file_ref_t;
+
+/**
+ * file_ref_init - Initialize a file reference count
+ * @ref: Pointer to the reference count
+ * @cnt: The initial reference count, typically '1'
+ */
+static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
+{
+ atomic_long_set(&ref->refcnt, cnt - 1);
+}
+
+bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
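+
+/*
+ * Typical lifecycle (a sketch assuming a hypothetical object 'f' with a
+ * file_ref_t member 'ref'; not part of this header):
+ *
+ * file_ref_init(&f->ref, 1);
+ * ...
+ * if (!file_ref_get(&f->ref))
+ * return NULL;
+ * ...
+ * if (file_ref_put(&f->ref))
+ * free_f(f);
+ *
+ * A failed file_ref_get() means the last reference was already put; a
+ * true return from file_ref_put() means the caller must free the object.
+ */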
+
+/**
+ * file_ref_get - Acquire one reference on a file
+ * @ref: Pointer to the reference count
+ *
+ * Similar to atomic_inc_not_zero() but saturates at FILE_REF_MAXREF.
+ *
+ * Provides full memory ordering.
+ *
+ * Return: False if the attempt to acquire a reference failed. This happens
+ * when the last reference has been put already. True if a reference
+ * was successfully acquired.
+ */
+static __always_inline __must_check bool file_ref_get(file_ref_t *ref)
+{
+ /*
+ * Unconditionally increase the reference count with full
+ * ordering. The saturation and dead zones provide enough
+ * tolerance for this.
+ *
+ * If the count is found to be negative, the file in question can
+ * already have been freed and immediately reused due to
+ * SLAB_TYPESAFE_BY_RCU. Hence, unconditionally altering the file
+ * reference count, e.g., to reset it back to the middle of the
+ * dead zone, risks marking someone else's file as dead behind
+ * their back.
+ *
+ * It would be possible to do a careful:
+ *
+ * cnt = atomic_long_inc_return(&ref->refcnt);
+ * if (likely(cnt >= 0))
+ * return true;
+ *
+ * and then something like:
+ *
+ * if (cnt >= FILE_REF_RELEASED)
+ * atomic_long_try_cmpxchg(&ref->refcnt, &cnt, FILE_REF_DEAD);
+ *
+ * to set the value back to the middle of the dead zone. But it's
+ * practically impossible to go from FILE_REF_DEAD back to
+ * FILE_REF_ONEREF: it would take 2^61 (2305843009213693952)
+ * file_ref_get()s to resurrect such a dead file.
+ */
+ return !atomic_long_add_negative(1, &ref->refcnt);
+}
+
+/**
+ * file_ref_inc - Acquire one reference on a file
+ * @ref: Pointer to the reference count
+ *
+ * Acquire an additional reference on a file. Warns if the caller didn't
+ * already hold a reference.
+ */
+static __always_inline void file_ref_inc(file_ref_t *ref)
+{
+ long prior = atomic_long_fetch_inc_relaxed(&ref->refcnt);
+ WARN_ONCE(prior < 0, "file_ref_inc() on a released file reference");
+}
+
+/**
+ * file_ref_put -- Release a file reference
+ * @ref: Pointer to the reference count
+ *
+ * Provides release memory ordering, such that prior loads and stores
+ * are done before, and provides an acquire ordering on success such
+ * that free() must come after.
+ *
+ * Return: True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely release
+ * the object which is protected by the reference counter.
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * release the protected object.
+ */
+static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
+{
+ long cnt;
+
+ /*
+ * While files are SLAB_TYPESAFE_BY_RCU and thus file_ref_put()
+ * calls don't risk UAFs when a file is recycled, it is still
+ * vulnerable to UAFs caused by freeing the whole slab page once
+ * it becomes unused. Preventing file_ref_put() from being
+ * preempted protects against this.
+ */
+ guard(preempt)();
+ /*
+ * Unconditionally decrease the reference count. The saturation
+ * and dead zones provide enough tolerance for this. If this
+ * fails then we need to handle the last reference drop and
+ * cases inside the saturation and dead zones.
+ */
+ cnt = atomic_long_dec_return(&ref->refcnt);
+ if (cnt >= 0)
+ return false;
+ return __file_ref_put(ref, cnt);
+}
+
+/**
+ * file_ref_put_close - drop a reference expecting it would transition to FILE_REF_NOREF
+ * @ref: Pointer to the reference count
+ *
+ * Semantically it is equivalent to calling file_ref_put(), but it trades lower
+ * performance in the face of other CPUs also modifying the refcount for higher
+ * performance when this happens to be the last reference.
+ *
+ * For the last reference file_ref_put() issues 2 atomics. One to drop the
+ * reference and another to transition it to FILE_REF_DEAD. This routine does
+ * the work in one step, but in order to do so it has to pre-read the variable,
+ * which decreases scalability.
+ *
+ *
+ * Use with close() et al, stick to file_ref_put() by default.
+ */
+static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
+{
+ long old;
+
+ old = atomic_long_read(&ref->refcnt);
+ if (likely(old == FILE_REF_ONEREF)) {
+ if (likely(atomic_long_try_cmpxchg(&ref->refcnt, &old, FILE_REF_DEAD)))
+ return true;
+ }
+ return file_ref_put(ref);
+}
+
+/**
+ * file_ref_read - Read the number of file references
+ * @ref: Pointer to the reference count
+ *
+ * Return: The number of held references (0 ... N)
+ */
+static inline unsigned long file_ref_read(file_ref_t *ref)
+{
+ unsigned long c = atomic_long_read(&ref->refcnt);
+
+ /* Return 0 if within the DEAD zone. */
+ return c >= FILE_REF_RELEASED ? 0 : c + 1;
+}
+
+/*
+ * __file_ref_read_raw - Return the value stored in ref->refcnt
+ * @ref: Pointer to the reference count
+ *
+ * Return: The raw value found in the counter
+ *
+ * A hack for file_needs_f_pos_lock(), you probably want to use
+ * file_ref_read() instead.
+ */
+static inline unsigned long __file_ref_read_raw(file_ref_t *ref)
+{
+ return atomic_long_read(&ref->refcnt);
+}
+
+#endif
diff --git a/include/linux/fileattr.h b/include/linux/fileattr.h
new file mode 100644
index 000000000000..f89dcfad3f8f
--- /dev/null
+++ b/include/linux/fileattr.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_FILEATTR_H
+#define _LINUX_FILEATTR_H
+
+/* Flags shared between flags/xflags */
+#define FS_COMMON_FL \
+ (FS_SYNC_FL | FS_IMMUTABLE_FL | FS_APPEND_FL | \
+ FS_NODUMP_FL | FS_NOATIME_FL | FS_DAX_FL | \
+ FS_PROJINHERIT_FL)
+
+#define FS_XFLAG_COMMON \
+ (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND | \
+ FS_XFLAG_NODUMP | FS_XFLAG_NOATIME | FS_XFLAG_DAX | \
+ FS_XFLAG_PROJINHERIT)
+
+/* Read-only inode flags */
+#define FS_XFLAG_RDONLY_MASK \
+ (FS_XFLAG_PREALLOC | FS_XFLAG_HASATTR)
+
+/* Flags to indicate valid value of fsx_ fields */
+#define FS_XFLAG_VALUES_MASK \
+ (FS_XFLAG_EXTSIZE | FS_XFLAG_COWEXTSIZE)
+
+/* Flags for directories */
+#define FS_XFLAG_DIRONLY_MASK \
+ (FS_XFLAG_RTINHERIT | FS_XFLAG_NOSYMLINKS | FS_XFLAG_EXTSZINHERIT)
+
+/* Misc settable flags */
+#define FS_XFLAG_MISC_MASK \
+ (FS_XFLAG_REALTIME | FS_XFLAG_NODEFRAG | FS_XFLAG_FILESTREAM)
+
+#define FS_XFLAGS_MASK \
+ (FS_XFLAG_COMMON | FS_XFLAG_RDONLY_MASK | FS_XFLAG_VALUES_MASK | \
+ FS_XFLAG_DIRONLY_MASK | FS_XFLAG_MISC_MASK)
+
+/*
+ * Merged interface for miscellaneous file attributes. 'flags' originates from
+ * ext* and 'fsx_flags' from xfs. There's some overlap between the two, which
+ * is handled by the VFS helpers, so filesystems are free to implement just one
+ * or both of these sub-interfaces.
+ */
+struct file_kattr {
+ u32 flags; /* flags (FS_IOC_GETFLAGS/FS_IOC_SETFLAGS) */
+ /* struct fsxattr: */
+ u32 fsx_xflags; /* xflags field value (get/set) */
+ u32 fsx_extsize; /* extsize field value (get/set) */
+ u32 fsx_nextents; /* nextents field value (get) */
+ u32 fsx_projid; /* project identifier (get/set) */
+ u32 fsx_cowextsize; /* CoW extsize field value (get/set) */
+ /* selectors: */
+ bool flags_valid:1;
+ bool fsx_valid:1;
+};
+
+int copy_fsxattr_to_user(const struct file_kattr *fa, struct fsxattr __user *ufa);
+
+void fileattr_fill_xflags(struct file_kattr *fa, u32 xflags);
+void fileattr_fill_flags(struct file_kattr *fa, u32 flags);
+
+/**
+ * fileattr_has_fsx - check for extended flags/attributes
+ * @fa: fileattr pointer
+ *
+ * Return: true if any attributes are present that are not represented in
+ * ->flags.
+ */
+static inline bool fileattr_has_fsx(const struct file_kattr *fa)
+{
+ return fa->fsx_valid &&
+ ((fa->fsx_xflags & ~FS_XFLAG_COMMON) || fa->fsx_extsize != 0 ||
+ fa->fsx_projid != 0 || fa->fsx_cowextsize != 0);
+}
+
+int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
+int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct file_kattr *fa);
+int ioctl_getflags(struct file *file, unsigned int __user *argp);
+int ioctl_setflags(struct file *file, unsigned int __user *argp);
+int ioctl_fsgetxattr(struct file *file, void __user *argp);
+int ioctl_fssetxattr(struct file *file, void __user *argp);
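+
+/*
+ * Shape of a filesystem's ->fileattr_get built on the fill helper (a
+ * sketch with a hypothetical foo_inode(); not part of this header):
+ *
+ * static int foo_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
+ * {
+ * fileattr_fill_flags(fa, foo_inode(d_inode(dentry))->i_flags);
+ * return 0;
+ * }
+ */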
+
+#endif /* _LINUX_FILEATTR_H */
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
new file mode 100644
index 000000000000..54b824c05299
--- /dev/null
+++ b/include/linux/filelock.h
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FILELOCK_H
+#define _LINUX_FILELOCK_H
+
+#include <linux/fs.h>
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_DELEG 4 /* NFSv4 delegation */
+#define FL_ACCESS 8 /* not trying to lock, just looking */
+#define FL_EXISTS 16 /* when unlocking, test for existence */
+#define FL_LEASE 32 /* lease held on this file */
+#define FL_CLOSE 64 /* unlock on close */
+#define FL_SLEEP 128 /* A blocking lock */
+#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
+#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
+#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
+#define FL_LAYOUT 2048 /* outstanding pNFS layout */
+#define FL_RECLAIM 4096 /* reclaiming from a reboot server */
+
+#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
+
+/*
+ * Special return value from posix_lock_file() and vfs_lock_file() for
+ * asynchronous locking.
+ */
+#define FILE_LOCK_DEFERRED 1
+
+struct file_lock;
+struct file_lease;
+struct delegation;
+
+struct file_lock_operations {
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+ void (*fl_release_private)(struct file_lock *);
+};
+
+struct lock_manager_operations {
+ void *lm_mod_owner;
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
+ void (*lm_notify)(struct file_lock *); /* unblock callback */
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_lock_expirable)(struct file_lock *cfl);
+ void (*lm_expire_lock)(void);
+};
+
+struct lease_manager_operations {
+ bool (*lm_break)(struct file_lease *);
+ int (*lm_change)(struct file_lease *, int, struct list_head *);
+ void (*lm_setup)(struct file_lease *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lease *);
+};
+
+struct lock_manager {
+ struct list_head list;
+ /*
+ * NFSv4 and up also want opens blocked during the grace period;
+ * NLM doesn't care:
+ */
+ bool block_opens;
+};
+
+struct net;
+void locks_start_grace(struct net *, struct lock_manager *);
+void locks_end_grace(struct lock_manager *);
+bool locks_in_grace(struct net *);
+bool opens_in_grace(struct net *);
+
+/*
+ * struct file_lock has a union that some filesystems use to track
+ * their own private info. The NFS side of things is defined here:
+ */
+#include <linux/nfs_fs_i.h>
+
+/*
+ * struct file_lock represents a generic "file lock". It's used to represent
+ * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
+ * note that the same struct is used to represent both a request for a lock and
+ * the lock itself, but the same object is never used for both.
+ *
+ * FIXME: should we create a separate "struct lock_request" to help distinguish
+ * these two uses?
+ *
+ * The various i_flctx lists are ordered by:
+ *
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
+ *
+ * Obviously, the last two criteria only matter for POSIX locks.
+ */
+
+struct file_lock_core {
+ struct file_lock_core *flc_blocker; /* The lock that is blocking us */
+ struct list_head flc_list; /* link into file_lock_context */
+ struct hlist_node flc_link; /* node in global lists */
+ struct list_head flc_blocked_requests; /* list of requests with
+ * ->flc_blocker pointing here
+ */
+ struct list_head flc_blocked_member; /* node in
+ * ->flc_blocker->flc_blocked_requests
+ */
+ fl_owner_t flc_owner;
+ unsigned int flc_flags;
+ unsigned char flc_type;
+ pid_t flc_pid;
+ int flc_link_cpu; /* what cpu's list is this on? */
+ wait_queue_head_t flc_wait;
+ struct file *flc_file;
+};
+
+struct file_lock {
+ struct file_lock_core c;
+ loff_t fl_start;
+ loff_t fl_end;
+
+ const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
+ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
+ union {
+ struct nfs_lock_info nfs_fl;
+ struct nfs4_lock_info nfs4_fl;
+ struct {
+ struct list_head link; /* link in AFS vnode's pending_locks list */
+ int state; /* state of grant or error if -ve */
+ unsigned int debug_id;
+ } afs;
+ struct {
+ struct inode *inode;
+ } ceph;
+ } fl_u;
+} __randomize_layout;
+
+struct file_lease {
+ struct file_lock_core c;
+ struct fasync_struct * fl_fasync; /* for lease break notifications */
+ /* for lease breaks: */
+ unsigned long fl_break_time;
+ unsigned long fl_downgrade_time;
+ const struct lease_manager_operations *fl_lmops; /* Callbacks for lease managers */
+} __randomize_layout;
+
+struct file_lock_context {
+ spinlock_t flc_lock;
+ struct list_head flc_flock;
+ struct list_head flc_posix;
+ struct list_head flc_lease;
+};
+
+#ifdef CONFIG_FILE_LOCKING
+int fcntl_getlk(struct file *, unsigned int, struct flock *);
+int fcntl_setlk(unsigned int, struct file *, unsigned int,
+ struct flock *);
+
+#if BITS_PER_LONG == 32
+int fcntl_getlk64(struct file *, unsigned int, struct flock64 *);
+int fcntl_setlk64(unsigned int, struct file *, unsigned int,
+ struct flock64 *);
+#endif
+
+int fcntl_setlease(unsigned int fd, struct file *filp, int arg);
+int fcntl_getlease(struct file *filp);
+int fcntl_setdeleg(unsigned int fd, struct file *filp, struct delegation *deleg);
+int fcntl_getdeleg(struct file *filp, struct delegation *deleg);
+
+static inline bool lock_is_unlock(struct file_lock *fl)
+{
+ return fl->c.flc_type == F_UNLCK;
+}
+
+static inline bool lock_is_read(struct file_lock *fl)
+{
+ return fl->c.flc_type == F_RDLCK;
+}
+
+static inline bool lock_is_write(struct file_lock *fl)
+{
+ return fl->c.flc_type == F_WRLCK;
+}
+
+static inline void locks_wake_up_waiter(struct file_lock_core *flc)
+{
+ wake_up(&flc->flc_wait);
+}
+
+static inline void locks_wake_up(struct file_lock *fl)
+{
+ locks_wake_up_waiter(&fl->c);
+}
+
+static inline bool locks_can_async_lock(const struct file_operations *fops)
+{
+ return !fops->lock || fops->fop_flags & FOP_ASYNC_LOCK;
+}
+
+/* fs/locks.c */
+void locks_free_lock_context(struct inode *inode);
+void locks_free_lock(struct file_lock *fl);
+void locks_init_lock(struct file_lock *);
+struct file_lock *locks_alloc_lock(void);
+void locks_copy_lock(struct file_lock *, struct file_lock *);
+void locks_copy_conflock(struct file_lock *, struct file_lock *);
+void locks_remove_posix(struct file *, fl_owner_t);
+void locks_remove_file(struct file *);
+void locks_release_private(struct file_lock *);
+void posix_test_lock(struct file *, struct file_lock *);
+int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+int locks_delete_block(struct file_lock *);
+int vfs_test_lock(struct file *, struct file_lock *);
+int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+bool vfs_inode_has_locks(struct inode *inode);
+int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
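+
+/*
+ * Illustrative sketch (not part of this header): taking a blocking
+ * whole-file lock. The same struct file_lock carries the request;
+ * FL_SLEEP asks locks_lock_inode_wait() to wait out conflicts.
+ *
+ *	struct file_lock *fl = locks_alloc_lock();
+ *
+ *	if (!fl)
+ *		return -ENOMEM;
+ *	fl->c.flc_file = filp;
+ *	fl->c.flc_owner = filp;
+ *	fl->c.flc_pid = current->tgid;
+ *	fl->c.flc_flags = FL_FLOCK | FL_SLEEP;
+ *	fl->c.flc_type = F_WRLCK;
+ *	fl->fl_end = OFFSET_MAX;
+ *	error = locks_lock_inode_wait(file_inode(filp), fl);
+ */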
+
+void locks_init_lease(struct file_lease *);
+void locks_free_lease(struct file_lease *fl);
+struct file_lease *locks_alloc_lease(void);
+
+#define LEASE_BREAK_LEASE BIT(0) // break leases and delegations
+#define LEASE_BREAK_DELEG BIT(1) // break delegations only
+#define LEASE_BREAK_LAYOUT BIT(2) // break layouts only
+#define LEASE_BREAK_NONBLOCK BIT(3) // non-blocking break
+#define LEASE_BREAK_OPEN_RDONLY BIT(4) // readonly open event
+
+int __break_lease(struct inode *inode, unsigned int flags);
+void lease_get_mtime(struct inode *, struct timespec64 *time);
+int generic_setlease(struct file *, int, struct file_lease **, void **priv);
+int kernel_setlease(struct file *, int, struct file_lease **, void **);
+int vfs_setlease(struct file *, int, struct file_lease **, void **);
+int lease_modify(struct file_lease *, int, struct list_head *);
+
+struct notifier_block;
+int lease_register_notifier(struct notifier_block *);
+void lease_unregister_notifier(struct notifier_block *);
+
+struct files_struct;
+void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files);
+bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner);
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return smp_load_acquire(&inode->i_flctx);
+}
+
+#else /* !CONFIG_FILE_LOCKING */
+static inline int fcntl_getlk(struct file *file, unsigned int cmd,
+ struct flock __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock __user *user)
+{
+ return -EACCES;
+}
+
+#if BITS_PER_LONG == 32
+static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
+ struct flock64 *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk64(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock64 *user)
+{
+ return -EACCES;
+}
+#endif
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_getlease(struct file *filp)
+{
+ return F_UNLCK;
+}
+
+static inline int fcntl_setdeleg(unsigned int fd, struct file *filp, struct delegation *deleg)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_getdeleg(struct file *filp, struct delegation *deleg)
+{
+ return -EINVAL;
+}
+
+static inline bool lock_is_unlock(struct file_lock *fl)
+{
+ return false;
+}
+
+static inline bool lock_is_read(struct file_lock *fl)
+{
+ return false;
+}
+
+static inline bool lock_is_write(struct file_lock *fl)
+{
+ return false;
+}
+
+static inline void locks_wake_up(struct file_lock *fl)
+{
+}
+
+static inline void
+locks_free_lock_context(struct inode *inode)
+{
+}
+
+static inline void locks_init_lock(struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_init_lease(struct file_lease *fl)
+{
+ return;
+}
+
+static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
+{
+ return;
+}
+
+static inline void locks_remove_file(struct file *filp)
+{
+ return;
+}
+
+static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return;
+}
+
+static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+{
+ return -ENOLCK;
+}
+
+static inline int locks_delete_block(struct file_lock *waiter)
+{
+ return -ENOENT;
+}
+
+static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
+ struct file_lock *fl, struct file_lock *conf)
+{
+ return -ENOLCK;
+}
+
+static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline bool vfs_inode_has_locks(struct inode *inode)
+{
+ return false;
+}
+
+static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+{
+ return -ENOLCK;
+}
+
+static inline int __break_lease(struct inode *inode, unsigned int flags)
+{
+ return 0;
+}
+
+static inline void lease_get_mtime(struct inode *inode,
+ struct timespec64 *time)
+{
+ return;
+}
+
+static inline int generic_setlease(struct file *filp, int arg,
+ struct file_lease **flp, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int kernel_setlease(struct file *filp, int arg,
+ struct file_lease **lease, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int vfs_setlease(struct file *filp, int arg,
+ struct file_lease **lease, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int lease_modify(struct file_lease *fl, int arg,
+ struct list_head *dispose)
+{
+ return -EINVAL;
+}
+
+struct files_struct;
+static inline void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files) {}
+static inline bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner)
+{
+ return false;
+}
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return NULL;
+}
+
+#endif /* !CONFIG_FILE_LOCKING */
+
+/* for walking lists of file_locks linked by flc_list */
+#define for_each_file_lock(_fl, _head) list_for_each_entry(_fl, _head, c.flc_list)
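+
+/*
+ * Illustrative sketch (not part of this header): walking an inode's
+ * POSIX locks. The i_flctx lists may only be traversed under flc_lock.
+ *
+ *	spin_lock(&flctx->flc_lock);
+ *	for_each_file_lock(fl, &flctx->flc_posix) {
+ *		if (fl->c.flc_owner == owner)
+ *			...;
+ *	}
+ *	spin_unlock(&flctx->flc_lock);
+ */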
+
+static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return locks_lock_inode_wait(file_inode(filp), fl);
+}
+
+#ifdef CONFIG_FILE_LOCKING
+static inline unsigned int openmode_to_lease_flags(unsigned int mode)
+{
+ unsigned int flags = 0;
+
+ if ((mode & O_ACCMODE) == O_RDONLY)
+ flags |= LEASE_BREAK_OPEN_RDONLY;
+ if (mode & O_NONBLOCK)
+ flags |= LEASE_BREAK_NONBLOCK;
+ return flags;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ struct file_lock_context *flctx;
+
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
+ smp_mb();
+ if (!list_empty_careful(&flctx->flc_lease))
+ return __break_lease(inode, LEASE_BREAK_LEASE | openmode_to_lease_flags(mode));
+ return 0;
+}
+
+static inline int break_deleg(struct inode *inode, unsigned int flags)
+{
+ struct file_lock_context *flctx;
+
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
+ smp_mb();
+ if (!list_empty_careful(&flctx->flc_lease)) {
+ flags |= LEASE_BREAK_DELEG;
+ return __break_lease(inode, flags);
+ }
+ return 0;
+}
+
+struct delegated_inode {
+ struct inode *di_inode;
+};
+
+static inline bool is_delegated(struct delegated_inode *di)
+{
+ return di->di_inode;
+}
+
+static inline int try_break_deleg(struct inode *inode,
+ struct delegated_inode *di)
+{
+ int ret;
+
+ ret = break_deleg(inode, LEASE_BREAK_NONBLOCK);
+ if (ret == -EWOULDBLOCK && di) {
+ di->di_inode = inode;
+ ihold(inode);
+ }
+ return ret;
+}
+
+static inline int break_deleg_wait(struct delegated_inode *di)
+{
+ int ret;
+
+ ret = break_deleg(di->di_inode, 0);
+ iput(di->di_inode);
+ di->di_inode = NULL;
+ return ret;
+}
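+
+/*
+ * Illustrative sketch (not part of this header): the usual VFS retry
+ * pattern. A non-blocking break is attempted while holding the parent
+ * lock; if it would block, the lock is dropped and the break is waited
+ * on before retrying.
+ *
+ *	struct delegated_inode di = { };
+ *	int error;
+ *retry:
+ *	inode_lock(dir);
+ *	error = try_break_deleg(inode, &di);
+ *	...
+ *	inode_unlock(dir);
+ *	if (is_delegated(&di)) {
+ *		error = break_deleg_wait(&di);
+ *		if (!error)
+ *			goto retry;
+ *	}
+ */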
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) {
+ unsigned int flags = LEASE_BREAK_LAYOUT;
+
+ if (!wait)
+ flags |= LEASE_BREAK_NONBLOCK;
+
+ return __break_lease(inode, flags);
+ }
+ return 0;
+}
+
+#else /* !CONFIG_FILE_LOCKING */
+struct delegated_inode { };
+
+static inline bool is_delegated(struct delegated_inode *di)
+{
+ return false;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline int break_deleg(struct inode *inode, unsigned int flags)
+{
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode,
+ struct delegated_inode *delegated_inode)
+{
+ return 0;
+}
+
+static inline int break_deleg_wait(struct delegated_inode *delegated_inode)
+{
+ BUG();
+ return 0;
+}
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ return 0;
+}
+
+#endif /* CONFIG_FILE_LOCKING */
+
+#endif /* _LINUX_FILELOCK_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index bfef1e5734f8..fd54fed8f95f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1,12 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux Socket Filter Data Structures
*/
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
-#include <stdarg.h>
-
#include <linux/atomic.h>
+#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
@@ -14,19 +14,30 @@
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/capability.h>
-#include <linux/cryptohash.h>
#include <linux/set_memory.h>
+#include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+#include <linux/sockptr.h>
+#include <crypto/sha1.h>
+#include <linux/u64_stats_sync.h>
#include <net/sch_generic.h>
+#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
+struct xdp_rxq_info;
+struct xdp_buff;
+struct sock_reuseport;
+struct ctl_table;
+struct ctl_table_header;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -43,20 +54,41 @@ struct bpf_prog_aux;
/* Additional register mappings for converted user programs. */
#define BPF_REG_A BPF_REG_0
#define BPF_REG_X BPF_REG_7
-#define BPF_REG_TMP BPF_REG_8
+#define BPF_REG_TMP BPF_REG_2 /* scratch reg */
+#define BPF_REG_D BPF_REG_8 /* data, callee-saved */
+#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
-/* Kernel hidden auxiliary/helper register for hardening step.
- * Only used by eBPF JITs. It's nothing more than a temporary
- * register that JITs use internally, only that here it's part
- * of eBPF instructions that have been rewritten for blinding
- * constants. See JIT pre-step in bpf_jit_blind_constants().
- */
+/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX MAX_BPF_REG
-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
+/* unused opcode to mark special load instruction. Same as BPF_ABS */
+#define BPF_PROBE_MEM 0x20
+
+/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
+#define BPF_PROBE_MEMSX 0x40
+
+/* unused opcode to mark special load instruction. Same as BPF_MSH */
+#define BPF_PROBE_MEM32 0xa0
+
+/* unused opcode to mark special atomic instruction */
+#define BPF_PROBE_ATOMIC 0xe0
+
+/* unused opcode to mark special ldsx instruction. Same as BPF_NOSPEC */
+#define BPF_PROBE_MEM32SX 0xc0
+
+/* unused opcode to mark call to interpreter with arguments */
+#define BPF_CALL_ARGS 0xe0
+
+/* unused opcode to mark speculation barrier for mitigating
+ * Spectre v1 and v4
+ */
+#define BPF_NOSPEC 0xc0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -70,39 +102,49 @@ struct bpf_prog_aux;
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
-#define BPF_ALU64_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
-#define BPF_ALU32_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG(OP, DST, SRC) \
+ BPF_ALU64_REG_OFF(OP, DST, SRC, 0)
+
+#define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
+#define BPF_ALU32_REG(OP, DST, SRC) \
+ BPF_ALU32_REG_OFF(OP, DST, SRC, 0)
+
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
-#define BPF_ALU64_IMM(OP, DST, IMM) \
+#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+ BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)
-#define BPF_ALU32_IMM(OP, DST, IMM) \
+#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+ BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
@@ -114,6 +156,16 @@ struct bpf_prog_aux;
.off = 0, \
.imm = LEN })
+/* Byte Swap, bswap16/32/64 */
+
+#define BPF_BSWAP(DST, LEN) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = LEN })
+
/* Short form of mov, dst_reg = src_reg */
#define BPF_MOV64_REG(DST, SRC) \
@@ -132,6 +184,25 @@ struct bpf_prog_aux;
.off = 0, \
.imm = 0 })
+/* Special (internal-only) form of mov, used to resolve per-CPU addrs:
+ * dst_reg = src_reg + <percpu_base_off>
+ * BPF_ADDR_PERCPU is used as a special insn->off value.
+ */
+#define BPF_ADDR_PERCPU (-1)
+
+#define BPF_MOV64_PERCPU_REG(DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = BPF_ADDR_PERCPU, \
+ .imm = 0 })
+
+static inline bool insn_is_mov_percpu_addr(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_PERCPU;
+}
+
/* Short form of mov, dst_reg = imm32 */
#define BPF_MOV64_IMM(DST, IMM) \
@@ -150,6 +221,48 @@ struct bpf_prog_aux;
.off = 0, \
.imm = IMM })
+/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
+
+#define BPF_MOVSX64_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+#define BPF_MOVSX32_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Special form of mov32, used for doing explicit zero extension on dst. */
+#define BPF_ZEXT_REG(DST) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = DST, \
+ .off = 0, \
+ .imm = 1 })
+
+static inline bool insn_is_zext(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
+}
+
+/* addr_space_cast from as(0) to as(1) is for converting bpf arena pointers
+ * to pointers in user vma.
+ */
+static inline bool insn_is_cast_user(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
+ insn->off == BPF_ADDR_SPACE_CAST &&
+ insn->imm == 1U << 16;
+}
+
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)
@@ -220,6 +333,16 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = 0 })
+/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */
+
+#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
@@ -230,15 +353,34 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = 0 })
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+/*
+ * Atomic operations:
+ *
+ * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
+ * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
+ * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
+ * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
+ * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
+ * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
+ * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
+ * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
+ * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
+ * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ * BPF_LOAD_ACQ dst_reg = smp_load_acquire(src_reg + off16)
+ * BPF_STORE_REL smp_store_release(dst_reg + off16, src_reg)
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
((struct bpf_insn) { \
- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
- .imm = 0 })
+ .imm = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
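+
+/*
+ * Illustrative sketch (not part of this patch): a 64-bit atomic
+ * fetch-add. R0 is added to *(u64 *)(R1 + 0); with BPF_FETCH the old
+ * value is returned in the source register, here R0.
+ *
+ *	BPF_MOV64_IMM(BPF_REG_0, 1),
+ *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_0, 0),
+ */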
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
@@ -270,6 +412,26 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
/* Unconditional jumps, goto pc + off16 */
#define BPF_JMP_A(OFF) \
@@ -280,7 +442,29 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = 0 })
-/* Function call */
+/* Unconditional jumps, gotol pc + imm32 */
+
+#define BPF_JMP32_A(IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_JA, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Relative call */
+
+#define BPF_CALL_REL(TGT) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_CALL, \
+ .dst_reg = 0, \
+ .src_reg = BPF_PSEUDO_CALL, \
+ .off = 0, \
+ .imm = TGT })
+
+/* Convert function address to BPF immediate */
+
+#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@ -288,7 +472,17 @@ struct bpf_prog_aux;
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
+
+/* Kfunc call */
+
+#define BPF_CALL_KFUNC(OFF, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_CALL, \
+ .dst_reg = 0, \
+ .src_reg = BPF_PSEUDO_KFUNC_CALL, \
+ .off = OFF, \
+ .imm = IMM })
/* Raw code statement block */
@@ -310,6 +504,16 @@ struct bpf_prog_aux;
.off = 0, \
.imm = 0 })
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC() \
+ ((struct bpf_insn) { \
+ .code = BPF_ST | BPF_NOSPEC, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
/* Internal classic blocks for direct assignment */
#define __BPF_STMT(CODE, K) \
@@ -359,14 +563,14 @@ struct bpf_prog_aux;
#define BPF_FIELD_SIZEOF(type, field) \
({ \
- const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+ const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
BUILD_BUG_ON(__size < 0); \
__size; \
})
#define BPF_LDST_BYTES(insn) \
({ \
- const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \
+ const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
WARN_ON(__size < 0); \
__size; \
})
@@ -403,76 +607,76 @@ struct bpf_prog_aux;
__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
u64, __ur_3, u64, __ur_4, u64, __ur_5)
-#define BPF_CALL_x(x, name, ...) \
+#define BPF_CALL_x(x, attr, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
- return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
-#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
-#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
-#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
-#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
-#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
-#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+#define __NOATTR
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
+
+#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
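+
+/*
+ * Illustrative sketch (not part of this patch): defining a helper with
+ * the BPF calling convention. bpf_skb_example is a hypothetical name;
+ * the macro unpacks the five u64 argument registers into typed args.
+ *
+ *	BPF_CALL_2(bpf_skb_example, struct sk_buff *, skb, u32, len)
+ *	{
+ *		return skb->len >= len;
+ *	}
+ */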
#define bpf_ctx_range(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
+#if BITS_PER_LONG == 64
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
+#else
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
+#endif /* BITS_PER_LONG == 64 */
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
- BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \
+ BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \
*(PTR_SIZE) = (SIZE); \
offsetof(TYPE, MEMBER); \
})
-#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
u16 len;
compat_uptr_t filter; /* struct sock_filter * */
};
-#endif
struct sock_fprog_kern {
u16 len;
struct sock_filter *filter;
};
+/* Some arches need doubleword alignment for their instructions and/or data */
+#define BPF_IMAGE_ALIGNMENT 8
+
struct bpf_binary_header {
- unsigned int pages;
- u8 image[];
+ u32 size;
+ u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};
-struct bpf_prog {
- u16 pages; /* Number of allocated pages */
- kmemcheck_bitfield_begin(meta);
- u16 jited:1, /* Is our filter JIT'ed? */
- locked:1, /* Program image locked? */
- gpl_compatible:1, /* Is filter GPL compatible? */
- cb_access:1, /* Is control block accessed? */
- dst_needed:1; /* Do we need dst entry? */
- kmemcheck_bitfield_end(meta);
- enum bpf_prog_type type; /* Type of BPF program */
- u32 len; /* Number of filter blocks */
- u32 jited_len; /* Size of jited insns in bytes */
- u8 tag[BPF_TAG_SIZE];
- struct bpf_prog_aux *aux; /* Auxiliary fields */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
- unsigned int (*bpf_func)(const void *ctx,
- const struct bpf_insn *insn);
- /* Instructions for interpreter */
- union {
- struct sock_filter insns[0];
- struct bpf_insn insnsi[0];
- };
+struct bpf_prog_stats {
+ u64_stats_t cnt;
+ u64_stats_t nsecs;
+ u64_stats_t misses;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
+
+struct bpf_timed_may_goto {
+ u64 count;
+ u64 timestamp;
};
struct sk_filter {
@@ -481,33 +685,266 @@ struct sk_filter {
struct bpf_prog *prog;
};
-#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
+DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
+
+extern struct mutex nf_conn_btf_access_lock;
+extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size);
+
+typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
+ const struct bpf_insn *insnsi,
+ unsigned int (*bpf_func)(const void *,
+ const struct bpf_insn *));
+
+static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
+ const void *ctx,
+ bpf_dispatcher_fn dfunc)
+{
+ u32 ret;
+
+ cant_migrate();
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+ struct bpf_prog_stats *stats;
+ u64 duration, start = sched_clock();
+ unsigned long flags;
+
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+
+ duration = sched_clock() - start;
+ if (likely(prog->stats)) {
+ stats = this_cpu_ptr(prog->stats);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->cnt);
+ u64_stats_add(&stats->nsecs, duration);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ }
+ } else {
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ }
+ return ret;
+}
+
+static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
+{
+ return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
+}
+
+/*
+ * Use in preemptible and therefore migratable context to make sure that
+ * the execution of the BPF program runs on one CPU.
+ *
+ * This uses migrate_disable/enable() explicitly to document that the
+ * invocation of a BPF program does not require reentrancy protection
+ * against a BPF program which is invoked from a preempting task.
+ */
+static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
+ const void *ctx)
+{
+ u32 ret;
+
+ migrate_disable();
+ ret = bpf_prog_run(prog, ctx);
+ migrate_enable();
+ return ret;
+}
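+
+/*
+ * Illustrative sketch (not part of this patch): running a program from
+ * preemptible process context. RCU keeps the program alive across the
+ * run; prog_ptr is a hypothetical RCU-protected holder.
+ *
+ *	rcu_read_lock();
+ *	prog = rcu_dereference(prog_ptr);
+ *	if (prog)
+ *		ret = bpf_prog_run_pin_on_cpu(prog, ctx);
+ *	rcu_read_unlock();
+ */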
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
struct bpf_skb_data_end {
struct qdisc_skb_cb qdisc_cb;
+ void *data_meta;
void *data_end;
};
-struct xdp_buff {
- void *data;
- void *data_end;
- void *data_hard_start;
+struct bpf_nh_params {
+ u32 nh_family;
+ union {
+ u32 ipv4_nh;
+ struct in6_addr ipv6_nh;
+ };
+};
+
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+#define BPF_RI_F_RI_INIT BIT(1)
+#define BPF_RI_F_CPU_MAP_INIT BIT(2)
+#define BPF_RI_F_DEV_MAP_INIT BIT(3)
+#define BPF_RI_F_XSK_MAP_INIT BIT(4)
+
+struct bpf_redirect_info {
+ u64 tgt_index;
+ void *tgt_value;
+ struct bpf_map *map;
+ u32 flags;
+ u32 map_id;
+ enum bpf_map_type map_type;
+ struct bpf_nh_params nh;
+ u32 kern_flags;
+};
+
+struct bpf_net_context {
+ struct bpf_redirect_info ri;
+ struct list_head cpu_map_flush_list;
+ struct list_head dev_map_flush_list;
+ struct list_head xskmap_map_flush_list;
};
-/* compute the linear packet data range [data, data_end) which
- * will be accessed by cls_bpf, act_bpf and lwt programs
+static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
+{
+ struct task_struct *tsk = current;
+
+ if (tsk->bpf_net_context != NULL)
+ return NULL;
+ bpf_net_ctx->ri.kern_flags = 0;
+
+ tsk->bpf_net_context = bpf_net_ctx;
+ return bpf_net_ctx;
+}
+
+static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx)
+{
+ if (bpf_net_ctx)
+ current->bpf_net_context = NULL;
+}
+
+static inline struct bpf_net_context *bpf_net_ctx_get(void)
+{
+ return current->bpf_net_context;
+}
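+
+/*
+ * Illustrative sketch (not part of this patch): entry points that run
+ * XDP programs install a per-task context for the duration of the run
+ * and clear it again afterwards.
+ *
+ *	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ *
+ *	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ *	... run programs, xdp_do_redirect(), xdp_do_flush() ...
+ *	bpf_net_ctx_clear(bpf_net_ctx);
+ */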
+
+static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) {
+ memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh));
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT;
+ }
+
+ return &bpf_net_ctx->ri;
+}
+
+static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->cpu_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->dev_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->xskmap_map_flush_list;
+}
+
+static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map,
+ struct list_head **lh_dev,
+ struct list_head **lh_xsk)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+ u32 kern_flags = bpf_net_ctx->ri.kern_flags;
+ struct list_head *lh;
+
+ *lh_map = *lh_dev = *lh_xsk = NULL;
+
+ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
+ return;
+
+ lh = &bpf_net_ctx->dev_map_flush_list;
+ if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh))
+ *lh_dev = lh;
+
+ lh = &bpf_net_ctx->cpu_map_flush_list;
+ if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh))
+ *lh_map = lh;
+
+ lh = &bpf_net_ctx->xskmap_map_flush_list;
+ if (IS_ENABLED(CONFIG_XDP_SOCKETS) &&
+ kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh))
+ *lh_xsk = lh;
+}
+
+/* Compute the linear packet data range [data, data_end) which
+ * will be accessed by various program types (cls_bpf, act_bpf,
+ * lwt, ...). Subsystems allowing direct data access must (!)
+ * ensure that the cb[] area can be written to when the BPF program
+ * is invoked (otherwise cb[] save/restore is necessary).
+ */
+static inline void bpf_compute_data_pointers(struct sk_buff *skb)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+ cb->data_meta = skb->data - skb_metadata_len(skb);
+ cb->data_end = skb->data + skb_headlen(skb);
+}
+
+static inline int bpf_prog_run_data_pointers(
+ const struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+ void *save_data_meta, *save_data_end;
+ int res;
+
+ save_data_meta = cb->data_meta;
+ save_data_end = cb->data_end;
+
+ bpf_compute_data_pointers(skb);
+ res = bpf_prog_run(prog, skb);
+
+ cb->data_meta = save_data_meta;
+ cb->data_end = save_data_end;
+
+ return res;
+}
+
+/* Similar to bpf_compute_data_pointers(), except that the original
+ * cb->data_end is saved in *saved_data_end so it can be restored later.
*/
-static inline void bpf_compute_data_end(struct sk_buff *skb)
+static inline void bpf_compute_and_save_data_end(
+ struct sk_buff *skb, void **saved_data_end)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
- BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
- cb->data_end = skb->data + skb_headlen(skb);
+ *saved_data_end = cb->data_end;
+ cb->data_end = skb->data + skb_headlen(skb);
}
-static inline u8 *bpf_skb_cb(struct sk_buff *skb)
+/* Restore data saved by bpf_compute_and_save_data_end(). */
+static inline void bpf_restore_data_end(
+ struct sk_buff *skb, void *saved_data_end)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ cb->data_end = saved_data_end;
+}
+
+static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
* data between tail calls. Since this also needs to work with
@@ -519,16 +956,18 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
* attached to sockets, we need to clear the bpf_skb_cb() area
* to not leak previous contents to user space.
*/
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
- FIELD_SIZEOF(struct qdisc_skb_cb, data));
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
+ sizeof_field(struct qdisc_skb_cb, data));
return qdisc_skb_cb(skb)->data;
}
-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
- struct sk_buff *skb)
+/* Must be invoked with migration disabled */
+static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
+ const void *ctx)
{
+ const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
u32 res;
@@ -538,7 +977,7 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
memset(cb_data, 0, sizeof(cb_saved));
}
- res = BPF_PROG_RUN(prog, skb);
+ res = bpf_prog_run(prog, skb);
if (unlikely(prog->cb_access))
memcpy(cb_data, cb_saved, sizeof(cb_saved));
@@ -546,40 +985,43 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
return res;
}
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ u32 res;
+
+ migrate_disable();
+ res = __bpf_prog_run_save_cb(prog, skb);
+ migrate_enable();
+ return res;
+}
+
static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
u8 *cb_data = bpf_skb_cb(skb);
+ u32 res;
if (unlikely(prog->cb_access))
memset(cb_data, 0, BPF_SKB_CB_LEN);
- return BPF_PROG_RUN(prog, skb);
+ res = bpf_prog_run_pin_on_cpu(prog, skb);
+ return res;
}
-static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
- struct xdp_buff *xdp)
-{
- /* Caller needs to hold rcu_read_lock() (!), otherwise program
- * can be released while still running, or map elements could be
- * freed early while still having concurrent users. XDP fastpath
- * already takes rcu_read_lock() when fetching the program, so
- * it's not necessary here anymore.
- */
- return BPF_PROG_RUN(prog, xdp);
-}
+DECLARE_BPF_DISPATCHER(xdp)
+
+DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
+
+u32 xdp_master_redirect(struct xdp_buff *xdp);
+
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
return prog->len * sizeof(struct bpf_insn);
}
-static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
-{
- return round_up(bpf_prog_insn_size(prog) +
- sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
-}
-
static inline unsigned int bpf_prog_size(unsigned int proglen)
{
return max(sizeof(struct bpf_prog),
@@ -596,91 +1038,94 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
return prog->type == BPF_PROG_TYPE_UNSPEC;
}
-static inline bool
-bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
+static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
- bool off_ok;
-#ifdef __LITTLE_ENDIAN
- off_ok = (off & (size_default - 1)) == 0;
-#else
- off_ok = (off & (size_default - 1)) + size == size_default;
-#endif
- return off_ok && size <= size_default && (size & (size - 1)) == 0;
-}
+ const u32 size_machine = sizeof(unsigned long);
-#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+ if (size > size_machine && size % size_machine == 0)
+ size = size_machine;
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
- fp->locked = 1;
- WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
+ return size;
}
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+static inline bool
+bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
- if (fp->locked) {
- WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
- /* In case set_memory_rw() fails, we want to be the first
- * to crash here instead of some random place later on.
- */
- fp->locked = 0;
- }
+ return size <= size_default && (size & (size - 1)) == 0;
}
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+static inline u8
+bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
{
- WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
+ u8 access_off = off & (size_default - 1);
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
- WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
+#ifdef __LITTLE_ENDIAN
+ return access_off;
#else
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
+ return size_default - (access_off + size);
+#endif
}
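+
+/*
+ * Worked example (illustrative): with size_default == 4, a 1-byte
+ * narrow load at off == 2 reads from byte offset 2 on little-endian,
+ * but from byte offset 4 - (2 + 1) == 1 on big-endian.
+ */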
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-}
+#define bpf_ctx_wide_access_ok(off, size, type, field) \
+ (size == sizeof(__u64) && \
+ off >= offsetof(type, field) && \
+ off + sizeof(__u64) <= offsetofend(type, field) && \
+ off % sizeof(__u64) == 0)
+
+#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp)
{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ if (!fp->jited) {
+ set_vm_flush_reset_perms(fp);
+ return set_memory_ro((unsigned long)fp, fp->pages);
+ }
+#endif
+ return 0;
}
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+static inline int __must_check
+bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
+ set_vm_flush_reset_perms(hdr);
+ return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
-static inline struct bpf_binary_header *
-bpf_jit_binary_hdr(const struct bpf_prog *fp)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap,
+ enum skb_drop_reason *reason);
+
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
- unsigned long real_start = (unsigned long)fp->bpf_func;
- unsigned long addr = real_start & PAGE_MASK;
+ enum skb_drop_reason ignore_reason;
- return (void *)addr;
+ return sk_filter_trim_cap(sk, skb, 1, &ignore_reason);
}
-int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
-static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+static inline int sk_filter_reason(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason)
{
- return sk_filter_trim_cap(sk, skb, 1);
+ return sk_filter_trim_cap(sk, skb, 1, reason);
}
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);
+bool bpf_opcode_in_insntable(u8 code);
+
+void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
+ const u32 *insn_to_jit_off);
+int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_jit_attempt_done(struct bpf_prog *prog);
+
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
+struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);
static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
- bpf_prog_unlock_ro(fp);
__bpf_prog_free(fp);
}
@@ -696,37 +1141,173 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
+void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
- unsigned int len);
+int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#define __bpf_call_base_args \
+ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+ (void *)__bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
-bool bpf_helper_changes_pkt_data(void *func);
+bool bpf_jit_needs_zext(void);
+bool bpf_jit_inlines_helper_call(s32 imm);
+bool bpf_jit_supports_subprog_tailcalls(void);
+bool bpf_jit_supports_percpu_insn(void);
+bool bpf_jit_supports_kfunc_call(void);
+bool bpf_jit_supports_far_kfunc_call(void);
+bool bpf_jit_supports_exceptions(void);
+bool bpf_jit_supports_ptr_xchg(void);
+bool bpf_jit_supports_arena(void);
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
+bool bpf_jit_supports_private_stack(void);
+bool bpf_jit_supports_timed_may_goto(void);
+u64 bpf_arch_uaddress_limit(void);
+void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
+u64 arch_bpf_timed_may_goto(void);
+u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *);
+bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
+
+static inline bool bpf_dump_raw_ok(const struct cred *cred)
+{
+ /* Reconstruction of call sites depends on kallsyms, so apply
+ * the same restriction to dumping.
+ */
+ return kallsyms_show_value(cred);
+}
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
-void bpf_warn_invalid_xdp_action(u32 act);
+int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
+
+static inline bool xdp_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_set_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_clear_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
+ unsigned int pktlen)
+{
+ unsigned int len;
+
+ if (unlikely(!(fwd->flags & IFF_UP)))
+ return -ENETDOWN;
+
+ len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+ if (pktlen > len)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
+ * same CPU context. Further, for best results, no more than a single map
+ * for the do_redirect/do_flush pair should be used. This limitation is
+ * because we only track one map and force a flush when the map changes.
+ * This does not appear to be a real limitation for existing software.
+ */
+int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ struct xdp_buff *xdp, const struct bpf_prog *prog);
+int xdp_do_redirect(struct net_device *dev,
+ struct xdp_buff *xdp,
+ const struct bpf_prog *prog);
+int xdp_do_redirect_frame(struct net_device *dev,
+ struct xdp_buff *xdp,
+ struct xdp_frame *xdpf,
+ const struct bpf_prog *prog);
+void xdp_do_flush(void);
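+
+/*
+ * Illustrative sketch (not part of this patch): the usual driver-side
+ * pattern. Redirects queued by xdp_do_redirect() are batched and
+ * flushed once at the end of the NAPI poll, on the same CPU:
+ *
+ *	switch (bpf_prog_run_xdp(prog, &xdp)) {
+ *	case XDP_REDIRECT:
+ *		if (xdp_do_redirect(dev, &xdp, prog) < 0)
+ *			goto drop;
+ *		break;
+ *	...
+ *	}
+ *	(then, once per poll cycle)
+ *	xdp_do_flush();
+ */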
+
+void bpf_warn_invalid_xdp_action(const struct net_device *dev,
+ const struct bpf_prog *prog, u32 act);
+
+#ifdef CONFIG_INET
+struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
+ u32 hash);
+#else
+static inline struct sock *
+bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
+ u32 hash)
+{
+ return NULL;
+}
+#endif
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
+extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
+void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
-
+u64 bpf_jit_alloc_exec_limit(void);
+void *bpf_jit_alloc_exec(unsigned long size);
+void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
+struct bpf_binary_header *
+bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
+
+void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_prog_pack_free(void *ptr, u32 size);
+
+static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
+{
+ return list_empty(&fp->aux->ksym.lnode) ||
+ fp->aux->ksym.lnode.prev == LIST_POISON2;
+}
+
+struct bpf_binary_header *
+bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
+ unsigned int alignment,
+ struct bpf_binary_header **rw_hdr,
+ u8 **rw_image,
+ bpf_jit_fill_hole_t bpf_fill_ill_insns);
+int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
+void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
+
+int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+ struct bpf_jit_poke_descriptor *poke);
+
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+ const struct bpf_insn *insn, bool extra_pass,
+ u64 *func_addr, bool *func_addr_fixed);
+
+const char *bpf_jit_get_prog_name(struct bpf_prog *prog);
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
@@ -734,7 +1315,7 @@ void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
- pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+ pr_err("flen=%u proglen=%u pass=%u image=%p from=%s pid=%d\n", flen,
proglen, pass, image, current->comm, task_pid_nr(current));
if (image)
@@ -761,7 +1342,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
return fp->jited && bpf_jit_is_ebpf();
}
-static inline bool bpf_jit_blinding_enabled(void)
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
/* These are the prerequisites, should someone ever have the
* idea to call blinding outside of them, we make sure to
@@ -769,11 +1350,11 @@ static inline bool bpf_jit_blinding_enabled(void)
*/
if (!bpf_jit_is_ebpf())
return false;
- if (!bpf_jit_enable)
+ if (!prog->jit_requested)
return false;
if (!bpf_jit_harden)
return false;
- if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+ if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
return true;
@@ -794,17 +1375,18 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+int __bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
+struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
-static inline const char *
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- const char *ret = __bpf_address_lookup(addr, size, off, sym);
+ int ret = __bpf_address_lookup(addr, size, off, sym);
if (ret && modname)
*modname = NULL;
@@ -821,11 +1403,23 @@ static inline bool ebpf_jit_enabled(void)
return false;
}
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
+{
+ return false;
+}
+
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
return false;
}
+static inline int
+bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+ struct bpf_jit_poke_descriptor *poke)
+{
+ return -ENOTSUPP;
+}
+
static inline void bpf_jit_free(struct bpf_prog *fp)
{
bpf_prog_unlock_free(fp);
@@ -836,11 +1430,11 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-static inline const char *
+static inline int
__bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
- return NULL;
+ return 0;
}
static inline bool is_bpf_text_address(unsigned long addr)
@@ -854,11 +1448,16 @@ static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
return -ERANGE;
}
-static inline const char *
+static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+{
+ return NULL;
+}
+
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- return NULL;
+ return 0;
}
static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
@@ -868,8 +1467,11 @@ static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
+
#endif /* CONFIG_BPF_JIT */
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
#define BPF_ANC BIT(15)
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
@@ -919,7 +1521,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
BPF_ANCILLARY(RANDOM);
BPF_ANCILLARY(VLAN_TPID);
}
- /* Fallthrough. */
+ fallthrough;
default:
return ftest->code;
}
@@ -928,27 +1530,328 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
int k, unsigned int size);
-static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
- unsigned int size, void *buffer)
-{
- if (k >= 0)
- return skb_header_pointer(skb, k, size, buffer);
-
- return bpf_internal_load_pointer_neg_helper(skb, k, size);
-}
-
static inline int bpf_tell_extensions(void)
{
return SKF_AD_MAX;
}
+struct bpf_sock_addr_kern {
+ struct sock *sk;
+ struct sockaddr_unsized *uaddr;
+ /* Temporary "register" to make indirect stores to nested structures
+ * defined above. We need three registers to make such a store, but
+ * only two (src and dst) are available at convert_ctx_access time.
+ */
+ u64 tmp_reg;
+ void *t_ctx; /* Attach type specific context. */
+ u32 uaddrlen;
+};
+
struct bpf_sock_ops_kern {
struct sock *sk;
- u32 op;
union {
+ u32 args[4];
u32 reply;
u32 replylong[4];
};
+ struct sk_buff *syn_skb;
+ struct sk_buff *skb;
+ void *skb_data_end;
+ u8 op;
+ u8 is_fullsock;
+ u8 is_locked_tcp_sock;
+ u8 remaining_opt_len;
+ u64 temp; /* temp and everything after is not
+ * initialized to 0 before calling
+ * the BPF program. New fields that
+ * should be initialized to 0 should
+ * be inserted before temp.
+ * temp is scratch storage used by
+ * sock_ops_convert_ctx_access
+ * as temporary storage of a register.
+ */
+};
+
+struct bpf_sysctl_kern {
+ struct ctl_table_header *head;
+ const struct ctl_table *table;
+ void *cur_val;
+ size_t cur_len;
+ void *new_val;
+ size_t new_len;
+ int new_updated;
+ int write;
+ loff_t *ppos;
+ /* Temporary "register" for indirect stores to ppos. */
+ u64 tmp_reg;
+};
+
+#define BPF_SOCKOPT_KERN_BUF_SIZE 32
+struct bpf_sockopt_buf {
+ u8 data[BPF_SOCKOPT_KERN_BUF_SIZE];
+};
+
+struct bpf_sockopt_kern {
+ struct sock *sk;
+ u8 *optval;
+ u8 *optval_end;
+ s32 level;
+ s32 optname;
+ s32 optlen;
+ /* for retval in struct bpf_cg_run_ctx */
+ struct task_struct *current_task;
+ /* Temporary "register" for indirect stores to ppos. */
+ u64 tmp_reg;
+};
+
+int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
+
+struct bpf_sk_lookup_kern {
+ u16 family;
+ u16 protocol;
+ __be16 sport;
+ u16 dport;
+ struct {
+ __be32 saddr;
+ __be32 daddr;
+ } v4;
+ struct {
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ } v6;
+ struct sock *selected_sk;
+ u32 ingress_ifindex;
+ bool no_reuseport;
};
+extern struct static_key_false bpf_sk_lookup_enabled;
+
+/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
+ *
+ * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
+ * SK_DROP. Their meaning is as follows:
+ *
+ * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
+ * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
+ * SK_DROP : terminate lookup with -ECONNREFUSED
+ *
+ * This macro aggregates return values and selected sockets from
+ * multiple BPF programs according to the following rules, in order:
+ *
+ * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
+ * macro result is SK_PASS and last ctx.selected_sk is used.
+ * 2. If any program returned SK_DROP, macro result is SK_DROP.
+ * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
+ *
+ * Caller must ensure that the prog array is non-NULL, and that the
+ * array as well as the programs it contains remain valid.
+ */
+#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \
+ ({ \
+ struct bpf_sk_lookup_kern *_ctx = &(ctx); \
+ struct bpf_prog_array_item *_item; \
+ struct sock *_selected_sk = NULL; \
+ bool _no_reuseport = false; \
+ struct bpf_prog *_prog; \
+ bool _all_pass = true; \
+ u32 _ret; \
+ \
+ migrate_disable(); \
+ _item = &(array)->items[0]; \
+ while ((_prog = READ_ONCE(_item->prog))) { \
+ /* restore most recent selection */ \
+ _ctx->selected_sk = _selected_sk; \
+ _ctx->no_reuseport = _no_reuseport; \
+ \
+ _ret = func(_prog, _ctx); \
+ if (_ret == SK_PASS && _ctx->selected_sk) { \
+ /* remember last non-NULL socket */ \
+ _selected_sk = _ctx->selected_sk; \
+ _no_reuseport = _ctx->no_reuseport; \
+ } else if (_ret == SK_DROP && _all_pass) { \
+ _all_pass = false; \
+ } \
+ _item++; \
+ } \
+ _ctx->selected_sk = _selected_sk; \
+ _ctx->no_reuseport = _no_reuseport; \
+ migrate_enable(); \
+ _all_pass || _selected_sk ? SK_PASS : SK_DROP; \
+ })
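The interaction of these three rules is easiest to verify outside the kernel. What follows is a minimal userspace sketch of the same fold (the enum values and struct are hypothetical stand-ins, not the kernel definitions); it demonstrates that rule 1 dominates rule 2, so a drop verdict cannot veto a socket selection made by another program:

#include <stddef.h>
#include <stdio.h>

enum { SK_DROP = 0, SK_PASS = 1 };

struct prog_result {
	int verdict;		/* SK_PASS or SK_DROP */
	void *selected_sk;	/* non-NULL if the program picked a socket */
};

static int aggregate(const struct prog_result *res, size_t n, void **psk)
{
	void *selected = NULL;
	int all_pass = 1;

	for (size_t i = 0; i < n; i++) {
		if (res[i].verdict == SK_PASS && res[i].selected_sk)
			selected = res[i].selected_sk;	/* rule 1: last selection wins */
		else if (res[i].verdict == SK_DROP)
			all_pass = 0;			/* rule 2 */
	}
	*psk = selected;
	return (all_pass || selected) ? SK_PASS : SK_DROP;	/* rule 3 */
}

int main(void)
{
	int a, b;
	struct prog_result seq[] = {
		{ SK_PASS, &a }, { SK_DROP, NULL }, { SK_PASS, &b },
	};
	void *sk;

	/* Prints SK_PASS: the SK_DROP in the middle cannot veto the selection. */
	printf("%s\n", aggregate(seq, 3, &sk) == SK_PASS ? "SK_PASS" : "SK_DROP");
	return 0;
}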
+
+static inline bool bpf_sk_lookup_run_v4(const struct net *net, int protocol,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 dport,
+ const int ifindex, struct sock **psk)
+{
+ struct bpf_prog_array *run_array;
+ struct sock *selected_sk = NULL;
+ bool no_reuseport = false;
+
+ rcu_read_lock();
+ run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+ if (run_array) {
+ struct bpf_sk_lookup_kern ctx = {
+ .family = AF_INET,
+ .protocol = protocol,
+ .v4.saddr = saddr,
+ .v4.daddr = daddr,
+ .sport = sport,
+ .dport = dport,
+ .ingress_ifindex = ifindex,
+ };
+ u32 act;
+
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
+ if (act == SK_PASS) {
+ selected_sk = ctx.selected_sk;
+ no_reuseport = ctx.no_reuseport;
+ } else {
+ selected_sk = ERR_PTR(-ECONNREFUSED);
+ }
+ }
+ rcu_read_unlock();
+ *psk = selected_sk;
+ return no_reuseport;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool bpf_sk_lookup_run_v6(const struct net *net, int protocol,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 dport,
+ const int ifindex, struct sock **psk)
+{
+ struct bpf_prog_array *run_array;
+ struct sock *selected_sk = NULL;
+ bool no_reuseport = false;
+
+ rcu_read_lock();
+ run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+ if (run_array) {
+ struct bpf_sk_lookup_kern ctx = {
+ .family = AF_INET6,
+ .protocol = protocol,
+ .v6.saddr = saddr,
+ .v6.daddr = daddr,
+ .sport = sport,
+ .dport = dport,
+ .ingress_ifindex = ifindex,
+ };
+ u32 act;
+
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
+ if (act == SK_PASS) {
+ selected_sk = ctx.selected_sk;
+ no_reuseport = ctx.no_reuseport;
+ } else {
+ selected_sk = ERR_PTR(-ECONNREFUSED);
+ }
+ }
+ rcu_read_unlock();
+ *psk = selected_sk;
+ return no_reuseport;
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
+ u64 flags, const u64 flag_mask,
+ void *lookup_elem(struct bpf_map *map, u32 key))
+{
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
+
+ /* Lower bits of the flags are used as return code on lookup failure */
+ if (unlikely(flags & ~(action_mask | flag_mask)))
+ return XDP_ABORTED;
+
+ ri->tgt_value = lookup_elem(map, index);
+ if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
+ /* If the lookup fails we want to clear out the state in the
+ * redirect_info struct completely, so that if an eBPF program
+ * performs multiple lookups, the last one always takes
+ * precedence.
+ */
+ ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
+ ri->map_type = BPF_MAP_TYPE_UNSPEC;
+ return flags & action_mask;
+ }
+
+ ri->tgt_index = index;
+ ri->map_id = map->id;
+ ri->map_type = map->map_type;
+
+ if (flags & BPF_F_BROADCAST) {
+ WRITE_ONCE(ri->map, map);
+ ri->flags = flags;
+ } else {
+ WRITE_ONCE(ri->map, NULL);
+ ri->flags = 0;
+ }
+
+ return XDP_REDIRECT;
+}
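Note the trick in the flags check above: the XDP action values are small integers, not bit flags, so OR-ing XDP_ABORTED through XDP_TX yields a low-bit mask, and the caller can stash a fallback action (e.g. XDP_PASS) in those bits. A standalone sketch of just that masking, with the uapi numbering redefined locally so it builds on its own:

#include <stdint.h>
#include <stdio.h>

/* Same numbering as the XDP uapi; redefined here for a standalone build. */
enum { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };

/* lookup_ok stands in for the map lookup; only the flag handling is shown. */
static long redirect_result(uint64_t flags, uint64_t flag_mask, int lookup_ok)
{
	const uint64_t action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;

	if (flags & ~(action_mask | flag_mask))
		return XDP_ABORTED;		/* unknown bits: reject */
	if (!lookup_ok)
		return flags & action_mask;	/* low bits carry the fallback */
	return XDP_REDIRECT;
}

int main(void)
{
	printf("%ld\n", redirect_result(XDP_PASS, 0, 0));	/* 2: fall back to PASS */
	printf("%ld\n", redirect_result(XDP_PASS, 0, 1));	/* 4: XDP_REDIRECT */
	return 0;
}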
+
+#ifdef CONFIG_NET
+int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len);
+int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from,
+ u32 len, u64 flags);
+int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
+int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
+void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len);
+void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
+ void *buf, unsigned long len, bool flush);
+int __bpf_skb_meta_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len, u64 flags);
+void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset);
+#else /* CONFIG_NET */
+static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset,
+ void *to, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset,
+ void *buf, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset,
+ void *buf, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
+{
+ return NULL;
+}
+
+static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf,
+ unsigned long len, bool flush)
+{
+}
+
+static inline int __bpf_skb_meta_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len,
+ u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* CONFIG_NET */
+
#endif /* __LINUX_FILTER_H__ */
diff --git a/include/linux/find.h b/include/linux/find.h
new file mode 100644
index 000000000000..9d720ad92bc1
--- /dev/null
+++ b/include/linux/find.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FIND_H_
+#define __LINUX_FIND_H_
+
+#ifndef __LINUX_BITMAP_H
+#error only <linux/bitmap.h> can be included directly
+#endif
+
+#include <linux/bitops.h>
+
+unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
+ unsigned long start);
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start);
+extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ const unsigned long *addr3, unsigned long size,
+ unsigned long n);
+extern unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size);
+unsigned long _find_first_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size);
+unsigned long _find_first_and_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ const unsigned long *addr3, unsigned long size);
+extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
+extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
+
+#ifdef __BIG_ENDIAN
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *addr, unsigned
+ long size, unsigned long offset);
+unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
+ long size, unsigned long offset);
+#endif
+
+unsigned long find_random_bit(const unsigned long *addr, unsigned long size);
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit(addr, size, offset);
+}
+#endif
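On the small_const_nbits() path the search degenerates to one mask and one find-first-set. Below is a standalone sketch of that single-word case; GENMASK and __ffs are approximated with a local macro and a GCC/Clang builtin, not the kernel implementations:

#include <stdio.h>

#define GENMASK_ULL(h, l)  (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

static unsigned long next_bit_word(unsigned long long word,
				   unsigned long size, unsigned long offset)
{
	unsigned long long val;

	if (offset >= size)
		return size;

	val = word & GENMASK_ULL(size - 1, offset);
	return val ? (unsigned long)__builtin_ctzll(val) : size;
}

int main(void)
{
	/* 0x22 has bits 1 and 5 set. */
	printf("%lu\n", next_bit_word(0x22, 8, 2));	/* 5: next set bit from 2 */
	printf("%lu\n", next_bit_word(0x22, 8, 6));	/* 8: none left, returns size */
	return 0;
}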
+
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_and_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_andnot_bit
+/**
+ * find_next_andnot_bit - find the next set bit in *addr1 excluding all the bits
+ * in *addr2
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_andnot_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_or_bit
+/**
+ * find_next_or_bit - find the next set bit in either memory region
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_or_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = (*addr1 | *addr2) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_or_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number of the next zero bit.
+ * If no bits are zero, returns @size.
+ */
+static __always_inline
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_bit
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_bit(addr, size);
+}
+#endif
+
+/**
+ * find_nth_bit - find N'th set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index, counting from 0, of the set bit whose position is needed
+ *
+ * The following two calls are semantically equivalent:
+ * idx = find_nth_bit(addr, size, 0);
+ * idx = find_first_bit(addr, size);
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns >= @size.
+ */
+static __always_inline
+unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_bit(addr, size, n);
+}
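The single-word path above relies on fns(), which returns the position of the n-th set bit inside one word. A portable stand-in (not the kernel's implementation) makes the semantics easy to test:

#include <stdio.h>

/* Stand-in for the kernel's fns(): position of the n-th set bit in a word. */
static unsigned long fns_demo(unsigned long long word, unsigned int n)
{
	while (word) {
		unsigned int bit = __builtin_ctzll(word);

		if (n-- == 0)
			return bit;
		word &= word - 1;	/* clear the lowest set bit */
	}
	return 64;			/* no such bit in this word */
}

int main(void)
{
	/* 0x2c = 0b101100: set bits at positions 2, 3 and 5. */
	printf("%lu %lu %lu\n", fns_demo(0x2c, 0), fns_demo(0x2c, 1),
	       fns_demo(0x2c, 2));	/* prints: 2 3 5 */
	return 0;
}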
+
+/**
+ * find_nth_and_bit - find N'th set bit in 2 memory regions
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index, counting from 0, of the set bit whose position is needed
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static __always_inline
+unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_bit(addr1, addr2, size, n);
+}
+
+/**
+ * find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
+ * excluding those set in 3rd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @addr3: The 3rd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index, counting from 0, of the set bit whose position is needed
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static __always_inline
+unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & (~*addr3) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_andnot_bit(addr1, addr2, addr3, size, n);
+}
+
+#ifndef find_first_and_bit
+/**
+ * find_first_and_bit - find the first set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_bit(addr1, addr2, size);
+}
+#endif
+
+/**
+ * find_first_andnot_bit - find the first bit set in the 1st memory region and unset in the 2nd
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit.
+ * If no bits are set, returns >= @size.
+ */
+static __always_inline
+unsigned long find_first_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_andnot_bit(addr1, addr2, size);
+}
+
+/**
+ * find_first_and_and_bit - find the first set bit in 3 memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @addr3: The third address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_first_and_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & *addr3 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_and_bit(addr1, addr2, addr3, size);
+}
+
+#ifndef find_first_zero_bit
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+static __always_inline
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit(addr, size);
+}
+#endif
+
+#ifndef find_last_bit
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The number of bits to search
+ *
+ * Returns the bit number of the last set bit, or @size if no bits are set.
+ */
+static __always_inline
+unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __fls(val) : size;
+ }
+
+ return _find_last_bit(addr, size);
+}
+#endif
+
+/**
+ * find_next_and_bit_wrap - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or the first set bit up to @offset.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_and_bit(addr1, addr2, size, offset);
+
+ if (bit < size || offset == 0)
+ return bit;
+
+ bit = find_first_and_bit(addr1, addr2, offset);
+ return bit < offset ? bit : size;
+}
+
+/**
+ * find_next_bit_wrap - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or the first set bit up to @offset.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_bit_wrap(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_bit(addr, size, offset);
+
+ if (bit < size || offset == 0)
+ return bit;
+
+ bit = find_first_bit(addr, offset);
+ return bit < offset ? bit : size;
+}
+
+/*
+ * Helper for for_each_set_bit_wrap(). Make sure you know what you're doing
+ * before using it on its own.
+ */
+static __always_inline
+unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
+ unsigned long start, unsigned long n)
+{
+ unsigned long bit;
+
+ /* If not wrapped around */
+ if (n > start) {
+ /* and have a bit, just return it. */
+ bit = find_next_bit(bitmap, size, n);
+ if (bit < size)
+ return bit;
+
+ /* Otherwise, wrap around and ... */
+ n = 0;
+ }
+
+ /* Search the other part. */
+ bit = find_next_bit(bitmap, start, n);
+ return bit < start ? bit : size;
+}
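Both wrap helpers share the same two-probe structure: search [offset, size) first, then fall back to [0, offset). A standalone single-word sketch of find_next_bit_wrap() (GENMASK and __ffs approximated locally, as above):

#include <stdio.h>

#define GENMASK_ULL(h, l)  (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

static unsigned long next_bit(unsigned long long w, unsigned long size,
			      unsigned long off)
{
	unsigned long long v = off < size ? (w & GENMASK_ULL(size - 1, off)) : 0;

	return v ? (unsigned long)__builtin_ctzll(v) : size;
}

static unsigned long next_bit_wrap(unsigned long long w, unsigned long size,
				   unsigned long off)
{
	unsigned long bit = next_bit(w, size, off);

	if (bit < size || off == 0)
		return bit;

	bit = next_bit(w, off, 0);	/* wrap: search [0, off) */
	return bit < off ? bit : size;
}

int main(void)
{
	/* Only bit 1 is set; a search from offset 4 wraps and still finds it. */
	printf("%lu\n", next_bit_wrap(0x2, 8, 4));	/* prints: 1 */
	return 0;
}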
+
+/**
+ * find_next_clump8 - find next 8-bit clump with set bits in a memory region
+ * @clump: location to store copy of found clump
+ * @addr: address to base the search on
+ * @size: bitmap size in number of bits
+ * @offset: bit offset at which to start searching
+ *
+ * Returns the bit offset for the next set clump; the found clump value is
+ * copied to the location pointed to by @clump. If no bits are set, returns @size.
+ */
+extern unsigned long find_next_clump8(unsigned long *clump,
+ const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+#define find_first_clump8(clump, bits, size) \
+ find_next_clump8((clump), (bits), (size), 0)
+
+#if defined(__LITTLE_ENDIAN)
+
+static __always_inline
+unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
+
+static __always_inline
+unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+static __always_inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
+
+#elif defined(__BIG_ENDIAN)
+
+#ifndef find_next_zero_bit_le
+static __always_inline
+unsigned long find_next_zero_bit_le(const void *addr, unsigned
+ long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit_le(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_zero_bit_le
+static __always_inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = swab(*(const unsigned long *)addr) | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit_le(addr, size);
+}
+#endif
+
+#ifndef find_next_bit_le
+static __always_inline
+unsigned long find_next_bit_le(const void *addr, unsigned
+ long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit_le(addr, size, offset);
+}
+#endif
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = 0; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
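Typical usage, as a hedged kernel-style sketch (NR_CHANNELS and the channel mask are illustrative, not taken from a real driver):

#define NR_CHANNELS 32

DECLARE_BITMAP(chan_mask, NR_CHANNELS);
unsigned long bit;

bitmap_zero(chan_mask, NR_CHANNELS);
set_bit(3, chan_mask);
set_bit(17, chan_mask);

for_each_set_bit(bit, chan_mask, NR_CHANNELS)
	pr_info("channel %lu enabled\n", bit);	/* logs 3, then 17 */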
+
+#define for_each_and_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_and_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_andnot_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_or_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_or_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); \
+ (bit)++)
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+/**
+ * for_each_set_bitrange - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit)
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_bit((addr), (size), b), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
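A hedged usage sketch: report the contiguous extents of a small allocation map (the bitmap contents are illustrative):

DECLARE_BITMAP(map, 64);
unsigned long b, e;

bitmap_zero(map, 64);
bitmap_set(map, 4, 3);		/* bits 4..6 */
bitmap_set(map, 10, 2);		/* bits 10..11 */

for_each_set_bitrange(b, e, map, 64)
	pr_info("range [%lu, %lu)\n", b, e);	/* logs [4, 7), then [10, 12) */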
+
+/**
+ * for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit); must be initialized
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_bit((addr), (size), (b)), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit)
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit); must be initialized
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_set_bit_wrap - iterate over all set bits starting from @start, and
+ * wrapping around the end of the bitmap.
+ * @bit: offset for current iteration
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ * @start: Starting bit for bitmap traversing, wrapping around the bitmap end
+ */
+#define for_each_set_bit_wrap(bit, addr, size, start) \
+ for ((bit) = find_next_bit_wrap((addr), (size), (start)); \
+ (bit) < (size); \
+ (bit) = __for_each_wrap((addr), (size), (start), (bit) + 1))
+
+/**
+ * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
+ * @start: bit offset to start search and to store the current iteration offset
+ * @clump: location to store copy of current 8-bit clump
+ * @bits: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_clump8(start, clump, bits, size) \
+ for ((start) = find_first_clump8(&(clump), (bits), (size)); \
+ (start) < (size); \
+ (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
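The clump iterator targets hardware where each byte of a bitmap maps to one port or register bank. A hedged sketch (the GPIO naming is illustrative):

DECLARE_BITMAP(gpio, 32);
unsigned long offset, clump;

bitmap_zero(gpio, 32);
bitmap_set(gpio, 9, 2);		/* bits 9..10: second byte reads 0x06 */

for_each_set_clump8(offset, clump, gpio, 32)
	pr_info("clump at bit %lu: 0x%02lx\n", offset, clump);	/* 8: 0x06 */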
+
+#endif /*__LINUX_FIND_H_ */
diff --git a/include/linux/fips.h b/include/linux/fips.h
index f8fb07b0b6b8..c6961e932fef 100644
--- a/include/linux/fips.h
+++ b/include/linux/fips.h
@@ -1,10 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FIPS_H
#define _FIPS_H
#ifdef CONFIG_CRYPTO_FIPS
extern int fips_enabled;
+extern struct atomic_notifier_head fips_fail_notif_chain;
+
+void fips_fail_notify(void);
+
#else
#define fips_enabled 0
+
+static inline void fips_fail_notify(void) {}
+
#endif
#endif
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index d4b7683c722d..6143b7d28eac 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FIREWIRE_H
#define _LINUX_FIREWIRE_H
@@ -74,7 +75,7 @@ void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
-extern struct bus_type fw_bus_type;
+extern const struct bus_type fw_bus_type;
struct fw_card_driver;
struct fw_node;
@@ -87,23 +88,30 @@ struct fw_card {
int node_id;
int generation;
- int current_tlabel;
- u64 tlabel_mask;
- struct list_head transaction_list;
u64 reset_jiffies;
- u32 split_timeout_hi;
- u32 split_timeout_lo;
- unsigned int split_timeout_cycles;
- unsigned int split_timeout_jiffies;
+ struct {
+ int current_tlabel;
+ u64 tlabel_mask;
+ struct list_head list;
+ spinlock_t lock;
+ } transactions;
+
+ struct {
+ u32 hi;
+ u32 lo;
+ unsigned int cycles;
+ unsigned int jiffies;
+ spinlock_t lock;
+ } split_timeout;
unsigned long long guid;
unsigned max_receive;
int link_speed;
int config_rom_generation;
- spinlock_t lock; /* Take this lock when handling the lists in
- * this struct. */
+ spinlock_t lock;
+
struct fw_node *local_node;
struct fw_node *root_node;
struct fw_node *irm_node;
@@ -114,8 +122,6 @@ struct fw_card {
int index;
struct list_head link;
- struct list_head phy_receiver_list;
-
struct delayed_work br_work; /* bus reset job */
bool br_short;
@@ -130,9 +136,16 @@ struct fw_card {
bool broadcast_channel_allocated;
u32 broadcast_channel;
- __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+
+ struct {
+ __be32 buffer[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+ spinlock_t lock;
+ } topology_map;
__be32 maint_utility_register;
+
+ struct workqueue_struct *isoc_wq;
+ struct workqueue_struct *async_wq;
};
static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -149,12 +162,28 @@ static inline void fw_card_put(struct fw_card *card)
kref_put(&card->kref, fw_card_release);
}
+int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time);
+
struct fw_attribute_group {
struct attribute_group *groups[2];
struct attribute_group group;
struct attribute *attrs[13];
};
+enum fw_device_quirk {
+ // See afa1282a35d3 ("firewire: core: check for 1394a compliant IRM, fix inaccessibility of Sony camcorder").
+ FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY = BIT(0),
+
+ // See a509e43ff338 ("firewire: core: fix unstable I/O with Canon camcorder").
+ FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER = BIT(1),
+
+ // MOTU Audio Express transfers acknowledge packet with 0x10 for pending state.
+ FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE = BIT(2),
+
+ // TASCAM FW-1082/FW-1804/FW-1884 often freezes when receiving S400 packets.
+ FW_DEVICE_QUIRK_UNSTABLE_AT_S400 = BIT(3),
+};
+
enum fw_device_state {
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING,
@@ -188,6 +217,9 @@ struct fw_device {
struct fw_card *card;
struct device device;
+ // A set of enum fw_device_quirk.
+ int quirks;
+
struct mutex client_list_mutex;
struct list_head client_list;
@@ -205,10 +237,7 @@ struct fw_device {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_device *fw_device(struct device *dev)
-{
- return container_of(dev, struct fw_device, device);
-}
+#define fw_device(dev) container_of_const(dev, struct fw_device, device)
static inline int fw_device_is_shutdown(struct fw_device *device)
{
@@ -226,10 +255,7 @@ struct fw_unit {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_unit *fw_unit(struct device *dev)
-{
- return container_of(dev, struct fw_unit, device);
-}
+#define fw_unit(dev) container_of_const(dev, struct fw_unit, device)
static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
@@ -243,10 +269,7 @@ static inline void fw_unit_put(struct fw_unit *unit)
put_device(&unit->device);
}
-static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
-{
- return fw_device(unit->device.parent);
-}
+#define fw_parent_device(unit) fw_device(unit->device.parent)
struct ieee1394_device_id;
@@ -267,6 +290,15 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
void *data, size_t length,
void *callback_data);
+typedef void (*fw_transaction_callback_with_tstamp_t)(struct fw_card *card, int rcode,
+ u32 request_tstamp, u32 response_tstamp, void *data,
+ size_t length, void *callback_data);
+
+union fw_transaction_callback {
+ fw_transaction_callback_t without_tstamp;
+ fw_transaction_callback_with_tstamp_t with_tstamp;
+};
+
/*
* This callback handles an inbound request subaction. It is called in
* RCU read-side context, therefore must not sleep.
@@ -275,9 +307,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
* Otherwise there is a danger of recursion of inbound and outbound
* transactions from and to the local node.
*
- * The callback is responsible that either fw_send_response() or kfree()
- * is called on the @request, except for FCP registers for which the core
- * takes care of that.
+ * The callback is responsible for calling fw_send_response() on the @request, except for FCP
+ * registers, for which the core takes care of that.
*/
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
@@ -303,8 +334,7 @@ struct fw_packet {
* For successful transmission, the status code is the ack received
* from the destination. Otherwise it is one of the juju-specific
* rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
- * The callback can be called from tasklet context and thus
- * must never block.
+ * The callback can be called from the workqueue and thus must never block.
*/
fw_packet_callback_t callback;
int ack;
@@ -319,6 +349,7 @@ struct fw_transaction {
struct fw_card *card;
bool is_split_transaction;
struct timer_list split_timeout_timer;
+ u32 split_timeout_cycle;
struct fw_packet packet;
@@ -326,7 +357,8 @@ struct fw_transaction {
* The data passed to the callback is valid only during the
* callback.
*/
- fw_transaction_callback_t callback;
+ union fw_transaction_callback callback;
+ bool with_tstamp;
void *callback_data;
};
@@ -335,7 +367,11 @@ struct fw_address_handler {
u64 length;
fw_address_callback_t address_callback;
void *callback_data;
+
+ // Only for core functions.
struct list_head link;
+ struct kref kref;
+ struct completion done;
};
struct fw_address_region {
@@ -351,10 +387,80 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode);
int fw_get_request_speed(struct fw_request *request);
-void fw_send_request(struct fw_card *card, struct fw_transaction *t,
- int tcode, int destination_id, int generation, int speed,
- unsigned long long offset, void *payload, size_t length,
- fw_transaction_callback_t callback, void *callback_data);
+u32 fw_request_get_timestamp(const struct fw_request *request);
+
+void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, union fw_transaction_callback callback,
+ bool with_tstamp, void *callback_data);
+
+/**
+ * fw_send_request() - submit a request packet for transmission and generate a callback for the
+ * response subaction without a time stamp.
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() that generates a callback for the response subaction without
+ * a time stamp.
+ *
+ * The callback is invoked in the workqueue context in most cases. However, if an error is detected
+ * before queueing or the destination address refers to the local node, it is invoked in the
+ * current context instead.
+ */
+static inline void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed,
+ unsigned long long offset, void *payload, size_t length,
+ fw_transaction_callback_t callback, void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .without_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, false, callback_data);
+}
+
+/**
+ * fw_send_request_with_tstamp() - submit a request packet for transmission and generate a callback
+ * for the response subaction with a time stamp.
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() that generates a callback for the response subaction with a
+ * time stamp.
+ *
+ * The callback is invoked in the workqueue context in most cases. However, if an error is detected
+ * before queueing or the destination address refers to the local node, it is invoked in the current
+ * context instead.
+ */
+static inline void fw_send_request_with_tstamp(struct fw_card *card, struct fw_transaction *t,
+ int tcode, int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, fw_transaction_callback_with_tstamp_t callback,
+ void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .with_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, true, callback_data);
+}
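A hedged sketch of the asynchronous pattern: send a quadlet read and sleep until the callback fires. The demo_* names are invented, a real driver must read ->generation and ->node_id as a consistent pair, and error handling is trimmed to the minimum:

struct demo_result {
	struct completion done;
	int rcode;
	__be32 quadlet;
};

static void demo_complete(struct fw_card *card, int rcode, void *data,
			  size_t length, void *callback_data)
{
	struct demo_result *res = callback_data;

	res->rcode = rcode;
	if (rcode == RCODE_COMPLETE && length >= sizeof(res->quadlet))
		memcpy(&res->quadlet, data, sizeof(res->quadlet));
	complete(&res->done);
}

static int demo_read_quadlet(struct fw_device *dev, u64 offset, __be32 *val)
{
	struct demo_result res;
	struct fw_transaction t;

	init_completion(&res.done);
	/* Read requests carry no payload; the data arrives in the callback. */
	fw_send_request(dev->card, &t, TCODE_READ_QUADLET_REQUEST,
			dev->node_id, dev->generation, dev->max_speed,
			offset, NULL, 4, demo_complete, &res);
	wait_for_completion(&res.done);

	if (res.rcode != RCODE_COMPLETE)
		return -EIO;
	*val = res.quadlet;
	return 0;
}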
+
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
@@ -396,8 +502,8 @@ struct fw_iso_packet {
/* rx: Sync bit, wait for matching sy */
u32 tag:2; /* tx: Tag in packet header */
u32 sy:4; /* tx: Sy in packet header */
- u32 header_length:8; /* Length of immediate header */
- u32 header[0]; /* tx: Top of 1394 isoch. data_block */
+ u32 header_length:8; /* Size of immediate header */
+ u32 header[]; /* tx: Top of 1394 isoch. data_block */
};
#define FW_ISO_CONTEXT_TRANSMIT 0
@@ -435,17 +541,21 @@ typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
void *header, void *data);
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
dma_addr_t completed, void *data);
+
+union fw_iso_callback {
+ fw_iso_callback_t sc;
+ fw_iso_mc_callback_t mc;
+};
+
struct fw_iso_context {
struct fw_card *card;
+ struct work_struct work;
int type;
int channel;
int speed;
bool drop_overflow_headers;
size_t header_size;
- union {
- fw_iso_callback_t sc;
- fw_iso_mc_callback_t mc;
- } callback;
+ union fw_iso_callback callback;
void *callback_data;
};
@@ -459,6 +569,25 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+
+/**
+ * fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
+ * @ctx: the isochronous context
+ *
+ * Schedule a work item on workqueue to process the isochronous context. The registered callback
+ * function is called by the worker when a queued packet buffer with the interrupt flag is
+ * completed, either after transmission in the IT context or after being filled in the IR context.
+ * The callback function is also called when the header buffer in the context becomes full, If it
+ * is required to process the context in the current context, fw_iso_context_flush_completions() is
+ * available instead.
+ *
+ * Context: Any context.
+ */
+static inline void fw_iso_context_schedule_flush_completions(struct fw_iso_context *ctx)
+{
+ queue_work(ctx->card->isoc_wq, &ctx->work);
+}
+
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h
index 71d4fa721db9..3e1077e99002 100644
--- a/include/linux/firmware-map.h
+++ b/include/linux/firmware-map.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/firmware-map.h:
* Copyright (C) 2008 SUSE LINUX Products GmbH
* by Bernhard Walle <bernhard.walle@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _LINUX_FIRMWARE_MAP_H
#define _LINUX_FIRMWARE_MAP_H
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index b1f9f0ccb8ac..aae1b85ffc10 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -1,46 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/gfp.h>
-#define FW_ACTION_NOHOTPLUG 0
-#define FW_ACTION_HOTPLUG 1
+#define FW_ACTION_NOUEVENT 0
+#define FW_ACTION_UEVENT 1
struct firmware {
size_t size;
const u8 *data;
- struct page **pages;
/* firmware loader private fields */
void *priv;
};
-struct module;
-struct device;
+/**
+ * enum fw_upload_err - firmware upload error codes
+ * @FW_UPLOAD_ERR_NONE: returned to indicate success
+ * @FW_UPLOAD_ERR_HW_ERROR: error signalled by hardware, see kernel log
+ * @FW_UPLOAD_ERR_TIMEOUT: SW timed out on handshake with HW/firmware
+ * @FW_UPLOAD_ERR_CANCELED: upload was cancelled by the user
+ * @FW_UPLOAD_ERR_BUSY: there is an upload operation already in progress
+ * @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size
+ * @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log
+ * @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry
+ * @FW_UPLOAD_ERR_FW_INVALID: invalid firmware file
+ * @FW_UPLOAD_ERR_MAX: Maximum error code marker
+ */
+enum fw_upload_err {
+ FW_UPLOAD_ERR_NONE,
+ FW_UPLOAD_ERR_HW_ERROR,
+ FW_UPLOAD_ERR_TIMEOUT,
+ FW_UPLOAD_ERR_CANCELED,
+ FW_UPLOAD_ERR_BUSY,
+ FW_UPLOAD_ERR_INVALID_SIZE,
+ FW_UPLOAD_ERR_RW_ERROR,
+ FW_UPLOAD_ERR_WEAROUT,
+ FW_UPLOAD_ERR_FW_INVALID,
+ FW_UPLOAD_ERR_MAX
+};
-struct builtin_fw {
- char *name;
- void *data;
- unsigned long size;
+struct fw_upload {
+ void *dd_handle; /* reference to parent driver */
+ void *priv; /* firmware loader private fields */
};
-/* We have to play tricks here much like stringify() to get the
- __COUNTER__ macro to be expanded as we want it */
-#define __fw_concat1(x, y) x##y
-#define __fw_concat(x, y) __fw_concat1(x, y)
+/**
+ * struct fw_upload_ops - device specific operations to support firmware upload
+ * @prepare: Required: Prepare secure update
+ * @write: Required: The write() op receives the remaining
+ * size to be written and must return the actual
+ * size written or a negative error code. The write()
+ * op will be called repeatedly until all data is
+ * written.
+ * @poll_complete: Required: Check for the completion of the
+ * HW authentication/programming process.
+ * @cancel: Required: Request cancellation of update. This op
+ * is called from the context of a different kernel
+ * thread, so race conditions need to be considered.
+ * @cleanup: Optional: Complements the prepare()
+ * function and is called at the completion
+ * of the update, on success or failure, if the
+ * prepare function succeeded.
+ */
+struct fw_upload_ops {
+ enum fw_upload_err (*prepare)(struct fw_upload *fw_upload,
+ const u8 *data, u32 size);
+ enum fw_upload_err (*write)(struct fw_upload *fw_upload,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written);
+ enum fw_upload_err (*poll_complete)(struct fw_upload *fw_upload);
+ void (*cancel)(struct fw_upload *fw_upload);
+ void (*cleanup)(struct fw_upload *fw_upload);
+};
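A hedged skeleton of a client filling in these ops (every demo_* name is invented; a real implementation would talk to hardware in write() and poll_complete()):

static enum fw_upload_err demo_prepare(struct fw_upload *fwl,
				       const u8 *data, u32 size)
{
	return size ? FW_UPLOAD_ERR_NONE : FW_UPLOAD_ERR_INVALID_SIZE;
}

static enum fw_upload_err demo_write(struct fw_upload *fwl, const u8 *data,
				     u32 offset, u32 size, u32 *written)
{
	*written = size;		/* pretend the whole chunk was accepted */
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err demo_poll_complete(struct fw_upload *fwl)
{
	return FW_UPLOAD_ERR_NONE;	/* pretend the hardware reports success */
}

static void demo_cancel(struct fw_upload *fwl)
{
}

static const struct fw_upload_ops demo_ops = {
	.prepare	= demo_prepare,
	.write		= demo_write,
	.poll_complete	= demo_poll_complete,
	.cancel		= demo_cancel,
	/* .cleanup is optional and omitted here */
};

The ops are then hooked up with firmware_upload_register(THIS_MODULE, parent, "demo-fw", &demo_ops, drvdata), declared further down, which returns a struct fw_upload pointer or an ERR_PTR() on failure.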
-#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
- DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
+struct module;
+struct device;
-#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
- static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
- __used __section(.builtin_fw) = { name, blob, size }
+/*
+ * Built-in firmware functionality is only available if FW_LOADER=y, but not
+ * FW_LOADER=m
+ */
+#ifdef CONFIG_FW_LOADER
+bool firmware_request_builtin(struct firmware *fw, const char *name);
+#else
+static inline bool firmware_request_builtin(struct firmware *fw,
+ const char *name)
+{
+ return false;
+}
+#endif
-#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
+#if IS_REACHABLE(CONFIG_FW_LOADER)
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
+int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context));
+int firmware_request_nowarn(const struct firmware **fw, const char *name,
+ struct device *device);
+int firmware_request_platform(const struct firmware **fw, const char *name,
+ struct device *device);
int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
@@ -49,6 +114,9 @@ int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_into_buf(const struct firmware **firmware_p,
const char *name, struct device *device, void *buf, size_t size);
+int request_partial_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device,
+ void *buf, size_t size, size_t offset);
void release_firmware(const struct firmware *fw);
#else
@@ -58,6 +126,29 @@ static inline int request_firmware(const struct firmware **fw,
{
return -EINVAL;
}
+
+static inline int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return -EINVAL;
+}
+
+static inline int firmware_request_nowarn(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+ return -EINVAL;
+}
+
+static inline int firmware_request_platform(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+ return -EINVAL;
+}
+
static inline int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
@@ -83,5 +174,43 @@ static inline int request_firmware_into_buf(const struct firmware **firmware_p,
return -EINVAL;
}
+static inline int request_partial_firmware_into_buf
+ (const struct firmware **firmware_p,
+ const char *name,
+ struct device *device,
+ void *buf, size_t size, size_t offset)
+{
+ return -EINVAL;
+}
+
#endif
+
+#ifdef CONFIG_FW_UPLOAD
+
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle);
+void firmware_upload_unregister(struct fw_upload *fw_upload);
+
+#else
+
+static inline struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+}
+
+#endif
+
+int firmware_request_cache(struct device *device, const char *name);
+
+DEFINE_FREE(firmware, struct firmware *, release_firmware(_T))
+
#endif
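The DEFINE_FREE() hook above enables scope-based cleanup via <linux/cleanup.h>, which this header now includes. A hedged sketch of the pattern; demo_parse() is an invented helper:

static int demo_load(struct device *dev)
{
	const struct firmware *fw __free(firmware) = NULL;
	int err;

	err = request_firmware(&fw, "demo/image.bin", dev);
	if (err)
		return err;	/* fw is still NULL, nothing to release */

	/* release_firmware(fw) runs automatically on every return path. */
	return demo_parse(fw->data, fw->size);
}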
diff --git a/include/linux/firmware/broadcom/tee_bnxt_fw.h b/include/linux/firmware/broadcom/tee_bnxt_fw.h
new file mode 100644
index 000000000000..f24c82d6ef73
--- /dev/null
+++ b/include/linux/firmware/broadcom/tee_bnxt_fw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#ifndef _BROADCOM_TEE_BNXT_FW_H
+#define _BROADCOM_TEE_BNXT_FW_H
+
+#include <linux/types.h>
+
+int tee_bnxt_fw_load(void);
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size);
+
+#endif /* _BROADCOM_TEE_BNXT_FW_H */
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
new file mode 100644
index 000000000000..0ec1cdc5585d
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * cs_dsp.h -- Cirrus Logic DSP firmware support
+ *
+ * Based on sound/soc/codecs/wm_adsp.h
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ * Copyright (C) 2015-2021 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+#ifndef __CS_DSP_H
+#define __CS_DSP_H
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+
+#define CS_ADSP2_REGION_0 BIT(0)
+#define CS_ADSP2_REGION_1 BIT(1)
+#define CS_ADSP2_REGION_2 BIT(2)
+#define CS_ADSP2_REGION_3 BIT(3)
+#define CS_ADSP2_REGION_4 BIT(4)
+#define CS_ADSP2_REGION_5 BIT(5)
+#define CS_ADSP2_REGION_6 BIT(6)
+#define CS_ADSP2_REGION_7 BIT(7)
+#define CS_ADSP2_REGION_8 BIT(8)
+#define CS_ADSP2_REGION_9 BIT(9)
+#define CS_ADSP2_REGION_1_9 (CS_ADSP2_REGION_1 | \
+ CS_ADSP2_REGION_2 | CS_ADSP2_REGION_3 | \
+ CS_ADSP2_REGION_4 | CS_ADSP2_REGION_5 | \
+ CS_ADSP2_REGION_6 | CS_ADSP2_REGION_7 | \
+ CS_ADSP2_REGION_8 | CS_ADSP2_REGION_9)
+#define CS_ADSP2_REGION_ALL (CS_ADSP2_REGION_0 | CS_ADSP2_REGION_1_9)
+
+#define CS_DSP_DATA_WORD_SIZE 3
+#define CS_DSP_DATA_WORD_BITS (3 * BITS_PER_BYTE)
+
+#define CS_DSP_ACKED_CTL_TIMEOUT_MS 100
+#define CS_DSP_ACKED_CTL_N_QUICKPOLLS 10
+#define CS_DSP_ACKED_CTL_MIN_VALUE 0
+#define CS_DSP_ACKED_CTL_MAX_VALUE 0xFFFFFF
+
+/*
+ * Write sequence operation codes
+ */
+#define CS_DSP_WSEQ_FULL 0x00
+#define CS_DSP_WSEQ_ADDR8 0x02
+#define CS_DSP_WSEQ_L16 0x04
+#define CS_DSP_WSEQ_H16 0x05
+#define CS_DSP_WSEQ_UNLOCK 0xFD
+#define CS_DSP_WSEQ_END 0xFF
+
+/**
+ * struct cs_dsp_region - Describes a logical memory region in DSP address space
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_region {
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_alg_region - Describes a logical algorithm region in DSP address space
+ * @alg: Algorithm id
+ * @ver: Expected algorithm version
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_alg_region {
+ unsigned int alg;
+ unsigned int ver;
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_coeff_ctl - Describes a coefficient control
+ * @list: List node for internal use
+ * @dsp: DSP instance associated with this control
+ * @cache: Cached value of the control
+ * @fw_name: Name of the firmware
+ * @subname: Name of the control parsed from the WMFW
+ * @subname_len: Length of subname
+ * @offset: Offset of control within alg_region in words
+ * @len: Length of the cached value in bytes
+ * @type: One of the WMFW_CTL_TYPE_ control types defined in wmfw.h
+ * @flags: Bitfield of WMFW_CTL_FLAG_ control flags defined in wmfw.h
+ * @set: Flag indicating the value has been written by the user
+ * @enabled: Flag indicating whether control is enabled
+ * @alg_region: Logical region associated with this control
+ * @priv: For use by the client
+ */
+struct cs_dsp_coeff_ctl {
+ struct list_head list;
+ struct cs_dsp *dsp;
+ void *cache;
+ const char *fw_name;
+ /* Subname is needed to match with firmware */
+ const char *subname;
+ unsigned int subname_len;
+ unsigned int offset;
+ unsigned int len;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int set:1;
+ unsigned int enabled:1;
+ struct cs_dsp_alg_region alg_region;
+
+ void *priv;
+};
+
+struct cs_dsp_ops;
+struct cs_dsp_client_ops;
+
+/**
+ * struct cs_dsp - Configuration and state of a Cirrus Logic DSP
+ * @name: The name of the DSP instance
+ * @rev: Revision of the DSP
+ * @num: DSP instance number
+ * @type: Type of DSP
+ * @dev: Driver model representation of the device
+ * @regmap: Register map of the device
+ * @ops: Function pointers for internal callbacks
+ * @client_ops: Function pointers for client callbacks
+ * @base: Address of the DSP registers
+ * @base_sysinfo: Address of the sysinfo register (Halo only)
+ * @sysclk_reg: Address of the sysclk register (ADSP1 only)
+ * @sysclk_mask: Mask of frequency bits within sysclk register (ADSP1 only)
+ * @sysclk_shift: Shift of frequency bits within sysclk register (ADSP1 only)
+ * @alg_regions: List of currently loaded algorithm regions
+ * @fw_name: Name of the current firmware
+ * @fw_id: ID of the current firmware, obtained from the wmfw
+ * @fw_id_version: Version of the firmware, obtained from the wmfw
+ * @fw_vendor_id: Vendor of the firmware, obtained from the wmfw
+ * @mem: DSP memory region descriptions
+ * @num_mems: Number of memory regions in this DSP
+ * @fw_ver: Version of the wmfw file format
+ * @booted: Flag indicating DSP has been configured
+ * @running: Flag indicating DSP is executing firmware
+ * @ctl_list: Controls defined within the loaded DSP firmware
+ * @lock_regions: Enable MPU traps on specified memory regions
+ * @pwr_lock: Lock used to serialize accesses
+ * @debugfs_root: Debugfs directory for this DSP instance
+ * @wmfw_file_name: Filename of the currently loaded firmware
+ * @bin_file_name: Filename of the currently loaded coefficients
+ */
+struct cs_dsp {
+ const char *name;
+ int rev;
+ int num;
+ int type;
+ struct device *dev;
+ struct regmap *regmap;
+
+ const struct cs_dsp_ops *ops;
+ const struct cs_dsp_client_ops *client_ops;
+
+ unsigned int base;
+ unsigned int base_sysinfo;
+ unsigned int sysclk_reg;
+ unsigned int sysclk_mask;
+ unsigned int sysclk_shift;
+ bool no_core_startstop;
+
+ struct list_head alg_regions;
+
+ const char *fw_name;
+ unsigned int fw_id;
+ unsigned int fw_id_version;
+ unsigned int fw_vendor_id;
+
+ const struct cs_dsp_region *mem;
+ int num_mems;
+
+ int wmfw_ver;
+
+ bool booted;
+ bool running;
+
+ struct list_head ctl_list;
+
+ struct mutex pwr_lock;
+
+ unsigned int lock_regions;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+ const char *wmfw_file_name;
+ const char *bin_file_name;
+#endif
+};
+
+/**
+ * struct cs_dsp_client_ops - client callbacks
+ * @control_add: Called under the pwr_lock when a control is created
+ * @control_remove: Called under the pwr_lock when a control is destroyed
+ * @pre_run: Called under the pwr_lock by cs_dsp_run() before the core is started
+ * @post_run: Called under the pwr_lock by cs_dsp_run() after the core is started
+ * @pre_stop: Called under the pwr_lock by cs_dsp_stop() before the core is stopped
+ * @post_stop: Called under the pwr_lock by cs_dsp_stop() after the core is stopped
+ * @watchdog_expired: Called when a watchdog expiry is detected
+ *
+ * These callbacks give the cs_dsp client an opportunity to respond to events
+ * or to perform actions atomically.
+ */
+struct cs_dsp_client_ops {
+ int (*control_add)(struct cs_dsp_coeff_ctl *ctl);
+ void (*control_remove)(struct cs_dsp_coeff_ctl *ctl);
+ int (*pre_run)(struct cs_dsp *dsp);
+ int (*post_run)(struct cs_dsp *dsp);
+ void (*pre_stop)(struct cs_dsp *dsp);
+ void (*post_stop)(struct cs_dsp *dsp);
+ void (*watchdog_expired)(struct cs_dsp *dsp);
+};
+
+int cs_dsp_adsp1_init(struct cs_dsp *dsp);
+int cs_dsp_adsp2_init(struct cs_dsp *dsp);
+int cs_dsp_halo_init(struct cs_dsp *dsp);
+
+int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, const char *wmfw_filename,
+ const struct firmware *coeff_firmware, const char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_adsp1_power_down(struct cs_dsp *dsp);
+int cs_dsp_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, const char *wmfw_filename,
+ const struct firmware *coeff_firmware, const char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_power_down(struct cs_dsp *dsp);
+int cs_dsp_run(struct cs_dsp *dsp);
+void cs_dsp_stop(struct cs_dsp *dsp);
+
+void cs_dsp_remove(struct cs_dsp *dsp);
+
+int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq);
+void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp);
+
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root);
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp);
+
+int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id);
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ const void *buf, size_t len);
+int cs_dsp_coeff_lock_and_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ const void *buf, size_t len);
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ void *buf, size_t len);
+int cs_dsp_coeff_lock_and_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ void *buf, size_t len);
+struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg);
+
+int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr,
+ unsigned int num_words, __be32 *data);
+int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data);
+int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data);
+void cs_dsp_remove_padding(u32 *buf, int nwords);
+
+struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
+ int type, unsigned int id);
+
+const char *cs_dsp_mem_region_name(unsigned int type);
+
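+/*
+ * Example: a minimal boot sequence sketch (illustrative only; the firmware
+ * file names and the my_client_ops callbacks are hypothetical):
+ *
+ *	static int my_pre_run(struct cs_dsp *dsp)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static const struct cs_dsp_client_ops my_client_ops = {
+ *		.pre_run = my_pre_run,
+ *	};
+ *
+ *	dsp->client_ops = &my_client_ops;
+ *	ret = cs_dsp_halo_init(dsp);
+ *	if (!ret)
+ *		ret = cs_dsp_power_up(dsp, wmfw_fw, "dsp1.wmfw",
+ *				      coeff_fw, "dsp1.bin", "misc");
+ *	if (!ret)
+ *		ret = cs_dsp_run(dsp);
+ */
+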
+/**
+ * struct cs_dsp_wseq - Describes a write sequence
+ * @ctl: Write sequence cs_dsp control
+ * @ops: List of write operations contained in the sequence
+ */
+struct cs_dsp_wseq {
+ struct cs_dsp_coeff_ctl *ctl;
+ struct list_head ops;
+};
+
+int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int num_wseqs);
+int cs_dsp_wseq_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq, u32 addr, u32 data,
+ u8 op_code, bool update);
+int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq,
+ const struct reg_sequence *reg_seq, int num_regs,
+ u8 op_code, bool update);
+
+/**
+ * struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP
+ * @data: Pointer to underlying buffer memory
+ * @max: Pointer to end of the buffer memory
+ * @bytes: Number of bytes read/written into the memory chunk
+ * @cache: Temporary holding area for data as it is formatted
+ * @cachebits: Number of bits of data currently in cache
+ */
+struct cs_dsp_chunk {
+ u8 *data;
+ u8 *max;
+ int bytes;
+
+ u32 cache;
+ int cachebits;
+};
+
+/**
+ * cs_dsp_chunk() - Create a DSP memory chunk
+ * @data: Pointer to the buffer that will be used to store data
+ * @size: Size of the buffer in bytes
+ *
+ * Return: A cs_dsp_chunk structure
+ */
+static inline struct cs_dsp_chunk cs_dsp_chunk(void *data, int size)
+{
+ struct cs_dsp_chunk ch = {
+ .data = data,
+ .max = data + size,
+ };
+
+ return ch;
+}
+
+/**
+ * cs_dsp_chunk_end() - Check if a DSP memory chunk is full
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: True if the whole buffer has been read/written
+ */
+static inline bool cs_dsp_chunk_end(struct cs_dsp_chunk *ch)
+{
+ return ch->data == ch->max;
+}
+
+/**
+ * cs_dsp_chunk_bytes() - Number of bytes written/read from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: Number of bytes read/written to the buffer
+ */
+static inline int cs_dsp_chunk_bytes(struct cs_dsp_chunk *ch)
+{
+ return ch->bytes;
+}
+
+/**
+ * cs_dsp_chunk_valid_addr() - Check if an address is in a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @addr: Pointer to the address to check
+ *
+ * Return: True if the given address is within the buffer
+ */
+static inline bool cs_dsp_chunk_valid_addr(struct cs_dsp_chunk *ch, void *addr)
+{
+ return (u8 *)addr >= ch->data && (u8 *)addr < ch->max;
+}
+
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val);
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch);
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits);
+
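+/*
+ * Example: packing two 24-bit words into a DSP-formatted buffer (an
+ * illustrative sketch; buffer size and values are arbitrary):
+ *
+ *	u8 buf[8];
+ *	struct cs_dsp_chunk ch = cs_dsp_chunk(buf, sizeof(buf));
+ *
+ *	cs_dsp_chunk_write(&ch, 24, 0x123456);
+ *	cs_dsp_chunk_write(&ch, 24, 0xabcdef);
+ *	cs_dsp_chunk_flush(&ch);
+ *	len = cs_dsp_chunk_bytes(&ch);
+ */
+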
+#endif
diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
new file mode 100644
index 000000000000..1f97764fdfd7
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Support utilities for cs_dsp testing.
+ *
+ * Copyright (C) 2024 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/regmap.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+struct kunit;
+struct cs_dsp_test;
+struct cs_dsp_test_local;
+
+/**
+ * struct cs_dsp_test - base class for test utilities
+ *
+ * @test: Pointer to struct kunit instance.
+ * @dsp: Pointer to struct cs_dsp instance.
+ * @local: Private data for each test suite.
+ */
+struct cs_dsp_test {
+ struct kunit *test;
+ struct cs_dsp *dsp;
+
+ struct cs_dsp_test_local *local;
+
+ /* private: Following members are private */
+ bool saw_bus_write;
+};
+
+/**
+ * struct cs_dsp_mock_alg_def - Info for creating a mock algorithm entry.
+ *
+ * @id: Algorithm ID.
+ * @ver: Algorithm version.
+ * @xm_base_words: XM base address in DSP words.
+ * @xm_size_words: XM size in DSP words.
+ * @ym_base_words: YM base address in DSP words.
+ * @ym_size_words: YM size in DSP words.
+ * @zm_base_words: ZM base address in DSP words.
+ * @zm_size_words: ZM size in DSP words.
+ */
+struct cs_dsp_mock_alg_def {
+ unsigned int id;
+ unsigned int ver;
+ unsigned int xm_base_words;
+ unsigned int xm_size_words;
+ unsigned int ym_base_words;
+ unsigned int ym_size_words;
+ unsigned int zm_base_words;
+ unsigned int zm_size_words;
+};
+
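+/**
+ * struct cs_dsp_mock_coeff_def - Info for creating a mock coefficient control.
+ *
+ * @shortname: Short name of the control.
+ * @fullname: Full name of the control.
+ * @description: Description of the control.
+ * @type: One of the WMFW_CTL_TYPE_ control types.
+ * @flags: Bitfield of WMFW_CTL_FLAG_ control flags.
+ * @mem_type: Memory region containing the control.
+ * @offset_dsp_words: Offset of the control within the region, in DSP words.
+ * @length_bytes: Length of the control data in bytes.
+ */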
+struct cs_dsp_mock_coeff_def {
+ const char *shortname;
+ const char *fullname;
+ const char *description;
+ u16 type;
+ u16 flags;
+ u16 mem_type;
+ unsigned int offset_dsp_words;
+ unsigned int length_bytes;
+};
+
+/**
+ * struct cs_dsp_mock_xm_header - XM header builder
+ *
+ * @test_priv: Pointer to the struct cs_dsp_test.
+ * @blob_data: Pointer to the created blob data.
+ * @blob_size_bytes: Size of the data at blob_data.
+ */
+struct cs_dsp_mock_xm_header {
+ struct cs_dsp_test *test_priv;
+ void *blob_data;
+ size_t blob_size_bytes;
+};
+
+struct cs_dsp_mock_wmfw_builder;
+struct cs_dsp_mock_bin_builder;
+
+extern const unsigned int cs_dsp_mock_adsp2_32bit_sysbase;
+extern const unsigned int cs_dsp_mock_adsp2_16bit_sysbase;
+extern const unsigned int cs_dsp_mock_halo_core_base;
+extern const unsigned int cs_dsp_mock_halo_sysinfo_base;
+
+extern const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[];
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes);
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type);
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv);
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type);
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv);
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type);
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words);
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type);
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header);
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv);
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header);
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs);
+
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv);
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg);
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs);
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes);
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv);
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs);
+
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version);
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info);
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name);
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes);
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder);
+
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version);
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info);
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description);
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def);
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder);
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder);
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder);
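+
+/*
+ * Example: building a mock wmfw with one algorithm info block and one
+ * coefficient control in a KUnit test (an illustrative sketch; the priv
+ * pointer and def values are assumed to come from the test fixture):
+ *
+ *	struct cs_dsp_mock_wmfw_builder *builder;
+ *	struct firmware *fw;
+ *
+ *	builder = cs_dsp_mock_wmfw_init(priv, 3);
+ *	cs_dsp_mock_wmfw_start_alg_info_block(builder, 0xfafa, "ALG", NULL);
+ *	cs_dsp_mock_wmfw_add_coeff_desc(builder, &def);
+ *	cs_dsp_mock_wmfw_end_alg_info_block(builder);
+ *	fw = cs_dsp_mock_wmfw_get_firmware(builder);
+ */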
diff --git a/include/linux/firmware/cirrus/wmfw.h b/include/linux/firmware/cirrus/wmfw.h
new file mode 100644
index 000000000000..74e5a4f6c13a
--- /dev/null
+++ b/include/linux/firmware/cirrus/wmfw.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * wmfw.h - Wolfson firmware format information
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ */
+
+#ifndef __WMFW_H
+#define __WMFW_H
+
+#include <linux/types.h>
+
+#define WMFW_MAX_ALG_NAME 256
+#define WMFW_MAX_ALG_DESCR_NAME 256
+
+#define WMFW_MAX_COEFF_NAME 256
+#define WMFW_MAX_COEFF_DESCR_NAME 256
+
+#define WMFW_CTL_FLAG_SYS 0x8000
+#define WMFW_CTL_FLAG_VOLATILE 0x0004
+#define WMFW_CTL_FLAG_WRITEABLE 0x0002
+#define WMFW_CTL_FLAG_READABLE 0x0001
+
+#define WMFW_CTL_TYPE_BYTES 0x0004 /* byte control */
+
+/* Non-ALSA coefficient types start at 0x1000 */
+#define WMFW_CTL_TYPE_ACKED 0x1000 /* acked control */
+#define WMFW_CTL_TYPE_HOSTEVENT 0x1001 /* event control */
+#define WMFW_CTL_TYPE_HOST_BUFFER 0x1002 /* host buffer pointer */
+#define WMFW_CTL_TYPE_FWEVENT 0x1004 /* firmware event control */
+
+struct wmfw_header {
+ char magic[4];
+ __le32 len;
+ __le16 rev;
+ u8 core;
+ u8 ver;
+} __packed;
+
+struct wmfw_footer {
+ __le64 timestamp;
+ __le32 checksum;
+} __packed;
+
+struct wmfw_adsp1_sizes {
+ __le32 dm;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_adsp2_sizes {
+ __le32 xm;
+ __le32 ym;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_region {
+ union {
+ __be32 type;
+ __le32 offset;
+ };
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmfw_id_hdr {
+ __be32 core_id;
+ __be32 core_rev;
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_v3_id_hdr {
+ __be32 core_id;
+ __be32 block_rev;
+ __be32 vendor_id;
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 dm;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_adsp2_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_halo_id_hdr {
+ struct wmfw_v3_id_hdr fw;
+ __be32 xm_base;
+ __be32 xm_size;
+ __be32 ym_base;
+ __be32 ym_size;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_alg_hdr {
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 dm;
+} __packed;
+
+struct wmfw_adsp2_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+} __packed;
+
+struct wmfw_halo_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 xm_base;
+ __be32 xm_size;
+ __be32 ym_base;
+ __be32 ym_size;
+} __packed;
+
+struct wmfw_adsp_alg_data {
+ __le32 id;
+ u8 name[WMFW_MAX_ALG_NAME];
+ u8 descr[WMFW_MAX_ALG_DESCR_NAME];
+ __le32 ncoeff;
+ u8 data[];
+} __packed;
+
+struct wmfw_adsp_coeff_data {
+ struct {
+ __le16 offset;
+ __le16 type;
+ __le32 size;
+ } hdr;
+ u8 name[WMFW_MAX_COEFF_NAME];
+ u8 descr[WMFW_MAX_COEFF_DESCR_NAME];
+ __le16 ctl_type;
+ __le16 flags;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmfw_coeff_hdr {
+ u8 magic[4];
+ __le32 len;
+ union {
+ __be32 rev;
+ __le32 ver;
+ };
+ union {
+ __be32 core;
+ __le32 core_ver;
+ };
+ u8 data[];
+} __packed;
+
+struct wmfw_coeff_item {
+ __le16 offset;
+ __le16 type;
+ __le32 id;
+ __le32 ver;
+ __le32 sr;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+#define WMFW_ADSP1 1
+#define WMFW_ADSP2 2
+#define WMFW_HALO 4
+
+#define WMFW_ABSOLUTE 0xf0
+#define WMFW_ALGORITHM_DATA 0xf2
+#define WMFW_METADATA 0xfc
+#define WMFW_NAME_TEXT 0xfe
+#define WMFW_INFO_TEXT 0xff
+
+#define WMFW_ADSP1_PM 2
+#define WMFW_ADSP1_DM 3
+#define WMFW_ADSP1_ZM 4
+
+#define WMFW_ADSP2_PM 2
+#define WMFW_ADSP2_ZM 4
+#define WMFW_ADSP2_XM 5
+#define WMFW_ADSP2_YM 6
+
+#define WMFW_HALO_PM_PACKED 0x10
+#define WMFW_HALO_XM_PACKED 0x11
+#define WMFW_HALO_YM_PACKED 0x12
+
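+/*
+ * Example: sanity-checking a wmfw blob header (an illustrative sketch;
+ * "WMFW" is the magic used by wmfw files, "WMDR" by coefficient files):
+ *
+ *	const struct wmfw_header *hdr = (const void *)fw->data;
+ *
+ *	if (fw->size < sizeof(*hdr) || memcmp(hdr->magic, "WMFW", 4) ||
+ *	    le32_to_cpu(hdr->len) > fw->size)
+ *		return -EINVAL;
+ */
+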
+#endif
diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h
new file mode 100644
index 000000000000..1f176a2683fe
--- /dev/null
+++ b/include/linux/firmware/imx/dsp.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2019 NXP
+ *
+ * Header file for the DSP IPC implementation
+ */
+
+#ifndef _IMX_DSP_IPC_H
+#define _IMX_DSP_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_client.h>
+
+#define DSP_MU_CHAN_NUM 4
+
+struct imx_dsp_chan {
+ struct imx_dsp_ipc *ipc;
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ char *name;
+ int idx;
+};
+
+struct imx_dsp_ops {
+ void (*handle_reply)(struct imx_dsp_ipc *ipc);
+ void (*handle_request)(struct imx_dsp_ipc *ipc);
+};
+
+struct imx_dsp_ipc {
+ /* Host <-> DSP communication uses 2 txdb and 2 rxdb channels */
+ struct imx_dsp_chan chans[DSP_MU_CHAN_NUM];
+ struct device *dev;
+ struct imx_dsp_ops *ops;
+ void *private_data;
+};
+
+static inline void imx_dsp_set_data(struct imx_dsp_ipc *ipc, void *data)
+{
+ ipc->private_data = data;
+}
+
+static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc)
+{
+ return ipc->private_data;
+}
+
+#if IS_ENABLED(CONFIG_IMX_DSP)
+
+int imx_dsp_ring_doorbell(struct imx_dsp_ipc *dsp, unsigned int chan_idx);
+
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx);
+void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx);
+
+#else
+
+static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc,
+ unsigned int chan_idx)
+{
+ return -ENOTSUPP;
+}
+
+static inline struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc,
+							 int idx)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx) { }
+
+#endif
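+
+/*
+ * Example: notifying the DSP that a new message is ready (an illustrative
+ * sketch; the ipc pointer and my_ops are assumed to be set up by the
+ * client driver):
+ *
+ *	static void my_handle_reply(struct imx_dsp_ipc *ipc) { }
+ *
+ *	static struct imx_dsp_ops my_ops = { .handle_reply = my_handle_reply };
+ *
+ *	ipc->ops = &my_ops;
+ *	ret = imx_dsp_ring_doorbell(ipc, 0);
+ */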
+#endif /* _IMX_DSP_IPC_H */
diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h
new file mode 100644
index 000000000000..0b4643571625
--- /dev/null
+++ b/include/linux/firmware/imx/ipc.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _SC_IPC_H
+#define _SC_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define IMX_SC_RPC_VERSION 1
+#define IMX_SC_RPC_MAX_MSG 8
+
+struct imx_sc_ipc;
+
+enum imx_sc_rpc_svc {
+ IMX_SC_RPC_SVC_UNKNOWN = 0,
+ IMX_SC_RPC_SVC_RETURN = 1,
+ IMX_SC_RPC_SVC_PM = 2,
+ IMX_SC_RPC_SVC_RM = 3,
+ IMX_SC_RPC_SVC_TIMER = 5,
+ IMX_SC_RPC_SVC_PAD = 6,
+ IMX_SC_RPC_SVC_MISC = 7,
+ IMX_SC_RPC_SVC_IRQ = 8,
+};
+
+struct imx_sc_rpc_msg {
+ uint8_t ver;
+ uint8_t size;
+ uint8_t svc;
+ uint8_t func;
+};
+
+#ifdef CONFIG_IMX_SCU
+/*
+ * This function sends an RPC message over an IPC channel.
+ * It is called by client-side SCFW API function shims.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in,out] msg handle to a message
+ * @param[in] have_resp response flag
+ *
+ * If have_resp is true then this function waits for a response
+ * and returns the result in msg.
+ */
+int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
+
+/*
+ * This function gets the default ipc handle used by SCU
+ *
+ * @param[out] ipc sc ipc handle
+ *
+ * @return Returns 0 on success, a negative error code on failure
+ */
+int imx_scu_get_handle(struct imx_sc_ipc **ipc);
+#else
+static inline int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg,
+ bool have_resp)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_get_handle(struct imx_sc_ipc **ipc)
+{
+ return -ENOTSUPP;
+}
+#endif
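+
+/*
+ * Example: a minimal RPC round trip (an illustrative sketch; here the MISC
+ * service's BUILD_INFO function, defined in svc/misc.h, is queried and the
+ * response is returned in msg):
+ *
+ *	struct imx_sc_rpc_msg msg;
+ *	struct imx_sc_ipc *ipc;
+ *	int ret;
+ *
+ *	ret = imx_scu_get_handle(&ipc);
+ *	if (ret)
+ *		return ret;
+ *
+ *	msg.ver = IMX_SC_RPC_VERSION;
+ *	msg.size = 1;
+ *	msg.svc = IMX_SC_RPC_SVC_MISC;
+ *	msg.func = IMX_SC_MISC_FUNC_BUILD_INFO;
+ *	ret = imx_scu_call_rpc(ipc, &msg, true);
+ */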
+#endif /* _SC_IPC_H */
diff --git a/include/linux/firmware/imx/s4.h b/include/linux/firmware/imx/s4.h
new file mode 100644
index 000000000000..9e34923ae1d6
--- /dev/null
+++ b/include/linux/firmware/imx/s4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _S4_IPC_H
+#define _S4_IPC_H
+
+struct imx_s4_ipc;
+
+struct imx_s4_rpc_msg {
+ uint8_t ver;
+ uint8_t size;
+ uint8_t cmd;
+ uint8_t tag;
+} __packed;
+
+#endif /* _S4_IPC_H */
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
new file mode 100644
index 000000000000..df17196df5ff
--- /dev/null
+++ b/include/linux/firmware/imx/sci.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Header file containing the public System Controller Interface (SCI)
+ * definitions.
+ */
+
+#ifndef _SC_SCI_H
+#define _SC_SCI_H
+
+#include <linux/firmware/imx/ipc.h>
+
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/firmware/imx/svc/pm.h>
+#include <linux/firmware/imx/svc/rm.h>
+
+#if IS_ENABLED(CONFIG_IMX_SCU)
+int imx_scu_enable_general_irq_channel(struct device *dev);
+int imx_scu_irq_register_notifier(struct notifier_block *nb);
+int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
+int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_irq_get_status(u8 group, u32 *irq_status);
+int imx_scu_soc_init(struct device *dev);
+#else
+static inline int imx_scu_soc_init(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_get_status(u8 group, u32 *irq_status)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif /* _SC_SCI_H */
diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h
new file mode 100644
index 000000000000..a33b45027356
--- /dev/null
+++ b/include/linux/firmware/imx/sm.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef _SCMI_IMX_H
+#define _SCMI_IMX_H
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/scmi_imx_protocol.h>
+#include <linux/types.h>
+
+#define SCMI_IMX95_CTRL_PDM_CLK_SEL 0 /* AON PDM clock sel */
+#define SCMI_IMX95_CTRL_MQS1_SETTINGS 1 /* AON MQS settings */
+#define SCMI_IMX95_CTRL_SAI1_MCLK 2 /* AON SAI1 MCLK */
+#define SCMI_IMX95_CTRL_SAI3_MCLK 3 /* WAKE SAI3 MCLK */
+#define SCMI_IMX95_CTRL_SAI4_MCLK 4 /* WAKE SAI4 MCLK */
+#define SCMI_IMX95_CTRL_SAI5_MCLK 5 /* WAKE SAI5 MCLK */
+
+#define SCMI_IMX94_CTRL_PDM_CLK_SEL 0U /* AON PDM clock sel */
+#define SCMI_IMX94_CTRL_MQS1_SETTINGS 1U /* AON MQS settings */
+#define SCMI_IMX94_CTRL_MQS2_SETTINGS 2U /* WAKE MQS settings */
+#define SCMI_IMX94_CTRL_SAI1_MCLK 3U /* AON SAI1 MCLK */
+#define SCMI_IMX94_CTRL_SAI2_MCLK 4U /* WAKE SAI2 MCLK */
+#define SCMI_IMX94_CTRL_SAI3_MCLK 5U /* WAKE SAI3 MCLK */
+#define SCMI_IMX94_CTRL_SAI4_MCLK 6U /* WAKE SAI4 MCLK */
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_MISC_DRV)
+int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val);
+int scmi_imx_misc_ctrl_set(u32 id, u32 val);
+#else
+static inline int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_misc_ctrl_set(u32 id, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_CPU_DRV)
+int scmi_imx_cpu_start(u32 cpuid, bool start);
+int scmi_imx_cpu_started(u32 cpuid, bool *started);
+int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot,
+ bool resume);
+#else
+static inline int scmi_imx_cpu_start(u32 cpuid, bool start)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_cpu_started(u32 cpuid, bool *started)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+enum scmi_imx_lmm_op {
+ SCMI_IMX_LMM_BOOT,
+ SCMI_IMX_LMM_POWER_ON,
+ SCMI_IMX_LMM_SHUTDOWN,
+};
+
+/* For shutdown operation */
+#define SCMI_IMX_LMM_OP_FORCEFUL 0
+#define SCMI_IMX_LMM_OP_GRACEFUL BIT(0)
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_LMM_DRV)
+int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags);
+int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info);
+int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector);
+#else
+static inline int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ return -EOPNOTSUPP;
+}
+#endif
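+
+/*
+ * Example: requesting a graceful logical machine shutdown (an illustrative
+ * sketch; lmid identifies the target logical machine and is platform
+ * specific):
+ *
+ *	ret = scmi_imx_lmm_operation(lmid, SCMI_IMX_LMM_SHUTDOWN,
+ *				     SCMI_IMX_LMM_OP_GRACEFUL);
+ */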
+#endif
diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h
new file mode 100644
index 000000000000..760db08a67fc
--- /dev/null
+++ b/include/linux/firmware/imx/svc/misc.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Miscellaneous (MISC) function.
+ *
+ * MISC_SVC (SVC) Miscellaneous Service
+ *
+ * Module for the Miscellaneous (MISC) service.
+ */
+
+#ifndef _SC_MISC_API_H
+#define _SC_MISC_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC MISC function calls.
+ */
+enum imx_misc_func {
+ IMX_SC_MISC_FUNC_UNKNOWN = 0,
+ IMX_SC_MISC_FUNC_SET_CONTROL = 1,
+ IMX_SC_MISC_FUNC_GET_CONTROL = 2,
+ IMX_SC_MISC_FUNC_SET_MAX_DMA_GROUP = 4,
+ IMX_SC_MISC_FUNC_SET_DMA_GROUP = 5,
+ IMX_SC_MISC_FUNC_SECO_IMAGE_LOAD = 8,
+ IMX_SC_MISC_FUNC_SECO_AUTHENTICATE = 9,
+ IMX_SC_MISC_FUNC_DEBUG_OUT = 10,
+ IMX_SC_MISC_FUNC_WAVEFORM_CAPTURE = 6,
+ IMX_SC_MISC_FUNC_BUILD_INFO = 15,
+ IMX_SC_MISC_FUNC_UNIQUE_ID = 19,
+ IMX_SC_MISC_FUNC_SET_ARI = 3,
+ IMX_SC_MISC_FUNC_BOOT_STATUS = 7,
+ IMX_SC_MISC_FUNC_BOOT_DONE = 14,
+ IMX_SC_MISC_FUNC_OTP_FUSE_READ = 11,
+ IMX_SC_MISC_FUNC_OTP_FUSE_WRITE = 17,
+ IMX_SC_MISC_FUNC_SET_TEMP = 12,
+ IMX_SC_MISC_FUNC_GET_TEMP = 13,
+ IMX_SC_MISC_FUNC_GET_BOOT_DEV = 16,
+ IMX_SC_MISC_FUNC_GET_BUTTON_STATUS = 18,
+};
+
+/*
+ * Control Functions
+ */
+
+#ifdef CONFIG_IMX_SCU
+int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 val);
+
+int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 *val);
+
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr);
+#else
+static inline int imx_sc_misc_set_control(struct imx_sc_ipc *ipc,
+ u32 resource, u8 ctrl, u32 val)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_sc_misc_get_control(struct imx_sc_ipc *ipc,
+ u32 resource, u8 ctrl, u32 *val)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr)
+{
+ return -ENOTSUPP;
+}
+#endif
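+
+/*
+ * Example: setting a miscellaneous control (an illustrative sketch; the
+ * resource, ctrl and val values are platform specific):
+ *
+ *	struct imx_sc_ipc *ipc;
+ *	int ret;
+ *
+ *	ret = imx_scu_get_handle(&ipc);
+ *	if (!ret)
+ *		ret = imx_sc_misc_set_control(ipc, resource, ctrl, val);
+ */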
+#endif /* _SC_MISC_API_H */
diff --git a/include/linux/firmware/imx/svc/pm.h b/include/linux/firmware/imx/svc/pm.h
new file mode 100644
index 000000000000..1f6975dd37b0
--- /dev/null
+++ b/include/linux/firmware/imx/svc/pm.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Power Management (PM) function. This includes functions for power state
+ * control, clock control, reset control, and wake-up event control.
+ *
+ * PM_SVC (SVC) Power Management Service
+ *
+ * Module for the Power Management (PM) service.
+ */
+
+#ifndef _SC_PM_API_H
+#define _SC_PM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC PM function calls.
+ */
+enum imx_sc_pm_func {
+ IMX_SC_PM_FUNC_UNKNOWN = 0,
+ IMX_SC_PM_FUNC_SET_SYS_POWER_MODE = 19,
+ IMX_SC_PM_FUNC_SET_PARTITION_POWER_MODE = 1,
+ IMX_SC_PM_FUNC_GET_SYS_POWER_MODE = 2,
+ IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE = 3,
+ IMX_SC_PM_FUNC_GET_RESOURCE_POWER_MODE = 4,
+ IMX_SC_PM_FUNC_REQ_LOW_POWER_MODE = 16,
+ IMX_SC_PM_FUNC_SET_CPU_RESUME_ADDR = 17,
+ IMX_SC_PM_FUNC_REQ_SYS_IF_POWER_MODE = 18,
+ IMX_SC_PM_FUNC_SET_CLOCK_RATE = 5,
+ IMX_SC_PM_FUNC_GET_CLOCK_RATE = 6,
+ IMX_SC_PM_FUNC_CLOCK_ENABLE = 7,
+ IMX_SC_PM_FUNC_SET_CLOCK_PARENT = 14,
+ IMX_SC_PM_FUNC_GET_CLOCK_PARENT = 15,
+ IMX_SC_PM_FUNC_RESET = 13,
+ IMX_SC_PM_FUNC_RESET_REASON = 10,
+ IMX_SC_PM_FUNC_BOOT = 8,
+ IMX_SC_PM_FUNC_REBOOT = 9,
+ IMX_SC_PM_FUNC_REBOOT_PARTITION = 12,
+ IMX_SC_PM_FUNC_CPU_START = 11,
+};
+
+/*
+ * Defines for ALL parameters
+ */
+#define IMX_SC_PM_CLK_ALL UINT8_MAX /* All clocks */
+
+/*
+ * Defines for SC PM Power Mode
+ */
+#define IMX_SC_PM_PW_MODE_OFF 0 /* Power off */
+#define IMX_SC_PM_PW_MODE_STBY 1 /* Power in standby */
+#define IMX_SC_PM_PW_MODE_LP 2 /* Power in low-power */
+#define IMX_SC_PM_PW_MODE_ON 3 /* Power on */
+
+/*
+ * Defines for SC PM CLK
+ */
+#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */
+#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */
+#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */
+#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */
+#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */
+#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */
+#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */
+#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */
+#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */
+#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */
+#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */
+#define IMX_SC_PM_CLK_PLL 4 /* PLL */
+#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
+
+/*
+ * Defines for SC PM CLK Parent
+ */
+#define IMX_SC_PM_PARENT_XTAL 0 /* Parent is XTAL. */
+#define IMX_SC_PM_PARENT_PLL0 1 /* Parent is PLL0 */
+#define IMX_SC_PM_PARENT_PLL1 2 /* Parent is PLL1 or PLL0/2 */
+#define IMX_SC_PM_PARENT_PLL2 3 /* Parent in PLL2 or PLL0/4 */
+#define IMX_SC_PM_PARENT_BYPS 4 /* Parent is a bypass clock. */
+
+#endif /* _SC_PM_API_H */
diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h
new file mode 100644
index 000000000000..31456f897aa9
--- /dev/null
+++ b/include/linux/firmware/imx/svc/rm.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2020 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Resource Management (RM) function. This includes functions for
+ * partitioning resources, pads, and memory regions.
+ *
+ * RM_SVC (SVC) Resource Management Service
+ *
+ * Module for the Resource Management (RM) service.
+ */
+
+#ifndef _SC_RM_API_H
+#define _SC_RM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC RM function calls.
+ */
+enum imx_sc_rm_func {
+ IMX_SC_RM_FUNC_UNKNOWN = 0,
+ IMX_SC_RM_FUNC_PARTITION_ALLOC = 1,
+ IMX_SC_RM_FUNC_SET_CONFIDENTIAL = 31,
+ IMX_SC_RM_FUNC_PARTITION_FREE = 2,
+ IMX_SC_RM_FUNC_GET_DID = 26,
+ IMX_SC_RM_FUNC_PARTITION_STATIC = 3,
+ IMX_SC_RM_FUNC_PARTITION_LOCK = 4,
+ IMX_SC_RM_FUNC_GET_PARTITION = 5,
+ IMX_SC_RM_FUNC_SET_PARENT = 6,
+ IMX_SC_RM_FUNC_MOVE_ALL = 7,
+ IMX_SC_RM_FUNC_ASSIGN_RESOURCE = 8,
+ IMX_SC_RM_FUNC_SET_RESOURCE_MOVABLE = 9,
+ IMX_SC_RM_FUNC_SET_SUBSYS_RSRC_MOVABLE = 28,
+ IMX_SC_RM_FUNC_SET_MASTER_ATTRIBUTES = 10,
+ IMX_SC_RM_FUNC_SET_MASTER_SID = 11,
+ IMX_SC_RM_FUNC_SET_PERIPHERAL_PERMISSIONS = 12,
+ IMX_SC_RM_FUNC_IS_RESOURCE_OWNED = 13,
+ IMX_SC_RM_FUNC_GET_RESOURCE_OWNER = 33,
+ IMX_SC_RM_FUNC_IS_RESOURCE_MASTER = 14,
+ IMX_SC_RM_FUNC_IS_RESOURCE_PERIPHERAL = 15,
+ IMX_SC_RM_FUNC_GET_RESOURCE_INFO = 16,
+ IMX_SC_RM_FUNC_MEMREG_ALLOC = 17,
+ IMX_SC_RM_FUNC_MEMREG_SPLIT = 29,
+ IMX_SC_RM_FUNC_MEMREG_FRAG = 32,
+ IMX_SC_RM_FUNC_MEMREG_FREE = 18,
+ IMX_SC_RM_FUNC_FIND_MEMREG = 30,
+ IMX_SC_RM_FUNC_ASSIGN_MEMREG = 19,
+ IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS = 20,
+ IMX_SC_RM_FUNC_IS_MEMREG_OWNED = 21,
+ IMX_SC_RM_FUNC_GET_MEMREG_INFO = 22,
+ IMX_SC_RM_FUNC_ASSIGN_PAD = 23,
+ IMX_SC_RM_FUNC_SET_PAD_MOVABLE = 24,
+ IMX_SC_RM_FUNC_IS_PAD_OWNED = 25,
+ IMX_SC_RM_FUNC_DUMP = 27,
+};
+
+#if IS_ENABLED(CONFIG_IMX_SCU)
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt);
+#else
+static inline bool
+imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ return true;
+}
+static inline int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
+{
+ return -EOPNOTSUPP;
+}
+#endif
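+
+/*
+ * Example: skipping initialization of a resource owned by another partition
+ * (an illustrative sketch; the resource ID is platform specific):
+ *
+ *	if (!imx_sc_rm_is_resource_owned(ipc, resource))
+ *		return -ENODEV;
+ */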
+#endif
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
new file mode 100644
index 000000000000..935dba3633b5
--- /dev/null
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -0,0 +1,734 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2018, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
+ */
+
+#ifndef __STRATIX10_SMC_H
+#define __STRATIX10_SMC_H
+
+#include <linux/arm-smccc.h>
+#include <linux/bitops.h>
+
+/**
+ * This file defines the Secure Monitor Call (SMC) message protocol used for
+ * service layer driver in normal world (EL1) to communicate with secure
+ * monitor software in Secure Monitor Exception Level 3 (EL3).
+ *
+ * This file is shared with secure firmware (FW) which is out of kernel tree.
+ *
+ * An ARM SMC instruction takes a function identifier and up to 6 64-bit
+ * register values as arguments, and can return up to 4 64-bit register
+ * values. The operation of the secure monitor is determined by the
+ * parameter values passed in through registers.
+ *
+ * EL1 and EL3 communicate pointers as physical addresses rather than
+ * virtual addresses.
+ *
+ * Functions specified by the ARM SMC Calling Convention:
+ *
+ * A FAST call executes an atomic operation and returns when the requested
+ * operation has completed.
+ * A STD call starts an operation which can be preempted by a non-secure
+ * interrupt. The call can return before the requested operation has
+ * completed.
+ *
+ * a0..a7 are used as register names in the descriptions below; on arm32
+ * they translate to r0..r7 and on arm64 to w0..w7.
+ */
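+
+/*
+ * Example: issuing one of the calls below from EL1 (an illustrative sketch
+ * following the register convention above; a real caller would check
+ * res.a0 against the INTEL_SIP_SMC_STATUS_* values):
+ *
+ *	struct arm_smccc_res res;
+ *
+ *	arm_smccc_smc(INTEL_SIP_SMC_FPGA_CONFIG_START, 0, 0, 0, 0, 0, 0, 0,
+ *		      &res);
+ *	if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
+ *		return -EIO;
+ */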
+
+/**
+ * @func_num: function ID
+ */
+#define INTEL_SIP_SMC_STD_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, (func_num))
+
+#define INTEL_SIP_SMC_FAST_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, (func_num))
+
+#define INTEL_SIP_SMC_ASYNC_VAL(func_name) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, (func_name))
+
+/**
+ * Return values in INTEL_SIP_SMC_* call
+ *
+ * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION:
+ * Secure monitor software doesn't recognize the request.
+ *
+ * INTEL_SIP_SMC_STATUS_OK:
+ * Secure monitor software accepts the service client's request.
+ *
+ * INTEL_SIP_SMC_STATUS_BUSY:
+ * Secure monitor software is still processing service client's request.
+ *
+ * INTEL_SIP_SMC_STATUS_REJECTED:
+ * Secure monitor software rejects the service client's request.
+ *
+ * INTEL_SIP_SMC_STATUS_ERROR:
+ * An error occurred while processing the service request.
+ *
+ * INTEL_SIP_SMC_RSU_ERROR:
+ * An error occurred while processing the remote status update request.
+ */
+#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+#define INTEL_SIP_SMC_STATUS_OK 0x0
+#define INTEL_SIP_SMC_STATUS_BUSY 0x1
+#define INTEL_SIP_SMC_STATUS_REJECTED 0x2
+#define INTEL_SIP_SMC_STATUS_ERROR 0x4
+#define INTEL_SIP_SMC_RSU_ERROR 0x7
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_START
+ *
+ * Sync call used by the service driver at EL1 to request that secure world
+ * (EL3) prepare the FPGA to receive a new configuration.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_START.
+ * a1: flag for full or partial configuration. 0 for full and 1 for partial
+ * configuration.
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_STATUS_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1
+#define INTEL_SIP_SMC_FPGA_CONFIG_START \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_WRITE
+ *
+ * Async call used by service driver at EL1 to provide FPGA configuration data
+ * to secure world.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_WRITE.
+ * a1: 64bit physical address of the configuration data memory block
+ * a2: Size of configuration data block.
+ * a3-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
+ * a1: 64bit physical address of 1st completed memory block if any completed
+ * block, otherwise zero value.
+ * a2: 64bit physical address of 2nd completed memory block if any completed
+ * block, otherwise zero value.
+ * a3: 64bit physical address of 3rd completed memory block if any completed
+ * block, otherwise zero value.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE 2
+#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE
+ *
+ * Sync call used by service driver at EL1 to track the completed write
+ * transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE
+ * call returns INTEL_SIP_SMC_STATUS_BUSY.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
+ * a1: 64bit physical address of 1st completed memory block.
+ * a2: 64bit physical address of 2nd completed memory block if
+ * any completed block, otherwise zero value.
+ * a3: 64bit physical address of 3rd completed memory block if
+ * any completed block, otherwise zero value.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE 3
+#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE \
+INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_ISDONE
+ *
+ * Sync call used by the service driver at EL1 to inform secure world that
+ * all data have been sent, and to check whether the secure world has
+ * completed the FPGA configuration process.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_ISDONE.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4
+#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM
+ *
+ * Sync call used by service driver at EL1 to query the physical address of
+ * the memory block reserved by secure monitor software.
+ *
+ * Call register usage:
+ * a0:INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
+ * a1: start of physical address of reserved memory block.
+ * a2: size of reserved memory block.
+ * a3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM 5
+#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK
+ *
+ * For SMC loop-back mode only, used for internal integration, debugging
+ * or troubleshooting.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6
+#define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK)
+
+/**
+ * Request INTEL_SIP_SMC_REG_READ
+ *
+ * Read a protected register at EL3
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_REG_READ.
+ * a1: register address.
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
+ * a1: value in the register
+ * a2-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_REG_READ 7
+#define INTEL_SIP_SMC_REG_READ \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ)
+
+/**
+ * Request INTEL_SIP_SMC_REG_WRITE
+ *
+ * Write a protected register at EL3
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_REG_WRITE.
+ * a1: register address
+ * a2: value to program into register.
+ * a3-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_REG_WRITE 8
+#define INTEL_SIP_SMC_REG_WRITE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
+
+/**
+ * Request INTEL_SIP_SMC_FUNCID_REG_UPDATE
+ *
+ * Update one or more bits in a protected register at EL3 using a
+ * read-modify-write operation.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_REG_UPDATE.
+ * a1: register address
+ * a2: write Mask.
+ * a3: value to write.
+ * a4-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
+ * a1-3: Not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_REG_UPDATE 9
+#define INTEL_SIP_SMC_REG_UPDATE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_UPDATE)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_STATUS
+ *
+ * Request remote status update boot log, call is synchronous.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_STATUS
+ * a1-7 not used
+ *
+ * Return status
+ * a0: Current Image
+ * a1: Last Failing Image
+ * a2: Version | State
+ * a3: Error details | Error location
+ *
+ * Or
+ *
+ * a0: INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_STATUS 11
+#define INTEL_SIP_SMC_RSU_STATUS \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_UPDATE
+ *
+ * Request to set the offset of the bitstream to boot after reboot, call
+ * is synchronous.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_UPDATE
+ * a1 64bit physical address of the configuration data memory in flash
+ * a2-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12
+#define INTEL_SIP_SMC_RSU_UPDATE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE)
+
+/**
+ * Request INTEL_SIP_SMC_ECC_DBE
+ *
+ * Sync call used by service driver at EL1 to alert EL3 that a Double
+ * Bit ECC error has occurred.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ECC_DBE
+ * a1 SysManager Double Bit Error value
+ * a2-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ */
+#define INTEL_SIP_SMC_FUNCID_ECC_DBE 13
+#define INTEL_SIP_SMC_ECC_DBE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_NOTIFY
+ *
+ * Sync call used by service driver at EL1 to report hard processor
+ * system execution stage to firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_NOTIFY
+ * a1 32bit value representing hard processor system execution stage
+ * a2-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_NOTIFY 14
+#define INTEL_SIP_SMC_RSU_NOTIFY \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_NOTIFY)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_RETRY_COUNTER
+ *
+ * Sync call used by service driver at EL1 to query RSU retry counter
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_RETRY_COUNTER
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 the retry counter
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER 15
+#define INTEL_SIP_SMC_RSU_RETRY_COUNTER \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_DCMF_VERSION
+ *
+ * Sync call used by service driver at EL1 to query DCMF (Decision
+ * Configuration Management Firmware) version from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_DCMF_VERSION
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 dcmf1 | dcmf0
+ * a2 dcmf3 | dcmf2
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION 16
+#define INTEL_SIP_SMC_RSU_DCMF_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_MAX_RETRY
+ *
+ * Sync call used by service driver at EL1 to query max retry value from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_MAX_RETRY
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 max retry value
+ *
+ * Or
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18
+#define INTEL_SIP_SMC_RSU_MAX_RETRY \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_DCMF_STATUS
+ *
+ * Sync call used by service driver at EL1 to query DCMF status from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_DCMF_STATUS
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 dcmf3 | dcmf2 | dcmf1 | dcmf0
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS 20
+#define INTEL_SIP_SMC_RSU_DCMF_STATUS \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_SERVICE_COMPLETED
+ * Sync call to check whether the secure world has completed the service
+ * request.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_SERVICE_COMPLETED
+ * a1: this register is optional. If used, it is the physical address for
+ * secure firmware to put output data
+ * a2: this register is optional. If used, it is the size of output data
+ * a3-a7: not used
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR,
+ * INTEL_SIP_SMC_STATUS_REJECTED or INTEL_SIP_SMC_STATUS_BUSY
+ * a1: mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR
+ * a2: physical address containing the process info
+ * for FCS certificate -- the data contains the certificate status
+ * for FCS cryption -- the data contains the actual data size FW processes
+ * a3: output data size
+ */
+#define INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED 30
+#define INTEL_SIP_SMC_SERVICE_COMPLETED \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED)
+
+/**
+ * Request INTEL_SIP_SMC_FIRMWARE_VERSION
+ *
+ * Sync call used to query the version of running firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FIRMWARE_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR
+ * a1 running firmware version
+ */
+#define INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION 31
+#define INTEL_SIP_SMC_FIRMWARE_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)
+
+/**
+ * SMC call protocol for Mailbox, starting FUNCID from 60
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_MBOX_SEND_CMD
+ * a1 mailbox command code
+ * a2 physical address that contain mailbox command data (not include header)
+ * a3 mailbox command data size in word
+ * a4 set to 0 for CASUAL, set to 1 for URGENT
+ * a5 physical address for secure firmware to put response data
+ * (not include header)
+ * a6 maximum size in word of physical address to store response data
+ * a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED or
+ * INTEL_SIP_SMC_STATUS_ERROR
+ * a1 mailbox error code
+ * a2 response data length in word
+ * a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD 60
+#define INTEL_SIP_SMC_MBOX_SEND_CMD \
+	INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD)
+
+/**
+ * Request INTEL_SIP_SMC_SVC_VERSION
+ *
+ * Sync call used to query the SIP SMC API Version
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_SVC_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Major
+ * a2 Minor
+ */
+#define INTEL_SIP_SMC_SVC_FUNCID_VERSION 512
+#define INTEL_SIP_SMC_SVC_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_SVC_FUNCID_VERSION)
+
+/**
+ * SMC call protocol for FPGA Crypto Service (FCS)
+ * FUNCID starts from 90
+ */
+
+/**
+ * Request INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ *
+ * Sync call used to query the random number generated by the firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ * a1 the physical address for firmware to write generated random data
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 the physical address of generated random number
+ * a3 size
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER 90
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_CRYPTION
+ * Async call for data encryption and HMAC signature generation, or for
+ * data decryption and HMAC verification.
+ *
+ * Call INTEL_SIP_SMC_SERVICE_COMPLETED to get the output encrypted or
+ * decrypted data
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_CRYPTION
+ * a1 cryption mode (1 for encryption and 0 for decryption)
+ * a2 physical address which stores to be encrypted or decrypted data
+ * a3 input data size
+ * a4 physical address which will hold the encrypted or decrypted output data
+ * a5 output data size
+ * a6-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or
+ * INTEL_SIP_SMC_STATUS_REJECTED
+ * a1-3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_CRYPTION 91
+#define INTEL_SIP_SMC_FCS_CRYPTION \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CRYPTION)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * Async call for authentication service of HPS software
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * a1 the physical address of data block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or
+ * INTEL_SIP_SMC_STATUS_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST 92
+#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST)
+
+/**
+ * Request INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE
+ * Sync call to send a signed certificate
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SEND_CERTIFICATE
+ * a1 the physical address of CERTIFICATE block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FCS_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE 93
+#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * Sync call to dump all the fuses and key hashes
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * a1 the physical address for firmware to write structure of fuse and
+ * key hashes
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 physical address for the structure of fuse and key hashes
+ * a3 the size of structure
+ *
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA 94
+#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA)
+
+/**
+ * Request INTEL_SIP_SMC_HWMON_READTEMP
+ * Sync call to request temperature
+ *
+ * Call register usage:
+ * a0 Temperature Channel
+ * a1-a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Temperature Value
+ * a2-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_HWMON_READTEMP 32
+#define INTEL_SIP_SMC_HWMON_READTEMP \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READTEMP)
+
+/**
+ * Request INTEL_SIP_SMC_HWMON_READVOLT
+ * Sync call to request voltage
+ *
+ * Call register usage:
+ * a0 Voltage Channel
+ * a1-a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Voltage Value
+ * a2-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_HWMON_READVOLT 33
+#define INTEL_SIP_SMC_HWMON_READVOLT \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READVOLT)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_POLL
+ * Async call used by service driver at EL1 to query mailbox response from SDM.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_POLL
+ * a1 transaction job id
+ * a2-a17 will be used to return the response data
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1-a17 will contain the response values from mailbox for the previous send
+ * transaction
+ * Or
+ * a0 INTEL_SIP_SMC_STATUS_NO_RESPONSE
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL (0xC8)
+#define INTEL_SIP_SMC_ASYNC_POLL \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_RSU_GET_SPT
+ * Async call to get RSU SPT from SDM.
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_SPT
+ * a1 transaction job id
+ * a2-a17 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED
+ * or INTEL_SIP_SMC_STATUS_BUSY
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT (0xEA)
+#define INTEL_SIP_SMC_ASYNC_RSU_GET_SPT \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS
+ * Async call to get RSU error status from SDM.
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS
+ * a1 transaction job id
+ * a2-a17 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED
+ * or INTEL_SIP_SMC_STATUS_BUSY
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS (0xEB)
+#define INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_RSU_NOTIFY
+ * Async call to send NOTIFY value to SDM.
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_RSU_NOTIFY
+ * a1 transaction job id
+ * a2 notify value
+ * a3-a17 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED
+ * or INTEL_SIP_SMC_STATUS_BUSY
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY (0xEC)
+#define INTEL_SIP_SMC_ASYNC_RSU_NOTIFY \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY)
+#endif
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
new file mode 100644
index 000000000000..d290060f4c73
--- /dev/null
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2018, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
+ */
+
+#ifndef __STRATIX10_SVC_CLIENT_H
+#define __STRATIX10_SVC_CLIENT_H
+
+/*
+ * Service layer driver supports client names
+ *
+ * fpga: for FPGA configuration
+ * rsu: for remote status update
+ * hwmon: for hardware monitoring (voltage and temperature)
+ */
+#define SVC_CLIENT_FPGA "fpga"
+#define SVC_CLIENT_RSU "rsu"
+#define SVC_CLIENT_FCS "fcs"
+#define SVC_CLIENT_HWMON "hwmon"
+
+/*
+ * Status of the sent command, in bit number
+ *
+ * SVC_STATUS_OK:
+ * Secure firmware accepts the request issued by one of service clients.
+ *
+ * SVC_STATUS_BUFFER_SUBMITTED:
+ * Service client successfully submits data buffer to secure firmware.
+ *
+ * SVC_STATUS_BUFFER_DONE:
+ * Secure firmware completes data process, ready to accept the
+ * next WRITE transaction.
+ *
+ * SVC_STATUS_COMPLETED:
+ * Secure firmware completes service request successfully. In case of
+ * FPGA configuration, FPGA should be in user mode.
+ *
+ * SVC_STATUS_BUSY:
+ * Service request is still in progress.
+ *
+ * SVC_STATUS_ERROR:
+ * An error was encountered while processing the service request.
+ *
+ * SVC_STATUS_NO_SUPPORT:
+ * Secure firmware doesn't support the requested feature, such as RSU retry
+ * or RSU notify.
+ *
+ * SVC_STATUS_INVALID_PARAM:
+ * A parameter of the service request is invalid.
+ */
+#define SVC_STATUS_OK 0
+#define SVC_STATUS_BUFFER_SUBMITTED 1
+#define SVC_STATUS_BUFFER_DONE 2
+#define SVC_STATUS_COMPLETED 3
+#define SVC_STATUS_BUSY 4
+#define SVC_STATUS_ERROR 5
+#define SVC_STATUS_NO_SUPPORT 6
+#define SVC_STATUS_INVALID_PARAM 7
+
+/*
+ * Flag bit for COMMAND_RECONFIG
+ *
+ * COMMAND_RECONFIG_FLAG_PARTIAL:
+ * Selects the FPGA configuration type: set for partial reconfiguration,
+ * clear for full reconfiguration.
+ */
+#define COMMAND_RECONFIG_FLAG_PARTIAL 0
+
+/*
+ * Timeout settings for service clients:
+ * timeout values used in the Stratix10 FPGA manager, RSU, FCS and
+ * hwmon drivers.
+ */
+#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300
+#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720
+#define SVC_RSU_REQUEST_TIMEOUT_MS 300
+#define SVC_FCS_REQUEST_TIMEOUT_MS 2000
+#define SVC_COMPLETED_TIMEOUT_MS 30000
+#define SVC_HWMON_REQUEST_TIMEOUT_MS 300
+
+struct stratix10_svc_chan;
+
+/**
+ * enum stratix10_svc_command_code - supported service commands
+ *
+ * @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting
+ *
+ * @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status
+ * is SVC_STATUS_OK
+ *
+ * @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the
+ * FPGA configuration, return status is SVC_STATUS_BUFFER_SUBMITTED or
+ * SVC_STATUS_ERROR
+ *
+ * @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RECONFIG_STATUS: check the status of the configuration, return
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_STATUS: request remote system update boot log, return status
+ * is log data or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_NOTIFY: report the status of hard processor system
+ * software to firmware, return status is SVC_STATUS_OK or
+ * SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_RETRY: query firmware for the current image's retry counter,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_MAX_RETRY: query firmware for the max retry value,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_POLL_SERVICE_STATUS: poll if the service request is complete,
+ * return status is SVC_STATUS_OK, SVC_STATUS_ERROR or SVC_STATUS_BUSY
+ *
+ * @COMMAND_FIRMWARE_VERSION: query running firmware version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version,
+ * return status is SVC_STATUS_OK
+ *
+ * @COMMAND_MBOX_SEND_CMD: send generic mailbox command, return status is
+ * SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_GET_SPT_TABLE: query firmware for the SPT table,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_REQUEST_SERVICE: request validation of image from firmware,
+ * return status is SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM
+ *
+ * @COMMAND_FCS_SEND_CERTIFICATE: send a certificate, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_GET_PROVISION_DATA: read the provisioning data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_ENCRYPTION: encrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_DECRYPTION: decrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_RANDOM_NUMBER_GEN: generate a random number, return status
+ * is SVC_STATUS_OK, SVC_STATUS_ERROR
+ *
+ * @COMMAND_HWMON_READTEMP: query the temperature from the hardware monitor,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_HWMON_READVOLT: query the voltage from the hardware monitor,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ */
+enum stratix10_svc_command_code {
+ /* for FPGA */
+ COMMAND_NOOP = 0,
+ COMMAND_RECONFIG,
+ COMMAND_RECONFIG_DATA_SUBMIT,
+ COMMAND_RECONFIG_DATA_CLAIM,
+ COMMAND_RECONFIG_STATUS,
+ /* for RSU */
+ COMMAND_RSU_STATUS = 10,
+ COMMAND_RSU_UPDATE,
+ COMMAND_RSU_NOTIFY,
+ COMMAND_RSU_RETRY,
+ COMMAND_RSU_MAX_RETRY,
+ COMMAND_RSU_DCMF_VERSION,
+ COMMAND_RSU_DCMF_STATUS,
+ COMMAND_FIRMWARE_VERSION,
+ COMMAND_RSU_GET_SPT_TABLE,
+ /* for FCS */
+ COMMAND_FCS_REQUEST_SERVICE = 20,
+ COMMAND_FCS_SEND_CERTIFICATE,
+ COMMAND_FCS_GET_PROVISION_DATA,
+ COMMAND_FCS_DATA_ENCRYPTION,
+ COMMAND_FCS_DATA_DECRYPTION,
+ COMMAND_FCS_RANDOM_NUMBER_GEN,
+ /* for general status poll */
+ COMMAND_POLL_SERVICE_STATUS = 40,
+ /* for generic mailbox send command */
+ COMMAND_MBOX_SEND_CMD = 100,
+ /* Non-mailbox SMC Call */
+ COMMAND_SMC_SVC_VERSION = 200,
+ /* for HWMON */
+ COMMAND_HWMON_READTEMP,
+ COMMAND_HWMON_READVOLT
+};
+
+/**
+ * struct stratix10_svc_client_msg - message sent by client to service
+ * @payload: starting address of the data to be processed
+ * @payload_length: size in bytes of the data to be processed
+ * @payload_output: starting address of the processed data
+ * @payload_length_output: size in bytes of the processed data
+ * @command: service command
+ * @arg: args to be passed via registers and not physically mapped buffers
+ */
+struct stratix10_svc_client_msg {
+ void *payload;
+ size_t payload_length;
+ void *payload_output;
+ size_t payload_length_output;
+ enum stratix10_svc_command_code command;
+ u64 arg[3];
+};
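+
+/*
+ * Example: populating a message for a data-submit request (an illustrative
+ * sketch only; 'chan', 'kbuf' and 'kbuf_size' are hypothetical, with 'kbuf'
+ * obtained from stratix10_svc_allocate_memory()):
+ *
+ *	struct stratix10_svc_client_msg msg = {
+ *		.payload = kbuf,
+ *		.payload_length = kbuf_size,
+ *		.command = COMMAND_RECONFIG_DATA_SUBMIT,
+ *	};
+ *	int ret = stratix10_svc_send(chan, &msg);
+ */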
+
+/**
+ * struct stratix10_svc_command_config_type - config type
+ * @flags: flag bit for the type of FPGA configuration
+ */
+struct stratix10_svc_command_config_type {
+ u32 flags;
+};
+
+/**
+ * struct stratix10_svc_cb_data - callback data structure from service layer
+ * @status: the status of sent command
+ * @kaddr1: address of 1st completed data block
+ * @kaddr2: address of 2nd completed data block
+ * @kaddr3: address of 3rd completed data block
+ */
+struct stratix10_svc_cb_data {
+ u32 status;
+ void *kaddr1;
+ void *kaddr2;
+ void *kaddr3;
+};
+
+/**
+ * struct stratix10_svc_client - service client structure
+ * @dev: the client device
+ * @receive_cb: callback to provide service client the received data
+ * @priv: client private data
+ */
+struct stratix10_svc_client {
+ struct device *dev;
+ void (*receive_cb)(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *cb_data);
+ void *priv;
+};
+
+/**
+ * stratix10_svc_request_channel_byname() - request service channel
+ * @client: identity of the client requesting the channel
+ * @name: supporting client name defined above
+ *
+ * Return: a pointer to channel assigned to the client on success,
+ * or ERR_PTR() on error.
+ */
+struct stratix10_svc_chan
+*stratix10_svc_request_channel_byname(struct stratix10_svc_client *client,
+ const char *name);
+
+/**
+ * stratix10_svc_free_channel() - free service channel.
+ * @chan: service channel to be freed
+ */
+void stratix10_svc_free_channel(struct stratix10_svc_chan *chan);
+
+/**
+ * stratix10_svc_allocate_memory() - allocate memory
+ * @chan: service channel assigned to the client
+ * @size: number of bytes client requests
+ *
+ * Service layer allocates the requested number of bytes from the memory
+ * pool for the client.
+ *
+ * Return: the starting address of allocated memory on success, or
+ * ERR_PTR() on error.
+ */
+void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
+ size_t size);
+
+/**
+ * stratix10_svc_free_memory() - free allocated memory
+ * @chan: service channel assigned to the client
+ * @kaddr: starting address of memory to be freed back to the pool
+ */
+void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr);
+
+/**
+ * stratix10_svc_send() - send a message to the remote
+ * @chan: service channel assigned to the client
+ * @msg: message data to be sent, in the format of
+ * struct stratix10_svc_client_msg
+ *
+ * Return: 0 for success, -ENOMEM or -ENOBUFS on error.
+ */
+int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg);
+
+/**
+ * stratix10_svc_done() - complete service request
+ * @chan: service channel assigned to the client
+ *
+ * This function is used by a service client to inform the service layer
+ * that the client's service requests are completed, or that an error
+ * occurred during the request process.
+ */
+void stratix10_svc_done(struct stratix10_svc_chan *chan);
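+
+/*
+ * Typical synchronous flow (an illustrative sketch only; the client
+ * structure, message and buffer handling are hypothetical):
+ *
+ *	chan = stratix10_svc_request_channel_byname(&client, SVC_CLIENT_FPGA);
+ *	if (IS_ERR(chan))
+ *		return PTR_ERR(chan);
+ *
+ *	buf = stratix10_svc_allocate_memory(chan, size);
+ *	... fill buf and build msg, then:
+ *	ret = stratix10_svc_send(chan, &msg);
+ *	... wait for client.receive_cb() to report completion, then:
+ *	stratix10_svc_done(chan);
+ *	stratix10_svc_free_memory(chan, buf);
+ *	stratix10_svc_free_channel(chan);
+ */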
+
+/**
+ * typedef async_callback_t - A type definition for an asynchronous callback function.
+ *
+ * This type defines a function pointer for an asynchronous callback.
+ * The callback function takes a single argument, which is a pointer to
+ * user-defined data.
+ *
+ * @cb_arg: Argument to be passed to the callback function.
+ */
+typedef void (*async_callback_t)(void *cb_arg);
+
+/**
+ * stratix10_svc_add_async_client - Add an asynchronous client to a Stratix 10
+ * service channel.
+ * @chan: Pointer to the Stratix 10 service channel structure.
+ * @use_unique_clientid: Boolean flag indicating whether to use a unique client ID.
+ *
+ * This function registers an asynchronous client with the specified Stratix 10
+ * service channel. If the use_unique_clientid flag is set to true, a unique client
+ * ID will be assigned to the client.
+ *
+ * Return: 0 on success, or a negative error code on failure:
+ * -EINVAL if the channel is NULL or the async controller is not initialized.
+ * -EALREADY if the async channel is already allocated.
+ * -ENOMEM if memory allocation fails.
+ * Other negative values if ID allocation fails.
+ */
+int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan, bool use_unique_clientid);
+
+/**
+ * stratix10_svc_remove_async_client - Remove an asynchronous client from the Stratix 10
+ * service channel.
+ * @chan: Pointer to the Stratix 10 service channel structure.
+ *
+ * This function removes an asynchronous client from the specified Stratix 10 service channel.
+ * It is typically used to clean up and release resources associated with the client.
+ *
+ * Return: 0 on success, -EINVAL if the channel or asynchronous channel is invalid.
+ */
+int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan);
+
+/**
+ * stratix10_svc_async_send - Send an asynchronous message to the SDM mailbox
+ * in EL3 secure firmware.
+ * @chan: Pointer to the service channel structure.
+ * @msg: Pointer to the message to be sent.
+ * @handler: Pointer to the handler object used by caller to track the transaction.
+ * @cb: Callback function to be called upon completion.
+ * @cb_arg: Argument to be passed to the callback function.
+ *
+ * This function sends a message asynchronously to the SDM mailbox in EL3
+ * secure firmware and registers a callback function to be invoked when the
+ * operation completes.
+ *
+ * Return: 0 on success, and negative error codes on failure.
+ */
+int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg, void **handler,
+ async_callback_t cb, void *cb_arg);
+
+/**
+ * stratix10_svc_async_poll - Polls the status of an asynchronous service request.
+ * @chan: Pointer to the service channel structure.
+ * @tx_handle: Handle to the transaction being polled.
+ * @data: Pointer to the callback data structure to be filled with the result.
+ *
+ * This function checks the status of an asynchronous service request
+ * and fills the provided callback data structure with the result.
+ *
+ * Return: 0 on success, -EINVAL if any input parameter is invalid or if the
+ * async controller is not initialized, -EAGAIN if the transaction is
+ * still in progress, or other negative error codes on failure.
+ */
+int stratix10_svc_async_poll(struct stratix10_svc_chan *chan, void *tx_handle,
+ struct stratix10_svc_cb_data *data);
+
+/**
+ * stratix10_svc_async_done - Complete an asynchronous transaction
+ * @chan: Pointer to the service channel structure
+ * @tx_handle: Pointer to the transaction handle
+ *
+ * This function completes an asynchronous transaction by removing the
+ * transaction from the hash table and deallocating the associated resources.
+ *
+ * Return: 0 on success, -EINVAL on invalid input or errors.
+ */
+int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle);
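+
+/*
+ * Typical asynchronous flow (an illustrative sketch only; 'msg', 'my_cb'
+ * and 'cb_arg' are hypothetical):
+ *
+ *	void *tx_handle;
+ *	struct stratix10_svc_cb_data data;
+ *
+ *	ret = stratix10_svc_add_async_client(chan, true);
+ *	ret = stratix10_svc_async_send(chan, &msg, &tx_handle, my_cb, cb_arg);
+ *	... once my_cb() runs, or by polling until -EAGAIN is no longer
+ *	returned:
+ *	ret = stratix10_svc_async_poll(chan, tx_handle, &data);
+ *	stratix10_svc_async_done(chan, tx_handle);
+ *	stratix10_svc_remove_async_client(chan);
+ */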
+
+#endif
+
diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
new file mode 100644
index 000000000000..6e86799a7dc4
--- /dev/null
+++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef MTK_ADSP_IPC_H
+#define MTK_ADSP_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#define MTK_ADSP_IPC_REQ 0
+#define MTK_ADSP_IPC_RSP 1
+#define MTK_ADSP_IPC_OP_REQ 0x1
+#define MTK_ADSP_IPC_OP_RSP 0x2
+
+enum {
+ MTK_ADSP_MBOX_REPLY,
+ MTK_ADSP_MBOX_REQUEST,
+ MTK_ADSP_MBOX_NUM,
+};
+
+struct mtk_adsp_ipc;
+
+struct mtk_adsp_ipc_ops {
+ void (*handle_reply)(struct mtk_adsp_ipc *ipc);
+ void (*handle_request)(struct mtk_adsp_ipc *ipc);
+};
+
+struct mtk_adsp_chan {
+ struct mtk_adsp_ipc *ipc;
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ char *name;
+ int idx;
+};
+
+struct mtk_adsp_ipc {
+ struct mtk_adsp_chan chans[MTK_ADSP_MBOX_NUM];
+ struct device *dev;
+ const struct mtk_adsp_ipc_ops *ops;
+ void *private_data;
+};
+
+static inline void mtk_adsp_ipc_set_data(struct mtk_adsp_ipc *ipc, void *data)
+{
+ ipc->private_data = data;
+}
+
+static inline void *mtk_adsp_ipc_get_data(struct mtk_adsp_ipc *ipc)
+{
+ return ipc->private_data;
+}
+
+int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t op);
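+
+/*
+ * Example: wiring up the ops and sending a request (an illustrative sketch
+ * only; 'my_handle_reply', 'my_handle_request' and 'priv' are hypothetical):
+ *
+ *	static const struct mtk_adsp_ipc_ops my_ops = {
+ *		.handle_reply = my_handle_reply,
+ *		.handle_request = my_handle_request,
+ *	};
+ *
+ *	ipc->ops = &my_ops;
+ *	mtk_adsp_ipc_set_data(ipc, priv);
+ *	ret = mtk_adsp_ipc_send(ipc, MTK_ADSP_IPC_REQ, MTK_ADSP_IPC_OP_REQ);
+ */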
+
+#endif /* MTK_ADSP_IPC_H */
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 37a5eaea69dd..8eaf8922ab02 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -1,13 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _MESON_SM_FW_H_
@@ -17,15 +11,21 @@ enum {
SM_EFUSE_READ,
SM_EFUSE_WRITE,
SM_EFUSE_USER_MAX,
+ SM_GET_CHIP_ID,
+ SM_A1_PWRC_SET,
+ SM_A1_PWRC_GET,
};
struct meson_sm_firmware;
-int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 arg4);
-int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index,
- u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
-int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
- u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
+ s32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int b_size, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int bsize, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+struct meson_sm_firmware *meson_sm_get(struct device_node *firmware_node);
#endif /* _MESON_SM_FW_H_ */
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
new file mode 100644
index 000000000000..3387897bf368
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Qualcomm Secure Execution Environment (SEE) interface (QSEECOM).
+ * Responsible for setting up and managing QSEECOM client devices.
+ *
+ * Copyright (C) 2023 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef __QCOM_QSEECOM_H
+#define __QCOM_QSEECOM_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+#include <linux/firmware/qcom/qcom_scm.h>
+
+/**
+ * struct qseecom_client - QSEECOM client device.
+ * @aux_dev: Underlying auxiliary device.
+ * @app_id: ID of the loaded application.
+ */
+struct qseecom_client {
+ struct auxiliary_device aux_dev;
+ u32 app_id;
+};
+
+/**
+ * qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
+ * @client: The QSEECOM client associated with the target app.
+ * @req: Request buffer sent to the app (must be TZ memory).
+ * @req_size: Size of the request buffer.
+ * @rsp: Response buffer, written to by the app (must be TZ memory).
+ * @rsp_size: Size of the response buffer.
+ *
+ * Sends a request to the QSEE app associated with the given client and reads
+ * back its response. The caller must provide two DMA memory regions, one for
+ * the request and one for the response, and fill out the @req region with the
+ * respective (app-specific) request data. The QSEE app reads this and returns
+ * its response in the @rsp region.
+ *
+ * Note: This is a convenience wrapper around qcom_scm_qseecom_app_send().
+ * Clients should prefer to use this wrapper.
+ *
+ * Return: Zero on success, nonzero on failure.
+ */
+static inline int qcom_qseecom_app_send(struct qseecom_client *client,
+ void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
+{
+ return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
+}
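+
+/*
+ * Example (an illustrative sketch only; 'client', 'pool' and the request/
+ * response layouts are hypothetical; both buffers must live in TZ memory,
+ * e.g. come from a qcom_tzmem pool):
+ *
+ *	req = qcom_tzmem_alloc(pool, sizeof(*req), GFP_KERNEL);
+ *	rsp = qcom_tzmem_alloc(pool, sizeof(*rsp), GFP_KERNEL);
+ *	... fill *req, then:
+ *	ret = qcom_qseecom_app_send(client, req, sizeof(*req),
+ *				    rsp, sizeof(*rsp));
+ */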
+
+#endif /* __QCOM_QSEECOM_H */
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
new file mode 100644
index 000000000000..a55ca771286b
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+#ifndef __QCOM_SCM_H
+#define __QCOM_SCM_H
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
+#include <dt-bindings/firmware/qcom,scm.h>
+
+#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
+#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
+#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
+
+struct qcom_scm_hdcp_req {
+ u32 addr;
+ u32 val;
+};
+
+struct qcom_scm_vmperm {
+ int vmid;
+ int perm;
+};
+
+enum qcom_scm_ocmem_client {
+ QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
+ QCOM_SCM_OCMEM_GRAPHICS_ID,
+ QCOM_SCM_OCMEM_VIDEO_ID,
+ QCOM_SCM_OCMEM_LP_AUDIO_ID,
+ QCOM_SCM_OCMEM_SENSORS_ID,
+ QCOM_SCM_OCMEM_OTHER_OS_ID,
+ QCOM_SCM_OCMEM_DEBUG_ID,
+};
+
+enum qcom_scm_sec_dev_id {
+ QCOM_SCM_MDSS_DEV_ID = 1,
+ QCOM_SCM_OCMEM_DEV_ID = 5,
+ QCOM_SCM_PCIE0_DEV_ID = 11,
+ QCOM_SCM_PCIE1_DEV_ID = 12,
+ QCOM_SCM_GFX_DEV_ID = 18,
+ QCOM_SCM_UFS_DEV_ID = 19,
+ QCOM_SCM_ICE_DEV_ID = 20,
+};
+
+enum qcom_scm_ice_cipher {
+ QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0,
+ QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3,
+ QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
+};
+
+#define QCOM_SCM_PERM_READ 0x4
+#define QCOM_SCM_PERM_WRITE 0x2
+#define QCOM_SCM_PERM_EXEC 0x1
+#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
+#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
+
+bool qcom_scm_is_available(void);
+
+int qcom_scm_set_cold_boot_addr(void *entry);
+int qcom_scm_set_warm_boot_addr(void *entry);
+void qcom_scm_cpu_power_down(u32 flags);
+int qcom_scm_set_remote_state(u32 state, u32 id);
+
+struct qcom_scm_pas_metadata {
+ void *ptr;
+ dma_addr_t phys;
+ ssize_t size;
+};
+
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+ struct qcom_scm_pas_metadata *ctx);
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+int qcom_scm_pas_auth_and_reset(u32 peripheral);
+int qcom_scm_pas_shutdown(u32 peripheral);
+bool qcom_scm_pas_supported(u32 peripheral);
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+
+bool qcom_scm_restore_sec_cfg_available(void);
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank);
+bool qcom_scm_set_gpu_smmu_aperture_is_available(void);
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
+int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start, u32 cp_nonpixel_size);
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, u64 *src,
+ const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt);
+
+bool qcom_scm_ocmem_lock_available(void);
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+ u32 mode);
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size);
+
+bool qcom_scm_ice_available(void);
+int qcom_scm_ice_invalidate_key(u32 index);
+int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher, u32 data_unit_size);
+bool qcom_scm_has_wrapped_key_support(void);
+int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
+ u8 *sw_secret, size_t sw_secret_size);
+int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size);
+int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
+ u8 *eph_key, size_t eph_key_size);
+int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
+ u8 *lt_key, size_t lt_key_size);
+
+bool qcom_scm_hdcp_available(void);
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+
+int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
+
+int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version);
+int qcom_scm_lmh_profile_change(u32 profile_id);
+bool qcom_scm_lmh_dcvsh_available(void);
+
+/*
+ * Request TZ to program set of access controlled registers necessary
+ * irrespective of any features
+ */
+#define QCOM_SCM_GPU_ALWAYS_EN_REQ BIT(0)
+/*
+ * Request TZ to program BCL id to access controlled register when BCL is
+ * enabled
+ */
+#define QCOM_SCM_GPU_BCL_EN_REQ BIT(1)
+/*
+ * Request TZ to program set of access controlled register for CLX feature
+ * when enabled
+ */
+#define QCOM_SCM_GPU_CLX_EN_REQ BIT(2)
+/*
+ * Request TZ to program tsense ids to access controlled registers for reading
+ * gpu temperature sensors
+ */
+#define QCOM_SCM_GPU_TSENSE_EN_REQ BIT(3)
+
+int qcom_scm_gpu_init_regs(u32 gpu_req);
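+
+/*
+ * Example (an illustrative sketch only): request the always-on register set
+ * together with the BCL registers in one call:
+ *
+ *	ret = qcom_scm_gpu_init_regs(QCOM_SCM_GPU_ALWAYS_EN_REQ |
+ *				     QCOM_SCM_GPU_BCL_EN_REQ);
+ */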
+
+int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
+ u64 ipfn_and_s_perm_flags, u64 size_and_flags,
+ u64 ns_vmids, u64 *handle);
+int qcom_scm_shm_bridge_delete(u64 handle);
+
+#ifdef CONFIG_QCOM_QSEECOM
+
+int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
+int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
+ void *rsp, size_t rsp_size);
+
+#else /* CONFIG_QCOM_QSEECOM */
+
+static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
+{
+ return -EINVAL;
+}
+
+static inline int qcom_scm_qseecom_app_send(u32 app_id,
+ void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_QCOM_QSEECOM */
+
+int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
+ phys_addr_t outbuf, size_t outbuf_size,
+ u64 *result, u64 *response_type);
+int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
+ u64 *result, u64 *response_type);
+
+#endif
diff --git a/include/linux/firmware/qcom/qcom_tzmem.h b/include/linux/firmware/qcom/qcom_tzmem.h
new file mode 100644
index 000000000000..23173e0c3ddd
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_tzmem.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#ifndef __QCOM_TZMEM_H
+#define __QCOM_TZMEM_H
+
+#include <linux/cleanup.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+
+struct device;
+struct qcom_tzmem_pool;
+
+/**
+ * enum qcom_tzmem_policy - Policy for pool growth.
+ */
+enum qcom_tzmem_policy {
+ /**
+ * @QCOM_TZMEM_POLICY_STATIC: Static pool,
+ * never grow above initial size.
+ */
+ QCOM_TZMEM_POLICY_STATIC = 1,
+ /**
+ * @QCOM_TZMEM_POLICY_MULTIPLIER: When out of memory,
+ * add increment * current size of memory.
+ */
+ QCOM_TZMEM_POLICY_MULTIPLIER,
+ /**
+ * @QCOM_TZMEM_POLICY_ON_DEMAND: When out of memory
+ * add as much as is needed until max_size.
+ */
+ QCOM_TZMEM_POLICY_ON_DEMAND,
+};
+
+/**
+ * struct qcom_tzmem_pool_config - TZ memory pool configuration.
+ * @initial_size: Number of bytes to allocate for the pool during its creation.
+ * @policy: Pool size growth policy.
+ * @increment: Used with policies that allow pool growth.
+ * @max_size: Size above which the pool will never grow.
+ */
+struct qcom_tzmem_pool_config {
+ size_t initial_size;
+ enum qcom_tzmem_policy policy;
+ size_t increment;
+ size_t max_size;
+};
+
+struct qcom_tzmem_pool *
+qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config);
+void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool);
+struct qcom_tzmem_pool *
+devm_qcom_tzmem_pool_new(struct device *dev,
+ const struct qcom_tzmem_pool_config *config);
+
+void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp);
+void qcom_tzmem_free(void *ptr);
+
+DEFINE_FREE(qcom_tzmem, void *, if (_T) qcom_tzmem_free(_T))
+
+phys_addr_t qcom_tzmem_to_phys(void *ptr);
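+
+/*
+ * Example (an illustrative sketch only; the pool sizing values are
+ * hypothetical):
+ *
+ *	struct qcom_tzmem_pool_config config = {
+ *		.initial_size = SZ_256K,
+ *		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
+ *		.max_size = SZ_1M,
+ *	};
+ *	struct qcom_tzmem_pool *pool = qcom_tzmem_pool_new(&config);
+ *
+ *	void *buf __free(qcom_tzmem) = qcom_tzmem_alloc(pool, size, GFP_KERNEL);
+ *	phys_addr_t pa = qcom_tzmem_to_phys(buf);
+ */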
+
+#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)
+int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle);
+void qcom_tzmem_shm_bridge_delete(u64 handle);
+#else
+static inline int qcom_tzmem_shm_bridge_create(phys_addr_t paddr,
+ size_t size, u64 *handle)
+{
+ return 0;
+}
+
+static inline void qcom_tzmem_shm_bridge_delete(u64 handle)
+{
+}
+#endif
+
+#endif /* __QCOM_TZMEM_H */
diff --git a/include/linux/firmware/samsung/exynos-acpm-protocol.h b/include/linux/firmware/samsung/exynos-acpm-protocol.h
new file mode 100644
index 000000000000..2091da965a5a
--- /dev/null
+++ b/include/linux/firmware/samsung/exynos-acpm-protocol.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#ifndef __EXYNOS_ACPM_PROTOCOL_H
+#define __EXYNOS_ACPM_PROTOCOL_H
+
+#include <linux/types.h>
+
+struct acpm_handle;
+struct device_node;
+
+struct acpm_dvfs_ops {
+ int (*set_rate)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, unsigned int clk_id,
+ unsigned long rate);
+ unsigned long (*get_rate)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id,
+ unsigned int clk_id);
+};
+
+struct acpm_pmic_ops {
+ int (*read_reg)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf);
+ int (*bulk_read)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf);
+ int (*write_reg)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value);
+ int (*bulk_write)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf);
+ int (*update_reg)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask);
+};
+
+struct acpm_ops {
+ struct acpm_dvfs_ops dvfs_ops;
+ struct acpm_pmic_ops pmic_ops;
+};
+
+/**
+ * struct acpm_handle - Reference to an initialized protocol instance
+ * @ops: supported protocol operations (DVFS and PMIC)
+ */
+struct acpm_handle {
+ struct acpm_ops ops;
+};
+
+struct device;
+
+#if IS_ENABLED(CONFIG_EXYNOS_ACPM_PROTOCOL)
+const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np);
+#else
+
+static inline const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np)
+{
+ return NULL;
+}
+#endif
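+
+/*
+ * Example (an illustrative sketch only; 'acpm_chan_id', 'type', 'reg' and
+ * 'chan' are hypothetical values):
+ *
+ *	const struct acpm_handle *acpm = devm_acpm_get_by_node(dev, np);
+ *	u8 val;
+ *
+ *	ret = acpm->ops.pmic_ops.read_reg(acpm, acpm_chan_id, type, reg,
+ *					  chan, &val);
+ */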
+
+#endif /* __EXYNOS_ACPM_PROTOCOL_H */
diff --git a/include/linux/firmware/thead/thead,th1520-aon.h b/include/linux/firmware/thead/thead,th1520-aon.h
new file mode 100644
index 000000000000..dae132b66873
--- /dev/null
+++ b/include/linux/firmware/thead/thead,th1520-aon.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ */
+
+#ifndef _THEAD_AON_H
+#define _THEAD_AON_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define AON_RPC_MSG_MAGIC (0xef)
+#define TH1520_AON_RPC_VERSION 2
+#define TH1520_AON_RPC_MSG_NUM 7
+
+struct th1520_aon_chan;
+
+enum th1520_aon_rpc_svc {
+ TH1520_AON_RPC_SVC_UNKNOWN = 0,
+ TH1520_AON_RPC_SVC_PM = 1,
+ TH1520_AON_RPC_SVC_MISC = 2,
+ TH1520_AON_RPC_SVC_AVFS = 3,
+ TH1520_AON_RPC_SVC_SYS = 4,
+ TH1520_AON_RPC_SVC_WDG = 5,
+ TH1520_AON_RPC_SVC_LPM = 6,
+ TH1520_AON_RPC_SVC_MAX = 0x3F,
+};
+
+enum th1520_aon_misc_func {
+ TH1520_AON_MISC_FUNC_UNKNOWN = 0,
+ TH1520_AON_MISC_FUNC_SET_CONTROL = 1,
+ TH1520_AON_MISC_FUNC_GET_CONTROL = 2,
+ TH1520_AON_MISC_FUNC_REGDUMP_CFG = 3,
+};
+
+enum th1520_aon_wdg_func {
+ TH1520_AON_WDG_FUNC_UNKNOWN = 0,
+ TH1520_AON_WDG_FUNC_START = 1,
+ TH1520_AON_WDG_FUNC_STOP = 2,
+ TH1520_AON_WDG_FUNC_PING = 3,
+ TH1520_AON_WDG_FUNC_TIMEOUTSET = 4,
+ TH1520_AON_WDG_FUNC_RESTART = 5,
+ TH1520_AON_WDG_FUNC_GET_STATE = 6,
+ TH1520_AON_WDG_FUNC_POWER_OFF = 7,
+ TH1520_AON_WDG_FUNC_AON_WDT_ON = 8,
+ TH1520_AON_WDG_FUNC_AON_WDT_OFF = 9,
+};
+
+enum th1520_aon_sys_func {
+ TH1520_AON_SYS_FUNC_UNKNOWN = 0,
+ TH1520_AON_SYS_FUNC_AON_RESERVE_MEM = 1,
+};
+
+enum th1520_aon_lpm_func {
+ TH1520_AON_LPM_FUNC_UNKNOWN = 0,
+ TH1520_AON_LPM_FUNC_REQUIRE_STR = 1,
+ TH1520_AON_LPM_FUNC_RESUME_STR = 2,
+ TH1520_AON_LPM_FUNC_REQUIRE_STD = 3,
+ TH1520_AON_LPM_FUNC_CPUHP = 4,
+ TH1520_AON_LPM_FUNC_REGDUMP_CFG = 5,
+};
+
+enum th1520_aon_pm_func {
+ TH1520_AON_PM_FUNC_UNKNOWN = 0,
+ TH1520_AON_PM_FUNC_SET_RESOURCE_REGULATOR = 1,
+ TH1520_AON_PM_FUNC_GET_RESOURCE_REGULATOR = 2,
+ TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE = 3,
+ TH1520_AON_PM_FUNC_PWR_SET = 4,
+ TH1520_AON_PM_FUNC_PWR_GET = 5,
+ TH1520_AON_PM_FUNC_CHECK_FAULT = 6,
+ TH1520_AON_PM_FUNC_GET_TEMPERATURE = 7,
+};
+
+struct th1520_aon_rpc_msg_hdr {
+ u8 ver; /* version of msg hdr */
+ u8 size; /* msg size in bytes, including the rpc msg header itself */
+ u8 svc; /* rpc main service id */
+ u8 func; /* rpc sub func id of specific service, sent by caller */
+} __packed __aligned(1);
+
+struct th1520_aon_rpc_ack_common {
+ struct th1520_aon_rpc_msg_hdr hdr;
+ u8 err_code;
+} __packed __aligned(1);
+
+#define RPC_SVC_MSG_TYPE_DATA 0
+#define RPC_SVC_MSG_TYPE_ACK 1
+#define RPC_SVC_MSG_NEED_ACK 0
+#define RPC_SVC_MSG_NO_NEED_ACK 1
+
+#define RPC_GET_VER(MESG) ((MESG)->ver)
+#define RPC_SET_VER(MESG, VER) ((MESG)->ver = (VER))
+#define RPC_GET_SVC_ID(MESG) ((MESG)->svc & 0x3F)
+#define RPC_SET_SVC_ID(MESG, ID) ((MESG)->svc |= 0x3F & (ID))
+#define RPC_GET_SVC_FLAG_MSG_TYPE(MESG) (((MESG)->svc & 0x80) >> 7)
+#define RPC_SET_SVC_FLAG_MSG_TYPE(MESG, TYPE) ((MESG)->svc |= (TYPE) << 7)
+#define RPC_GET_SVC_FLAG_ACK_TYPE(MESG) (((MESG)->svc & 0x40) >> 6)
+#define RPC_SET_SVC_FLAG_ACK_TYPE(MESG, ACK) ((MESG)->svc |= (ACK) << 6)
+
+#define RPC_SET_BE64(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ u64 _set_data = (SET_DATA); \
+ data[_offset + 7] = _set_data & 0xFF; \
+ data[_offset + 6] = (_set_data & 0xFF00) >> 8; \
+ data[_offset + 5] = (_set_data & 0xFF0000) >> 16; \
+ data[_offset + 4] = (_set_data & 0xFF000000) >> 24; \
+ data[_offset + 3] = (_set_data & 0xFF00000000) >> 32; \
+ data[_offset + 2] = (_set_data & 0xFF0000000000) >> 40; \
+ data[_offset + 1] = (_set_data & 0xFF000000000000) >> 48; \
+ data[_offset + 0] = (_set_data & 0xFF00000000000000) >> 56; \
+ } while (0)
+
+#define RPC_SET_BE32(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ u64 _set_data = (SET_DATA); \
+ data[_offset + 3] = (_set_data) & 0xFF; \
+ data[_offset + 2] = (_set_data & 0xFF00) >> 8; \
+ data[_offset + 1] = (_set_data & 0xFF0000) >> 16; \
+ data[_offset + 0] = (_set_data & 0xFF000000) >> 24; \
+ } while (0)
+
+#define RPC_SET_BE16(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ u64 _set_data = (SET_DATA); \
+ data[_offset + 1] = (_set_data) & 0xFF; \
+ data[_offset + 0] = (_set_data & 0xFF00) >> 8; \
+ } while (0)
+
+#define RPC_SET_U8(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ data[OFFSET] = (SET_DATA) & 0xFF; \
+ } while (0)
+
+#define RPC_GET_BE64(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ *(u64 *)(PTR) = \
+ ((u64)data[_offset + 7] | (u64)data[_offset + 6] << 8 | \
+ (u64)data[_offset + 5] << 16 | (u64)data[_offset + 4] << 24 | \
+ (u64)data[_offset + 3] << 32 | (u64)data[_offset + 2] << 40 | \
+ (u64)data[_offset + 1] << 48 | (u64)data[_offset + 0] << 56); \
+ } while (0)
+
+#define RPC_GET_BE32(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ *(u32 *)(PTR) = \
+ (data[_offset + 3] | data[_offset + 2] << 8 | \
+ data[_offset + 1] << 16 | data[_offset + 0] << 24); \
+ } while (0)
+
+#define RPC_GET_BE16(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ *(u16 *)(PTR) = (data[_offset + 1] | data[_offset + 0] << 8); \
+ } while (0)
+
+#define RPC_GET_U8(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ *(u8 *)(PTR) = (data[OFFSET]); \
+ } while (0)
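+
+/*
+ * Example: packing a request header with the accessors above (an
+ * illustrative sketch only; 'msg' is a zeroed message buffer and the
+ * payload offsets 4 and 6 are hypothetical):
+ *
+ *	struct th1520_aon_rpc_msg_hdr *hdr = msg;
+ *
+ *	RPC_SET_VER(hdr, TH1520_AON_RPC_VERSION);
+ *	RPC_SET_SVC_ID(hdr, TH1520_AON_RPC_SVC_PM);
+ *	RPC_SET_SVC_FLAG_MSG_TYPE(hdr, RPC_SVC_MSG_TYPE_DATA);
+ *	RPC_SET_SVC_FLAG_ACK_TYPE(hdr, RPC_SVC_MSG_NEED_ACK);
+ *	RPC_SET_BE16(msg, 4, rsrc);
+ *	RPC_SET_U8(msg, 6, power_on ? 1 : 0);
+ */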
+
+/*
+ * Defines for SC PM Power Mode
+ */
+#define TH1520_AON_PM_PW_MODE_OFF 0 /* Power off */
+#define TH1520_AON_PM_PW_MODE_STBY 1 /* Power in standby */
+#define TH1520_AON_PM_PW_MODE_LP 2 /* Power in low-power */
+#define TH1520_AON_PM_PW_MODE_ON 3 /* Power on */
+
+/*
+ * Defines for AON power islands
+ */
+#define TH1520_AON_AUDIO_PD 0
+#define TH1520_AON_VDEC_PD 1
+#define TH1520_AON_NPU_PD 2
+#define TH1520_AON_VENC_PD 3
+#define TH1520_AON_GPU_PD 4
+#define TH1520_AON_DSP0_PD 5
+#define TH1520_AON_DSP1_PD 6
+
+struct th1520_aon_chan *th1520_aon_init(struct device *dev);
+void th1520_aon_deinit(struct th1520_aon_chan *aon_chan);
+
+int th1520_aon_call_rpc(struct th1520_aon_chan *aon_chan, void *msg);
+int th1520_aon_power_update(struct th1520_aon_chan *aon_chan, u16 rsrc,
+ bool power_on);
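+
+/*
+ * Example (an illustrative sketch only; error handling assumes an
+ * ERR_PTR-style return from th1520_aon_init()):
+ *
+ *	struct th1520_aon_chan *aon_chan = th1520_aon_init(dev);
+ *
+ *	if (!IS_ERR(aon_chan))
+ *		ret = th1520_aon_power_update(aon_chan, TH1520_AON_GPU_PD,
+ *					      true);
+ */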
+
+#endif /* _THEAD_AON_H */
diff --git a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h
new file mode 100644
index 000000000000..931b6c5c72df
--- /dev/null
+++ b/include/linux/firmware/trusted_foundations.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2013, NVIDIA Corporation.
+ */
+
+/*
+ * Support for the Trusted Foundations secure monitor.
+ *
+ * Trusted Foundation comes active on some ARM consumer devices (most
+ * Tegra-based devices sold on the market are concerned). Such devices can only
+ * perform some basic operations, like setting the CPU reset vector, through
+ * SMC calls to the secure monitor. The calls are completely specific to
+ * Trusted Foundations, and do *not* follow the SMC calling convention or the
+ * PSCI standard.
+ */
+
+#ifndef __FIRMWARE_TRUSTED_FOUNDATIONS_H
+#define __FIRMWARE_TRUSTED_FOUNDATIONS_H
+
+#include <linux/printk.h>
+#include <linux/bug.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_PM_MODE_LP0 0
+#define TF_PM_MODE_LP1 1
+#define TF_PM_MODE_LP1_NO_MC_CLK 2
+#define TF_PM_MODE_LP2 3
+#define TF_PM_MODE_LP2_NOFLUSH_L2 4
+#define TF_PM_MODE_NONE 5
+
+struct trusted_foundations_platform_data {
+ unsigned int version_major;
+ unsigned int version_minor;
+};
+
+#if IS_ENABLED(CONFIG_TRUSTED_FOUNDATIONS)
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd);
+void of_register_trusted_foundations(void);
+bool trusted_foundations_registered(void);
+
+#else /* CONFIG_TRUSTED_FOUNDATIONS */
+static inline void tf_dummy_write_sec(unsigned long val, unsigned int reg)
+{
+}
+
+static inline void register_trusted_foundations(
+ struct trusted_foundations_platform_data *pd)
+{
+ /*
+ * If the system requires TF and we cannot provide it, continue booting
+ * but disable features that cannot be provided.
+ */
+ pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
+ pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_CACHE_L2X0)
+ pr_err("L2X0 cache will be kept disabled.\n");
+ outer_cache.write_sec = tf_dummy_write_sec;
+#endif
+#if IS_ENABLED(CONFIG_SMP)
+ setup_max_cpus = 0;
+#endif
+ cpu_idle_poll_ctrl(true);
+}
+
+static inline void of_register_trusted_foundations(void)
+{
+ struct device_node *np = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+
+ if (!np)
+ return;
+ of_node_put(np);
+ /*
+ * If we find the target should enable TF but does not support it,
+ * fail as the system won't be able to do much anyway
+ */
+ register_trusted_foundations(NULL);
+}
+
+static inline bool trusted_foundations_registered(void)
+{
+ return false;
+}
+#endif /* CONFIG_TRUSTED_FOUNDATIONS */
+
+#endif
diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h
new file mode 100644
index 000000000000..645dd34155e6
--- /dev/null
+++ b/include/linux/firmware/xlnx-event-manager.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Event Management Driver
+ *
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _FIRMWARE_XLNX_EVENT_MANAGER_H_
+#define _FIRMWARE_XLNX_EVENT_MANAGER_H_
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define CB_MAX_PAYLOAD_SIZE (4U) /* In payload maximum 32 bytes */
+
+#define EVENT_SUBSYSTEM_RESTART (4U)
+
+#define PM_DEV_ACPU_0_0 (0x1810c0afU)
+#define PM_DEV_ACPU_0 (0x1810c003U)
+
+/************************** Exported Function *****************************/
+
+typedef void (*event_cb_func_t)(const u32 *payload, void *data);
+
+#if IS_REACHABLE(CONFIG_XLNX_EVENT_MANAGER)
+int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data);
+
+int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data);
+#else
+static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+
+static inline int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+#endif
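+
+/*
+ * Example: registering a callback for a Versal error event (an illustrative
+ * sketch only; 'xlnx_event_cb' and 'priv' are hypothetical):
+ *
+ *	static void xlnx_event_cb(const u32 *payload, void *data)
+ *	{
+ *		...
+ *	}
+ *
+ *	ret = xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
+ *				  XPM_EVENT_ERROR_MASK_DDRMC_CR, false,
+ *				  xlnx_event_cb, priv);
+ */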
+
+#endif /* _FIRMWARE_XLNX_EVENT_MANAGER_H_ */
diff --git a/include/linux/firmware/xlnx-zynqmp-ufs.h b/include/linux/firmware/xlnx-zynqmp-ufs.h
new file mode 100644
index 000000000000..d3538dd5822a
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp-ufs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Firmware layer for UFS APIs.
+ *
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __FIRMWARE_XLNX_ZYNQMP_UFS_H__
+#define __FIRMWARE_XLNX_ZYNQMP_UFS_H__
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready);
+int zynqmp_pm_is_sram_init_done(bool *is_done);
+int zynqmp_pm_set_sram_bypass(void);
+int zynqmp_pm_get_ufs_calibration_values(u32 *val);
+#else
+static inline int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sram_bypass(void)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __FIRMWARE_XLNX_ZYNQMP_UFS_H__ */
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
new file mode 100644
index 000000000000..15fdbd089bbf
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -0,0 +1,970 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2021 Xilinx
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
+ *
+ * Michal Simek <michal.simek@amd.com>
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#ifndef __FIRMWARE_ZYNQMP_H__
+#define __FIRMWARE_ZYNQMP_H__
+#include <linux/types.h>
+
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp-ufs.h>
+
+#define ZYNQMP_PM_VERSION_MAJOR 1
+#define ZYNQMP_PM_VERSION_MINOR 0
+
+#define ZYNQMP_PM_VERSION ((ZYNQMP_PM_VERSION_MAJOR << 16) | \
+ ZYNQMP_PM_VERSION_MINOR)
+
+#define ZYNQMP_TZ_VERSION_MAJOR 1
+#define ZYNQMP_TZ_VERSION_MINOR 0
+
+#define ZYNQMP_TZ_VERSION ((ZYNQMP_TZ_VERSION_MAJOR << 16) | \
+ ZYNQMP_TZ_VERSION_MINOR)
+
+/* SMC SIP service Call Function Identifier Prefix */
+#define PM_SIP_SVC 0xC2000000
+
+/* SMC function ID to get SiP SVC version */
+#define GET_SIP_SVC_VERSION (0x8200ff03U)
+
+/* SiP Service Calls version numbers */
+#define SIP_SVC_VERSION_MAJOR (0U)
+#define SIP_SVC_VERSION_MINOR (2U)
+
+#define SIP_SVC_PASSTHROUGH_VERSION ((SIP_SVC_VERSION_MAJOR << 16) | \
+ SIP_SVC_VERSION_MINOR)
+
+/* Fixed ID for FW specific APIs */
+#define PASS_THROUGH_FW_CMD_ID GENMASK(11, 0)
+
+/* PM API versions */
+#define PM_API_VERSION_1 1
+#define PM_API_VERSION_2 2
+
+#define PM_PINCTRL_PARAM_SET_VERSION 2
+
+/* Family codes */
+#define PM_ZYNQMP_FAMILY_CODE 0x1 /* ZynqMP family code */
+#define PM_VERSAL_FAMILY_CODE 0x2 /* Versal family code */
+#define PM_VERSAL_NET_FAMILY_CODE 0x3 /* Versal NET family code */
+
+#define API_ID_MASK GENMASK(7, 0)
+#define MODULE_ID_MASK GENMASK(11, 8)
+#define PLM_MODULE_ID_MASK GENMASK(15, 8)
+
+/* Firmware feature check version mask */
+#define FIRMWARE_VERSION_MASK 0xFFFFU
+
+/* ATF only commands */
+#define TF_A_PM_REGISTER_SGI 0xa04
+#define PM_GET_TRUSTZONE_VERSION 0xa03
+#define PM_SET_SUSPEND_MODE 0xa02
+#define GET_CALLBACK_DATA 0xa01
+
+/* Number of 32bits values in payload */
+#define PAYLOAD_ARG_CNT 7U
+
+/* Number of 64bits arguments for SMC call */
+#define SMC_ARG_CNT_64 8U
+
+/* Number of 32bits arguments for SMC call */
+#define SMC_ARG_CNT_32 13U
+
+/* Number of arguments for a callback */
+#define CB_ARG_CNT 4
+
+/* Payload size (consists of callback API ID + arguments) */
+#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1)
+
+#define ZYNQMP_PM_MAX_QOS 100U
+
+#define GSS_NUM_REGS (4)
+
+/* Node capabilities */
+#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
+#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U
+#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
+#define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U
+
+/* Loader commands */
+#define PM_LOAD_PDI 0x701
+#define PDI_SRC_DDR 0xF
+
+/*
+ * Firmware FPGA Manager flags
+ * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
+ * XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration
+ */
+#define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U
+#define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0)
+
+/* FPGA Status Reg */
+#define XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET 7U
+#define XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG 0U
+
+/*
+ * Node IDs for the Error Events.
+ */
+#define VERSAL_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR1 (0x28108000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR3 (0x28108000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR1 (0x2810C000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR2 (0x28110000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR3 (0x28114000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR4 (0x28118000U)
+
+/* ZynqMP SD tap delay tuning */
+#define SD_ITAPDLY 0xFF180314
+#define SD_OTAPDLYSEL 0xFF180318
+
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_CR: Error event mask for DDRMC MC Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_CR BIT(18)
+
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_NCR: Error event mask for DDRMC MC Non-Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_NCR BIT(19)
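+
+/* Error event masks for NOC Non-Correctable and Correctable errors. */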
+#define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13)
+#define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12)
+
+enum pm_module_id {
+ PM_MODULE_ID = 0x0,
+ XPM_MODULE_ID = 0x2,
+ XSEM_MODULE_ID = 0x3,
+ TF_A_MODULE_ID = 0xa,
+};
+
+enum pm_api_cb_id {
+ PM_INIT_SUSPEND_CB = 30,
+ PM_ACKNOWLEDGE_CB = 31,
+ PM_NOTIFY_CB = 32,
+};
+
+enum pm_api_id {
+ PM_API_FEATURES = 0,
+ PM_GET_API_VERSION = 1,
+ PM_GET_NODE_STATUS = 3,
+ PM_REGISTER_NOTIFIER = 5,
+ PM_FORCE_POWERDOWN = 8,
+ PM_REQUEST_WAKEUP = 10,
+ PM_SYSTEM_SHUTDOWN = 12,
+ PM_REQUEST_NODE = 13,
+ PM_RELEASE_NODE = 14,
+ PM_SET_REQUIREMENT = 15,
+ PM_RESET_ASSERT = 17,
+ PM_RESET_GET_STATUS = 18,
+ PM_MMIO_WRITE = 19,
+ PM_MMIO_READ = 20,
+ PM_PM_INIT_FINALIZE = 21,
+ PM_FPGA_LOAD = 22,
+ PM_FPGA_GET_STATUS = 23,
+ PM_GET_CHIPID = 24,
+ PM_SECURE_SHA = 26,
+ PM_PINCTRL_REQUEST = 28,
+ PM_PINCTRL_RELEASE = 29,
+ PM_PINCTRL_SET_FUNCTION = 31,
+ PM_PINCTRL_CONFIG_PARAM_GET = 32,
+ PM_PINCTRL_CONFIG_PARAM_SET = 33,
+ PM_IOCTL = 34,
+ PM_QUERY_DATA = 35,
+ PM_CLOCK_ENABLE = 36,
+ PM_CLOCK_DISABLE = 37,
+ PM_CLOCK_GETSTATE = 38,
+ PM_CLOCK_SETDIVIDER = 39,
+ PM_CLOCK_GETDIVIDER = 40,
+ PM_CLOCK_SETPARENT = 43,
+ PM_CLOCK_GETPARENT = 44,
+ PM_FPGA_READ = 46,
+ PM_SECURE_AES = 47,
+ PM_EFUSE_ACCESS = 53,
+ PM_FEATURE_CHECK = 63,
+};
+
+/* PMU-FW return status codes */
+enum pm_ret_status {
+ XST_PM_SUCCESS = 0,
+ XST_PM_INVALID_VERSION = 4,
+ XST_PM_NO_FEATURE = 19,
+ XST_PM_INVALID_CRC = 301,
+ XST_PM_INTERNAL = 2000,
+ XST_PM_CONFLICT = 2001,
+ XST_PM_NO_ACCESS = 2002,
+ XST_PM_INVALID_NODE = 2003,
+ XST_PM_DOUBLE_REQ = 2004,
+ XST_PM_ABORT_SUSPEND = 2005,
+ XST_PM_MULT_USER = 2008,
+};
+
+enum pm_ioctl_id {
+ IOCTL_GET_RPU_OPER_MODE = 0,
+ IOCTL_SET_RPU_OPER_MODE = 1,
+ IOCTL_RPU_BOOT_ADDR_CONFIG = 2,
+ IOCTL_TCM_COMB_CONFIG = 3,
+ IOCTL_SET_TAPDELAY_BYPASS = 4,
+ IOCTL_SD_DLL_RESET = 6,
+ IOCTL_SET_SD_TAPDELAY = 7,
+ IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_GET_PLL_FRAC_MODE = 9,
+ IOCTL_SET_PLL_FRAC_DATA = 10,
+ IOCTL_GET_PLL_FRAC_DATA = 11,
+ IOCTL_WRITE_GGS = 12,
+ IOCTL_READ_GGS = 13,
+ IOCTL_WRITE_PGGS = 14,
+ IOCTL_READ_PGGS = 15,
+ /* Set healthy bit value */
+ IOCTL_SET_BOOT_HEALTH_STATUS = 17,
+ IOCTL_OSPI_MUX_SELECT = 21,
+ /* Register SGI to ATF */
+ IOCTL_REGISTER_SGI = 25,
+ /* Runtime feature configuration */
+ IOCTL_SET_FEATURE_CONFIG = 26,
+ IOCTL_GET_FEATURE_CONFIG = 27,
+ /* IOCTL for Secure Read/Write Interface */
+ IOCTL_READ_REG = 28,
+ IOCTL_MASK_WRITE_REG = 29,
+ /* Dynamic SD/GEM configuration */
+ IOCTL_SET_SD_CONFIG = 30,
+ IOCTL_SET_GEM_CONFIG = 31,
+ /* IOCTL to get default/current QoS */
+ IOCTL_GET_QOS = 34,
+};
+
+enum pm_query_id {
+ PM_QID_INVALID = 0,
+ PM_QID_CLOCK_GET_NAME = 1,
+ PM_QID_CLOCK_GET_TOPOLOGY = 2,
+ PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3,
+ PM_QID_CLOCK_GET_PARENTS = 4,
+ PM_QID_CLOCK_GET_ATTRIBUTES = 5,
+ PM_QID_PINCTRL_GET_NUM_PINS = 6,
+ PM_QID_PINCTRL_GET_NUM_FUNCTIONS = 7,
+ PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS = 8,
+ PM_QID_PINCTRL_GET_FUNCTION_NAME = 9,
+ PM_QID_PINCTRL_GET_FUNCTION_GROUPS = 10,
+ PM_QID_PINCTRL_GET_PIN_GROUPS = 11,
+ PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
+ PM_QID_CLOCK_GET_MAX_DIVISOR = 13,
+ PM_QID_PINCTRL_GET_ATTRIBUTES = 15,
+};
+
+enum rpu_oper_mode {
+ PM_RPU_MODE_LOCKSTEP = 0,
+ PM_RPU_MODE_SPLIT = 1,
+};
+
+enum rpu_boot_mem {
+ PM_RPU_BOOTMEM_LOVEC = 0,
+ PM_RPU_BOOTMEM_HIVEC = 1,
+};
+
+enum rpu_tcm_comb {
+ PM_RPU_TCM_SPLIT = 0,
+ PM_RPU_TCM_COMB = 1,
+};
+
+enum zynqmp_pm_reset_action {
+ PM_RESET_ACTION_RELEASE = 0,
+ PM_RESET_ACTION_ASSERT = 1,
+ PM_RESET_ACTION_PULSE = 2,
+};
+
+enum zynqmp_pm_reset {
+ ZYNQMP_PM_RESET_START = 1000,
+ ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START,
+ ZYNQMP_PM_RESET_PCIE_BRIDGE = 1001,
+ ZYNQMP_PM_RESET_PCIE_CTRL = 1002,
+ ZYNQMP_PM_RESET_DP = 1003,
+ ZYNQMP_PM_RESET_SWDT_CRF = 1004,
+ ZYNQMP_PM_RESET_AFI_FM5 = 1005,
+ ZYNQMP_PM_RESET_AFI_FM4 = 1006,
+ ZYNQMP_PM_RESET_AFI_FM3 = 1007,
+ ZYNQMP_PM_RESET_AFI_FM2 = 1008,
+ ZYNQMP_PM_RESET_AFI_FM1 = 1009,
+ ZYNQMP_PM_RESET_AFI_FM0 = 1010,
+ ZYNQMP_PM_RESET_GDMA = 1011,
+ ZYNQMP_PM_RESET_GPU_PP1 = 1012,
+ ZYNQMP_PM_RESET_GPU_PP0 = 1013,
+ ZYNQMP_PM_RESET_GPU = 1014,
+ ZYNQMP_PM_RESET_GT = 1015,
+ ZYNQMP_PM_RESET_SATA = 1016,
+ ZYNQMP_PM_RESET_ACPU3_PWRON = 1017,
+ ZYNQMP_PM_RESET_ACPU2_PWRON = 1018,
+ ZYNQMP_PM_RESET_ACPU1_PWRON = 1019,
+ ZYNQMP_PM_RESET_ACPU0_PWRON = 1020,
+ ZYNQMP_PM_RESET_APU_L2 = 1021,
+ ZYNQMP_PM_RESET_ACPU3 = 1022,
+ ZYNQMP_PM_RESET_ACPU2 = 1023,
+ ZYNQMP_PM_RESET_ACPU1 = 1024,
+ ZYNQMP_PM_RESET_ACPU0 = 1025,
+ ZYNQMP_PM_RESET_DDR = 1026,
+ ZYNQMP_PM_RESET_APM_FPD = 1027,
+ ZYNQMP_PM_RESET_SOFT = 1028,
+ ZYNQMP_PM_RESET_GEM0 = 1029,
+ ZYNQMP_PM_RESET_GEM1 = 1030,
+ ZYNQMP_PM_RESET_GEM2 = 1031,
+ ZYNQMP_PM_RESET_GEM3 = 1032,
+ ZYNQMP_PM_RESET_QSPI = 1033,
+ ZYNQMP_PM_RESET_UART0 = 1034,
+ ZYNQMP_PM_RESET_UART1 = 1035,
+ ZYNQMP_PM_RESET_SPI0 = 1036,
+ ZYNQMP_PM_RESET_SPI1 = 1037,
+ ZYNQMP_PM_RESET_SDIO0 = 1038,
+ ZYNQMP_PM_RESET_SDIO1 = 1039,
+ ZYNQMP_PM_RESET_CAN0 = 1040,
+ ZYNQMP_PM_RESET_CAN1 = 1041,
+ ZYNQMP_PM_RESET_I2C0 = 1042,
+ ZYNQMP_PM_RESET_I2C1 = 1043,
+ ZYNQMP_PM_RESET_TTC0 = 1044,
+ ZYNQMP_PM_RESET_TTC1 = 1045,
+ ZYNQMP_PM_RESET_TTC2 = 1046,
+ ZYNQMP_PM_RESET_TTC3 = 1047,
+ ZYNQMP_PM_RESET_SWDT_CRL = 1048,
+ ZYNQMP_PM_RESET_NAND = 1049,
+ ZYNQMP_PM_RESET_ADMA = 1050,
+ ZYNQMP_PM_RESET_GPIO = 1051,
+ ZYNQMP_PM_RESET_IOU_CC = 1052,
+ ZYNQMP_PM_RESET_TIMESTAMP = 1053,
+ ZYNQMP_PM_RESET_RPU_R50 = 1054,
+ ZYNQMP_PM_RESET_RPU_R51 = 1055,
+ ZYNQMP_PM_RESET_RPU_AMBA = 1056,
+ ZYNQMP_PM_RESET_OCM = 1057,
+ ZYNQMP_PM_RESET_RPU_PGE = 1058,
+ ZYNQMP_PM_RESET_USB0_CORERESET = 1059,
+ ZYNQMP_PM_RESET_USB1_CORERESET = 1060,
+ ZYNQMP_PM_RESET_USB0_HIBERRESET = 1061,
+ ZYNQMP_PM_RESET_USB1_HIBERRESET = 1062,
+ ZYNQMP_PM_RESET_USB0_APB = 1063,
+ ZYNQMP_PM_RESET_USB1_APB = 1064,
+ ZYNQMP_PM_RESET_IPI = 1065,
+ ZYNQMP_PM_RESET_APM_LPD = 1066,
+ ZYNQMP_PM_RESET_RTC = 1067,
+ ZYNQMP_PM_RESET_SYSMON = 1068,
+ ZYNQMP_PM_RESET_AFI_FM6 = 1069,
+ ZYNQMP_PM_RESET_LPD_SWDT = 1070,
+ ZYNQMP_PM_RESET_FPD = 1071,
+ ZYNQMP_PM_RESET_RPU_DBG1 = 1072,
+ ZYNQMP_PM_RESET_RPU_DBG0 = 1073,
+ ZYNQMP_PM_RESET_DBG_LPD = 1074,
+ ZYNQMP_PM_RESET_DBG_FPD = 1075,
+ ZYNQMP_PM_RESET_APLL = 1076,
+ ZYNQMP_PM_RESET_DPLL = 1077,
+ ZYNQMP_PM_RESET_VPLL = 1078,
+ ZYNQMP_PM_RESET_IOPLL = 1079,
+ ZYNQMP_PM_RESET_RPLL = 1080,
+ ZYNQMP_PM_RESET_GPO3_PL_0 = 1081,
+ ZYNQMP_PM_RESET_GPO3_PL_1 = 1082,
+ ZYNQMP_PM_RESET_GPO3_PL_2 = 1083,
+ ZYNQMP_PM_RESET_GPO3_PL_3 = 1084,
+ ZYNQMP_PM_RESET_GPO3_PL_4 = 1085,
+ ZYNQMP_PM_RESET_GPO3_PL_5 = 1086,
+ ZYNQMP_PM_RESET_GPO3_PL_6 = 1087,
+ ZYNQMP_PM_RESET_GPO3_PL_7 = 1088,
+ ZYNQMP_PM_RESET_GPO3_PL_8 = 1089,
+ ZYNQMP_PM_RESET_GPO3_PL_9 = 1090,
+ ZYNQMP_PM_RESET_GPO3_PL_10 = 1091,
+ ZYNQMP_PM_RESET_GPO3_PL_11 = 1092,
+ ZYNQMP_PM_RESET_GPO3_PL_12 = 1093,
+ ZYNQMP_PM_RESET_GPO3_PL_13 = 1094,
+ ZYNQMP_PM_RESET_GPO3_PL_14 = 1095,
+ ZYNQMP_PM_RESET_GPO3_PL_15 = 1096,
+ ZYNQMP_PM_RESET_GPO3_PL_16 = 1097,
+ ZYNQMP_PM_RESET_GPO3_PL_17 = 1098,
+ ZYNQMP_PM_RESET_GPO3_PL_18 = 1099,
+ ZYNQMP_PM_RESET_GPO3_PL_19 = 1100,
+ ZYNQMP_PM_RESET_GPO3_PL_20 = 1101,
+ ZYNQMP_PM_RESET_GPO3_PL_21 = 1102,
+ ZYNQMP_PM_RESET_GPO3_PL_22 = 1103,
+ ZYNQMP_PM_RESET_GPO3_PL_23 = 1104,
+ ZYNQMP_PM_RESET_GPO3_PL_24 = 1105,
+ ZYNQMP_PM_RESET_GPO3_PL_25 = 1106,
+ ZYNQMP_PM_RESET_GPO3_PL_26 = 1107,
+ ZYNQMP_PM_RESET_GPO3_PL_27 = 1108,
+ ZYNQMP_PM_RESET_GPO3_PL_28 = 1109,
+ ZYNQMP_PM_RESET_GPO3_PL_29 = 1110,
+ ZYNQMP_PM_RESET_GPO3_PL_30 = 1111,
+ ZYNQMP_PM_RESET_GPO3_PL_31 = 1112,
+ ZYNQMP_PM_RESET_RPU_LS = 1113,
+ ZYNQMP_PM_RESET_PS_ONLY = 1114,
+ ZYNQMP_PM_RESET_PL = 1115,
+ ZYNQMP_PM_RESET_PS_PL0 = 1116,
+ ZYNQMP_PM_RESET_PS_PL1 = 1117,
+ ZYNQMP_PM_RESET_PS_PL2 = 1118,
+ ZYNQMP_PM_RESET_PS_PL3 = 1119,
+ ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
+};
+
+enum zynqmp_pm_suspend_reason {
+ SUSPEND_POWER_REQUEST = 201,
+ SUSPEND_ALERT = 202,
+ SUSPEND_SYSTEM_SHUTDOWN = 203,
+};
+
+enum zynqmp_pm_request_ack {
+ ZYNQMP_PM_REQUEST_ACK_NO = 1,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING = 2,
+ ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING = 3,
+};
+
+enum pm_node_id {
+ NODE_SD_0 = 39,
+ NODE_SD_1 = 40,
+};
+
+enum tap_delay_type {
+ PM_TAPDELAY_INPUT = 0,
+ PM_TAPDELAY_OUTPUT = 1,
+};
+
+enum dll_reset_type {
+ PM_DLL_RESET_ASSERT = 0,
+ PM_DLL_RESET_RELEASE = 1,
+ PM_DLL_RESET_PULSE = 2,
+};
+
+enum pm_pinctrl_config_param {
+ PM_PINCTRL_CONFIG_SLEW_RATE = 0,
+ PM_PINCTRL_CONFIG_BIAS_STATUS = 1,
+ PM_PINCTRL_CONFIG_PULL_CTRL = 2,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS = 3,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH = 4,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS = 5,
+ PM_PINCTRL_CONFIG_TRI_STATE = 6,
+ PM_PINCTRL_CONFIG_MAX = 7,
+};
+
+enum pm_pinctrl_slew_rate {
+ PM_PINCTRL_SLEW_RATE_FAST = 0,
+ PM_PINCTRL_SLEW_RATE_SLOW = 1,
+};
+
+enum pm_pinctrl_bias_status {
+ PM_PINCTRL_BIAS_DISABLE = 0,
+ PM_PINCTRL_BIAS_ENABLE = 1,
+};
+
+enum pm_pinctrl_pull_ctrl {
+ PM_PINCTRL_BIAS_PULL_DOWN = 0,
+ PM_PINCTRL_BIAS_PULL_UP = 1,
+};
+
+enum pm_pinctrl_schmitt_cmos {
+ PM_PINCTRL_INPUT_TYPE_CMOS = 0,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT = 1,
+};
+
+enum pm_pinctrl_drive_strength {
+ PM_PINCTRL_DRIVE_STRENGTH_2MA = 0,
+ PM_PINCTRL_DRIVE_STRENGTH_4MA = 1,
+ PM_PINCTRL_DRIVE_STRENGTH_8MA = 2,
+ PM_PINCTRL_DRIVE_STRENGTH_12MA = 3,
+};
+
+enum pm_pinctrl_tri_state {
+ PM_PINCTRL_TRI_STATE_DISABLE = 0,
+ PM_PINCTRL_TRI_STATE_ENABLE = 1,
+};
+
+enum zynqmp_pm_shutdown_type {
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY = 2,
+};
+
+enum zynqmp_pm_shutdown_subtype {
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM = 0,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY = 1,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2,
+};
+
+enum tap_delay_signal_type {
+ PM_TAPDELAY_NAND_DQS_IN = 0,
+ PM_TAPDELAY_NAND_DQS_OUT = 1,
+ PM_TAPDELAY_QSPI = 2,
+ PM_TAPDELAY_MAX = 3,
+};
+
+enum tap_delay_bypass_ctrl {
+ PM_TAPDELAY_BYPASS_DISABLE = 0,
+ PM_TAPDELAY_BYPASS_ENABLE = 1,
+};
+
+enum ospi_mux_select_type {
+ PM_OSPI_MUX_SEL_DMA = 0,
+ PM_OSPI_MUX_SEL_LINEAR = 1,
+};
+
+enum pm_feature_config_id {
+ PM_FEATURE_INVALID = 0,
+ PM_FEATURE_OVERTEMP_STATUS = 1,
+ PM_FEATURE_OVERTEMP_VALUE = 2,
+ PM_FEATURE_EXTWDT_STATUS = 3,
+ PM_FEATURE_EXTWDT_VALUE = 4,
+};
+
+/**
+ * enum pm_sd_config_type - PM SD configuration.
+ * @SD_CONFIG_EMMC_SEL: To set SD_EMMC_SEL in CTRL_REG_SD and SD_SLOTTYPE
+ * @SD_CONFIG_BASECLK: To set SD_BASECLK in SD_CONFIG_REG1
+ * @SD_CONFIG_8BIT: To set SD_8BIT in SD_CONFIG_REG2
+ * @SD_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_sd_config_type {
+ SD_CONFIG_EMMC_SEL = 1,
+ SD_CONFIG_BASECLK = 2,
+ SD_CONFIG_8BIT = 3,
+ SD_CONFIG_FIXED = 4,
+};
+
+/**
+ * enum pm_gem_config_type - PM GEM configuration.
+ * @GEM_CONFIG_SGMII_MODE: To set GEM_SGMII_MODE in GEM_CLK_CTRL register
+ * @GEM_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_gem_config_type {
+ GEM_CONFIG_SGMII_MODE = 1,
+ GEM_CONFIG_FIXED = 2,
+};
+
+/**
+ * struct zynqmp_pm_query_data - PM query data
+ * @qid: query ID
+ * @arg1: Argument 1 of query data
+ * @arg2: Argument 2 of query data
+ * @arg3: Argument 3 of query data
+ */
+struct zynqmp_pm_query_data {
+ u32 qid;
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+};
+
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
+int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_get_api_version(u32 *version);
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_get_family_info(u32 *family);
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
+int zynqmp_pm_clock_enable(u32 clock_id);
+int zynqmp_pm_clock_disable(u32 clock_id);
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode);
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data);
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select);
+int zynqmp_pm_reset_assert(const u32 reset,
+ const enum zynqmp_pm_reset_action assert_flag);
+int zynqmp_pm_reset_get_status(const u32 reset, u32 *status);
+unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode);
+int zynqmp_pm_bootmode_write(u32 ps_mode);
+int zynqmp_pm_set_suspend_mode(u32 mode);
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_release_node(const u32 node);
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_efuse_access(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
+int zynqmp_pm_fpga_get_status(u32 *value);
+int zynqmp_pm_fpga_get_config_status(u32 *value);
+int zynqmp_pm_write_ggs(u32 index, u32 value);
+int zynqmp_pm_read_ggs(u32 index, u32 *value);
+int zynqmp_pm_write_pggs(u32 index, u32 value);
+int zynqmp_pm_read_pggs(u32 index, u32 *value);
+int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value);
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
+int zynqmp_pm_set_boot_health_status(u32 value);
+int zynqmp_pm_pinctrl_request(const u32 pin);
+int zynqmp_pm_pinctrl_release(const u32 pin);
+int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id);
+int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value);
+int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value);
+int zynqmp_pm_load_pdi(const u32 src, const u64 address);
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable);
+int zynqmp_pm_feature(const u32 api_id);
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
+int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
+int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value);
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
+ u32 mask, u32 value);
+int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset);
+int zynqmp_pm_force_pwrdwn(const u32 target,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_request_wake(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode);
+int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode);
+int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode);
+int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage);
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value);
+#else
+static inline int zynqmp_pm_get_api_version(u32 *version)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_family_info(u32 *family)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
+ u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_enable(u32 clock_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_disable(u32 clock_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_reset_assert(const u32 reset,
+ const enum zynqmp_pm_reset_action assert_flag)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_reset_get_status(const u32 reset, u32 *status)
+{
+ return -ENODEV;
+}
+
+static inline unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_bootmode_write(u32 ps_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_release_node(const u32 node)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_requirement(const u32 node,
+ const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_efuse_access(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_fpga_get_config_status(u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_feature(const u32 api_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_feature_config(enum pm_feature_config_id id,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
+ u32 *payload)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_force_pwrdwn(const u32 target,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_request_wake(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
+ u32 mask, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements,
+ u32 *const usage)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sd_config(u32 node,
+ enum pm_sd_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_gem_config(u32 node,
+ enum pm_gem_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+#endif
+
+#endif /* __FIRMWARE_ZYNQMP_H__ */
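
For illustration, a minimal consumer of the PM interface above might look
like the following sketch. It is not part of the patch: EXAMPLE_CLK_ID is
an invented placeholder, and the include path assumes the header is
installed as linux/firmware/xlnx-zynqmp.h. The -ENODEV stubs mean the same
code builds whether or not CONFIG_ZYNQMP_FIRMWARE is reachable.

#include <linux/firmware/xlnx-zynqmp.h>

#define EXAMPLE_CLK_ID	0x10	/* placeholder clock ID, not a real one */

static int example_zynqmp_init(void)
{
	u32 api_version, idcode, chip_version;
	int ret;

	ret = zynqmp_pm_get_api_version(&api_version);
	if (ret)
		return ret;	/* -ENODEV when the firmware driver is absent */

	ret = zynqmp_pm_get_chipid(&idcode, &chip_version);
	if (ret)
		return ret;

	/* Gate a device clock through the firmware interface. */
	return zynqmp_pm_clock_enable(EXAMPLE_CLK_ID);
}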
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
index d4686fe1cac7..e485fb0c1201 100644
--- a/include/linux/fixp-arith.h
+++ b/include/linux/fixp-arith.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _FIXP_ARITH_H
#define _FIXP_ARITH_H
+#include <linux/bug.h>
#include <linux/math64.h>
/*
@@ -11,19 +13,6 @@
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so by
* e-mail - mail your message to <johann.deneux@gmail.com>
@@ -153,4 +142,23 @@ static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
#define fixp_cos32_rad(rad, twopi) \
fixp_sin32_rad(rad + twopi / 4, twopi)
+/**
+ * fixp_linear_interpolate() - interpolates a value from two known points
+ *
+ * @x0: x value of point 0
+ * @y0: y value of point 0
+ * @x1: x value of point 1
+ * @y1: y value of point 1
+ * @x: the x value at which to interpolate
+ */
+static inline int fixp_linear_interpolate(int x0, int y0, int x1, int y1, int x)
+{
+ if (y0 == y1 || x == x0)
+ return y0;
+ if (x1 == x0 || x == x1)
+ return y1;
+
+ return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
+}
+
#endif
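
As a usage sketch for the interpolation helper added above (the
calibration points are invented; note that the integer division
truncates toward zero):

#include <linux/fixp-arith.h>

/* Map a 12-bit ADC code onto 0..3300 mV between two calibration points. */
static int example_adc_to_mv(int raw)
{
	return fixp_linear_interpolate(0, 0, 4095, 3300, raw);
}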
diff --git a/include/linux/flat.h b/include/linux/flat.h
index 7d542dfd0def..83977c0ce3de 100644
--- a/include/linux/flat.h
+++ b/include/linux/flat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2002-2003 David McCullough <davidm@snapgear.com>
* Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
@@ -9,8 +10,41 @@
#ifndef _LINUX_FLAT_H
#define _LINUX_FLAT_H
-#include <uapi/linux/flat.h>
-#include <asm/flat.h>
+#define FLAT_VERSION 0x00000004L
+
+/*
+ * To make everything easier to port and manage cross platform
+ * development, all fields are in network byte order.
+ */
+
+struct flat_hdr {
+ char magic[4];
+ __be32 rev; /* version (as above) */
+ __be32 entry; /* Offset of first executable instruction
+ within text segment from beginning of file */
+ __be32 data_start; /* Offset of data segment from beginning of
+ file */
+ __be32 data_end; /* Offset of end of data segment from beginning
+ of file */
+ __be32 bss_end; /* Offset of end of bss segment from beginning
+ of file */
+
+ /* (It is assumed that data_end through bss_end forms the bss segment.) */
+
+ __be32 stack_size; /* Size of stack, in bytes */
+ __be32 reloc_start; /* Offset of relocation records from beginning of
+ file */
+ __be32 reloc_count; /* Number of relocation records */
+ __be32 flags;
+ __be32 build_date; /* When the program/library was built */
+ __u32 filler[5]; /* Reserved, set to zero */
+};
+
+#define FLAT_FLAG_RAM 0x0001 /* load program entirely into RAM */
+#define FLAT_FLAG_GOTPIC 0x0002 /* program is PIC with GOT */
+#define FLAT_FLAG_GZIP 0x0004 /* all but the header is compressed */
+#define FLAT_FLAG_GZDATA 0x0008 /* only data/relocs are compressed (for XIP) */
+#define FLAT_FLAG_KTRACE 0x0010 /* output useful kernel trace for debugging */
/*
* While it would be nice to keep this header clean, users of older
@@ -21,28 +55,21 @@
* with the format above, except to fix bugs with old format support.
*/
-#include <asm/byteorder.h>
-
#define OLD_FLAT_VERSION 0x00000002L
#define OLD_FLAT_RELOC_TYPE_TEXT 0
#define OLD_FLAT_RELOC_TYPE_DATA 1
#define OLD_FLAT_RELOC_TYPE_BSS 2
typedef union {
- unsigned long value;
+ u32 value;
struct {
-# if defined(mc68000) && !defined(CONFIG_COLDFIRE)
- signed long offset : 30;
- unsigned long type : 2;
-# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
+#if defined(__LITTLE_ENDIAN_BITFIELD) || \
+ (defined(mc68000) && !defined(CONFIG_COLDFIRE))
+ s32 offset : 30;
+ u32 type : 2;
# elif defined(__BIG_ENDIAN_BITFIELD)
- unsigned long type : 2;
- signed long offset : 30;
-# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
-# elif defined(__LITTLE_ENDIAN_BITFIELD)
- signed long offset : 30;
- unsigned long type : 2;
-# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
+ u32 type : 2;
+ s32 offset : 30;
# else
# error "Unknown bitfield order for flat files."
# endif
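
Since every flat_hdr field is stored big-endian, a loader must convert
fields before using them. A hedged validity-check sketch (the helper is
hypothetical; "bFLT" is the format's conventional magic):

#include <linux/errno.h>
#include <linux/flat.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static int example_check_flat_hdr(const struct flat_hdr *hdr)
{
	if (memcmp(hdr->magic, "bFLT", 4))
		return -EINVAL;
	if (be32_to_cpu(hdr->rev) != FLAT_VERSION &&
	    be32_to_cpu(hdr->rev) != OLD_FLAT_VERSION)
		return -EINVAL;
	/* The entry point must precede the start of the data segment. */
	if (be32_to_cpu(hdr->entry) >= be32_to_cpu(hdr->data_start))
		return -EINVAL;
	return 0;
}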
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
deleted file mode 100644
index 11366b3ff0b4..000000000000
--- a/include/linux/flex_array.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef _FLEX_ARRAY_H
-#define _FLEX_ARRAY_H
-
-#include <linux/types.h>
-#include <linux/reciprocal_div.h>
-#include <asm/page.h>
-
-#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
-#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE
-
-struct flex_array_part;
-
-/*
- * This is meant to replace cases where an array-like
- * structure has gotten too big to fit into kmalloc()
- * and the developer is getting tempted to use
- * vmalloc().
- */
-
-struct flex_array {
- union {
- struct {
- int element_size;
- int total_nr_elements;
- int elems_per_part;
- struct reciprocal_value reciprocal_elems;
- struct flex_array_part *parts[];
- };
- /*
- * This little trick makes sure that
- * sizeof(flex_array) == PAGE_SIZE
- */
- char padding[FLEX_ARRAY_BASE_SIZE];
- };
-};
-
-/* Number of bytes left in base struct flex_array, excluding metadata */
-#define FLEX_ARRAY_BASE_BYTES_LEFT \
- (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts))
-
-/* Number of pointers in base to struct flex_array_part pages */
-#define FLEX_ARRAY_NR_BASE_PTRS \
- (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *))
-
-/* Number of elements of size that fit in struct flex_array_part */
-#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \
- (FLEX_ARRAY_PART_SIZE / size)
-
-/*
- * Defines a statically allocated flex array and ensures its parameters are
- * valid.
- */
-#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \
- struct flex_array __arrayname = { { { \
- .element_size = (__element_size), \
- .total_nr_elements = (__total), \
- } } }; \
- static inline void __arrayname##_invalid_parameter(void) \
- { \
- BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \
- FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
- }
-
-/**
- * flex_array_alloc() - Creates a flexible array.
- * @element_size: individual object size.
- * @total: maximum number of objects which can be stored.
- * @flags: GFP flags
- *
- * Return: Returns an object of structure flex_array.
- */
-struct flex_array *flex_array_alloc(int element_size, unsigned int total,
- gfp_t flags);
-
-/**
- * flex_array_prealloc() - Ensures that memory for the elements indexed in the
- * range defined by start and nr_elements has been allocated.
- * @fa: array to allocate memory to.
- * @start: start address
- * @nr_elements: number of elements to be allocated.
- * @flags: GFP flags
- *
- */
-int flex_array_prealloc(struct flex_array *fa, unsigned int start,
- unsigned int nr_elements, gfp_t flags);
-
-/**
- * flex_array_free() - Removes all elements of a flexible array.
- * @fa: array to be freed.
- */
-void flex_array_free(struct flex_array *fa);
-
-/**
- * flex_array_free_parts() - Removes all elements of a flexible array, but
- * leaves the array itself in place.
- * @fa: array to be emptied.
- */
-void flex_array_free_parts(struct flex_array *fa);
-
-/**
- * flex_array_put() - Stores data into a flexible array.
- * @fa: array where element is to be stored.
- * @element_nr: position to copy, must be less than the maximum specified when
- * the array was created.
- * @src: data source to be copied into the array.
- * @flags: GFP flags
- *
- * Return: Returns zero on success, a negative error code otherwise.
- */
-int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
- gfp_t flags);
-
-/**
- * flex_array_clear() - Clears an individual element in the array, sets the
- * given element to FLEX_ARRAY_FREE.
- * @element_nr: element position to clear.
- * @fa: array to which element to be cleared belongs.
- *
- * Return: Returns zero on success, -EINVAL otherwise.
- */
-int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
-
-/**
- * flex_array_get() - Retrieves data into a flexible array.
- *
- * @element_nr: Element position to retrieve data from.
- * @fa: array from which data is to be retrieved.
- *
- * Return: Returns a pointer to the data element, or NULL if that
- * particular element has never been allocated.
- */
-void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
-
-/**
- * flex_array_shrink() - Reduces the allocated size of an array.
- * @fa: array to shrink.
- *
- * Return: Returns number of pages of memory actually freed.
- *
- */
-int flex_array_shrink(struct flex_array *fa);
-
-#define flex_array_put_ptr(fa, nr, src, gfp) \
- flex_array_put(fa, nr, (void *)&(src), gfp)
-
-void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr);
-
-#endif /* _FLEX_ARRAY_H */
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index 0d348e011a6e..e9a72fd0bfe7 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Floating proportions with flexible aging period
*
@@ -38,38 +39,6 @@ void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);
/*
- * ---- SINGLE ----
- */
-struct fprop_local_single {
- /* the local events counter */
- unsigned long events;
- /* Period in which we last updated events */
- unsigned int period;
- raw_spinlock_t lock; /* Protect period and numerator */
-};
-
-#define INIT_FPROP_LOCAL_SINGLE(name) \
-{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
-}
-
-int fprop_local_init_single(struct fprop_local_single *pl);
-void fprop_local_destroy_single(struct fprop_local_single *pl);
-void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
-void fprop_fraction_single(struct fprop_global *p,
- struct fprop_local_single *pl, unsigned long *numerator,
- unsigned long *denominator);
-
-static inline
-void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __fprop_inc_single(p, pl);
- local_irq_restore(flags);
-}
-
-/*
* ---- PERCPU ----
*/
struct fprop_local_percpu {
@@ -82,9 +51,10 @@ struct fprop_local_percpu {
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
-void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
- int max_frac);
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr);
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr);
void fprop_fraction_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl, unsigned long *numerator,
unsigned long *denominator);
@@ -95,7 +65,7 @@ void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
unsigned long flags;
local_irq_save(flags);
- __fprop_inc_percpu(p, pl);
+ __fprop_add_percpu(p, pl, 1);
local_irq_restore(flags);
}
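
The irq-safe wrapping pattern above extends naturally to the new
add-by-nr primitive; a hypothetical batched wrapper (not part of this
patch) would follow the same shape:

static inline
void example_fprop_add_percpu(struct fprop_global *p,
			      struct fprop_local_percpu *pl, long nr)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_add_percpu(p, pl, nr);
	local_irq_restore(flags);
}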
diff --git a/include/linux/fmc-sdb.h b/include/linux/fmc-sdb.h
deleted file mode 100644
index 599bd6bab56d..000000000000
--- a/include/linux/fmc-sdb.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This file is separate from sdb.h, because I want that one to remain
- * unchanged (as far as possible) from the official sdb distribution
- *
- * This file and associated functionality are a playground for me to
- * understand stuff which will later be implemented in more generic places.
- */
-#include <linux/sdb.h>
-
-/* This is the union of all currently defined types */
-union sdb_record {
- struct sdb_interconnect ic;
- struct sdb_device dev;
- struct sdb_bridge bridge;
- struct sdb_integration integr;
- struct sdb_empty empty;
- struct sdb_synthesis synthesis;
- struct sdb_repo_url repo_url;
-};
-
-struct fmc_device;
-
-/* Every sdb table is turned into this structure */
-struct sdb_array {
- int len;
- int level;
- unsigned long baseaddr;
- struct fmc_device *fmc; /* the device that hosts it */
- struct sdb_array *parent; /* NULL at root */
- union sdb_record *record; /* copies of the struct */
- struct sdb_array **subtree; /* only valid for bridge items */
-};
-
-extern int fmc_scan_sdb_tree(struct fmc_device *fmc, unsigned long address);
-extern void fmc_show_sdb_tree(const struct fmc_device *fmc);
-extern signed long fmc_find_sdb_device(struct sdb_array *tree, uint64_t vendor,
- uint32_t device, unsigned long *sz);
-extern int fmc_free_sdb_tree(struct fmc_device *fmc);
diff --git a/include/linux/fmc.h b/include/linux/fmc.h
deleted file mode 100644
index 3dc8a1b2db7b..000000000000
--- a/include/linux/fmc.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * Released according to the GNU GPL, version 2 or any later version.
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#ifndef __LINUX_FMC_H__
-#define __LINUX_FMC_H__
-#include <linux/types.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-struct fmc_device;
-struct fmc_driver;
-
-/*
- * This bus abstraction is developed separately from drivers, so we need
- * to check the version of the data structures we receive.
- */
-
-#define FMC_MAJOR 3
-#define FMC_MINOR 0
-#define FMC_VERSION ((FMC_MAJOR << 16) | FMC_MINOR)
-#define __FMC_MAJOR(x) ((x) >> 16)
-#define __FMC_MINOR(x) ((x) & 0xffff)
-
-/*
- * The device identification, as defined by the IPMI FRU (Field Replaceable
- * Unit) includes four different strings to describe the device. Here we
- * only match the "Board Manufacturer" and the "Board Product Name",
- * ignoring the "Board Serial Number" and "Board Part Number". All 4 are
- * expected to be strings, so they are treated as zero-terminated C strings.
- * Unspecified string (NULL) means "any", so if both are unspecified this
- * is a catch-all driver. So null entries are allowed and we use array
- * and length. This is unlike pci and usb that use null-terminated arrays
- */
-struct fmc_fru_id {
- char *manufacturer;
- char *product_name;
-};
-
-/*
- * If the FPGA is already programmed (think Etherbone or the second
- * SVEC slot), we can match on SDB devices in the memory image. This
- * match uses an array of devices that must all be present, and the
- * match is based on vendor and device only. Further checks are expected
- * to happen in the probe function. Zero means "any" and catch-all is allowed.
- */
-struct fmc_sdb_one_id {
- uint64_t vendor;
- uint32_t device;
-};
-struct fmc_sdb_id {
- struct fmc_sdb_one_id *cores;
- int cores_nr;
-};
-
-struct fmc_device_id {
- struct fmc_fru_id *fru_id;
- int fru_id_nr;
- struct fmc_sdb_id *sdb_id;
- int sdb_id_nr;
-};
-
-/* This sizes the module_param_array used by generic module parameters */
-#define FMC_MAX_CARDS 32
-
-/* The driver is a pretty simple thing */
-struct fmc_driver {
- unsigned long version;
- struct device_driver driver;
- int (*probe)(struct fmc_device *);
- int (*remove)(struct fmc_device *);
- const struct fmc_device_id id_table;
- /* What follows is for generic module parameters */
- int busid_n;
- int busid_val[FMC_MAX_CARDS];
- int gw_n;
- char *gw_val[FMC_MAX_CARDS];
-};
-#define to_fmc_driver(x) container_of((x), struct fmc_driver, driver)
-
-/* These are the generic parameters, that drivers may instantiate */
-#define FMC_PARAM_BUSID(_d) \
- module_param_array_named(busid, _d.busid_val, int, &_d.busid_n, 0444)
-#define FMC_PARAM_GATEWARE(_d) \
- module_param_array_named(gateware, _d.gw_val, charp, &_d.gw_n, 0444)
-
-/*
- * Drivers may need to configure gpio pins in the carrier. To read input
- * (a very uncommon operation, and definitely not in the hot paths), just
- * configure one gpio only and get 0 or 1 as retval of the config method
- */
-struct fmc_gpio {
- char *carrier_name; /* name or NULL for virtual pins */
- int gpio;
- int _gpio; /* internal use by the carrier */
- int mode; /* GPIOF_DIR_OUT etc, from <linux/gpio.h> */
- int irqmode; /* IRQF_TRIGGER_LOW and so on */
-};
-
-/* The numbering of gpio pins allows access to raw pins or virtual roles */
-#define FMC_GPIO_RAW(x) (x) /* 4096 of them */
-#define __FMC_GPIO_IS_RAW(x) ((x) < 0x1000)
-#define FMC_GPIO_IRQ(x) ((x) + 0x1000) /* 256 of them */
-#define FMC_GPIO_LED(x) ((x) + 0x1100) /* 256 of them */
-#define FMC_GPIO_KEY(x) ((x) + 0x1200) /* 256 of them */
-#define FMC_GPIO_TP(x) ((x) + 0x1300) /* 256 of them */
-#define FMC_GPIO_USER(x) ((x) + 0x1400) /* 256 of them */
-/* We may add SCL and SDA, or other roles if the need arises */
-
-/* GPIOF_DIR_IN etc are missing before 3.0. copy from <linux/gpio.h> */
-#ifndef GPIOF_DIR_IN
-# define GPIOF_DIR_OUT (0 << 0)
-# define GPIOF_DIR_IN (1 << 0)
-# define GPIOF_INIT_LOW (0 << 1)
-# define GPIOF_INIT_HIGH (1 << 1)
-#endif
-
-/*
- * The operations are offered by each carrier and should make driver
- * design completely independent of the carrier. Named GPIO pins may be
- * the exception.
- */
-struct fmc_operations {
- uint32_t (*read32)(struct fmc_device *fmc, int offset);
- void (*write32)(struct fmc_device *fmc, uint32_t value, int offset);
- int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv);
- int (*reprogram_raw)(struct fmc_device *f, struct fmc_driver *d,
- void *gw, unsigned long len);
- int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw);
- int (*irq_request)(struct fmc_device *fmc, irq_handler_t h,
- char *name, int flags);
- void (*irq_ack)(struct fmc_device *fmc);
- int (*irq_free)(struct fmc_device *fmc);
- int (*gpio_config)(struct fmc_device *fmc, struct fmc_gpio *gpio,
- int ngpio);
- int (*read_ee)(struct fmc_device *fmc, int pos, void *d, int l);
- int (*write_ee)(struct fmc_device *fmc, int pos, const void *d, int l);
-};
-
-/* Prefer this helper rather than calling of fmc->reprogram directly */
-int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
- void *gw, unsigned long len, int sdb_entry);
-extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw,
- int sdb_entry);
-
-/*
- * The device reports all information needed to access hw.
- *
- * If we have eeprom_len and not contents, the core reads it.
- * Then, parsing of identifiers is done by the core which fills fmc_fru_id..
- * Similarly a device that must be matched based on SDB cores must
- * fill the entry point and the core will scan the bus (FIXME: sdb match)
- */
-struct fmc_device {
- unsigned long version;
- unsigned long flags;
- struct module *owner; /* char device must pin it */
- struct fmc_fru_id id; /* for EEPROM-based match */
- struct fmc_operations *op; /* carrier-provided */
- int irq; /* according to host bus. 0 == none */
- int eeprom_len; /* Usually 8kB, may be less */
- int eeprom_addr; /* 0x50, 0x52 etc */
- uint8_t *eeprom; /* Full contents or leading part */
- char *carrier_name; /* "SPEC" or similar, for special use */
- void *carrier_data; /* "struct spec *" or equivalent */
- __iomem void *fpga_base; /* May be NULL (Etherbone) */
- __iomem void *slot_base; /* Set by the driver */
- struct fmc_device **devarray; /* Allocated by the bus */
- int slot_id; /* Index in the slot array */
- int nr_slots; /* Number of slots in this carrier */
- unsigned long memlen; /* Used for the char device */
- struct device dev; /* For Linux use */
- struct device *hwdev; /* The underlying hardware device */
- unsigned long sdbfs_entry;
- struct sdb_array *sdb;
- uint32_t device_id; /* Filled by the device */
- char *mezzanine_name; /* Defaults to ``fmc'' */
- void *mezzanine_data;
-
- struct dentry *dbg_dir;
- struct dentry *dbg_sdb_dump;
-};
-#define to_fmc_device(x) container_of((x), struct fmc_device, dev)
-
-#define FMC_DEVICE_HAS_GOLDEN 1
-#define FMC_DEVICE_HAS_CUSTOM 2
-#define FMC_DEVICE_NO_MEZZANINE 4
-#define FMC_DEVICE_MATCH_SDB 8 /* fmc-core must scan sdb in fpga */
-
-/*
- * If fpga_base can be used, the carrier offers no readl/writel methods, and
- * this expands to a single, fast, I/O access.
- */
-static inline uint32_t fmc_readl(struct fmc_device *fmc, int offset)
-{
- if (unlikely(fmc->op->read32))
- return fmc->op->read32(fmc, offset);
- return readl(fmc->fpga_base + offset);
-}
-static inline void fmc_writel(struct fmc_device *fmc, uint32_t val, int off)
-{
- if (unlikely(fmc->op->write32))
- fmc->op->write32(fmc, val, off);
- else
- writel(val, fmc->fpga_base + off);
-}
-
-/* pci-like naming */
-static inline void *fmc_get_drvdata(const struct fmc_device *fmc)
-{
- return dev_get_drvdata(&fmc->dev);
-}
-
-static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data)
-{
- dev_set_drvdata(&fmc->dev, data);
-}
-
-struct fmc_gateware {
- void *bitstream;
- unsigned long len;
-};
-
-/* The 5 access points */
-extern int fmc_driver_register(struct fmc_driver *drv);
-extern void fmc_driver_unregister(struct fmc_driver *drv);
-extern int fmc_device_register(struct fmc_device *tdev);
-extern int fmc_device_register_gw(struct fmc_device *tdev,
- struct fmc_gateware *gw);
-extern void fmc_device_unregister(struct fmc_device *tdev);
-
-/* Three more for device sets, all driven by the same FPGA */
-extern int fmc_device_register_n(struct fmc_device **devs, int n);
-extern int fmc_device_register_n_gw(struct fmc_device **devs, int n,
- struct fmc_gateware *gw);
-extern void fmc_device_unregister_n(struct fmc_device **devs, int n);
-
-/* Internal cross-calls between files; not exported to other modules */
-extern int fmc_match(struct device *dev, struct device_driver *drv);
-extern int fmc_fill_id_info(struct fmc_device *fmc);
-extern void fmc_free_id_info(struct fmc_device *fmc);
-extern void fmc_dump_eeprom(const struct fmc_device *fmc);
-
-/* helpers for FMC operations */
-extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
- char *name, int flags);
-extern void fmc_irq_free(struct fmc_device *fmc);
-extern void fmc_irq_ack(struct fmc_device *fmc);
-extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv);
-extern int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio,
- int ngpio);
-extern int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l);
-extern int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l);
-
-/* helpers for FMC operations */
-extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
- char *name, int flags);
-extern void fmc_irq_free(struct fmc_device *fmc);
-extern void fmc_irq_ack(struct fmc_device *fmc);
-extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv);
-
-#endif /* __LINUX_FMC_H__ */
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
new file mode 100644
index 000000000000..adab609c972e
--- /dev/null
+++ b/include/linux/folio_queue.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Queue of folios definitions
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/core-api/folio_queue.rst
+ *
+ * for a description of the API.
+ */
+
+#ifndef _LINUX_FOLIO_QUEUE_H
+#define _LINUX_FOLIO_QUEUE_H
+
+#include <linux/pagevec.h>
+#include <linux/mm.h>
+
+/*
+ * Segment in a queue of running buffers. Each segment can hold a number of
+ * folios and a portion of the queue can be referenced with the ITER_FOLIOQ
+ * iterator. The possibility exists of inserting non-folio elements into the
+ * queue (such as gaps).
+ *
+ * Explicit prev and next pointers are used instead of a list_head to make it
+ * easier to add segments to tail and remove them from the head without the
+ * need for a lock.
+ */
+struct folio_queue {
+ struct folio_batch vec; /* Folios in the queue segment */
+ u8 orders[PAGEVEC_SIZE]; /* Order of each folio */
+ struct folio_queue *next; /* Next queue segment or NULL */
+ struct folio_queue *prev; /* Previous queue segment or NULL */
+ unsigned long marks; /* 1-bit mark per folio */
+ unsigned long marks2; /* Second 1-bit mark per folio */
+#if PAGEVEC_SIZE > BITS_PER_LONG
+#error marks is not big enough
+#endif
+ unsigned int rreq_id;
+ unsigned int debug_id;
+};
+
+/**
+ * folioq_init - Initialise a folio queue segment
+ * @folioq: The segment to initialise
+ * @rreq_id: The request identifier to use in tracelines.
+ *
+ * Initialise a folio queue segment and set an identifier to be used in traces.
+ *
+ * Note that the folio pointers are left uninitialised.
+ */
+static inline void folioq_init(struct folio_queue *folioq, unsigned int rreq_id)
+{
+ folio_batch_init(&folioq->vec);
+ folioq->next = NULL;
+ folioq->prev = NULL;
+ folioq->marks = 0;
+ folioq->marks2 = 0;
+ folioq->rreq_id = rreq_id;
+ folioq->debug_id = 0;
+}
+
+/**
+ * folioq_nr_slots - Query the capacity of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that a particular folio queue segment might hold.
+ * [!] NOTE: This must not be assumed to be the same for every segment!
+ */
+static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
+{
+ return PAGEVEC_SIZE;
+}
+
+/**
+ * folioq_count - Query the occupancy of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that have been added to a folio queue segment.
+ * Note that this is not decreased as folios are removed from a segment.
+ */
+static inline unsigned int folioq_count(struct folio_queue *folioq)
+{
+ return folio_batch_count(&folioq->vec);
+}
+
+/**
+ * folioq_full - Query if a folio queue segment is full
+ * @folioq: The segment to query
+ *
+ * Query if a folio queue segment is fully occupied. Note that this does not
+ * change if folios are removed from a segment.
+ */
+static inline bool folioq_full(struct folio_queue *folioq)
+{
+ //return !folio_batch_space(&folioq->vec);
+ return folioq_count(folioq) >= folioq_nr_slots(folioq);
+}
+
+/**
+ * folioq_is_marked - Check first folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the first mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
+static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_mark - Set the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_unmark - Clear the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_is_marked2 - Check second folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the second mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
+static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_mark2 - Set the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_unmark2 - Clear the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_append - Add a folio to a folio queue segment
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue and the marks are left
+ * unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
+static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = folio_order(folio);
+ return slot;
+}
+
+/**
+ * folioq_append_mark - Add a folio to a folio queue segment and set its first mark
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue, the first mark is set
+ * and the second mark is left unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
+static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = folio_order(folio);
+ folioq_mark(folioq, slot);
+ return slot;
+}
+
+/**
+ * folioq_folio - Get a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the folio in the specified slot from a folio queue segment. Note
+ * that no bounds check is made and if the slot hasn't been filled yet, the
+ * pointer will be undefined. If the slot has been cleared, NULL will be
+ * returned.
+ */
+static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->vec.folios[slot];
+}
+
+/**
+ * folioq_folio_order - Get the order of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the order of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * filled yet, the order returned will be 0.
+ */
+static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->orders[slot];
+}
+
+/**
+ * folioq_folio_size - Get the size of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the size of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * filled yet, the size returned will be PAGE_SIZE.
+ */
+static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot)
+{
+ return PAGE_SIZE << folioq_folio_order(folioq, slot);
+}
+
+/**
+ * folioq_clear - Clear a folio from a folio queue segment
+ * @folioq: The segment to clear
+ * @slot: The folio slot to clear
+ *
+ * Clear a folio from a sequence in a folio queue segment and clear its marks.
+ * The occupancy count is left unchanged.
+ */
+static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
+{
+ folioq->vec.folios[slot] = NULL;
+ folioq_unmark(folioq, slot);
+ folioq_unmark2(folioq, slot);
+}
+
+#endif /* _LINUX_FOLIO_QUEUE_H */
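
To make the segment API concrete, a caller might fill and drain one
segment as in this sketch (the folios are assumed to come from
elsewhere; the example_* identifiers are invented):

#include <linux/folio_queue.h>

static void example_fill_and_drain(struct folio_queue *fq,
				   struct folio **folios, unsigned int n)
{
	unsigned int i, slot;

	folioq_init(fq, 0);
	for (i = 0; i < n && !folioq_full(fq); i++)
		folioq_append_mark(fq, folios[i]);	/* sets the first mark */

	for (slot = 0; slot < folioq_count(fq); slot++) {
		if (!folioq_is_marked(fq, slot))
			continue;
		/* ... consume folioq_folio(fq, slot) here ... */
		folioq_clear(fq, slot);		/* drops both marks too */
	}
}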
diff --git a/include/linux/font.h b/include/linux/font.h
index d6821769dd1e..fd8625cd76b2 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -16,7 +16,8 @@
struct font_desc {
int idx;
const char *name;
- int width, height;
+ unsigned int width, height;
+ unsigned int charcount;
const void *data;
int pref;
};
@@ -32,6 +33,9 @@ struct font_desc {
#define ACORN8x8_IDX 8
#define MINI4x6_IDX 9
#define FONT6x10_IDX 10
+#define TER16x32_IDX 11
+#define FONT6x8_IDX 12
+#define TER10x18_IDX 13
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -43,7 +47,10 @@ extern const struct font_desc font_vga_8x8,
font_sun_12x22,
font_acorn_8x8,
font_mini_4x6,
- font_6x10;
+ font_6x10,
+ font_ter_16x32,
+ font_6x8,
+ font_ter_10x18;
/* Find a font with a specific name */
@@ -52,9 +59,23 @@ extern const struct font_desc *find_font(const char *name);
/* Get the default font for a specific screen size */
extern const struct font_desc *get_default_font(int xres, int yres,
- u32 font_w, u32 font_h);
+ unsigned long *font_w,
+ unsigned long *font_h);
/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME 32
+/* Extra word getters */
+#define REFCOUNT(fd) (((int *)(fd))[-1])
+#define FNTSIZE(fd) (((int *)(fd))[-2])
+#define FNTCHARCNT(fd) (((int *)(fd))[-3])
+#define FNTSUM(fd) (((int *)(fd))[-4])
+
+#define FONT_EXTRA_WORDS 4
+
+struct font_data {
+ unsigned int extra[FONT_EXTRA_WORDS];
+ const unsigned char data[];
+} __packed;
+
#endif /* _VIDEO_FONT_H */
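
The negative-index getters rely on every font blob embedding
FONT_EXTRA_WORDS of metadata directly before its glyph bytes, which is
exactly what struct font_data lays out. A sketch of a conforming font
(name and glyph bytes invented; extra[] is ordered { sum, charcount,
size, refcount }, indexed backwards from data[]):

static const struct font_data example_font = {
	{ 0, 256, 256 * 16, 0 },	/* FNTCHARCNT == 256, FNTSIZE == 4096 */
	{ 0x00, 0x7e /* , ... remaining glyph bitmap bytes ... */ }
};

static const struct font_desc example_desc = {
	.name		= "EXAMPLE8x16",
	.width		= 8,
	.height		= 16,
	.charcount	= 256,
	.data		= example_font.data,	/* getters index back from here */
};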
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
new file mode 100644
index 000000000000..b3b53f8c1b28
--- /dev/null
+++ b/include/linux/fortify-string.h
@@ -0,0 +1,819 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FORTIFY_STRING_H_
+#define _LINUX_FORTIFY_STRING_H_
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/const.h>
+#include <linux/limits.h>
+
+#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
+#define __RENAME(x) __asm__(#x)
+
+#define FORTIFY_REASON_DIR(r) FIELD_GET(BIT(0), r)
+#define FORTIFY_REASON_FUNC(r) FIELD_GET(GENMASK(7, 1), r)
+#define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \
+ FIELD_PREP(GENMASK(7, 1), func))
+
+/* Overridden by KUnit tests. */
+#ifndef fortify_panic
+# define fortify_panic(func, write, avail, size, retfail) \
+ __fortify_panic(FORTIFY_REASON(func, write), avail, size)
+#endif
+#ifndef fortify_warn_once
+# define fortify_warn_once(x...) WARN_ONCE(x)
+#endif
+
+#define FORTIFY_READ 0
+#define FORTIFY_WRITE 1
+
+#define EACH_FORTIFY_FUNC(macro) \
+ macro(strncpy), \
+ macro(strnlen), \
+ macro(strlen), \
+ macro(strscpy), \
+ macro(strlcat), \
+ macro(strcat), \
+ macro(strncat), \
+ macro(memset), \
+ macro(memcpy), \
+ macro(memmove), \
+ macro(memscan), \
+ macro(memcmp), \
+ macro(memchr), \
+ macro(memchr_inv), \
+ macro(kmemdup), \
+ macro(strcpy), \
+ macro(UNKNOWN),
+
+#define MAKE_FORTIFY_FUNC(func) FORTIFY_FUNC_##func
+
+enum fortify_func {
+ EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC)
+};
+
+void __fortify_report(const u8 reason, const size_t avail, const size_t size);
+void __fortify_panic(const u8 reason, const size_t avail, const size_t size) __cold __noreturn;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)");
+void __read_overflow2_field(size_t avail, size_t wanted) __compiletime_warning("detected read beyond size of field (2nd parameter); maybe use struct_group()?");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)");
+void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("detected write beyond size of field (1st parameter); maybe use struct_group()?");
+
+#define __compiletime_strlen(p) \
+({ \
+ char *__p = (char *)(p); \
+ size_t __ret = SIZE_MAX; \
+ const size_t __p_size = __member_size(p); \
+ if (__p_size != SIZE_MAX && \
+ __builtin_constant_p(*__p)) { \
+ size_t __p_len = __p_size - 1; \
+ if (__builtin_constant_p(__p[__p_len]) && \
+ __p[__p_len] == '\0') \
+ __ret = __builtin_strlen(__p); \
+ } \
+ __ret; \
+})
+
+#if defined(__SANITIZE_ADDRESS__)
+
+#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+#elif defined(CONFIG_KASAN_GENERIC)
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
+#else /* CONFIG_KASAN_SW_TAGS */
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
+#endif
+
+extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+
+#else
+
+#if defined(__SANITIZE_MEMORY__)
+/*
+ * For KMSAN builds all memcpy/memset/memmove calls should be replaced by the
+ * corresponding __msan_XXX functions.
+ */
+#include <linux/kmsan_string.h>
+#define __underlying_memcpy __msan_memcpy
+#define __underlying_memmove __msan_memmove
+#define __underlying_memset __msan_memset
+#else
+#define __underlying_memcpy __builtin_memcpy
+#define __underlying_memmove __builtin_memmove
+#define __underlying_memset __builtin_memset
+#endif
+
+#define __underlying_memchr __builtin_memchr
+#define __underlying_memcmp __builtin_memcmp
+#define __underlying_strcat __builtin_strcat
+#define __underlying_strcpy __builtin_strcpy
+#define __underlying_strlen __builtin_strlen
+#define __underlying_strncat __builtin_strncat
+#define __underlying_strncpy __builtin_strncpy
+
+#endif
+
+/**
+ * unsafe_memcpy - memcpy implementation with no FORTIFY bounds checking
+ *
+ * @dst: Destination memory address to write to
+ * @src: Source memory address to read from
+ * @bytes: How many bytes to write to @dst from @src
+ * @justification: Free-form text or comment describing why the use is needed
+ *
+ * This should be used for corner cases where the compiler cannot do the
+ * right thing, or during transitions between APIs, etc. It should be used
+ * very rarely, and includes a place for justification detailing where bounds
+ * checking has happened, and why existing solutions cannot be employed.
+ */
+#define unsafe_memcpy(dst, src, bytes, justification) \
+ __underlying_memcpy(dst, src, bytes)
+
+/*
+ * Clang's use of __builtin_*object_size() within inlines needs hinting via
+ * __pass_*object_size(). The preference is to only ever use type 1 (member
+ * size, rather than struct size), but there remain some stragglers using
+ * type 0 that will be converted in the future.
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define POS __pass_dynamic_object_size(1)
+#define POS0 __pass_dynamic_object_size(0)
+#else
+#define POS __pass_object_size(1)
+#define POS0 __pass_object_size(0)
+#endif
+
+#define __compiletime_lessthan(bounds, length) ( \
+ __builtin_constant_p((bounds) < (length)) && \
+ (bounds) < (length) \
+)
+
+/**
+ * strncpy - Copy a string to memory with non-guaranteed NUL padding
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ * @size: bytes to write at @p
+ *
+ * If strlen(@q) >= @size, the copy of @q will stop after @size bytes,
+ * and @p will NOT be NUL-terminated
+ *
+ * If strlen(@q) < @size, following the copy of @q, trailing NUL bytes
+ * will be written to @p until @size total bytes have been written.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * over-reads of @q, it cannot defend against writing unterminated
+ * results to @p. Using strncpy() remains ambiguous and fragile.
+ * Instead, please choose an alternative, so that the expectation
+ * of @p's contents is unambiguous:
+ *
+ * +--------------------+--------------------+------------+
+ * | **p** needs to be: | padded to **size** | not padded |
+ * +====================+====================+============+
+ * | NUL-terminated | strscpy_pad() | strscpy() |
+ * +--------------------+--------------------+------------+
+ * | not NUL-terminated | strtomem_pad() | strtomem() |
+ * +--------------------+--------------------+------------+
+ *
+ * Note strscpy*()'s differing return values for detecting truncation,
+ * and strtomem*()'s expectation that the destination is marked with
+ * __nonstring when it is a character array.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
+char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
+{
+ const size_t p_size = __member_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_strncpy, FORTIFY_WRITE, p_size, size, p);
+ return __underlying_strncpy(p, q, size);
+}
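
The effect of the checks above can be illustrated with a destination
whose member size the compiler knows; this hypothetical snippet is
rejected at build time under FORTIFY_SOURCE, which is the point:

struct example_pkt {
	char name[8];
	int flags;
};

static void example_bad_copy(struct example_pkt *pkt, const char *src)
{
	/*
	 * __member_size(pkt->name) is 8 and "8 < 16" is a compile-time
	 * constant, so __write_overflow() fires and the build fails.
	 */
	strncpy(pkt->name, src, 16);
}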
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+/**
+ * strnlen - Return bounded count of characters in a NUL-terminated string
+ *
+ * @p: pointer to NUL-terminated string to count.
+ * @maxlen: maximum number of characters to count.
+ *
+ * Returns number of characters in @p (NOT including the final NUL), or
+ * @maxlen, if no NUL has been found up to there.
+ *
+ */
+__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
+{
+ const size_t p_size = __member_size(p);
+ const size_t p_len = __compiletime_strlen(p);
+ size_t ret;
+
+ /* We can take compile-time actions when maxlen is const. */
+ if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) {
+ /* If p is const, we can use its compile-time-known len. */
+ if (maxlen >= p_size)
+ return p_len;
+ }
+
+ /* Do not check characters beyond the end of p. */
+ ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(FORTIFY_FUNC_strnlen, FORTIFY_READ, p_size, ret + 1, ret);
+ return ret;
+}
+
+/*
+ * Defined after fortified strnlen to reuse it. However, it must still be
+ * possible for strlen() to be used on compile-time strings for use in
+ * static initializers (i.e. as a constant expression).
+ */
+/**
+ * strlen - Return count of characters in a NUL-terminated string
+ *
+ * @p: pointer to NUL-terminated string to count.
+ *
+ * Do not use this function unless the string length is known at
+ * compile-time. When @p is unterminated, this function may crash
+ * or return unexpected counts that could lead to memory content
+ * exposures. Prefer strnlen().
+ *
+ * Returns number of characters in @p (NOT including the final NUL).
+ *
+ */
+#define strlen(p) \
+ __builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \
+ __builtin_strlen(p), __fortify_strlen(p))
+__FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
+__kernel_size_t __fortify_strlen(const char * const POS p)
+{
+ const size_t p_size = __member_size(p);
+ __kernel_size_t ret;
+
+ /* Give up if we don't know how large p is. */
+ if (p_size == SIZE_MAX)
+ return __underlying_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
+ fortify_panic(FORTIFY_FUNC_strlen, FORTIFY_READ, p_size, ret + 1, ret);
+ return ret;
+}
+
+/* Defined after fortified strnlen() to reuse it. */
+extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(sized_strscpy);
+__FORTIFY_INLINE ssize_t sized_strscpy(char * const POS p, const char * const POS q, size_t size)
+{
+ /* Use string size rather than possible enclosing struct size. */
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t len;
+
+ /* If we cannot get the sizes of p and q, fall back to the real strscpy. */
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strscpy(p, q, size);
+
+ /*
+ * If size can be known at compile time and is greater than
+ * p_size, generate a compile time write overflow error.
+ */
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+
+ /* Short-circuit for compile-time known-safe lengths. */
+ if (__compiletime_lessthan(p_size, SIZE_MAX)) {
+ len = __compiletime_strlen(q);
+
+ if (len < SIZE_MAX && __compiletime_lessthan(len, size)) {
+ __underlying_memcpy(p, q, len + 1);
+ return len;
+ }
+ }
+
+ /*
+ * This call protects against read overflow, because len will default to the
+ * length of q if it is smaller than size.
+ */
+ len = strnlen(q, size);
+ /*
+ * If len equals size, we will copy only size bytes which leads to
+ * -E2BIG being returned.
+ * Otherwise we will copy len + 1 bytes because of the final '\0'.
+ */
+ len = len == size ? size : len + 1;
+
+ /*
+ * Generate a runtime write overflow error if len is greater than
+ * p_size.
+ */
+ if (p_size < len)
+ fortify_panic(FORTIFY_FUNC_strscpy, FORTIFY_WRITE, p_size, len, -E2BIG);
+
+ /*
+ * We can now safely call vanilla strscpy because we are protected from:
+ * 1. Read overflow thanks to call to strnlen().
+ * 2. Write overflow thanks to above ifs.
+ */
+ return __real_strscpy(p, q, len);
+}
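
Caller-side, the paths above surface as the usual strscpy() return
convention; a small usage sketch (assuming the strscpy() wrapper in
<linux/string.h> resolves to sized_strscpy()):

static int example_set_label(char *dst, size_t dst_len, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_len);

	if (n < 0)
		return n;	/* -E2BIG: src did not fit and was truncated */
	return 0;		/* n characters were copied, NUL-terminated */
}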
+
+/* Defined after fortified strlen() to reuse it. */
+extern size_t __real_strlcat(char *p, const char *q, size_t avail) __RENAME(strlcat);
+/**
+ * strlcat - Append a string to an existing string
+ *
+ * @p: pointer to %NUL-terminated string to append to
+ * @q: pointer to %NUL-terminated string to append from
+ * @avail: Maximum bytes available in @p
+ *
+ * Appends %NUL-terminated string @q after the %NUL-terminated
+ * string at @p, but will not write beyond @avail bytes total,
+ * potentially truncating the copy from @q. @p will stay
+ * %NUL-terminated only if a %NUL already existed within
+ * the @avail bytes of @p. If so, the resulting number of
+ * bytes copied from @q will be at most "@avail - strlen(@p) - 1".
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the sizes
+ * of @p and @q are known to the compiler. Prefer building the
+ * string with formatting, via scnprintf(), seq_buf, or similar.
+ *
+ * Returns total bytes that _would_ have been contained by @p
+ * regardless of truncation, similar to snprintf(). If return
+ * value is >= @avail, the string has been truncated.
+ *
+ */
+__FORTIFY_INLINE
+size_t strlcat(char * const POS p, const char * const POS q, size_t avail)
+{
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t p_len, copy_len;
+ size_t actual, wanted;
+
+ /* Give up immediately if both buffer sizes are unknown. */
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strlcat(p, q, avail);
+
+ p_len = strnlen(p, avail);
+ copy_len = strlen(q);
+ wanted = actual = p_len + copy_len;
+
+ /* Cannot append any more: report truncation. */
+ if (avail <= p_len)
+ return wanted;
+
+ /* Give up if string is already overflowed. */
+ if (p_size <= p_len)
+ fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_READ, p_size, p_len + 1, wanted);
+
+ if (actual >= avail) {
+ copy_len = avail - p_len - 1;
+ actual = p_len + copy_len;
+ }
+
+ /* Give up if copy will overflow. */
+ if (p_size <= actual)
+ fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_WRITE, p_size, actual + 1, wanted);
+ __underlying_memcpy(p + p_len, q, copy_len);
+ p[actual] = '\0';
+
+ return wanted;
+}
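
A minimal sketch of the format-based alternative recommended in the kernel-doc above (buffer and field names are assumptions): scnprintf() returns the number of characters actually written, so chained calls can never overflow the buffer.

        #include <linux/kernel.h>

        static void build_label(char *buf, size_t size, const char *base, int id)
        {
                int len = scnprintf(buf, size, "%s", base);

                scnprintf(buf + len, size - len, "-%d", id);  /* bounded by remaining space */
        }
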
+
+/* Defined after fortified strlcat() to reuse it. */
+/**
+ * strcat - Append a string to an existing string
+ *
+ * @p: pointer to NUL-terminated string to append to
+ * @q: pointer to NUL-terminated source string to append from
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the
+ * destination buffer size is known to the compiler. Prefer
+ * building the string with formatting, via scnprintf() or similar.
+ * At the very least, use strncat().
+ *
+ * Returns @p.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
+char *strcat(char * const POS p, const char *q)
+{
+ const size_t p_size = __member_size(p);
+ const size_t wanted = strlcat(p, q, p_size);
+
+ if (p_size <= wanted)
+ fortify_panic(FORTIFY_FUNC_strcat, FORTIFY_WRITE, p_size, wanted + 1, p);
+ return p;
+}
+
+/**
+ * strncat - Append a string to an existing string
+ *
+ * @p: pointer to NUL-terminated string to append to
+ * @q: pointer to source string to append from
+ * @count: Maximum bytes to read from @q
+ *
+ * Appends at most @count bytes from @q (stopping at the first
+ * NUL byte) after the NUL-terminated string at @p. @p will be
+ * NUL-terminated.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the sizes
+ * of @p and @q are known to the compiler. Prefer building the
+ * string with formatting, via scnprintf() or similar.
+ *
+ * Returns @p.
+ *
+ */
+/* Defined after fortified strlen() and strnlen() to reuse them. */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
+char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
+{
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t p_len, copy_len, total;
+
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __underlying_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ total = p_len + copy_len + 1;
+ if (p_size < total)
+ fortify_panic(FORTIFY_FUNC_strncat, FORTIFY_WRITE, p_size, total, p);
+ __underlying_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+
+__FORTIFY_INLINE bool fortify_memset_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t p_size_field)
+{
+ if (__builtin_constant_p(size)) {
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
+ __write_overflow();
+
+ /* Warn when write size is larger than dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
+ }
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if (p_size != SIZE_MAX && p_size < size)
+ fortify_panic(FORTIFY_FUNC_memset, FORTIFY_WRITE, p_size, size, true);
+ return false;
+}
+
+#define __fortify_memset_chk(p, c, size, p_size, p_size_field) ({ \
+ size_t __fortify_size = (size_t)(size); \
+ fortify_memset_chk(__fortify_size, p_size, p_size_field), \
+ __underlying_memset(p, c, __fortify_size); \
+})
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#ifndef CONFIG_KMSAN
+#define memset(p, c, s) __fortify_memset_chk(p, c, s, \
+ __struct_size(p), __member_size(p))
+#endif
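
As an illustration of the checks above (the struct is hypothetical): a constant length that exceeds the destination member now fails at build time via __write_overflow_field(), while a dynamic length is caught at run time by fortify_panic().

        #include <linux/string.h>
        #include <linux/types.h>

        struct pkt {
                u8 hdr[4];
                u8 payload[12];
        };

        static void clear_hdr(struct pkt *p)
        {
                memset(p->hdr, 0, sizeof(p->hdr));  /* in bounds: compiles quietly */
                /* memset(p->hdr, 0, 8) would trip __write_overflow_field() at build time */
        }
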
+
+/*
+ * To make sure the compiler can enforce protection against buffer overflows,
+ * memcpy(), memmove(), and memset() must not be used beyond individual
+ * struct members. If you need to copy across multiple members, please use
+ * struct_group() to create a named mirror of an anonymous struct union.
+ * (e.g. see struct sk_buff.) Read overflow checking is currently only
+ * done when a write overflow is also present, or when building with W=1.
+ *
+ * Mitigation coverage matrix
+ * Bounds checking at:
+ * +-------+-------+-------+-------+
+ * | Compile time | Run time |
+ * memcpy() argument sizes: | write | read | write | read |
+ * dest source length +-------+-------+-------+-------+
+ * memcpy(known, known, constant) | y | y | n/a | n/a |
+ * memcpy(known, unknown, constant) | y | n | n/a | V |
+ * memcpy(known, known, dynamic) | n | n | B | B |
+ * memcpy(known, unknown, dynamic) | n | n | B | V |
+ * memcpy(unknown, known, constant) | n | y | V | n/a |
+ * memcpy(unknown, unknown, constant) | n | n | V | V |
+ * memcpy(unknown, known, dynamic) | n | n | V | B |
+ * memcpy(unknown, unknown, dynamic) | n | n | V | V |
+ * +-------+-------+-------+-------+
+ *
+ * y = perform deterministic compile-time bounds checking
+ * n = cannot perform deterministic compile-time bounds checking
+ * n/a = no run-time bounds checking needed since compile-time deterministic
+ * B = can perform run-time bounds checking (currently unimplemented)
+ * V = vulnerable to run-time overflow (will need refactoring to solve)
+ *
+ */
+__FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t q_size,
+ const size_t p_size_field,
+ const size_t q_size_field,
+ const u8 func)
+{
+ if (__builtin_constant_p(size)) {
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
+ __write_overflow();
+ if (__compiletime_lessthan(q_size_field, q_size) &&
+ __compiletime_lessthan(q_size, size))
+ __read_overflow2();
+
+		/* Warn when the write size argument is larger than the dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
+ /*
+ * Warn for source field over-read when building with W=1
+ * or when an over-write happened, so both can be fixed at
+ * the same time.
+ */
+ if ((IS_ENABLED(KBUILD_EXTRA_WARN1) ||
+ __compiletime_lessthan(p_size_field, size)) &&
+ __compiletime_lessthan(q_size_field, size))
+ __read_overflow2_field(q_size_field, size);
+ }
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if (p_size != SIZE_MAX && p_size < size)
+ fortify_panic(func, FORTIFY_WRITE, p_size, size, true);
+ else if (q_size != SIZE_MAX && q_size < size)
+ fortify_panic(func, FORTIFY_READ, q_size, size, true);
+
+ /*
+ * Warn when writing beyond destination field size.
+ *
+ * Note the implementation of __builtin_*object_size() behaves
+ * like sizeof() when not directly referencing a flexible
+ * array member, which means there will be many bounds checks
+ * that will appear at run-time, without a way for them to be
+ * detected at compile-time (as can be done when the destination
+ * is specifically the flexible array member).
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
+ */
+ if (p_size_field != SIZE_MAX &&
+ p_size != p_size_field && p_size_field < size)
+ return true;
+
+ return false;
+}
+
+/*
+ * To work around what seems to be an optimizer bug, the macro arguments
+ * need to have const copies or the values end up changed by the time they
+ * reach fortify_warn_once(). See commit 6f7630b1b5bc ("fortify: Capture
+ * __bos() results in const temp vars") for more details.
+ */
+#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
+ p_size_field, q_size_field, op) ({ \
+ const size_t __fortify_size = (size_t)(size); \
+ const size_t __p_size = (p_size); \
+ const size_t __q_size = (q_size); \
+ const size_t __p_size_field = (p_size_field); \
+ const size_t __q_size_field = (q_size_field); \
+ /* Keep a mutable version of the size for the final copy. */ \
+ size_t __copy_size = __fortify_size; \
+ fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \
+ __q_size, __p_size_field, \
+ __q_size_field, FORTIFY_FUNC_ ##op), \
+ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
+ __fortify_size, \
+ "field \"" #p "\" at " FILE_LINE, \
+ __p_size_field); \
+ /* Hide only the run-time size from value range tracking to */ \
+ /* silence compile-time false positive bounds warnings. */ \
+ if (!__builtin_constant_p(__copy_size)) \
+ OPTIMIZER_HIDE_VAR(__copy_size); \
+ __underlying_##op(p, q, __copy_size); \
+})
+
+/*
+ * Notes about compile-time buffer size detection:
+ *
+ * With these types...
+ *
+ * struct middle {
+ * u16 a;
+ * u8 middle_buf[16];
+ * int b;
+ * };
+ * struct end {
+ * u16 a;
+ * u8 end_buf[16];
+ * };
+ * struct flex {
+ * int a;
+ * u8 flex_buf[];
+ * };
+ *
+ * void func(TYPE *ptr) { ... }
+ *
+ * Cases where destination size cannot be currently detected:
+ * - the size of ptr's object (seemingly by design, gcc & clang fail):
+ * __builtin_object_size(ptr, 1) == SIZE_MAX
+ * - the size of flexible arrays in ptr's obj (by design, dynamic size):
+ * __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX
+ * - the size of ANY array at the end of ptr's obj (gcc and clang bug):
+ * __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836
+ *
+ * Cases where destination size is currently detected:
+ * - the size of non-array members within ptr's object:
+ * __builtin_object_size(ptr->a, 1) == 2
+ * - the size of non-flexible-array in the middle of ptr's obj:
+ * __builtin_object_size(ptr->middle_buf, 1) == 16
+ *
+ */
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memcpy)
+#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memmove)
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
+{
+ const size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_memscan, FORTIFY_READ, p_size, size, NULL);
+ return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
+int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
+{
+ const size_t p_size = __struct_size(p);
+ const size_t q_size = __struct_size(q);
+
+ if (__builtin_constant_p(size)) {
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (__compiletime_lessthan(q_size, size))
+ __read_overflow2();
+ }
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, p_size, size, INT_MIN);
+ else if (q_size < size)
+ fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, q_size, size, INT_MIN);
+ return __underlying_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
+void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
+{
+ const size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_memchr, FORTIFY_READ, p_size, size, NULL);
+ return __underlying_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
+{
+ const size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_memchr_inv, FORTIFY_READ, p_size, size, NULL);
+ return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup_noprof)
+ __realloc_size(2);
+__FORTIFY_INLINE void *kmemdup_noprof(const void * const POS0 p, size_t size, gfp_t gfp)
+{
+ const size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size,
+ __real_kmemdup(p, 0, gfp));
+ return __real_kmemdup(p, size, gfp);
+}
+#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__))
+
+/**
+ * strcpy - Copy a string into another string buffer
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * overflows, this is only possible when the sizes of @q and @p are
+ * known to the compiler. Prefer strscpy(), though note its different
+ * return values for detecting truncation.
+ *
+ * Returns @p.
+ *
+ */
+/* Defined after fortified strlen to reuse it. */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
+char *strcpy(char * const POS p, const char * const POS q)
+{
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t size;
+
+ /* If neither buffer size is known, immediately give up. */
+ if (__builtin_constant_p(p_size) &&
+ __builtin_constant_p(q_size) &&
+ p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __underlying_strcpy(p, q);
+ size = strlen(q) + 1;
+ /* Compile-time check for const size overflow. */
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+ /* Run-time check for dynamic size overflow. */
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_strcpy, FORTIFY_WRITE, p_size, size, p);
+ __underlying_memcpy(p, q, size);
+ return p;
+}
+
+/* Don't use these outside the FORTIFY_SOURCE implementation */
+#undef __underlying_memchr
+#undef __underlying_memcmp
+#undef __underlying_strcat
+#undef __underlying_strcpy
+#undef __underlying_strlen
+#undef __underlying_strncat
+#undef __underlying_strncpy
+
+#undef POS
+#undef POS0
+
+#endif /* _LINUX_FORTIFY_STRING_H_ */
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
index 3810a9033f49..a6b4c07858cc 100644
--- a/include/linux/fpga/altera-pr-ip-core.h
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for Altera Partial Reconfiguration IP Core
*
@@ -5,18 +6,6 @@
*
* Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
* by Alan Tull <atull@opensource.altera.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ALT_PR_IP_CORE_H
@@ -24,6 +13,5 @@
#include <linux/io.h>
int alt_pr_register(struct device *dev, void __iomem *reg_base);
-int alt_pr_unregister(struct device *dev);
#endif /* _ALT_PR_IP_CORE_H */
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index dba6e3c697c7..94c4edd047e5 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -1,21 +1,42 @@
-#include <linux/device.h>
-#include <linux/fpga/fpga-mgr.h>
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FPGA_BRIDGE_H
#define _LINUX_FPGA_BRIDGE_H
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+
struct fpga_bridge;
/**
* struct fpga_bridge_ops - ops for low level FPGA bridge drivers
* @enable_show: returns the FPGA bridge's status
- * @enable_set: set a FPGA bridge as enabled or disabled
+ * @enable_set: set an FPGA bridge as enabled or disabled
* @fpga_bridge_remove: set FPGA into a specific state during driver remove
+ * @groups: optional attribute groups.
*/
struct fpga_bridge_ops {
int (*enable_show)(struct fpga_bridge *bridge);
int (*enable_set)(struct fpga_bridge *bridge, bool enable);
void (*fpga_bridge_remove)(struct fpga_bridge *bridge);
+ const struct attribute_group **groups;
+};
+
+/**
+ * struct fpga_bridge_info - collection of parameters for an FPGA Bridge
+ * @name: fpga bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: fpga bridge private data
+ *
+ * fpga_bridge_info contains parameters for the register function. These
+ * are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_bridge_info {
+ const char *name;
+ const struct fpga_bridge_ops *br_ops;
+ void *priv;
};
/**
@@ -24,6 +45,7 @@ struct fpga_bridge_ops {
* @dev: FPGA bridge device
* @mutex: enforces exclusive reference to bridge
* @br_ops: pointer to struct of FPGA bridge ops
+ * @br_ops_owner: module containing the br_ops
* @info: fpga image specific information
* @node: FPGA bridge list node
 * @priv: low level driver private data
@@ -33,6 +55,7 @@ struct fpga_bridge {
struct device dev;
struct mutex mutex; /* for exclusive reference to bridge */
const struct fpga_bridge_ops *br_ops;
+ struct module *br_ops_owner;
struct fpga_image_info *info;
struct list_head node;
void *priv;
@@ -42,6 +65,8 @@ struct fpga_bridge {
struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
struct fpga_image_info *info);
+struct fpga_bridge *fpga_bridge_get(struct device *dev,
+ struct fpga_image_info *info);
void fpga_bridge_put(struct fpga_bridge *bridge);
int fpga_bridge_enable(struct fpga_bridge *bridge);
int fpga_bridge_disable(struct fpga_bridge *bridge);
@@ -49,12 +74,19 @@ int fpga_bridge_disable(struct fpga_bridge *bridge);
int fpga_bridges_enable(struct list_head *bridge_list);
int fpga_bridges_disable(struct list_head *bridge_list);
void fpga_bridges_put(struct list_head *bridge_list);
-int fpga_bridge_get_to_list(struct device_node *np,
+int fpga_bridge_get_to_list(struct device *dev,
struct fpga_image_info *info,
struct list_head *bridge_list);
+int of_fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list);
-int fpga_bridge_register(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops, void *priv);
-void fpga_bridge_unregister(struct device *dev);
+#define fpga_bridge_register(parent, name, br_ops, priv) \
+ __fpga_bridge_register(parent, name, br_ops, priv, THIS_MODULE)
+struct fpga_bridge *
+__fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv,
+ struct module *owner);
+void fpga_bridge_unregister(struct fpga_bridge *br);
#endif /* _LINUX_FPGA_BRIDGE_H */
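
A sketch of a low-level driver adopting the reworked API above (the callback body and names are assumptions): fpga_bridge_register() now returns the bridge, and fpga_bridge_unregister() takes that bridge rather than the parent device.

        #include <linux/err.h>
        #include <linux/fpga/fpga-bridge.h>
        #include <linux/platform_device.h>

        static int my_enable_set(struct fpga_bridge *bridge, bool enable)
        {
                return 0;                       /* assumed hardware poke */
        }

        static const struct fpga_bridge_ops my_br_ops = {
                .enable_set = my_enable_set,
        };

        static int my_br_probe(struct platform_device *pdev)
        {
                struct fpga_bridge *br;

                br = fpga_bridge_register(&pdev->dev, "my-bridge", &my_br_ops, NULL);
                if (IS_ERR(br))
                        return PTR_ERR(br);
                platform_set_drvdata(pdev, br);
                return 0;
        }

        static void my_br_remove(struct platform_device *pdev)
        {
                fpga_bridge_unregister(platform_get_drvdata(pdev));
        }
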
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index bfa14bc023fb..0d4fe068f3d8 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -1,26 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* FPGA Framework
*
- * Copyright (C) 2013-2015 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2013-2016 Altera Corporation
+ * Copyright (C) 2017 Intel Corporation
*/
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-
#ifndef _LINUX_FPGA_MGR_H
#define _LINUX_FPGA_MGR_H
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
struct fpga_manager;
struct sg_table;
@@ -32,6 +22,8 @@ struct sg_table;
* @FPGA_MGR_STATE_RESET: FPGA in reset state
* @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress
* @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed
+ * @FPGA_MGR_STATE_PARSE_HEADER: parse FPGA image header
+ * @FPGA_MGR_STATE_PARSE_HEADER_ERR: Error during PARSE_HEADER stage
* @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming
* @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
* @FPGA_MGR_STATE_WRITE: writing image to FPGA
@@ -51,7 +43,9 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_FIRMWARE_REQ,
FPGA_MGR_STATE_FIRMWARE_REQ_ERR,
- /* write sequence: init, write, complete */
+ /* write sequence: parse header, init, write, complete */
+ FPGA_MGR_STATE_PARSE_HEADER,
+ FPGA_MGR_STATE_PARSE_HEADER_ERR,
FPGA_MGR_STATE_WRITE_INIT,
FPGA_MGR_STATE_WRITE_INIT_ERR,
FPGA_MGR_STATE_WRITE,
@@ -63,12 +57,20 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_OPERATING,
};
-/*
- * FPGA Manager flags
- * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
- * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
- * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
- * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
+/**
+ * DOC: FPGA Manager flags
+ *
+ * Flags used in the &fpga_image_info->flags field
+ *
+ * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ *
+ * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ *
+ * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
+ *
+ * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ *
+ * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
@@ -77,29 +79,89 @@ enum fpga_mgr_states {
#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
/**
- * struct fpga_image_info - information specific to a FPGA image
+ * struct fpga_image_info - information specific to an FPGA image
* @flags: boolean flags as defined above
* @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
* @config_complete_timeout_us: maximum time for FPGA to switch to operating
* status in the write_complete op.
+ * @firmware_name: name of FPGA image firmware file
+ * @sgt: scatter/gather table containing FPGA image
+ * @buf: contiguous buffer containing FPGA image
+ * @count: size of buf
+ * @header_size: size of image header.
+ * @data_size: size of image data to be sent to the device. If not specified,
+ * whole image will be used. Header may be skipped in either case.
+ * @region_id: id of target region
+ * @dev: device that owns this
+ * @overlay: Device Tree overlay
*/
struct fpga_image_info {
u32 flags;
u32 enable_timeout_us;
u32 disable_timeout_us;
u32 config_complete_timeout_us;
+ char *firmware_name;
+ struct sg_table *sgt;
+ const char *buf;
+ size_t count;
+ size_t header_size;
+ size_t data_size;
+ int region_id;
+ struct device *dev;
+#ifdef CONFIG_OF
+ struct device_node *overlay;
+#endif
+};
+
+/**
+ * struct fpga_compat_id - id for compatibility check
+ *
+ * @id_h: high 64bit of the compat_id
+ * @id_l: low 64bit of the compat_id
+ */
+struct fpga_compat_id {
+ u64 id_h;
+ u64 id_l;
+};
+
+/**
+ * struct fpga_manager_info - collection of parameters for an FPGA Manager
+ * @name: fpga manager name
+ * @compat_id: FPGA manager id for compatibility check.
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * fpga_manager_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_manager_info {
+ const char *name;
+ struct fpga_compat_id *compat_id;
+ const struct fpga_manager_ops *mops;
+ void *priv;
};
/**
* struct fpga_manager_ops - ops for low level fpga manager drivers
- * @initial_header_size: Maximum number of bytes that should be passed into write_init
+ * @initial_header_size: minimum number of bytes that should be passed into
+ * parse_header and write_init.
+ * @skip_header: bool flag to tell the fpga-mgr core whether it should skip
+ *	the first info->header_size bytes of the image when invoking the
+ *	write callback.
* @state: returns an enum value of the FPGA's state
- * @write_init: prepare the FPGA to receive confuration data
+ * @status: returns status of the FPGA, including reconfiguration error code
+ * @parse_header: parse FPGA image header to set info->header_size and
+ * info->data_size. In case the input buffer is not large enough, set
+ * required size to info->header_size and return -EAGAIN.
+ * @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
* @fpga_remove: optional: Set FPGA into a specific state during driver remove
+ * @groups: optional attribute groups.
*
* fpga_manager_ops are the low level functions implemented by a specific
* fpga manager driver. The optional ones are tested for NULL before being
@@ -107,7 +169,12 @@ struct fpga_image_info {
*/
struct fpga_manager_ops {
size_t initial_header_size;
+ bool skip_header;
enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
+ u64 (*status)(struct fpga_manager *mgr);
+ int (*parse_header)(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count);
int (*write_init)(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count);
@@ -116,15 +183,25 @@ struct fpga_manager_ops {
int (*write_complete)(struct fpga_manager *mgr,
struct fpga_image_info *info);
void (*fpga_remove)(struct fpga_manager *mgr);
+ const struct attribute_group **groups;
};
+/* FPGA manager status: Partial/Full Reconfiguration errors */
+#define FPGA_MGR_STATUS_OPERATION_ERR BIT(0)
+#define FPGA_MGR_STATUS_CRC_ERR BIT(1)
+#define FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR BIT(2)
+#define FPGA_MGR_STATUS_IP_PROTOCOL_ERR BIT(3)
+#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4)
+
/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
* @dev: fpga manager device
* @ref_mutex: only allows one reference to fpga manager
* @state: state of fpga manager
+ * @compat_id: FPGA manager id for compatibility check.
* @mops: pointer to struct of fpga manager ops
+ * @mops_owner: module containing the mops
 * @priv: low level driver private data
*/
struct fpga_manager {
@@ -132,20 +209,22 @@ struct fpga_manager {
struct device dev;
struct mutex ref_mutex;
enum fpga_mgr_states state;
+ struct fpga_compat_id *compat_id;
const struct fpga_manager_ops *mops;
+ struct module *mops_owner;
void *priv;
};
#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
-int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
- const char *buf, size_t count);
-int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
- struct sg_table *sgt);
+struct fpga_image_info *fpga_image_info_alloc(struct device *dev);
-int fpga_mgr_firmware_load(struct fpga_manager *mgr,
- struct fpga_image_info *info,
- const char *image_name);
+void fpga_image_info_free(struct fpga_image_info *info);
+
+int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info);
+
+int fpga_mgr_lock(struct fpga_manager *mgr);
+void fpga_mgr_unlock(struct fpga_manager *mgr);
struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
@@ -153,9 +232,30 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
void fpga_mgr_put(struct fpga_manager *mgr);
-int fpga_mgr_register(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops, void *priv);
+#define fpga_mgr_register_full(parent, info) \
+ __fpga_mgr_register_full(parent, info, THIS_MODULE)
+struct fpga_manager *
+__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner);
+
+#define fpga_mgr_register(parent, name, mops, priv) \
+ __fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
+struct fpga_manager *
+__fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv, struct module *owner);
+
+void fpga_mgr_unregister(struct fpga_manager *mgr);
-void fpga_mgr_unregister(struct device *dev);
+#define devm_fpga_mgr_register_full(parent, info) \
+ __devm_fpga_mgr_register_full(parent, info, THIS_MODULE)
+struct fpga_manager *
+__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner);
+#define devm_fpga_mgr_register(parent, name, mops, priv) \
+ __devm_fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
+struct fpga_manager *
+__devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv,
+ struct module *owner);
#endif /*_LINUX_FPGA_MGR_H */
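
Similarly, a minimal manager-driver sketch under the new interface (the trivial callbacks stand in for real hardware code): the devm_ variant ties unregistration to the parent device, so no explicit remove step is needed.

        #include <linux/err.h>
        #include <linux/fpga/fpga-mgr.h>
        #include <linux/platform_device.h>

        static enum fpga_mgr_states my_state(struct fpga_manager *mgr)
        {
                return FPGA_MGR_STATE_UNKNOWN;
        }

        static int my_write_init(struct fpga_manager *mgr,
                                 struct fpga_image_info *info,
                                 const char *buf, size_t count)
        {
                return 0;                       /* assumed device preparation */
        }

        static int my_write(struct fpga_manager *mgr, const char *buf, size_t count)
        {
                return 0;                       /* assumed data push */
        }

        static int my_write_complete(struct fpga_manager *mgr,
                                     struct fpga_image_info *info)
        {
                return 0;
        }

        static const struct fpga_manager_ops my_mops = {
                .state          = my_state,
                .write_init     = my_write_init,
                .write          = my_write,
                .write_complete = my_write_complete,
        };

        static int my_mgr_probe(struct platform_device *pdev)
        {
                struct fpga_manager *mgr;

                mgr = devm_fpga_mgr_register(&pdev->dev, "my-mgr", &my_mops, NULL);
                return PTR_ERR_OR_ZERO(mgr);    /* devres handles unregister */
        }
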
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
new file mode 100644
index 000000000000..5fbc05fe70a6
--- /dev/null
+++ b/include/linux/fpga/fpga-region.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _FPGA_REGION_H
+#define _FPGA_REGION_H
+
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-bridge.h>
+
+struct fpga_region;
+
+/**
+ * struct fpga_region_info - collection of parameters for an FPGA Region
+ * @mgr: fpga region manager
+ * @compat_id: FPGA region id for compatibility check.
+ * @priv: fpga region private data
+ * @get_bridges: optional function to get bridges to a list
+ *
+ * fpga_region_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_region_info {
+ struct fpga_manager *mgr;
+ struct fpga_compat_id *compat_id;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+};
+
+/**
+ * struct fpga_region - FPGA Region structure
+ * @dev: FPGA Region device
+ * @mutex: enforces exclusive reference to region
+ * @bridge_list: list of FPGA bridges specified in region
+ * @mgr: FPGA manager
+ * @info: FPGA image info
+ * @compat_id: FPGA region id for compatibility check.
+ * @ops_owner: module containing the get_bridges function
+ * @priv: private data
+ * @get_bridges: optional function to get bridges to a list
+ */
+struct fpga_region {
+ struct device dev;
+ struct mutex mutex; /* for exclusive reference to region */
+ struct list_head bridge_list;
+ struct fpga_manager *mgr;
+ struct fpga_image_info *info;
+ struct fpga_compat_id *compat_id;
+ struct module *ops_owner;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+};
+
+#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
+
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+ int (*match)(struct device *, const void *));
+
+int fpga_region_program_fpga(struct fpga_region *region);
+
+#define fpga_region_register_full(parent, info) \
+ __fpga_region_register_full(parent, info, THIS_MODULE)
+struct fpga_region *
+__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
+ struct module *owner);
+
+#define fpga_region_register(parent, mgr, get_bridges) \
+ __fpga_region_register(parent, mgr, get_bridges, THIS_MODULE)
+struct fpga_region *
+__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *), struct module *owner);
+void fpga_region_unregister(struct fpga_region *region);
+
+#endif /* _FPGA_REGION_H */
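
A consumer-side sketch of the region API above (how the region and image info were obtained is assumed): the info attached to the region drives both the bridges and the manager during programming.

        #include <linux/fpga/fpga-region.h>

        static int my_program(struct fpga_region *region, struct fpga_image_info *info)
        {
                region->info = info;            /* consumed by bridges and manager */
                return fpga_region_program_fpga(region);
        }
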
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
new file mode 100644
index 000000000000..0a3bcd1718f3
--- /dev/null
+++ b/include/linux/fprobe.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Simple ftrace probe wrapper */
+#ifndef _LINUX_FPROBE_H
+#define _LINUX_FPROBE_H
+
+#include <linux/compiler.h>
+#include <linux/ftrace.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
+#include <linux/slab.h>
+
+struct fprobe;
+typedef int (*fprobe_entry_cb)(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct ftrace_regs *regs,
+ void *entry_data);
+
+typedef void (*fprobe_exit_cb)(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct ftrace_regs *regs,
+ void *entry_data);
+
+/**
+ * struct fprobe_hlist_node - address based hash list node for fprobe.
+ *
+ * @hlist: The hlist node for address search hash table.
+ * @addr: One of the probed addresses of @fp.
+ * @fp: The fprobe which owns this.
+ */
+struct fprobe_hlist_node {
+ struct rhlist_head hlist;
+ unsigned long addr;
+ struct fprobe *fp;
+};
+
+/**
+ * struct fprobe_hlist - hash list nodes for fprobe.
+ *
+ * @hlist: The hlist node for existence checking hash table.
+ * @rcu: rcu_head for RCU deferred release.
+ * @fp: The fprobe which owns this fprobe_hlist.
+ * @size: The size of @array.
+ * @array: The fprobe_hlist_node for each address to probe.
+ */
+struct fprobe_hlist {
+ struct hlist_node hlist;
+ struct rcu_head rcu;
+ struct fprobe *fp;
+ int size;
+ struct fprobe_hlist_node array[] __counted_by(size);
+};
+
+/**
+ * struct fprobe - ftrace based probe.
+ *
+ * @nmissed: The counter for missing events.
+ * @flags: The status flag.
+ * @entry_data_size: The private data storage size.
+ * @entry_handler: The callback function for function entry.
+ * @exit_handler: The callback function for function exit.
+ * @hlist_array: The fprobe_hlist for fprobe search from IP hash table.
+ */
+struct fprobe {
+ unsigned long nmissed;
+ unsigned int flags;
+ size_t entry_data_size;
+
+ fprobe_entry_cb entry_handler;
+ fprobe_exit_cb exit_handler;
+
+ struct fprobe_hlist *hlist_array;
+};
+
+/* This fprobe is soft-disabled. */
+#define FPROBE_FL_DISABLED 1
+
+/*
+ * This fprobe handler will be shared with kprobes.
+ * This flag must be set before registering.
+ */
+#define FPROBE_FL_KPROBE_SHARED 2
+
+static inline bool fprobe_disabled(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_DISABLED : false;
+}
+
+static inline bool fprobe_shared_with_kprobes(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false;
+}
+
+#ifdef CONFIG_FPROBE
+int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter);
+int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
+int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
+int unregister_fprobe(struct fprobe *fp);
+bool fprobe_is_registered(struct fprobe *fp);
+int fprobe_count_ips_from_filter(const char *filter, const char *notfilter);
+#else
+static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int unregister_fprobe(struct fprobe *fp)
+{
+ return -EOPNOTSUPP;
+}
+static inline bool fprobe_is_registered(struct fprobe *fp)
+{
+ return false;
+}
+static inline int fprobe_count_ips_from_filter(const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/**
+ * disable_fprobe() - Disable fprobe
+ * @fp: The fprobe to be disabled.
+ *
+ * This will soft-disable @fp. Note that this doesn't remove the ftrace
+ * hooks from the function entry.
+ */
+static inline void disable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags |= FPROBE_FL_DISABLED;
+}
+
+/**
+ * enable_fprobe() - Enable fprobe
+ * @fp: The fprobe to be enabled.
+ *
+ * This will soft-enable @fp.
+ */
+static inline void enable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags &= ~FPROBE_FL_DISABLED;
+}
+
+/* The entry data size is encoded in 4 bits, i.e. at most 15 words (15 * sizeof(long) bytes) */
+#define FPROBE_DATA_SIZE_BITS 4
+#define MAX_FPROBE_DATA_SIZE_WORD ((1L << FPROBE_DATA_SIZE_BITS) - 1)
+#define MAX_FPROBE_DATA_SIZE (MAX_FPROBE_DATA_SIZE_WORD * sizeof(long))
+
+#endif
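
A sketch of registering an fprobe with the API above (the symbol and handler are illustrative): the filter argument accepts a symbol glob, and returning 0 from the entry handler keeps the exit handler armed for that invocation.

        #include <linux/fprobe.h>
        #include <linux/printk.h>

        static int my_entry(struct fprobe *fp, unsigned long entry_ip,
                            unsigned long ret_ip, struct ftrace_regs *regs,
                            void *entry_data)
        {
                pr_info("entered %pS\n", (void *)entry_ip);
                return 0;
        }

        static struct fprobe my_fp = {
                .entry_handler = my_entry,
        };

        static int my_fprobe_init(void)
        {
                return register_fprobe(&my_fp, "vfs_read", NULL);  /* filter, no notfilter */
        }
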
diff --git a/include/linux/fpu.h b/include/linux/fpu.h
new file mode 100644
index 000000000000..2fb63e22913b
--- /dev/null
+++ b/include/linux/fpu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_FPU_H
+#define _LINUX_FPU_H
+
+#ifdef _LINUX_FPU_COMPILATION_UNIT
+#error FP code must be compiled separately. See Documentation/core-api/floating-point.rst.
+#endif
+
+#include <asm/fpu.h>
+
+#endif
diff --git a/include/linux/frame.h b/include/linux/frame.h
deleted file mode 100644
index d772c61c31da..000000000000
--- a/include/linux/frame.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _LINUX_FRAME_H
-#define _LINUX_FRAME_H
-
-#ifdef CONFIG_STACK_VALIDATION
-/*
- * This macro marks the given function's stack frame as "non-standard", which
- * tells objtool to ignore the function when doing stack metadata validation.
- * It should only be used in special cases where you're 100% sure it won't
- * affect the reliability of frame pointers and kernel stack traces.
- *
- * For more information, see tools/objtool/Documentation/stack-validation.txt.
- */
-#define STACK_FRAME_NON_STANDARD(func) \
- static void __used __section(.discard.func_stack_frame_non_standard) \
- *__func_stack_frame_non_standard_##func = func
-
-#else /* !CONFIG_STACK_VALIDATION */
-
-#define STACK_FRAME_NON_STANDARD(func)
-
-#endif /* CONFIG_STACK_VALIDATION */
-
-#endif /* _LINUX_FRAME_H */
diff --git a/include/linux/framer/framer-provider.h b/include/linux/framer/framer-provider.h
new file mode 100644
index 000000000000..9724d4b44b9c
--- /dev/null
+++ b/include/linux/framer/framer-provider.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer provider header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_PROVIDER_FRAMER_H
+#define __DRIVERS_PROVIDER_FRAMER_H
+
+#include <linux/export.h>
+#include <linux/framer/framer.h>
+#include <linux/types.h>
+
+#define FRAMER_FLAG_POLL_STATUS BIT(0)
+
+/**
+ * struct framer_ops - set of function pointers for performing framer operations
+ * @init: operation to be performed for initializing the framer
+ * @exit: operation to be performed while exiting
+ * @power_on: powering on the framer
+ * @power_off: powering off the framer
+ * @flags: OR-ed flags (FRAMER_FLAG_*) to ask for core functionality
+ * - @FRAMER_FLAG_POLL_STATUS:
+ * Ask the core to perform a polling to get the framer status and
+ * notify consumers on change.
+ * The framer should call @framer_notify_status_change() when it
+ * detects a status change. This is usually done using interrupts.
+ * If the framer cannot detect this change, it can ask the core for
+ * a status polling. The core will call @get_status() periodically
+ * and, on change detected, it will notify the consumer.
+ *            The @get_status() op must be implemented in this case.
+ * @owner: the module owner containing the ops
+ */
+struct framer_ops {
+ int (*init)(struct framer *framer);
+ void (*exit)(struct framer *framer);
+ int (*power_on)(struct framer *framer);
+ int (*power_off)(struct framer *framer);
+
+ /**
+ * @get_status:
+ *
+ * Optional.
+ *
+ * Used to get the framer status. framer_init() must have
+ * been called on the framer.
+ *
+	 * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_status)(struct framer *framer, struct framer_status *status);
+
+ /**
+ * @set_config:
+ *
+ * Optional.
+ *
+ * Used to set the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+	 * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*set_config)(struct framer *framer, const struct framer_config *config);
+
+ /**
+ * @get_config:
+ *
+ * Optional.
+ *
+ * Used to get the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+	 * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_config)(struct framer *framer, struct framer_config *config);
+
+ u32 flags;
+ struct module *owner;
+};
+
+/**
+ * struct framer_provider - represents the framer provider
+ * @dev: framer provider device
+ * @owner: the module owner having of_xlate
+ * @list: to maintain a linked list of framer providers
+ * @of_xlate: function pointer to obtain framer instance from framer pointer
+ */
+struct framer_provider {
+ struct device *dev;
+ struct module *owner;
+ struct list_head list;
+ struct framer * (*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args);
+};
+
+static inline void framer_set_drvdata(struct framer *framer, void *data)
+{
+ dev_set_drvdata(&framer->dev, data);
+}
+
+static inline void *framer_get_drvdata(struct framer *framer)
+{
+ return dev_get_drvdata(&framer->dev);
+}
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+
+/* Create and destroy a framer */
+struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+void framer_destroy(struct framer *framer);
+
+/* devm version */
+struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+
+struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args);
+
+struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args));
+
+void framer_provider_of_unregister(struct framer_provider *framer_provider);
+
+struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args));
+
+void framer_notify_status_change(struct framer *framer);
+
+#else /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+static inline struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_destroy(struct framer *framer)
+{
+}
+
+/* devm version */
+static inline struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_provider_of_unregister(struct framer_provider *framer_provider)
+{
+}
+
+static inline struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_notify_status_change(struct framer *framer)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+#define framer_provider_of_register(dev, xlate) \
+ __framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#define devm_framer_provider_of_register(dev, xlate) \
+ __devm_framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#endif /* __DRIVERS_PROVIDER_FRAMER_H */
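
To tie the provider pieces together, a sketch of a framer driver without status interrupts (the callback body is an assumption): setting FRAMER_FLAG_POLL_STATUS makes the core poll @get_status() and notify consumers on change.

        #include <linux/err.h>
        #include <linux/framer/framer-provider.h>
        #include <linux/platform_device.h>

        static int my_get_status(struct framer *framer, struct framer_status *status)
        {
                status->link_is_on = true;      /* assumed hardware read */
                return 0;
        }

        static const struct framer_ops my_framer_ops = {
                .get_status = my_get_status,
                .flags      = FRAMER_FLAG_POLL_STATUS,  /* no IRQ: let the core poll */
                .owner      = THIS_MODULE,
        };

        static int my_framer_probe(struct platform_device *pdev)
        {
                struct framer *framer;

                framer = devm_framer_create(&pdev->dev, NULL, &my_framer_ops);
                if (IS_ERR(framer))
                        return PTR_ERR(framer);
                return PTR_ERR_OR_ZERO(devm_framer_provider_of_register(&pdev->dev,
                                        framer_provider_simple_of_xlate));
        }
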
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h
new file mode 100644
index 000000000000..2b85fe9e7f9a
--- /dev/null
+++ b/include/linux/framer/framer.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_FRAMER_H
+#define __DRIVERS_FRAMER_H
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+/**
+ * enum framer_iface - Framer interface
+ * @FRAMER_IFACE_E1: E1 interface
+ * @FRAMER_IFACE_T1: T1 interface
+ */
+enum framer_iface {
+ FRAMER_IFACE_E1,
+ FRAMER_IFACE_T1,
+};
+
+/**
+ * enum framer_clock_type - Framer clock type
+ * @FRAMER_CLOCK_EXT: External clock
+ * @FRAMER_CLOCK_INT: Internal clock
+ */
+enum framer_clock_type {
+ FRAMER_CLOCK_EXT,
+ FRAMER_CLOCK_INT,
+};
+
+/**
+ * struct framer_config - Framer configuration
+ * @iface: Framer line interface
+ * @clock_type: Framer clock type
+ * @line_clock_rate: Framer line clock rate
+ */
+struct framer_config {
+ enum framer_iface iface;
+ enum framer_clock_type clock_type;
+ unsigned long line_clock_rate;
+};
+
+/**
+ * struct framer_status - Framer status
+ * @link_is_on: Framer link state: true when the link is on, false when it is off.
+ */
+struct framer_status {
+ bool link_is_on;
+};
+
+/**
+ * enum framer_event - Event available for notification
+ * @FRAMER_EVENT_STATUS: Event notified on framer_status changes
+ */
+enum framer_event {
+ FRAMER_EVENT_STATUS,
+};
+
+/**
+ * struct framer - represents the framer device
+ * @dev: framer device
+ * @id: id of the framer device
+ * @ops: function pointers for performing framer operations
+ * @mutex: mutex to protect framer_ops
+ * @init_count: used to protect when the framer is used by multiple consumers
+ * @power_count: used to protect when the framer is used by multiple consumers
+ * @pwr: power regulator associated with the framer
+ * @notify_status_work: work structure used for status notifications
+ * @notifier_list: notifier list used for notifications
+ * @polling_work: delayed work structure used for the polling task
+ * @prev_status: previous read status used by the polling task to detect changes
+ */
+struct framer {
+ struct device dev;
+ int id;
+ const struct framer_ops *ops;
+ struct mutex mutex; /* Protect framer */
+ int init_count;
+ int power_count;
+ struct regulator *pwr;
+ struct work_struct notify_status_work;
+ struct blocking_notifier_head notifier_list;
+ struct delayed_work polling_work;
+ struct framer_status prev_status;
+};
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+int framer_pm_runtime_get(struct framer *framer);
+int framer_pm_runtime_get_sync(struct framer *framer);
+int framer_pm_runtime_put(struct framer *framer);
+int framer_pm_runtime_put_sync(struct framer *framer);
+int framer_init(struct framer *framer);
+int framer_exit(struct framer *framer);
+int framer_power_on(struct framer *framer);
+int framer_power_off(struct framer *framer);
+int framer_get_status(struct framer *framer, struct framer_status *status);
+int framer_get_config(struct framer *framer, struct framer_config *config);
+int framer_set_config(struct framer *framer, const struct framer_config *config);
+int framer_notifier_register(struct framer *framer, struct notifier_block *nb);
+int framer_notifier_unregister(struct framer *framer, struct notifier_block *nb);
+
+struct framer *framer_get(struct device *dev, const char *con_id);
+void framer_put(struct device *dev, struct framer *framer);
+
+struct framer *devm_framer_get(struct device *dev, const char *con_id);
+struct framer *devm_framer_optional_get(struct device *dev, const char *con_id);
+#else
+static inline int framer_pm_runtime_get(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_get_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_init(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_exit(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_on(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_off(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_status(struct framer *framer, struct framer_status *status)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_config(struct framer *framer, struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_set_config(struct framer *framer, const struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_register(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_unregister(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline struct framer *framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_put(struct device *dev, struct framer *framer)
+{
+}
+
+static inline struct framer *devm_framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *devm_framer_optional_get(struct device *dev, const char *con_id)
+{
+ return NULL;
+}
+
+#endif
+
+#endif /* __DRIVERS_FRAMER_H */
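
And the matching consumer sketch (how the device is wired up is assumed): the framer is looked up via its phandle, initialized, and powered, after which a notifier block can watch FRAMER_EVENT_STATUS.

        #include <linux/err.h>
        #include <linux/framer/framer.h>

        static int my_consumer_probe(struct device *dev)
        {
                struct framer *framer;
                int ret;

                framer = devm_framer_get(dev, NULL);  /* resolved via DT phandle */
                if (IS_ERR(framer))
                        return PTR_ERR(framer);

                ret = framer_init(framer);
                if (ret)
                        return ret;
                return framer_power_on(framer);
        }
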
diff --git a/include/linux/framer/pef2256.h b/include/linux/framer/pef2256.h
new file mode 100644
index 000000000000..71d80af58c40
--- /dev/null
+++ b/include/linux/framer/pef2256.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PEF2256 consumer API
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+#ifndef __PEF2256_H__
+#define __PEF2256_H__
+
+#include <linux/types.h>
+
+struct pef2256;
+struct regmap;
+
+/* Retrieve the PEF2256 regmap */
+struct regmap *pef2256_get_regmap(struct pef2256 *pef2256);
+
+/* PEF2256 hardware versions */
+enum pef2256_version {
+ PEF2256_VERSION_UNKNOWN,
+ PEF2256_VERSION_1_2,
+ PEF2256_VERSION_2_1,
+ PEF2256_VERSION_2_2,
+};
+
+/* Get the PEF2256 hardware version */
+enum pef2256_version pef2256_get_version(struct pef2256 *pef2256);
+
+#endif /* __PEF2256_H__ */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index dd03e837ebb7..0a8c6c4d1a82 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Freezer declarations */
#ifndef FREEZER_H_INCLUDED
@@ -7,9 +8,11 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_FREEZER
-extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
+DECLARE_STATIC_KEY_FALSE(freezer_active);
+
extern bool pm_freezing; /* PM freezing in effect */
extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
@@ -19,23 +22,25 @@ extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
extern unsigned int freeze_timeout_msecs;
/*
- * Check if a process has been frozen
+ * Check if a process has been frozen for PM or cgroup1 freezer. Note that
+ * cgroup2 freezer uses the job control mechanism and does not interact with
+ * the PM freezer.
*/
-static inline bool frozen(struct task_struct *p)
-{
- return p->flags & PF_FROZEN;
-}
+extern bool frozen(struct task_struct *p);
extern bool freezing_slow_path(struct task_struct *p);
/*
- * Check if there is a request to freeze a process
+ * Check if there is a request to freeze a task from PM or cgroup1 freezer.
+ * Note that cgroup2 freezer uses the job control mechanism and does not
+ * interact with the PM freezer.
*/
static inline bool freezing(struct task_struct *p)
{
- if (likely(!atomic_read(&system_freezing_cnt)))
- return false;
- return freezing_slow_path(p);
+ if (static_branch_unlikely(&freezer_active))
+ return freezing_slow_path(p);
+
+ return false;
}
/* Takes and releases task alloc lock using task_lock() */
@@ -46,216 +51,30 @@ extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
+extern void thaw_process(struct task_struct *p);
-/*
- * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
- * If try_to_freeze causes a lockdep warning it means the caller may deadlock
- */
-static inline bool try_to_freeze_unsafe(void)
+static inline bool try_to_freeze(void)
{
might_sleep();
if (likely(!freezing(current)))
return false;
- return __refrigerator(false);
-}
-
-static inline bool try_to_freeze(void)
-{
if (!(current->flags & PF_NOFREEZE))
debug_check_no_locks_held();
- return try_to_freeze_unsafe();
+ return __refrigerator(false);
}
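
For context, a sketch of a freezable kthread under the simplified API above (the work function is an assumed placeholder): set_freezable() opts the thread in, and try_to_freeze() is the park point during suspend.

        #include <linux/freezer.h>
        #include <linux/kthread.h>
        #include <linux/sched.h>

        static void do_work(void)
        {
                /* assumed unit of work */
        }

        static int my_worker(void *data)
        {
                set_freezable();                /* opt in to the freezer */
                while (!kthread_should_stop()) {
                        try_to_freeze();        /* parks here while frozen */
                        do_work();
                        schedule_timeout_interruptible(HZ);
                }
                return 0;
        }
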
extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);
#ifdef CONFIG_CGROUP_FREEZER
-extern bool cgroup_freezing(struct task_struct *task);
+extern bool cgroup1_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
-static inline bool cgroup_freezing(struct task_struct *task)
+static inline bool cgroup1_freezing(struct task_struct *task)
{
return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
-/*
- * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
- * calls wait_for_completion(&vfork) and reset right after it returns from this
- * function. Next, the parent should call try_to_freeze() to freeze itself
- * appropriately in case the child has exited before the freezing of tasks is
- * complete. However, we don't want kernel threads to be frozen in unexpected
- * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
- * parent won't really block freeze_processes(), since ____call_usermodehelper()
- * (the child) does a little before exec/exit and it can't be frozen before
- * waking up the parent.
- */
-
-
-/**
- * freezer_do_not_count - tell freezer to ignore %current
- *
- * Tell freezers to ignore the current task when determining whether the
- * target frozen state is reached. IOW, the current task will be
- * considered frozen enough by freezers.
- *
- * The caller shouldn't do anything which isn't allowed for a frozen task
- * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
- * wrap a scheduling operation and nothing much else.
- */
-static inline void freezer_do_not_count(void)
-{
- current->flags |= PF_FREEZER_SKIP;
-}
-
-/**
- * freezer_count - tell freezer to stop ignoring %current
- *
- * Undo freezer_do_not_count(). It tells freezers that %current should be
- * considered again and tries to freeze if freezing condition is already in
- * effect.
- */
-static inline void freezer_count(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- /*
- * If freezing is in progress, the following paired with smp_mb()
- * in freezer_should_skip() ensures that either we see %true
- * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
- */
- smp_mb();
- try_to_freeze();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezer_count_unsafe(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- smp_mb();
- try_to_freeze_unsafe();
-}
-
-/**
- * freezer_should_skip - whether to skip a task when determining frozen
- * state is reached
- * @p: task in question
- *
- * This function is used by freezers after establishing %true freezing() to
- * test whether a task should be skipped when determining the target frozen
- * state is reached. IOW, if this function returns %true, @p is considered
- * frozen enough.
- */
-static inline bool freezer_should_skip(struct task_struct *p)
-{
- /*
- * The following smp_mb() paired with the one in freezer_count()
- * ensures that either freezer_count() sees %true freezing() or we
- * see cleared %PF_FREEZER_SKIP and return %false. This makes it
- * impossible for a task to slip frozen state testing after
- * clearing %PF_FREEZER_SKIP.
- */
- smp_mb();
- return p->flags & PF_FREEZER_SKIP;
-}
-
-/*
- * These functions are intended to be used whenever you want allow a sleeping
- * task to be frozen. Note that neither return any clear indication of
- * whether a freeze event happened while in this function.
- */
-
-/* Like schedule(), but should not block the freezer. */
-static inline void freezable_schedule(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezable_schedule_unsafe(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count_unsafe();
-}
-
-/*
- * Like schedule_timeout(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout(timeout);
- freezer_count();
- return __retval;
-}
-
-/*
- * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout_interruptible(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_interruptible(timeout);
- freezer_count();
- return __retval;
-}
-
-/* Like schedule_timeout_killable(), but should not block the freezer. */
-static inline long freezable_schedule_timeout_killable(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count();
- return __retval;
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count_unsafe();
- return __retval;
-}
-
-/*
- * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
- u64 delta, const enum hrtimer_mode mode)
-{
- int __retval;
- freezer_do_not_count();
- __retval = schedule_hrtimeout_range(expires, delta, mode);
- freezer_count();
- return __retval;
-}
-
-/*
- * Freezer-friendly wrappers around wait_event_interruptible(),
- * wait_event_killable() and wait_event_interruptible_timeout(), originally
- * defined in <linux/wait.h>
- */
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-#define wait_event_freezekillable_unsafe(wq, condition) \
-({ \
- int __retval; \
- freezer_do_not_count(); \
- __retval = wait_event_killable(wq, (condition)); \
- freezer_count_unsafe(); \
- __retval; \
-})
-
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
@@ -266,36 +85,12 @@ static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}
+static inline void thaw_process(struct task_struct *p) {}
-static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }
-static inline void freezer_do_not_count(void) {}
-static inline void freezer_count(void) {}
-static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-#define freezable_schedule() schedule()
-
-#define freezable_schedule_unsafe() schedule()
-
-#define freezable_schedule_timeout(timeout) schedule_timeout(timeout)
-
-#define freezable_schedule_timeout_interruptible(timeout) \
- schedule_timeout_interruptible(timeout)
-
-#define freezable_schedule_timeout_killable(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_timeout_killable_unsafe(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
- schedule_hrtimeout_range(expires, delta, mode)
-
-#define wait_event_freezekillable_unsafe(wq, condition) \
- wait_event_killable(wq, condition)
-
#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
deleted file mode 100644
index 1d18af034554..000000000000
--- a/include/linux/frontswap.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef _LINUX_FRONTSWAP_H
-#define _LINUX_FRONTSWAP_H
-
-#include <linux/swap.h>
-#include <linux/mm.h>
-#include <linux/bitops.h>
-#include <linux/jump_label.h>
-
-struct frontswap_ops {
- void (*init)(unsigned); /* this swap type was just swapon'ed */
- int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
- int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
- void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
- void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
- struct frontswap_ops *next; /* private pointer to next ops */
-};
-
-extern void frontswap_register_ops(struct frontswap_ops *ops);
-extern void frontswap_shrink(unsigned long);
-extern unsigned long frontswap_curr_pages(void);
-extern void frontswap_writethrough(bool);
-#define FRONTSWAP_HAS_EXCLUSIVE_GETS
-extern void frontswap_tmem_exclusive_gets(bool);
-
-extern bool __frontswap_test(struct swap_info_struct *, pgoff_t);
-extern void __frontswap_init(unsigned type, unsigned long *map);
-extern int __frontswap_store(struct page *page);
-extern int __frontswap_load(struct page *page);
-extern void __frontswap_invalidate_page(unsigned, pgoff_t);
-extern void __frontswap_invalidate_area(unsigned);
-
-#ifdef CONFIG_FRONTSWAP
-extern struct static_key_false frontswap_enabled_key;
-
-static inline bool frontswap_enabled(void)
-{
- return static_branch_unlikely(&frontswap_enabled_key);
-}
-
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return __frontswap_test(sis, offset);
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
- p->frontswap_map = map;
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return p->frontswap_map;
-}
-#else
-/* all inline routines become no-ops and all externs are ignored */
-
-static inline bool frontswap_enabled(void)
-{
- return false;
-}
-
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return false;
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return NULL;
-}
-#endif
-
-static inline int frontswap_store(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_store(page);
-
- return -1;
-}
-
-static inline int frontswap_load(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_load(page);
-
- return -1;
-}
-
-static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_page(type, offset);
-}
-
-static inline void frontswap_invalidate_area(unsigned type)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_area(type);
-}
-
-static inline void frontswap_init(unsigned type, unsigned long *map)
-{
-#ifdef CONFIG_FRONTSWAP
- __frontswap_init(type, map);
-#endif
-}
-
-#endif /* _LINUX_FRONTSWAP_H */
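
The deleted header is a compact illustration of the static-branch-gated hook pattern: a DEFINE_STATIC_KEY_FALSE key keeps every hook a patched no-op until a backend registers. A generic sketch of the same pattern, with all names hypothetical:

	DEFINE_STATIC_KEY_FALSE(feature_enabled_key);

	static inline int feature_store(struct page *page)
	{
		if (static_branch_unlikely(&feature_enabled_key))
			return __feature_store(page);	/* out-of-line slow path */
		return -1;				/* feature disabled */
	}
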
diff --git a/include/linux/fs.h b/include/linux/fs.h
index baea880c5c03..04ceeca12a0d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
+#include <linux/fs/super.h>
+#include <linux/vfsdebug.h>
#include <linux/linkage.h>
#include <linux/wait_bit.h>
#include <linux/kdev_t.h>
@@ -9,9 +12,9 @@
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/list.h>
-#include <linux/list_lru.h>
#include <linux/llist.h>
#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/pid.h>
@@ -22,7 +25,6 @@
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fcntl.h>
-#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>
@@ -34,14 +36,25 @@
#include <linux/delayed_call.h>
#include <linux/uuid.h>
#include <linux/errseq.h>
+#include <linux/ioprio.h>
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+#include <linux/mount.h>
+#include <linux/cred.h>
+#include <linux/mnt_idmapping.h>
+#include <linux/slab.h>
+#include <linux/maple_tree.h>
+#include <linux/rw_hint.h>
+#include <linux/file_ref.h>
+#include <linux/unicode.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
-struct backing_dev_info;
struct bdi_writeback;
struct bio;
-struct export_operations;
+struct io_comp_batch;
+struct fiemap_extent_info;
struct hd_geometry;
struct iovec;
struct kiocb;
@@ -54,23 +67,21 @@ struct vfsmount;
struct cred;
struct swap_info_struct;
struct seq_file;
-struct workqueue_struct;
struct iov_iter;
-struct fscrypt_info;
-struct fscrypt_operations;
+struct fsnotify_mark_connector;
+struct fs_context;
+struct fs_parameter_spec;
+struct file_kattr;
+struct iomap_ops;
+struct delegated_inode;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
extern void __init files_init(void);
extern void __init files_maxfiles_init(void);
-extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
extern unsigned int sysctl_nr_open;
-extern struct inodes_stat_t inodes_stat;
-extern int leases_enable, lease_break_time;
-extern int sysctl_protected_symlinks;
-extern int sysctl_protected_hardlinks;
typedef __kernel_rwf_t rwf_t;
@@ -92,32 +103,32 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/*
* flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
- * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
+ * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open()
*/
/* file is open for reading */
-#define FMODE_READ ((__force fmode_t)0x1)
+#define FMODE_READ ((__force fmode_t)(1 << 0))
/* file is open for writing */
-#define FMODE_WRITE ((__force fmode_t)0x2)
+#define FMODE_WRITE ((__force fmode_t)(1 << 1))
/* file is seekable */
-#define FMODE_LSEEK ((__force fmode_t)0x4)
+#define FMODE_LSEEK ((__force fmode_t)(1 << 2))
/* file can be accessed using pread */
-#define FMODE_PREAD ((__force fmode_t)0x8)
+#define FMODE_PREAD ((__force fmode_t)(1 << 3))
/* file can be accessed using pwrite */
-#define FMODE_PWRITE ((__force fmode_t)0x10)
+#define FMODE_PWRITE ((__force fmode_t)(1 << 4))
/* File is opened for execution with sys_execve / sys_uselib */
-#define FMODE_EXEC ((__force fmode_t)0x20)
-/* File is opened with O_NDELAY (only set for block devices) */
-#define FMODE_NDELAY ((__force fmode_t)0x40)
-/* File is opened with O_EXCL (only set for block devices) */
-#define FMODE_EXCL ((__force fmode_t)0x80)
-/* File is opened using open(.., 3, ..) and is writeable only for ioctls
- (special hack for floppy.c) */
-#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
+#define FMODE_EXEC ((__force fmode_t)(1 << 5))
+/* File writes are restricted (block device specific) */
+#define FMODE_WRITE_RESTRICTED ((__force fmode_t)(1 << 6))
+/* File supports atomic writes */
+#define FMODE_CAN_ATOMIC_WRITE ((__force fmode_t)(1 << 7))
+
+/* FMODE_* bit 8 */
+
/* 32bit hashes as llseek() offset (for directories) */
-#define FMODE_32BITHASH ((__force fmode_t)0x200)
+#define FMODE_32BITHASH ((__force fmode_t)(1 << 9))
/* 64bit hashes as llseek() offset (for directories) */
-#define FMODE_64BITHASH ((__force fmode_t)0x400)
+#define FMODE_64BITHASH ((__force fmode_t)(1 << 10))
/*
* Don't update ctime and mtime.
@@ -125,39 +136,85 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* Currently a special hack for the XFS open_by_handle ioctl, but we'll
* hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
*/
-#define FMODE_NOCMTIME ((__force fmode_t)0x800)
+#define FMODE_NOCMTIME ((__force fmode_t)(1 << 11))
/* Expect random access pattern */
-#define FMODE_RANDOM ((__force fmode_t)0x1000)
+#define FMODE_RANDOM ((__force fmode_t)(1 << 12))
-/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
-#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
+/* Supports IOCB_HAS_METADATA */
+#define FMODE_HAS_METADATA ((__force fmode_t)(1 << 13))
/* File is opened with O_PATH; almost nothing can be done with it */
-#define FMODE_PATH ((__force fmode_t)0x4000)
+#define FMODE_PATH ((__force fmode_t)(1 << 14))
/* File needs atomic accesses to f_pos */
-#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
+#define FMODE_ATOMIC_POS ((__force fmode_t)(1 << 15))
/* Write access to underlying fs */
-#define FMODE_WRITER ((__force fmode_t)0x10000)
+#define FMODE_WRITER ((__force fmode_t)(1 << 16))
/* Has read method(s) */
-#define FMODE_CAN_READ ((__force fmode_t)0x20000)
+#define FMODE_CAN_READ ((__force fmode_t)(1 << 17))
/* Has write method(s) */
-#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+#define FMODE_CAN_WRITE ((__force fmode_t)(1 << 18))
+
+#define FMODE_OPENED ((__force fmode_t)(1 << 19))
+#define FMODE_CREATED ((__force fmode_t)(1 << 20))
-/* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
+/* File is stream-like */
+#define FMODE_STREAM ((__force fmode_t)(1 << 21))
-/* File is capable of returning -EAGAIN if AIO will block */
-#define FMODE_AIO_NOWAIT ((__force fmode_t)0x8000000)
+/* File supports DIRECT IO */
+#define FMODE_CAN_ODIRECT ((__force fmode_t)(1 << 22))
+
+#define FMODE_NOREUSE ((__force fmode_t)(1 << 23))
+
+/* File is embedded in backing_file object */
+#define FMODE_BACKING ((__force fmode_t)(1 << 24))
+
+/*
+ * Together with FMODE_NONOTIFY_PERM defines which fsnotify events shouldn't be
+ * generated (see below)
+ */
+#define FMODE_NONOTIFY ((__force fmode_t)(1 << 25))
/*
- * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
- * that indicates that they should check the contents of the iovec are
- * valid, but not check the memory that the iovec elements
- * point to.
+ * Together with FMODE_NONOTIFY defines which fsnotify events shouldn't be
+ * generated (see below)
*/
-#define CHECK_IOVEC_ONLY -1
+#define FMODE_NONOTIFY_PERM ((__force fmode_t)(1 << 26))
+
+/* File is capable of returning -EAGAIN if I/O will block */
+#define FMODE_NOWAIT ((__force fmode_t)(1 << 27))
+
+/* File represents mount that needs unmounting */
+#define FMODE_NEED_UNMOUNT ((__force fmode_t)(1 << 28))
+
+/* File does not contribute to nr_files count */
+#define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29))
+
+/*
+ * The two FMODE_NONOTIFY* bits define which fsnotify events should not be
+ * for an open file. These are the possible values of
+ * (f->f_mode & FMODE_FSNOTIFY_MASK) and their meaning:
+ *
+ * FMODE_NONOTIFY - suppress all (incl. non-permission) events.
+ * FMODE_NONOTIFY_PERM - suppress permission (incl. pre-content) events.
+ * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only FAN_ACCESS_PERM.
+ */
+#define FMODE_FSNOTIFY_MASK \
+ (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)
+
+#define FMODE_FSNOTIFY_NONE(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == FMODE_NONOTIFY)
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+#define FMODE_FSNOTIFY_HSM(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == 0 || \
+ (mode & FMODE_FSNOTIFY_MASK) == (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM))
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == 0)
+#else
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) 0
+#define FMODE_FSNOTIFY_HSM(mode) 0
+#endif
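
Because the two suppression bits encode four states, callers test the masked value rather than individual bits. A sketch, assuming a struct file *f in an fsnotify hook (generate_pre_content_event is hypothetical):

	if (FMODE_FSNOTIFY_NONE(f->f_mode))
		return 0;			/* all events suppressed for this open */
	if (FMODE_FSNOTIFY_HSM(f->f_mode))
		generate_pre_content_event(f);	/* pre-content events still allowed */
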
/*
* Attribute flags. These should be or-ed together to figure out what
@@ -173,7 +230,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_ATIME_SET (1 << 7)
#define ATTR_MTIME_SET (1 << 8)
#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
-#define ATTR_ATTR_FLAG (1 << 10)
+#define ATTR_CTIME_SET (1 << 10)
#define ATTR_KILL_SUID (1 << 11)
#define ATTR_KILL_SGID (1 << 12)
#define ATTR_FILE (1 << 13)
@@ -181,6 +238,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET (1 << 16)
#define ATTR_TOUCH (1 << 17)
+#define ATTR_DELEG (1 << 18) /* Delegated attrs. Don't break write delegations */
/*
* Whiteout is represented by a char device. The following constants define the
@@ -201,12 +259,30 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
struct iattr {
unsigned int ia_valid;
umode_t ia_mode;
- kuid_t ia_uid;
- kgid_t ia_gid;
+ /*
+ * The two anonymous unions wrap structures with the same member.
+ *
+ * Filesystems raising FS_ALLOW_IDMAP need to use ia_vfs{g,u}id which
+ * are a dedicated type requiring the filesystem to use the dedicated
+ * helpers. Other filesystem can continue to use ia_{g,u}id until they
+ * have been ported.
+ *
+ * They always contain the same value. In other words FS_ALLOW_IDMAP
+ * pass down the same value on idmapped mounts as they would on regular
+ * mounts.
+ */
+ union {
+ kuid_t ia_uid;
+ vfsuid_t ia_vfsuid;
+ };
+ union {
+ kgid_t ia_gid;
+ vfsgid_t ia_vfsgid;
+ };
loff_t ia_size;
- struct timespec ia_atime;
- struct timespec ia_mtime;
- struct timespec ia_ctime;
+ struct timespec64 ia_atime;
+ struct timespec64 ia_mtime;
+ struct timespec64 ia_ctime;
/*
* Not an attribute, but an auxiliary info for filesystems wanting to
@@ -217,11 +293,6 @@ struct iattr {
};
/*
- * Includes for diskquotas.
- */
-#include <linux/quota.h>
-
-/*
* Maximum number of layers of fs stack. Needs to be limited to
* prevent kernel stack overflow
*/
@@ -245,7 +316,7 @@ struct iattr {
* trying again. The aop will be taking reasonable
* precautions not to livelock. If the caller held a page
* reference, it should drop it before retrying. Returned
- * by readpage().
+ * by read_folio().
*
* address_space_operation functions return these large constants to indicate
* special semantics to the caller. These are much larger than the bytes in a
@@ -258,219 +329,204 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
-#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
- * helper code (eg buffer layer)
- * to clear GFP_FS from alloc */
-
/*
* oh the beauties of C type declarations.
*/
struct page;
struct address_space;
struct writeback_control;
-
-/*
- * Write life time hint values.
- */
-enum rw_hint {
- WRITE_LIFE_NOT_SET = 0,
- WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE,
- WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT,
- WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM,
- WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG,
- WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
-};
-
-#define IOCB_EVENTFD (1 << 0)
-#define IOCB_APPEND (1 << 1)
-#define IOCB_DIRECT (1 << 2)
-#define IOCB_HIPRI (1 << 3)
-#define IOCB_DSYNC (1 << 4)
-#define IOCB_SYNC (1 << 5)
-#define IOCB_WRITE (1 << 6)
-#define IOCB_NOWAIT (1 << 7)
+struct readahead_control;
+
+/* Match RWF_* bits to IOCB bits */
+#define IOCB_HIPRI (__force int) RWF_HIPRI
+#define IOCB_DSYNC (__force int) RWF_DSYNC
+#define IOCB_SYNC (__force int) RWF_SYNC
+#define IOCB_NOWAIT (__force int) RWF_NOWAIT
+#define IOCB_APPEND (__force int) RWF_APPEND
+#define IOCB_ATOMIC (__force int) RWF_ATOMIC
+#define IOCB_DONTCACHE (__force int) RWF_DONTCACHE
+#define IOCB_NOSIGNAL (__force int) RWF_NOSIGNAL
+
+/* non-RWF related bits - start at 16 */
+#define IOCB_EVENTFD (1 << 16)
+#define IOCB_DIRECT (1 << 17)
+#define IOCB_WRITE (1 << 18)
+/* iocb->ki_waitq is valid */
+#define IOCB_WAITQ (1 << 19)
+#define IOCB_NOIO (1 << 20)
+/* can use bio alloc cache */
+#define IOCB_ALLOC_CACHE (1 << 21)
+/* kiocb is a read or write operation submitted by fs/aio.c. */
+#define IOCB_AIO_RW (1 << 22)
+#define IOCB_HAS_METADATA (1 << 23)
+
+/* for use in trace events */
+#define TRACE_IOCB_STRINGS \
+ { IOCB_HIPRI, "HIPRI" }, \
+ { IOCB_DSYNC, "DSYNC" }, \
+ { IOCB_SYNC, "SYNC" }, \
+ { IOCB_NOWAIT, "NOWAIT" }, \
+ { IOCB_APPEND, "APPEND" }, \
+ { IOCB_ATOMIC, "ATOMIC" }, \
+ { IOCB_DONTCACHE, "DONTCACHE" }, \
+ { IOCB_EVENTFD, "EVENTFD"}, \
+ { IOCB_DIRECT, "DIRECT" }, \
+ { IOCB_WRITE, "WRITE" }, \
+ { IOCB_WAITQ, "WAITQ" }, \
+ { IOCB_NOIO, "NOIO" }, \
+ { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
+ { IOCB_AIO_RW, "AIO_RW" }, \
+ { IOCB_HAS_METADATA, "AIO_HAS_METADATA" }
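
Keeping the low IOCB bits numerically equal to their RWF_* counterparts makes kiocb flag setup a mask rather than a bit-by-bit translation. Conceptually (a sketch, not the in-tree helper):

	static inline int example_rwf_to_iocb(rwf_t flags)
	{
		/* valid because IOCB_HIPRI == RWF_HIPRI, etc. (bits 0..15) */
		return (__force int)(flags & (RWF_HIPRI | RWF_DSYNC | RWF_SYNC |
					      RWF_NOWAIT | RWF_APPEND));
	}
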
struct kiocb {
struct file *ki_filp;
loff_t ki_pos;
- void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void (*ki_complete)(struct kiocb *iocb, long ret);
void *private;
int ki_flags;
- enum rw_hint ki_hint;
-} __randomize_layout;
+ u16 ki_ioprio; /* See linux/ioprio.h */
+ u8 ki_write_stream;
+
+ /*
+ * Only used for async buffered reads, where it denotes the page
+ * waitqueue associated with completing the read.
+ * Valid IFF IOCB_WAITQ is set.
+ */
+ struct wait_page_queue *ki_waitq;
+};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
return kiocb->ki_complete == NULL;
}
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
- size_t written;
- size_t count;
- union {
- char __user *buf;
- void *data;
- } arg;
- int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
- unsigned long, unsigned long);
-
struct address_space_operations {
- int (*writepage)(struct page *page, struct writeback_control *wbc);
- int (*readpage)(struct file *, struct page *);
+ int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
- /* Set a page dirty. Return true if this dirtied it */
- int (*set_page_dirty)(struct page *page);
+ /* Mark a folio dirty. Return true if this dirtied it */
+ bool (*dirty_folio)(struct address_space *, struct folio *);
- int (*readpages)(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages);
+ void (*readahead)(struct readahead_control *);
- int (*write_begin)(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
- int (*write_end)(struct file *, struct address_space *mapping,
+ int (*write_begin)(const struct kiocb *, struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata);
+ int (*write_end)(const struct kiocb *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+ struct folio *folio, void *fsdata);
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
- void (*invalidatepage) (struct page *, unsigned int, unsigned int);
- int (*releasepage) (struct page *, gfp_t);
- void (*freepage)(struct page *);
+ void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
+ bool (*release_folio)(struct folio *, gfp_t);
+ void (*free_folio)(struct folio *folio);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
- * migrate the contents of a page to the specified target. If
+ * migrate the contents of a folio to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
*/
- int (*migratepage) (struct address_space *,
- struct page *, struct page *, enum migrate_mode);
- bool (*isolate_page)(struct page *, isolate_mode_t);
- void (*putback_page)(struct page *);
- int (*launder_page) (struct page *);
- int (*is_partially_uptodate) (struct page *, unsigned long,
- unsigned long);
- void (*is_dirty_writeback) (struct page *, bool *, bool *);
- int (*error_remove_page)(struct address_space *, struct page *);
+ int (*migrate_folio)(struct address_space *, struct folio *dst,
+ struct folio *src, enum migrate_mode);
+ int (*launder_folio)(struct folio *);
+ bool (*is_partially_uptodate) (struct folio *, size_t from,
+ size_t count);
+ void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
+ int (*error_remove_folio)(struct address_space *, struct folio *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
sector_t *span);
void (*swap_deactivate)(struct file *file);
+ int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
};
extern const struct address_space_operations empty_aops;
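
A minimal folio-era aops table for a simple block-backed filesystem might look like the sketch below; the example_* callbacks are hypothetical, the library helpers are not:

	static const struct address_space_operations example_aops = {
		.read_folio		= example_read_folio,
		.writepages		= example_writepages,
		.dirty_folio		= filemap_dirty_folio,
		.invalidate_folio	= block_invalidate_folio,
		.migrate_folio		= buffer_migrate_folio,
	};
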
-/*
- * pagecache_write_begin/pagecache_write_end must be used by general code
- * to write into the pagecache.
+/**
+ * struct address_space - Contents of a cacheable, mappable object.
+ * @host: Owner, either the inode or the block_device.
+ * @i_pages: Cached pages.
+ * @invalidate_lock: Guards coherency between page cache contents and
+ * file offset->disk block mappings in the filesystem during invalidates.
+ * It is also used to block modification of page cache contents through
+ * memory mappings.
+ * @gfp_mask: Memory allocation flags to use for allocating pages.
+ * @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings.
+ * @nr_thps: Number of THPs in the pagecache (non-shmem only).
+ * @i_mmap: Tree of private and shared mappings.
+ * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
+ * @nrpages: Number of page entries, protected by the i_pages lock.
+ * @writeback_index: Writeback starts here.
+ * @a_ops: Methods.
+ * @flags: Error bits and flags (AS_*).
+ * @wb_err: The most recent error which has occurred.
+ * @i_private_lock: For use by the owner of the address_space.
+ * @i_private_list: For use by the owner of the address_space.
+ * @i_private_data: For use by the owner of the address_space.
*/
-int pagecache_write_begin(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-
-int pagecache_write_end(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
-
struct address_space {
- struct inode *host; /* owner: inode, block_device */
- struct radix_tree_root page_tree; /* radix tree of all pages */
- spinlock_t tree_lock; /* and lock protecting it */
- atomic_t i_mmap_writable;/* count VM_SHARED mappings */
- struct rb_root i_mmap; /* tree of private and shared mappings */
- struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
- /* Protected by tree_lock together with the radix tree */
- unsigned long nrpages; /* number of total pages */
- /* number of shadow or DAX exceptional entries */
- unsigned long nrexceptional;
- pgoff_t writeback_index;/* writeback starts here */
- const struct address_space_operations *a_ops; /* methods */
- unsigned long flags; /* error bits */
- spinlock_t private_lock; /* for use by the address_space */
- gfp_t gfp_mask; /* implicit gfp mask for allocations */
- struct list_head private_list; /* ditto */
- void *private_data; /* ditto */
+ struct inode *host;
+ struct xarray i_pages;
+ struct rw_semaphore invalidate_lock;
+ gfp_t gfp_mask;
+ atomic_t i_mmap_writable;
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ /* number of thp, only for non-shmem files */
+ atomic_t nr_thps;
+#endif
+ struct rb_root_cached i_mmap;
+ unsigned long nrpages;
+ pgoff_t writeback_index;
+ const struct address_space_operations *a_ops;
+ unsigned long flags;
errseq_t wb_err;
+ spinlock_t i_private_lock;
+ struct list_head i_private_list;
+ struct rw_semaphore i_mmap_rwsem;
+ void * i_private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
- * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
- */
-struct request_queue;
-
-struct block_device {
- dev_t bd_dev; /* not a kdev_t - it's a search key */
- int bd_openers;
- struct inode * bd_inode; /* will die */
- struct super_block * bd_super;
- struct mutex bd_mutex; /* open/close mutex */
- void * bd_claiming;
- void * bd_holder;
- int bd_holders;
- bool bd_write_holder;
-#ifdef CONFIG_SYSFS
- struct list_head bd_holder_disks;
-#endif
- struct block_device * bd_contains;
- unsigned bd_block_size;
- struct hd_struct * bd_part;
- /* number of times partitions within this device have been opened. */
- unsigned bd_part_count;
- int bd_invalidated;
- struct gendisk * bd_disk;
- struct request_queue * bd_queue;
- struct backing_dev_info *bd_bdi;
- struct list_head bd_list;
- /*
- * Private data. You must have bd_claim'ed the block_device
- * to use this. NOTE: bd_claim allows an owner to claim
- * the same device multiple times, the owner must take special
- * care to not mess up bd_private for that case.
+ * of struct folio's "mapping" pointer be used for FOLIO_MAPPING_ANON.
*/
- unsigned long bd_private;
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
-} __randomize_layout;
+/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
+#define PAGECACHE_TAG_DIRTY XA_MARK_0
+#define PAGECACHE_TAG_WRITEBACK XA_MARK_1
+#define PAGECACHE_TAG_TOWRITE XA_MARK_2
/*
- * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
- * radix trees
+ * Returns true if any of the pages in the mapping are marked with the tag.
*/
-#define PAGECACHE_TAG_DIRTY 0
-#define PAGECACHE_TAG_WRITEBACK 1
-#define PAGECACHE_TAG_TOWRITE 2
-
-int mapping_tagged(struct address_space *mapping, int tag);
+static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
+{
+ return xa_marked(&mapping->i_pages, tag);
+}
static inline void i_mmap_lock_write(struct address_space *mapping)
{
down_write(&mapping->i_mmap_rwsem);
}
+static inline int i_mmap_trylock_write(struct address_space *mapping)
+{
+ return down_write_trylock(&mapping->i_mmap_rwsem);
+}
+
static inline void i_mmap_unlock_write(struct address_space *mapping)
{
up_write(&mapping->i_mmap_rwsem);
}
+static inline int i_mmap_trylock_read(struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->i_mmap_rwsem);
+}
+
static inline void i_mmap_lock_read(struct address_space *mapping)
{
down_read(&mapping->i_mmap_rwsem);
@@ -481,24 +537,34 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
up_read(&mapping->i_mmap_rwsem);
}
+static inline void i_mmap_assert_locked(struct address_space *mapping)
+{
+ lockdep_assert_held(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_assert_write_locked(struct address_space *mapping)
+{
+ lockdep_assert_held_write(&mapping->i_mmap_rwsem);
+}
+
/*
* Might pages of this file be mapped into userspace?
*/
-static inline int mapping_mapped(struct address_space *mapping)
+static inline int mapping_mapped(const struct address_space *mapping)
{
- return !RB_EMPTY_ROOT(&mapping->i_mmap);
+ return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
}
/*
* Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+ * Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
* If i_mmap_writable is negative, no new writable mappings are allowed. You
* can only deny writable mappings, if none exists right now.
*/
-static inline int mapping_writably_mapped(struct address_space *mapping)
+static inline int mapping_writably_mapped(const struct address_space *mapping)
{
return atomic_read(&mapping->i_mmap_writable) > 0;
}
@@ -538,6 +604,11 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+/*
+ * ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to
+ * cache the ACL. This also means that ->get_inode_acl() can be called in RCU
+ * mode with the LOOKUP_RCU flag.
+ */
#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
@@ -552,13 +623,139 @@ is_uncached_acl(struct posix_acl *acl)
return (long)acl & 1;
}
-#define IOP_FASTPERM 0x0001
-#define IOP_LOOKUP 0x0002
-#define IOP_NOFOLLOW 0x0004
-#define IOP_XATTR 0x0008
+#define IOP_FASTPERM 0x0001
+#define IOP_LOOKUP 0x0002
+#define IOP_NOFOLLOW 0x0004
+#define IOP_XATTR 0x0008
#define IOP_DEFAULT_READLINK 0x0010
+#define IOP_MGTIME 0x0020
+#define IOP_CACHED_LINK 0x0040
+#define IOP_FASTPERM_MAY_EXEC 0x0080
-struct fsnotify_mark_connector;
+/*
+ * Inode state bits. Protected by inode->i_lock
+ *
+ * Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
+ * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
+ *
+ * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
+ * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
+ * various stages of removing an inode.
+ *
+ * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
+ *
+ * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
+ * fdatasync() (unless I_DIRTY_DATASYNC is also set).
+ * Timestamp updates are the usual cause.
+ * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+ * these changes separately from I_DIRTY_SYNC so that we
+ * don't have to write inode on fdatasync() when only
+ * e.g. the timestamps have changed.
+ * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
+ * I_DIRTY_TIME The inode itself has dirty timestamps, and the
+ * lazytime mount option is enabled. We keep track of this
+ * separately from I_DIRTY_SYNC in order to implement
+ * lazytime. This gets cleared if I_DIRTY_INODE
+ * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. But
+ * I_DIRTY_TIME can still be set if I_DIRTY_SYNC is already
+ * in place because writeback might already be in progress
+ * and we don't want to lose the time update
+ * I_NEW Serves as both a mutex and completion notification.
+ * New inodes set I_NEW. If two processes both create
+ * the same inode, one of them will release its inode and
+ * wait for I_NEW to be released before returning.
+ * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+ * also cause waiting on I_NEW, without I_NEW actually
+ * being set. find_inode() uses this to prevent returning
+ * nearly-dead inodes.
+ * I_WILL_FREE Must be set when calling write_inode_now() if i_count
+ * is zero. I_FREEING must be set when I_WILL_FREE is
+ * cleared.
+ * I_FREEING Set when inode is about to be freed but still has dirty
+ * pages or buffers attached or the inode itself is still
+ * dirty.
+ * I_CLEAR Added by clear_inode(). In this state the inode is
+ * clean and can be destroyed. Inode keeps I_FREEING.
+ *
+ * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
+ * prohibited for many purposes. iget() must wait for
+ * the inode to be completely released, then create it
+ * anew. Other functions will just ignore such inodes,
+ * if appropriate. I_NEW is used for waiting.
+ *
+ * I_SYNC Writeback of inode is running. The bit is set during
+ * data writeback, and cleared with a wakeup on the bit
+ * address once it is done. The bit is also used to pin
+ * the inode in memory for flusher thread.
+ *
+ *			I_REFERENCED	Marks the inode as recently referenced on the LRU list.
+ *
+ * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
+ * synchronize competing switching instances and to tell
+ * wb stat updates to grab the i_pages lock. See
+ * inode_switch_wbs_work_fn() for details.
+ *
+ * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
+ * and work dirs among overlayfs mounts.
+ *
+ * I_CREATING New object's inode in the middle of setting up.
+ *
+ * I_DONTCACHE Evict inode as soon as it is not used anymore.
+ *
+ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
+ * Used to detect that mark_inode_dirty() should not move
+ * inode between dirty lists.
+ *
+ * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
+ *
+ * I_LRU_ISOLATING Inode is pinned being isolated from LRU without holding
+ * i_count.
+ *
+ * Q: What is the difference between I_WILL_FREE and I_FREEING?
+ *
+ * __I_{SYNC,NEW,LRU_ISOLATING} are used to derive unique addresses to wait
+ * upon. There's one free address left.
+ */
+
+enum inode_state_bits {
+ __I_NEW = 0U,
+ __I_SYNC = 1U,
+ __I_LRU_ISOLATING = 2U
+ /* reserved wait address bit 3 */
+};
+
+enum inode_state_flags_enum {
+ I_NEW = (1U << __I_NEW),
+ I_SYNC = (1U << __I_SYNC),
+ I_LRU_ISOLATING = (1U << __I_LRU_ISOLATING),
+ /* reserved flag bit 3 */
+ I_DIRTY_SYNC = (1U << 4),
+ I_DIRTY_DATASYNC = (1U << 5),
+ I_DIRTY_PAGES = (1U << 6),
+ I_WILL_FREE = (1U << 7),
+ I_FREEING = (1U << 8),
+ I_CLEAR = (1U << 9),
+ I_REFERENCED = (1U << 10),
+ I_LINKABLE = (1U << 11),
+ I_DIRTY_TIME = (1U << 12),
+ I_WB_SWITCH = (1U << 13),
+ I_OVL_INUSE = (1U << 14),
+ I_CREATING = (1U << 15),
+ I_DONTCACHE = (1U << 16),
+ I_SYNC_QUEUED = (1U << 17),
+ I_PINNING_NETFS_WB = (1U << 18)
+};
+
+#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
+#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
+
+/*
+ * Use inode_state_read() & friends to access.
+ */
+struct inode_state_flags {
+ enum inode_state_flags_enum __state;
+};
/*
* Keep mostly read-only and often accessed (especially for
@@ -568,14 +765,13 @@ struct fsnotify_mark_connector;
struct inode {
umode_t i_mode;
unsigned short i_opflags;
- kuid_t i_uid;
- kgid_t i_gid;
unsigned int i_flags;
-
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *i_acl;
struct posix_acl *i_default_acl;
#endif
+ kuid_t i_uid;
+ kgid_t i_gid;
const struct inode_operations *i_op;
struct super_block *i_sb;
@@ -600,12 +796,16 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec i_atime;
- struct timespec i_mtime;
- struct timespec i_ctime;
+ time64_t i_atime_sec;
+ time64_t i_mtime_sec;
+ time64_t i_ctime_sec;
+ u32 i_atime_nsec;
+ u32 i_mtime_nsec;
+ u32 i_ctime_nsec;
+ u32 i_generation;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
- unsigned int i_blkbits;
+ u8 i_blkbits;
enum rw_hint i_write_hint;
blkcnt_t i_blocks;
@@ -614,7 +814,8 @@ struct inode {
#endif
/* Misc */
- unsigned long i_state;
+ struct inode_state_flags i_state;
+ /* 32-bit hole */
struct rw_semaphore i_rwsem;
unsigned long dirtied_when; /* jiffies of first dirtying */
@@ -637,39 +838,141 @@ struct inode {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
};
- u64 i_version;
+ atomic64_t i_version;
+ atomic64_t i_sequence; /* see futex */
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
-#ifdef CONFIG_IMA
+#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
atomic_t i_readcount; /* struct files open RO */
#endif
- const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
+ union {
+ const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
+ void (*free_inode)(struct inode *);
+ };
struct file_lock_context *i_flctx;
struct address_space i_data;
- struct list_head i_devices;
+ union {
+ struct list_head i_devices;
+ int i_linklen;
+ };
union {
struct pipe_inode_info *i_pipe;
- struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
unsigned i_dir_seq;
};
- __u32 i_generation;
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
+ /* 32-bit hole reserved for expanding i_fsnotify_mask */
struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- struct fscrypt_info *i_crypt_info;
-#endif
-
void *i_private; /* fs or device private pointer */
} __randomize_layout;
+/*
+ * i_state handling
+ *
+ * We hide all of it behind helpers so that we can validate consumers.
+ */
+static inline enum inode_state_flags_enum inode_state_read_once(struct inode *inode)
+{
+ return READ_ONCE(inode->i_state.__state);
+}
+
+static inline enum inode_state_flags_enum inode_state_read(struct inode *inode)
+{
+ lockdep_assert_held(&inode->i_lock);
+ return inode->i_state.__state;
+}
+
+static inline void inode_state_set_raw(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ WRITE_ONCE(inode->i_state.__state, inode->i_state.__state | flags);
+}
+
+static inline void inode_state_set(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_set_raw(inode, flags);
+}
+
+static inline void inode_state_clear_raw(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ WRITE_ONCE(inode->i_state.__state, inode->i_state.__state & ~flags);
+}
+
+static inline void inode_state_clear(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_clear_raw(inode, flags);
+}
+
+static inline void inode_state_assign_raw(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ WRITE_ONCE(inode->i_state.__state, flags);
+}
+
+static inline void inode_state_assign(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_assign_raw(inode, flags);
+}
+
+static inline void inode_state_replace_raw(struct inode *inode,
+ enum inode_state_flags_enum clearflags,
+ enum inode_state_flags_enum setflags)
+{
+ enum inode_state_flags_enum flags;
+ flags = inode->i_state.__state;
+ flags &= ~clearflags;
+ flags |= setflags;
+ inode_state_assign_raw(inode, flags);
+}
+
+static inline void inode_state_replace(struct inode *inode,
+ enum inode_state_flags_enum clearflags,
+ enum inode_state_flags_enum setflags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_replace_raw(inode, clearflags, setflags);
+}
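
Typical usage of the checked helpers, sketched for a caller that is about to start freeing an inode (the lockdep assertions inside the helpers verify the i_lock requirement):

	spin_lock(&inode->i_lock);
	WARN_ON(inode_state_read(inode) & (I_NEW | I_FREEING));
	inode_state_set(inode, I_FREEING);
	spin_unlock(&inode->i_lock);
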
+
+static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen)
+{
+ VFS_WARN_ON_INODE(strlen(link) != linklen, inode);
+ VFS_WARN_ON_INODE(inode->i_opflags & IOP_CACHED_LINK, inode);
+ inode->i_link = link;
+ inode->i_linklen = linklen;
+ inode->i_opflags |= IOP_CACHED_LINK;
+}
+
+/*
+ * Get bit address from inode->i_state to use with wait_var_event()
+ * infrastructre.
+ */
+#define inode_state_wait_address(inode, bit) ((char *)&(inode)->i_state + (bit))
+
+struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
+ struct inode *inode, u32 bit);
+
+static inline void inode_wake_up_bit(struct inode *inode, u32 bit)
+{
+ /* Caller is responsible for correct memory barriers. */
+ wake_up_var(inode_state_wait_address(inode, bit));
+}
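
The waiter side pairs with inode_wake_up_bit() through the same derived address; for example, waiting for I_SYNC to clear via the generic wait_var_event() infrastructure (a sketch, memory barriers elided):

	wait_var_event(inode_state_wait_address(inode, __I_SYNC),
		       !(inode_state_read_once(inode) & I_SYNC));
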
+
+struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
+
static inline unsigned int i_blocksize(const struct inode *node)
{
return (1 << node->i_blkbits);
@@ -681,7 +984,20 @@ static inline int inode_unhashed(struct inode *inode)
}
/*
- * inode->i_mutex nesting subclasses for the lock validator:
+ * __mark_inode_dirty expects inodes to be hashed. Since we don't
+ * want special inodes in the fileset inode space, we make them
+ * appear hashed, but do not put on any lists. hlist_del()
+ * will work fine and require no locking.
+ */
+static inline void inode_fake_hash(struct inode *inode)
+{
+ hlist_add_fake(&inode->i_hash);
+}
+
+void wait_on_new_inode(struct inode *inode);
+
+/*
+ * inode->i_rwsem nesting subclasses for the lock validator:
*
* 0: the object of the current VFS operation
* 1: parent
@@ -711,6 +1027,11 @@ static inline void inode_lock(struct inode *inode)
down_write(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_killable(struct inode *inode)
+{
+ return down_write_killable(&inode->i_rwsem);
+}
+
static inline void inode_unlock(struct inode *inode)
{
up_write(&inode->i_rwsem);
@@ -721,6 +1042,11 @@ static inline void inode_lock_shared(struct inode *inode)
down_read(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_shared_killable(struct inode *inode)
+{
+ return down_read_killable(&inode->i_rwsem);
+}
+
static inline void inode_unlock_shared(struct inode *inode)
{
up_read(&inode->i_rwsem);
@@ -746,9 +1072,47 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
down_write_nested(&inode->i_rwsem, subclass);
}
+static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
+{
+ down_read_nested(&inode->i_rwsem, subclass);
+}
+
+static inline void filemap_invalidate_lock(struct address_space *mapping)
+{
+ down_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock(struct address_space *mapping)
+{
+ up_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
+{
+ down_read(&mapping->invalidate_lock);
+}
+
+static inline int filemap_invalidate_trylock_shared(
+ struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock_shared(
+ struct address_space *mapping)
+{
+ up_read(&mapping->invalidate_lock);
+}
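
invalidate_lock is taken exclusively by paths that remove page cache and shared by paths that need a consistent cache/extent view. A truncate-style sketch:

	filemap_invalidate_lock(inode->i_mapping);
	truncate_pagecache(inode, new_size);	/* drop cached folios past new_size */
	/* ... update the on-disk block mappings ... */
	filemap_invalidate_unlock(inode->i_mapping);
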
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
+
+
/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
@@ -770,7 +1134,7 @@ static inline loff_t i_size_read(const struct inode *inode)
i_size = inode->i_size;
} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
return i_size;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
loff_t i_size;
preempt_disable();
@@ -778,13 +1142,14 @@ static inline loff_t i_size_read(const struct inode *inode)
preempt_enable();
return i_size;
#else
- return inode->i_size;
+ /* Pairs with smp_store_release() in i_size_write() */
+ return smp_load_acquire(&inode->i_size);
#endif
}
/*
* NOTE: unlike i_size_read(), i_size_write() does need locking around it
- * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
+ * (normally i_rwsem), otherwise on 32bit/SMP an update of i_size_seqcount
* can be lost, resulting in subsequent i_size_read() calls spinning forever.
*/
static inline void i_size_write(struct inode *inode, loff_t i_size)
@@ -795,12 +1160,17 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
inode->i_size = i_size;
write_seqcount_end(&inode->i_size_seqcount);
preempt_enable();
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
preempt_disable();
inode->i_size = i_size;
preempt_enable();
#else
- inode->i_size = i_size;
+ /*
+ * Pairs with smp_load_acquire() in i_size_read() to ensure
+ * changes related to inode size (such as page contents) are
+ * visible before we see the changed inode size.
+ */
+ smp_store_release(&inode->i_size, i_size);
#endif
}
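
On 64-bit, the acquire/release pair replaces the old plain load/store; writers must still serialize among themselves, normally under i_rwsem, as the comment above notes:

	inode_lock(inode);		/* serialize writers of i_size */
	i_size_write(inode, new_size);
	inode_unlock(inode);
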
@@ -814,9 +1184,8 @@ static inline unsigned imajor(const struct inode *inode)
return MAJOR(inode->i_rdev);
}
-extern struct block_device *I_BDEV(struct inode *inode);
-
struct fown_struct {
+ struct file *file; /* backpointer for security modules */
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
@@ -824,18 +1193,29 @@ struct fown_struct {
int signum; /* posix.1b rt signal to be delivered on IO */
};
-/*
- * Track a single file's readahead state
+/**
+ * struct file_ra_state - Track a file's readahead state.
+ * @start: Where the most recent readahead started.
+ * @size: Number of pages read in the most recent readahead.
+ * @async_size: Number of pages that were/are not needed immediately
+ * and so were/are genuinely "ahead". Start next readahead when
+ * the first of these pages is accessed.
+ * @ra_pages: Maximum size of a readahead request, copied from the bdi.
+ * @order: Preferred folio order used for most recent readahead.
+ * @mmap_miss: How many mmap accesses missed in the page cache.
+ * @prev_pos: The last byte in the most recent read request.
+ *
+ * When this structure is passed to ->readahead(), the "most recent"
+ * readahead means the current readahead.
*/
struct file_ra_state {
- pgoff_t start; /* where readahead started */
- unsigned int size; /* # of readahead pages */
- unsigned int async_size; /* do asynchronous readahead when
- there are only # of pages ahead */
-
- unsigned int ra_pages; /* Maximum readahead window */
- unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
- loff_t prev_pos; /* Cache last read() position */
+ pgoff_t start;
+ unsigned int size;
+ unsigned int async_size;
+ unsigned int ra_pages;
+ unsigned short order;
+ unsigned short mmap_miss;
+ loff_t prev_pos;
};
/*
@@ -847,44 +1227,74 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
index < ra->start + ra->size);
}
+/**
+ * struct file - Represents a file
+ * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
+ * @f_mode: FMODE_* flags often used in hotpaths
+ * @f_op: file operations
+ * @f_mapping: Contents of a cacheable, mappable object.
+ * @private_data: filesystem or driver specific data
+ * @f_inode: cached inode
+ * @f_flags: file flags
+ * @f_iocb_flags: iocb flags
+ * @f_cred: stashed credentials of creator/opener
+ * @f_owner: file owner
+ * @f_path: path of the file
+ * @__f_path: writable alias for @f_path; *ONLY* for core VFS and only before
+ *	the file gets opened
+ * @f_pos_lock: lock protecting file position
+ * @f_pipe: specific to pipes
+ * @f_pos: file position
+ * @f_security: LSM security context of this file
+ * @f_wb_err: writeback error
+ * @f_sb_err: per sb writeback errors
+ * @f_ep: link of all epoll hooks for this file
+ * @f_task_work: task work entry point
+ * @f_llist: work queue entrypoint
+ * @f_ra: file's readahead state
+ * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
+ * @f_ref: reference count
+ */
struct file {
- union {
- struct llist_node fu_llist;
- struct rcu_head fu_rcuhead;
- } f_u;
- struct path f_path;
- struct inode *f_inode; /* cached value */
+ spinlock_t f_lock;
+ fmode_t f_mode;
const struct file_operations *f_op;
-
- /*
- * Protects f_ep_links, f_flags.
- * Must not be taken from IRQ context.
- */
- spinlock_t f_lock;
- enum rw_hint f_write_hint;
- atomic_long_t f_count;
- unsigned int f_flags;
- fmode_t f_mode;
- struct mutex f_pos_lock;
- loff_t f_pos;
- struct fown_struct f_owner;
- const struct cred *f_cred;
- struct file_ra_state f_ra;
-
- u64 f_version;
+ struct address_space *f_mapping;
+ void *private_data;
+ struct inode *f_inode;
+ unsigned int f_flags;
+ unsigned int f_iocb_flags;
+ const struct cred *f_cred;
+ struct fown_struct *f_owner;
+ /* --- cacheline 1 boundary (64 bytes) --- */
+ union {
+ const struct path f_path;
+ struct path __f_path;
+ };
+ union {
+ /* regular files (with FMODE_ATOMIC_POS) and directories */
+ struct mutex f_pos_lock;
+ /* pipes */
+ u64 f_pipe;
+ };
+ loff_t f_pos;
#ifdef CONFIG_SECURITY
- void *f_security;
+ void *f_security;
#endif
- /* needed for tty driver, and maybe others */
- void *private_data;
-
+ /* --- cacheline 2 boundary (128 bytes) --- */
+ errseq_t f_wb_err;
+ errseq_t f_sb_err;
#ifdef CONFIG_EPOLL
- /* Used by fs/eventpoll.c to link all the hooks to this file */
- struct list_head f_ep_links;
- struct list_head f_tfile_llink;
-#endif /* #ifdef CONFIG_EPOLL */
- struct address_space *f_mapping;
- errseq_t f_wb_err;
+ struct hlist_head *f_ep;
+#endif
+ union {
+ struct callback_head f_task_work;
+ struct llist_node f_llist;
+ struct file_ra_state f_ra;
+ freeptr_t f_freeptr;
+ };
+ file_ref_t f_ref;
+ /* --- cacheline 3 boundary (192 bytes) --- */
} __randomize_layout
__attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
@@ -892,17 +1302,19 @@ struct file_handle {
__u32 handle_bytes;
int handle_type;
/* file identifier */
- unsigned char f_handle[0];
+ unsigned char f_handle[] __counted_by(handle_bytes);
};
static inline struct file *get_file(struct file *f)
{
- atomic_long_inc(&f->f_count);
+ file_ref_inc(&f->f_ref);
return f;
}
-#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
-#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
-#define file_count(x) atomic_long_read(&(x)->f_count)
+
+struct file *get_file_rcu(struct file __rcu **f);
+struct file *get_file_active(struct file **f);
+
+#define file_count(f) file_ref_read(&(f)->f_ref)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -914,730 +1326,539 @@ static inline struct file *get_file(struct file *f)
#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif
-#define FL_POSIX 1
-#define FL_FLOCK 2
-#define FL_DELEG 4 /* NFSv4 delegation */
-#define FL_ACCESS 8 /* not trying to lock, just looking */
-#define FL_EXISTS 16 /* when unlocking, test for existence */
-#define FL_LEASE 32 /* lease held on this file */
-#define FL_CLOSE 64 /* unlock on close */
-#define FL_SLEEP 128 /* A blocking lock */
-#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
-#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
-#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
-#define FL_LAYOUT 2048 /* outstanding pNFS layout */
-
-#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
-
-/*
- * Special return value from posix_lock_file() and vfs_lock_file() for
- * asynchronous locking.
- */
-#define FILE_LOCK_DEFERRED 1
-
/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
struct file_lock;
+struct file_lease;
-struct file_lock_operations {
- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
- void (*fl_release_private)(struct file_lock *);
-};
-
-struct lock_manager_operations {
- int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
- unsigned long (*lm_owner_key)(struct file_lock *);
- fl_owner_t (*lm_get_owner)(fl_owner_t);
- void (*lm_put_owner)(fl_owner_t);
- void (*lm_notify)(struct file_lock *); /* unblock callback */
- int (*lm_grant)(struct file_lock *, int);
- bool (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock *, int, struct list_head *);
- void (*lm_setup)(struct file_lock *, void **);
-};
+/* The following constant reflects the upper bound of the file/locking space */
+#ifndef OFFSET_MAX
+#define OFFSET_MAX type_max(loff_t)
+#define OFFT_OFFSET_MAX type_max(off_t)
+#endif
-struct lock_manager {
- struct list_head list;
- /*
- * NFSv4 and up also want opens blocked during the grace period;
- * NLM doesn't care:
- */
- bool block_opens;
-};
+int file_f_owner_allocate(struct file *file);
+static inline struct fown_struct *file_f_owner(const struct file *file)
+{
+ return READ_ONCE(file->f_owner);
+}
-struct net;
-void locks_start_grace(struct net *, struct lock_manager *);
-void locks_end_grace(struct lock_manager *);
-int locks_in_grace(struct net *);
-int opens_in_grace(struct net *);
+extern void send_sigio(struct fown_struct *fown, int fd, int band);
-/* that will die - we need it for nfs_lock_info */
-#include <linux/nfs_fs_i.h>
+static inline struct inode *file_inode(const struct file *f)
+{
+ return f->f_inode;
+}
/*
- * struct file_lock represents a generic "file lock". It's used to represent
- * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
- * note that the same struct is used to represent both a request for a lock and
- * the lock itself, but the same object is never used for both.
- *
- * FIXME: should we create a separate "struct lock_request" to help distinguish
- * these two uses?
- *
- * The varous i_flctx lists are ordered by:
- *
- * 1) lock owner
- * 2) lock range start
- * 3) lock range end
- *
- * Obviously, the last two criteria only matter for POSIX locks.
- */
-struct file_lock {
- struct file_lock *fl_next; /* singly linked list for this inode */
- struct list_head fl_list; /* link into file_lock_context */
- struct hlist_node fl_link; /* node in global lists */
- struct list_head fl_block; /* circular list of blocked processes */
- fl_owner_t fl_owner;
- unsigned int fl_flags;
- unsigned char fl_type;
- unsigned int fl_pid;
- int fl_link_cpu; /* what cpu's list is this on? */
- struct pid *fl_nspid;
- wait_queue_head_t fl_wait;
- struct file *fl_file;
- loff_t fl_start;
- loff_t fl_end;
-
- struct fasync_struct * fl_fasync; /* for lease break notifications */
- /* for lease breaks: */
- unsigned long fl_break_time;
- unsigned long fl_downgrade_time;
-
- const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
- const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
- union {
- struct nfs_lock_info nfs_fl;
- struct nfs4_lock_info nfs4_fl;
- struct {
- struct list_head link; /* link in AFS vnode's pending_locks list */
- int state; /* state of grant or error if -ve */
- } afs;
- } fl_u;
-} __randomize_layout;
+ * file_dentry() is a relic from the days when overlayfs used files with a
+ * "fake" path, meaning f_path on overlayfs and f_inode on the underlying fs.
+ * In those days, file_dentry() was needed to get the underlying fs dentry that
+ * matches f_inode.
+ * Files with "fake" path should not exist nowadays, so use an assertion to make
+ * sure that file_dentry() was not papering over filesystem bugs.
+ */
+static inline struct dentry *file_dentry(const struct file *file)
+{
+ struct dentry *dentry = file->f_path.dentry;
+
+ WARN_ON_ONCE(d_inode(dentry) != file_inode(file));
+ return dentry;
+}
-struct file_lock_context {
- spinlock_t flc_lock;
- struct list_head flc_flock;
- struct list_head flc_posix;
- struct list_head flc_lease;
+struct fasync_struct {
+ rwlock_t fa_lock;
+ int magic;
+ int fa_fd;
+ struct fasync_struct *fa_next; /* singly linked list */
+ struct file *fa_file;
+ struct rcu_head fa_rcu;
};
-/* The following constant reflects the upper bound of the file/locking space */
-#ifndef OFFSET_MAX
-#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
-#define OFFSET_MAX INT_LIMIT(loff_t)
-#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
-#endif
+#define FASYNC_MAGIC 0x4601
-extern void send_sigio(struct fown_struct *fown, int fd, int band);
+/* SMP safe fasync helpers: */
+extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
+extern int fasync_remove_entry(struct file *, struct fasync_struct **);
+extern struct fasync_struct *fasync_alloc(void);
+extern void fasync_free(struct fasync_struct *);
+
+/* can be called from interrupts */
+extern void kill_fasync(struct fasync_struct **, int, int);
+
+extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
+extern int f_setown(struct file *filp, int who, int force);
+extern void f_delown(struct file *filp);
+extern pid_t f_getown(struct file *filp);
+extern int send_sigurg(struct file *file);
/*
- * Return the inode to use for locking
- *
- * For overlayfs this should be the overlay inode, not the real inode returned
- * by file_inode(). For any other fs file_inode(filp) and locks_inode(filp) are
- * equal.
+ * Umount options
*/
-static inline struct inode *locks_inode(const struct file *f)
-{
- return f->f_path.dentry->d_inode;
-}
-
-#ifdef CONFIG_FILE_LOCKING
-extern int fcntl_getlk(struct file *, unsigned int, struct flock *);
-extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
- struct flock *);
-#if BITS_PER_LONG == 32
-extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *);
-extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
- struct flock64 *);
-#endif
+#define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */
+#define MNT_DETACH 0x00000002 /* Just detach from the tree */
+#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
+#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
+#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
-extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
-extern int fcntl_getlease(struct file *filp);
-
-/* fs/locks.c */
-void locks_free_lock_context(struct inode *inode);
-void locks_free_lock(struct file_lock *fl);
-extern void locks_init_lock(struct file_lock *);
-extern struct file_lock * locks_alloc_lock(void);
-extern void locks_copy_lock(struct file_lock *, struct file_lock *);
-extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
-extern void locks_remove_posix(struct file *, fl_owner_t);
-extern void locks_remove_file(struct file *);
-extern void locks_release_private(struct file_lock *);
-extern void posix_test_lock(struct file *, struct file_lock *);
-extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_unblock_lock(struct file_lock *);
-extern int vfs_test_lock(struct file *, struct file_lock *);
-extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
-extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
-extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
-extern void lease_get_mtime(struct inode *, struct timespec *time);
-extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
-extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock *, int, struct list_head *);
-struct files_struct;
-extern void show_fd_locks(struct seq_file *f,
- struct file *filp, struct files_struct *files);
-#else /* !CONFIG_FILE_LOCKING */
-static inline int fcntl_getlk(struct file *file, unsigned int cmd,
- struct flock __user *user)
+static inline struct user_namespace *i_user_ns(const struct inode *inode)
{
- return -EINVAL;
+ return inode->i_sb->s_user_ns;
}
-static inline int fcntl_setlk(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock __user *user)
+/* Helper functions so that in most cases filesystems will
+ * not need to deal directly with kuid_t and kgid_t and can
+ * instead deal with the raw numeric values that are stored
+ * in the filesystem.
+ */
+static inline uid_t i_uid_read(const struct inode *inode)
{
- return -EACCES;
+ return from_kuid(i_user_ns(inode), inode->i_uid);
}
-#if BITS_PER_LONG == 32
-static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
- struct flock64 __user *user)
+static inline gid_t i_gid_read(const struct inode *inode)
{
- return -EINVAL;
+ return from_kgid(i_user_ns(inode), inode->i_gid);
}
-static inline int fcntl_setlk64(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock64 __user *user)
-{
- return -EACCES;
-}
-#endif
-static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+static inline void i_uid_write(struct inode *inode, uid_t uid)
{
- return -EINVAL;
+ inode->i_uid = make_kuid(i_user_ns(inode), uid);
}
-static inline int fcntl_getlease(struct file *filp)
+static inline void i_gid_write(struct inode *inode, gid_t gid)
{
- return F_UNLCK;
+ inode->i_gid = make_kgid(i_user_ns(inode), gid);
}
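
Editor's sketch of the intended use: filesystems convert between kuid_t/kgid_t and raw on-disk values through these helpers so the s_user_ns translation stays in one place. struct myfs_dinode and its fields are hypothetical; cpu_to_le32() is the usual byte-order helper:

	struct myfs_dinode {		/* hypothetical on-disk inode */
		__le32	di_uid;
		__le32	di_gid;
	};

	static void myfs_fill_dinode(struct myfs_dinode *di,
				     const struct inode *inode)
	{
		di->di_uid = cpu_to_le32(i_uid_read(inode));
		di->di_gid = cpu_to_le32(i_gid_read(inode));
	}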
-static inline void
-locks_free_lock_context(struct inode *inode)
+/**
+ * i_uid_into_vfsuid - map an inode's i_uid down according to an idmapping
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_uid mapped down according to @idmap.
+ * If the inode's i_uid has no mapping INVALID_VFSUID is returned.
+ */
+static inline vfsuid_t i_uid_into_vfsuid(struct mnt_idmap *idmap,
+ const struct inode *inode)
{
+ return make_vfsuid(idmap, i_user_ns(inode), inode->i_uid);
}
-static inline void locks_init_lock(struct file_lock *fl)
+/**
+ * i_uid_needs_update - check whether inode's i_uid needs to be updated
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether the @inode's i_uid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_uid field needs to be updated, false if not.
+ */
+static inline bool i_uid_needs_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ const struct inode *inode)
{
- return;
+ return ((attr->ia_valid & ATTR_UID) &&
+ !vfsuid_eq(attr->ia_vfsuid,
+ i_uid_into_vfsuid(idmap, inode)));
}
-static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
+/**
+ * i_uid_update - update @inode's i_uid field
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_uid field translating the vfsuid of any idmapped
+ * mount into the filesystem kuid.
+ */
+static inline void i_uid_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ struct inode *inode)
{
- return;
+ if (attr->ia_valid & ATTR_UID)
+ inode->i_uid = from_vfsuid(idmap, i_user_ns(inode),
+ attr->ia_vfsuid);
}
-static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+/**
+ * i_gid_into_vfsgid - map an inode's i_gid down according to an idmapping
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_gid mapped down according to @idmap.
+ * If the inode's i_gid has no mapping INVALID_VFSGID is returned.
+ */
+static inline vfsgid_t i_gid_into_vfsgid(struct mnt_idmap *idmap,
+ const struct inode *inode)
{
- return;
+ return make_vfsgid(idmap, i_user_ns(inode), inode->i_gid);
}
-static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
+/**
+ * i_gid_needs_update - check whether inode's i_gid needs to be updated
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether the @inode's i_gid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_gid field needs to be updated, false if not.
+ */
+static inline bool i_gid_needs_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ const struct inode *inode)
{
- return;
+ return ((attr->ia_valid & ATTR_GID) &&
+ !vfsgid_eq(attr->ia_vfsgid,
+ i_gid_into_vfsgid(idmap, inode)));
}
-static inline void locks_remove_file(struct file *filp)
+/**
+ * i_gid_update - update @inode's i_gid field
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_gid field translating the vfsgid of any idmapped
+ * mount into the filesystem kgid.
+ */
+static inline void i_gid_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ struct inode *inode)
{
- return;
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = from_vfsgid(idmap, i_user_ns(inode),
+ attr->ia_vfsgid);
}
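
Editor's sketch: taken together, the needs_update/update helpers give a ->setattr implementation an idmapping-safe way to apply ATTR_UID/ATTR_GID. myfs_chown() is hypothetical and skips the usual setattr_prepare()/notify_change() plumbing:

	static int myfs_chown(struct mnt_idmap *idmap, struct dentry *dentry,
			      struct iattr *attr)
	{
		struct inode *inode = d_inode(dentry);

		if (i_uid_needs_update(idmap, attr, inode) ||
		    i_gid_needs_update(idmap, attr, inode)) {
			i_uid_update(idmap, attr, inode);
			i_gid_update(idmap, attr, inode);
			mark_inode_dirty(inode);
		}
		return 0;
	}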
-static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
+/**
+ * inode_fsuid_set - initialize inode's i_uid field with callers fsuid
+ * @inode: inode to initialize
+ * @idmap: idmap of the mount the inode was found from
+ *
+ * Initialize the i_uid field of @inode. If the inode was found/created via
+ * an idmapped mount map the caller's fsuid according to @idmap.
+ */
+static inline void inode_fsuid_set(struct inode *inode,
+ struct mnt_idmap *idmap)
{
- return;
+ inode->i_uid = mapped_fsuid(idmap, i_user_ns(inode));
}
-static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
- struct file_lock *conflock)
+/**
+ * inode_fsgid_set - initialize inode's i_gid field with callers fsgid
+ * @inode: inode to initialize
+ * @idmap: idmap of the mount the inode was found from
+ *
+ * Initialize the i_gid field of @inode. If the inode was found/created via
+ * an idmapped mount map the caller's fsgid according to @idmap.
+ */
+static inline void inode_fsgid_set(struct inode *inode,
+ struct mnt_idmap *idmap)
{
- return -ENOLCK;
+ inode->i_gid = mapped_fsgid(idmap, i_user_ns(inode));
}
-static inline int posix_unblock_lock(struct file_lock *waiter)
+/**
+ * fsuidgid_has_mapping() - check whether caller's fsuid/fsgid is mapped
+ * @sb: the superblock we want a mapping in
+ * @idmap: idmap of the relevant mount
+ *
+ * Check whether the caller's fsuid and fsgid have a valid mapping in the
+ * s_user_ns of the superblock @sb. If the caller is on an idmapped mount map
+ * the caller's fsuid and fsgid according to the @idmap first.
+ *
+ * Return: true if fsuid and fsgid is mapped, false if not.
+ */
+static inline bool fsuidgid_has_mapping(struct super_block *sb,
+ struct mnt_idmap *idmap)
{
- return -ENOENT;
-}
+ struct user_namespace *fs_userns = sb->s_user_ns;
+ kuid_t kuid;
+ kgid_t kgid;
-static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
-{
- return 0;
+ kuid = mapped_fsuid(idmap, fs_userns);
+ if (!uid_valid(kuid))
+ return false;
+ kgid = mapped_fsgid(idmap, fs_userns);
+ if (!gid_valid(kgid))
+ return false;
+ return kuid_has_mapping(fs_userns, kuid) &&
+ kgid_has_mapping(fs_userns, kgid);
}
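
Editor's sketch: a typical caller rejects object creation when the caller's ids cannot be represented in s_user_ns. myfs_may_create() is hypothetical; -EOVERFLOW is chosen to match the usual VFS convention for unrepresentable ids:

	static int myfs_may_create(struct super_block *sb,
				   struct mnt_idmap *idmap)
	{
		if (!fsuidgid_has_mapping(sb, idmap))
			return -EOVERFLOW;
		return 0;
	}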
-static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
- struct file_lock *fl, struct file_lock *conf)
-{
- return -ENOLCK;
-}
+struct timespec64 current_time(struct inode *inode);
+struct timespec64 inode_set_ctime_current(struct inode *inode);
+struct timespec64 inode_set_ctime_deleg(struct inode *inode,
+ struct timespec64 update);
-static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+static inline time64_t inode_get_atime_sec(const struct inode *inode)
{
- return 0;
+ return inode->i_atime_sec;
}
-static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+static inline long inode_get_atime_nsec(const struct inode *inode)
{
- return -ENOLCK;
+ return inode->i_atime_nsec;
}
-static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+static inline struct timespec64 inode_get_atime(const struct inode *inode)
{
- return 0;
+ struct timespec64 ts = { .tv_sec = inode_get_atime_sec(inode),
+ .tv_nsec = inode_get_atime_nsec(inode) };
+
+ return ts;
}
-static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
+static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
+ struct timespec64 ts)
{
- return;
+ inode->i_atime_sec = ts.tv_sec;
+ inode->i_atime_nsec = ts.tv_nsec;
+ return ts;
}
-static inline int generic_setlease(struct file *filp, long arg,
- struct file_lock **flp, void **priv)
+static inline struct timespec64 inode_set_atime(struct inode *inode,
+ time64_t sec, long nsec)
{
- return -EINVAL;
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+
+ return inode_set_atime_to_ts(inode, ts);
}
-static inline int vfs_setlease(struct file *filp, long arg,
- struct file_lock **lease, void **priv)
+static inline time64_t inode_get_mtime_sec(const struct inode *inode)
{
- return -EINVAL;
+ return inode->i_mtime_sec;
}
-static inline int lease_modify(struct file_lock *fl, int arg,
- struct list_head *dispose)
+static inline long inode_get_mtime_nsec(const struct inode *inode)
{
- return -EINVAL;
+ return inode->i_mtime_nsec;
}
-struct files_struct;
-static inline void show_fd_locks(struct seq_file *f,
- struct file *filp, struct files_struct *files) {}
-#endif /* !CONFIG_FILE_LOCKING */
-
-static inline struct inode *file_inode(const struct file *f)
+static inline struct timespec64 inode_get_mtime(const struct inode *inode)
{
- return f->f_inode;
+ struct timespec64 ts = { .tv_sec = inode_get_mtime_sec(inode),
+ .tv_nsec = inode_get_mtime_nsec(inode) };
+ return ts;
}
-static inline struct dentry *file_dentry(const struct file *file)
+static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
+ struct timespec64 ts)
{
- return d_real(file->f_path.dentry, file_inode(file), 0);
+ inode->i_mtime_sec = ts.tv_sec;
+ inode->i_mtime_nsec = ts.tv_nsec;
+ return ts;
}
-static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
+static inline struct timespec64 inode_set_mtime(struct inode *inode,
+ time64_t sec, long nsec)
{
- return locks_lock_inode_wait(locks_inode(filp), fl);
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_mtime_to_ts(inode, ts);
}
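
Editor's sketch: the accessor pairs above compose naturally when stamping all three times at once, using inode_set_ctime_current() declared earlier in this hunk. myfs_touch() is a hypothetical helper:

	static void myfs_touch(struct inode *inode)
	{
		struct timespec64 now = inode_set_ctime_current(inode);

		inode_set_mtime_to_ts(inode, now);
		inode_set_atime_to_ts(inode, now);
		mark_inode_dirty(inode);
	}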
-struct fasync_struct {
- spinlock_t fa_lock;
- int magic;
- int fa_fd;
- struct fasync_struct *fa_next; /* singly linked list */
- struct file *fa_file;
- struct rcu_head fa_rcu;
-};
-
-#define FASYNC_MAGIC 0x4601
-
-/* SMP safe fasync helpers: */
-extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
-extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
-extern int fasync_remove_entry(struct file *, struct fasync_struct **);
-extern struct fasync_struct *fasync_alloc(void);
-extern void fasync_free(struct fasync_struct *);
-
-/* can be called from interrupts */
-extern void kill_fasync(struct fasync_struct **, int, int);
-
-extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
-extern int f_setown(struct file *filp, unsigned long arg, int force);
-extern void f_delown(struct file *filp);
-extern pid_t f_getown(struct file *filp);
-extern int send_sigurg(struct fown_struct *fown);
-
/*
- * Umount options
+ * Multigrain timestamps
+ *
+ * Conditionally use fine-grained ctime and mtime timestamps when there
+ * are users actively observing them via getattr. The primary use-case
+ * for this is NFS clients that use the ctime to distinguish between
+ * different states of the file, and that are often fooled by multiple
+ * operations that occur in the same coarse-grained timer tick.
*/
+#define I_CTIME_QUERIED ((u32)BIT(31))
-#define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
-#define MNT_DETACH 0x00000002 /* Just detach from the tree */
-#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
-#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
-#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
-
-/* sb->s_iflags */
-#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
-#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
-#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
-
-/* sb->s_iflags to limit user namespace mounts */
-#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
-
-/* Possible states of 'frozen' field */
-enum {
- SB_UNFROZEN = 0, /* FS is unfrozen */
- SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
- SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
- SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop
- * internal threads if needed) */
- SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
-};
-
-#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
-
-struct sb_writers {
- int frozen; /* Is sb frozen? */
- wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
- struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
-};
-
-struct super_block {
- struct list_head s_list; /* Keep this first */
- dev_t s_dev; /* search index; _not_ kdev_t */
- unsigned char s_blocksize_bits;
- unsigned long s_blocksize;
- loff_t s_maxbytes; /* Max file size */
- struct file_system_type *s_type;
- const struct super_operations *s_op;
- const struct dquot_operations *dq_op;
- const struct quotactl_ops *s_qcop;
- const struct export_operations *s_export_op;
- unsigned long s_flags;
- unsigned long s_iflags; /* internal SB_I_* flags */
- unsigned long s_magic;
- struct dentry *s_root;
- struct rw_semaphore s_umount;
- int s_count;
- atomic_t s_active;
-#ifdef CONFIG_SECURITY
- void *s_security;
-#endif
- const struct xattr_handler **s_xattr;
-
- const struct fscrypt_operations *s_cop;
-
- struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
- struct list_head s_mounts; /* list of mounts; _not_ for fs use */
- struct block_device *s_bdev;
- struct backing_dev_info *s_bdi;
- struct mtd_info *s_mtd;
- struct hlist_node s_instances;
- unsigned int s_quota_types; /* Bitmask of supported quota types */
- struct quota_info s_dquot; /* Diskquota specific options */
-
- struct sb_writers s_writers;
-
- char s_id[32]; /* Informational name */
- uuid_t s_uuid; /* UUID */
-
- void *s_fs_info; /* Filesystem private info */
- unsigned int s_max_links;
- fmode_t s_mode;
-
- /* Granularity of c/m/atime in ns.
- Cannot be worse than a second */
- u32 s_time_gran;
-
- /*
- * The next field is for VFS *only*. No filesystems have any business
- * even looking at it. You had been warned.
- */
- struct mutex s_vfs_rename_mutex; /* Kludge */
-
- /*
- * Filesystem subtype. If non-empty the filesystem type field
- * in /proc/mounts will be "type.subtype"
- */
- char *s_subtype;
-
- const struct dentry_operations *s_d_op; /* default d_op for dentries */
-
- /*
- * Saved pool identifier for cleancache (-1 means none)
- */
- int cleancache_poolid;
-
- struct shrinker s_shrink; /* per-sb shrinker handle */
-
- /* Number of inodes with nlink == 0 but still referenced */
- atomic_long_t s_remove_count;
-
- /* Being remounted read-only */
- int s_readonly_remount;
-
- /* AIO completions deferred from interrupt context */
- struct workqueue_struct *s_dio_done_wq;
- struct hlist_head s_pins;
-
- /*
- * Owning user namespace and default context in which to
- * interpret filesystem uids, gids, quotas, device nodes,
- * xattrs and security labels.
- */
- struct user_namespace *s_user_ns;
-
- /*
- * Keep the lru lists last in the structure so they always sit on their
- * own individual cachelines.
- */
- struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
- struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
- struct rcu_head rcu;
- struct work_struct destroy_work;
-
- struct mutex s_sync_lock; /* sync serialisation lock */
-
- /*
- * Indicates how deep in a filesystem stack this SB is
- */
- int s_stack_depth;
-
- /* s_inode_list_lock protects s_inodes */
- spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
- struct list_head s_inodes; /* all inodes */
-
- spinlock_t s_inode_wblist_lock;
- struct list_head s_inodes_wb; /* writeback inodes */
-} __randomize_layout;
-
-/* Helper functions so that in most cases filesystems will
- * not need to deal directly with kuid_t and kgid_t and can
- * instead deal with the raw numeric values that are stored
- * in the filesystem.
- */
-static inline uid_t i_uid_read(const struct inode *inode)
+static inline time64_t inode_get_ctime_sec(const struct inode *inode)
{
- return from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
+ return inode->i_ctime_sec;
}
-static inline gid_t i_gid_read(const struct inode *inode)
+static inline long inode_get_ctime_nsec(const struct inode *inode)
{
- return from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
+ return inode->i_ctime_nsec & ~I_CTIME_QUERIED;
}
-static inline void i_uid_write(struct inode *inode, uid_t uid)
+static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
- inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid);
-}
+ struct timespec64 ts = { .tv_sec = inode_get_ctime_sec(inode),
+ .tv_nsec = inode_get_ctime_nsec(inode) };
-static inline void i_gid_write(struct inode *inode, gid_t gid)
-{
- inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
+ return ts;
}
-extern struct timespec current_time(struct inode *inode);
-
-/*
- * Snapshotting support.
- */
-
-void __sb_end_write(struct super_block *sb, int level);
-int __sb_start_write(struct super_block *sb, int level, bool wait);
-
-#define __sb_writers_acquired(sb, lev) \
- percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
-#define __sb_writers_release(sb, lev) \
- percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts);
/**
- * sb_end_write - drop write access to a superblock
- * @sb: the super we wrote to
+ * inode_set_ctime - set the ctime in the inode
+ * @inode: inode in which to set the ctime
+ * @sec: tv_sec value to set
+ * @nsec: tv_nsec value to set
*
- * Decrement number of writers to the filesystem. Wake up possible waiters
- * wanting to freeze the filesystem.
+ * Set the ctime in @inode to { @sec, @nsec }
*/
-static inline void sb_end_write(struct super_block *sb)
+static inline struct timespec64 inode_set_ctime(struct inode *inode,
+ time64_t sec, long nsec)
{
- __sb_end_write(sb, SB_FREEZE_WRITE);
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+
+ return inode_set_ctime_to_ts(inode, ts);
}
-/**
- * sb_end_pagefault - drop write access to a superblock from a page fault
- * @sb: the super we wrote to
- *
- * Decrement number of processes handling write page fault to the filesystem.
- * Wake up possible waiters wanting to freeze the filesystem.
+struct timespec64 simple_inode_init_ts(struct inode *inode);
+
+/*
+ * Snapshotting support.
*/
-static inline void sb_end_pagefault(struct super_block *sb)
-{
- __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
-}
/**
- * sb_end_intwrite - drop write access to a superblock for internal fs purposes
- * @sb: the super we wrote to
+ * file_write_started - check if SB_FREEZE_WRITE is held
+ * @file: the file we write to
*
- * Decrement fs-internal number of writers to the filesystem. Wake up possible
- * waiters wanting to freeze the filesystem.
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
*/
-static inline void sb_end_intwrite(struct super_block *sb)
+static inline bool file_write_started(const struct file *file)
{
- __sb_end_write(sb, SB_FREEZE_FS);
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_started(file_inode(file)->i_sb);
}
/**
- * sb_start_write - get write access to a superblock
- * @sb: the super we write to
- *
- * When a process wants to write data or metadata to a file system (i.e. dirty
- * a page or an inode), it should embed the operation in a sb_start_write() -
- * sb_end_write() pair to get exclusion against file system freezing. This
- * function increments number of writers preventing freezing. If the file
- * system is already frozen, the function waits until the file system is
- * thawed.
+ * file_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @file: the file we write to
*
- * Since freeze protection behaves as a lock, users have to preserve
- * ordering of freeze protection and other filesystem locks. Generally,
- * freeze protection should be the outermost lock. In particular, we have:
- *
- * sb_start_write
- * -> i_mutex (write path, truncate, directory ops, ...)
- * -> s_umount (freeze_super, thaw_super)
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
*/
-static inline void sb_start_write(struct super_block *sb)
+static inline bool file_write_not_started(const struct file *file)
{
- __sb_start_write(sb, SB_FREEZE_WRITE, true);
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_not_started(file_inode(file)->i_sb);
}
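
Editor's sketch: given the documented false positives, these predicates are suitable for assertions rather than control flow. A hypothetical debug check in a write helper:

	static void myfs_begin_dirtying(struct file *file)
	{
		/* caller must be inside file_start_write()/sb_start_write() */
		WARN_ON_ONCE(file_write_not_started(file));
	}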
-static inline int sb_start_write_trylock(struct super_block *sb)
-{
- return __sb_start_write(sb, SB_FREEZE_WRITE, false);
-}
+bool inode_owner_or_capable(struct mnt_idmap *idmap,
+ const struct inode *inode);
-/**
- * sb_start_pagefault - get write access to a superblock from a page fault
- * @sb: the super we write to
- *
- * When a process starts handling write page fault, it should embed the
- * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
- * exclusion against file system freezing. This is needed since the page fault
- * is going to dirty a page. This function increments number of running page
- * faults preventing freezing. If the file system is already frozen, the
- * function waits until the file system is thawed.
- *
- * Since page fault freeze protection behaves as a lock, users have to preserve
- * ordering of freeze protection and other filesystem locks. It is advised to
- * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
- * handling code implies lock dependency:
- *
- * mmap_sem
- * -> sb_start_pagefault
+/*
+ * VFS helper functions.
*/
-static inline void sb_start_pagefault(struct super_block *sb)
-{
- __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true);
-}
+int vfs_create(struct mnt_idmap *, struct dentry *, umode_t,
+ struct delegated_inode *);
+struct dentry *vfs_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t, struct delegated_inode *);
+int vfs_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
+ umode_t, dev_t, struct delegated_inode *);
+int vfs_symlink(struct mnt_idmap *, struct inode *,
+ struct dentry *, const char *, struct delegated_inode *);
+int vfs_link(struct dentry *, struct mnt_idmap *, struct inode *,
+ struct dentry *, struct delegated_inode *);
+int vfs_rmdir(struct mnt_idmap *, struct inode *, struct dentry *,
+ struct delegated_inode *);
+int vfs_unlink(struct mnt_idmap *, struct inode *, struct dentry *,
+ struct delegated_inode *);
-/*
- * sb_start_intwrite - get write access to a superblock for internal fs purposes
- * @sb: the super we write to
- *
- * This is the third level of protection against filesystem freezing. It is
- * free for use by a filesystem. The only requirement is that it must rank
- * below sb_start_pagefault.
- *
- * For example filesystem can call sb_start_intwrite() when starting a
- * transaction which somewhat eases handling of freezing for internal sources
- * of filesystem changes (internal fs threads, discarding preallocation on file
- * close, etc.).
+/**
+ * struct renamedata - contains all information required for renaming
+ * @mnt_idmap: idmap of the mount in which the rename is happening.
+ * @old_parent: parent of source
+ * @old_dentry: source
+ * @new_parent: parent of destination
+ * @new_dentry: destination
+ * @delegated_inode: returns an inode needing a delegation break
+ * @flags: rename flags
*/
-static inline void sb_start_intwrite(struct super_block *sb)
+struct renamedata {
+ struct mnt_idmap *mnt_idmap;
+ struct dentry *old_parent;
+ struct dentry *old_dentry;
+ struct dentry *new_parent;
+ struct dentry *new_dentry;
+ struct delegated_inode *delegated_inode;
+ unsigned int flags;
+} __randomize_layout;
+
+int vfs_rename(struct renamedata *);
+
+static inline int vfs_whiteout(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry)
{
- __sb_start_write(sb, SB_FREEZE_FS, true);
+ return vfs_mknod(idmap, dir, dentry, S_IFCHR | WHITEOUT_MODE,
+ WHITEOUT_DEV, NULL);
}
+struct file *kernel_tmpfile_open(struct mnt_idmap *idmap,
+ const struct path *parentpath,
+ umode_t mode, int open_flag,
+ const struct cred *cred);
+struct file *kernel_file_open(const struct path *path, int flags,
+ const struct cred *cred);
-extern bool inode_owner_or_capable(const struct inode *inode);
+int vfs_mkobj(struct dentry *, umode_t,
+ int (*f)(struct dentry *, umode_t, void *),
+ void *);
-/*
- * VFS helper functions..
- */
-extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
-extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
-extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
-extern int vfs_symlink(struct inode *, struct dentry *, const char *);
-extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
-extern int vfs_rmdir(struct inode *, struct dentry *);
-extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
-extern int vfs_whiteout(struct inode *, struct dentry *);
+int vfs_fchown(struct file *file, uid_t user, gid_t group);
+int vfs_fchmod(struct file *file, umode_t mode);
+int vfs_utimes(const struct path *path, struct timespec64 *times);
-extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
- int open_flag);
+#ifdef CONFIG_COMPAT
+extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#else
+#define compat_ptr_ioctl NULL
+#endif
/*
* VFS file helper functions.
*/
-extern void inode_init_owner(struct inode *inode, const struct inode *dir,
- umode_t mode);
+void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
+ const struct inode *dir, umode_t mode);
extern bool may_open_dev(const struct path *path);
-/*
- * VFS FS_IOC_FIEMAP helper definitions.
- */
-struct fiemap_extent_info {
- unsigned int fi_flags; /* Flags as passed from user */
- unsigned int fi_extents_mapped; /* Number of mapped extents */
- unsigned int fi_extents_max; /* Size of fiemap_extent array */
- struct fiemap_extent __user *fi_extents_start; /* Start of
- fiemap_extent array */
-};
-int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
- u64 phys, u64 len, u32 flags);
-int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
-
-/*
- * File types
- *
- * NOTE! These match bits 12..15 of stat.st_mode
- * (ie "(i_mode >> 12) & 15").
- */
-#define DT_UNKNOWN 0
-#define DT_FIFO 1
-#define DT_CHR 2
-#define DT_DIR 4
-#define DT_BLK 6
-#define DT_REG 8
-#define DT_LNK 10
-#define DT_SOCK 12
-#define DT_WHT 14
+umode_t mode_strip_sgid(struct mnt_idmap *idmap,
+ const struct inode *dir, umode_t mode);
+bool in_group_or_capable(struct mnt_idmap *idmap,
+ const struct inode *inode, vfsgid_t vfsgid);
/*
* This is the "filldir" function type, used by readdir() to let
* the kernel specify what kind of dirent layout it wants to have.
* This allows the kernel to read directories into kernel space or
* to have different dirent layouts depending on the binary type.
+ * Return 'true' to keep going and 'false' if there are no more entries.
*/
struct dir_context;
-typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
+typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
unsigned);
struct dir_context {
- const filldir_t actor;
+ filldir_t actor;
loff_t pos;
+ /*
+	 * Filesystems MUST NOT MODIFY count, but may use it as a hint:
+ * 0 unknown
+ * > 0 space in buffer (assume at least one entry)
+ * INT_MAX unlimited
+ */
+ int count;
};
-struct block_device_operations;
-
-/* These macros are for out of kernel modules to test that
- * the kernel supports the unlocked_ioctl and compat_ioctl
- * fields in struct file_operations. */
-#define HAVE_COMPAT_IOCTL 1
-#define HAVE_UNLOCKED_IOCTL 1
+/* If OR-ed with d_type, pending signals are not checked */
+#define FILLDIR_FLAG_NOINTR 0x1000
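
Editor's sketch of a filldir_t actor under the new bool contract (return true to keep iterating, false to stop); count_ctx/count_actor are hypothetical:

	struct count_ctx {
		struct dir_context ctx;
		int seen;
	};

	static bool count_actor(struct dir_context *ctx, const char *name,
				int namelen, loff_t offset, u64 ino,
				unsigned int d_type)
	{
		struct count_ctx *c = container_of(ctx, struct count_ctx, ctx);

		c->seen++;
		return true;	/* keep going; false ends iteration */
	}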
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
@@ -1658,19 +1879,52 @@ struct block_device_operations;
#define NOMMU_VMFLAGS \
(NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
+/*
+ * These flags control the behavior of the remap_file_range function pointer.
+ * If it is called with len == 0 that means "remap to end of source file".
+ * See Documentation/filesystems/vfs.rst for more details about this call.
+ *
+ * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate)
+ * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request
+ */
+#define REMAP_FILE_DEDUP (1 << 0)
+#define REMAP_FILE_CAN_SHORTEN (1 << 1)
+
+/*
+ * These flags signal that the caller is ok with altering various aspects of
+ * the behavior of the remap operation. The changes must be made by the
+ * implementation; the vfs remap helper functions can take advantage of them.
+ * Flags in this category exist to preserve the quirky behavior of the hoisted
+ * btrfs clone/dedupe ioctls.
+ */
+#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
+
+/*
+ * These flags control the behavior of vfs_copy_file_range().
+ * They are not available to the user via syscall.
+ *
+ * COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops
+ */
+#define COPY_FILE_SPLICE (1 << 0)
struct iov_iter;
+struct io_uring_cmd;
+struct offset_ctx;
+
+typedef unsigned int __bitwise fop_flags_t;
struct file_operations {
struct module *owner;
+ fop_flags_t fop_flags;
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
- int (*iterate) (struct file *, struct dir_context *);
+ int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
+ unsigned int flags);
int (*iterate_shared) (struct file *, struct dir_context *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -1680,13 +1934,13 @@ struct file_operations {
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
- ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
- int (*setlease)(struct file *, long, struct file_lock **, void **);
+ void (*splice_eof)(struct file *file);
+ int (*setlease)(struct file *, int, struct file_lease **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f);
@@ -1695,140 +1949,162 @@ struct file_operations {
#endif
ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
loff_t, size_t, unsigned int);
- int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t,
- u64);
- ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *,
- u64);
+ loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
+ int (*fadvise)(struct file *, loff_t, loff_t, int);
+ int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+ int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
+ unsigned int poll_flags);
+ int (*mmap_prepare)(struct vm_area_desc *);
} __randomize_layout;
+/* Supports async buffered reads */
+#define FOP_BUFFER_RASYNC ((__force fop_flags_t)(1 << 0))
+/* Supports async buffered writes */
+#define FOP_BUFFER_WASYNC ((__force fop_flags_t)(1 << 1))
+/* Supports synchronous page faults for mappings */
+#define FOP_MMAP_SYNC ((__force fop_flags_t)(1 << 2))
+/* Supports non-exclusive O_DIRECT writes from multiple threads */
+#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3))
+/* Contains huge pages */
+#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4))
+/* Treat loff_t as unsigned (e.g., /dev/mem) */
+#define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5))
+/* Supports asynchronous lock callbacks */
+#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6))
+/* File system supports uncached read/write buffered IO */
+#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7))
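
Editor's sketch of how fop_flags rides along in a file_operations definition; the generic_file_* helpers are the usual VFS ones and the flag choice is illustrative only:

	static const struct file_operations myfs_file_ops = {
		.owner		= THIS_MODULE,
		.fop_flags	= FOP_BUFFER_RASYNC, /* async buffered reads OK */
		.llseek		= generic_file_llseek,
		.read_iter	= generic_file_read_iter,
		.write_iter	= generic_file_write_iter,
		.mmap		= generic_file_mmap,
	};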
+
+/* Wrap a directory iterator that needs exclusive inode access */
+int wrap_directory_iterator(struct file *, struct dir_context *,
+ int (*) (struct file *, struct dir_context *));
+#define WRAP_DIR_ITER(x) \
+	static int shared_##x(struct file *file, struct dir_context *ctx) \
+ { return wrap_directory_iterator(file, ctx, x); }
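
Editor's usage sketch: a directory iterator written for exclusive inode access can be exposed through ->iterate_shared via the wrapper macro (myfs_readdir is hypothetical):

	static int myfs_readdir(struct file *file, struct dir_context *ctx);
	WRAP_DIR_ITER(myfs_readdir)	/* defines shared_myfs_readdir() */

	/* in the fops: .iterate_shared = shared_myfs_readdir, */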
+
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
- int (*permission) (struct inode *, int);
- struct posix_acl * (*get_acl)(struct inode *, int);
+ int (*permission) (struct mnt_idmap *, struct inode *, int);
+ struct posix_acl * (*get_inode_acl)(struct inode *, int, bool);
int (*readlink) (struct dentry *, char __user *,int);
- int (*create) (struct inode *,struct dentry *, umode_t, bool);
+ int (*create) (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
- int (*symlink) (struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct inode *,struct dentry *,umode_t);
+ int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,
+ const char *);
+ struct dentry *(*mkdir) (struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
- int (*rename) (struct inode *, struct dentry *,
+ int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t,dev_t);
+ int (*rename) (struct mnt_idmap *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
- int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
+ int (*setattr) (struct mnt_idmap *, struct dentry *, struct iattr *);
+ int (*getattr) (struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, struct timespec *, int);
+ int (*update_time)(struct inode *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
- int (*tmpfile) (struct inode *, struct dentry *, umode_t);
- int (*set_acl)(struct inode *, struct posix_acl *, int);
+ umode_t create_mode);
+ int (*tmpfile) (struct mnt_idmap *, struct inode *,
+ struct file *, umode_t);
+ struct posix_acl *(*get_acl)(struct mnt_idmap *, struct dentry *,
+ int);
+ int (*set_acl)(struct mnt_idmap *, struct dentry *,
+ struct posix_acl *, int);
+ int (*fileattr_set)(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
+ int (*fileattr_get)(struct dentry *dentry, struct file_kattr *fa);
+ struct offset_ctx *(*get_offset_ctx)(struct inode *inode);
} ____cacheline_aligned;
-static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
- struct iov_iter *iter)
+/* Did the driver provide valid mmap hook configuration? */
+static inline bool can_mmap_file(struct file *file)
{
- return file->f_op->read_iter(kio, iter);
-}
+ bool has_mmap = file->f_op->mmap;
+ bool has_mmap_prepare = file->f_op->mmap_prepare;
-static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio,
- struct iov_iter *iter)
-{
- return file->f_op->write_iter(kio, iter);
+ /* Hooks are mutually exclusive. */
+ if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
+ return false;
+ if (!has_mmap && !has_mmap_prepare)
+ return false;
+
+ return true;
}
-static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
+int __compat_vma_mmap(const struct file_operations *f_op,
+ struct file *file, struct vm_area_struct *vma);
+int compat_vma_mmap(struct file *file, struct vm_area_struct *vma);
+
+static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
+ if (file->f_op->mmap_prepare)
+ return compat_vma_mmap(file, vma);
+
return file->f_op->mmap(file, vma);
}
-ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
- unsigned long nr_segs, unsigned long fast_segs,
- struct iovec *fast_pointer,
- struct iovec **ret_pointer);
+static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
+{
+ return file->f_op->mmap_prepare(desc);
+}
-extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
-extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
-extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, rwf_t);
-extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
- struct inode *inode_out, loff_t pos_out,
- u64 *len, bool is_dedupe);
-extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len);
-extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
- struct inode *dest, loff_t destoff,
- loff_t len, bool *is_same);
+int remap_verify_area(struct file *file, loff_t pos, loff_t len, bool write);
+int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *dax_read_ops);
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *count, unsigned int remap_flags);
+extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
extern int vfs_dedupe_file_range(struct file *file,
struct file_dedupe_range *same);
-
-struct super_operations {
- struct inode *(*alloc_inode)(struct super_block *sb);
- void (*destroy_inode)(struct inode *);
-
- void (*dirty_inode) (struct inode *, int flags);
- int (*write_inode) (struct inode *, struct writeback_control *wbc);
- int (*drop_inode) (struct inode *);
- void (*evict_inode) (struct inode *);
- void (*put_super) (struct super_block *);
- int (*sync_fs)(struct super_block *sb, int wait);
- int (*freeze_super) (struct super_block *);
- int (*freeze_fs) (struct super_block *);
- int (*thaw_super) (struct super_block *);
- int (*unfreeze_fs) (struct super_block *);
- int (*statfs) (struct dentry *, struct kstatfs *);
- int (*remount_fs) (struct super_block *, int *, char *);
- void (*umount_begin) (struct super_block *);
-
- int (*show_options)(struct seq_file *, struct dentry *);
- int (*show_devname)(struct seq_file *, struct dentry *);
- int (*show_path)(struct seq_file *, struct dentry *);
- int (*show_stats)(struct seq_file *, struct dentry *);
-#ifdef CONFIG_QUOTA
- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
- struct dquot **(*get_dquots)(struct inode *);
-#endif
- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
- long (*nr_cached_objects)(struct super_block *,
- struct shrink_control *);
- long (*free_cached_objects)(struct super_block *,
- struct shrink_control *);
-};
+extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
+ struct file *dst_file, loff_t dst_pos,
+ loff_t len, unsigned int remap_flags);
/*
* Inode flags - they have no relation to superblock flags now
*/
-#define S_SYNC 1 /* Writes are synced at once */
-#define S_NOATIME 2 /* Do not update access times */
-#define S_APPEND 4 /* Append-only file */
-#define S_IMMUTABLE 8 /* Immutable file */
-#define S_DEAD 16 /* removed, but still open directory */
-#define S_NOQUOTA 32 /* Inode is not counted to quota */
-#define S_DIRSYNC 64 /* Directory modifications are synchronous */
-#define S_NOCMTIME 128 /* Do not update file c/mtime */
-#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
-#define S_PRIVATE 512 /* Inode is fs-internal */
-#define S_IMA 1024 /* Inode has an associated IMA struct */
-#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
-#define S_NOSEC 4096 /* no suid or xattr security attributes */
+#define S_SYNC (1 << 0) /* Writes are synced at once */
+#define S_NOATIME (1 << 1) /* Do not update access times */
+#define S_APPEND (1 << 2) /* Append-only file */
+#define S_IMMUTABLE (1 << 3) /* Immutable file */
+#define S_DEAD (1 << 4) /* removed, but still open directory */
+#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */
+#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */
+#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */
+#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */
+#define S_PRIVATE (1 << 9) /* Inode is fs-internal */
+#define S_IMA (1 << 10) /* Inode has an associated IMA struct */
+#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */
+#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */
#ifdef CONFIG_FS_DAX
-#define S_DAX 8192 /* Direct Access, avoiding the page cache */
+#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */
#else
-#define S_DAX 0 /* Make all the DAX code disappear */
+#define S_DAX 0 /* Make all the DAX code disappear */
#endif
+#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
+#define S_CASEFOLD (1 << 15) /* Casefolded file */
+#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
+#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. fs/cachefiles) */
+#define S_ANON_INODE (1 << 19) /* Inode is an anonymous inode */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1836,7 +2112,7 @@ struct super_operations {
* possible to override it selectively if you really wanted to with some
* ioctl() that is not currently implemented.
*
- * Exception: MS_RDONLY is always applied to the entire file system.
+ * Exception: SB_RDONLY is always applied to the entire file system.
*
* Unfortunately, it is possible to change a filesystems flags with it mounted
* with files in use. This means that all of the inodes will not have their
@@ -1845,140 +2121,74 @@ struct super_operations {
*/
#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
-#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
-#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
+#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb)
+#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
-#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
+#define IS_DIRSYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS|SB_DIRSYNC) || \
((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
-#define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
-#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
-#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
+#define IS_MANDLOCK(inode) __IS_FLG(inode, SB_MANDLOCK)
+#define IS_NOATIME(inode) __IS_FLG(inode, SB_RDONLY|SB_NOATIME)
+#define IS_I_VERSION(inode) __IS_FLG(inode, SB_I_VERSION)
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
-#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
+
+#ifdef CONFIG_FS_POSIX_ACL
+#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
+#else
+#define IS_POSIXACL(inode) 0
+#endif
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
+
+#ifdef CONFIG_SWAP
#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
+#else
+#define IS_SWAPFILE(inode) ((void)(inode), 0U)
+#endif
+
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
+#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED)
+#define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD)
+#define IS_VERITY(inode) ((inode)->i_flags & S_VERITY)
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
+#define IS_ANON_FILE(inode) ((inode)->i_flags & S_ANON_INODE)
-static inline bool HAS_UNMAPPED_ID(struct inode *inode)
+static inline bool HAS_UNMAPPED_ID(struct mnt_idmap *idmap,
+ struct inode *inode)
{
- return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid);
+ return !vfsuid_valid(i_uid_into_vfsuid(idmap, inode)) ||
+ !vfsgid_valid(i_gid_into_vfsgid(idmap, inode));
}
-static inline enum rw_hint file_write_hint(struct file *file)
+static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
- if (file->f_write_hint != WRITE_LIFE_NOT_SET)
- return file->f_write_hint;
-
- return file_inode(file)->i_write_hint;
+ *kiocb = (struct kiocb) {
+ .ki_filp = filp,
+ .ki_flags = filp->f_iocb_flags,
+ .ki_ioprio = get_current_ioprio(),
+ };
}
-static inline int iocb_flags(struct file *file);
-
-static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+ struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
- .ki_flags = iocb_flags(filp),
- .ki_hint = file_write_hint(filp),
+ .ki_flags = kiocb_src->ki_flags,
+ .ki_ioprio = kiocb_src->ki_ioprio,
+ .ki_pos = kiocb_src->ki_pos,
};
}
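
Editor's sketch: init_sync_kiocb() exists so synchronous callers can drive the iter-based hooks. A synchronous read through ->read_iter, with myfs_sync_read() hypothetical and NULL-hook/error details elided:

	static ssize_t myfs_sync_read(struct file *filp, struct iov_iter *to,
				      loff_t *ppos)
	{
		struct kiocb kiocb;
		ssize_t ret;

		init_sync_kiocb(&kiocb, filp);
		kiocb.ki_pos = *ppos;
		ret = filp->f_op->read_iter(&kiocb, to);
		if (ret > 0)
			*ppos = kiocb.ki_pos;
		return ret;
	}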
-/*
- * Inode state bits. Protected by inode->i_lock
- *
- * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
- * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
- *
- * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
- * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
- * various stages of removing an inode.
- *
- * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
- *
- * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
- * fdatasync(). i_atime is the usual cause.
- * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
- * these changes separately from I_DIRTY_SYNC so that we
- * don't have to write inode on fdatasync() when only
- * mtime has changed in it.
- * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
- * I_NEW Serves as both a mutex and completion notification.
- * New inodes set I_NEW. If two processes both create
- * the same inode, one of them will release its inode and
- * wait for I_NEW to be released before returning.
- * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
- * also cause waiting on I_NEW, without I_NEW actually
- * being set. find_inode() uses this to prevent returning
- * nearly-dead inodes.
- * I_WILL_FREE Must be set when calling write_inode_now() if i_count
- * is zero. I_FREEING must be set when I_WILL_FREE is
- * cleared.
- * I_FREEING Set when inode is about to be freed but still has dirty
- * pages or buffers attached or the inode itself is still
- * dirty.
- * I_CLEAR Added by clear_inode(). In this state the inode is
- * clean and can be destroyed. Inode keeps I_FREEING.
- *
- * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
- * prohibited for many purposes. iget() must wait for
- * the inode to be completely released, then create it
- * anew. Other functions will just ignore such inodes,
- * if appropriate. I_NEW is used for waiting.
- *
- * I_SYNC Writeback of inode is running. The bit is set during
- * data writeback, and cleared with a wakeup on the bit
- * address once it is done. The bit is also used to pin
- * the inode in memory for flusher thread.
- *
- * I_REFERENCED Marks the inode as recently references on the LRU list.
- *
- * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
- *
- * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
- * synchronize competing switching instances and to tell
- * wb stat updates to grab mapping->tree_lock. See
- * inode_switch_wb_work_fn() for details.
- *
- * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
- * and work dirs among overlayfs mounts.
- *
- * Q: What is the difference between I_WILL_FREE and I_FREEING?
- */
-#define I_DIRTY_SYNC (1 << 0)
-#define I_DIRTY_DATASYNC (1 << 1)
-#define I_DIRTY_PAGES (1 << 2)
-#define __I_NEW 3
-#define I_NEW (1 << __I_NEW)
-#define I_WILL_FREE (1 << 4)
-#define I_FREEING (1 << 5)
-#define I_CLEAR (1 << 6)
-#define __I_SYNC 7
-#define I_SYNC (1 << __I_SYNC)
-#define I_REFERENCED (1 << 8)
-#define __I_DIO_WAKEUP 9
-#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
-#define I_LINKABLE (1 << 10)
-#define I_DIRTY_TIME (1 << 11)
-#define __I_DIRTY_TIME_EXPIRED 12
-#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
-#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
-
-#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
-#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
-
extern void __mark_inode_dirty(struct inode *, int);
static inline void mark_inode_dirty(struct inode *inode)
{
@@ -1990,6 +2200,26 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
+static inline int icount_read(const struct inode *inode)
+{
+ return atomic_read(&inode->i_count);
+}
+
+/*
+ * Returns true if the given inode itself only has dirty timestamps (its pages
+ * may still be dirty) and isn't currently being allocated or freed.
+ * Filesystems should call this when writing an inode with lazytime enabled,
+ * if they want to opportunistically write the timestamps of other inodes
+ * located very nearby on-disk, e.g. in the same inode block. This returns true
+ * if the given inode is in need of such an opportunistic update. Requires
+ * i_lock, or at least later re-checking under i_lock.
+ */
+static inline bool inode_is_dirtytime_only(struct inode *inode)
+{
+ return (inode_state_read_once(inode) &
+ (I_DIRTY_TIME | I_NEW | I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
+}
+
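A hedged sketch of the opportunistic update the comment above describes, re-validating the lockless check under i_lock; the sketch_* helpers are hypothetical filesystem-private code:

static void sketch_write_neighbour_timestamps(struct inode *inode)
{
	struct inode *other = sketch_sibling_inode(inode);	/* hypothetical */

	if (other && inode_is_dirtytime_only(other)) {
		spin_lock(&other->i_lock);
		if (inode_is_dirtytime_only(other))	/* re-check under i_lock */
			sketch_flush_timestamps(other);	/* hypothetical */
		spin_unlock(&other->i_lock);
	}
}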
extern void inc_nlink(struct inode *inode);
extern void drop_nlink(struct inode *inode);
extern void clear_nlink(struct inode *inode);
@@ -2007,21 +2237,6 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-/**
- * inode_inc_iversion - increments i_version
- * @inode: inode that need to be updated
- *
- * Every time the inode is modified, the i_version field will be incremented.
- * The filesystem has to be mounted with i_version flag
- */
-
-static inline void inode_inc_iversion(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- inode->i_version++;
- spin_unlock(&inode->i_lock);
-}
-
enum file_time_flags {
S_ATIME = 1,
S_MTIME = 2,
@@ -2029,14 +2244,19 @@ enum file_time_flags {
S_VERSION = 8,
};
+extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
+int inode_update_time(struct inode *inode, int flags);
+
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
touch_atime(&file->f_path);
}
-int sync_inode(struct inode *inode, struct writeback_control *wbc);
+extern int file_modified(struct file *file);
+int kiocb_modified(struct kiocb *iocb);
+
int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
@@ -2046,7 +2266,14 @@ struct file_system_type {
#define FS_BINARY_MOUNTDATA 2
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
+#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
+#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
+#define FS_MGTIME 64 /* FS uses multigrain timestamps */
+#define FS_LBS 128 /* FS supports LBS */
+#define FS_POWER_FREEZE 256 /* Always freeze on suspend/hibernate */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
+ int (*init_fs_context)(struct fs_context *);
+ const struct fs_parameter_spec *parameters;
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
@@ -2061,61 +2288,55 @@ struct file_system_type {
struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
+ struct lock_class_key invalidate_lock_key;
struct lock_class_key i_mutex_dir_key;
};
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-extern struct dentry *mount_ns(struct file_system_type *fs_type,
- int flags, void *data, void *ns, struct user_namespace *user_ns,
- int (*fill_super)(struct super_block *, void *, int));
-extern struct dentry *mount_bdev(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-extern struct dentry *mount_single(struct file_system_type *fs_type,
- int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-extern struct dentry *mount_nodev(struct file_system_type *fs_type,
- int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
+/**
+ * is_mgtime: is this inode using multigrain timestamps
+ * @inode: inode to test for multigrain timestamps
+ *
+ * Return true if the inode uses multigrain timestamps, false otherwise.
+ */
+static inline bool is_mgtime(const struct inode *inode)
+{
+ return inode->i_opflags & IOP_MGTIME;
+}
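As a sketch, a filesystem opts in with FS_MGTIME on its file_system_type (which is what ends up setting IOP_MGTIME on its inodes), after which per-inode code can gate on is_mgtime(); "sketchfs" is hypothetical:

static struct file_system_type sketchfs_type = {
	.name		= "sketchfs",
	.fs_flags	= FS_MGTIME,	/* inodes will carry IOP_MGTIME */
	/* .init_fs_context, .kill_sb, ... elided */
};

static bool sketchfs_wants_mgtime(const struct inode *inode)
{
	return is_mgtime(inode);	/* gate multigrain-only handling */
}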
+
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
+void retire_super(struct super_block *sb);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
-void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
+int set_anon_super_fc(struct super_block *s, struct fs_context *fc);
int get_anon_bdev(dev_t *);
void free_anon_bdev(dev_t);
-struct super_block *sget_userns(struct file_system_type *type,
- int (*test)(struct super_block *,void *),
- int (*set)(struct super_block *,void *),
- int flags, struct user_namespace *user_ns,
- void *data);
+struct super_block *sget_fc(struct fs_context *fc,
+ int (*test)(struct super_block *, struct fs_context *),
+ int (*set)(struct super_block *, struct fs_context *));
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
-extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *,
- const struct super_operations *ops,
- const struct xattr_handler **xattr,
- const struct dentry_operations *dops,
- unsigned long);
-
-static inline struct dentry *
-mount_pseudo(struct file_system_type *fs_type, char *name,
- const struct super_operations *ops,
- const struct dentry_operations *dops, unsigned long magic)
-{
- return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic);
-}
+struct super_block *sget_dev(struct fs_context *fc, dev_t dev);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
-#define fops_get(fops) \
- (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
-#define fops_put(fops) \
- do { if (fops) module_put((fops)->owner); } while(0)
+#define fops_get(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ (((_fops) && try_module_get((_fops)->owner) ? (_fops) : NULL)); \
+})
+
+#define fops_put(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ if (_fops) \
+ module_put((_fops)->owner); \
+})
+
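A sketch of the intended pairing: pin the owning module before calling through a file_operations table, and drop it afterwards; sketch_use_fops() is illustrative:

static int sketch_use_fops(const struct file_operations *candidate)
{
	const struct file_operations *fops = fops_get(candidate);

	if (!fops)
		return -ENODEV;		/* owner module is on its way out */
	/* ... safe to call through fops while the module is pinned ... */
	fops_put(fops);
	return 0;
}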
/*
* This one is to be used *ONLY* from ->open() instances.
* fops must be non-NULL, pinned down *and* module dependencies
@@ -2130,254 +2351,184 @@ mount_pseudo(struct file_system_type *fs_type, char *name,
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
-#define kern_mount(type) kern_mount_data(type, NULL)
-extern void kern_unmount(struct vfsmount *mnt);
-extern int may_umount_tree(struct vfsmount *);
-extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char __user *,
- const char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(const struct path *);
-extern void drop_collected_mounts(struct vfsmount *);
-extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
- struct vfsmount *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-extern int vfs_ustat(dev_t, struct kstatfs *);
-extern int freeze_super(struct super_block *super);
-extern int thaw_super(struct super_block *super);
-extern bool our_mnt(struct vfsmount *mnt);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
-extern int current_umask(void);
-
-extern void ihold(struct inode * inode);
-extern void iput(struct inode *);
-extern int generic_update_time(struct inode *, struct timespec *, int);
-
-/* /sys/fs */
-extern struct kobject *fs_kobj;
-
-#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
-
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-extern int locks_mandatory_locked(struct file *);
-extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit - an otherwise meaningless combination.
- */
-
-static inline int __mandatory_lock(struct inode *ino)
-{
- return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
-}
-
-/*
- * ... and these candidates should be on MS_MANDLOCK mounted fs,
- * otherwise these will be advisory locks
- */
-
-static inline int mandatory_lock(struct inode *ino)
+static inline void super_set_uuid(struct super_block *sb, const u8 *uuid, unsigned len)
{
- return IS_MANDLOCK(ino) && __mandatory_lock(ino);
+ if (WARN_ON(len > sizeof(sb->s_uuid)))
+ len = sizeof(sb->s_uuid);
+ sb->s_uuid_len = len;
+ memcpy(&sb->s_uuid, uuid, len);
}
-static inline int locks_verify_locked(struct file *file)
+/* set sb sysfs name based on sb->s_bdev */
+static inline void super_set_sysfs_name_bdev(struct super_block *sb)
{
- if (mandatory_lock(locks_inode(file)))
- return locks_mandatory_locked(file);
- return 0;
+ snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pg", sb->s_bdev);
}
-static inline int locks_verify_truncate(struct inode *inode,
- struct file *f,
- loff_t size)
+/* set sb sysfs name based on sb->s_uuid */
+static inline void super_set_sysfs_name_uuid(struct super_block *sb)
{
- if (!inode->i_flctx || !mandatory_lock(inode))
- return 0;
-
- if (size < inode->i_size) {
- return locks_mandatory_area(inode, f, size, inode->i_size - 1,
- F_WRLCK);
- } else {
- return locks_mandatory_area(inode, f, inode->i_size, size - 1,
- F_WRLCK);
- }
+ WARN_ON(sb->s_uuid_len != sizeof(sb->s_uuid));
+ snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pU", sb->s_uuid.b);
}
-#else /* !CONFIG_MANDATORY_FILE_LOCKING */
-
-static inline int locks_mandatory_locked(struct file *file)
+/* set sb sysfs name based on sb->s_id */
+static inline void super_set_sysfs_name_id(struct super_block *sb)
{
- return 0;
-}
-
-static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
- loff_t start, loff_t end, unsigned char type)
-{
- return 0;
+ strscpy(sb->s_sysfs_name, sb->s_id, sizeof(sb->s_sysfs_name));
}
-static inline int __mandatory_lock(struct inode *inode)
+/* try to use something standard before you use this */
+__printf(2, 3)
+static inline void super_set_sysfs_name_generic(struct super_block *sb, const char *fmt, ...)
{
- return 0;
-}
+ va_list args;
-static inline int mandatory_lock(struct inode *inode)
-{
- return 0;
+ va_start(args, fmt);
+ vsnprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), fmt, args);
+ va_end(args);
}
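A sketch of how a fill_super implementation might pick among these helpers, preferring a standard name source over the printf variant; sketch_fill_super() is hypothetical:

static int sketch_fill_super(struct super_block *sb, struct fs_context *fc)
{
	if (sb->s_bdev)
		super_set_sysfs_name_bdev(sb);	/* e.g. "sda1" */
	else
		super_set_sysfs_name_id(sb);
	return 0;
}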
-static inline int locks_verify_locked(struct file *file)
-{
- return 0;
-}
+extern void ihold(struct inode * inode);
+extern void iput(struct inode *);
+void iput_not_last(struct inode *);
+int inode_update_timestamps(struct inode *inode, int flags);
+int generic_update_time(struct inode *, int);
-static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
- size_t size)
-{
- return 0;
-}
+/* /sys/fs */
+extern struct kobject *fs_kobj;
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
+/* fs/open.c */
+struct audit_names;
+struct filename {
+ const char *name; /* pointer to actual string */
+ const __user char *uptr; /* original userland pointer */
+ atomic_t refcnt;
+ struct audit_names *aname;
+ const char iname[];
+};
+static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
-#ifdef CONFIG_FILE_LOCKING
-static inline int break_lease(struct inode *inode, unsigned int mode)
+static inline struct mnt_idmap *file_mnt_idmap(const struct file *file)
{
- /*
- * Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking i_flctx->flc_lease. Otherwise, we
- * could end up racing with tasks trying to set a new lease on this
- * file.
- */
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_LEASE);
- return 0;
+ return mnt_idmap(file->f_path.mnt);
}
-static inline int break_deleg(struct inode *inode, unsigned int mode)
+/**
+ * is_idmapped_mnt - check whether a mount is mapped
+ * @mnt: the mount to check
+ *
+ * If @mnt has a non-@nop_mnt_idmap attached to it then @mnt is mapped.
+ *
+ * Return: true if mount is mapped, false if not.
+ */
+static inline bool is_idmapped_mnt(const struct vfsmount *mnt)
{
- /*
- * Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking i_flctx->flc_lease. Otherwise, we
- * could end up racing with tasks trying to set a new lease on this
- * file.
- */
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_DELEG);
- return 0;
+ return mnt_idmap(mnt) != &nop_mnt_idmap;
}
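A sketch of resolving the idmap from a file and translating ownership through it; sketch_report_owner() is illustrative:

static void sketch_report_owner(struct file *file)
{
	struct mnt_idmap *idmap = file_mnt_idmap(file);
	struct inode *inode = file_inode(file);
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);

	if (is_idmapped_mnt(file->f_path.mnt))
		pr_debug("uid as seen through the idmapped mount: %u\n",
			 __vfsuid_val(vfsuid));
}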
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+int vfs_truncate(const struct path *, loff_t);
+int do_truncate(struct mnt_idmap *, struct dentry *, loff_t start,
+ unsigned int time_attrs, struct file *filp);
+extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
+ loff_t len);
+int do_sys_open(int dfd, const char __user *filename, int flags,
+ umode_t mode);
+extern struct file *file_open_name(struct filename *, int, umode_t);
+extern struct file *filp_open(const char *, int, umode_t);
+extern struct file *file_open_root(const struct path *,
+ const char *, int, umode_t);
+static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
+ const char *name, int flags, umode_t mode)
{
- int ret;
-
- ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
- if (ret == -EWOULDBLOCK && delegated_inode) {
- *delegated_inode = inode;
- ihold(inode);
- }
- return ret;
+ return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root},
+ name, flags, mode);
}
+struct file *dentry_open(const struct path *path, int flags,
+ const struct cred *creds);
+struct file *dentry_open_nonotify(const struct path *path, int flags,
+ const struct cred *cred);
+struct file *dentry_create(const struct path *path, int flags, umode_t mode,
+ const struct cred *cred);
+const struct path *backing_file_user_path(const struct file *f);
-static inline int break_deleg_wait(struct inode **delegated_inode)
+/*
+ * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
+ * stored in ->vm_file is a backing file whose f_inode is on the underlying
+ * filesystem. When the mapped file path and inode number are displayed to
+ * the user (e.g. via /proc/<pid>/maps), these helpers should be used to get
+ * the path and inode number to display to the user, which is the path of the
+ * fd that the user has requested to map and the inode number that would be
+ * returned by fstat() on that same fd.
+ */
+/* Get the path to display in /proc/<pid>/maps */
+static inline const struct path *file_user_path(const struct file *f)
{
- int ret;
-
- ret = break_deleg(*delegated_inode, O_WRONLY);
- iput(*delegated_inode);
- *delegated_inode = NULL;
- return ret;
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return backing_file_user_path(f);
+ return &f->f_path;
}
-
-static inline int break_layout(struct inode *inode, bool wait)
+/* Get the inode whose inode number to display in /proc/<pid>/maps */
+static inline const struct inode *file_user_inode(const struct file *f)
{
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode,
- wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
- FL_LAYOUT);
- return 0;
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return d_inode(backing_file_user_path(f)->dentry);
+ return file_inode(f);
}
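A sketch of a /proc/<pid>/maps-style printer using these helpers so that a backing file shows up under the path and inode number the user mapped; sketch_show_map_file() is hypothetical:

static void sketch_show_map_file(struct seq_file *m, struct file *f)
{
	const struct path *path = file_user_path(f);
	const struct inode *inode = file_user_inode(f);

	seq_printf(m, "%lu ", inode->i_ino);
	seq_path(m, path, "\n");	/* escape newlines in the path */
}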
-#else /* !CONFIG_FILE_LOCKING */
-static inline int break_lease(struct inode *inode, unsigned int mode)
+static inline struct file *file_clone_open(struct file *file)
{
- return 0;
+ return dentry_open(&file->f_path, file->f_flags, file->f_cred);
}
+extern int filp_close(struct file *, fl_owner_t id);
-static inline int break_deleg(struct inode *inode, unsigned int mode)
+extern struct filename *getname_flags(const char __user *, int);
+extern struct filename *getname_uflags(const char __user *, int);
+static inline struct filename *getname(const char __user *name)
{
- return 0;
+ return getname_flags(name, 0);
}
-
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+extern struct filename *getname_kernel(const char *);
+extern struct filename *__getname_maybe_null(const char __user *);
+static inline struct filename *getname_maybe_null(const char __user *name, int flags)
{
- return 0;
-}
+ if (!(flags & AT_EMPTY_PATH))
+ return getname(name);
-static inline int break_deleg_wait(struct inode **delegated_inode)
-{
- BUG();
- return 0;
+ if (!name)
+ return NULL;
+ return __getname_maybe_null(name);
}
+extern void putname(struct filename *name);
+DEFINE_FREE(putname, struct filename *, if (!IS_ERR_OR_NULL(_T)) putname(_T))
-static inline int break_layout(struct inode *inode, bool wait)
+static inline struct filename *refname(struct filename *name)
{
- return 0;
+ atomic_inc(&name->refcnt);
+ return name;
}
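A sketch of scope-based cleanup enabled by the DEFINE_FREE() above (via <linux/cleanup.h>), so error paths cannot leak the name:

static int sketch_handle_user_name(const char __user *uname)
{
	struct filename *name __free(putname) = getname(uname);

	if (IS_ERR(name))
		return PTR_ERR(name);
	/* ... use name->name; putname() runs automatically on return ... */
	return 0;
}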
-#endif /* CONFIG_FILE_LOCKING */
-
-/* fs/open.c */
-struct audit_names;
-struct filename {
- const char *name; /* pointer to actual string */
- const __user char *uptr; /* original userland pointer */
- struct audit_names *aname;
- int refcnt;
- const char iname[];
-};
-
-extern long vfs_truncate(const struct path *, loff_t);
-extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
- struct file *filp);
-extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
- loff_t len);
-extern long do_sys_open(int dfd, const char __user *filename, int flags,
- umode_t mode);
-extern struct file *file_open_name(struct filename *, int, umode_t);
-extern struct file *filp_open(const char *, int, umode_t);
-extern struct file *file_open_root(struct dentry *, struct vfsmount *,
- const char *, int, umode_t);
-extern struct file * dentry_open(const struct path *, int, const struct cred *);
-extern int filp_close(struct file *, fl_owner_t id);
-
-extern struct filename *getname_flags(const char __user *, int, int *);
-extern struct filename *getname(const char __user *);
-extern struct filename *getname_kernel(const char *);
-extern void putname(struct filename *name);
-
-enum {
- FILE_CREATED = 1,
- FILE_OPENED = 2
-};
extern int finish_open(struct file *file, struct dentry *dentry,
- int (*open)(struct inode *, struct file *),
- int *opened);
+ int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
-/* fs/ioctl.c */
+/* Helper for the simple case when original dentry is used */
+static inline int finish_open_simple(struct file *file, int error)
+{
+ if (error)
+ return error;
-extern int ioctl_preallocate(struct file *filp, void __user *argp);
+ return finish_open(file, file->f_path.dentry, NULL);
+}
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
@@ -2388,87 +2539,10 @@ extern struct kmem_cache *names_cachep;
#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-#ifdef CONFIG_BLOCK
-extern int register_blkdev(unsigned int, const char *);
-extern void unregister_blkdev(unsigned int, const char *);
-extern void bdev_unhash_inode(dev_t dev);
-extern struct block_device *bdget(dev_t);
-extern struct block_device *bdgrab(struct block_device *bdev);
-extern void bd_set_size(struct block_device *, loff_t size);
-extern void bd_forget(struct inode *inode);
-extern void bdput(struct block_device *);
-extern void invalidate_bdev(struct block_device *);
-extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
-extern int sync_blockdev(struct block_device *bdev);
-extern void kill_bdev(struct block_device *);
-extern struct super_block *freeze_bdev(struct block_device *);
-extern void emergency_thaw_all(void);
-extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
-extern int fsync_bdev(struct block_device *);
-
-extern struct super_block *blockdev_superblock;
-
-static inline bool sb_is_blkdev_sb(struct super_block *sb)
-{
- return sb == blockdev_superblock;
-}
-#else
-static inline void bd_forget(struct inode *inode) {}
-static inline int sync_blockdev(struct block_device *bdev) { return 0; }
-static inline void kill_bdev(struct block_device *bdev) {}
-static inline void invalidate_bdev(struct block_device *bdev) {}
-
-static inline struct super_block *freeze_bdev(struct block_device *sb)
-{
- return NULL;
-}
-
-static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
- return 0;
-}
-
-static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
-{
-}
-
-static inline bool sb_is_blkdev_sb(struct super_block *sb)
-{
- return false;
-}
-#endif
+void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
-#ifdef CONFIG_BLOCK
-extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
-extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
-extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
-extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder);
-extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
- void *holder);
-extern void blkdev_put(struct block_device *bdev, fmode_t mode);
-extern int __blkdev_reread_part(struct block_device *bdev);
-extern int blkdev_reread_part(struct block_device *bdev);
-
-#ifdef CONFIG_SYSFS
-extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
-extern void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk);
-#else
-static inline int bd_link_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
- return 0;
-}
-static inline void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
-}
-#endif
-#endif
/* fs/char_dev.c */
#define CHRDEV_MAJOR_MAX 512
@@ -2499,125 +2573,38 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
__unregister_chrdev(major, 0, 256, name);
}
-/* fs/block_dev.c */
-#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
-#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
-
-#ifdef CONFIG_BLOCK
-#define BLKDEV_MAJOR_MAX 512
-extern const char *__bdevname(dev_t, char *buffer);
-extern const char *bdevname(struct block_device *bdev, char *buffer);
-extern struct block_device *lookup_bdev(const char *);
-extern void blkdev_show(struct seq_file *,off_t);
-
-#else
-#define BLKDEV_MAJOR_MAX 0
-#endif
-
extern void init_special_inode(struct inode *, umode_t, dev_t);
/* Invalid inode operations -- fs/bad_inode.c */
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
-#ifdef CONFIG_BLOCK
-extern void check_disk_size_change(struct gendisk *disk,
- struct block_device *bdev);
-extern int revalidate_disk(struct gendisk *);
-extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *, bool);
-extern int invalidate_partition(struct gendisk *, int);
-#endif
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-
-static inline void invalidate_remote_inode(struct inode *inode)
-{
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
- invalidate_mapping_pages(inode->i_mapping, 0, -1);
-}
-extern int invalidate_inode_pages2(struct address_space *mapping);
-extern int invalidate_inode_pages2_range(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-extern int write_inode_now(struct inode *, int);
-extern int filemap_fdatawrite(struct address_space *);
-extern int filemap_flush(struct address_space *);
-extern int filemap_fdatawait(struct address_space *);
-extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
-extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
- loff_t lend);
-extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
- loff_t lend);
-extern int filemap_write_and_wait(struct address_space *mapping);
-extern int filemap_write_and_wait_range(struct address_space *mapping,
- loff_t lstart, loff_t lend);
-extern int __filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end, int sync_mode);
-extern int filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end);
-extern int filemap_check_errors(struct address_space *mapping);
-
-extern void __filemap_set_wb_err(struct address_space *mapping, int err);
+extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
+ loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
extern int __must_check file_write_and_wait_range(struct file *file,
loff_t start, loff_t end);
+int filemap_flush_range(struct address_space *mapping, loff_t start,
+ loff_t end);
-/**
- * filemap_set_wb_err - set a writeback error on an address_space
- * @mapping: mapping in which to set writeback error
- * @err: error to be set in mapping
- *
- * When writeback fails in some way, we must record that error so that
- * userspace can be informed when fsync and the like are called. We endeavor
- * to report errors on any file that was open at the time of the error. Some
- * internal callers also need to know when writeback errors have occurred.
- *
- * When a writeback error occurs, most filesystems will want to call
- * filemap_set_wb_err to record the error in the mapping so that it will be
- * automatically reported whenever fsync is called on the file.
- *
- * FIXME: mention FS_* flag here?
- */
-static inline void filemap_set_wb_err(struct address_space *mapping, int err)
-{
- /* Fastpath for common case of no error */
- if (unlikely(err))
- __filemap_set_wb_err(mapping, err);
-}
-
-/**
- * filemap_check_wb_error - has an error occurred since the mark was sampled?
- * @mapping: mapping to check for writeback errors
- * @since: previously-sampled errseq_t
- *
- * Grab the errseq_t value from the mapping, and see if it has changed "since"
- * the given value was sampled.
- *
- * If it has then report the latest error set, otherwise return 0.
- */
-static inline int filemap_check_wb_err(struct address_space *mapping,
- errseq_t since)
-{
- return errseq_check(&mapping->wb_err, since);
-}
-
-/**
- * filemap_sample_wb_err - sample the current errseq_t to test for later errors
- * @mapping: mapping to be sampled
- *
- * Writeback errors are always reported relative to a particular sample point
- * in the past. This function provides those sample points.
- */
-static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
+static inline int file_write_and_wait(struct file *file)
{
- return errseq_sample(&mapping->wb_err);
+ return file_write_and_wait_range(file, 0, LLONG_MAX);
}
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
+extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
+ unsigned int flags);
+
+static inline bool iocb_is_dsync(const struct kiocb *iocb)
+{
+ return (iocb->ki_flags & IOCB_DSYNC) ||
+ IS_SYNC(iocb->ki_filp->f_mapping->host);
+}
+
/*
* Sync the bytes written if this was a synchronous write. Expect ki_pos
* to already be updated for the write, and will return either the amount
@@ -2625,12 +2612,17 @@ extern int vfs_fsync(struct file *file, int datasync);
*/
static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
- if (iocb->ki_flags & IOCB_DSYNC) {
+ if (iocb_is_dsync(iocb)) {
int ret = vfs_fsync_range(iocb->ki_filp,
iocb->ki_pos - count, iocb->ki_pos - 1,
(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
if (ret)
return ret;
+ } else if (iocb->ki_flags & IOCB_DONTCACHE) {
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+ filemap_flush_range(mapping, iocb->ki_pos - count,
+ iocb->ki_pos - 1);
}
return count;
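A sketch of a ->write_iter() tail using generic_write_sync(); sketch_perform_write() is an assumed filesystem write path that advances iocb->ki_pos by the bytes written, as the comment above requires:

static ssize_t sketch_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t written = sketch_perform_write(iocb, from);	/* hypothetical */

	if (written > 0)
		written = generic_write_sync(iocb, written);
	return written;
}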
@@ -2638,64 +2630,128 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
extern void emergency_sync(void);
extern void emergency_remount(void);
+
#ifdef CONFIG_BLOCK
-extern sector_t bmap(struct inode *, sector_t);
+extern int bmap(struct inode *inode, sector_t *block);
+#else
+static inline int bmap(struct inode *inode, sector_t *block)
+{
+ return -EINVAL;
+}
#endif
-extern int notify_change(struct dentry *, struct iattr *, struct inode **);
-extern int inode_permission(struct inode *, int);
-extern int __inode_permission(struct inode *, int);
-extern int generic_permission(struct inode *, int);
-extern int __check_sticky(struct inode *dir, struct inode *inode);
+
+int notify_change(struct mnt_idmap *, struct dentry *,
+ struct iattr *, struct delegated_inode *);
+int inode_permission(struct mnt_idmap *, struct inode *, int);
+int generic_permission(struct mnt_idmap *, struct inode *, int);
+static inline int file_permission(struct file *file, int mask)
+{
+ return inode_permission(file_mnt_idmap(file),
+ file_inode(file), mask);
+}
+static inline int path_permission(const struct path *path, int mask)
+{
+ return inode_permission(mnt_idmap(path->mnt),
+ d_inode(path->dentry), mask);
+}
+int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
+ struct inode *inode);
static inline bool execute_ok(struct inode *inode)
{
return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
}
+static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
+{
+ return (inode->i_mode ^ mode) & S_IFMT;
+}
+
+/**
+ * file_start_write - get write access to a superblock for regular file io
+ * @file: the file we want to write to
+ *
+ * This is a variant of sb_start_write() which is a noop on non-regular files.
+ * Should be matched with a call to file_end_write().
+ */
static inline void file_start_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+ sb_start_write(file_inode(file)->i_sb);
}
static inline bool file_start_write_trylock(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return true;
- return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false);
+ return sb_start_write_trylock(file_inode(file)->i_sb);
}
+/**
+ * file_end_write - drop write access to a superblock of a regular file
+ * @file: the file we wrote to
+ *
+ * Should be matched with a call to file_start_write().
+ */
static inline void file_end_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(file_inode(file)->i_sb);
}
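A sketch of bracketing a kernel-internal write with these helpers; whether a given callee already takes freeze protection varies, so treat this purely as an illustration of the pairing:

static ssize_t sketch_locked_write(struct file *file, struct iov_iter *from,
				   loff_t *pos)
{
	ssize_t ret;

	file_start_write(file);		/* no-op unless @file is regular */
	ret = vfs_iter_write(file, from, pos, 0);
	file_end_write(file);
	return ret;
}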
-static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- u64 len)
+/**
+ * kiocb_start_write - get write access to a superblock for async file io
+ * @iocb: the io context we want to submit the write with
+ *
+ * This is a variant of sb_start_write() for async io submission.
+ * Should be matched with a call to kiocb_end_write().
+ */
+static inline void kiocb_start_write(struct kiocb *iocb)
{
- int ret;
+ struct inode *inode = file_inode(iocb->ki_filp);
- file_start_write(file_out);
- ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
- file_end_write(file_out);
+ sb_start_write(inode->i_sb);
+ /*
+ * Fool lockdep by telling it the lock got released so that it
+ * doesn't complain about the held lock when we return to userspace.
+ */
+ __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
+}
- return ret;
+/**
+ * kiocb_end_write - drop write access to a superblock after async file io
+ * @iocb: the io context we submitted the write with
+ *
+ * Should be matched with a call to kiocb_start_write().
+ */
+static inline void kiocb_end_write(struct kiocb *iocb)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ /*
+ * Tell lockdep we inherited freeze protection from submission thread.
+ */
+ __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(inode->i_sb);
}
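A sketch of the split lifetime for async submission: protection is taken before ->write_iter() and dropped from the completion handler; both sketch_* functions are hypothetical:

static void sketch_async_write_done(struct kiocb *iocb, long res)
{
	kiocb_end_write(iocb);		/* matches kiocb_start_write() below */
	/* ... complete the request using res ... */
}

static ssize_t sketch_submit_async_write(struct kiocb *iocb,
					 struct iov_iter *from)
{
	kiocb_start_write(iocb);
	return iocb->ki_filp->f_op->write_iter(iocb, from);
}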
/*
+ * This is used for regular files where some users -- especially the
+ * currently executed binary in a process, previously handled via
+ * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap
+ * read-write shared) accesses.
+ *
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
- * This is used for regular files.
- * We cannot support write (and maybe mmap read-write shared) accesses and
- * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
- * can have the following values:
- * 0: no writers, no VM_DENYWRITE mappings
- * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
- * > 0: (i_writecount) users are writing to the file.
+ * deny_write_access() denies write access to a file.
+ * allow_write_access() re-enables write access to a file.
+ *
+ * The i_writecount field of an inode can have the following values:
+ * 0: no write access, no denied write access
+ * < 0: (-i_writecount) users that denied write access to the file.
+ * > 0: (i_writecount) users that have write access to the file.
*
* Normally we operate on that counter with atomic_{inc,dec} and it's safe
* except for the cases where we don't hold i_writecount yet. Then we need to
@@ -2720,16 +2776,43 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file_inode(file)->i_writecount);
}
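A sketch of the deny/allow pairing described above; deny_write_access() fails with -ETXTBSY while writers hold i_writecount:

static int sketch_inspect_binary(struct file *file)
{
	int err = deny_write_access(file);

	if (err)
		return err;		/* -ETXTBSY: the file is open for write */
	/* ... i_writecount is negative, no new writers can appear ... */
	allow_write_access(file);
	return 0;
}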
+
+/*
+ * Do not prevent write to executable file when watched by pre-content events.
+ *
+ * Note that FMODE_FSNOTIFY_HSM mode is set depending on pre-content watches at
+ * the time of file open and remains constant for the entire lifetime of the
+ * file, so if pre-content watches are added post execution or removed before
+ * the end of the execution, it will not cause an i_writecount reference leak.
+ */
+static inline int exe_file_deny_write_access(struct file *exe_file)
+{
+ if (unlikely(FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
+ return 0;
+ return deny_write_access(exe_file);
+}
+static inline void exe_file_allow_write_access(struct file *exe_file)
+{
+ if (unlikely(!exe_file || FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
+ return;
+ allow_write_access(exe_file);
+}
+
+static inline void file_set_fsnotify_mode(struct file *file, fmode_t mode)
+{
+ file->f_mode &= ~FMODE_FSNOTIFY_MASK;
+ file->f_mode |= mode;
+}
+
static inline bool inode_is_open_for_write(const struct inode *inode)
{
return atomic_read(&inode->i_writecount) > 0;
}
-#ifdef CONFIG_IMA
+#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
static inline void i_readcount_dec(struct inode *inode)
{
- BUG_ON(!atomic_read(&inode->i_readcount));
- atomic_dec(&inode->i_readcount);
+ BUG_ON(atomic_dec_return(&inode->i_readcount) < 0);
}
static inline void i_readcount_inc(struct inode *inode)
{
@@ -2747,52 +2830,46 @@ static inline void i_readcount_inc(struct inode *inode)
#endif
extern int do_pipe_flags(int *, int);
-#define __kernel_read_file_id(id) \
- id(UNKNOWN, unknown) \
- id(FIRMWARE, firmware) \
- id(FIRMWARE_PREALLOC_BUFFER, firmware) \
- id(MODULE, kernel-module) \
- id(KEXEC_IMAGE, kexec-image) \
- id(KEXEC_INITRAMFS, kexec-initramfs) \
- id(POLICY, security-policy) \
- id(MAX_ID, )
-
-#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
-#define __fid_stringify(dummy, str) #str,
-
-enum kernel_read_file_id {
- __kernel_read_file_id(__fid_enumify)
-};
-
-static const char * const kernel_read_file_str[] = {
- __kernel_read_file_id(__fid_stringify)
-};
-
-static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
-{
- if ((unsigned)id >= READING_MAX_ID)
- return kernel_read_file_str[READING_UNKNOWN];
-
- return kernel_read_file_str[id];
-}
-
-extern int kernel_read(struct file *, loff_t, char *, unsigned long);
-extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern int kernel_read_file_from_path(char *, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
-extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
+extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
+extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
+extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
extern bool is_subdir(struct dentry *, struct dentry *);
extern bool path_is_under(const struct path *, const struct path *);
+u64 vfsmount_to_propagation_flags(struct vfsmount *mnt);
extern char *file_path(struct file *, char *, int);
+/**
+ * is_dot_dotdot - returns true only if @name is "." or ".."
+ * @name: file name to check
+ * @len: length of file name, in bytes
+ */
+static inline bool is_dot_dotdot(const char *name, size_t len)
+{
+ return len && unlikely(name[0] == '.') &&
+ (len == 1 || (len == 2 && name[1] == '.'));
+}
+
+/**
+ * name_contains_dotdot - check if a file name contains ".." path components
+ * @name: File path string to check
+ * Search for ".." surrounded by either '/' or start/end of string.
+ */
+static inline bool name_contains_dotdot(const char *name)
+{
+ size_t name_len;
+
+ name_len = strlen(name);
+ return strcmp(name, "..") == 0 ||
+ strncmp(name, "../", 3) == 0 ||
+ strstr(name, "/../") != NULL ||
+ (name_len >= 3 && strcmp(name + name_len - 3, "/..") == 0);
+}
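A sketch of vetting an untrusted name with both helpers, e.g. one supplied by a server or stored in metadata:

static int sketch_validate_untrusted_name(const char *name)
{
	if (is_dot_dotdot(name, strlen(name)) || name_contains_dotdot(name))
		return -EINVAL;		/* refuse path-walking components */
	return 0;
}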
+
#include <linux/err.h>
/* needed for stackable file system support */
@@ -2800,32 +2877,50 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
-extern int inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always_gfp(struct super_block *, struct inode *, gfp_t);
+static inline int inode_init_always(struct super_block *sb, struct inode *inode)
+{
+ return inode_init_always_gfp(sb, inode, GFP_NOFS);
+}
+
extern void inode_init_once(struct inode *);
extern void address_space_init_once(struct address_space *mapping);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
-extern int generic_delete_inode(struct inode *inode);
-static inline int generic_drop_inode(struct inode *inode)
+extern int inode_just_drop(struct inode *inode);
+static inline int inode_generic_drop(struct inode *inode)
{
return !inode->i_nlink || inode_unhashed(inode);
}
+extern void d_mark_dontcache(struct inode *inode);
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
- void *data);
+ void *data, bool *isnew);
extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
-extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
+extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *),
+ void *data);
+struct inode *iget5_locked(struct super_block *, unsigned long,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *);
+struct inode *iget5_locked_rcu(struct super_block *, unsigned long,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern struct inode *find_inode_nowait(struct super_block *,
unsigned long,
int (*match)(struct inode *,
unsigned long, void *),
void *data);
+extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
+ int (*)(struct inode *, void *), void *);
+extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -2834,17 +2929,52 @@ extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
+extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
+extern void evict_inodes(struct super_block *sb);
+void dump_mapping(const struct address_space *);
+
+/*
+ * Userspace may rely on the inode number being non-zero. For example, glibc
+ * simply ignores files with zero i_ino in unlink() and other places.
+ *
+ * As an additional complication, if userspace was compiled with
+ * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the
+ * lower 32 bits, so we need to check that those aren't zero explicitly. With
+ * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but
+ * better safe than sorry.
+ */
+static inline bool is_zero_ino(ino_t ino)
+{
+ return (u32)ino == 0;
+}
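A sketch of an inode number allocator that skips values userspace would mishandle; sketch_ino_alloc() is hypothetical:

static ino_t sketch_next_ino(void)
{
	ino_t ino;

	do {
		ino = sketch_ino_alloc();	/* hypothetical allocator */
	} while (is_zero_ino(ino));	/* never hand out a (32-bit) zero ino */
	return ino;
}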
+
+static inline void __iget(struct inode *inode)
+{
+ lockdep_assert_held(&inode->i_lock);
+ atomic_inc(&inode->i_count);
+}
-extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
-extern struct inode *new_inode_pseudo(struct super_block *sb);
+struct inode *alloc_inode(struct super_block *sb);
+static inline struct inode *new_inode_pseudo(struct super_block *sb)
+{
+ return alloc_inode(sb);
+}
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
-extern int should_remove_suid(struct dentry *);
+extern int setattr_should_drop_suidgid(struct mnt_idmap *, struct inode *);
extern int file_remove_privs(struct file *);
+int setattr_should_drop_sgid(struct mnt_idmap *idmap,
+ const struct inode *inode);
+
+/*
+ * This must be used for allocating filesystems specific inodes to set
+ * up the inode reclaim context correctly.
+ */
+#define alloc_inode_sb(_sb, _cache, _gfp) kmem_cache_alloc_lru(_cache, &_sb->s_inode_lru, _gfp)
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
@@ -2860,60 +2990,64 @@ static inline void remove_inode_hash(struct inode *inode)
}
extern void inode_sb_list_add(struct inode *inode);
+extern void inode_lru_list_add(struct inode *inode);
-#ifdef CONFIG_BLOCK
-extern int bdev_read_only(struct block_device *);
-#endif
-extern int set_blocksize(struct block_device *, int);
-extern int sb_set_blocksize(struct super_block *, int);
-extern int sb_min_blocksize(struct super_block *, int);
-
-extern int generic_file_mmap(struct file *, struct vm_area_struct *);
-extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+int generic_file_mmap(struct file *, struct vm_area_struct *);
+int generic_file_mmap_prepare(struct vm_area_desc *desc);
+int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
+int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
+extern int generic_write_check_limits(struct file *file, loff_t pos,
+ loff_t *count);
+extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
+ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to,
+ ssize_t already_read);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
+ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
+ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
+ ssize_t direct_written, ssize_t buffered_written);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
-
-/* fs/block_dev.c */
-extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
-extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
- int datasync);
-extern void block_sync_page(struct page *page);
+ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter);
+ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter);
/* fs/splice.c */
-extern ssize_t generic_file_splice_read(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
+ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
+ssize_t copy_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
-extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
- struct file *out, loff_t *, size_t len, unsigned int flags);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
-extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
int whence, loff_t maxsize, loff_t eof);
+loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
+ u64 *cookie);
extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
+int rw_verify_area(int, struct file *, const loff_t *, size_t);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
+extern int stream_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
@@ -2925,20 +3059,12 @@ enum {
/* filesystem does not support filling holes */
DIO_SKIP_HOLES = 0x02,
-
- /* filesystem can handle aio writes beyond i_size */
- DIO_ASYNC_EXTEND = 0x04,
-
- /* inode/fs/bdev does not need truncate protection */
- DIO_SKIP_DIO_COUNT = 0x08,
};
-void dio_end_io(struct bio *bio);
-
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
get_block_t get_block,
- dio_iodone_t end_io, dio_submit_t submit_io,
+ dio_iodone_t end_io,
int flags);
static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
@@ -2947,13 +3073,15 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
get_block_t get_block)
{
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
+ get_block, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
+bool inode_dio_finished(const struct inode *inode);
void inode_dio_wait(struct inode *inode);
+void inode_dio_wait_interruptible(struct inode *inode);
-/*
+/**
 * inode_dio_begin - signal start of a direct I/O request
* @inode: inode the direct I/O happens on
*
@@ -2965,7 +3093,7 @@ static inline void inode_dio_begin(struct inode *inode)
atomic_inc(&inode->i_dio_count);
}
-/*
+/**
 * inode_dio_end - signal finish of a direct I/O request
* @inode: inode the direct I/O happens on
*
@@ -2975,7 +3103,7 @@ static inline void inode_dio_begin(struct inode *inode)
static inline void inode_dio_end(struct inode *inode)
{
if (atomic_dec_and_test(&inode->i_dio_count))
- wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+ wake_up_var(&inode->i_dio_count);
}
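A sketch of the begin/end pairing around a direct I/O request so that inode_dio_wait() callers (e.g. truncate) can drain it; for a queued async request, inode_dio_end() would instead be called from the completion handler:

static ssize_t sketch_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_dio_begin(inode);
	ret = sketch_issue_dio(iocb, from);	/* hypothetical submission */
	if (ret != -EIOCBQUEUED)
		inode_dio_end(inode);		/* sync completion or error */
	return ret;
}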
extern void inode_set_flags(struct inode *inode, unsigned int flags,
@@ -2985,23 +3113,33 @@ extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
-extern int readlink_copy(char __user *, int, const char *);
+extern int readlink_copy(char __user *, int, const char *, int);
extern int page_readlink(struct dentry *, char __user *, int);
+extern const char *page_get_link_raw(struct dentry *, struct inode *,
+ struct delayed_call *);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
-extern int __page_symlink(struct inode *inode, const char *symname, int len,
- int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-extern void generic_fillattr(struct inode *, struct kstat *);
+void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
+void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
+void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
+void generic_fill_statx_atomic_writes(struct kstat *stat,
+ unsigned int unit_min,
+ unsigned int unit_max,
+ unsigned int unit_max_opt);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
+static inline loff_t __inode_get_bytes(struct inode *inode)
+{
+ return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+}
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
const char *simple_get_link(struct dentry *, struct inode *,
@@ -3010,81 +3148,76 @@ extern const struct inode_operations simple_symlink_inode_operations;
extern int iterate_dir(struct file *, struct dir_context *);
-extern int vfs_statx(int, const char __user *, int, struct kstat *, u32);
-extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int);
+int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
+ int flags);
+int vfs_fstat(int fd, struct kstat *stat);
static inline int vfs_stat(const char __user *filename, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
+ return vfs_fstatat(AT_FDCWD, filename, stat, 0);
}
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
+ return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
-static inline int vfs_fstatat(int dfd, const char __user *filename,
- struct kstat *stat, int flags)
-{
- return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
-}
-static inline int vfs_fstat(int fd, struct kstat *stat)
-{
- return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0);
-}
-
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char __user *, int);
-extern int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len,
- get_block_t *get_block);
-extern int generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block);
-
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_super(struct block_device *);
-extern struct super_block *get_super_thawed(struct block_device *);
-extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
-extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
-extern void iterate_supers(void (*)(struct super_block *, void *), void *);
+extern void iterate_supers(void (*f)(struct super_block *, void *), void *arg);
extern void iterate_supers_type(struct file_system_type *,
void (*)(struct super_block *, void *), void *);
+void filesystems_freeze(bool freeze_all);
+void filesystems_thaw(void);
+
+void end_dirop(struct dentry *de);
extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, struct dir_context *);
-extern int simple_setattr(struct dentry *, struct iattr *);
-extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int simple_setattr(struct mnt_idmap *, struct dentry *,
+ struct iattr *);
+extern int simple_getattr(struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *,
- struct inode *, struct dentry *, unsigned int);
+extern void __simple_unlink(struct inode *, struct dentry *);
+extern void __simple_rmdir(struct inode *, struct dentry *);
+void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+extern int simple_rename(struct mnt_idmap *, struct inode *,
+ struct dentry *, struct inode *, struct dentry *,
+ unsigned int);
+extern void simple_recursive_removal(struct dentry *,
+ void (*callback)(struct dentry *));
+extern void simple_remove_by_name(struct dentry *, const char *,
+ void (*callback)(struct dentry *));
+extern void locked_recursive_removal(struct dentry *,
+ void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
+extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
-extern int simple_readpage(struct file *file, struct page *page);
-extern int simple_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-extern int simple_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+extern int simple_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata);
+extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
-extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
-extern const struct dentry_operations simple_dentry_operations;
+struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
+ const struct inode *context_inode);
+extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
@@ -3098,39 +3231,124 @@ extern int simple_fill_super(struct super_block *, unsigned long,
const struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
+struct dentry *simple_start_creating(struct dentry *, const char *);
+void simple_done_creating(struct dentry *);
extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
loff_t *ppos, const void *from, size_t available);
extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void __user *from, size_t count);
+struct offset_ctx {
+ struct maple_tree mt;
+ unsigned long next_offset;
+};
+
+void simple_offset_init(struct offset_ctx *octx);
+int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
+void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
+int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+int simple_offset_rename_exchange(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry);
+void simple_offset_destroy(struct offset_ctx *octx);
+
+extern const struct file_operations simple_offset_dir_operations;
+
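/*
 * Illustrative sketch, not part of the patch: how a tmpfs-like filesystem
 * might use the stable-offset directory API above. "struct demo_dir_info"
 * and the surrounding helpers are hypothetical.
 */
struct demo_dir_info {
	struct offset_ctx dir_ctx;	/* maple tree mapping offsets to dentries */
};

static void demo_dir_setup(struct demo_dir_info *info)
{
	/* set up the offset map once per directory inode; the directory's
	 * f_op would be simple_offset_dir_operations, and
	 * simple_offset_destroy() tears the map down on eviction */
	simple_offset_init(&info->dir_ctx);
}

static int demo_dir_add_child(struct demo_dir_info *info, struct dentry *dentry)
{
	/* give the new child a stable readdir offset */
	return simple_offset_add(&info->dir_ctx, dentry);
}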
extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
-#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct address_space *,
- struct page *, struct page *,
- enum migrate_mode);
+extern void generic_set_sb_d_ops(struct super_block *sb);
+extern int generic_ci_match(const struct inode *parent,
+ const struct qstr *name,
+ const struct qstr *folded_name,
+ const u8 *de_name, u32 de_name_len);
+
+#if IS_ENABLED(CONFIG_UNICODE)
+int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str);
+int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name);
+
+/**
+ * generic_ci_validate_strict_name - Check if a given name is suitable
+ * for a directory
+ *
+ * This function checks whether the proposed filename is valid for the
+ * parent directory. Only valid UTF-8 filenames are accepted for
+ * casefolded directories on filesystems created with the strict
+ * encoding flag; any name is accepted for directories that don't have
+ * casefolding enabled or aren't strict about their encoding.
+ *
+ * @dir: inode of the directory where the new file will be created
+ * @name: name of the new file
+ *
+ * Return:
+ * * True if the filename is suitable for this directory, including the
+ *   case where the name would fail strict-encoding checks but the
+ *   directory itself isn't strict
+ * * False if the filename isn't suitable for this directory. This only
+ *   happens when the directory is casefolded and the filesystem is
+ *   strict about its encoding.
+ */
+static inline bool generic_ci_validate_strict_name(struct inode *dir,
+ const struct qstr *name)
+{
+ if (!IS_CASEFOLDED(dir) || !sb_has_strict_encoding(dir->i_sb))
+ return true;
+
+ /*
+	 * A casefolded dir must have an encoding set, unless the
+	 * filesystem is corrupted
+ */
+ if (WARN_ON_ONCE(!dir->i_sb->s_encoding))
+ return true;
+
+ return !utf8_validate(dir->i_sb->s_encoding, name);
+}
#else
-#define buffer_migrate_page NULL
+static inline bool generic_ci_validate_strict_name(struct inode *dir,
+ const struct qstr *name)
+{
+ return true;
+}
#endif
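/*
 * Illustrative sketch, not part of the patch: a hypothetical ->create path
 * rejecting names that fail strict-encoding validation before allocating
 * any inode.
 */
static int demo_create_checked(struct inode *dir, struct dentry *dentry)
{
	/* only fails for casefolded dirs on strict-encoding filesystems */
	if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
		return -EINVAL;
	/* ... proceed with normal inode allocation ... */
	return 0;
}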
-extern int setattr_prepare(struct dentry *, struct iattr *);
+int may_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ unsigned int ia_valid);
+int setattr_prepare(struct mnt_idmap *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
-extern void setattr_copy(struct inode *inode, const struct iattr *attr);
+void setattr_copy(struct mnt_idmap *, struct inode *inode,
+ const struct iattr *attr);
extern int file_update_time(struct file *file);
-static inline bool io_is_direct(struct file *filp)
+static inline bool file_is_dax(const struct file *file)
{
- return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
+ return file && IS_DAX(file->f_mapping->host);
}
-static inline bool vma_is_dax(struct vm_area_struct *vma)
+static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
- return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+ return file_is_dax(vma->vm_file);
+}
+
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+{
+ struct inode *inode;
+
+ if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
+ return false;
+ if (!vma_is_dax(vma))
+ return false;
+ inode = file_inode(vma->vm_file);
+ if (S_ISCHR(inode->i_mode))
+ return false; /* device-dax */
+ return true;
}
static inline int iocb_flags(struct file *file)
@@ -3138,46 +3356,60 @@ static inline int iocb_flags(struct file *file)
int res = 0;
if (file->f_flags & O_APPEND)
res |= IOCB_APPEND;
- if (io_is_direct(file))
+ if (file->f_flags & O_DIRECT)
res |= IOCB_DIRECT;
- if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+ if (file->f_flags & O_DSYNC)
res |= IOCB_DSYNC;
if (file->f_flags & __O_SYNC)
res |= IOCB_SYNC;
return res;
}
-static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
+static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags,
+ int rw_type)
{
+ int kiocb_flags = 0;
+
+ /* make sure there's no overlap between RWF and private IOCB flags */
+ BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD);
+
+ if (!flags)
+ return 0;
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
+ if (unlikely((flags & RWF_APPEND) && (flags & RWF_NOAPPEND)))
+ return -EINVAL;
if (flags & RWF_NOWAIT) {
- if (!(ki->ki_filp->f_mode & FMODE_AIO_NOWAIT))
+ if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- ki->ki_flags |= IOCB_NOWAIT;
}
- if (flags & RWF_HIPRI)
- ki->ki_flags |= IOCB_HIPRI;
- if (flags & RWF_DSYNC)
- ki->ki_flags |= IOCB_DSYNC;
+ if (flags & RWF_ATOMIC) {
+ if (rw_type != WRITE)
+ return -EOPNOTSUPP;
+ if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE))
+ return -EOPNOTSUPP;
+ }
+ if (flags & RWF_DONTCACHE) {
+ /* file system must support it */
+ if (!(ki->ki_filp->f_op->fop_flags & FOP_DONTCACHE))
+ return -EOPNOTSUPP;
+ /* DAX mappings not supported */
+ if (IS_DAX(ki->ki_filp->f_mapping->host))
+ return -EOPNOTSUPP;
+ }
+ kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
if (flags & RWF_SYNC)
- ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
- return 0;
-}
+ kiocb_flags |= IOCB_DSYNC;
-static inline ino_t parent_ino(struct dentry *dentry)
-{
- ino_t res;
+ if ((flags & RWF_NOAPPEND) && (ki->ki_flags & IOCB_APPEND)) {
+ if (IS_APPEND(file_inode(ki->ki_filp)))
+ return -EPERM;
+ ki->ki_flags &= ~IOCB_APPEND;
+ }
- /*
- * Don't strictly need d_lock here? If the parent ino could change
- * then surely we'd have a deeper race in the caller?
- */
- spin_lock(&dentry->d_lock);
- res = dentry->d_parent->d_inode->i_ino;
- spin_unlock(&dentry->d_lock);
- return res;
+ ki->ki_flags |= kiocb_flags;
+ return 0;
}
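/*
 * Illustrative sketch, not part of the patch: how a write entry point in
 * the style of pwritev2() might apply user-supplied RWF_* flags to a kiocb
 * before issuing I/O. "demo_do_write" is hypothetical.
 */
static ssize_t demo_do_write(struct kiocb *kiocb, struct iov_iter *iter,
			     rwf_t flags)
{
	int ret;

	/* translate RWF_* into IOCB_* and validate against the file's state */
	ret = kiocb_set_rw_flags(kiocb, flags, WRITE);
	if (ret)
		return ret;
	/* ... hand the kiocb and iter to ->write_iter() ... */
	return 0;
}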
/* Transaction based IO helpers */
@@ -3188,7 +3420,7 @@ static inline ino_t parent_ino(struct dentry *dentry)
*/
struct simple_transaction_argresp {
ssize_t size;
- char data[0];
+ char data[];
};
#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
@@ -3217,7 +3449,7 @@ void simple_transaction_set(struct file *file, size_t n);
* All attributes contain a text representation of a numeric value
* that are accessed with the get() and set() functions.
*/
-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -3228,10 +3460,16 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = simple_attr_read, \
- .write = simple_attr_write, \
+ .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \
.llseek = generic_file_llseek, \
}
+#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
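/*
 * Illustrative sketch, not part of the patch: the classic debugfs-style use
 * of DEFINE_SIMPLE_ATTRIBUTE. "demo_threshold" is a hypothetical variable;
 * signed values would use DEFINE_SIMPLE_ATTRIBUTE_SIGNED with "%lld\n".
 */
static u64 demo_threshold;

static int demo_threshold_get(void *data, u64 *val)
{
	*val = demo_threshold;
	return 0;
}

static int demo_threshold_set(void *data, u64 val)
{
	demo_threshold = val;
	return 0;
}

/* generates demo_threshold_fops with open/read/write/release wired up */
DEFINE_SIMPLE_ATTRIBUTE(demo_threshold_fops, demo_threshold_get,
			demo_threshold_set, "%llu\n");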
static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
{
@@ -3246,39 +3484,34 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
struct ctl_table;
-int proc_nr_files(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_dentry(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_inodes(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-int __init get_filesystem_list(char *buf);
+int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
-#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
-#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
- (flag & __FMODE_NONOTIFY)))
+#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
static inline bool is_sxid(umode_t mode)
{
- return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
+ return mode & (S_ISUID | S_ISGID);
}
-static inline int check_sticky(struct inode *dir, struct inode *inode)
+static inline int check_sticky(struct mnt_idmap *idmap,
+ struct inode *dir, struct inode *inode)
{
if (!(dir->i_mode & S_ISVTX))
return 0;
- return __check_sticky(dir, inode);
+ return __check_sticky(idmap, dir, inode);
}
static inline void inode_has_no_xattr(struct inode *inode)
{
- if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
+ if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & SB_NOSEC))
inode->i_flags |= S_NOSEC;
}
@@ -3291,17 +3524,17 @@ static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
- return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
+ return ctx->actor(ctx, name, namelen, ctx->pos, ino, type);
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, ".", 1, ctx->pos,
- file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
+ file->f_path.dentry->d_inode->i_ino, DT_DIR);
}
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
- parent_ino(file->f_path.dentry), DT_DIR) == 0;
+ d_parent_ino(file->f_path.dentry), DT_DIR);
}
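/*
 * Illustrative sketch, not part of the patch: the canonical shape of a
 * ->iterate_shared() built on the dir_emit helpers (dir_emit_dots() is
 * declared just below). The real entry-walking loop is elided.
 */
static int demo_readdir(struct file *file, struct dir_context *ctx)
{
	/* emit "." and ".." first; bail out early if the buffer fills */
	if (!dir_emit_dots(file, ctx))
		return 0;

	/* for each entry: stop as soon as dir_emit() returns false */
	if (ctx->pos == 2 && !dir_emit(ctx, "example", 7, 100, DT_REG))
		return 0;
	ctx->pos++;
	return 0;
}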
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
@@ -3334,4 +3567,43 @@ static inline bool dir_relax_shared(struct inode *inode)
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);
+/* mm/fadvise.c */
+extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice);
+extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice);
+
+static inline bool vfs_empty_path(int dfd, const char __user *path)
+{
+ char c;
+
+ if (dfd < 0)
+ return false;
+
+	/* We now allow NULL to be used for an empty path. */
+ if (!path)
+ return true;
+
+ if (unlikely(get_user(c, path)))
+ return false;
+
+ return !c;
+}
+
+int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter);
+
+static inline bool extensible_ioctl_valid(unsigned int cmd_a,
+ unsigned int cmd_b, size_t min_size)
+{
+ if (_IOC_DIR(cmd_a) != _IOC_DIR(cmd_b))
+ return false;
+ if (_IOC_TYPE(cmd_a) != _IOC_TYPE(cmd_b))
+ return false;
+ if (_IOC_NR(cmd_a) != _IOC_NR(cmd_b))
+ return false;
+ if (_IOC_SIZE(cmd_a) < min_size)
+ return false;
+ return true;
+}
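/*
 * Illustrative sketch, not part of the patch: validating a growable-struct
 * ioctl. "DEMO_IOC_GETINFO" and "struct demo_info_v0" are hypothetical; the
 * caller passes the user-supplied cmd, the canonical cmd, and the v0 size.
 */
struct demo_info_v0 {
	__u64 flags;
	__u64 size;	/* newer versions may append fields after this */
};

#define DEMO_IOC_GETINFO _IOR('D', 1, struct demo_info_v0)

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* accept larger (newer) layouts, reject mismatched dir/type/nr */
	if (!extensible_ioctl_valid(cmd, DEMO_IOC_GETINFO,
				    sizeof(struct demo_info_v0)))
		return -ENOTTY;
	/* ... copy_to_user() the requested info ... */
	return 0;
}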
+
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fs/super.h b/include/linux/fs/super.h
new file mode 100644
index 000000000000..f21ffbb6dea5
--- /dev/null
+++ b/include/linux/fs/super.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FS_SUPER_H
+#define _LINUX_FS_SUPER_H
+
+#include <linux/fs/super_types.h>
+#include <linux/unicode.h>
+
+/*
+ * These are internal functions, please use sb_start_{write,pagefault,intwrite}
+ * instead.
+ */
+static inline void __sb_end_write(struct super_block *sb, int level)
+{
+ percpu_up_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline void __sb_start_write(struct super_block *sb, int level)
+{
+ percpu_down_read_freezable(sb->s_writers.rw_sem + level - 1, true);
+}
+
+static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
+{
+ return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
+}
+
+#define __sb_writers_acquired(sb, lev) \
+ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev) - 1], 1, _THIS_IP_)
+#define __sb_writers_release(sb, lev) \
+ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev) - 1], _THIS_IP_)
+
+/**
+ * __sb_write_started - check if sb freeze level is held
+ * @sb: the super we write to
+ * @level: the freeze level
+ *
+ * * > 0 - sb freeze level is held
+ * * 0 - sb freeze level is not held
+ * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
+ */
+static inline int __sb_write_started(const struct super_block *sb, int level)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
+}
+
+/**
+ * sb_write_started - check if SB_FREEZE_WRITE is held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_not_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
+}
+
+/**
+ * sb_end_write - drop write access to a superblock
+ * @sb: the super we wrote to
+ *
+ * Decrement number of writers to the filesystem. Wake up possible waiters
+ * wanting to freeze the filesystem.
+ */
+static inline void sb_end_write(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_end_pagefault - drop write access to a superblock from a page fault
+ * @sb: the super we wrote to
+ *
+ * Decrement number of processes handling write page fault to the filesystem.
+ * Wake up possible waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_pagefault(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
+}
+
+/**
+ * sb_end_intwrite - drop write access to a superblock for internal fs purposes
+ * @sb: the super we wrote to
+ *
+ * Decrement fs-internal number of writers to the filesystem. Wake up possible
+ * waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_intwrite(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_FS);
+}
+
+/**
+ * sb_start_write - get write access to a superblock
+ * @sb: the super we write to
+ *
+ * When a process wants to write data or metadata to a file system (i.e. dirty
+ * a page or an inode), it should embed the operation in a sb_start_write() -
+ * sb_end_write() pair to get exclusion against file system freezing. This
+ * function increments number of writers preventing freezing. If the file
+ * system is already frozen, the function waits until the file system is
+ * thawed.
+ *
+ * Since freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. Generally,
+ * freeze protection should be the outermost lock. In particular, we have:
+ *
+ * sb_start_write
+ * -> i_rwsem (write path, truncate, directory ops, ...)
+ * -> s_umount (freeze_super, thaw_super)
+ */
+static inline void sb_start_write(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_WRITE);
+}
+
+DEFINE_GUARD(super_write,
+ struct super_block *,
+ sb_start_write(_T),
+ sb_end_write(_T))
+
+static inline bool sb_start_write_trylock(struct super_block *sb)
+{
+ return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
+}
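/*
 * Illustrative sketch, not part of the patch: pairing freeze protection
 * around a data-dirtying operation, in both the explicit form and the
 * scope-based form enabled by the super_write guard (guard() comes from
 * <linux/cleanup.h>).
 */
static void demo_write_op(struct super_block *sb)
{
	sb_start_write(sb);		/* blocks while the fs is frozen */
	/* ... dirty pages or inodes here ... */
	sb_end_write(sb);
}

static void demo_write_op_guarded(struct super_block *sb)
{
	guard(super_write)(sb);		/* sb_end_write() runs on scope exit */
	/* ... dirty pages or inodes here ... */
}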
+
+/**
+ * sb_start_pagefault - get write access to a superblock from a page fault
+ * @sb: the super we write to
+ *
+ * When a process starts handling write page fault, it should embed the
+ * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
+ * exclusion against file system freezing. This is needed since the page fault
+ * is going to dirty a page. This function increments the number of running
+ * page faults, preventing freezing. If the file system is already frozen, the
+ * function waits until the file system is thawed.
+ *
+ * Since page fault freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. It is advised to
+ * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
+ * handling code implies lock dependency:
+ *
+ * mmap_lock
+ * -> sb_start_pagefault
+ */
+static inline void sb_start_pagefault(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
+}
+
+/**
+ * sb_start_intwrite - get write access to a superblock for internal fs purposes
+ * @sb: the super we write to
+ *
+ * This is the third level of protection against filesystem freezing. It is
+ * free for use by a filesystem. The only requirement is that it must rank
+ * below sb_start_pagefault.
+ *
+ * For example, a filesystem can call sb_start_intwrite() when starting a
+ * transaction, which somewhat eases handling of freezing for internal sources
+ * of filesystem changes (internal fs threads, discarding preallocation on file
+ * close, etc.).
+ */
+static inline void sb_start_intwrite(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_FS);
+}
+
+static inline bool sb_start_intwrite_trylock(struct super_block *sb)
+{
+ return __sb_start_write_trylock(sb, SB_FREEZE_FS);
+}
+
+static inline bool sb_rdonly(const struct super_block *sb)
+{
+ return sb->s_flags & SB_RDONLY;
+}
+
+static inline bool sb_is_blkdev_sb(struct super_block *sb)
+{
+ return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
+}
+
+#if IS_ENABLED(CONFIG_UNICODE)
+static inline struct unicode_map *sb_encoding(const struct super_block *sb)
+{
+ return sb->s_encoding;
+}
+
+/* Check whether two super blocks have the same encoding and encoding flags */
+static inline bool sb_same_encoding(const struct super_block *sb1,
+ const struct super_block *sb2)
+{
+ if (sb1->s_encoding == sb2->s_encoding)
+ return true;
+
+ return (sb1->s_encoding && sb2->s_encoding &&
+ (sb1->s_encoding->version == sb2->s_encoding->version) &&
+ (sb1->s_encoding_flags == sb2->s_encoding_flags));
+}
+#else
+static inline struct unicode_map *sb_encoding(const struct super_block *sb)
+{
+ return NULL;
+}
+
+static inline bool sb_same_encoding(const struct super_block *sb1,
+ const struct super_block *sb2)
+{
+ return true;
+}
+#endif
+
+static inline bool sb_has_encoding(const struct super_block *sb)
+{
+ return !!sb_encoding(sb);
+}
+
+int sb_set_blocksize(struct super_block *sb, int size);
+int __must_check sb_min_blocksize(struct super_block *sb, int size);
+
+int freeze_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
+int thaw_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
+
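/*
 * Illustrative sketch, not part of the patch: a kernel-initiated exclusive
 * freeze/thaw cycle. With FREEZE_EXCL, only the holder that passed the
 * matching owner cookie can thaw; "demo_owner" is hypothetical.
 */
static int demo_freeze_cycle(struct super_block *sb, const void *demo_owner)
{
	int ret;

	ret = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL, demo_owner);
	if (ret)
		return ret;
	/* ... the filesystem is quiesced here ... */
	return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL, demo_owner);
}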
+#endif /* _LINUX_FS_SUPER_H */
diff --git a/include/linux/fs/super_types.h b/include/linux/fs/super_types.h
new file mode 100644
index 000000000000..6bd3009e09b3
--- /dev/null
+++ b/include/linux/fs/super_types.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FS_SUPER_TYPES_H
+#define _LINUX_FS_SUPER_TYPES_H
+
+#include <linux/fs_dirent.h>
+#include <linux/errseq.h>
+#include <linux/list_lru.h>
+#include <linux/list.h>
+#include <linux/list_bl.h>
+#include <linux/llist.h>
+#include <linux/uidgid.h>
+#include <linux/uuid.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/workqueue_types.h>
+#include <linux/quota.h>
+
+struct backing_dev_info;
+struct block_device;
+struct dentry;
+struct dentry_operations;
+struct dquot_operations;
+struct export_operations;
+struct file;
+struct file_system_type;
+struct fscrypt_operations;
+struct fsnotify_sb_info;
+struct fsverity_operations;
+struct kstatfs;
+struct mount;
+struct mtd_info;
+struct quotactl_ops;
+struct shrinker;
+struct unicode_map;
+struct user_namespace;
+struct workqueue_struct;
+struct writeback_control;
+struct xattr_handler;
+
+extern struct super_block *blockdev_superblock;
+
+/* Possible states of 'frozen' field */
+enum {
+ SB_UNFROZEN = 0, /* FS is unfrozen */
+ SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
+ SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
+ SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop internal threads if needed) */
+ SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
+};
+
+#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
+
+struct sb_writers {
+ unsigned short frozen; /* Is sb frozen? */
+ int freeze_kcount; /* How many kernel freeze requests? */
+ int freeze_ucount; /* How many userspace freeze requests? */
+ const void *freeze_owner; /* Owner of the freeze */
+ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
+};
+
+/**
+ * enum freeze_holder - holder of the freeze
+ * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
+ * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
+ * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ * @FREEZE_EXCL: a freeze that can only be undone by the owner
+ *
+ * Indicate who the owner of the freeze or thaw request is and whether
+ * the freeze needs to be exclusive or can nest.
+ * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
+ * same holder aren't allowed. It is however allowed to hold a single
+ * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
+ * the same time. This is relied upon by some filesystems during online
+ * repair or similar.
+ */
+enum freeze_holder {
+ FREEZE_HOLDER_KERNEL = (1U << 0),
+ FREEZE_HOLDER_USERSPACE = (1U << 1),
+ FREEZE_MAY_NEST = (1U << 2),
+ FREEZE_EXCL = (1U << 3),
+};
+
+struct super_operations {
+ struct inode *(*alloc_inode)(struct super_block *sb);
+ void (*destroy_inode)(struct inode *inode);
+ void (*free_inode)(struct inode *inode);
+ void (*dirty_inode)(struct inode *inode, int flags);
+ int (*write_inode)(struct inode *inode, struct writeback_control *wbc);
+ int (*drop_inode)(struct inode *inode);
+ void (*evict_inode)(struct inode *inode);
+ void (*put_super)(struct super_block *sb);
+ int (*sync_fs)(struct super_block *sb, int wait);
+ int (*freeze_super)(struct super_block *sb, enum freeze_holder who,
+ const void *owner);
+ int (*freeze_fs)(struct super_block *sb);
+ int (*thaw_super)(struct super_block *sb, enum freeze_holder who,
+ const void *owner);
+ int (*unfreeze_fs)(struct super_block *sb);
+ int (*statfs)(struct dentry *dentry, struct kstatfs *kstatfs);
+ int (*remount_fs) (struct super_block *, int *, char *);
+ void (*umount_begin)(struct super_block *sb);
+
+ int (*show_options)(struct seq_file *seq, struct dentry *dentry);
+ int (*show_devname)(struct seq_file *seq, struct dentry *dentry);
+ int (*show_path)(struct seq_file *seq, struct dentry *dentry);
+ int (*show_stats)(struct seq_file *seq, struct dentry *dentry);
+#ifdef CONFIG_QUOTA
+ ssize_t (*quota_read)(struct super_block *sb, int type, char *data,
+ size_t len, loff_t off);
+ ssize_t (*quota_write)(struct super_block *sb, int type,
+ const char *data, size_t len, loff_t off);
+ struct dquot __rcu **(*get_dquots)(struct inode *inode);
+#endif
+ long (*nr_cached_objects)(struct super_block *sb,
+ struct shrink_control *sc);
+ long (*free_cached_objects)(struct super_block *sb,
+ struct shrink_control *sc);
+ /*
+ * If a filesystem can support graceful removal of a device and
+ * continue read-write operations, implement this callback.
+ *
+ * Return 0 if the filesystem can continue read-write.
+	 * A non-zero return value, or the absence of this callback, means
+	 * the fs will be shut down as usual.
+ */
+ int (*remove_bdev)(struct super_block *sb, struct block_device *bdev);
+ void (*shutdown)(struct super_block *sb);
+};
+
+struct super_block {
+ struct list_head s_list; /* Keep this first */
+ dev_t s_dev; /* search index; _not_ kdev_t */
+ unsigned char s_blocksize_bits;
+ unsigned long s_blocksize;
+ loff_t s_maxbytes; /* Max file size */
+ struct file_system_type *s_type;
+ const struct super_operations *s_op;
+ const struct dquot_operations *dq_op;
+ const struct quotactl_ops *s_qcop;
+ const struct export_operations *s_export_op;
+ unsigned long s_flags;
+ unsigned long s_iflags; /* internal SB_I_* flags */
+ unsigned long s_magic;
+ struct dentry *s_root;
+ struct rw_semaphore s_umount;
+ int s_count;
+ atomic_t s_active;
+#ifdef CONFIG_SECURITY
+ void *s_security;
+#endif
+ const struct xattr_handler *const *s_xattr;
+#ifdef CONFIG_FS_ENCRYPTION
+ const struct fscrypt_operations *s_cop;
+ struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
+#endif
+#ifdef CONFIG_FS_VERITY
+ const struct fsverity_operations *s_vop;
+#endif
+#if IS_ENABLED(CONFIG_UNICODE)
+ struct unicode_map *s_encoding;
+ __u16 s_encoding_flags;
+#endif
+ struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
+ struct mount *s_mounts; /* list of mounts; _not_ for fs use */
+ struct block_device *s_bdev; /* can go away once we use an accessor for @s_bdev_file */
+ struct file *s_bdev_file;
+ struct backing_dev_info *s_bdi;
+ struct mtd_info *s_mtd;
+ struct hlist_node s_instances;
+ unsigned int s_quota_types; /* Bitmask of supported quota types */
+ struct quota_info s_dquot; /* Diskquota specific options */
+
+ struct sb_writers s_writers;
+
+ /*
+ * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
+ * s_fsnotify_info together for cache efficiency. They are frequently
+ * accessed and rarely modified.
+ */
+ void *s_fs_info; /* Filesystem private info */
+
+ /* Granularity of c/m/atime in ns (cannot be worse than a second) */
+ u32 s_time_gran;
+ /* Time limits for c/m/atime in seconds */
+ time64_t s_time_min;
+ time64_t s_time_max;
+#ifdef CONFIG_FSNOTIFY
+ u32 s_fsnotify_mask;
+ struct fsnotify_sb_info *s_fsnotify_info;
+#endif
+
+ /*
+ * q: why are s_id and s_sysfs_name not the same? both are human
+ * readable strings that identify the filesystem
+	 * a: s_id is allowed to change at runtime; it's used in log messages,
+	 * and we want it to change when a filesystem starts out on a single
+	 * device (s_id is the dev name) but another device is then hot added
+	 * and we have to switch to identifying it by UUID;
+	 * s_sysfs_name, by contrast, is a handle for programmatic access and
+	 * can't change at runtime
+ */
+ char s_id[32]; /* Informational name */
+ uuid_t s_uuid; /* UUID */
+ u8 s_uuid_len; /* Default 16, possibly smaller for weird filesystems */
+
+ /* if set, fs shows up under sysfs at /sys/fs/$FSTYP/s_sysfs_name */
+ char s_sysfs_name[UUID_STRING_LEN + 1];
+
+ unsigned int s_max_links;
+ unsigned int s_d_flags; /* default d_flags for dentries */
+
+ /*
+ * The next field is for VFS *only*. No filesystems have any business
+ * even looking at it. You had been warned.
+ */
+ struct mutex s_vfs_rename_mutex; /* Kludge */
+
+ /*
+ * Filesystem subtype. If non-empty the filesystem type field
+ * in /proc/mounts will be "type.subtype"
+ */
+ const char *s_subtype;
+
+ const struct dentry_operations *__s_d_op; /* default d_op for dentries */
+
+ struct shrinker *s_shrink; /* per-sb shrinker handle */
+
+ /* Number of inodes with nlink == 0 but still referenced */
+ atomic_long_t s_remove_count;
+
+ /* Read-only state of the superblock is being changed */
+ int s_readonly_remount;
+
+ /* per-sb errseq_t for reporting writeback errors via syncfs */
+ errseq_t s_wb_err;
+
+ /* AIO completions deferred from interrupt context */
+ struct workqueue_struct *s_dio_done_wq;
+ struct hlist_head s_pins;
+
+ /*
+ * Owning user namespace and default context in which to
+ * interpret filesystem uids, gids, quotas, device nodes,
+ * xattrs and security labels.
+ */
+ struct user_namespace *s_user_ns;
+
+ /*
+ * The list_lru structure is essentially just a pointer to a table
+ * of per-node lru lists, each of which has its own spinlock.
+ * There is no need to put them into separate cachelines.
+ */
+ struct list_lru s_dentry_lru;
+ struct list_lru s_inode_lru;
+ struct rcu_head rcu;
+ struct work_struct destroy_work;
+
+ struct mutex s_sync_lock; /* sync serialisation lock */
+
+ /*
+ * Indicates how deep in a filesystem stack this SB is
+ */
+ int s_stack_depth;
+
+ /* s_inode_list_lock protects s_inodes */
+ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
+ struct list_head s_inodes; /* all inodes */
+
+ spinlock_t s_inode_wblist_lock;
+ struct list_head s_inodes_wb; /* writeback inodes */
+ long s_min_writeback_pages;
+} __randomize_layout;
+
+/*
+ * sb->s_flags. Note that these keep the same values as the equivalent
+ * MS_* flags wherever a flag is represented in both.
+ */
+#define SB_RDONLY BIT(0) /* Mount read-only */
+#define SB_NOSUID BIT(1) /* Ignore suid and sgid bits */
+#define SB_NODEV BIT(2) /* Disallow access to device special files */
+#define SB_NOEXEC BIT(3) /* Disallow program execution */
+#define SB_SYNCHRONOUS BIT(4) /* Writes are synced at once */
+#define SB_MANDLOCK BIT(6) /* Allow mandatory locks on an FS */
+#define SB_DIRSYNC BIT(7) /* Directory modifications are synchronous */
+#define SB_NOATIME BIT(10) /* Do not update access times. */
+#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
+#define SB_SILENT BIT(15)
+#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
+#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
+#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
+#define SB_I_VERSION BIT(23) /* Update inode I_version field */
+#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define SB_DEAD BIT(21)
+#define SB_DYING BIT(24)
+#define SB_FORCE BIT(27)
+#define SB_NOSEC BIT(28)
+#define SB_BORN BIT(29)
+#define SB_ACTIVE BIT(30)
+#define SB_NOUSER BIT(31)
+
+/* These flags relate to encoding and casefolding */
+#define SB_ENC_STRICT_MODE_FL (1 << 0)
+#define SB_ENC_NO_COMPAT_FALLBACK_FL (1 << 1)
+
+#define sb_has_strict_encoding(sb) \
+ (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
+
+#if IS_ENABLED(CONFIG_UNICODE)
+#define sb_no_casefold_compat_fallback(sb) \
+ (sb->s_encoding_flags & SB_ENC_NO_COMPAT_FALLBACK_FL)
+#else
+#define sb_no_casefold_compat_fallback(sb) (1)
+#endif
+
+/* sb->s_iflags */
+#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
+#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
+#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
+
+/* sb->s_iflags to limit user namespace mounts */
+#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
+#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
+#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_EVM_HMAC_UNSUPPORTED 0x00000080
+
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
+#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
+#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
+#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
+#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
+#define SB_I_ALLOW_HSM 0x00004000 /* Allow HSM events on this superblock */
+
+#endif /* _LINUX_FS_SUPER_TYPES_H */
diff --git a/include/linux/fs_api.h b/include/linux/fs_api.h
new file mode 100644
index 000000000000..83be38d6d413
--- /dev/null
+++ b/include/linux/fs_api.h
@@ -0,0 +1 @@
+#include <linux/fs.h>
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
new file mode 100644
index 000000000000..0d6c8a6d7be2
--- /dev/null
+++ b/include/linux/fs_context.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Filesystem superblock creation and reconfiguration context.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_FS_CONTEXT_H
+#define _LINUX_FS_CONTEXT_H
+
+#include <linux/kernel.h>
+#include <linux/refcount.h>
+#include <linux/errno.h>
+#include <linux/security.h>
+#include <linux/mutex.h>
+
+struct cred;
+struct dentry;
+struct file_operations;
+struct file_system_type;
+struct mnt_namespace;
+struct net;
+struct pid_namespace;
+struct super_block;
+struct user_namespace;
+struct vfsmount;
+struct path;
+
+enum fs_context_purpose {
+ FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */
+ FS_CONTEXT_FOR_SUBMOUNT, /* New superblock for automatic submount */
+ FS_CONTEXT_FOR_RECONFIGURE, /* Superblock reconfiguration (remount) */
+};
+
+/*
+ * Userspace usage phase for fsopen/fspick.
+ */
+enum fs_context_phase {
+ FS_CONTEXT_CREATE_PARAMS, /* Loading params for sb creation */
+ FS_CONTEXT_CREATING, /* A superblock is being created */
+ FS_CONTEXT_AWAITING_MOUNT, /* Superblock created, awaiting fsmount() */
+ FS_CONTEXT_AWAITING_RECONF, /* Awaiting initialisation for reconfiguration */
+ FS_CONTEXT_RECONF_PARAMS, /* Loading params for reconfiguration */
+ FS_CONTEXT_RECONFIGURING, /* Reconfiguring the superblock */
+ FS_CONTEXT_FAILED, /* Failed to correctly transition a context */
+};
+
+/*
+ * Type of parameter value.
+ */
+enum fs_value_type {
+ fs_value_is_undefined,
+ fs_value_is_flag, /* Value not given a value */
+ fs_value_is_string, /* Value is a string */
+ fs_value_is_blob, /* Value is a binary blob */
+ fs_value_is_filename, /* Value is a filename* + dirfd */
+ fs_value_is_file, /* Value is a file* */
+};
+
+/*
+ * Configuration parameter.
+ */
+struct fs_parameter {
+ const char *key; /* Parameter name */
+ enum fs_value_type type:8; /* The type of value here */
+ union {
+ char *string;
+ void *blob;
+ struct filename *name;
+ struct file *file;
+ };
+ size_t size;
+ int dirfd;
+};
+
+struct p_log {
+ const char *prefix;
+ struct fc_log *log;
+};
+
+/*
+ * Filesystem context for holding the parameters used in the creation or
+ * reconfiguration of a superblock.
+ *
+ * Superblock creation fills in ->root whereas reconfiguration begins with this
+ * already set.
+ *
+ * See Documentation/filesystems/mount_api.rst
+ */
+struct fs_context {
+ const struct fs_context_operations *ops;
+ struct mutex uapi_mutex; /* Userspace access mutex */
+ struct file_system_type *fs_type;
+ void *fs_private; /* The filesystem's context */
+ void *sget_key;
+ struct dentry *root; /* The root and superblock */
+ struct user_namespace *user_ns; /* The user namespace for this mount */
+ struct net *net_ns; /* The network namespace for this mount */
+ const struct cred *cred; /* The mounter's credentials */
+ struct p_log log; /* Logging buffer */
+ const char *source; /* The source name (eg. dev path) */
+ void *security; /* LSM options */
+ void *s_fs_info; /* Proposed s_fs_info */
+ unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
+ unsigned int sb_flags_mask; /* Superblock flags that were changed */
+ unsigned int s_iflags; /* OR'd with sb->s_iflags */
+ enum fs_context_purpose purpose:8;
+ enum fs_context_phase phase:8; /* The phase the context is in */
+ bool need_free:1; /* Need to call ops->free() */
+ bool global:1; /* Goes into &init_user_ns */
+ bool oldapi:1; /* Coming from mount(2) */
+ bool exclusive:1; /* create new superblock, reject existing one */
+};
+
+struct fs_context_operations {
+ void (*free)(struct fs_context *fc);
+ int (*dup)(struct fs_context *fc, struct fs_context *src_fc);
+ int (*parse_param)(struct fs_context *fc, struct fs_parameter *param);
+ int (*parse_monolithic)(struct fs_context *fc, void *data);
+ int (*get_tree)(struct fs_context *fc);
+ int (*reconfigure)(struct fs_context *fc);
+};
+
+/*
+ * fs_context manipulation functions.
+ */
+extern struct fs_context *fs_context_for_mount(struct file_system_type *fs_type,
+ unsigned int sb_flags);
+extern struct fs_context *fs_context_for_reconfigure(struct dentry *dentry,
+ unsigned int sb_flags,
+ unsigned int sb_flags_mask);
+extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_type,
+ struct dentry *reference);
+
+extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
+extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
+extern int vfs_parse_fs_qstr(struct fs_context *fc, const char *key,
+ const struct qstr *value);
+static inline int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ const char *value)
+{
+ return vfs_parse_fs_qstr(fc, key, value ? &QSTR(value) : NULL);
+}
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+ char *(*sep)(char **));
+extern int generic_parse_monolithic(struct fs_context *fc, void *data);
+extern int vfs_get_tree(struct fs_context *fc);
+extern void put_fs_context(struct fs_context *fc);
+extern int vfs_parse_fs_param_source(struct fs_context *fc,
+ struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
+
+extern int get_tree_nodev(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
+extern int get_tree_single(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
+extern int get_tree_keyed(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc),
+ void *key);
+
+int setup_bdev_super(struct super_block *sb, int sb_flags,
+ struct fs_context *fc);
+
+#define GET_TREE_BDEV_QUIET_LOOKUP 0x0001
+int get_tree_bdev_flags(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc), unsigned int flags);
+
+extern int get_tree_bdev(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
+
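/*
 * Illustrative sketch, not part of the patch: the minimal ->get_tree() of a
 * nodev (memory-backed) filesystem. "demo_fill_super" and the magic number
 * are hypothetical.
 */
static int demo_fill_super(struct super_block *sb, struct fs_context *fc)
{
	sb->s_magic = 0x64656d6f;	/* hypothetical magic */
	/* ... allocate the root inode and set sb->s_root ... */
	return 0;
}

static int demo_get_tree(struct fs_context *fc)
{
	/* allocates a fresh superblock and calls demo_fill_super() on it */
	return get_tree_nodev(fc, demo_fill_super);
}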
+extern const struct file_operations fscontext_fops;
+
+/*
+ * Mount error, warning and informational message logging. This structure is
+ * shareable between a mount and a subordinate mount.
+ */
+struct fc_log {
+ refcount_t usage;
+ u8 head; /* Insertion index in buffer[] */
+ u8 tail; /* Removal index in buffer[] */
+ u8 need_free; /* Mask of kfree'able items in buffer[] */
+ struct module *owner; /* Owner module for strings that don't then need freeing */
+ char *buffer[8];
+};
+
+extern __attribute__((format(printf, 4, 5)))
+void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...);
+
+#define __logfc(fc, l, fmt, ...) \
+ logfc((fc)->log.log, NULL, (l), (fmt), ## __VA_ARGS__)
+#define __plogp(p, prefix, l, fmt, ...) \
+ logfc((p)->log, (prefix), (l), (fmt), ## __VA_ARGS__)
+#define __plog(p, l, fmt, ...) __plogp(p, (p)->prefix, l, fmt, ## __VA_ARGS__)
+
+/**
+ * infof - Store supplementary informational message
+ * @fc: The context in which to log the informational message
+ * @fmt: The format string
+ *
+ * Store the supplementary informational message for the process if the process
+ * has enabled the facility.
+ */
+#define infof(fc, fmt, ...) __logfc(fc, 'i', fmt, ## __VA_ARGS__)
+#define info_plog(p, fmt, ...) __plog(p, 'i', fmt, ## __VA_ARGS__)
+#define infofc(fc, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
+#define infofcp(fc, prefix, fmt, ...) \
+ __plogp((&(fc)->log), prefix, 'i', fmt, ## __VA_ARGS__)
+
+/**
+ * warnf - Store supplementary warning message
+ * @fc: The context in which to log the error message
+ * @fmt: The format string
+ *
+ * Store the supplementary warning message for the process if the process has
+ * enabled the facility.
+ */
+#define warnf(fc, fmt, ...) __logfc(fc, 'w', fmt, ## __VA_ARGS__)
+#define warn_plog(p, fmt, ...) __plog(p, 'w', fmt, ## __VA_ARGS__)
+#define warnfc(fc, fmt, ...) __plog((&(fc)->log), 'w', fmt, ## __VA_ARGS__)
+#define warnfcp(fc, prefix, fmt, ...) \
+ __plogp((&(fc)->log), prefix, 'w', fmt, ## __VA_ARGS__)
+
+/**
+ * errorf - Store supplementary error message
+ * @fc: The context in which to log the error message
+ * @fmt: The format string
+ *
+ * Store the supplementary error message for the process if the process has
+ * enabled the facility.
+ */
+#define errorf(fc, fmt, ...) __logfc(fc, 'e', fmt, ## __VA_ARGS__)
+#define error_plog(p, fmt, ...) __plog(p, 'e', fmt, ## __VA_ARGS__)
+#define errorfc(fc, fmt, ...) __plog((&(fc)->log), 'e', fmt, ## __VA_ARGS__)
+#define errorfcp(fc, prefix, fmt, ...) \
+ __plogp((&(fc)->log), prefix, 'e', fmt, ## __VA_ARGS__)
+
+/**
+ * invalf - Store supplementary invalid argument error message
+ * @fc: The context in which to log the error message
+ * @fmt: The format string
+ *
+ * Store the supplementary error message for the process if the process has
+ * enabled the facility and return -EINVAL.
+ */
+#define invalf(fc, fmt, ...) (errorf(fc, fmt, ## __VA_ARGS__), -EINVAL)
+#define inval_plog(p, fmt, ...) (error_plog(p, fmt, ## __VA_ARGS__), -EINVAL)
+#define invalfc(fc, fmt, ...) (errorfc(fc, fmt, ## __VA_ARGS__), -EINVAL)
+#define invalfcp(fc, prefix, fmt, ...) \
+ (errorfcp(fc, prefix, fmt, ## __VA_ARGS__), -EINVAL)
+
+#endif /* _LINUX_FS_CONTEXT_H */
diff --git a/include/linux/fs_dirent.h b/include/linux/fs_dirent.h
new file mode 100644
index 000000000000..92f75c5bac19
--- /dev/null
+++ b/include/linux/fs_dirent.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FS_DIRENT_H
+#define _LINUX_FS_DIRENT_H
+
+#include <linux/stat.h>
+#include <linux/types.h>
+
+/*
+ * This is a header for the common implementation of dirent
+ * to fs on-disk file type conversion. Although the fs on-disk
+ * bits are specific to every file system, in practice, many
+ * file systems use the exact same on-disk format to describe
+ * the lower 3 file type bits that represent the 7 POSIX file
+ * types.
+ *
+ * It is important to note that the definitions in this
+ * header MUST NOT change. This would break both the
+ * userspace ABI and the on-disk format of filesystems
+ * using this code.
+ *
+ * All those file systems can use this generic code for the
+ * conversions.
+ */
+
+/*
+ * struct dirent file types
+ * exposed to user via getdents(2), readdir(3)
+ *
+ * These match bits 12..15 of stat.st_mode
+ * (ie "(i_mode >> 12) & 15").
+ */
+#define S_DT_SHIFT 12
+#define S_DT(mode) (((mode) & S_IFMT) >> S_DT_SHIFT)
+#define S_DT_MASK (S_IFMT >> S_DT_SHIFT)
+
+/* these are defined by POSIX and also present in glibc's dirent.h */
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+#define DT_WHT 14
+
+#define DT_MAX (S_DT_MASK + 1) /* 16 */
+
+/*
+ * fs on-disk file types.
+ * Only the low 3 bits are used for the POSIX file types.
+ * Other bits are reserved for fs private use.
+ * These definitions are shared and used by multiple filesystems,
+ * and MUST NOT change under any circumstances.
+ *
+ * Note that no fs currently stores the whiteout type on-disk,
+ * so whiteout dirents are exposed to userspace as DT_CHR.
+ */
+#define FT_UNKNOWN 0
+#define FT_REG_FILE 1
+#define FT_DIR 2
+#define FT_CHRDEV 3
+#define FT_BLKDEV 4
+#define FT_FIFO 5
+#define FT_SOCK 6
+#define FT_SYMLINK 7
+
+#define FT_MAX 8
+
+/*
+ * Declarations for helper functions; the accompanying implementation
+ * is in fs/fs_dirent.c.
+ */
+extern unsigned char fs_ftype_to_dtype(unsigned int filetype);
+extern unsigned char fs_umode_to_ftype(umode_t mode);
+extern unsigned char fs_umode_to_dtype(umode_t mode);
+
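/*
 * Illustrative sketch, not part of the patch: the DT_* value of a regular
 * file falls straight out of the S_DT() arithmetic. S_IFREG is 0100000
 * octal, and 0100000 >> 12 == 8 == DT_REG.
 */
static unsigned char demo_dtype_of_regular_file(void)
{
	BUILD_BUG_ON(S_DT(S_IFREG) != DT_REG);		/* compile-time check */
	return fs_umode_to_dtype(S_IFREG | 0644);	/* also DT_REG */
}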
+#endif /* _LINUX_FS_DIRENT_H */
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
deleted file mode 100644
index 77d783f71527..000000000000
--- a/include/linux/fs_enet_pd.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Platform information definitions for the
- * universal Freescale Ethernet driver.
- *
- * Copyright (c) 2003 Intracom S.A.
- * by Pantelis Antoniou <panto@intracom.gr>
- *
- * 2005 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_ENET_PD_H
-#define FS_ENET_PD_H
-
-#include <linux/clk.h>
-#include <linux/string.h>
-#include <linux/of_mdio.h>
-#include <linux/if_ether.h>
-#include <asm/types.h>
-
-#define FS_ENET_NAME "fs_enet"
-
-enum fs_id {
- fsid_fec1,
- fsid_fec2,
- fsid_fcc1,
- fsid_fcc2,
- fsid_fcc3,
- fsid_scc1,
- fsid_scc2,
- fsid_scc3,
- fsid_scc4,
-};
-
-#define FS_MAX_INDEX 9
-
-static inline int fs_get_fec_index(enum fs_id id)
-{
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id - fsid_fec1;
- return -1;
-}
-
-static inline int fs_get_fcc_index(enum fs_id id)
-{
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id - fsid_fcc1;
- return -1;
-}
-
-static inline int fs_get_scc_index(enum fs_id id)
-{
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id - fsid_scc1;
- return -1;
-}
-
-static inline int fs_fec_index2id(int index)
-{
- int id = fsid_fec1 + index - 1;
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id;
- return FS_MAX_INDEX;
- }
-
-static inline int fs_fcc_index2id(int index)
-{
- int id = fsid_fcc1 + index - 1;
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id;
- return FS_MAX_INDEX;
-}
-
-static inline int fs_scc_index2id(int index)
-{
- int id = fsid_scc1 + index - 1;
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id;
- return FS_MAX_INDEX;
-}
-
-enum fs_mii_method {
- fsmii_fixed,
- fsmii_fec,
- fsmii_bitbang,
-};
-
-enum fs_ioport {
- fsiop_porta,
- fsiop_portb,
- fsiop_portc,
- fsiop_portd,
- fsiop_porte,
-};
-
-struct fs_mii_bit {
- u32 offset;
- u8 bit;
- u8 polarity;
-};
-struct fs_mii_bb_platform_info {
- struct fs_mii_bit mdio_dir;
- struct fs_mii_bit mdio_dat;
- struct fs_mii_bit mdc_dat;
- int delay; /* delay in us */
- int irq[32]; /* irqs per phy's */
-};
-
-struct fs_platform_info {
-
- void(*init_ioports)(struct fs_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
-
- u32 cp_page; /* CPM page */
- u32 cp_block; /* CPM sblock */
- u32 cp_command; /* CPM page/sblock/mcn */
-
- u32 clk_trx; /* some stuff for pins & mux configuration*/
- u32 clk_rx;
- u32 clk_tx;
- u32 clk_route;
- u32 clk_mask;
-
- u32 mem_offset;
- u32 dpram_offset;
- u32 fcc_regs_c;
-
- u32 device_flags;
-
- struct device_node *phy_node;
- const struct fs_mii_bus_info *bus_info;
-
- int rx_ring, tx_ring; /* number of buffers on rx */
- __u8 macaddr[ETH_ALEN]; /* mac address */
- int rx_copybreak; /* limit we copy small frames */
- int napi_weight; /* NAPI weight */
-
- int use_rmii; /* use RMII mode */
- int has_phy; /* if the network is phy container as well...*/
-
- struct clk *clk_per; /* 'per' clock for register access */
-};
-struct fs_mii_fec_platform_info {
- u32 irq[32];
- u32 mii_speed;
-};
-
-static inline int fs_get_id(struct fs_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SCC"))
- return fs_scc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FCC"))
- return fs_fcc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FEC"))
- return fs_fec_index2id(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
new file mode 100644
index 000000000000..5e8a3b546033
--- /dev/null
+++ b/include/linux/fs_parser.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Filesystem parameter description and parser
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_FS_PARSER_H
+#define _LINUX_FS_PARSER_H
+
+#include <linux/fs_context.h>
+
+struct path;
+
+struct constant_table {
+ const char *name;
+ int value;
+};
+
+struct fs_parameter_spec;
+struct fs_parse_result;
+typedef int fs_param_type(struct p_log *,
+ const struct fs_parameter_spec *,
+ struct fs_parameter *,
+ struct fs_parse_result *);
+/*
+ * The type of parameter expected.
+ */
+fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64,
+ fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev,
+ fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid,
+ fs_param_is_file_or_string;
+
+/*
+ * Specification of the type of value a parameter wants.
+ *
+ * Note that the fsparam_flag(), fsparam_string(), fsparam_u32(), ... macros
+ * should be used to generate elements of this type.
+ */
+struct fs_parameter_spec {
+ const char *name;
+ fs_param_type *type; /* The desired parameter type */
+ u8 opt; /* Option number (returned by fs_parse()) */
+ unsigned short flags;
+#define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */
+#define fs_param_can_be_empty 0x0004 /* "xxx=" is allowed */
+#define fs_param_deprecated 0x0008 /* The param is deprecated */
+ const void *data;
+};
+
+/*
+ * Result of parse.
+ */
+struct fs_parse_result {
+ bool negated; /* T if param was "noxxx" */
+ union {
+ bool boolean; /* For spec_bool */
+ int int_32; /* For spec_s32/spec_enum */
+ unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */
+ u64 uint_64; /* For spec_u64 */
+ kuid_t uid;
+ kgid_t gid;
+ };
+};
+
+extern int __fs_parse(struct p_log *log,
+ const struct fs_parameter_spec *desc,
+ struct fs_parameter *value,
+ struct fs_parse_result *result);
+
+static inline int fs_parse(struct fs_context *fc,
+ const struct fs_parameter_spec *desc,
+ struct fs_parameter *param,
+ struct fs_parse_result *result)
+{
+ return __fs_parse(&fc->log, desc, param, result);
+}
+
+extern int fs_lookup_param(struct fs_context *fc,
+ struct fs_parameter *param,
+ bool want_bdev,
+ unsigned int flags,
+ struct path *_path);
+
+extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
+
+extern const struct constant_table bool_names[];
+
+#ifdef CONFIG_VALIDATE_FS_PARSER
+extern bool fs_validate_description(const char *name,
+ const struct fs_parameter_spec *desc);
+#else
+static inline bool fs_validate_description(const char *name,
+ const struct fs_parameter_spec *desc)
+{ return true; }
+#endif
+
+/*
+ * Parameter type, name, index and flags element constructors. Use as:
+ *
+ * fsparam_xxxx("foo", Opt_foo)
+ *
+ * If the existing helpers are not enough, direct use of __fsparam() would
+ * work, but any such case is probably a sign that a new helper is needed.
+ * Helpers will remain stable; the low-level implementation may change.
+ */
+#define __fsparam(TYPE, NAME, OPT, FLAGS, DATA) \
+ { \
+ .name = NAME, \
+ .opt = OPT, \
+ .type = TYPE, \
+ .flags = FLAGS, \
+ .data = DATA \
+ }
+
+#define fsparam_flag(NAME, OPT) __fsparam(NULL, NAME, OPT, 0, NULL)
+#define fsparam_flag_no(NAME, OPT) \
+ __fsparam(NULL, NAME, OPT, fs_param_neg_with_no, NULL)
+#define fsparam_bool(NAME, OPT) __fsparam(fs_param_is_bool, NAME, OPT, 0, NULL)
+#define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0, NULL)
+#define fsparam_u32oct(NAME, OPT) \
+ __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
+#define fsparam_u32hex(NAME, OPT) \
+ __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)16)
+#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
+#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
+#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
+#define fsparam_string(NAME, OPT) \
+ __fsparam(fs_param_is_string, NAME, OPT, 0, NULL)
+#define fsparam_blob(NAME, OPT) __fsparam(fs_param_is_blob, NAME, OPT, 0, NULL)
+#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL)
+#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
+#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
+#define fsparam_file_or_string(NAME, OPT) \
+ __fsparam(fs_param_is_file_or_string, NAME, OPT, 0, NULL)
+#define fsparam_uid(NAME, OPT) __fsparam(fs_param_is_uid, NAME, OPT, 0, NULL)
+#define fsparam_gid(NAME, OPT) __fsparam(fs_param_is_gid, NAME, OPT, 0, NULL)
+
+/* String parameter that allows empty argument */
+#define fsparam_string_empty(NAME, OPT) \
+ __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
+
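/*
 * Illustrative sketch, not part of the patch: a parameter table and parse
 * step for a hypothetical filesystem taking "mode=<octal>" and a negatable
 * "acl"/"noacl" flag.
 */
enum { Opt_mode, Opt_acl };

static const struct fs_parameter_spec demo_param_specs[] = {
	fsparam_u32oct("mode", Opt_mode),
	fsparam_flag_no("acl", Opt_acl),
	{}
};

static int demo_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, demo_param_specs, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_mode:
		/* result.uint_32 holds the octal-parsed mode */
		break;
	case Opt_acl:
		/* result.negated is true when "noacl" was given */
		break;
	}
	return 0;
}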
+#endif /* _LINUX_FS_PARSER_H */
diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
index 3886b3bffd7f..bdd09fd2520c 100644
--- a/include/linux/fs_pin.h
+++ b/include/linux/fs_pin.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/wait.h>
struct fs_pin {
@@ -19,6 +20,5 @@ static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
}
void pin_remove(struct fs_pin *);
-void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *);
void pin_insert(struct fs_pin *, struct vfsmount *);
void pin_kill(struct fs_pin *);
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index da317c7163ab..0cc2fa283305 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -1,8 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FS_STACK_H
#define _LINUX_FS_STACK_H
/* This file defines generic functions used primarily by stackable
- * filesystems; none of these functions require i_mutex to be held.
+ * filesystems; none of these functions require i_rwsem to be held.
*/
#include <linux/fs.h>
@@ -15,15 +16,15 @@ extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
static inline void fsstack_copy_attr_atime(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
}
static inline void fsstack_copy_attr_times(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
- dest->i_mtime = src->i_mtime;
- dest->i_ctime = src->i_ctime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
+ inode_set_mtime_to_ts(dest, inode_get_mtime(src));
+ inode_set_ctime_to_ts(dest, inode_get_ctime(src));
}
#endif /* _LINUX_FS_STACK_H */
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 7a026240cbb1..0070764b790a 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -1,14 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FS_STRUCT_H
#define _LINUX_FS_STRUCT_H
+#include <linux/sched.h>
#include <linux/path.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
struct fs_struct {
int users;
- spinlock_t lock;
- seqcount_t seq;
+ seqlock_t seq;
int umask;
int in_exec;
struct path root, pwd;
@@ -25,20 +26,25 @@ extern int unshare_fs_struct(void);
static inline void get_fs_root(struct fs_struct *fs, struct path *root)
{
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
*root = fs->root;
path_get(root);
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
}
static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
{
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
*pwd = fs->pwd;
path_get(pwd);
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
}
extern bool current_chrooted(void);
+static inline int current_umask(void)
+{
+ return current->fs->umask;
+}
+
#endif /* _LINUX_FS_STRUCT_H */
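
A sketch of how the snapshot helpers are meant to be used (examplefs_report_cwd() is a hypothetical debugging helper): get_fs_pwd() hands back a stable copy of the path under the seqlock and takes a reference on it, so the caller must drop that reference with path_put().

	#include <linux/fs_struct.h>
	#include <linux/path.h>
	#include <linux/printk.h>
	#include <linux/sched.h>

	static void examplefs_report_cwd(void)
	{
		struct path pwd;

		get_fs_pwd(current->fs, &pwd);	/* takes a reference on the path */
		pr_info("cwd=%pd4, umask=%04o\n", pwd.dentry, current_umask());
		path_put(&pwd);			/* drop the reference again */
	}
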
diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h
deleted file mode 100644
index 36b61ff39277..000000000000
--- a/include/linux/fs_uart_pd.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Platform information definitions for the CPM Uart driver.
- *
- * 2006 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_UART_PD_H
-#define FS_UART_PD_H
-
-#include <asm/types.h>
-
-enum fs_uart_id {
- fsid_smc1_uart,
- fsid_smc2_uart,
- fsid_scc1_uart,
- fsid_scc2_uart,
- fsid_scc3_uart,
- fsid_scc4_uart,
- fs_uart_nr,
-};
-
-static inline int fs_uart_id_scc2fsid(int id)
-{
- return fsid_scc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2scc(int id)
-{
- return id - fsid_scc1_uart + 1;
-}
-
-static inline int fs_uart_id_smc2fsid(int id)
-{
- return fsid_smc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2smc(int id)
-{
- return id - fsid_smc1_uart + 1;
-}
-
-struct fs_uart_platform_info {
- void(*init_ioports)(struct fs_uart_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
- u32 uart_clk;
- u8 tx_num_fifo;
- u8 tx_buf_size;
- u8 rx_num_fifo;
- u8 rx_buf_size;
- u8 brg;
- u8 clk_rx;
- u8 clk_tx;
-};
-
-static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SMC"))
- return fs_uart_id_smc2fsid(fpi->fs_no);
- if(strstr(fpi->fs_type, "SCC"))
- return fs_uart_id_scc2fsid(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 4c467ef50159..4c91a019972b 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -1,16 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching backing cache interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* NOTE!!! See:
*
- * Documentation/filesystems/caching/backend-api.txt
+ * Documentation/filesystems/caching/backend-api.rst
*
* for a description of the cache backend interface declared here.
*/
@@ -19,196 +15,35 @@
#define _LINUX_FSCACHE_CACHE_H
#include <linux/fscache.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-
-#define NR_MAXCACHES BITS_PER_LONG
-
-struct fscache_cache;
-struct fscache_cache_ops;
-struct fscache_object;
-struct fscache_operation;
-/*
- * cache tag definition
- */
-struct fscache_cache_tag {
- struct list_head link;
- struct fscache_cache *cache; /* cache referred to by this tag */
- unsigned long flags;
-#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
- atomic_t usage;
- char name[0]; /* tag name */
+enum fscache_cache_trace;
+enum fscache_cookie_trace;
+enum fscache_access_trace;
+enum fscache_volume_trace;
+
+enum fscache_cache_state {
+ FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
+ FSCACHE_CACHE_IS_PREPARING, /* A cache is preparing to come live */
+ FSCACHE_CACHE_IS_ACTIVE, /* Attached cache is active and can be used */
+ FSCACHE_CACHE_GOT_IOERROR, /* Attached cache stopped on I/O error */
+ FSCACHE_CACHE_IS_WITHDRAWN, /* Attached cache is being withdrawn */
+#define NR__FSCACHE_CACHE_STATE (FSCACHE_CACHE_IS_WITHDRAWN + 1)
};
/*
- * cache definition
+ * Cache cookie.
*/
struct fscache_cache {
const struct fscache_cache_ops *ops;
- struct fscache_cache_tag *tag; /* tag representing this cache */
- struct kobject *kobj; /* system representation of this cache */
- struct list_head link; /* link in list of caches */
- size_t max_index_size; /* maximum size of index data */
- char identifier[36]; /* cache label */
-
- /* node management */
- struct work_struct op_gc; /* operation garbage collector */
- struct list_head object_list; /* list of data/index objects */
- struct list_head op_gc_list; /* list of ops to be deleted */
- spinlock_t object_list_lock;
- spinlock_t op_gc_list_lock;
+ struct list_head cache_link; /* Link in cache list */
+ void *cache_priv; /* Private cache data (or NULL) */
+ refcount_t ref;
+ atomic_t n_volumes; /* Number of active volumes */
+ atomic_t n_accesses; /* Number of in-progress accesses on the cache */
atomic_t object_count; /* no. of live objects in this cache */
- struct fscache_object *fsdef; /* object for the fsdef index */
- unsigned long flags;
-#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
-#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
-};
-
-extern wait_queue_head_t fscache_cache_cleared_wq;
-
-/*
- * operation to be applied to a cache object
- * - retrieval initiation operations are done in the context of the process
- * that issued them, and not in an async thread pool
- */
-typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
-
-enum fscache_operation_state {
- FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
- FSCACHE_OP_ST_INITIALISED, /* Op is initialised */
- FSCACHE_OP_ST_PENDING, /* Op is blocked from running */
- FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */
- FSCACHE_OP_ST_COMPLETE, /* Op is complete */
- FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */
- FSCACHE_OP_ST_DEAD /* Op is now dead */
-};
-
-struct fscache_operation {
- struct work_struct work; /* record for async ops */
- struct list_head pend_link; /* link in object->pending_ops */
- struct fscache_object *object; /* object to be operated upon */
-
- unsigned long flags;
-#define FSCACHE_OP_TYPE 0x000f /* operation type */
-#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */
-#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */
-#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
-#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
-#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */
-#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */
-#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */
-
- enum fscache_operation_state state;
- atomic_t usage;
- unsigned debug_id; /* debugging ID */
-
- /* operation processor callback
- * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
- * the op in a non-pool thread */
- fscache_operation_processor_t processor;
-
- /* Operation cancellation cleanup (optional) */
- fscache_operation_cancel_t cancel;
-
- /* operation releaser */
- fscache_operation_release_t release;
-};
-
-extern atomic_t fscache_op_debug_id;
-extern void fscache_op_work_func(struct work_struct *work);
-
-extern void fscache_enqueue_operation(struct fscache_operation *);
-extern void fscache_op_complete(struct fscache_operation *, bool);
-extern void fscache_put_operation(struct fscache_operation *);
-extern void fscache_operation_init(struct fscache_operation *,
- fscache_operation_processor_t,
- fscache_operation_cancel_t,
- fscache_operation_release_t);
-
-/*
- * data read operation
- */
-struct fscache_retrieval {
- struct fscache_operation op;
- struct fscache_cookie *cookie; /* The netfs cookie */
- struct address_space *mapping; /* netfs pages */
- fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
- void *context; /* netfs read context (pinned) */
- struct list_head to_do; /* list of things to be done by the backend */
- unsigned long start_time; /* time at which retrieval started */
- atomic_t n_pages; /* number of pages to be retrieved */
-};
-
-typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp);
-
-typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp);
-
-/**
- * fscache_get_retrieval - Get an extra reference on a retrieval operation
- * @op: The retrieval operation to get a reference on
- *
- * Get an extra reference on a retrieval operation.
- */
-static inline
-struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
-{
- atomic_inc(&op->op.usage);
- return op;
-}
-
-/**
- * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
- * @op: The retrieval operation affected
- *
- * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
- */
-static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
-{
- fscache_enqueue_operation(&op->op);
-}
-
-/**
- * fscache_retrieval_complete - Record (partial) completion of a retrieval
- * @op: The retrieval operation affected
- * @n_pages: The number of pages to account for
- */
-static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
- int n_pages)
-{
- atomic_sub(n_pages, &op->n_pages);
- if (atomic_read(&op->n_pages) <= 0)
- fscache_op_complete(&op->op, true);
-}
-
-/**
- * fscache_put_retrieval - Drop a reference to a retrieval operation
- * @op: The retrieval operation affected
- *
- * Drop a reference to a retrieval operation.
- */
-static inline void fscache_put_retrieval(struct fscache_retrieval *op)
-{
- fscache_put_operation(&op->op);
-}
-
-/*
- * cached page storage work item
- * - used to do three things:
- * - batch writes to the cache
- * - do cache writes asynchronously
- * - defer writes until cache object lookup completion
- */
-struct fscache_storage {
- struct fscache_operation op;
- pgoff_t store_limit; /* don't write more than this */
+ unsigned int debug_id;
+ enum fscache_cache_state state;
+ char *name;
};
/*
@@ -218,337 +53,162 @@ struct fscache_cache_ops {
/* name of cache provider */
const char *name;
- /* allocate an object record for a cookie */
- struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
- struct fscache_cookie *cookie);
-
- /* look up the object for a cookie
- * - return -ETIMEDOUT to be requeued
- */
- int (*lookup_object)(struct fscache_object *object);
-
- /* finished looking up */
- void (*lookup_complete)(struct fscache_object *object);
+ /* Acquire a volume */
+ void (*acquire_volume)(struct fscache_volume *volume);
- /* increment the usage count on this object (may fail if unmounting) */
- struct fscache_object *(*grab_object)(struct fscache_object *object);
+ /* Free the cache's data attached to a volume */
+ void (*free_volume)(struct fscache_volume *volume);
- /* pin an object in the cache */
- int (*pin_object)(struct fscache_object *object);
+ /* Look up a cookie in the cache */
+ bool (*lookup_cookie)(struct fscache_cookie *cookie);
- /* unpin an object in the cache */
- void (*unpin_object)(struct fscache_object *object);
+ /* Withdraw an object without any cookie access counts held */
+ void (*withdraw_cookie)(struct fscache_cookie *cookie);
- /* check the consistency between the backing cache and the FS-Cache
- * cookie */
- int (*check_consistency)(struct fscache_operation *op);
-
- /* store the updated auxiliary data on an object */
- void (*update_object)(struct fscache_object *object);
+ /* Change the size of a data object */
+ void (*resize_cookie)(struct netfs_cache_resources *cres,
+ loff_t new_size);
/* Invalidate an object */
- void (*invalidate_object)(struct fscache_operation *op);
-
- /* discard the resources pinned by an object and effect retirement if
- * necessary */
- void (*drop_object)(struct fscache_object *object);
-
- /* dispose of a reference to an object */
- void (*put_object)(struct fscache_object *object);
-
- /* sync a cache */
- void (*sync_cache)(struct fscache_cache *cache);
-
- /* notification that the attributes of a non-index object (such as
- * i_size) have changed */
- int (*attr_changed)(struct fscache_object *object);
-
- /* reserve space for an object's data and associated metadata */
- int (*reserve_space)(struct fscache_object *object, loff_t i_size);
-
- /* request a backing block for a page be read or allocated in the
- * cache */
- fscache_page_retrieval_func_t read_or_alloc_page;
-
- /* request backing blocks for a list of pages be read or allocated in
- * the cache */
- fscache_pages_retrieval_func_t read_or_alloc_pages;
-
- /* request a backing block for a page be allocated in the cache so that
- * it can be written directly */
- fscache_page_retrieval_func_t allocate_page;
-
- /* request backing blocks for pages be allocated in the cache so that
- * they can be written directly */
- fscache_pages_retrieval_func_t allocate_pages;
-
- /* write a page to its backing block in the cache */
- int (*write_page)(struct fscache_storage *op, struct page *page);
-
- /* detach backing block from a page (optional)
- * - must release the cookie lock before returning
- * - may sleep
- */
- void (*uncache_page)(struct fscache_object *object,
- struct page *page);
-
- /* dissociate a cache from all the pages it was backing */
- void (*dissociate_pages)(struct fscache_cache *cache);
-};
+ bool (*invalidate_cookie)(struct fscache_cookie *cookie);
-extern struct fscache_cookie fscache_fsdef_index;
+ /* Begin an operation for the netfs lib */
+ bool (*begin_operation)(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state);
-/*
- * Event list for fscache_object::{event_mask,events}
- */
-enum {
- FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */
- FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */
- FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */
- FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */
- FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */
- FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */
- FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */
- NR_FSCACHE_OBJECT_EVENTS
+ /* Prepare to write to a live cache object */
+ void (*prepare_to_write)(struct fscache_cookie *cookie);
};
-#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
+extern struct workqueue_struct *fscache_wq;
+extern wait_queue_head_t fscache_clearance_waiters;
/*
- * States for object state machine.
- */
-struct fscache_transition {
- unsigned long events;
- const struct fscache_state *transit_to;
-};
-
-struct fscache_state {
- char name[24];
- char short_name[8];
- const struct fscache_state *(*work)(struct fscache_object *object,
- int event);
- const struct fscache_transition transitions[];
-};
-
-/*
- * on-disk cache file or index handle
+ * out-of-line cache backend functions
*/
-struct fscache_object {
- const struct fscache_state *state; /* Object state machine state */
- const struct fscache_transition *oob_table; /* OOB state transition table */
- int debug_id; /* debugging ID */
- int n_children; /* number of child objects */
- int n_ops; /* number of extant ops on object */
- int n_obj_ops; /* number of object ops outstanding on object */
- int n_in_progress; /* number of ops in progress */
- int n_exclusive; /* number of exclusive ops queued or in progress */
- atomic_t n_reads; /* number of read ops in progress */
- spinlock_t lock; /* state and operations lock */
-
- unsigned long lookup_jif; /* time at which lookup started */
- unsigned long oob_event_mask; /* OOB events this object is interested in */
- unsigned long event_mask; /* events this object is interested in */
- unsigned long events; /* events to be processed by this object
- * (order is important - using fls) */
-
- unsigned long flags;
-#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
-#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
-#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
-#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
-#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
-#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
-#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
-#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
-#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
-
- struct list_head cache_link; /* link in cache->object_list */
- struct hlist_node cookie_link; /* link in cookie->backing_objects */
- struct fscache_cache *cache; /* cache that supplied this object */
- struct fscache_cookie *cookie; /* netfs's file/index object */
- struct fscache_object *parent; /* parent object */
- struct work_struct work; /* attention scheduling record */
- struct list_head dependents; /* FIFO of dependent objects */
- struct list_head dep_link; /* link in parent's dependents list */
- struct list_head pending_ops; /* unstarted operations on this object */
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- struct rb_node objlist_link; /* link in global object list */
-#endif
- pgoff_t store_limit; /* current storage limit */
- loff_t store_limit_l; /* current storage limit */
-};
-
-extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
- struct fscache_cache *);
-extern void fscache_object_destroy(struct fscache_object *);
-
-extern void fscache_object_lookup_negative(struct fscache_object *object);
-extern void fscache_obtained_object(struct fscache_object *object);
-
-static inline bool fscache_object_is_live(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
-}
-
-static inline bool fscache_object_is_dying(struct fscache_object *object)
-{
- return !fscache_object_is_live(object);
-}
-
-static inline bool fscache_object_is_available(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
-}
+extern struct rw_semaphore fscache_addremove_sem;
+extern struct fscache_cache *fscache_acquire_cache(const char *name);
+extern void fscache_relinquish_cache(struct fscache_cache *cache);
+extern int fscache_add_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ void *cache_priv);
+extern void fscache_withdraw_cache(struct fscache_cache *cache);
+extern void fscache_withdraw_volume(struct fscache_volume *volume);
+extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
-static inline bool fscache_cache_is_broken(struct fscache_object *object)
-{
- return test_bit(FSCACHE_IOERROR, &object->cache->flags);
-}
+extern void fscache_io_error(struct fscache_cache *cache);
-static inline bool fscache_object_is_active(struct fscache_object *object)
-{
- return fscache_object_is_available(object) &&
- fscache_object_is_live(object) &&
- !fscache_cache_is_broken(object);
-}
+extern struct fscache_volume *
+fscache_try_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+extern void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+extern void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+
+extern struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_put_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_end_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+extern void fscache_cookie_lookup_negative(struct fscache_cookie *cookie);
+extern void fscache_resume_after_invalidation(struct fscache_cookie *cookie);
+extern void fscache_caching_failed(struct fscache_cookie *cookie);
+extern bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state state);
/**
- * fscache_object_destroyed - Note destruction of an object in a cache
- * @cache: The cache from which the object came
+ * fscache_cookie_state - Read the state of a cookie
+ * @cookie: The cookie to query
*
- * Note the destruction and deallocation of an object record in a cache.
+ * Get the state of a cookie, imposing an ordering between the cookie contents
+ * and the state value. Paired with fscache_set_cookie_state().
*/
-static inline void fscache_object_destroyed(struct fscache_cache *cache)
+static inline
+enum fscache_cookie_state fscache_cookie_state(struct fscache_cookie *cookie)
{
- if (atomic_dec_and_test(&cache->object_count))
- wake_up_all(&fscache_cache_cleared_wq);
+ return smp_load_acquire(&cookie->state);
}
/**
- * fscache_object_lookup_error - Note an object encountered an error
- * @object: The object on which the error was encountered
+ * fscache_get_key - Get a pointer to the cookie key
+ * @cookie: The cookie to query
*
- * Note that an object encountered a fatal error (usually an I/O error) and
- * that it should be withdrawn as soon as possible.
+ * Return a pointer to where a cookie's key is stored.
*/
-static inline void fscache_object_lookup_error(struct fscache_object *object)
+static inline void *fscache_get_key(struct fscache_cookie *cookie)
{
- set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
+ if (cookie->key_len <= sizeof(cookie->inline_key))
+ return cookie->inline_key;
+ else
+ return cookie->key;
}
-/**
- * fscache_set_store_limit - Set the maximum size to be stored in an object
- * @object: The object to set the maximum on
- * @i_size: The limit to set in bytes
- *
- * Set the maximum size an object is permitted to reach, implying the highest
- * byte that may be written. Intended to be called by the attr_changed() op.
- *
- * See Documentation/filesystems/caching/backend-api.txt for a complete
- * description.
- */
-static inline
-void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
+static inline struct fscache_cookie *fscache_cres_cookie(struct netfs_cache_resources *cres)
{
- object->store_limit_l = i_size;
- object->store_limit = i_size >> PAGE_SHIFT;
- if (i_size & ~PAGE_MASK)
- object->store_limit++;
+ return cres->cache_priv;
}
/**
- * fscache_end_io - End a retrieval operation on a page
- * @op: The FS-Cache operation covering the retrieval
- * @page: The page that was to be fetched
- * @error: The error code (0 if successful)
+ * fscache_count_object - Tell fscache that an object has been added
+ * @cache: The cache to account to
*
- * Note the end of an operation to retrieve a page, as covered by a particular
- * operation record.
+ * Tell fscache that an object has been added to the cache. This prevents the
+ * cache from tearing down the cache structure until the object is uncounted.
*/
-static inline void fscache_end_io(struct fscache_retrieval *op,
- struct page *page, int error)
+static inline void fscache_count_object(struct fscache_cache *cache)
{
- op->end_io_func(page, op->context, error);
-}
-
-static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
-{
- atomic_inc(&cookie->n_active);
+ atomic_inc(&cache->object_count);
}
/**
- * fscache_use_cookie - Request usage of cookie attached to an object
- * @object: Object description
- *
- * Request usage of the cookie attached to an object. NULL is returned if the
- * relinquishment had reduced the cookie usage count to 0.
+ * fscache_uncount_object - Tell fscache that an object has been removed
+ * @cache: The cache to account to
+ *
+ * Tell fscache that an object has been removed from the cache and will no
+ * longer be accessed. After this point, the cache cookie may be destroyed.
*/
-static inline bool fscache_use_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- return atomic_inc_not_zero(&cookie->n_active) != 0;
-}
-
-static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
+static inline void fscache_uncount_object(struct fscache_cache *cache)
{
- return atomic_dec_and_test(&cookie->n_active);
-}
-
-static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
-{
- wake_up_atomic_t(&cookie->n_active);
+ if (atomic_dec_and_test(&cache->object_count))
+ wake_up_all(&fscache_clearance_waiters);
}
/**
- * fscache_unuse_cookie - Cease usage of cookie attached to an object
- * @object: Object description
- *
- * Cease usage of the cookie attached to an object. When the users count
- * reaches zero then the cookie relinquishment will be permitted to proceed.
- */
-static inline void fscache_unuse_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- if (__fscache_unuse_cookie(cookie))
- __fscache_wake_unused_cookie(cookie);
-}
-
-/*
- * out-of-line cache backend functions
- */
-extern __printf(3, 4)
-void fscache_init_cache(struct fscache_cache *cache,
- const struct fscache_cache_ops *ops,
- const char *idfmt, ...);
-
-extern int fscache_add_cache(struct fscache_cache *cache,
- struct fscache_object *fsdef,
- const char *tagname);
-extern void fscache_withdraw_cache(struct fscache_cache *cache);
-
-extern void fscache_io_error(struct fscache_cache *cache);
-
-extern void fscache_mark_page_cached(struct fscache_retrieval *op,
- struct page *page);
-
-extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
- struct pagevec *pagevec);
-
-extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
-
-extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- const void *data,
- uint16_t datalen);
-
-extern void fscache_object_retrying_stale(struct fscache_object *object);
-
-enum fscache_why_object_killed {
- FSCACHE_OBJECT_IS_STALE,
- FSCACHE_OBJECT_NO_SPACE,
- FSCACHE_OBJECT_WAS_RETIRED,
- FSCACHE_OBJECT_WAS_CULLED,
-};
-extern void fscache_object_mark_killed(struct fscache_object *object,
- enum fscache_why_object_killed why);
+ * fscache_wait_for_objects - Wait for all objects to be withdrawn
+ * @cache: The cache to query
+ *
+ * Wait for all extant objects in a cache to finish being withdrawn
+ * and go away.
+ */
+static inline void fscache_wait_for_objects(struct fscache_cache *cache)
+{
+ wait_event(fscache_clearance_waiters,
+ atomic_read(&cache->object_count) == 0);
+}
+
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_read;
+extern atomic_t fscache_n_write;
+extern atomic_t fscache_n_no_write_space;
+extern atomic_t fscache_n_no_create_space;
+extern atomic_t fscache_n_culled;
+extern atomic_t fscache_n_dio_misfit;
+#define fscache_count_read() atomic_inc(&fscache_n_read)
+#define fscache_count_write() atomic_inc(&fscache_n_write)
+#define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
+#define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
+#define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#define fscache_count_dio_misfit() atomic_inc(&fscache_n_dio_misfit)
+#else
+#define fscache_count_read() do {} while(0)
+#define fscache_count_write() do {} while(0)
+#define fscache_count_no_write_space() do {} while(0)
+#define fscache_count_no_create_space() do {} while(0)
+#define fscache_count_culled() do {} while(0)
+#define fscache_count_dio_misfit() do {} while(0)
+#endif
#endif /* _LINUX_FSCACHE_CACHE_H */
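
A sketch of how a cache backend might bind itself with the reworked interface (all examplecache names are hypothetical; the flow mirrors the in-tree cachefiles backend): acquire the cache record by name, then attach an ops table and private data to bring it live.

	#include <linux/err.h>
	#include <linux/fscache-cache.h>

	struct examplecache {
		struct fscache_cache *cache;
	};

	/* Handler bodies elided; only the shape of registration matters here. */
	static void examplecache_acquire_volume(struct fscache_volume *volume);
	static void examplecache_free_volume(struct fscache_volume *volume);
	static bool examplecache_lookup_cookie(struct fscache_cookie *cookie);
	static void examplecache_withdraw_cookie(struct fscache_cookie *cookie);
	static bool examplecache_invalidate_cookie(struct fscache_cookie *cookie);

	static const struct fscache_cache_ops examplecache_ops = {
		.name			= "examplecache",
		.acquire_volume		= examplecache_acquire_volume,
		.free_volume		= examplecache_free_volume,
		.lookup_cookie		= examplecache_lookup_cookie,
		.withdraw_cookie	= examplecache_withdraw_cookie,
		.invalidate_cookie	= examplecache_invalidate_cookie,
	};

	static int examplecache_bind(struct examplecache *ec, const char *name)
	{
		struct fscache_cache *cache;
		int ret;

		/* Get (or create) the cache record for this name. */
		cache = fscache_acquire_cache(name);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		/* Attach our ops and private data; the cache goes live here. */
		ret = fscache_add_cache(cache, &examplecache_ops, ec);
		if (ret < 0) {
			fscache_relinquish_cache(cache);
			return ret;
		}
		ec->cache = cache;
		return 0;
	}
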
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index f4ff47d4a893..58fdb9605425 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -1,16 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* NOTE!!! See:
*
- * Documentation/filesystems/caching/netfs-api.txt
+ * Documentation/filesystems/caching/netfs-api.rst
*
* for a description of the network filesystem interface declared here.
*/
@@ -19,180 +15,138 @@
#define _LINUX_FSCACHE_H
#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
+#include <linux/netfs.h>
+#include <linux/writeback.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
+#define __fscache_available (1)
#define fscache_available() (1)
+#define fscache_volume_valid(volume) (volume)
#define fscache_cookie_valid(cookie) (cookie)
+#define fscache_resources_valid(cres) ((cres)->cache_priv)
+#define fscache_cookie_enabled(cookie) (cookie && !test_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
#else
+#define __fscache_available (0)
#define fscache_available() (0)
+#define fscache_volume_valid(volume) (0)
#define fscache_cookie_valid(cookie) (0)
+#define fscache_resources_valid(cres) (false)
+#define fscache_cookie_enabled(cookie) (0)
#endif
-
-/*
- * overload PG_private_2 to give us PG_fscache - this is used to indicate that
- * a page is currently backed by a local disk cache
- */
-#define PageFsCache(page) PagePrivate2((page))
-#define SetPageFsCache(page) SetPagePrivate2((page))
-#define ClearPageFsCache(page) ClearPagePrivate2((page))
-#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
-#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
-
-/* pattern used to fill dead space in an index entry */
-#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
-
-struct pagevec;
-struct fscache_cache_tag;
struct fscache_cookie;
-struct fscache_netfs;
-typedef void (*fscache_rw_complete_t)(struct page *page,
- void *context,
- int error);
+#define FSCACHE_ADV_SINGLE_CHUNK 0x01 /* The object is a single chunk of data */
+#define FSCACHE_ADV_WRITE_CACHE 0x00 /* Do cache if written to locally */
+#define FSCACHE_ADV_WRITE_NOCACHE 0x02 /* Don't cache if written to locally */
+#define FSCACHE_ADV_WANT_CACHE_SIZE 0x04 /* Retrieve cache size at runtime */
+
+#define FSCACHE_INVAL_DIO_WRITE 0x01 /* Invalidate due to DIO write */
-/* result of index entry consultation */
-enum fscache_checkaux {
- FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
- FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
- FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
+enum fscache_want_state {
+ FSCACHE_WANT_PARAMS,
+ FSCACHE_WANT_WRITE,
+ FSCACHE_WANT_READ,
};
/*
- * fscache cookie definition
- */
-struct fscache_cookie_def {
- /* name of cookie type */
- char name[16];
-
- /* cookie type */
- uint8_t type;
-#define FSCACHE_COOKIE_TYPE_INDEX 0
-#define FSCACHE_COOKIE_TYPE_DATAFILE 1
-
- /* select the cache into which to insert an entry in this index
- * - optional
- * - should return a cache identifier or NULL to cause the cache to be
- * inherited from the parent if possible or the first cache picked
- * for a non-index file if not
- */
- struct fscache_cache_tag *(*select_cache)(
- const void *parent_netfs_data,
- const void *cookie_netfs_data);
-
- /* get an index key
- * - should store the key data in the buffer
- * - should return the amount of data stored
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- uint16_t (*get_key)(const void *cookie_netfs_data,
- void *buffer,
- uint16_t bufmax);
-
- /* get certain file attributes from the netfs data
- * - this function can be absent for an index
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
-
- /* get the auxiliary data from netfs data
- * - this function can be absent if the index carries no state data
- * - should store the auxiliary data in the buffer
- * - should return the amount of amount stored
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- uint16_t (*get_aux)(const void *cookie_netfs_data,
- void *buffer,
- uint16_t bufmax);
-
- /* consult the netfs about the state of an object
- * - this function can be absent if the index carries no state data
- * - the netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data
- */
- enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen);
-
- /* get an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*get_context)(void *cookie_netfs_data, void *context);
-
- /* release an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*put_context)(void *cookie_netfs_data, void *context);
-
- /* indicate page that now have cache metadata retained
- * - this function should mark the specified page as now being cached
- * - the page will have been marked with PG_fscache before this is
- * called, so this is optional
- */
- void (*mark_page_cached)(void *cookie_netfs_data,
- struct address_space *mapping,
- struct page *page);
-};
+ * Data object state.
+ */
+enum fscache_cookie_state {
+ FSCACHE_COOKIE_STATE_QUIESCENT, /* The cookie is uncached */
+ FSCACHE_COOKIE_STATE_LOOKING_UP, /* The cache object is being looked up */
+ FSCACHE_COOKIE_STATE_CREATING, /* The cache object is being created */
+ FSCACHE_COOKIE_STATE_ACTIVE, /* The cache is active, readable and writable */
+ FSCACHE_COOKIE_STATE_INVALIDATING, /* The cache is being invalidated */
+ FSCACHE_COOKIE_STATE_FAILED, /* The cache failed, withdraw to clear */
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING, /* The cookie is being discarded by the LRU */
+ FSCACHE_COOKIE_STATE_WITHDRAWING, /* The cookie is being withdrawn */
+ FSCACHE_COOKIE_STATE_RELINQUISHING, /* The cookie is being relinquished */
+ FSCACHE_COOKIE_STATE_DROPPED, /* The cookie has been dropped */
+#define FSCACHE_COOKIE_STATE__NR (FSCACHE_COOKIE_STATE_DROPPED + 1)
+} __attribute__((mode(byte)));
/*
- * fscache cached network filesystem type
- * - name, version and ops must be filled in before registration
- * - all other fields will be set during registration
- */
-struct fscache_netfs {
- uint32_t version; /* indexing version */
- const char *name; /* filesystem name */
- struct fscache_cookie *primary_index;
- struct list_head link; /* internal link */
+ * Volume representation cookie.
+ */
+struct fscache_volume {
+ refcount_t ref;
+ atomic_t n_cookies; /* Number of data cookies in volume */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int key_hash; /* Hash of key string */
+ u8 *key; /* Volume ID, eg. "afs@example.com@1234" */
+ struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
+ struct hlist_bl_node hash_link; /* Link in hash table */
+ struct work_struct work;
+ struct fscache_cache *cache; /* The cache in which this resides */
+ void *cache_priv; /* Cache private data */
+ spinlock_t lock;
+ unsigned long flags;
+#define FSCACHE_VOLUME_RELINQUISHED 0 /* Volume is being cleaned up */
+#define FSCACHE_VOLUME_INVALIDATE 1 /* Volume was invalidated */
+#define FSCACHE_VOLUME_COLLIDED_WITH 2 /* Volume was collided with */
+#define FSCACHE_VOLUME_ACQUIRE_PENDING 3 /* Volume is waiting to complete acquisition */
+#define FSCACHE_VOLUME_CREATING 4 /* Volume is being created on disk */
+ u8 coherency_len; /* Length of the coherency data */
+ u8 coherency[]; /* Coherency data */
};
/*
- * data file or index object cookie
+ * Data file representation cookie.
* - a file will only appear in one cache
* - a request to cache a file may or may not be honoured, subject to
* constraints such as disk space
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
- atomic_t usage; /* number of users of this cookie */
- atomic_t n_children; /* number of children of this cookie */
- atomic_t n_active; /* number of active users of netfs ptrs */
+ refcount_t ref;
+ atomic_t n_active; /* number of active users of cookie */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int inval_counter; /* Number of invalidations made */
spinlock_t lock;
- spinlock_t stores_lock; /* lock on page store tree */
- struct hlist_head backing_objects; /* object(s) backing this file/index */
- const struct fscache_cookie_def *def; /* definition */
- struct fscache_cookie *parent; /* parent of this entry */
- void *netfs_data; /* back pointer to netfs */
- struct radix_tree_root stores; /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
-
+ struct fscache_volume *volume; /* Parent volume of this file. */
+ void *cache_priv; /* Cache-side representation */
+ struct hlist_bl_node hash_link; /* Link in hash table */
+ struct list_head proc_link; /* Link in proc list */
+ struct list_head commit_link; /* Link in commit queue */
+ struct work_struct work; /* Commit/relinq/withdraw work */
+ loff_t object_size; /* Size of the netfs object */
+ unsigned long unused_at; /* Time at which unused (jiffies) */
unsigned long flags;
-#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
-#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
+#define FSCACHE_COOKIE_RELINQUISHED 0 /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_RETIRED 1 /* T if this cookie has retired on relinq */
+#define FSCACHE_COOKIE_IS_CACHING 2 /* T if this cookie is cached */
+#define FSCACHE_COOKIE_NO_DATA_TO_READ 3 /* T if this cookie has nothing to read */
+#define FSCACHE_COOKIE_NEEDS_UPDATE 4 /* T if attrs have been updated */
+#define FSCACHE_COOKIE_HAS_BEEN_CACHED 5 /* T if cookie needs withdraw-on-relinq */
+#define FSCACHE_COOKIE_DISABLED 6 /* T if cookie has been disabled */
+#define FSCACHE_COOKIE_LOCAL_WRITE 7 /* T if cookie has been modified locally */
+#define FSCACHE_COOKIE_NO_ACCESS_WAKE 8 /* T if no wake when n_accesses goes 0 */
+#define FSCACHE_COOKIE_DO_RELINQUISH 9 /* T if this cookie needs relinquishment */
+#define FSCACHE_COOKIE_DO_WITHDRAW 10 /* T if this cookie needs withdrawing */
+#define FSCACHE_COOKIE_DO_LRU_DISCARD 11 /* T if this cookie needs LRU discard */
+#define FSCACHE_COOKIE_DO_PREP_TO_WRITE 12 /* T if cookie needs write preparation */
+#define FSCACHE_COOKIE_HAVE_DATA 13 /* T if this cookie has data stored */
+#define FSCACHE_COOKIE_IS_HASHED 14 /* T if this cookie is hashed */
+#define FSCACHE_COOKIE_DO_INVALIDATE 15 /* T if cookie needs invalidation */
+
+ enum fscache_cookie_state state;
+ u8 advice; /* FSCACHE_ADV_* */
+ u8 key_len; /* Length of index key */
+ u8 aux_len; /* Length of auxiliary data */
+ u32 key_hash; /* Hash of volume, key, len */
+ union {
+ void *key; /* Index key */
+ u8 inline_key[16]; /* - If the key is short enough */
+ };
+ union {
+ void *aux; /* Auxiliary data */
+ u8 inline_aux[8]; /* - If the aux data is short enough */
+ };
};
-static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
-{
- return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
-}
-
/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
@@ -200,146 +154,142 @@ static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
* - these are undefined symbols when FS-Cache is not configured and the
* optimiser takes care of not using them
*/
-extern int __fscache_register_netfs(struct fscache_netfs *);
-extern void __fscache_unregister_netfs(struct fscache_netfs *);
-extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
-extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
+extern struct fscache_volume *__fscache_acquire_volume(const char *, const char *,
+ const void *, size_t);
+extern void __fscache_relinquish_volume(struct fscache_volume *, const void *, bool);
extern struct fscache_cookie *__fscache_acquire_cookie(
- struct fscache_cookie *,
- const struct fscache_cookie_def *,
- void *, bool);
+ struct fscache_volume *,
+ u8,
+ const void *, size_t,
+ const void *, size_t,
+ loff_t);
+extern void __fscache_use_cookie(struct fscache_cookie *, bool);
+extern void __fscache_unuse_cookie(struct fscache_cookie *, const void *, const loff_t *);
extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
-extern int __fscache_check_consistency(struct fscache_cookie *);
-extern void __fscache_update_cookie(struct fscache_cookie *);
-extern int __fscache_attr_changed(struct fscache_cookie *);
-extern void __fscache_invalidate(struct fscache_cookie *);
-extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
-extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
- struct page *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
- struct address_space *,
- struct list_head *,
- unsigned *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
-extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
-extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
-extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
-extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
-extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
- gfp_t);
-extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
- struct inode *);
-extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages);
-extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
-extern void __fscache_enable_cookie(struct fscache_cookie *,
- bool (*)(void *), void *);
+extern void __fscache_resize_cookie(struct fscache_cookie *, loff_t);
+extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t, unsigned int);
+extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+
+void __fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool using_pgpriv2, bool cond);
+extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t);
/**
- * fscache_register_netfs - Register a filesystem as desiring caching services
- * @netfs: The description of the filesystem
+ * fscache_acquire_volume - Register a volume as desiring caching services
+ * @volume_key: An identification string for the volume
+ * @cache_name: The name of the cache to use (or NULL for the default)
+ * @coherency_data: Piece of arbitrary coherency data to check (or NULL)
+ * @coherency_len: The size of the coherency data
*
- * Register a filesystem as desiring caching services if they're available.
+ * Register a volume as desiring caching services if they're available. The
+ * caller must provide an identifier for the volume and may also indicate which
+ * cache it should be in. If a preexisting volume entry is found in the cache,
+ * the coherency data must match otherwise the entry will be invalidated.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Returns a cookie pointer on success, -ENOMEM if out of memory or -EBUSY if a
+ * cache volume of that name is already acquired. Note that "NULL" is a valid
+ * cookie pointer and can be returned if caching is refused.
*/
static inline
-int fscache_register_netfs(struct fscache_netfs *netfs)
+struct fscache_volume *fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
{
- if (fscache_available())
- return __fscache_register_netfs(netfs);
- else
- return 0;
+ if (!fscache_available())
+ return NULL;
+ return __fscache_acquire_volume(volume_key, cache_name,
+ coherency_data, coherency_len);
}
/**
- * fscache_unregister_netfs - Indicate that a filesystem no longer desires
- * caching services
- * @netfs: The description of the filesystem
+ * fscache_relinquish_volume - Cease caching a volume
+ * @volume: The volume cookie
+ * @coherency_data: Piece of arbitrary coherency data to set (or NULL)
+ * @invalidate: True if the volume should be invalidated
*
- * Indicate that a filesystem no longer desires caching services for the
- * moment.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Indicate that a filesystem no longer desires caching services for a volume.
+ * The caller must have relinquished all file cookies prior to calling this.
+ * The stored coherency data is updated.
*/
static inline
-void fscache_unregister_netfs(struct fscache_netfs *netfs)
+void fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate)
{
- if (fscache_available())
- __fscache_unregister_netfs(netfs);
+ if (fscache_volume_valid(volume))
+ __fscache_relinquish_volume(volume, coherency_data, invalidate);
}
/**
- * fscache_lookup_cache_tag - Look up a cache tag
- * @name: The name of the tag to search for
+ * fscache_acquire_cookie - Acquire a cookie to represent a cache object
+ * @volume: The volume in which to locate/create this cookie
+ * @advice: Advice flags (FSCACHE_COOKIE_ADV_*)
+ * @index_key: The index key for this cookie
+ * @index_key_len: Size of the index key
+ * @aux_data: The auxiliary data for the cookie (may be NULL)
+ * @aux_data_len: Size of the auxiliary data buffer
+ * @object_size: The initial size of object
*
- * Acquire a specific cache referral tag that can be used to select a specific
- * cache in which to cache an index.
+ * Acquire a cookie to represent a data file within the given cache volume.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
+struct fscache_cookie *fscache_acquire_cookie(struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key,
+ size_t index_key_len,
+ const void *aux_data,
+ size_t aux_data_len,
+ loff_t object_size)
{
- if (fscache_available())
- return __fscache_lookup_cache_tag(name);
- else
+ if (!fscache_volume_valid(volume))
return NULL;
+ return __fscache_acquire_cookie(volume, advice,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ object_size);
}
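
A sketch of the netfs side, with hypothetical examplefs types: a volume is acquired per superblock and a cookie per inode, keyed by a file ID with a change counter as coherency data.

	#include <linux/err.h>
	#include <linux/fscache.h>
	#include <linux/slab.h>

	struct examplefs_sb {
		struct fscache_volume *volume;
	};

	struct examplefs_inode {
		struct inode vfs_inode;
		u64 fid;	/* server-assigned file ID: the index key */
		u32 version;	/* change counter: the coherency/aux data */
		struct fscache_cookie *cookie;
	};

	/* One volume per superblock, acquired at mount time. */
	static int examplefs_acquire_volume(struct examplefs_sb *sbi, const char *cell)
	{
		char *key = kasprintf(GFP_KERNEL, "examplefs,%s", cell);

		if (!key)
			return -ENOMEM;
		sbi->volume = fscache_acquire_volume(key, NULL /* default cache */,
						     NULL, 0);
		kfree(key);
		if (IS_ERR(sbi->volume)) {
			int ret = PTR_ERR(sbi->volume);

			sbi->volume = NULL;
			return ret;
		}
		return 0;	/* sbi->volume may be NULL if caching was refused */
	}

	/* One cookie per inode, keyed by file ID, versioned for coherency. */
	static void examplefs_acquire_cookie(struct examplefs_inode *ei,
					     struct examplefs_sb *sbi)
	{
		ei->cookie = fscache_acquire_cookie(sbi->volume, 0,
						    &ei->fid, sizeof(ei->fid),
						    &ei->version, sizeof(ei->version),
						    i_size_read(&ei->vfs_inode));
	}
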
/**
- * fscache_release_cache_tag - Release a cache tag
- * @tag: The tag to release
- *
- * Release a reference to a cache referral tag previously looked up.
+ * fscache_use_cookie - Request usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @will_modify: If cache is expected to be modified locally
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Request usage of the cookie attached to an object. The caller should tell
+ * the cache if the object's contents are about to be modified locally so that
+ * the cache can apply the policy that has been set to handle this case.
*/
-static inline
-void fscache_release_cache_tag(struct fscache_cache_tag *tag)
+static inline void fscache_use_cookie(struct fscache_cookie *cookie,
+ bool will_modify)
{
- if (fscache_available())
- __fscache_release_cache_tag(tag);
+ if (fscache_cookie_valid(cookie))
+ __fscache_use_cookie(cookie, will_modify);
}
/**
- * fscache_acquire_cookie - Acquire a cookie to represent a cache object
- * @parent: The cookie that's to be the parent of this one
- * @def: A description of the cache object, including callback operations
- * @netfs_data: An arbitrary piece of data to be kept in the cookie to
- * represent the cache object to the netfs
- * @enable: Whether or not to enable a data cookie immediately
- *
- * This function is used to inform FS-Cache about part of an index hierarchy
- * that can be used to locate files. This is done by requesting a cookie for
- * each index in the path to the file.
+ * fscache_unuse_cookie - Cease usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @aux_data: Updated auxiliary data (or NULL)
+ * @object_size: Revised size of the object (or NULL)
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Cease usage of the cookie attached to an object. When the use count
+ * reaches zero, relinquishment of the cookie will be permitted to proceed.
*/
-static inline
-struct fscache_cookie *fscache_acquire_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
- void *netfs_data,
- bool enable)
+static inline void fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ const loff_t *object_size)
{
- if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
- return __fscache_acquire_cookie(parent, def, netfs_data,
- enable);
- else
- return NULL;
+ if (fscache_cookie_valid(cookie))
+ __fscache_unuse_cookie(cookie, aux_data, object_size);
}
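
Continuing the hypothetical examplefs sketch above: use/unuse are typically paired across file open and release, declaring up front whether local modification is expected and handing back the authoritative size on the way out.

	static int examplefs_file_open(struct inode *inode, struct file *file)
	{
		struct examplefs_inode *ei =
			container_of(inode, struct examplefs_inode, vfs_inode);

		/* Declare whether we may dirty the pagecache locally. */
		fscache_use_cookie(ei->cookie, file->f_mode & FMODE_WRITE);
		return 0;
	}

	static int examplefs_file_release(struct inode *inode, struct file *file)
	{
		struct examplefs_inode *ei =
			container_of(inode, struct examplefs_inode, vfs_inode);
		loff_t size = i_size_read(inode);

		/* Hand back the current coherency data and object size. */
		fscache_unuse_cookie(ei->cookie, &ei->version, &size);
		return 0;
	}
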
/**
@@ -351,7 +301,7 @@ struct fscache_cookie *fscache_acquire_cookie(
* This function returns a cookie to the cache, forcibly discarding the
* associated cache object if retire is set to true.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
@@ -361,463 +311,343 @@ void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
__fscache_relinquish_cookie(cookie, retire);
}
-/**
- * fscache_check_consistency - Request that if the cache is updated
- * @cookie: The cookie representing the cache object
- *
- * Request an consistency check from fscache, which passes the request
- * to the backing cache.
- *
- * Returns 0 if consistent and -ESTALE if inconsistent. May also
- * return -ENOMEM and -ERESTARTSYS.
+/*
+ * Find the auxiliary data on a cookie.
*/
-static inline
-int fscache_check_consistency(struct fscache_cookie *cookie)
+static inline void *fscache_get_aux(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_check_consistency(cookie);
+ if (cookie->aux_len <= sizeof(cookie->inline_aux))
+ return cookie->inline_aux;
else
- return 0;
+ return cookie->aux;
}
-/**
- * fscache_update_cookie - Request that a cache object be updated
- * @cookie: The cookie representing the cache object
- *
- * Request an update of the index data for the cache object associated with the
- * cookie.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+/*
+ * Update the auxiliary data on a cookie.
*/
static inline
-void fscache_update_cookie(struct fscache_cookie *cookie)
+void fscache_update_aux(struct fscache_cookie *cookie,
+ const void *aux_data, const loff_t *object_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_update_cookie(cookie);
+ void *p = fscache_get_aux(cookie);
+
+ if (aux_data && p)
+ memcpy(p, aux_data, cookie->aux_len);
+ if (object_size)
+ cookie->object_size = *object_size;
}
-/**
- * fscache_pin_cookie - Pin a data-storage cache object in its cache
- * @cookie: The cookie representing the cache object
- *
- * Permit data-storage cache objects to be pinned in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_updates;
+#endif
+
static inline
-int fscache_pin_cookie(struct fscache_cookie *cookie)
+void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
- return -ENOBUFS;
+#ifdef CONFIG_FSCACHE_STATS
+ atomic_inc(&fscache_n_updates);
+#endif
+ fscache_update_aux(cookie, aux_data, object_size);
+ smp_wmb();
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
}
/**
- * fscache_pin_cookie - Unpin a data-storage cache object in its cache
+ * fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @object_size: The current size of the object (may be NULL)
*
- * Permit data-storage cache objects to be unpinned from the cache.
+ * Request an update of the index data for the cache object associated with the
+ * cookie. The auxiliary data on the cookie will be updated first if @aux_data
+ * is set, and the object size will be updated and the object possibly trimmed
+ * if @object_size is set.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_unpin_cookie(struct fscache_cookie *cookie)
+void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
+ if (fscache_cookie_enabled(cookie))
+ __fscache_update_cookie(cookie, aux_data, object_size);
}
/**
- * fscache_attr_changed - Notify cache that an object's attributes changed
+ * fscache_resize_cookie - Request that a cache object be resized
* @cookie: The cookie representing the cache object
+ * @new_size: The new size of the object
*
- * Send a notification to the cache indicating that an object's attributes have
- * changed. This includes the data size. These attributes will be obtained
- * through the get_attr() cookie definition op.
+ * Request that the size of an object be changed.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-int fscache_attr_changed(struct fscache_cookie *cookie)
+void fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_attr_changed(cookie);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ __fscache_resize_cookie(cookie, new_size);
}
/**
* fscache_invalidate - Notify cache that an object needs invalidation
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @size: The revised size of the object.
+ * @flags: Invalidation flags (FSCACHE_INVAL_*)
*
* Notify the cache that an object needs to be invalidated and that it
- * should abort any retrievals or stores it is doing on the cache. The object
- * is then marked non-caching until such time as the invalidation is complete.
+ * should abort any retrievals or stores it is doing on the cache. This
+ * increments inval_counter on the cookie which can be used by the caller to
+ * reconsider I/O requests as they complete.
*
- * This can be called with spinlocks held.
+ * If @flags has FSCACHE_INVAL_DIO_WRITE set, this indicates that this is due
+ * to a direct I/O write and will cause caching to be disabled on this cookie
+ * until it is completely unused.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_invalidate(struct fscache_cookie *cookie)
+void fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data, loff_t size, unsigned int flags)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_invalidate(cookie);
+ if (fscache_cookie_enabled(cookie))
+ __fscache_invalidate(cookie, aux_data, size, flags);
}
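
A hedged sketch of the two common invalidation cases, reusing the hypothetical myfs_inode above: reacting to a third-party change seen on the server, and disabling caching around a local direct-I/O write:

/* Server-side change: refresh the coherency data, drop the cached copy. */
static void myfs_remote_change(struct myfs_inode *mi, loff_t new_size)
{
	mi->change_attr++;
	fscache_invalidate(mi->cookie, &mi->change_attr, new_size, 0);
}

/* Local DIO write: caching stays off until the cookie is unused. */
static void myfs_dio_write_begin(struct myfs_inode *mi)
{
	fscache_invalidate(mi->cookie, NULL, i_size_read(&mi->vfs_inode),
			   FSCACHE_INVAL_DIO_WRITE);
}
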
/**
- * fscache_wait_on_invalidate - Wait for invalidation to complete
- * @cookie: The cookie representing the cache object
+ * fscache_operation_valid - Get the operations table if the resources are usable
+ * @cres: The resources to check.
*
- * Wait for the invalidation of an object to complete.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Returns a pointer to the operations table if usable or NULL if not.
*/
static inline
-void fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_resources *cres)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_invalidate(cookie);
+ return fscache_resources_valid(cres) ? cres->ops : NULL;
}
/**
- * fscache_reserve_space - Reserve data space for a cached object
+ * fscache_begin_read_operation - Begin a read operation for the netfs lib
+ * @cres: The cache resources for the read being performed
* @cookie: The cookie representing the cache object
- * @i_size: The amount of space to be reserved
*
- * Reserve an amount of space in the cache for the cache object attached to a
- * cookie so that a write to that object within the space can always be
- * honoured.
+ * Begin a read operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
+ *
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+int fscache_begin_read_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_read_operation(cres, cookie);
return -ENOBUFS;
}
/**
- * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
- * in which to store it
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to fill if possible
- * @end_io_func: The callback to invoke when and if the page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a page from the cache, or if that's not possible make a potential
- * one-block reservation in the cache into which the page may be stored once
- * fetched from the server.
+ * fscache_end_operation - End the read operation for the netfs lib
+ * @cres: The cache resources for the read operation
*
- * If the page is not backed by the cache object, or if it there's some reason
- * it can't be, -ENOBUFS will be returned and nothing more will be done for
- * that page.
- *
- * Else, if that page is backed by the cache, a read will be initiated directly
- * to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if the page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Clean up the resources at the end of the read request.
*/
-static inline
-int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_page(cookie, page, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
-}
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-/**
- * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
- * blocks in which to store them
- * @cookie: The cookie representing the cache object
- * @mapping: The netfs inode mapping to which the pages will be attached
- * @pages: A list of potential netfs pages to be filled
- * @nr_pages: Number of pages to be read and/or allocated
- * @end_io_func: The callback to invoke when and if each page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a set of pages from the cache, or if that's not possible, attempt to
- * make a potential one-block reservation for each page in the cache into which
- * that page may be stored once fetched from the server.
- *
- * If some pages are not backed by the cache object, or if it there's some
- * reason they can't be, -ENOBUFS will be returned and nothing more will be
- * done for that pages.
- *
- * Else, if some of the pages are backed by the cache, a read will be initiated
- * directly to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if a page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
- * regard to different pages, the return values are prioritised in that order.
- * Any pages submitted for reading are removed from the pages list.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_pages(cookie, mapping, pages,
- nr_pages, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
+ if (ops)
+ ops->end_operation(cres);
}
/**
- * fscache_alloc_page - Allocate a block in which to store a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to allocate a page for
- * @gfp: The conditions under which memory allocation should be made
+ * fscache_read - Start a read from the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The buffer to fill - and also the length
+ * @read_hole: How to handle a hole in the data.
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * Request Allocation a block in the cache in which to store a netfs page
- * without retrieving any contents from the cache.
+ * Start a read from the cache. @cres indicates the cache object to read from
+ * and must be obtained by a call to fscache_begin_read_operation() beforehand.
*
- * If the page is not backed by a file then -ENOBUFS will be returned and
- * nothing more will be done, and no reservation will be made.
+ * The data is read into the iterator, @iter, and that also indicates the size
+ * of the operation. @start_pos is the start position in the file, though if
+ * @read_hole is set appropriately, the cache can use SEEK_DATA to find the
+ * next piece of data, writing zeros for the hole into the iterator.
*
- * Else, a block will be allocated if one wasn't already, and 0 will be
- * returned
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-int fscache_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_alloc_page(cookie, page, gfp);
- else
- return -ENOBUFS;
-}
-
-/**
- * fscache_readpages_cancel - Cancel read/alloc on pages
- * @cookie: The cookie representing the inode's cache object.
- * @pages: The netfs pages that we canceled write on in readpages()
+ * @read_hole indicates how a partially populated region in the cache should be
+ * handled. It can be one of a number of settings:
*
- * Uncache/unreserve the pages reserved earlier in readpages() via
- * fscache_readpages_or_alloc() and similar. In most successful caches in
- * readpages() this doesn't do anything. In cases when the underlying netfs's
- * readahead failed we need to clean up the pagelist (unmark and uncache).
+ * NETFS_READ_HOLE_IGNORE - Just try to read (may return a short read).
*
- * This function may sleep as it may have to clean up disk state.
+ * NETFS_READ_HOLE_CLEAR - Seek for data, clearing the part of the buffer
+ * skipped over, then do as for IGNORE.
+ *
+ * NETFS_READ_HOLE_FAIL - Give ENODATA if we encounter a hole.
*/
static inline
-void fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages)
+int fscache_read(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_readpages_cancel(cookie, pages);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->read(cres, start_pos, iter, read_hole,
+ term_func, term_func_priv);
}
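
Putting the read helpers together, a hedged sketch of a synchronous read of one folio from the cache; myfs_* is hypothetical, and ITER_DEST is the iterator direction constant on recent kernels (older ones pass READ):

static int myfs_read_folio_from_cache(struct fscache_cookie *cookie,
				      struct folio *folio)
{
	struct netfs_cache_resources cres = {};
	struct iov_iter iter;
	int ret;

	ret = fscache_begin_read_operation(&cres, cookie);
	if (ret < 0)
		return ret;	/* -ENOBUFS: no cache, fall back to the server */

	iov_iter_xarray(&iter, ITER_DEST, &folio->mapping->i_pages,
			folio_pos(folio), folio_size(folio));
	/* A NULL term_func makes the read synchronous. */
	ret = fscache_read(&cres, folio_pos(folio), &iter,
			   NETFS_READ_HOLE_FAIL, NULL, NULL);
	fscache_end_operation(&cres);
	return ret;
}
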
/**
- * fscache_write_page - Request storage of a page in the cache
+ * fscache_begin_write_operation - Begin a write operation for the netfs lib
+ * @cres: The cache resources for the write being performed
* @cookie: The cookie representing the cache object
- * @page: The netfs page to store
- * @gfp: The conditions under which memory allocation should be made
*
- * Request the contents of the netfs page be written into the cache. This
- * request may be ignored if no cache block is currently allocated, in which
- * case it will return -ENOBUFS.
+ * Begin a write operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * If a cache block was already allocated, a write will be initiated and 0 will
- * be returned. The PG_fscache_write page bit is set immediately and will then
- * be cleared at the completion of the write to indicate the success or failure
- * of the operation. Note that the completion may happen before the return.
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_write_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
+int fscache_begin_write_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_write_page(cookie, page, gfp);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_write_operation(cres, cookie);
+ return -ENOBUFS;
}
/**
- * fscache_uncache_page - Indicate that caching is no longer required on a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that was being cached.
- *
- * Tell the cache that we no longer want a page to be cached and that it should
- * remove any knowledge of the netfs page it may have.
- *
- * Note that this cannot cancel any outstanding I/O operations between this
- * page and the cache.
+ * fscache_write - Start a write to the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The data to write - and also the length
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-void fscache_uncache_page(struct fscache_cookie *cookie,
- struct page *page)
-{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_page(cookie, page);
-}
-
-/**
- * fscache_check_page_write - Ask if a page is being writing to the cache
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
+ * Start a write to the cache. @cres indicates the cache object to write to and
+ * must be obtained by a call to fscache_begin_write_operation() beforehand.
*
- * Ask the cache if a page is being written to the cache.
+ * The data to be written is obtained from the iterator, @iter, and that also
+ * indicates the size of the operation. @start_pos is the start position in
+ * the file.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*/
static inline
-bool fscache_check_page_write(struct fscache_cookie *cookie,
- struct page *page)
+int fscache_write(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- return __fscache_check_page_write(cookie, page);
- return false;
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->write(cres, start_pos, iter, term_func, term_func_priv);
}
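
The mirror image for writes, again as a hedged sketch with hypothetical myfs_* naming; passing a termination callback instead of NULL would make the write asynchronous, in which case fscache_end_operation() must be deferred to the callback:

static int myfs_write_folio_to_cache(struct fscache_cookie *cookie,
				     struct folio *folio)
{
	struct netfs_cache_resources cres = {};
	struct iov_iter iter;
	int ret;

	ret = fscache_begin_write_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	iov_iter_xarray(&iter, ITER_SOURCE, &folio->mapping->i_pages,
			folio_pos(folio), folio_size(folio));
	/* A NULL term_func makes the write synchronous. */
	ret = fscache_write(&cres, folio_pos(folio), &iter, NULL, NULL);
	fscache_end_operation(&cres);
	return ret;
}
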
/**
- * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache to wake us up when a page is no longer being written to the
- * cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-void fscache_wait_on_page_write(struct fscache_cookie *cookie,
- struct page *page)
+ * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to unlock
+ * @caching: If PG_fscache has been set
+ *
+ * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
+ * waiting.
+ */
+static inline void fscache_clear_page_bits(struct address_space *mapping,
+ loff_t start, size_t len,
+ bool caching)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_page_write(cookie, page);
+ if (caching)
+ __fscache_clear_page_bits(mapping, start, len);
}
/**
- * fscache_maybe_release_page - Consider releasing a page, cancelling a store
+ * fscache_write_to_cache - Save a write to the cache and clear PG_fscache
* @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- * @gfp: The gfp flags passed to releasepage()
- *
- * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
- * releasepage() call. A storage request on the page may cancelled if it is
- * not currently being processed.
- *
- * The function returns true if the page no longer has a storage request on it,
- * and false if a storage request is left in place. If true is returned, the
- * page will have been passed to fscache_uncache_page(). If false is returned
- * the page cannot be freed yet.
- */
-static inline
-bool fscache_maybe_release_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to write back
+ * @i_size: The new size of the inode
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
+ * @using_pgpriv2: If we're using PG_private_2 to mark in-progress writes
+ * @caching: If we actually want to do the caching
+ *
+ * Helper function for a netfs to write dirty data from an inode into the cache
+ * object that's backing it.
+ *
+ * @start and @len describe the range of the data. This does not need to be
+ * page-aligned, but to satisfy DIO requirements, the cache may expand it up to
+ * the page boundaries on either end. All the pages covering the range must be
+ * marked with PG_fscache.
+ *
+ * If given, @term_func will be called upon completion and supplied with
+ * @term_func_priv. Note that if @using_pgpriv2 is set, the PG_private_2 flags
+ * will have been cleared by this point, so the netfs must retain its own pin
+ * on the mapping.
+ */
+static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool using_pgpriv2, bool caching)
{
- if (fscache_cookie_valid(cookie) && PageFsCache(page))
- return __fscache_maybe_release_page(cookie, page, gfp);
- return false;
-}
+ if (caching)
+ __fscache_write_to_cache(cookie, mapping, start, len, i_size,
+ term_func, term_func_priv,
+ using_pgpriv2, caching);
+ else if (term_func)
+ term_func(term_func_priv, -ENOBUFS);
-/**
- * fscache_uncache_all_inode_pages - Uncache all an inode's pages
- * @cookie: The cookie representing the inode's cache object.
- * @inode: The inode to uncache pages from.
- *
- * Uncache all the pages in an inode that are marked PG_fscache, assuming them
- * to be associated with the given cookie.
- *
- * This function may sleep. It will wait for pages that are being written out
- * and will wait whilst the PG_fscache mark is removed by the cache.
- */
-static inline
-void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
- struct inode *inode)
-{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_all_inode_pages(cookie, inode);
}
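
A hedged sketch of driving this from a netfs writeback path with the hypothetical myfs_inode; note that the two-argument termination callback matches how term_func is invoked above (some kernel versions add a bool was_async argument):

static void myfs_writeback_done(void *priv, ssize_t transferred_or_error)
{
	/* e.g. release whatever pins the inode's cache state */
}

static void myfs_write_range_to_cache(struct myfs_inode *mi,
				      loff_t start, size_t len)
{
	struct inode *inode = &mi->vfs_inode;

	fscache_write_to_cache(mi->cookie, inode->i_mapping, start, len,
			       i_size_read(inode),
			       myfs_writeback_done, NULL,
			       false,	/* not using PG_private_2 */
			       fscache_cookie_enabled(mi->cookie));
}
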
/**
- * fscache_disable_cookie - Disable a cookie
- * @cookie: The cookie representing the cache object
- * @invalidate: Invalidate the backing object
- *
- * Disable a cookie from accepting further alloc, read, write, invalidate,
- * update or acquire operations. Outstanding operations can still be waited
- * upon and pages can still be uncached and the cookie relinquished.
+ * fscache_note_page_release - Note that a netfs page got released
+ * @cookie: The cookie corresponding to the file
*
- * This will not return until all outstanding operations have completed.
- *
- * If @invalidate is set, then the backing object will be invalidated and
- * detached, otherwise it will just be detached.
+ * Note that a page that has been copied to the cache has been released. This
+ * means that future reads will need to look in the cache to see if it's there.
*/
static inline
-void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
+void fscache_note_page_release(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_disable_cookie(cookie, invalidate);
-}
-
-/**
- * fscache_enable_cookie - Reenable a cookie
- * @cookie: The cookie representing the cache object
- * @can_enable: A function to permit enablement once lock is held
- * @data: Data for can_enable()
- *
- * Reenable a previously disabled cookie, allowing it to accept further alloc,
- * read, write, invalidate, update or acquire operations. An attempt will be
- * made to immediately reattach the cookie to a backing object.
- *
- * The can_enable() function is called (if not NULL) once the enablement lock
- * is held to rule on whether enablement is still permitted to go ahead.
- */
-static inline
-void fscache_enable_cookie(struct fscache_cookie *cookie,
- bool (*can_enable)(void *data),
- void *data)
-{
- if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
- __fscache_enable_cookie(cookie, can_enable, data);
+ /* If we've written data to the cache (HAVE_DATA) and there wasn't any
+ * data in the cache when we started (NO_DATA_TO_READ), it may no
+ * longer be true that we can skip reading from the cache - so clear
+ * the flag that causes reads to be skipped.
+ */
+ if (cookie &&
+ test_bit(FSCACHE_COOKIE_HAVE_DATA, &cookie->flags) &&
+ test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags))
+ clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
}
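
For example, a netfs ->release_folio() might end like the following hedged sketch, where myfs_i() is a hypothetical container_of() accessor for the myfs_inode wrapper used earlier:

static inline struct myfs_inode *myfs_i(struct inode *inode)
{
	return container_of(inode, struct myfs_inode, vfs_inode);
}

static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct inode *inode = folio->mapping->host;

	folio_detach_private(folio);	/* or whatever cleanup myfs needs */
	fscache_note_page_release(myfs_i(inode)->cookie);
	return true;
}
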
#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
new file mode 100644
index 000000000000..516aba5b858b
--- /dev/null
+++ b/include/linux/fscrypt.h
@@ -0,0 +1,1154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fscrypt.h: declarations for per-file encryption
+ *
+ * Filesystems that implement per-file encryption must include this header
+ * file.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+#ifndef _LINUX_FSCRYPT_H
+#define _LINUX_FSCRYPT_H
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <uapi/linux/fscrypt.h>
+
+/*
+ * The lengths of all file contents blocks must be divisible by this value.
+ * This is needed to ensure that all contents encryption modes will work, as
+ * some of the supported modes don't support arbitrarily byte-aligned messages.
+ *
+ * Since the needed alignment is 16 bytes, most filesystems will meet this
+ * requirement naturally, as typical block sizes are powers of 2. However, if a
+ * filesystem can generate arbitrarily byte-aligned block lengths (e.g., via
+ * compression), then it will need to pad to this alignment before encryption.
+ */
+#define FSCRYPT_CONTENTS_ALIGNMENT 16
+
+union fscrypt_policy;
+struct fscrypt_inode_info;
+struct fs_parameter;
+struct seq_file;
+
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+ bool is_nokey_name;
+};
+
+#define FSTR_INIT(n, l) { .name = n, .len = l }
+#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p) ((p)->disk_name.len)
+
+/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
+#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40
+
+#ifdef CONFIG_FS_ENCRYPTION
+
+/* Crypto operations for filesystems */
+struct fscrypt_operations {
+ /*
+ * The offset of the pointer to struct fscrypt_inode_info in the
+ * filesystem-specific part of the inode, relative to the beginning of
+ * the common part of the inode (the 'struct inode').
+ */
+ ptrdiff_t inode_info_offs;
+
+ /*
+ * If set, then fs/crypto/ will allocate a global bounce page pool the
+ * first time an encryption key is set up for a file. The bounce page
+ * pool is required by the following functions:
+ *
+ * - fscrypt_encrypt_pagecache_blocks()
+ * - fscrypt_zeroout_range() for files not using inline crypto
+ *
+ * If the filesystem doesn't use those, it doesn't need to set this.
+ */
+ unsigned int needs_bounce_pages : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow the use of encryption settings
+ * that assume inode numbers fit in 32 bits (i.e.
+ * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other
+ * prerequisites for these settings are also met. This is only useful
+ * if the filesystem wants to support inline encryption hardware that is
+ * limited to 32-bit or 64-bit data unit numbers and where programming
+ * keyslots is very slow.
+ */
+ unsigned int has_32bit_inodes : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow users to select a crypto data unit
+ * size that is less than the filesystem block size. This is done via
+ * the log2_data_unit_size field of the fscrypt policy. This flag is
+ * not compatible with filesystems that encrypt variable-length blocks
+ * (i.e. blocks that aren't all equal to the filesystem's block size), for
+ * example as a result of compression. It's also not compatible with
+ * the fscrypt_encrypt_block_inplace() and
+ * fscrypt_decrypt_block_inplace() functions.
+ */
+ unsigned int supports_subblock_data_units : 1;
+
+ /*
+ * This field exists only for backwards compatibility reasons and should
+ * only be set by the filesystems that are setting it already. It
+ * contains the filesystem-specific key description prefix that is
+ * accepted for "logon" keys for v1 fscrypt policies. This
+ * functionality is deprecated in favor of the generic prefix
+ * "fscrypt:", which itself is deprecated in favor of the filesystem
+ * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. Filesystems that
+ * are newly adding fscrypt support should not set this field.
+ */
+ const char *legacy_key_prefix;
+
+ /*
+ * Get the fscrypt context of the given inode.
+ *
+ * @inode: the inode whose context to get
+ * @ctx: the buffer into which to get the context
+ * @len: length of the @ctx buffer in bytes
+ *
+ * Return: On success, returns the length of the context in bytes; this
+ * may be less than @len. On failure, returns -ENODATA if the
+ * inode doesn't have a context, -ERANGE if the context is
+ * longer than @len, or another -errno code.
+ */
+ int (*get_context)(struct inode *inode, void *ctx, size_t len);
+
+ /*
+ * Set an fscrypt context on the given inode.
+ *
+ * @inode: the inode whose context to set. The inode won't already have
+ * an fscrypt context.
+ * @ctx: the context to set
+ * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE)
+ * @fs_data: If called from fscrypt_set_context(), this will be the
+ * value the filesystem passed to fscrypt_set_context().
+ * Otherwise (i.e. when called from
+ * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL.
+ *
+ * i_rwsem will be held for write.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+ int (*set_context)(struct inode *inode, const void *ctx, size_t len,
+ void *fs_data);
+
+ /*
+ * Get the dummy fscrypt policy in use on the filesystem (if any).
+ *
+ * Filesystems only need to implement this function if they support the
+ * test_dummy_encryption mount option.
+ *
+ * Return: A pointer to the dummy fscrypt policy, if the filesystem is
+ * mounted with test_dummy_encryption; otherwise NULL.
+ */
+ const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb);
+
+ /*
+ * Check whether a directory is empty. i_rwsem will be held for write.
+ */
+ bool (*empty_dir)(struct inode *inode);
+
+ /*
+ * Check whether the filesystem's inode numbers and UUID are stable,
+ * meaning that they will never be changed even by offline operations
+ * such as filesystem shrinking and therefore can be used in the
+ * encryption without the possibility of files becoming unreadable.
+ *
+ * Filesystems only need to implement this function if they want to
+ * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These
+ * flags are designed to work around the limitations of UFS and eMMC
+ * inline crypto hardware, and they shouldn't be used in scenarios where
+ * such hardware isn't being used.
+ *
+ * Leaving this NULL is equivalent to always returning false.
+ */
+ bool (*has_stable_inodes)(struct super_block *sb);
+
+ /*
+ * Return an array of pointers to the block devices to which the
+ * filesystem may write encrypted file contents, NULL if the filesystem
+ * only has a single such block device, or an ERR_PTR() on error.
+ *
+ * On successful non-NULL return, *num_devs is set to the number of
+ * devices in the returned array. The caller must free the returned
+ * array using kfree().
+ *
+ * If the filesystem can use multiple block devices (other than block
+ * devices that aren't used for encrypted file contents, such as
+ * external journal devices), and wants to support inline encryption,
+ * then it must implement this function. Otherwise it's not needed.
+ */
+ struct block_device **(*get_devices)(struct super_block *sb,
+ unsigned int *num_devs);
+};
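
By way of illustration, a hedged sketch of how a filesystem might fill in this table. The myfs_* helpers and the i_crypt_info/vfs_inode fields are hypothetical stand-ins for however the filesystem persists the context (commonly an xattr) and lays out its inode:

struct myfs_inode {
	struct fscrypt_inode_info *i_crypt_info;
	/* ...other fs-private fields... */
	struct inode vfs_inode;
};

static int myfs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return myfs_read_crypt_xattr(inode, ctx, len);	/* hypothetical */
}

static int myfs_set_context(struct inode *inode, const void *ctx, size_t len,
			    void *fs_data)
{
	return myfs_write_crypt_xattr(inode, ctx, len);	/* hypothetical */
}

static bool myfs_empty_dir(struct inode *dir)
{
	return myfs_nr_dirents(dir) == 0;		/* hypothetical */
}

static const struct fscrypt_operations myfs_cryptops = {
	.inode_info_offs	= offsetof(struct myfs_inode, i_crypt_info) -
				  offsetof(struct myfs_inode, vfs_inode),
	.needs_bounce_pages	= 1,
	.get_context		= myfs_get_context,
	.set_context		= myfs_set_context,
	.empty_dir		= myfs_empty_dir,
};

The table would then be installed at mount time with fscrypt_set_ops(sb, &myfs_cryptops).
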
+
+int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags);
+
+/*
+ * Returns the address of the fscrypt info pointer within the
+ * filesystem-specific part of the inode. (To save memory on filesystems that
+ * don't support fscrypt, a field in 'struct inode' itself is no longer used.)
+ */
+static inline struct fscrypt_inode_info **
+fscrypt_inode_info_addr(const struct inode *inode)
+{
+ VFS_WARN_ON_ONCE(inode->i_sb->s_cop->inode_info_offs == 0);
+ return (void *)inode + inode->i_sb->s_cop->inode_info_offs;
+}
+
+/*
+ * Load the inode's fscrypt info pointer, using a raw dereference. Since this
+ * uses a raw dereference with no memory barrier, it is appropriate to use only
+ * when the caller knows the inode's key setup already happened, resulting in
+ * non-NULL fscrypt info. E.g., the file contents en/decryption functions use
+ * this, since fscrypt_file_open() set up the key.
+ */
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info_raw(const struct inode *inode)
+{
+ struct fscrypt_inode_info *ci = *fscrypt_inode_info_addr(inode);
+
+ VFS_WARN_ON_ONCE(ci == NULL);
+ return ci;
+}
+
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
+{
+ /*
+ * Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
+ * I.e., another task may publish the fscrypt info concurrently,
+ * executing a RELEASE barrier. Use smp_load_acquire() here to safely
+ * ACQUIRE the memory the other task published.
+ */
+ return smp_load_acquire(fscrypt_inode_info_addr(inode));
+}
+
+/**
+ * fscrypt_needs_contents_encryption() - check whether an inode needs
+ * contents encryption
+ * @inode: the inode to check
+ *
+ * Return: %true iff the inode is an encrypted regular file and the kernel was
+ * built with fscrypt support.
+ *
+ * If you need to know whether the encrypt bit is set even when the kernel was
+ * built without fscrypt support, you must use IS_ENCRYPTED() directly instead.
+ */
+static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
+{
+ return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+}
+
+/*
+ * When d_splice_alias() moves a directory's no-key alias to its
+ * plaintext alias as a result of the encryption key being added,
+ * DCACHE_NOKEY_NAME must be cleared and there might be an opportunity
+ * to disable d_revalidate. Note that we don't have to support the
+ * inverse operation because fscrypt doesn't allow no-key names to be
+ * the source or target of a rename().
+ */
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
+{
+ /*
+ * VFS calls fscrypt_handle_d_move even for non-fscrypt
+ * filesystems.
+ */
+ if (dentry->d_flags & DCACHE_NOKEY_NAME) {
+ dentry->d_flags &= ~DCACHE_NOKEY_NAME;
+
+ /*
+ * Other filesystem features might be handling dentry
+ * revalidation, in which case it cannot be disabled.
+ */
+ if (dentry->d_op->d_revalidate == fscrypt_d_revalidate)
+ dentry->d_flags &= ~DCACHE_OP_REVALIDATE;
+ }
+}
+
+/**
+ * fscrypt_is_nokey_name() - test whether a dentry is a no-key name
+ * @dentry: the dentry to check
+ *
+ * This returns true if the dentry is a no-key dentry. A no-key dentry is a
+ * dentry that was created in an encrypted directory that hasn't had its
+ * encryption key added yet. Such dentries may be either positive or negative.
+ *
+ * When a filesystem is asked to create a new filename in an encrypted directory
+ * and the new filename's dentry is a no-key dentry, it must fail the operation
+ * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(),
+ * ->rename(), and ->link(). (However, ->rename() and ->link() are already
+ * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().)
+ *
+ * This is necessary because creating a filename requires the directory's
+ * encryption key, but just checking for the key on the directory inode during
+ * the final filesystem operation doesn't guarantee that the key was available
+ * during the preceding dentry lookup. And the key must have already been
+ * available during the dentry lookup in order to check whether the filename
+ * already exists in the directory and to keep the new file's dentry from being
+ * invalidated for incorrectly having the no-key flag.
+ *
+ * Return: %true if the dentry is a no-key name
+ */
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_NOKEY_NAME;
+}
+
+static inline void fscrypt_prepare_dentry(struct dentry *dentry,
+ bool is_nokey_name)
+{
+ /*
+ * This code tries to only take ->d_lock when necessary to write
+ * to ->d_flags. We shouldn't be peeking at d_flags for
+ * DCACHE_OP_REVALIDATE unlocked, but in the unlikely case
+ * there is a race, the worst that can happen is that we fail to
+ * unset DCACHE_OP_REVALIDATE and pay the cost of an extra
+ * d_revalidate.
+ */
+ if (is_nokey_name) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_NOKEY_NAME;
+ spin_unlock(&dentry->d_lock);
+ } else if (dentry->d_flags & DCACHE_OP_REVALIDATE &&
+ dentry->d_op->d_revalidate == fscrypt_d_revalidate) {
+ /*
+ * Unencrypted dentries and encrypted dentries where the
+ * key is available are always valid from fscrypt
+ * perspective. Avoid the cost of calling
+ * fscrypt_d_revalidate unnecessarily.
+ */
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_OP_REVALIDATE;
+ spin_unlock(&dentry->d_lock);
+ }
+}
+
+/* crypto.c */
+void fscrypt_enqueue_decrypt_work(struct work_struct *);
+
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags);
+int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num);
+
+int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
+ size_t offs);
+int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num);
+
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+ return page->mapping == NULL;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
+{
+ return (struct page *)page_private(bounce_page);
+}
+
+static inline bool fscrypt_is_bounce_folio(const struct folio *folio)
+{
+ return folio->mapping == NULL;
+}
+
+static inline
+struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio)
+{
+ return bounce_folio->private;
+}
+
+void fscrypt_free_bounce_page(struct page *bounce_page);
+
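
A hedged sketch of fs-layer contents encryption on the write path: the folio's blocks are encrypted into a bounce page, the I/O targets the bounce page, and the bounce page is freed afterwards (bio construction elided; myfs_* hypothetical):

static int myfs_encrypt_and_write_folio(struct folio *folio)
{
	struct page *bounce;

	bounce = fscrypt_encrypt_pagecache_blocks(folio, folio_size(folio),
						  0, GFP_NOFS);
	if (IS_ERR(bounce))
		return PTR_ERR(bounce);

	/* ...point the write bio at 'bounce' rather than the folio... */

	fscrypt_free_bounce_page(bounce);
	return 0;
}
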
+/* policy.c */
+int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg);
+int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg);
+int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg);
+int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg);
+int fscrypt_has_permitted_context(struct inode *parent, struct inode *child);
+int fscrypt_context_for_new_inode(void *ctx, struct inode *inode);
+int fscrypt_set_context(struct inode *inode, void *fs_data);
+
+struct fscrypt_dummy_policy {
+ const union fscrypt_policy *policy;
+};
+
+int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy);
+bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2);
+void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep,
+ struct super_block *sb);
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return dummy_policy->policy != NULL;
+}
+static inline void
+fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
+{
+ kfree(dummy_policy->policy);
+ dummy_policy->policy = NULL;
+}
+
+/* keyring.c */
+void fscrypt_destroy_keyring(struct super_block *sb);
+int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
+int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg);
+int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg);
+int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg);
+
+/* keysetup.c */
+int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode,
+ bool *encrypt_ret);
+void fscrypt_put_encryption_info(struct inode *inode);
+void fscrypt_free_inode(struct inode *inode);
+int fscrypt_drop_inode(struct inode *inode);
+
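
Taken together, a hedged sketch of a create path: decide up front whether the new inode will be encrypted, then persist the context once the inode exists on disk (allocation and error unwinding elided; myfs_* hypothetical):

static int myfs_init_new_inode(struct inode *dir, struct inode *inode)
{
	bool encrypt;
	int err;

	err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
	if (err)
		return err;

	/* ...allocate and initialise the on-disk inode... */

	if (encrypt)
		return fscrypt_set_context(inode, NULL);
	return 0;
}
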
+/* fname.c */
+int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen);
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+ u32 max_len, u32 *encrypted_len_ret);
+int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname);
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ kfree(fname->crypto_buf.name);
+}
+
+int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
+ struct fscrypt_str *crypto_str);
+void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str);
+int fscrypt_fname_disk_to_usr(const struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname);
+bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len);
+u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name);
+
+/* bio.c */
+bool fscrypt_decrypt_bio(struct bio *bio);
+int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len);
+
+/* hooks.c */
+int fscrypt_file_open(struct inode *inode, struct file *filp);
+int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry);
+int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags);
+int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct fscrypt_name *fname);
+int fscrypt_prepare_lookup_partial(struct inode *dir, struct dentry *dentry);
+int __fscrypt_prepare_readdir(struct inode *dir);
+int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr);
+int fscrypt_prepare_setflags(struct inode *inode,
+ unsigned int oldflags, unsigned int flags);
+int fscrypt_prepare_symlink(struct inode *dir, const char *target,
+ unsigned int len, unsigned int max_len,
+ struct fscrypt_str *disk_link);
+int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+ unsigned int len, struct fscrypt_str *disk_link);
+const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done);
+int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat);
+static inline void fscrypt_set_ops(struct super_block *sb,
+ const struct fscrypt_operations *s_cop)
+{
+ sb->s_cop = s_cop;
+}
+#else /* !CONFIG_FS_ENCRYPTION */
+
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
+{
+ return false;
+}
+
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
+{
+}
+
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return false;
+}
+
+static inline void fscrypt_prepare_dentry(struct dentry *dentry,
+ bool is_nokey_name)
+{
+}
+
+/* crypto.c */
+static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+}
+
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
+ struct page *page,
+ unsigned int len,
+ unsigned int offs, u64 lblk_num)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_decrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_decrypt_block_inplace(const struct inode *inode,
+ struct page *page,
+ unsigned int len,
+ unsigned int offs, u64 lblk_num)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+ return false;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline bool fscrypt_is_bounce_folio(const struct folio *folio)
+{
+ return false;
+}
+
+static inline
+struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void fscrypt_free_bounce_page(struct page *bounce_page)
+{
+}
+
+/* policy.c */
+static inline int fscrypt_ioctl_set_policy(struct file *filp,
+ const void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_policy_ex(struct file *filp,
+ void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_has_permitted_context(struct inode *parent,
+ struct inode *child)
+{
+ return 0;
+}
+
+static inline int fscrypt_set_context(struct inode *inode, void *fs_data)
+{
+ return -EOPNOTSUPP;
+}
+
+struct fscrypt_dummy_policy {
+};
+
+static inline int
+fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy)
+{
+ return -EINVAL;
+}
+
+static inline bool
+fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2)
+{
+ return true;
+}
+
+static inline void fscrypt_show_test_dummy_encryption(struct seq_file *seq,
+ char sep,
+ struct super_block *sb)
+{
+}
+
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return false;
+}
+
+static inline void
+fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
+{
+}
+
+/* keyring.c */
+static inline void fscrypt_destroy_keyring(struct super_block *sb)
+{
+}
+
+static inline int fscrypt_ioctl_add_key(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_remove_key_all_users(struct file *filp,
+ void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_key_status(struct file *filp,
+ void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+/* keysetup.c */
+
+static inline int fscrypt_prepare_new_inode(struct inode *dir,
+ struct inode *inode,
+ bool *encrypt_ret)
+{
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline void fscrypt_put_encryption_info(struct inode *inode)
+{
+ return;
+}
+
+static inline void fscrypt_free_inode(struct inode *inode)
+{
+}
+
+static inline int fscrypt_drop_inode(struct inode *inode)
+{
+ return 0;
+}
+
+/* fname.c */
+static inline int fscrypt_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+
+ memset(fname, 0, sizeof(*fname));
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+}
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ return;
+}
+
+static inline int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
+ struct fscrypt_str *crypto_str)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+ return;
+}
+
+static inline int fscrypt_fname_disk_to_usr(const struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ /* Encryption support disabled; use standard comparison */
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+static inline u64 fscrypt_fname_siphash(const struct inode *dir,
+ const struct qstr *name)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
+{
+ return 1;
+}
+
+/* bio.c */
+static inline bool fscrypt_decrypt_bio(struct bio *bio)
+{
+ return true;
+}
+
+static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len)
+{
+ return -EOPNOTSUPP;
+}
+
+/* hooks.c */
+
+static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
+{
+ if (IS_ENCRYPTED(inode))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_lookup(struct inode *dir,
+ struct dentry *dentry,
+ struct fscrypt_name *fname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_prepare_lookup_partial(struct inode *dir,
+ struct dentry *dentry)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_readdir(struct inode *dir)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_prepare_setflags(struct inode *inode,
+ unsigned int oldflags,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static inline int fscrypt_prepare_symlink(struct inode *dir,
+ const char *target,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
+{
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ disk_link->name = (unsigned char *)target;
+ disk_link->len = len + 1;
+ if (disk_link->len > max_len)
+ return -ENAMETOOLONG;
+ return 0;
+}
+
+static inline int __fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline const char *fscrypt_get_symlink(struct inode *inode,
+ const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_symlink_getattr(const struct path *path,
+ struct kstat *stat)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_set_ops(struct super_block *sb,
+ const struct fscrypt_operations *s_cop)
+{
+}
+
+#endif /* !CONFIG_FS_ENCRYPTION */
+
+/* inline_crypt.c */
+#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
+
+bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode);
+
+void fscrypt_set_bio_crypt_ctx(struct bio *bio,
+ const struct inode *inode, u64 first_lblk,
+ gfp_t gfp_mask);
+
+void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
+ const struct buffer_head *first_bh,
+ gfp_t gfp_mask);
+
+bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
+ u64 next_lblk);
+
+bool fscrypt_mergeable_bio_bh(struct bio *bio,
+ const struct buffer_head *next_bh);
+
+bool fscrypt_dio_supported(struct inode *inode);
+
+u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks);
+
+#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
+
+static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
+{
+ return false;
+}
+
+static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
+ const struct inode *inode,
+ u64 first_lblk, gfp_t gfp_mask) { }
+
+static inline void fscrypt_set_bio_crypt_ctx_bh(
+ struct bio *bio,
+ const struct buffer_head *first_bh,
+ gfp_t gfp_mask) { }
+
+static inline bool fscrypt_mergeable_bio(struct bio *bio,
+ const struct inode *inode,
+ u64 next_lblk)
+{
+ return true;
+}
+
+static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
+ const struct buffer_head *next_bh)
+{
+ return true;
+}
+
+static inline bool fscrypt_dio_supported(struct inode *inode)
+{
+ return !fscrypt_needs_contents_encryption(inode);
+}
+
+static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk,
+ u64 nr_blocks)
+{
+ return nr_blocks;
+}
+#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
+
+/**
+ * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline
+ * encryption
+ * @inode: an inode. If encrypted, its key must be set up.
+ *
+ * Return: true if the inode requires file contents encryption and if the
+ * encryption should be done in the block layer via blk-crypto rather
+ * than in the filesystem layer.
+ */
+static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
+{
+ return fscrypt_needs_contents_encryption(inode) &&
+ __fscrypt_inode_uses_inline_crypto(inode);
+}
+
+/**
+ * fscrypt_inode_uses_fs_layer_crypto() - test whether an inode uses fs-layer
+ * encryption
+ * @inode: an inode. If encrypted, its key must be set up.
+ *
+ * Return: true if the inode requires file contents encryption and if the
+ * encryption should be done in the filesystem layer rather than in the
+ * block layer via blk-crypto.
+ */
+static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
+{
+ return fscrypt_needs_contents_encryption(inode) &&
+ !__fscrypt_inode_uses_inline_crypto(inode);
+}
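
A hedged sketch of how a filesystem might use this pair when submitting a read bio: with inline crypto the hardware decrypts in flight, whereas with fs-layer crypto the completion path decrypts (e.g. via fscrypt_decrypt_bio()). myfs_* is hypothetical:

static void myfs_submit_read_bio(struct inode *inode, struct bio *bio,
				 u64 first_lblk)
{
	if (fscrypt_inode_uses_inline_crypto(inode))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);
	submit_bio(bio);
	/* for fs-layer crypto, call fscrypt_decrypt_bio() on completion */
}
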
+
+/**
+ * fscrypt_has_encryption_key() - check whether an inode has had its key set up
+ * @inode: the inode to check
+ *
+ * Return: %true if the inode has had its encryption key set up, else %false.
+ *
+ * Usually the key will have been set up already, e.g. by fscrypt_file_open()
+ * or one of the fscrypt_prepare_*() hooks.
+ */
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return fscrypt_get_inode_info(inode) != NULL;
+}
+
+/**
+ * fscrypt_prepare_link() - prepare to link an inode into a possibly-encrypted
+ * directory
+ * @old_dentry: an existing dentry for the inode being linked
+ * @dir: the target directory
+ * @dentry: negative dentry for the target filename
+ *
+ * A new link can only be added to an encrypted directory if the directory's
+ * encryption key is available --- since otherwise we'd have no way to encrypt
+ * the filename.
+ *
+ * We also verify that the link will not violate the constraint that all files
+ * in an encrypted directory tree use the same encryption policy.
+ *
+ * Return: 0 on success, -ENOKEY if the directory's encryption key is missing,
+ * -EXDEV if the link would result in an inconsistent encryption policy, or
+ * another -errno code.
+ */
+static inline int fscrypt_prepare_link(struct dentry *old_dentry,
+ struct inode *dir,
+ struct dentry *dentry)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_link(d_inode(old_dentry), dir, dentry);
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_rename() - prepare for a rename between possibly-encrypted
+ * directories
+ * @old_dir: source directory
+ * @old_dentry: dentry for source file
+ * @new_dir: target directory
+ * @new_dentry: dentry for target location (may be negative unless exchanging)
+ * @flags: rename flags (we care at least about %RENAME_EXCHANGE)
+ *
+ * Prepare for ->rename() where the source and/or target directories may be
+ * encrypted. A new link can only be added to an encrypted directory if the
+ * directory's encryption key is available --- since otherwise we'd have no way
+ * to encrypt the filename. A rename to an existing name, on the other hand,
+ * *is* cryptographically possible without the key. However, we take the more
+ * conservative approach and just forbid all no-key renames.
+ *
+ * We also verify that the rename will not violate the constraint that all files
+ * in an encrypted directory tree use the same encryption policy.
+ *
+ * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the
+ * rename would cause inconsistent encryption policies, or another -errno code.
+ */
+static inline int fscrypt_prepare_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (IS_ENCRYPTED(old_dir) || IS_ENCRYPTED(new_dir))
+ return __fscrypt_prepare_rename(old_dir, old_dentry,
+ new_dir, new_dentry, flags);
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_lookup() - prepare to lookup a name in a possibly-encrypted
+ * directory
+ * @dir: directory being searched
+ * @dentry: filename being looked up
+ * @fname: (output) the name to use to search the on-disk directory
+ *
+ * Prepare for ->lookup() in a directory which may be encrypted by determining
+ * the name that will actually be used to search the directory on-disk. If the
+ * directory's encryption policy is supported by this kernel and its encryption
+ * key is available, then the lookup is assumed to be by plaintext name;
+ * otherwise, it is assumed to be by no-key name.
+ *
+ * This will set DCACHE_NOKEY_NAME on the dentry if the lookup is by no-key
+ * name. In this case the filesystem must assign the dentry a dentry_operations
+ * which contains fscrypt_d_revalidate (or contains a d_revalidate method that
+ * calls fscrypt_d_revalidate), so that the dentry will be invalidated if the
+ * directory's encryption key is later added.
+ *
+ * Return: 0 on success; -ENOENT if the directory's key is unavailable but the
+ * filename isn't a valid no-key name, so a negative dentry should be created;
+ * or another -errno code.
+ */
+static inline int fscrypt_prepare_lookup(struct inode *dir,
+ struct dentry *dentry,
+ struct fscrypt_name *fname)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_lookup(dir, dentry, fname);
+
+ memset(fname, 0, sizeof(*fname));
+ fname->usr_fname = &dentry->d_name;
+ fname->disk_name.name = (unsigned char *)dentry->d_name.name;
+ fname->disk_name.len = dentry->d_name.len;
+
+ fscrypt_prepare_dentry(dentry, false);
+
+ return 0;
+}
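
For instance, a ->lookup() implementation might wrap the helper like this hedged sketch (directory search elided; myfs_* hypothetical):

static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct fscrypt_name fname;
	struct inode *inode = NULL;
	int err;

	err = fscrypt_prepare_lookup(dir, dentry, &fname);
	if (err == -ENOENT)
		goto splice;	/* no key and not a valid no-key name */
	if (err)
		return ERR_PTR(err);

	/* ...search for fname_name(&fname)/fname_len(&fname), set inode... */

	fscrypt_free_filename(&fname);
splice:
	return d_splice_alias(inode, dentry);	/* NULL inode => negative */
}
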
+
+/**
+ * fscrypt_prepare_readdir() - prepare to read a possibly-encrypted directory
+ * @dir: the directory inode
+ *
+ * If the directory is encrypted and it doesn't already have its encryption key
+ * set up, try to set it up so that the filenames will be listed in plaintext
+ * form rather than in no-key form.
+ *
+ * Return: 0 on success; -errno on error. Note that the encryption key being
+ * unavailable is not considered an error. It is also not an error if
+ * the encryption policy is unsupported by this kernel; that is treated
+ * like the key being unavailable, so that files can still be deleted.
+ */
+static inline int fscrypt_prepare_readdir(struct inode *dir)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_readdir(dir);
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_setattr() - prepare to change a possibly-encrypted inode's
+ * attributes
+ * @dentry: dentry through which the inode is being changed
+ * @attr: attributes to change
+ *
+ * Prepare for ->setattr() on a possibly-encrypted inode. On an encrypted file,
+ * most attribute changes are allowed even without the encryption key. However,
+ * without the encryption key we do have to forbid truncates. This is needed
+ * because the size being truncated to may not be a multiple of the filesystem
+ * block size, and in that case we'd have to decrypt the final block, zero the
+ * portion past i_size, and re-encrypt it. (We *could* allow truncating to a
+ * filesystem block boundary, but it's simpler to just forbid all truncates ---
+ * and we already forbid all other contents modifications without the key.)
+ *
+ * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
+ * if a problem occurred while setting up the encryption key.
+ */
+static inline int fscrypt_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ if (IS_ENCRYPTED(d_inode(dentry)))
+ return __fscrypt_prepare_setattr(dentry, attr);
+ return 0;
+}
+
+/**
+ * fscrypt_encrypt_symlink() - encrypt the symlink target if needed
+ * @inode: symlink inode
+ * @target: plaintext symlink target
+ * @len: length of @target excluding null terminator
+ * @disk_link: (in/out) the on-disk symlink target being prepared
+ *
+ * If the symlink target needs to be encrypted, then this function encrypts it
+ * into @disk_link->name. fscrypt_prepare_symlink() must have been called
+ * previously to compute @disk_link->len. If the filesystem did not allocate a
+ * buffer for @disk_link->name after calling fscrypt_prepare_symlink(), then one
+ * will be kmalloc()'ed and the filesystem will be responsible for freeing it.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+{
+ if (IS_ENCRYPTED(inode))
+ return __fscrypt_encrypt_symlink(inode, target, len, disk_link);
+ return 0;
+}
+
+/* If *pagep is a bounce page, free it and set *pagep to the pagecache page */
+static inline void fscrypt_finalize_bounce_page(struct page **pagep)
+{
+ struct page *page = *pagep;
+
+ if (fscrypt_is_bounce_page(page)) {
+ *pagep = fscrypt_pagecache_page(page);
+ fscrypt_free_bounce_page(page);
+ }
+}
+
+#endif /* _LINUX_FSCRYPT_H */
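
For reference, a minimal sketch of how a filesystem's ->lookup() might drive the API above; myfs_find_entry() and the other myfs_* names are hypothetical, not part of this patch:

#include <linux/fs.h>
#include <linux/fscrypt.h>

/* Hypothetical ->lookup(); assumes a myfs_find_entry() directory search. */
static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct fscrypt_name fname;
	struct inode *inode;
	int err;

	err = fscrypt_prepare_lookup(dir, dentry, &fname);
	if (err == -ENOENT)	/* key absent and not a valid no-key name */
		return d_splice_alias(NULL, dentry);
	if (err)
		return ERR_PTR(err);

	inode = myfs_find_entry(dir, &fname);	/* search by fname.disk_name */
	fscrypt_free_filename(&fname);

	return d_splice_alias(inode, dentry);
}
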
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
deleted file mode 100644
index 97f738628b36..000000000000
--- a/include/linux/fscrypt_common.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * fscrypt_common.h: common declarations for per-file encryption
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * Written by Michael Halcrow, 2015.
- * Modified by Jaegeuk Kim, 2015.
- */
-
-#ifndef _LINUX_FSCRYPT_COMMON_H
-#define _LINUX_FSCRYPT_COMMON_H
-
-#include <linux/key.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/bio.h>
-#include <linux/dcache.h>
-#include <crypto/skcipher.h>
-#include <uapi/linux/fs.h>
-
-#define FS_CRYPTO_BLOCK_SIZE 16
-
-struct fscrypt_info;
-
-struct fscrypt_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
-};
-
-/**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
- */
-struct fscrypt_symlink_data {
- __le16 len;
- char encrypted_path[1];
-} __packed;
-
-struct fscrypt_str {
- unsigned char *name;
- u32 len;
-};
-
-struct fscrypt_name {
- const struct qstr *usr_fname;
- struct fscrypt_str disk_name;
- u32 hash;
- u32 minor_hash;
- struct fscrypt_str crypto_buf;
-};
-
-#define FSTR_INIT(n, l) { .name = n, .len = l }
-#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
-#define fname_name(p) ((p)->disk_name.name)
-#define fname_len(p) ((p)->disk_name.len)
-
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto operations for filesystems
- */
-struct fscrypt_operations {
- unsigned int flags;
- const char *key_prefix;
- int (*get_context)(struct inode *, void *, size_t);
- int (*set_context)(struct inode *, const void *, size_t, void *);
- bool (*dummy_context)(struct inode *);
- bool (*is_encrypted)(struct inode *);
- bool (*empty_dir)(struct inode *);
- unsigned (*max_namelen)(struct inode *);
-};
-
-/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
-#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
- if (inode->i_sb->s_cop->dummy_context &&
- inode->i_sb->s_cop->dummy_context(inode))
- return true;
- return false;
-}
-
-static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
- u32 filenames_mode)
-{
- if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
- return true;
-
- if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
- return true;
-
- return false;
-}
-
-static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
-{
- if (str->len == 1 && str->name[0] == '.')
- return true;
-
- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
- return true;
-
- return false;
-}
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-#else
- WARN_ON_ONCE(1);
- return ERR_PTR(-EINVAL);
-#endif
-}
-
-static inline int fscrypt_has_encryption_key(const struct inode *inode)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- return (inode->i_crypt_info != NULL);
-#else
- return 0;
-#endif
-}
-
-#endif /* _LINUX_FSCRYPT_COMMON_H */
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
deleted file mode 100644
index ec406aed2f2f..000000000000
--- a/include/linux/fscrypt_notsupp.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * fscrypt_notsupp.h
- *
- * This stubs out the fscrypt functions for filesystems configured without
- * encryption support.
- */
-
-#ifndef _LINUX_FSCRYPT_NOTSUPP_H
-#define _LINUX_FSCRYPT_NOTSUPP_H
-
-#include <linux/fscrypt_common.h>
-
-/* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
- gfp_t gfp_flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- return;
-}
-
-static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
- struct page *page,
- unsigned int len,
- unsigned int offs,
- u64 lblk_num, gfp_t gfp_flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline int fscrypt_decrypt_page(const struct inode *inode,
- struct page *page,
- unsigned int len, unsigned int offs,
- u64 lblk_num)
-{
- return -EOPNOTSUPP;
-}
-
-
-static inline void fscrypt_restore_control_page(struct page *page)
-{
- return;
-}
-
-static inline void fscrypt_set_d_op(struct dentry *dentry)
-{
- return;
-}
-
-static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
-{
- return;
-}
-
-/* policy.c */
-static inline int fscrypt_ioctl_set_policy(struct file *filp,
- const void __user *arg)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_has_permitted_context(struct inode *parent,
- struct inode *child)
-{
- return 0;
-}
-
-static inline int fscrypt_inherit_context(struct inode *parent,
- struct inode *child,
- void *fs_data, bool preload)
-{
- return -EOPNOTSUPP;
-}
-
-/* keyinfo.c */
-static inline int fscrypt_get_encryption_info(struct inode *inode)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_put_encryption_info(struct inode *inode,
- struct fscrypt_info *ci)
-{
- return;
-}
-
- /* fname.c */
-static inline int fscrypt_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct fscrypt_name *fname)
-{
- if (dir->i_sb->s_cop->is_encrypted(dir))
- return -EOPNOTSUPP;
-
- memset(fname, 0, sizeof(struct fscrypt_name));
- fname->usr_fname = iname;
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
- return 0;
-}
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
- return;
-}
-
-static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
- u32 ilen)
-{
- /* never happens */
- WARN_ON(1);
- return 0;
-}
-
-static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
- u32 ilen,
- struct fscrypt_str *crypto_str)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
-{
- return;
-}
-
-static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
- u32 hash, u32 minor_hash,
- const struct fscrypt_str *iname,
- struct fscrypt_str *oname)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct fscrypt_str *oname)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
- const u8 *de_name, u32 de_name_len)
-{
- /* Encryption support disabled; use standard comparison */
- if (de_name_len != fname->disk_name.len)
- return false;
- return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
- struct bio *bio)
-{
- return;
-}
-
-static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
- return;
-}
-
-static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
- sector_t pblk, unsigned int len)
-{
- return -EOPNOTSUPP;
-}
-
-#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
deleted file mode 100644
index 32e2fcf13b01..000000000000
--- a/include/linux/fscrypt_supp.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * fscrypt_supp.h
- *
- * This is included by filesystems configured with encryption support.
- */
-
-#ifndef _LINUX_FSCRYPT_SUPP_H
-#define _LINUX_FSCRYPT_SUPP_H
-
-#include <linux/fscrypt_common.h>
-
-/* crypto.c */
-extern struct kmem_cache *fscrypt_info_cachep;
-extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
- unsigned int, unsigned int,
- u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
- unsigned int, u64);
-extern void fscrypt_restore_control_page(struct page *);
-
-extern const struct dentry_operations fscrypt_d_ops;
-
-static inline void fscrypt_set_d_op(struct dentry *dentry)
-{
- d_set_d_op(dentry, &fscrypt_d_ops);
-}
-
-static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
-{
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
- spin_unlock(&dentry->d_lock);
-}
-
-/* policy.c */
-extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
-extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
-extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
-extern int fscrypt_inherit_context(struct inode *, struct inode *,
- void *, bool);
-/* keyinfo.c */
-extern int fscrypt_get_encryption_info(struct inode *);
-extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
-
-/* fname.c */
-extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
- int lookup, struct fscrypt_name *);
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
- kfree(fname->crypto_buf.name);
-}
-
-extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
-extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
- struct fscrypt_str *);
-extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
-extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
- const struct fscrypt_str *, struct fscrypt_str *);
-extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
- struct fscrypt_str *);
-
-#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
-
-/* Extracts the second-to-last ciphertext block; see explanation below */
-#define FSCRYPT_FNAME_DIGEST(name, len) \
- ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
- FS_CRYPTO_BLOCK_SIZE))
-
-#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
-
-/**
- * fscrypt_digested_name - alternate identifier for an on-disk filename
- *
- * When userspace lists an encrypted directory without access to the key,
- * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
- * bytes are shown in this abbreviated form (base64-encoded) rather than as the
- * full ciphertext (base64-encoded). This is necessary to allow supporting
- * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
- *
- * To make it possible for filesystems to still find the correct directory entry
- * despite not knowing the full on-disk name, we encode any filesystem-specific
- * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
- * followed by the second-to-last ciphertext block of the filename. Due to the
- * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
- * depends on the full plaintext. (Note that ciphertext stealing causes the
- * last two blocks to appear "flipped".) This makes accidental collisions very
- * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
- * share the same filesystem-specific hashes.
- *
- * However, this scheme isn't immune to intentional collisions, which can be
- * created by anyone able to create arbitrary plaintext filenames and view them
- * without the key. Making the "digest" be a real cryptographic hash like
- * SHA-256 over the full ciphertext would prevent this, although it would be
- * less efficient and harder to implement, especially since the filesystem would
- * need to calculate it for each directory entry examined during a search.
- */
-struct fscrypt_digested_name {
- u32 hash;
- u32 minor_hash;
- u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
-};
-
-/**
- * fscrypt_match_name() - test whether the given name matches a directory entry
- * @fname: the name being searched for
- * @de_name: the name from the directory entry
- * @de_name_len: the length of @de_name in bytes
- *
- * Normally @fname->disk_name will be set, and in that case we simply compare
- * that to the name stored in the directory entry. The only exception is that
- * if we don't have the key for an encrypted directory and a filename in it is
- * very long, then we won't have the full disk_name and we'll instead need to
- * match against the fscrypt_digested_name.
- *
- * Return: %true if the name matches, otherwise %false.
- */
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
- const u8 *de_name, u32 de_name_len)
-{
- if (unlikely(!fname->disk_name.name)) {
- const struct fscrypt_digested_name *n =
- (const void *)fname->crypto_buf.name;
- if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
- return false;
- if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
- return false;
- return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
- n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
- }
-
- if (de_name_len != fname->disk_name.len)
- return false;
- return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
-extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
- unsigned int);
-
-#endif /* _LINUX_FSCRYPT_SUPP_H */
diff --git a/include/linux/fsi-occ.h b/include/linux/fsi-occ.h
new file mode 100644
index 000000000000..7ee3dbd7f4b3
--- /dev/null
+++ b/include/linux/fsi-occ.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef LINUX_FSI_OCC_H
+#define LINUX_FSI_OCC_H
+
+struct device;
+
+#define OCC_RESP_CMD_IN_PRG 0xFF
+#define OCC_RESP_SUCCESS 0
+#define OCC_RESP_CMD_INVAL 0x11
+#define OCC_RESP_CMD_LEN_INVAL 0x12
+#define OCC_RESP_DATA_INVAL 0x13
+#define OCC_RESP_CHKSUM_ERR 0x14
+#define OCC_RESP_INT_ERR 0x15
+#define OCC_RESP_BAD_STATE 0x16
+#define OCC_RESP_CRIT_EXCEPT 0xE0
+#define OCC_RESP_CRIT_INIT 0xE1
+#define OCC_RESP_CRIT_WATCHDOG 0xE2
+#define OCC_RESP_CRIT_OCB 0xE3
+#define OCC_RESP_CRIT_HW 0xE4
+
+#define OCC_MAX_RESP_WORDS 2048
+
+int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
+ void *response, size_t *resp_len);
+
+#endif /* LINUX_FSI_OCC_H */
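
A hedged sketch of a caller of fsi_occ_submit(); the request layout and the meaning of response byte 2 are assumptions about the OCC command format, not guarantees from this header:

#include <linux/slab.h>
#include <linux/fsi-occ.h>

/* Illustrative only: submit a command and check the OCC status byte. */
static int occ_cmd_example(struct device *occ_dev)
{
	u8 request[8] = { 0 };			/* command format assumed */
	size_t resp_len = OCC_MAX_RESP_WORDS * 4;
	u8 *response;
	int rc;

	response = kzalloc(resp_len, GFP_KERNEL);
	if (!response)
		return -ENOMEM;

	rc = fsi_occ_submit(occ_dev, request, sizeof(request),
			    response, &resp_len);
	if (!rc && response[2] != OCC_RESP_SUCCESS)	/* status byte assumed */
		rc = -EIO;

	kfree(response);
	return rc;
}
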
diff --git a/include/linux/fsi-sbefifo.h b/include/linux/fsi-sbefifo.h
new file mode 100644
index 000000000000..a9935e806f8e
--- /dev/null
+++ b/include/linux/fsi-sbefifo.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SBEFIFO FSI Client device driver
+ *
+ * Copyright (C) IBM Corporation 2017
+ */
+
+#ifndef LINUX_FSI_SBEFIFO_H
+#define LINUX_FSI_SBEFIFO_H
+
+#define SBEFIFO_CMD_PUT_OCC_SRAM 0xa404
+#define SBEFIFO_CMD_GET_OCC_SRAM 0xa403
+#define SBEFIFO_CMD_GET_SBE_FFDC 0xa801
+
+#define SBEFIFO_MAX_FFDC_SIZE 0x2000
+
+struct device;
+
+int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
+ __be32 *response, size_t *resp_len);
+
+int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response,
+ size_t resp_len, size_t *data_len);
+
+#endif /* LINUX_FSI_SBEFIFO_H */
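
Similarly, a sketch pairing sbefifo_submit() with sbefifo_parse_status(); the command word layout beyond the length and command-ID words is an assumption:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fsi-sbefifo.h>

/* Illustrative only: issue GET_OCC_SRAM and validate the SBE status. */
static int sbefifo_example(struct device *sbefifo_dev)
{
	__be32 cmd[6] = { cpu_to_be32(6),	/* length in words, assumed */
			  cpu_to_be32(SBEFIFO_CMD_GET_OCC_SRAM) };
	size_t resp_len = 32, data_len;		/* lengths are in words */
	__be32 *resp;
	int rc;

	resp = kcalloc(resp_len, sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/* cmd[2..5] would carry address/size operands; layout assumed. */
	rc = sbefifo_submit(sbefifo_dev, cmd, ARRAY_SIZE(cmd), resp, &resp_len);
	if (!rc)
		rc = sbefifo_parse_status(sbefifo_dev, SBEFIFO_CMD_GET_OCC_SRAM,
					  resp, resp_len, &data_len);
	kfree(resp);
	return rc;
}
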
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
index 141fd38d061f..adea1b432f2d 100644
--- a/include/linux/fsi.h
+++ b/include/linux/fsi.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* FSI device & driver interfaces
*
* Copyright (C) IBM Corporation 2016
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef LINUX_FSI_H
@@ -52,7 +44,7 @@ struct fsi_driver {
};
#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
-#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
+#define to_fsi_drv(drvp) container_of_const(drvp, struct fsi_driver, drv)
extern int fsi_driver_register(struct fsi_driver *fsi_drv);
extern void fsi_driver_unregister(struct fsi_driver *fsi_drv);
@@ -76,8 +68,18 @@ extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
const void *val, size_t size);
+extern const struct bus_type fsi_bus_type;
+extern const struct device_type fsi_cdev_type;
+enum fsi_dev_type {
+ fsi_dev_cfam,
+ fsi_dev_sbefifo,
+ fsi_dev_scom,
+ fsi_dev_occ
+};
-extern struct bus_type fsi_bus_type;
+extern int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type,
+ dev_t *out_dev, int *out_index);
+extern void fsi_free_minor(dev_t dev);
#endif /* LINUX_FSI_H */
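
Taken together with the new minor-allocation helpers, an FSI client driver might look roughly like this; the engine type and all myfsi_* names are made up:

#include <linux/fsi.h>

static const struct fsi_device_id myfsi_ids[] = {
	{ .engine_type = 0x22, .version = FSI_VERSION_ANY },	/* assumed */
	{ }
};

static int myfsi_probe(struct device *dev)
{
	struct fsi_device *fsi_dev = to_fsi_dev(dev);
	dev_t devt;
	int index;

	/* Reserve a chardev minor in the "scom" space for this device. */
	return fsi_get_new_minor(fsi_dev, fsi_dev_scom, &devt, &index);
}

static struct fsi_driver myfsi_driver = {
	.id_table = myfsi_ids,
	.drv = {
		.name  = "myfsi",
		.bus   = &fsi_bus_type,
		.probe = myfsi_probe,
	},
};
/* fsi_driver_register(&myfsi_driver) from module init; unregister on exit. */
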
diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h
index c46eab5bc893..9a55ddc0d277 100644
--- a/include/linux/fsl-diu-fb.h
+++ b/include/linux/fsl-diu-fb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
*
@@ -9,12 +10,6 @@
* York Sun <yorksun@freescale.com>
*
* Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __FSL_DIU_FB_H__
diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h
index a0e2e6b19b57..154e541ce57e 100644
--- a/include/linux/fsl/bestcomm/bestcomm.h
+++ b/include/linux/fsl/bestcomm/bestcomm.h
@@ -27,7 +27,7 @@
*/
struct bcom_bd {
u32 status;
- u32 data[0]; /* variable payload size */
+ u32 data[]; /* variable payload size */
};
/* ======================================================================== */
diff --git a/include/linux/fsl/bestcomm/gen_bd.h b/include/linux/fsl/bestcomm/gen_bd.h
index de47260e69da..aeb312a1cd00 100644
--- a/include/linux/fsl/bestcomm/gen_bd.h
+++ b/include/linux/fsl/bestcomm/gen_bd.h
@@ -1,16 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Header for Bestcomm General Buffer Descriptor tasks driver
*
- *
* Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2006 AppSpec Computer Technologies Corp.
* Jeff Gibbons <jeff.gibbons@appspec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- *
*/
#ifndef __BESTCOMM_GEN_BD_H__
diff --git a/include/linux/fsl/edac.h b/include/linux/fsl/edac.h
index 90d64d4ec1a9..148a297d7587 100644
--- a/include/linux/fsl/edac.h
+++ b/include/linux/fsl/edac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FSL_EDAC_H
#define FSL_EDAC_H
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
new file mode 100644
index 000000000000..623ccfcbf39c
--- /dev/null
+++ b/include/linux/fsl/enetc_mdio.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#ifndef _FSL_ENETC_MDIO_H_
+#define _FSL_ENETC_MDIO_H_
+
+#include <linux/phy.h>
+
+/* PCS registers */
+#define ENETC_PCS_LINK_TIMER1 0x12
+#define ENETC_PCS_LINK_TIMER1_VAL 0x06a0
+#define ENETC_PCS_LINK_TIMER2 0x13
+#define ENETC_PCS_LINK_TIMER2_VAL 0x0003
+#define ENETC_PCS_IF_MODE 0x14
+#define ENETC_PCS_IF_MODE_SGMII_EN BIT(0)
+#define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1)
+#define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+#define ENETC_PCS_IF_MODE_DUPLEX_HALF BIT(3)
+
+/* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset
+ * Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS
+ * still thinks it's at gigabit.
+ */
+enum enetc_pcs_speed {
+ ENETC_PCS_SPEED_10 = 0,
+ ENETC_PCS_SPEED_100 = 1,
+ ENETC_PCS_SPEED_1000 = 2,
+ ENETC_PCS_SPEED_2500 = 2,
+};
+
+struct enetc_hw;
+
+struct enetc_mdio_priv {
+ struct enetc_hw *hw;
+ int mdio_base;
+};
+
+#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
+
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum);
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value);
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad, int regnum);
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad, int regnum,
+ u16 value);
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
+
+#else
+
+static inline int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id,
+ int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id,
+ int regnum, u16 value)
+{ return -EINVAL; }
+static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum, u16 value)
+{ return -EINVAL; }
+static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
+ void __iomem *port_regs)
+{ return ERR_PTR(-EINVAL); }
+
+#endif
+
+#endif
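
These helpers slot directly into a struct mii_bus; a sketch of registering such a bus follows, with the base offset and naming purely illustrative:

#include <linux/phy.h>
#include <linux/fsl/enetc_mdio.h>

/* Illustrative only: back an MDIO bus with the shared ENETC accessors. */
static int example_enetc_mdio_register(struct device *dev,
				       void __iomem *port_regs)
{
	struct enetc_mdio_priv *priv;
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc_size(dev, sizeof(*priv));
	if (!bus)
		return -ENOMEM;

	priv = bus->priv;
	priv->hw = enetc_hw_alloc(dev, port_regs);
	if (IS_ERR(priv->hw))
		return PTR_ERR(priv->hw);
	priv->mdio_base = 0;		/* MDIO register offset, assumed */

	bus->name = "example-enetc-mdio";
	bus->read = enetc_mdio_read_c22;
	bus->write = enetc_mdio_write_c22;
	bus->read_c45 = enetc_mdio_read_c45;
	bus->write_c45 = enetc_mdio_write_c45;
	bus->parent = dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio", dev_name(dev));

	return devm_mdiobus_register(dev, bus);
}
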
diff --git a/include/linux/fsl/ftm.h b/include/linux/fsl/ftm.h
new file mode 100644
index 000000000000..d59011acf66c
--- /dev/null
+++ b/include/linux/fsl/ftm.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FSL_FTM_H__
+#define __FSL_FTM_H__
+
+#define FTM_SC 0x0 /* Status And Control */
+#define FTM_CNT 0x4 /* Counter */
+#define FTM_MOD 0x8 /* Modulo */
+
+#define FTM_CNTIN 0x4C /* Counter Initial Value */
+#define FTM_STATUS 0x50 /* Capture And Compare Status */
+#define FTM_MODE 0x54 /* Features Mode Selection */
+#define FTM_SYNC 0x58 /* Synchronization */
+#define FTM_OUTINIT 0x5C /* Initial State For Channels Output */
+#define FTM_OUTMASK 0x60 /* Output Mask */
+#define FTM_COMBINE 0x64 /* Function For Linked Channels */
+#define FTM_DEADTIME 0x68 /* Deadtime Insertion Control */
+#define FTM_EXTTRIG 0x6C /* FTM External Trigger */
+#define FTM_POL 0x70 /* Channels Polarity */
+#define FTM_FMS 0x74 /* Fault Mode Status */
+#define FTM_FILTER 0x78 /* Input Capture Filter Control */
+#define FTM_FLTCTRL 0x7C /* Fault Control */
+#define FTM_QDCTRL 0x80 /* Quadrature Decoder Control And Status */
+#define FTM_CONF 0x84 /* Configuration */
+#define FTM_FLTPOL 0x88 /* FTM Fault Input Polarity */
+#define FTM_SYNCONF 0x8C /* Synchronization Configuration */
+#define FTM_INVCTRL 0x90 /* FTM Inverting Control */
+#define FTM_SWOCTRL 0x94 /* FTM Software Output Control */
+#define FTM_PWMLOAD 0x98 /* FTM PWM Load */
+
+#define FTM_SC_CLK_MASK_SHIFT 3
+#define FTM_SC_CLK_MASK (3 << FTM_SC_CLK_MASK_SHIFT)
+#define FTM_SC_TOF 0x80
+#define FTM_SC_TOIE 0x40
+#define FTM_SC_CPWMS 0x20
+#define FTM_SC_CLKS 0x18
+#define FTM_SC_PS_1 0x0
+#define FTM_SC_PS_2 0x1
+#define FTM_SC_PS_4 0x2
+#define FTM_SC_PS_8 0x3
+#define FTM_SC_PS_16 0x4
+#define FTM_SC_PS_32 0x5
+#define FTM_SC_PS_64 0x6
+#define FTM_SC_PS_128 0x7
+#define FTM_SC_PS_MASK 0x7
+
+#define FTM_MODE_FAULTIE 0x80
+#define FTM_MODE_FAULTM 0x60
+#define FTM_MODE_CAPTEST 0x10
+#define FTM_MODE_PWMSYNC 0x8
+#define FTM_MODE_WPDIS 0x4
+#define FTM_MODE_INIT 0x2
+#define FTM_MODE_FTMEN 0x1
+
+/* NXP Errata: The PHAFLTREN and PHBFLTREN bits are tied to zero internally
+ * and cannot be set, so the FlexTimer cannot use the input filter in
+ * Quadrature Decoder mode.
+ * https://community.nxp.com/thread/467648#comment-1010319
+ */
+#define FTM_QDCTRL_PHAFLTREN 0x80
+#define FTM_QDCTRL_PHBFLTREN 0x40
+#define FTM_QDCTRL_PHAPOL 0x20
+#define FTM_QDCTRL_PHBPOL 0x10
+#define FTM_QDCTRL_QUADMODE 0x8
+#define FTM_QDCTRL_QUADDIR 0x4
+#define FTM_QDCTRL_TOFDIR 0x2
+#define FTM_QDCTRL_QUADEN 0x1
+
+#define FTM_FMS_FAULTF 0x80
+#define FTM_FMS_WPEN 0x40
+#define FTM_FMS_FAULTIN 0x10
+#define FTM_FMS_FAULTF3 0x8
+#define FTM_FMS_FAULTF2 0x4
+#define FTM_FMS_FAULTF1 0x2
+#define FTM_FMS_FAULTF0 0x1
+
+#define FTM_CSC_BASE 0xC
+#define FTM_CSC_MSB 0x20
+#define FTM_CSC_MSA 0x10
+#define FTM_CSC_ELSB 0x8
+#define FTM_CSC_ELSA 0x4
+#define FTM_CSC(_channel) (FTM_CSC_BASE + ((_channel) * 8))
+
+#define FTM_CV_BASE 0x10
+#define FTM_CV(_channel) (FTM_CV_BASE + ((_channel) * 8))
+
+#define FTM_PS_MAX 7
+
+#endif
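
As a usage sketch, programming these registers to start a free-running counter might look as follows; whether readl/writel or big-endian accessors apply depends on the SoC, so treat the access width and clock-select value as assumptions:

#include <linux/io.h>
#include <linux/fsl/ftm.h>

/* Illustrative only: start the counter from the (assumed) system clock. */
static void ftm_example_start(void __iomem *base)
{
	u32 sc = readl(base + FTM_SC);

	sc &= ~(FTM_SC_CLK_MASK | FTM_SC_PS_MASK);
	sc |= 1 << FTM_SC_CLK_MASK_SHIFT;	/* clock select 1: system clock */
	sc |= FTM_SC_PS_16;			/* divide-by-16 prescaler */

	writel(0, base + FTM_CNTIN);		/* count from zero... */
	writel(0xffff, base + FTM_MOD);		/* ...up to the full 16 bits */
	writel(sc, base + FTM_SC);
}
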
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 3efa3b861d44..fdb55ca47a4f 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -1,23 +1,20 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
 * Freescale 85xx and 86xx Global Utilities register set
*
* Authors: Jeff Brown
* Timur Tabi <timur@freescale.com>
*
* Copyright 2004,2007,2012 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __FSL_GUTS_H__
#define __FSL_GUTS_H__
#include <linux/types.h>
+#include <linux/io.h>
-/**
+/*
* Global Utility Registers.
*
* Not all registers defined in this structure are available on all chips, so
@@ -134,8 +131,6 @@ struct ccsr_guts {
	u32	srds2cr1;	/* 0x.0f44 - SerDes2 Control Register 1 */
} __attribute__ ((packed));
-u32 fsl_guts_get_svr(void);
-
/* Alternate function signal multiplex control */
#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
new file mode 100644
index 000000000000..897d6211c163
--- /dev/null
+++ b/include/linux/fsl/mc.h
@@ -0,0 +1,681 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale Management Complex (MC) bus public interface
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+#ifndef _FSL_MC_H_
+#define _FSL_MC_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <uapi/linux/fsl_mc.h>
+
+#define FSL_MC_VENDOR_FREESCALE 0x1957
+
+struct irq_domain;
+struct msi_domain_info;
+
+struct fsl_mc_device;
+struct fsl_mc_io;
+
+/**
+ * struct fsl_mc_driver - MC object device driver object
+ * @driver: Generic device driver
+ * @match_id_table: table of supported device matching Ids
+ * @probe: Function called when a device is added
+ * @remove: Function called when a device is removed
+ * @shutdown: Function called at shutdown time to quiesce the device
+ * @suspend: Function called when a device is stopped
+ * @resume: Function called when a device is resumed
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * For most device drivers, no need to care about this flag
+ * as long as all DMAs are handled through the kernel DMA API.
+ * For some special ones, for example VFIO drivers, they know
+ * how to manage the DMA themselves and set this flag so that
+ * the IOMMU layer will allow them to setup and manage their
+ * own I/O address space.
+ *
+ * Generic DPAA device driver object for device drivers that are registered
+ * with a DPRC bus. This structure is to be embedded in each device-specific
+ * driver structure.
+ */
+struct fsl_mc_driver {
+ struct device_driver driver;
+ const struct fsl_mc_device_id *match_id_table;
+ int (*probe)(struct fsl_mc_device *dev);
+ void (*remove)(struct fsl_mc_device *dev);
+ void (*shutdown)(struct fsl_mc_device *dev);
+ int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
+ int (*resume)(struct fsl_mc_device *dev);
+ bool driver_managed_dma;
+};
+
+#define to_fsl_mc_driver(_drv) \
+ container_of_const(_drv, struct fsl_mc_driver, driver)
+
+/**
+ * enum fsl_mc_pool_type - Types of allocatable MC bus resources
+ *
+ * Entries in these enum are used as indices in the array of resource
+ * pools of an fsl_mc_bus object.
+ */
+enum fsl_mc_pool_type {
+ FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */
+ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */
+ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */
+ FSL_MC_POOL_IRQ,
+
+ /*
+ * NOTE: New resource pool types must be added before this entry
+ */
+ FSL_MC_NUM_POOL_TYPES
+};
+
+/**
+ * struct fsl_mc_resource - MC generic resource
+ * @type: type of resource
+ * @id: unique MC resource Id within the resources of the same type
+ * @data: pointer to resource-specific data if the resource is currently
+ * allocated, or NULL if the resource is not currently allocated.
+ * @parent_pool: pointer to the parent resource pool from which this
+ * resource is allocated.
+ * @node: Node in the free list of the corresponding resource pool
+ *
+ * NOTE: This structure is to be embedded as a field of specific
+ * MC resource structures.
+ */
+struct fsl_mc_resource {
+ enum fsl_mc_pool_type type;
+ s32 id;
+ void *data;
+ struct fsl_mc_resource_pool *parent_pool;
+ struct list_head node;
+};
+
+/**
+ * struct fsl_mc_device_irq - MC object device message-based interrupt
+ * @virq: Linux virtual interrupt number
+ * @mc_dev: MC object device that owns this interrupt
+ * @dev_irq_index: device-relative IRQ index
+ * @resource: MC generic resource associated with the interrupt
+ */
+struct fsl_mc_device_irq {
+ unsigned int virq;
+ struct fsl_mc_device *mc_dev;
+ u8 dev_irq_index;
+ struct fsl_mc_resource resource;
+};
+
+#define to_fsl_mc_irq(_mc_resource) \
+ container_of(_mc_resource, struct fsl_mc_device_irq, resource)
+
+/* Opened state - Indicates that an object is open by at least one owner */
+#define FSL_MC_OBJ_STATE_OPEN 0x00000001
+/* Plugged state - Indicates that the object is plugged */
+#define FSL_MC_OBJ_STATE_PLUGGED 0x00000002
+
+/**
+ * Shareability flag - Object flag indicating no memory shareability.
+ * The object generates memory accesses that are non-coherent with other
+ * masters; the user is responsible for proper memory handling through
+ * IOMMU configuration.
+ */
+#define FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+
+/**
+ * struct fsl_mc_obj_desc - Object descriptor
+ * @type: Type of object: NULL terminated string
+ * @id: ID of logical object resource
+ * @vendor: Object vendor identifier
+ * @ver_major: Major version number
+ * @ver_minor: Minor version number
+ * @irq_count: Number of interrupts supported by the object
+ * @region_count: Number of mappable regions supported by the object
+ * @state: Object state: combination of FSL_MC_OBJ_STATE_ states
+ * @label: Object label: NULL terminated string
+ * @flags: Object's flags
+ */
+struct fsl_mc_obj_desc {
+ char type[16];
+ int id;
+ u16 vendor;
+ u16 ver_major;
+ u16 ver_minor;
+ u8 irq_count;
+ u8 region_count;
+ u32 state;
+ char label[16];
+ u16 flags;
+};
+
+/**
+ * Bit masks for a MC object device (struct fsl_mc_device) flags
+ */
+#define FSL_MC_IS_DPRC 0x0001
+
+/* Region flags */
+/* Indicates that region can be mapped as cacheable */
+#define FSL_MC_REGION_CACHEABLE 0x00000001
+
+/* Indicates that region can be mapped as shareable */
+#define FSL_MC_REGION_SHAREABLE 0x00000002
+
+/**
+ * struct fsl_mc_device - MC object device object
+ * @dev: Linux driver model device object
+ * @dma_mask: Default DMA mask
+ * @flags: MC object device flags
+ * @icid: Isolation context ID for the device
+ * @mc_handle: MC handle for the corresponding MC object opened
+ * @mc_io: Pointer to MC IO object assigned to this device or
+ * NULL if none.
+ * @obj_desc: MC description of the DPAA device
+ * @regions: pointer to array of MMIO region entries
+ * @irqs: pointer to array of pointers to interrupts allocated to this device
+ * @resource: generic resource associated with this MC object device, if any.
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
+ *
+ * Generic device object for MC object devices that are "attached" to a
+ * MC bus.
+ *
+ * NOTES:
+ * - For a non-DPRC object its icid is the same as its parent DPRC's icid.
+ * - The SMMU notifier callback gets invoked after device_add() has been
+ * called for an MC object device, but before the device-specific probe
+ * callback gets called.
+ * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC
+ * portals. For all other MC objects, their device drivers are responsible for
+ * allocating MC portals for them by calling fsl_mc_portal_allocate().
+ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are
+ * treated as resources that can be allocated/deallocated from the
+ * corresponding resource pool in the object's parent DPRC, using the
+ * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects
+ * are known as "allocatable" objects. For them, the corresponding
+ * fsl_mc_device's 'resource' points to the associated resource object.
+ * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI),
+ * 'resource' is NULL.
+ */
+struct fsl_mc_device {
+ struct device dev;
+ u64 dma_mask;
+ u16 flags;
+ u32 icid;
+ u16 mc_handle;
+ struct fsl_mc_io *mc_io;
+ struct fsl_mc_obj_desc obj_desc;
+ struct resource *regions;
+ struct fsl_mc_device_irq **irqs;
+ struct fsl_mc_resource *resource;
+ struct device_link *consumer_link;
+ const char *driver_override;
+};
+
+#define to_fsl_mc_device(_dev) \
+ container_of(_dev, struct fsl_mc_device, dev)
+
+struct mc_cmd_header {
+ u8 src_id;
+ u8 flags_hw;
+ u8 status;
+ u8 flags_sw;
+ __le16 token;
+ __le16 cmd_id;
+};
+
+enum mc_cmd_status {
+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
+ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
+ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
+ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
+ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
+ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
+ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
+ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
+};
+
+/*
+ * MC command flags
+ */
+
+/* High priority flag */
+#define MC_CMD_FLAG_PRI 0x80
+/* Command completion flag */
+#define MC_CMD_FLAG_INTR_DIS 0x01
+
+static inline __le64 mc_encode_cmd_header(u16 cmd_id,
+ u32 cmd_flags,
+ u16 token)
+{
+ __le64 header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+ hdr->cmd_id = cpu_to_le16(cmd_id);
+ hdr->token = cpu_to_le16(token);
+ hdr->status = MC_CMD_STATUS_READY;
+ if (cmd_flags & MC_CMD_FLAG_PRI)
+ hdr->flags_hw = MC_CMD_FLAG_PRI;
+ if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
+ hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
+
+ return header;
+}
+
+static inline u16 mc_cmd_hdr_read_token(struct fsl_mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 token = le16_to_cpu(hdr->token);
+
+ return token;
+}
+
+struct mc_rsp_create {
+ __le32 object_id;
+};
+
+struct mc_rsp_api_ver {
+ __le16 major_ver;
+ __le16 minor_ver;
+};
+
+static inline u32 mc_cmd_read_object_id(struct fsl_mc_command *cmd)
+{
+ struct mc_rsp_create *rsp_params;
+
+ rsp_params = (struct mc_rsp_create *)cmd->params;
+ return le32_to_cpu(rsp_params->object_id);
+}
+
+static inline void mc_cmd_read_api_version(struct fsl_mc_command *cmd,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_rsp_api_ver *rsp_params;
+
+ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
+ *major_ver = le16_to_cpu(rsp_params->major_ver);
+ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
+}
+
+/**
+ * Bit masks for a MC I/O object (struct fsl_mc_io) flags
+ */
+#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001
+
+/**
+ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
+ * @dev: device associated with this Mc I/O object
+ * @flags: flags for mc_send_command()
+ * @portal_size: MC command portal size in bytes
+ * @portal_phys_addr: MC command portal physical address
+ * @portal_virt_addr: MC command portal virtual address
+ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal.
+ *
+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not
+ * set:
+ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC
+ * portal, if the fsl_mc_io object was created with the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this
+ * fsl_mc_io object must be made only from non-atomic context.
+ *
+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is
+ * set:
+ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC
+ * portal, if the fsl_mc_io object was created with the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this
+ * fsl_mc_io object can be made from atomic or non-atomic context.
+ */
+struct fsl_mc_io {
+ struct device *dev;
+ u16 flags;
+ u32 portal_size;
+ phys_addr_t portal_phys_addr;
+ void __iomem *portal_virt_addr;
+ struct fsl_mc_device *dpmcp_dev;
+ union {
+ /*
+ * This field is only meaningful if the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set
+ */
+ struct mutex mutex; /* serializes mc_send_command() */
+
+ /*
+ * This field is only meaningful if the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
+ */
+ raw_spinlock_t spinlock; /* serializes mc_send_command() */
+ };
+};
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
+
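Put together, sending a command through a portal looks roughly like this; the command ID below is a placeholder, not a real MC opcode:

/* Illustrative only: 0xa011 is a made-up command ID, not a real opcode. */
static int example_get_api_version(struct fsl_mc_io *mc_io,
				   u16 *major, u16 *minor)
{
	struct fsl_mc_command cmd = { 0 };
	int err;

	cmd.header = mc_encode_cmd_header(0xa011, MC_CMD_FLAG_PRI, 0);
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	mc_cmd_read_api_version(&cmd, major, minor);
	return 0;
}
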
+#ifdef CONFIG_FSL_MC_BUS
+#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)
+#else
+/* If the fsl-mc bus is not present, a device cannot belong to it */
+#define dev_is_fsl_mc(_dev) (0)
+#endif
+
+/* Macro to check if a device is a container device */
+#define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \
+ FSL_MC_IS_DPRC)
+
+/* Macro to get the container device of a MC device */
+#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \
+ (_dev) : (_dev)->parent)
+
+/*
+ * module_fsl_mc_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_fsl_mc_driver(__fsl_mc_driver) \
+ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \
+ fsl_mc_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define fsl_mc_driver_register(drv) \
+ __fsl_mc_driver_register(drv, THIS_MODULE)
+
+int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
+ struct module *owner);
+
+void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
+
+/**
+ * struct fsl_mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct fsl_mc_version {
+ u32 major;
+ u32 minor;
+ u32 revision;
+};
+
+struct fsl_mc_version *fsl_mc_get_version(void);
+
+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ u16 mc_io_flags,
+ struct fsl_mc_io **new_mc_io);
+
+void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
+
+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_device **new_mc_adev);
+
+void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
+
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
+
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id);
+
+extern const struct bus_type fsl_mc_bus_type;
+
+extern const struct device_type fsl_mc_bus_dprc_type;
+extern const struct device_type fsl_mc_bus_dpni_type;
+extern const struct device_type fsl_mc_bus_dpio_type;
+extern const struct device_type fsl_mc_bus_dpsw_type;
+extern const struct device_type fsl_mc_bus_dpbp_type;
+extern const struct device_type fsl_mc_bus_dpcon_type;
+extern const struct device_type fsl_mc_bus_dpmcp_type;
+extern const struct device_type fsl_mc_bus_dpmac_type;
+extern const struct device_type fsl_mc_bus_dprtc_type;
+extern const struct device_type fsl_mc_bus_dpseci_type;
+extern const struct device_type fsl_mc_bus_dpdmux_type;
+extern const struct device_type fsl_mc_bus_dpdcei_type;
+extern const struct device_type fsl_mc_bus_dpaiop_type;
+extern const struct device_type fsl_mc_bus_dpci_type;
+extern const struct device_type fsl_mc_bus_dpdmai_type;
+
+static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dprc_type;
+}
+
+static inline bool is_fsl_mc_bus_dpni(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpni_type;
+}
+
+static inline bool is_fsl_mc_bus_dpio(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpio_type;
+}
+
+static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpsw_type;
+}
+
+static inline bool is_fsl_mc_bus_dpdmux(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmux_type;
+}
+
+static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpbp_type;
+}
+
+static inline bool is_fsl_mc_bus_dpcon(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpcon_type;
+}
+
+static inline bool is_fsl_mc_bus_dpmcp(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpmcp_type;
+}
+
+static inline bool is_fsl_mc_bus_dpmac(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpmac_type;
+}
+
+static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
+}
+
+static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
+}
+
+static inline bool is_fsl_mc_bus_dpdcei(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdcei_type;
+}
+
+static inline bool is_fsl_mc_bus_dpaiop(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpaiop_type;
+}
+
+static inline bool is_fsl_mc_bus_dpci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpci_type;
+}
+
+static inline bool is_fsl_mc_bus_dpdmai(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type;
+}
+
+#define DPRC_RESET_OPTION_NON_RECURSIVE 0x00000001
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ u32 options);
+
+int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts);
+
+void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc);
+
+int dprc_cleanup(struct fsl_mc_device *mc_dev);
+
+int dprc_setup(struct fsl_mc_device *mc_dev);
+
+/**
+ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
+ * IRQ pool
+ */
+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
+
+int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
+ unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev);
+
+/*
+ * Data Path Buffer Pool (DPBP) API
+ * Contains initialization APIs and runtime control APIs for DPBP
+ */
+
+int dpbp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpbp_id,
+ u16 *token);
+
+int dpbp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpbp_attr - Structure representing DPBP attributes
+ * @id: DPBP object ID
+ * @bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers
+ */
+struct dpbp_attr {
+ int id;
+ u16 bpid;
+};
+
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_attr *attr);
+
+/* Data Path Concentrator (DPCON) API
+ * Contains initialization APIs and runtime control APIs for DPCON
+ */
+
+/**
+ * Use it to disable notifications; see dpcon_set_notification()
+ */
+#define DPCON_INVALID_DPIO_ID (int)(-1)
+
+int dpcon_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpcon_id,
+ u16 *token);
+
+int dpcon_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpcon_attr - Structure representing DPCON attributes
+ * @id: DPCON object ID
+ * @qbman_ch_id: Channel ID to be used by dequeue operation
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_attr {
+ int id;
+ u16 qbman_ch_id;
+ u8 num_priorities;
+};
+
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_attr *attr);
+
+/**
+ * struct dpcon_notification_cfg - Structure representing notification params
+ * @dpio_id: DPIO object ID; must be configured with a notification channel;
+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
+ * @priority: Priority selection within the DPIO channel; valid values
+ * are 0-7, depending on the number of priorities in that channel
+ * @user_ctx: User context value provided with each CDAN message
+ */
+struct dpcon_notification_cfg {
+ int dpio_id;
+ u8 priority;
+ u64 user_ctx;
+};
+
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_notification_cfg *cfg);
+
+#endif /* _FSL_MC_H_ */
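
A skeleton consumer tying the registration macro to the DPBP API; all mydrv_* names are invented for illustration:

#include <linux/fsl/mc.h>

/* Illustrative only: open the DPBP this driver was bound to, read its bpid. */
static int mydrv_probe(struct fsl_mc_device *mc_dev)
{
	struct dpbp_attr attr;
	u16 token;
	int err;

	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
	if (err)
		return err;

	err = dpbp_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, &token);
	if (err)
		goto err_free_portal;

	err = dpbp_get_attributes(mc_dev->mc_io, 0, token, &attr);
	if (!err)
		dev_info(&mc_dev->dev, "hardware bpid: %u\n", attr.bpid);

	dpbp_close(mc_dev->mc_io, 0, token);
err_free_portal:
	fsl_mc_portal_free(mc_dev->mc_io);
	return err;
}

static const struct fsl_mc_device_id mydrv_id_table[] = {
	{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpbp" },
	{ }
};

static struct fsl_mc_driver mydrv_driver = {
	.driver		= { .name = "mydrv" },
	.match_id_table	= mydrv_id_table,
	.probe		= mydrv_probe,
};
module_fsl_mc_driver(mydrv_driver);
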
diff --git a/include/linux/fsl/netc_global.h b/include/linux/fsl/netc_global.h
new file mode 100644
index 000000000000..fdecca8c90f0
--- /dev/null
+++ b/include/linux/fsl/netc_global.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2024 NXP
+ */
+#ifndef __NETC_GLOBAL_H
+#define __NETC_GLOBAL_H
+
+#include <linux/io.h>
+
+static inline u32 netc_read(void __iomem *reg)
+{
+ return ioread32(reg);
+}
+
+static inline void netc_write(void __iomem *reg, u32 val)
+{
+ iowrite32(val, reg);
+}
+
+#endif
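
A trivial read-modify-write built on these accessors; the register offset and bit are invented:

#include <linux/bits.h>
#include <linux/fsl/netc_global.h>

/* Illustrative only: 0x10 and bit 0 are made-up register/bit choices. */
static inline void netc_example_set_bit(void __iomem *base)
{
	u32 val = netc_read(base + 0x10);

	netc_write(base + 0x10, val | BIT(0));
}
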
diff --git a/include/linux/fsl/ntmp.h b/include/linux/fsl/ntmp.h
new file mode 100644
index 000000000000..916dc4fe7de3
--- /dev/null
+++ b/include/linux/fsl/ntmp.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+#ifndef __NETC_NTMP_H
+#define __NETC_NTMP_H
+
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+
+struct maft_keye_data {
+ u8 mac_addr[ETH_ALEN];
+ __le16 resv;
+};
+
+struct maft_cfge_data {
+ __le16 si_bitmap;
+ __le16 resv;
+};
+
+struct netc_cbdr_regs {
+ void __iomem *pir;
+ void __iomem *cir;
+ void __iomem *mr;
+
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *lenr;
+};
+
+struct netc_tbl_vers {
+ u8 maft_ver;
+ u8 rsst_ver;
+};
+
+struct netc_cbdr {
+ struct device *dev;
+ struct netc_cbdr_regs regs;
+
+ int bd_num;
+ int next_to_use;
+ int next_to_clean;
+
+ int dma_size;
+ void *addr_base;
+ void *addr_base_align;
+ dma_addr_t dma_base;
+ dma_addr_t dma_base_align;
+
+ /* Serialize the order of command BD ring */
+ spinlock_t ring_lock;
+};
+
+struct ntmp_user {
+	int cbdr_num;	/* number of control BD rings */
+ struct device *dev;
+ struct netc_cbdr *ring;
+ struct netc_tbl_vers tbl;
+};
+
+struct maft_entry_data {
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+#if IS_ENABLED(CONFIG_NXP_NETC_LIB)
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs);
+void ntmp_free_cbdr(struct netc_cbdr *cbdr);
+
+/* NTMP APIs */
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft);
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft);
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id);
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count);
+int ntmp_rsst_query_entry(struct ntmp_user *user,
+ u32 *table, int count);
+#else
+static inline int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ return 0;
+}
+
+static inline void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+}
+
+static inline int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ return 0;
+}
+
+static inline int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ return 0;
+}
+
+static inline int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return 0;
+}
+
+static inline int ntmp_rsst_update_entry(struct ntmp_user *user,
+ const u32 *table, int count)
+{
+ return 0;
+}
+
+static inline int ntmp_rsst_query_entry(struct ntmp_user *user,
+ u32 *table, int count)
+{
+ return 0;
+}
+
+#endif
+
+#endif
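
A hedged sketch of consuming the table APIs; the ring setup, entry ID, and SI bitmap value are assumptions about how a caller would wire this up:

#include <linux/etherdevice.h>
#include <linux/fsl/ntmp.h>

/* Illustrative only: bring up one CBDR and install a MAC filter entry. */
static int ntmp_example_add_mac(struct ntmp_user *user,
				const struct netc_cbdr_regs *regs,
				const u8 *mac)
{
	struct maft_entry_data maft = { };
	int err;

	err = ntmp_init_cbdr(&user->ring[0], user->dev, regs);
	if (err)
		return err;

	ether_addr_copy(maft.keye.mac_addr, mac);
	maft.cfge.si_bitmap = cpu_to_le16(BIT(0));	/* station iface 0, assumed */

	err = ntmp_maft_add_entry(user, 0, &maft);	/* entry ID 0, assumed */
	if (err)
		ntmp_free_cbdr(&user->ring[0]);
	return err;
}
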
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
new file mode 100644
index 000000000000..3601e25779ba
--- /dev/null
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ * Copyright 2018 NXP
+ */
+#ifndef __PTP_QORIQ_H__
+#define __PTP_QORIQ_H__
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/ptp_clock_kernel.h>
+
+/*
+ * qoriq ptp registers
+ */
+struct ctrl_regs {
+ u32 tmr_ctrl; /* Timer control register */
+ u32 tmr_tevent; /* Timestamp event register */
+ u32 tmr_temask; /* Timer event mask register */
+	u32 tmr_pevent;  /* Timestamp PTP event register */
+	u32 tmr_pemask;  /* Timestamp PTP event mask register */
+ u32 tmr_stat; /* Timestamp status register */
+ u32 tmr_cnt_h; /* Timer counter high register */
+ u32 tmr_cnt_l; /* Timer counter low register */
+ u32 tmr_add; /* Timer drift compensation addend register */
+ u32 tmr_acc; /* Timer accumulator register */
+ u32 tmr_prsc; /* Timer prescale */
+ u8 res1[4];
+ u32 tmroff_h; /* Timer offset high */
+ u32 tmroff_l; /* Timer offset low */
+};
+
+struct alarm_regs {
+ u32 tmr_alarm1_h; /* Timer alarm 1 high register */
+	u32 tmr_alarm1_l; /* Timer alarm 1 low register */
+ u32 tmr_alarm2_h; /* Timer alarm 2 high register */
+	u32 tmr_alarm2_l; /* Timer alarm 2 low register */
+};
+
+struct fiper_regs {
+ u32 tmr_fiper1; /* Timer fixed period interval */
+ u32 tmr_fiper2; /* Timer fixed period interval */
+ u32 tmr_fiper3; /* Timer fixed period interval */
+};
+
+struct etts_regs {
+ u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
+};
+
+struct ptp_qoriq_registers {
+ struct ctrl_regs __iomem *ctrl_regs;
+ struct alarm_regs __iomem *alarm_regs;
+ struct fiper_regs __iomem *fiper_regs;
+ struct etts_regs __iomem *etts_regs;
+};
+
+/* Offset definitions for the four register groups */
+#define ETSEC_CTRL_REGS_OFFSET 0x0
+#define ETSEC_ALARM_REGS_OFFSET 0x40
+#define ETSEC_FIPER_REGS_OFFSET 0x80
+#define ETSEC_ETTS_REGS_OFFSET 0xa0
+
+#define CTRL_REGS_OFFSET 0x80
+#define ALARM_REGS_OFFSET 0xb8
+#define FIPER_REGS_OFFSET 0xd0
+#define ETTS_REGS_OFFSET 0xe0
+
+
+/* Bit definitions for the TMR_CTRL register */
+#define ALM1P (1<<31) /* Alarm1 output polarity */
+#define ALM2P (1<<30) /* Alarm2 output polarity */
+#define FIPERST (1<<28) /* FIPER start indication */
+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
+#define TCLK_PERIOD_MASK (0x3ff)
+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
+#define FRD (1<<14) /* FIPER Realignment Disable */
+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
+#define COPH (1<<7) /* Generated clock output phase. */
+#define CIPH (1<<6) /* External oscillator input clock phase */
+#define TMSR (1<<5) /* Timer soft reset. */
+#define BYP (1<<3) /* Bypass drift compensated clock */
+#define TE (1<<2) /* 1588 timer enable. */
+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
+#define CKSEL_MASK (0x3)
+
+/* Bit definitions for the TMR_TEVENT register */
+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
+
+/* Bit definitions for the TMR_TEMASK register */
+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
+
+/* Bit definitions for the TMR_PEVENT register */
+#define TXP2  (1<<9)  /* PTP transmitted timestamp in TXTS2 */
+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
+#define RXP (1<<0) /* PTP frame has been received */
+
+/* Bit definitions for the TMR_PEMASK register */
+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
+#define RXPEN (1<<0) /* Receive PTP packet event enable */
+
+/* Bit definitions for the TMR_STAT register */
+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
+#define STAT_VEC_MASK (0x3f)
+#define ETS1_VLD (1<<24)
+#define ETS2_VLD (1<<25)
+
+/* Bit definitions for the TMR_PRSC register */
+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
+#define PRSC_OCK_MASK (0xffff)
+
+
+#define DRIVER "ptp_qoriq"
+#define N_EXT_TS 2
+
+#define DEFAULT_CKSEL 1
+#define DEFAULT_TMR_PRSC 2
+#define DEFAULT_FIPER1_PERIOD 1000000000
+#define DEFAULT_FIPER2_PERIOD 1000000000
+#define DEFAULT_FIPER3_PERIOD 1000000000
+
+struct ptp_qoriq {
+ void __iomem *base;
+ struct ptp_qoriq_registers regs;
+ spinlock_t lock; /* protects regs */
+ struct ptp_clock *clock;
+ struct ptp_clock_info caps;
+ struct resource *rsrc;
+ struct device *dev;
+ bool extts_fifo_support;
+ bool fiper3_support;
+ bool etsec;
+ int irq;
+ int phc_index;
+ u32 tclk_period; /* nanoseconds */
+ u32 tmr_prsc;
+ u32 tmr_add;
+ u32 cksel;
+ u32 tmr_fiper1;
+ u32 tmr_fiper2;
+ u32 tmr_fiper3;
+ u32 (*read)(unsigned __iomem *addr);
+ void (*write)(unsigned __iomem *addr, u32 val);
+};
+
+static inline u32 qoriq_read_be(unsigned __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static inline void qoriq_write_be(unsigned __iomem *addr, u32 val)
+{
+ iowrite32be(val, addr);
+}
+
+static inline u32 qoriq_read_le(unsigned __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void qoriq_write_le(unsigned __iomem *addr, u32 val)
+{
+ iowrite32(val, addr);
+}
+
+irqreturn_t ptp_qoriq_isr(int irq, void *priv);
+int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
+ const struct ptp_clock_info *caps);
+void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq);
+int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm);
+int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta);
+int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int ptp_qoriq_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts);
+int ptp_qoriq_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event);
+
+#endif
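
Editor's note: the read/write hooks above let one driver body serve both big- and little-endian incarnations of the 1588 timer block. Below is a minimal sketch of how a probe path might wire them up and hand off to ptp_qoriq_init(); the qoriq_caps template, the little_endian flag, and the surrounding probe plumbing are illustrative assumptions, not part of this header.

static const struct ptp_clock_info qoriq_caps; /* template filled elsewhere; assumption */

static int qoriq_probe_sketch(struct device *dev, void __iomem *base,
			      bool little_endian)
{
	struct ptp_qoriq *ptp_qoriq;

	ptp_qoriq = devm_kzalloc(dev, sizeof(*ptp_qoriq), GFP_KERNEL);
	if (!ptp_qoriq)
		return -ENOMEM;

	ptp_qoriq->dev = dev;
	/* Pick the endian-aware accessors once; hot paths stay branch-free. */
	ptp_qoriq->read = little_endian ? qoriq_read_le : qoriq_read_be;
	ptp_qoriq->write = little_endian ? qoriq_write_le : qoriq_write_be;

	/* ptp_qoriq_init() is expected to fill ->regs from the offsets above. */
	return ptp_qoriq_init(ptp_qoriq, base, &qoriq_caps);
}
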
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 60cef8227534..49f20c2f99bf 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/fsl_devices.h
*
@@ -7,11 +8,6 @@
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2004,2012 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef _FSL_DEVICE_H_
@@ -98,10 +94,12 @@ struct fsl_usb2_platform_data {
unsigned suspended:1;
unsigned already_suspended:1;
- unsigned has_fsl_erratum_a007792:1;
- unsigned has_fsl_erratum_a005275:1;
+ unsigned has_fsl_erratum_a007792:1;
+ unsigned has_fsl_erratum_14:1;
+ unsigned has_fsl_erratum_a005275:1;
unsigned has_fsl_erratum_a005697:1;
- unsigned check_phy_clk_valid:1;
+ unsigned has_fsl_erratum_a006918:1;
+ unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
u32 pm_command;
@@ -120,7 +118,6 @@ struct fsl_usb2_platform_data {
#define FSL_USB2_PORT0_ENABLED 0x00000001
#define FSL_USB2_PORT1_ENABLED 0x00000002
-#define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0)
struct spi_device;
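
Editor's note: the erratum bitfields added above are runtime quirk switches rather than compile-time options. A hedged sketch of how controller glue code might consume them (the function and messages are illustrative):

static void fsl_usb2_note_errata_sketch(struct fsl_usb2_platform_data *pdata)
{
	/* One-bit flags cost nothing when clear; each gates one workaround. */
	if (pdata->has_fsl_erratum_a006918)
		pr_info("fsl-usb2: erratum A-006918 workaround active\n");
	if (pdata->check_phy_clk_valid)
		pr_info("fsl-usb2: will verify PHY clock before handoff\n");
}
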
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index c332f0a45607..0af96a45e903 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Freescale Integrated Flash Controller
*
* Copyright 2011 Freescale Semiconductor, Inc
*
* Author: Dipen Dudhat <dipen.dudhat@freescale.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_FSL_IFC_H
@@ -274,6 +261,8 @@
*/
/* Auto Boot Mode */
#define IFC_NAND_NCFGR_BOOT 0x80000000
+/* SRAM Initialization */
+#define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000
/* Addressing Mode-ROW0+n/COL0 */
#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
/* Addressing Mode-ROW0+n/COL0+n */
@@ -734,11 +723,7 @@ struct fsl_ifc_nand {
u32 res19[0x10];
__be32 nand_fsr;
u32 res20;
- /* The V1 nand_eccstat is actually 4 words that overlaps the
- * V2 nand_eccstat.
- */
- __be32 v1_nand_eccstat[2];
- __be32 v2_nand_eccstat[6];
+ __be32 nand_eccstat[8];
u32 res21[0x1c];
__be32 nanndcr;
u32 res22[0x2];
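
Editor's note: the collapsed nand_eccstat[8] array packs the status of four 512-byte sectors into each big-endian word. A sketch of pulling out one sector's error count, assuming the ifc_in32() accessor used by the IFC drivers:

static u32 ifc_sector_ecc_errors_sketch(struct fsl_ifc_nand *ifc_nand,
					unsigned int sector)
{
	u32 eccstat = ifc_in32(&ifc_nand->nand_eccstat[sector / 4]);

	/* Four 8-bit status fields per word; sector 0 is the most significant byte. */
	return (eccstat >> ((3 - (sector % 4)) * 8)) & 0xf;
}
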
diff --git a/include/linux/fsldma.h b/include/linux/fsldma.h
index b213c02963c9..c523d716ebd2 100644
--- a/include/linux/fsldma.h
+++ b/include/linux/fsldma.h
@@ -1,8 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef FSL_DMA_H
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index b78aa7ac77ce..28a9cb13fbfa 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FS_NOTIFY_H
#define _LINUX_FS_NOTIFY_H
@@ -16,84 +17,265 @@
#include <linux/slab.h>
#include <linux/bug.h>
+/* Are there any inode/mount/sb objects watched with priority prio or above? */
+static inline bool fsnotify_sb_has_priority_watchers(struct super_block *sb,
+ int prio)
+{
+ struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb);
+
+ /* Were any marks ever added to any object on this sb? */
+ if (!sbinfo)
+ return false;
+
+ return atomic_long_read(&sbinfo->watched_objects[prio]);
+}
+
+/* Are there any inode/mount/sb objects that are being watched at all? */
+static inline bool fsnotify_sb_has_watchers(struct super_block *sb)
+{
+ return fsnotify_sb_has_priority_watchers(sb, 0);
+}
+
+/*
+ * Notify this @dir inode about a change in a child directory entry.
+ * The directory entry may have turned positive or negative or its inode may
+ * have changed (i.e. renamed over).
+ *
+ * Unlike fsnotify_parent(), the event will be reported regardless of the
+ * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only
+ * the child is interested and not the parent.
+ */
+static inline int fsnotify_name(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ u32 cookie)
+{
+ if (!fsnotify_sb_has_watchers(dir->i_sb))
+ return 0;
+
+ return fsnotify(mask, data, data_type, dir, name, NULL, cookie);
+}
+
+static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
+ __u32 mask)
+{
+ fsnotify_name(mask, dentry, FSNOTIFY_EVENT_DENTRY, dir, &dentry->d_name, 0);
+}
+
+static inline void fsnotify_inode(struct inode *inode, __u32 mask)
+{
+ if (!fsnotify_sb_has_watchers(inode->i_sb))
+ return;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0);
+}
+
/* Notify this dentry's parent about a child's events. */
-static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
+static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
+ const void *data, int data_type)
+{
+ struct inode *inode = d_inode(dentry);
+
+ if (!fsnotify_sb_has_watchers(inode->i_sb))
+ return 0;
+
+ if (S_ISDIR(inode->i_mode)) {
+ mask |= FS_ISDIR;
+
+ /* sb/mount marks are not interested in name of directory */
+ if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
+ goto notify_child;
+ }
+
+ /* disconnected dentry cannot notify parent */
+ if (IS_ROOT(dentry))
+ goto notify_child;
+
+ return __fsnotify_parent(dentry, mask, data, data_type);
+
+notify_child:
+ return fsnotify(mask, data, data_type, NULL, NULL, inode, 0);
+}
+
+/*
+ * Simple wrappers to consolidate calls to fsnotify_parent() when an event
+ * is on a file/dentry.
+ */
+static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
+{
+ fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY);
+}
+
+static inline int fsnotify_path(const struct path *path, __u32 mask)
{
- if (!dentry)
- dentry = path->dentry;
+ return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
+}
+
+static inline int fsnotify_file(struct file *file, __u32 mask)
+{
+ /*
+	 * FMODE_NONOTIFY fds are generated by fanotify itself and should not
+	 * generate new events. We also don't want to generate events for
+	 * FMODE_PATH fds (only open & close apply to them) as they are just
+	 * handle creation / destruction events and not "real" file events.
+ */
+ if (FMODE_FSNOTIFY_NONE(file->f_mode))
+ return 0;
- return __fsnotify_parent(path, dentry, mask);
+ return fsnotify_path(&file->f_path, mask);
}
-/* simple call site for access decisions */
-static inline int fsnotify_perm(struct file *file, int mask)
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+
+int fsnotify_open_perm_and_set_mode(struct file *file);
+
+/*
+ * fsnotify_file_area_perm - permission hook before access to file range
+ */
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
{
- const struct path *path = &file->f_path;
/*
- * Do not use file_inode() here or anywhere in this file to get the
- * inode. That would break *notity on overlayfs.
+ * filesystem may be modified in the context of permission events
+ * (e.g. by HSM filling a file on access), so sb freeze protection
+ * must not be held.
*/
- struct inode *inode = path->dentry->d_inode;
- __u32 fsnotify_mask = 0;
- int ret;
+ lockdep_assert_once(file_write_not_started(file));
- if (file->f_mode & FMODE_NONOTIFY)
+ if (!(perm_mask & (MAY_READ | MAY_WRITE | MAY_ACCESS)))
return 0;
- if (!(mask & (MAY_READ | MAY_OPEN)))
+
+ /*
+ * read()/write() and other types of access generate pre-content events.
+ */
+ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) {
+ int ret = fsnotify_pre_content(&file->f_path, ppos, count);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!(perm_mask & MAY_READ) ||
+ likely(!FMODE_FSNOTIFY_ACCESS_PERM(file->f_mode)))
return 0;
- if (mask & MAY_OPEN)
- fsnotify_mask = FS_OPEN_PERM;
- else if (mask & MAY_READ)
- fsnotify_mask = FS_ACCESS_PERM;
- else
- BUG();
- ret = fsnotify_parent(path, NULL, fsnotify_mask);
- if (ret)
- return ret;
+ /*
+ * read() also generates the legacy FS_ACCESS_PERM event, so content
+ * scanners can inspect the content filled by pre-content event.
+ */
+ return fsnotify_path(&file->f_path, FS_ACCESS_PERM);
+}
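
Editor's note: callers are expected to run this hook before touching the range and outside sb freeze protection, since a pre-content handler may legitimately write to the file. A sketch of a read-side call site (names illustrative; it mirrors how a vfs_read()-style helper would order the calls):

static ssize_t read_with_fsnotify_sketch(struct file *file, char __user *buf,
					 size_t count, loff_t *pos)
{
	ssize_t ret;

	/* May block while an HSM fills the range; may return an error. */
	ret = fsnotify_file_area_perm(file, MAY_READ, pos, count);
	if (ret)
		return ret;

	ret = file->f_op->read(file, buf, count, pos);
	if (ret > 0)
		fsnotify_access(file);	/* async FS_ACCESS event */
	return ret;
}
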
+
+/*
+ * fsnotify_mmap_perm - permission hook before mmap of file range
+ */
+static inline int fsnotify_mmap_perm(struct file *file, int prot,
+ const loff_t off, size_t len)
+{
+ /*
+ * mmap() generates only pre-content events.
+ */
+ if (!file || likely(!FMODE_FSNOTIFY_HSM(file->f_mode)))
+ return 0;
+
+ return fsnotify_pre_content(&file->f_path, &off, len);
+}
+
+/*
+ * fsnotify_truncate_perm - permission hook before file truncate
+ */
+static inline int fsnotify_truncate_perm(const struct path *path, loff_t length)
+{
+ struct inode *inode = d_inode(path->dentry);
+
+ if (!(inode->i_sb->s_iflags & SB_I_ALLOW_HSM) ||
+ !fsnotify_sb_has_priority_watchers(inode->i_sb,
+ FSNOTIFY_PRIO_PRE_CONTENT))
+ return 0;
+
+ return fsnotify_pre_content(path, &length, 0);
+}
+
+/*
+ * fsnotify_file_perm - permission hook before file access (unknown range)
+ */
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
+}
+
+#else
+static inline int fsnotify_open_perm_and_set_mode(struct file *file)
+{
+ return 0;
+}
+
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
+{
+ return 0;
+}
- return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+static inline int fsnotify_mmap_perm(struct file *file, int prot,
+ const loff_t off, size_t len)
+{
+ return 0;
+}
+
+static inline int fsnotify_truncate_perm(const struct path *path, loff_t length)
+{
+ return 0;
+}
+
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return 0;
}
+#endif
/*
* fsnotify_link_count - inode's link count changed
*/
static inline void fsnotify_link_count(struct inode *inode)
{
- fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(inode, FS_ATTRIB);
}
/*
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
- const unsigned char *old_name,
- int isdir, struct inode *target, struct dentry *moved)
+ const struct qstr *old_name,
+ int isdir, struct inode *target,
+ struct dentry *moved)
{
struct inode *source = moved->d_inode;
u32 fs_cookie = fsnotify_get_cookie();
- __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
- __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
- const unsigned char *new_name = moved->d_name.name;
-
- if (old_dir == new_dir)
- old_dir_mask |= FS_DN_RENAME;
+ __u32 old_dir_mask = FS_MOVED_FROM;
+ __u32 new_dir_mask = FS_MOVED_TO;
+ __u32 rename_mask = FS_RENAME;
+ const struct qstr *new_name = &moved->d_name;
if (isdir) {
old_dir_mask |= FS_ISDIR;
new_dir_mask |= FS_ISDIR;
+ rename_mask |= FS_ISDIR;
}
- fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
- fs_cookie);
- fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
- fs_cookie);
+ /* Event with information about both old and new parent+name */
+ fsnotify_name(rename_mask, moved, FSNOTIFY_EVENT_DENTRY,
+ old_dir, old_name, 0);
+
+ fsnotify_name(old_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ old_dir, old_name, fs_cookie);
+ fsnotify_name(new_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ new_dir, new_name, fs_cookie);
if (target)
fsnotify_link_count(target);
-
- if (source)
- fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(source, FS_MOVE_SELF);
audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE);
}
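
Editor's note: all three name events above come from a single call site in the rename path. A sketch of the expected invocation once the filesystem's ->rename() has succeeded (a stable copy of the old name must be taken beforehand; the wrapper is illustrative):

static void rename_notify_sketch(struct inode *old_dir, struct inode *new_dir,
				 const struct qstr *old_name,
				 struct inode *replaced, struct dentry *moved)
{
	/* Emits FS_RENAME plus the cookie-paired FS_MOVED_FROM/FS_MOVED_TO. */
	fsnotify_move(old_dir, new_dir, old_name, d_is_dir(moved),
		      replaced, moved);
}
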
@@ -113,17 +295,9 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
__fsnotify_vfsmount_delete(mnt);
}
-/*
- * fsnotify_nameremove - a filename was removed from a directory
- */
-static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
+static inline void fsnotify_mntns_delete(struct mnt_namespace *mntns)
{
- __u32 mask = FS_DELETE;
-
- if (isdir)
- mask |= FS_ISDIR;
-
- fsnotify_parent(NULL, dentry, mask);
+ __fsnotify_mntns_delete(mntns);
}
/*
@@ -131,44 +305,116 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
*/
static inline void fsnotify_inoderemove(struct inode *inode)
{
- fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(inode, FS_DELETE_SELF);
__fsnotify_inode_delete(inode);
}
/*
* fsnotify_create - 'name' was linked in
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_create(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
+ fsnotify_dirent(dir, dentry, FS_CREATE);
}
/*
* fsnotify_link - new hardlink in 'inode' directory
+ *
+ * Caller must make sure that new_dentry->d_name is stable.
* Note: We have to pass also the linked inode ptr as some filesystems leave
* new_dentry->d_inode NULL and instantiate inode pointer later
*/
-static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
+static inline void fsnotify_link(struct inode *dir, struct inode *inode,
+ struct dentry *new_dentry)
{
fsnotify_link_count(inode);
audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
+ fsnotify_name(FS_CREATE, inode, FSNOTIFY_EVENT_INODE,
+ dir, &new_dentry->d_name, 0);
+}
+
+/*
+ * fsnotify_delete - @dentry was unlinked and unhashed
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ *
+ * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode
+ * as this may be called after d_delete() and @dentry may be negative.
+ */
+static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
+ struct dentry *dentry)
+{
+ __u32 mask = FS_DELETE;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name,
+ 0);
+}
+
+/**
+ * d_delete_notify - delete a dentry and call fsnotify_delete()
+ * @dir: inode of the parent directory
+ * @dentry: the dentry to delete
+ *
+ * This helper is used to guarantee that the unlinked inode cannot be found
+ * by lookup of this name after fsnotify_delete() event has been delivered.
+ */
+static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+
+ ihold(inode);
+ d_delete(dentry);
+ fsnotify_delete(dir, inode, dentry);
+ iput(inode);
+}
+
+/*
+ * fsnotify_unlink - 'name' was unlinked
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ */
+static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
+
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
* fsnotify_mkdir - directory 'name' was created
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry)
{
- __u32 mask = (FS_CREATE | FS_ISDIR);
- struct inode *d_inode = dentry->d_inode;
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ fsnotify_dirent(dir, dentry, FS_CREATE | FS_ISDIR);
+}
+
+/*
+ * fsnotify_rmdir - directory 'name' was removed
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ */
+static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
@@ -176,17 +422,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_access(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_ACCESS;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ fsnotify_file(file, FS_ACCESS);
}
/*
@@ -194,17 +430,7 @@ static inline void fsnotify_access(struct file *file)
*/
static inline void fsnotify_modify(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_MODIFY;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ fsnotify_file(file, FS_MODIFY);
}
/*
@@ -212,15 +438,12 @@ static inline void fsnotify_modify(struct file *file)
*/
static inline void fsnotify_open(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_OPEN;
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
+ if (file->f_flags & __FMODE_EXEC)
+ mask |= FS_OPEN_EXEC;
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ fsnotify_file(file, mask);
}
/*
@@ -228,18 +451,10 @@ static inline void fsnotify_open(struct file *file)
*/
static inline void fsnotify_close(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
- fmode_t mode = file->f_mode;
- __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
+ __u32 mask = (file->f_mode & FMODE_WRITE) ? FS_CLOSE_WRITE :
+ FS_CLOSE_NOWRITE;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ fsnotify_file(file, mask);
}
/*
@@ -247,14 +462,7 @@ static inline void fsnotify_close(struct file *file)
*/
static inline void fsnotify_xattr(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
- __u32 mask = FS_ATTRIB;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- fsnotify_parent(NULL, dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_dentry(dentry, FS_ATTRIB);
}
/*
@@ -263,7 +471,6 @@ static inline void fsnotify_xattr(struct dentry *dentry)
*/
static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
{
- struct inode *inode = dentry->d_inode;
__u32 mask = 0;
if (ia_valid & ATTR_UID)
@@ -284,13 +491,36 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
if (ia_valid & ATTR_MODE)
mask |= FS_ATTRIB;
- if (mask) {
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
+ if (mask)
+ fsnotify_dentry(dentry, mask);
+}
- fsnotify_parent(NULL, dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
- }
+static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode,
+ int error)
+{
+ struct fs_error_report report = {
+ .error = error,
+ .inode = inode,
+ .sb = sb,
+ };
+
+ return fsnotify(FS_ERROR, &report, FSNOTIFY_EVENT_ERROR,
+ NULL, NULL, NULL, 0);
+}
+
+static inline void fsnotify_mnt_attach(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_ATTACH, ns, mnt);
+}
+
+static inline void fsnotify_mnt_detach(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_DETACH, ns, mnt);
+}
+
+static inline void fsnotify_mnt_move(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_MOVE, ns, mnt);
}
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index c6c69318752b..0d954ea7b179 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filesystem access notification for Linux
*
@@ -17,6 +18,9 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/user_namespace.h>
+#include <linux/refcount.h>
+#include <linux/mempool.h>
+#include <linux/sched/mm.h>
/*
 * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
@@ -27,8 +31,8 @@
#define FS_ACCESS 0x00000001 /* File was accessed */
#define FS_MODIFY 0x00000002 /* File was modified */
#define FS_ATTRIB 0x00000004 /* Metadata changed */
-#define FS_CLOSE_WRITE 0x00000008 /* Writtable file was closed */
-#define FS_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
+#define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */
+#define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define FS_OPEN 0x00000020 /* File was opened */
#define FS_MOVED_FROM 0x00000040 /* File was moved from X */
#define FS_MOVED_TO 0x00000080 /* File was moved to Y */
@@ -36,44 +40,91 @@
#define FS_DELETE 0x00000200 /* Subfile was deleted */
#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
#define FS_MOVE_SELF 0x00000800 /* Self was moved */
+#define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FS_ERROR 0x00008000 /* Filesystem Error (fanotify) */
+
+/*
+ * FS_IN_IGNORED overloads FS_ERROR. It is only used internally by inotify
+ * which does not support FS_ERROR.
+ */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permission hook */
+#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
+/* #define FS_DIR_MODIFY 0x00080000 */ /* Deprecated (reserved) */
-#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
-#define FS_ISDIR 0x40000000 /* event occurred against dir */
-#define FS_IN_ONESHOT 0x80000000 /* only send event once */
+#define FS_PRE_ACCESS 0x00100000 /* Pre-content access hook */
-#define FS_DN_RENAME 0x10000000 /* file renamed */
-#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
+#define FS_MNT_ATTACH 0x01000000 /* Mount was attached */
+#define FS_MNT_DETACH 0x02000000 /* Mount was detached */
+#define FS_MNT_MOVE (FS_MNT_ATTACH | FS_MNT_DETACH)
-/* This inode cares about things that happen to its children. Always set for
- * dnotify and inotify. */
+/*
+ * Set on inode mark that cares about things that happen to its children.
+ * Always set for dnotify and inotify.
+ * Set on inode/sb/mount marks that care about parent/name info.
+ */
#define FS_EVENT_ON_CHILD 0x08000000
-/* This is a list of all events that may get sent to a parernt based on fs event
- * happening to inodes inside that directory */
-#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
- FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
- FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
- FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
+#define FS_RENAME 0x10000000 /* File was renamed */
+#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
+#define FS_ISDIR 0x40000000 /* event occurred against dir */
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
-#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM)
+/*
+ * Directory entry modification events - reported only to directory
+ * where entry is modified and not to a watching parent.
+ * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
+ * when a directory entry inside a child subdir changes.
+ */
+#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
+
+/* Mount namespace events */
+#define FSNOTIFY_MNT_EVENTS (FS_MNT_ATTACH | FS_MNT_DETACH)
+
+/* Content events can be used to inspect file content */
+#define FSNOTIFY_CONTENT_PERM_EVENTS (FS_OPEN_PERM | FS_OPEN_EXEC_PERM | \
+ FS_ACCESS_PERM)
+/* Pre-content events can be used to fill file content */
+#define FSNOTIFY_PRE_CONTENT_EVENTS (FS_PRE_ACCESS)
+
+#define ALL_FSNOTIFY_PERM_EVENTS (FSNOTIFY_CONTENT_PERM_EVENTS | \
+ FSNOTIFY_PRE_CONTENT_EVENTS)
-#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
- FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \
- FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
- FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
+/*
+ * This is a list of all events that may get sent to a parent that is watching
+ * with flag FS_EVENT_ON_CHILD based on fs event on a child of that directory.
+ */
+#define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \
+ FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
+ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \
+ FS_OPEN | FS_OPEN_EXEC)
+
+/*
+ * This is a list of all events that may get sent with the parent inode as the
+ * @to_tell argument of fsnotify().
+ * It may include events that can be sent to an inode/sb/mount mark, but cannot
+ * be sent to a parent watching children.
+ */
+#define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD)
+
+/* Events that can be reported to backends */
+#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
+ FSNOTIFY_MNT_EVENTS | \
+ FS_EVENTS_POSS_ON_CHILD | \
+ FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
- FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \
- FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \
- FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
+ FS_ERROR)
+
+/* Extra flags that may be reported with event or control handling of events */
+#define ALL_FSNOTIFY_FLAGS (FS_ISDIR | FS_EVENT_ON_CHILD | FS_DN_MULTISHOT)
+
+#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
struct fsnotify_group;
struct fsnotify_event;
@@ -82,28 +133,53 @@ struct fsnotify_event_private_data;
struct fsnotify_fname;
struct fsnotify_iter_info;
+struct mem_cgroup;
+
/*
 * Each group must define these ops. The fsnotify infrastructure will call
* these operations for each relevant group.
*
* handle_event - main call for a group to handle an fs event
+ * @group: group to notify
+ * @mask: event type and flags
+ * @data: object that event happened on
+ * @data_type: type of object for fanotify_data_XXX() accessors
+ * @dir: optional directory associated with event -
+ * if @file_name is not NULL, this is the directory that
+ * @file_name is relative to
+ * @file_name: optional file name associated with event
+ * @cookie: inotify rename cookie
+ * @iter_info: array of marks from this group that are interested in the event
+ *
+ * handle_inode_event - simple variant of handle_event() for groups that only
+ * have inode marks and don't have ignore mask
+ * @mark: mark to notify
+ * @mask: event type and flags
+ * @inode: inode that event happened on
+ * @dir: optional directory associated with event -
+ * if @file_name is not NULL, this is the directory that
+ * @file_name is relative to.
+ * Either @inode or @dir must be non-NULL.
+ * @file_name: optional file name associated with event
+ * @cookie: inotify rename cookie
+ *
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - called when a mark is being destroyed for some reason. The group
- * MUST be holding a reference on each mark and that reference must be
- * dropped in this function. inotify uses this function to send
- * userspace messages that marks have been removed.
+ * MUST be holding a reference on each mark and that reference must be
+ * dropped in this function. inotify uses this function to send
+ * userspace messages that marks have been removed.
*/
struct fsnotify_ops {
- int (*handle_event)(struct fsnotify_group *group,
- struct inode *inode,
- struct fsnotify_mark *inode_mark,
- struct fsnotify_mark *vfsmount_mark,
- u32 mask, const void *data, int data_type,
- const unsigned char *file_name, u32 cookie,
+ int (*handle_event)(struct fsnotify_group *group, u32 mask,
+ const void *data, int data_type, struct inode *dir,
+ const struct qstr *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info);
+ int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask,
+ struct inode *inode, struct inode *dir,
+ const struct qstr *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
- void (*free_event)(struct fsnotify_event *event);
+ void (*free_event)(struct fsnotify_group *group, struct fsnotify_event *event);
/* called on final put+free to free memory */
void (*free_mark)(struct fsnotify_mark *mark);
};
@@ -115,9 +191,17 @@ struct fsnotify_ops {
*/
struct fsnotify_event {
struct list_head list;
- /* inode may ONLY be dereferenced during handle_event(). */
- struct inode *inode; /* either the inode the event happened to or its parent */
- u32 mask; /* the type of access, bitwise OR for FS_* event types */
+};
+
+/*
+ * fsnotify group priorities.
+ * Events are sent in order from highest priority to lowest priority.
+ */
+enum fsnotify_group_prio {
+ FSNOTIFY_PRIO_NORMAL = 0, /* normal notifiers, no permissions */
+ FSNOTIFY_PRIO_CONTENT, /* fanotify permission events */
+ FSNOTIFY_PRIO_PRE_CONTENT, /* fanotify pre-content events */
+ __FSNOTIFY_PRIO_NUM
};
/*
@@ -127,6 +211,8 @@ struct fsnotify_event {
* everything will be cleaned up.
*/
struct fsnotify_group {
+ const struct fsnotify_ops *ops; /* how this group handles things */
+
/*
* How the refcnt is used is up to each group. When the refcnt hits 0
* fsnotify will clean up all of the resources associated with this group.
@@ -135,9 +221,7 @@ struct fsnotify_group {
* inotify_init() and the refcnt will hit 0 only when that fd has been
* closed.
*/
- atomic_t refcnt; /* things with interest in this group */
-
- const struct fsnotify_ops *ops; /* how this group handles things */
+ refcount_t refcnt; /* things with interest in this group */
/* needed to send notification to userspace */
spinlock_t notification_lock; /* protect the notification_list */
@@ -145,21 +229,18 @@ struct fsnotify_group {
wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
unsigned int q_len; /* events on the queue */
unsigned int max_events; /* maximum events allowed on the list */
- /*
- * Valid fsnotify group priorities. Events are send in order from highest
- * priority to lowest priority. We default to the lowest priority.
- */
- #define FS_PRIO_0 0 /* normal notifiers, no permissions */
- #define FS_PRIO_1 1 /* fanotify content based access control */
- #define FS_PRIO_2 2 /* fanotify pre-content access */
- unsigned int priority;
+ enum fsnotify_group_prio priority; /* priority for sending events */
bool shutdown; /* group is being shut down, don't queue more events */
+#define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */
+#define FSNOTIFY_GROUP_DUPS 0x02 /* allow multiple marks per object */
+ int flags;
+ unsigned int owner_flags; /* stored flags of mark_mutex owner */
+
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
- atomic_t num_marks; /* 1 for each mark and 1 for not being
- * past the point of no return when freeing
- * a group */
+ atomic_t user_waits; /* Number of tasks waiting for user
+ * response */
struct list_head marks_list; /* all inode marks for this group */
struct fasync_struct *fsn_fa; /* async notification */
@@ -167,8 +248,9 @@ struct fsnotify_group {
struct fsnotify_event *overflow_event; /* Event we queue when the
* notification list is too
* full */
- atomic_t user_waits; /* Number of tasks waiting for user
- * response */
+
+ struct mem_cgroup *memcg; /* memcg to charge allocations */
+ struct user_namespace *user_ns; /* user ns where group was created */
/* groups can define private fields here or use the void *private */
union {
@@ -182,49 +264,330 @@ struct fsnotify_group {
#endif
#ifdef CONFIG_FANOTIFY
struct fanotify_group_private_data {
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ /* Hash table of events for merge */
+ struct hlist_head *merge_hash;
/* allows a group to block waiting for a userspace response */
struct list_head access_list;
wait_queue_head_t access_waitq;
-#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
- int f_flags;
- unsigned int max_marks;
- struct user_struct *user;
+ int flags; /* flags from fanotify_init() */
+ int f_flags; /* event_f_flags from fanotify_init() */
+ struct ucounts *ucounts;
+ mempool_t error_events_pool;
+ /* chained on perm_group_list */
+ struct list_head perm_grp_list;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
};
-/* when calling fsnotify tell it if the data is a path or inode */
-#define FSNOTIFY_EVENT_NONE 0
-#define FSNOTIFY_EVENT_PATH 1
-#define FSNOTIFY_EVENT_INODE 2
+/*
+ * These helpers are used to prevent deadlock when reclaiming inodes with
+ * evictable marks of the same group that is allocating a new mark.
+ */
+static inline void fsnotify_group_lock(struct fsnotify_group *group)
+{
+ mutex_lock(&group->mark_mutex);
+ group->owner_flags = memalloc_nofs_save();
+}
+
+static inline void fsnotify_group_unlock(struct fsnotify_group *group)
+{
+ memalloc_nofs_restore(group->owner_flags);
+ mutex_unlock(&group->mark_mutex);
+}
+
+static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
+{
+ WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
+ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
+}
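
Editor's note: the lock wrappers pair mutex_lock() with memalloc_nofs_save() so that reclaim entered while holding mark_mutex cannot recurse into the same group. A sketch of the intended usage around mark insertion (the wrapper is illustrative):

static int add_mark_sketch(struct fsnotify_group *group,
			   struct fsnotify_mark *mark, struct inode *inode)
{
	int ret;

	fsnotify_group_lock(group);	/* takes mark_mutex + NOFS scope */
	ret = fsnotify_add_inode_mark_locked(mark, inode, 0);
	fsnotify_group_unlock(group);	/* restores allocation flags */
	return ret;
}
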
+
+/* When calling fsnotify tell it if the data is a path or inode */
+enum fsnotify_data_type {
+ FSNOTIFY_EVENT_NONE,
+ FSNOTIFY_EVENT_FILE_RANGE,
+ FSNOTIFY_EVENT_PATH,
+ FSNOTIFY_EVENT_INODE,
+ FSNOTIFY_EVENT_DENTRY,
+ FSNOTIFY_EVENT_MNT,
+ FSNOTIFY_EVENT_ERROR,
+};
+
+struct fs_error_report {
+ int error;
+ struct inode *inode;
+ struct super_block *sb;
+};
+
+struct file_range {
+ const struct path *path;
+ loff_t pos;
+ size_t count;
+};
+
+static inline const struct path *file_range_path(const struct file_range *range)
+{
+ return range->path;
+}
+
+struct fsnotify_mnt {
+ const struct mnt_namespace *ns;
+ u64 mnt_id;
+};
+
+static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return (struct inode *)data;
+ case FSNOTIFY_EVENT_DENTRY:
+ return d_inode(data);
+ case FSNOTIFY_EVENT_PATH:
+ return d_inode(((const struct path *)data)->dentry);
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return d_inode(file_range_path(data)->dentry);
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *)data)->inode;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct dentry *fsnotify_data_dentry(const void *data, int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_DENTRY:
+ /* Non const is needed for dget() */
+ return (struct dentry *)data;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data)->dentry;
+ default:
+ return NULL;
+ }
+}
+
+static inline const struct path *fsnotify_data_path(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_PATH:
+ return data;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data);
+ default:
+ return NULL;
+ }
+}
+
+static inline struct super_block *fsnotify_data_sb(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return ((struct inode *)data)->i_sb;
+ case FSNOTIFY_EVENT_DENTRY:
+ return ((struct dentry *)data)->d_sb;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *) data)->sb;
+ default:
+ return NULL;
+ }
+}
+
+static inline const struct fsnotify_mnt *fsnotify_data_mnt(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_MNT:
+ return data;
+ default:
+ return NULL;
+ }
+}
+
+static inline u64 fsnotify_data_mnt_id(const void *data, int data_type)
+{
+ const struct fsnotify_mnt *mnt_data = fsnotify_data_mnt(data, data_type);
+
+ return mnt_data ? mnt_data->mnt_id : 0;
+}
+
+static inline struct fs_error_report *fsnotify_data_error_report(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_ERROR:
+ return (struct fs_error_report *) data;
+ default:
+ return NULL;
+ }
+}
+
+static inline const struct file_range *fsnotify_data_file_range(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return (struct file_range *)data;
+ default:
+ return NULL;
+ }
+}
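
Editor's note: a backend's handle_event() is expected to go through these accessors rather than casting @data itself, since a NULL return cleanly rejects data types the backend does not understand. A hedged sketch:

static int sketch_handle_event(struct fsnotify_group *group, u32 mask,
			       const void *data, int data_type,
			       struct inode *dir, const struct qstr *name,
			       u32 cookie, struct fsnotify_iter_info *iter_info)
{
	struct inode *inode = fsnotify_data_inode(data, data_type);

	if (!inode)	/* e.g. FSNOTIFY_EVENT_MNT: not an inode event */
		return 0;

	pr_debug("event mask 0x%x on inode %lu\n", mask, inode->i_ino);
	return 0;
}
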
/*
- * Inode / vfsmount point to this structure which tracks all marks attached to
- * the inode / vfsmount. The reference to inode / vfsmount is held by this
+ * Index to merged marks iterator array that correlates to a type of watch.
+ * The type of watched object can be deduced from the iterator type, but not
+ * the other way around, because an event can match different watched objects
+ * of the same object type.
+ * For example, both parent and child are watching an object of type inode.
+ */
+enum fsnotify_iter_type {
+ FSNOTIFY_ITER_TYPE_INODE,
+ FSNOTIFY_ITER_TYPE_VFSMOUNT,
+ FSNOTIFY_ITER_TYPE_SB,
+ FSNOTIFY_ITER_TYPE_PARENT,
+ FSNOTIFY_ITER_TYPE_INODE2,
+ FSNOTIFY_ITER_TYPE_MNTNS,
+ FSNOTIFY_ITER_TYPE_COUNT
+};
+
+/* The type of object that a mark is attached to */
+enum fsnotify_obj_type {
+ FSNOTIFY_OBJ_TYPE_ANY = -1,
+ FSNOTIFY_OBJ_TYPE_INODE,
+ FSNOTIFY_OBJ_TYPE_VFSMOUNT,
+ FSNOTIFY_OBJ_TYPE_SB,
+ FSNOTIFY_OBJ_TYPE_MNTNS,
+ FSNOTIFY_OBJ_TYPE_COUNT,
+ FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
+};
+
+static inline bool fsnotify_valid_obj_type(unsigned int obj_type)
+{
+ return (obj_type < FSNOTIFY_OBJ_TYPE_COUNT);
+}
+
+struct fsnotify_iter_info {
+ struct fsnotify_mark *marks[FSNOTIFY_ITER_TYPE_COUNT];
+ struct fsnotify_group *current_group;
+ unsigned int report_mask;
+ int srcu_idx;
+};
+
+static inline bool fsnotify_iter_should_report_type(
+ struct fsnotify_iter_info *iter_info, int iter_type)
+{
+ return (iter_info->report_mask & (1U << iter_type));
+}
+
+static inline void fsnotify_iter_set_report_type(
+ struct fsnotify_iter_info *iter_info, int iter_type)
+{
+ iter_info->report_mask |= (1U << iter_type);
+}
+
+static inline struct fsnotify_mark *fsnotify_iter_mark(
+ struct fsnotify_iter_info *iter_info, int iter_type)
+{
+ if (fsnotify_iter_should_report_type(iter_info, iter_type))
+ return iter_info->marks[iter_type];
+ return NULL;
+}
+
+static inline int fsnotify_iter_step(struct fsnotify_iter_info *iter, int type,
+ struct fsnotify_mark **markp)
+{
+ while (type < FSNOTIFY_ITER_TYPE_COUNT) {
+ *markp = fsnotify_iter_mark(iter, type);
+ if (*markp)
+ break;
+ type++;
+ }
+ return type;
+}
+
+#define FSNOTIFY_ITER_FUNCS(name, NAME) \
+static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
+ struct fsnotify_iter_info *iter_info) \
+{ \
+ return fsnotify_iter_mark(iter_info, FSNOTIFY_ITER_TYPE_##NAME); \
+}
+
+FSNOTIFY_ITER_FUNCS(inode, INODE)
+FSNOTIFY_ITER_FUNCS(parent, PARENT)
+FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
+FSNOTIFY_ITER_FUNCS(sb, SB)
+
+#define fsnotify_foreach_iter_type(type) \
+ for (type = 0; type < FSNOTIFY_ITER_TYPE_COUNT; type++)
+#define fsnotify_foreach_iter_mark_type(iter, mark, type) \
+ for (type = 0; \
+ type = fsnotify_iter_step(iter, type, &mark), \
+ type < FSNOTIFY_ITER_TYPE_COUNT; \
+ type++)
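
Editor's note: fsnotify_foreach_iter_mark_type() visits only the types whose bit is set in report_mask, handing back the mark together with its iterator slot. A sketch of the canonical loop shape, here OR-ing together the interests of all marks matched by one event:

static __u32 sketch_collect_marks_mask(struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *mark;
	__u32 interest = 0;
	int type;

	fsnotify_foreach_iter_mark_type(iter_info, mark, type)
		interest |= mark->mask;	/* mark is non-NULL for each visited type */

	return interest;
}
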
+
+/*
+ * Inode/vfsmount/sb point to this structure which tracks all marks attached to
+ * the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this
* structure. We destroy this structure when there are no more marks attached
* to it. The structure is protected by fsnotify_mark_srcu.
*/
struct fsnotify_mark_connector {
spinlock_t lock;
-#define FSNOTIFY_OBJ_TYPE_INODE 0x01
-#define FSNOTIFY_OBJ_TYPE_VFSMOUNT 0x02
-#define FSNOTIFY_OBJ_ALL_TYPES (FSNOTIFY_OBJ_TYPE_INODE | \
- FSNOTIFY_OBJ_TYPE_VFSMOUNT)
- unsigned int flags; /* Type of object [lock] */
- union { /* Object pointer [lock] */
- struct inode *inode;
- struct vfsmount *mnt;
- };
+ unsigned char type; /* Type of object [lock] */
+ unsigned char prio; /* Highest priority group */
+#define FSNOTIFY_CONN_FLAG_IS_WATCHED 0x01
+#define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02
+ unsigned short flags; /* flags [lock] */
union {
- struct hlist_head list;
+ /* Object pointer [lock] */
+ void *obj;
/* Used listing heads to free after srcu period expires */
struct fsnotify_mark_connector *destroy_next;
};
+ struct hlist_head list;
};
/*
+ * Container for per-sb fsnotify state (sb marks and more).
+ * Attached lazily on first marked object on the sb and freed when killing sb.
+ */
+struct fsnotify_sb_info {
+ struct fsnotify_mark_connector __rcu *sb_marks;
+ /*
+ * Number of inode/mount/sb objects that are being watched in this sb.
+	 * Note that inode objects are currently double-accounted.
+ *
+ * The value in watched_objects[prio] is the number of objects that are
+ * watched by groups of priority >= prio, so watched_objects[0] is the
+ * total number of watched objects in this sb.
+ */
+ atomic_long_t watched_objects[__FSNOTIFY_PRIO_NUM];
+};
+
+static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb)
+{
+#ifdef CONFIG_FSNOTIFY
+ return READ_ONCE(sb->s_fsnotify_info);
+#else
+ return NULL;
+#endif
+}
+
+static inline atomic_long_t *fsnotify_sb_watched_objects(struct super_block *sb)
+{
+ return &fsnotify_sb_info(sb)->watched_objects[0];
+}
+
+/*
* A mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
@@ -243,11 +606,11 @@ struct fsnotify_mark {
__u32 mask;
/* We hold one for presence in g_list. Also one ref for each 'thing'
* in kernel that found and may be using this mark. */
- atomic_t refcnt;
+ refcount_t refcnt;
/* Group this mark is for. Set on mark creation, stable until last ref
* is dropped */
struct fsnotify_group *group;
- /* List of marks by group->i_fsnotify_marks. Also reused for queueing
+ /* List of marks by group->marks_list. Also reused for queueing
* mark into destroy_list when it's waiting for the end of SRCU period
* before it can be freed. [group->mark_mutex] */
struct list_head g_list;
@@ -257,11 +620,20 @@ struct fsnotify_mark {
struct hlist_node obj_list;
/* Head of list of marks for an object [mark ref] */
struct fsnotify_mark_connector *connector;
- /* Events types to ignore [mark->lock, group->mark_mutex] */
- __u32 ignored_mask;
-#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01
-#define FSNOTIFY_MARK_FLAG_ALIVE 0x02
-#define FSNOTIFY_MARK_FLAG_ATTACHED 0x04
+	/* Event types and flags to ignore [mark->lock, group->mark_mutex] */
+ __u32 ignore_mask;
+ /* General fsnotify mark flags */
+#define FSNOTIFY_MARK_FLAG_ALIVE 0x0001
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x0002
+ /* inotify mark flags */
+#define FSNOTIFY_MARK_FLAG_EXCL_UNLINK 0x0010
+#define FSNOTIFY_MARK_FLAG_IN_ONESHOT 0x0020
+ /* fanotify mark flags */
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
+#define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200
+#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400
+#define FSNOTIFY_MARK_FLAG_HAS_FSID 0x0800
+#define FSNOTIFY_MARK_FLAG_WEAK_FSID 0x1000
unsigned int flags; /* flags [mark->lock] */
};
@@ -270,21 +642,42 @@ struct fsnotify_mark {
/* called from the vfs helpers */
/* main fsnotify call to send events */
-extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const unsigned char *name, u32 cookie);
-extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
+extern int fsnotify(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ struct inode *inode, u32 cookie);
+extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ int data_type);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
+extern void fsnotify_sb_delete(struct super_block *sb);
+extern void __fsnotify_mntns_delete(struct mnt_namespace *mntns);
+extern void fsnotify_sb_free(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
+extern void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt);
+
+static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
+{
+ /* FS_EVENT_ON_CHILD is set on marks that want parent/name info */
+ if (!(mask & FS_EVENT_ON_CHILD))
+ return 0;
+ /*
+ * This object might be watched by a mark that cares about parent/name
+ * info, does it care about the specific set of events that can be
+ * reported with parent/name info?
+ */
+ return mask & FS_EVENTS_POSS_TO_PARENT;
+}
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
+ __u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
+
/* FS_EVENT_ON_CHILD is set if the inode may care */
- if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
+ if (!(parent_mask & FS_EVENT_ON_CHILD))
return 0;
/* this inode might care about child events, does it care about the
* specific set of events that can happen on a child? */
- return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
+ return parent_mask & FS_EVENTS_POSS_ON_CHILD;
}
/*
@@ -298,7 +691,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/*
* Serialisation of setting PARENT_WATCHED on the dentries is provided
* by d_lock. If inotify_inode_watched changes after we have taken
- * d_lock, the following __fsnotify_update_child_dentry_flags call will
+ * d_lock, the following fsnotify_set_children_dentry_flags call will
* find our entry, so it will spin until we complete here, and update
* us with the new state.
*/
@@ -311,7 +704,9 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/* called from fsnotify listeners, such as fanotify or dnotify */
/* create a new group */
-extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
+extern struct fsnotify_group *fsnotify_alloc_group(
+ const struct fsnotify_ops *ops,
+ int flags);
/* get reference to a group */
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
@@ -326,32 +721,183 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
extern void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
-extern int fsnotify_add_event(struct fsnotify_group *group,
- struct fsnotify_event *event,
- int (*merge)(struct list_head *,
- struct fsnotify_event *));
-/* true if the group notification queue is empty */
+extern int fsnotify_insert_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *),
+ void (*insert)(struct fsnotify_group *,
+ struct fsnotify_event *));
+
+static inline int fsnotify_add_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *))
+{
+ return fsnotify_insert_event(group, event, merge, NULL);
+}
+
+/* Queue overflow event to a notification group */
+static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
+{
+ fsnotify_add_event(group, group->overflow_event, NULL);
+}
+
+static inline bool fsnotify_is_overflow_event(u32 mask)
+{
+ return mask & FS_Q_OVERFLOW;
+}
+
+static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
+{
+ assert_spin_locked(&group->notification_lock);
+
+ return list_empty(&group->notification_list);
+}
+
-extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
/* return AND dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group);
+/* Remove event queued in the notification list */
+extern void fsnotify_remove_queued_event(struct fsnotify_group *group,
+ struct fsnotify_event *event);
/* functions used to manipulate the marks attached to inodes */
+/*
+ * Canonical "ignore mask" including event flags.
+ *
+ * Note the subtle semantic difference from the legacy ->ignored_mask.
+ * ->ignored_mask traditionally only meant which events should be ignored,
+ * while ->ignore_mask also includes flags regarding the type of objects on
+ * which events should be ignored.
+ */
+static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark)
+{
+ __u32 ignore_mask = mark->ignore_mask;
+
+ /* The event flags in ignore mask take effect */
+ if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS)
+ return ignore_mask;
+
+ /*
+ * Legacy behavior:
+ * - Always ignore events on dir
+ * - Ignore events on child if parent is watching children
+ */
+ ignore_mask |= FS_ISDIR;
+ ignore_mask &= ~FS_EVENT_ON_CHILD;
+ ignore_mask |= mark->mask & FS_EVENT_ON_CHILD;
+
+ return ignore_mask;
+}
+
+/* Legacy ignored_mask - only event types to ignore */
+static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark)
+{
+ return mark->ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/*
+ * Check if mask (or ignore mask) should be applied depending if victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir,
+ int iter_type)
+{
+ /* Should mask be applied to a directory? */
+ if (is_dir && !(mask & FS_ISDIR))
+ return false;
+
+ /* Should mask be applied to a child? */
+ if (iter_type == FSNOTIFY_ITER_TYPE_PARENT &&
+ !(mask & FS_EVENT_ON_CHILD))
+ return false;
+
+ return true;
+}
+
+/*
+ * Effective ignore mask taking into account if event victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark,
+ bool is_dir, int iter_type)
+{
+ __u32 ignore_mask = fsnotify_ignored_events(mark);
+
+ if (!ignore_mask)
+ return 0;
+
+ /* For non-dir and non-child, no need to consult the event flags */
+ if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT)
+ return ignore_mask;
+
+ ignore_mask = fsnotify_ignore_mask(mark);
+ if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type))
+ return 0;
+
+ return ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/* Get mask for calculating object interest taking ignore mask into account */
+static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark)
+{
+ __u32 mask = mark->mask;
+
+ if (!fsnotify_ignored_events(mark))
+ return mask;
+
+ /* Interest in FS_MODIFY may be needed for clearing ignore mask */
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ mask |= FS_MODIFY;
+
+ /*
+ * If mark is interested in ignoring events on children, the object must
+ * show interest in those events for fsnotify_parent() to notice it.
+ */
+ return mask | mark->ignore_mask;
+}
+
+/* Get mask of events for a list of marks */
+extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn);
/* Calculate mask of events for a list of marks */
extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn);
extern void fsnotify_init_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
/* Find mark belonging to given group in the list of marks */
-extern struct fsnotify_mark *fsnotify_find_mark(
- struct fsnotify_mark_connector __rcu **connp,
- struct fsnotify_group *group);
-/* attach the mark to the inode or vfsmount */
-extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
- struct vfsmount *mnt, int allow_dups);
-extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
- struct inode *inode, struct vfsmount *mnt, int allow_dups);
+struct fsnotify_mark *fsnotify_find_mark(void *obj, unsigned int obj_type,
+ struct fsnotify_group *group);
+/* attach the mark to the object */
+int fsnotify_add_mark(struct fsnotify_mark *mark, void *obj,
+ unsigned int obj_type, int add_flags);
+int fsnotify_add_mark_locked(struct fsnotify_mark *mark, void *obj,
+ unsigned int obj_type, int add_flags);
+
+/* attach the mark to the inode */
+static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+ struct inode *inode,
+ int add_flags)
+{
+ return fsnotify_add_mark(mark, inode, FSNOTIFY_OBJ_TYPE_INODE,
+ add_flags);
+}
+static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
+ struct inode *inode,
+ int add_flags)
+{
+ return fsnotify_add_mark_locked(mark, inode, FSNOTIFY_OBJ_TYPE_INODE,
+ add_flags);
+}
+
+static inline struct fsnotify_mark *fsnotify_find_inode_mark(
+ struct inode *inode,
+ struct fsnotify_group *group)
+{
+ return fsnotify_find_mark(inode, FSNOTIFY_OBJ_TYPE_INODE, group);
+}
+
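Editor's note: tying the pieces together, a minimal backend allocates a group with its ops table, initializes a mark against that group, and attaches the mark to an inode. Everything below is an illustrative sketch (no such in-tree backend), using only APIs declared in this header:

static int sketch_handle_inode_event(struct fsnotify_mark *mark, u32 mask,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *name, u32 cookie)
{
	return 0;	/* consume events silently in this sketch */
}

static void sketch_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);
}

static const struct fsnotify_ops sketch_backend_ops = {
	.handle_inode_event	= sketch_handle_inode_event,
	.free_mark		= sketch_free_mark,
};

static int sketch_watch_inode(struct inode *inode)
{
	struct fsnotify_group *group;
	struct fsnotify_mark *mark;
	int ret;

	group = fsnotify_alloc_group(&sketch_backend_ops, 0);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mark = kzalloc(sizeof(*mark), GFP_KERNEL);	/* freed via ->free_mark */
	if (!mark) {
		fsnotify_put_group(group);
		return -ENOMEM;
	}

	fsnotify_init_mark(mark, group);
	mark->mask = FS_OPEN | FS_CLOSE_WRITE;
	ret = fsnotify_add_inode_mark(mark, inode, 0);
	fsnotify_put_mark(mark);	/* the object list holds its own reference */
	return ret;
}
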
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
@@ -359,37 +905,40 @@ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
/* free mark */
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
-/* run all the marks in a group, and clear all of the marks attached to given object type */
-extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type);
-/* run all the marks in a group, and clear all of the vfsmount marks */
-static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
-}
-/* run all the marks in a group, and clear all of the inode marks */
-static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
-}
+/* Wait until all marks queued for destruction are destroyed */
+extern void fsnotify_wait_marks_destroyed(void);
+/* Clear all of the marks of a group attached to a given object type */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
+ unsigned int obj_type);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
-extern void fsnotify_unmount_inodes(struct super_block *sb);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
-/* put here because inotify does some weird stuff when destroying watches */
-extern void fsnotify_init_event(struct fsnotify_event *event,
- struct inode *to_tell, u32 mask);
+static inline void fsnotify_init_event(struct fsnotify_event *event)
+{
+ INIT_LIST_HEAD(&event->list);
+}
+int fsnotify_pre_content(const struct path *path, const loff_t *ppos,
+ size_t count);
#else
-static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const unsigned char *name, u32 cookie)
+static inline int fsnotify_pre_content(const struct path *path,
+ const loff_t *ppos, size_t count)
{
return 0;
}
-static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
+static inline int fsnotify(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ struct inode *inode, u32 cookie)
+{
+ return 0;
+}
+
+static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask,
+ const void *data, int data_type)
{
return 0;
}
@@ -400,6 +949,15 @@ static inline void __fsnotify_inode_delete(struct inode *inode)
static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{}
+static inline void fsnotify_sb_delete(struct super_block *sb)
+{}
+
+static inline void __fsnotify_mntns_delete(struct mnt_namespace *mntns)
+{}
+
+static inline void fsnotify_sb_free(struct super_block *sb)
+{}
+
static inline void fsnotify_update_flags(struct dentry *dentry)
{}
@@ -411,6 +969,9 @@ static inline u32 fsnotify_get_cookie(void)
static inline void fsnotify_unmount_inodes(struct super_block *sb)
{}
+static inline void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt)
+{}
+
#endif /* CONFIG_FSNOTIFY */
#endif /* __KERNEL__ */
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
new file mode 100644
index 000000000000..5bc7280425a7
--- /dev/null
+++ b/include/linux/fsverity.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fs-verity: read-only file-based authenticity protection
+ *
+ * This header declares the interface between the fs/verity/ support layer and
+ * filesystems that support fs-verity.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef _LINUX_FSVERITY_H
+#define _LINUX_FSVERITY_H
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <crypto/hash_info.h>
+#include <crypto/sha2.h>
+#include <uapi/linux/fsverity.h>
+
+/*
+ * Largest digest size among all hash algorithms supported by fs-verity.
+ * Currently assumed to be <= size of fsverity_descriptor::root_hash.
+ */
+#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* Arbitrary limit to bound the kmalloc() size. Can be changed. */
+#define FS_VERITY_MAX_DESCRIPTOR_SIZE 16384
+
+struct fsverity_info;
+
+/* Verity operations for filesystems */
+struct fsverity_operations {
+ /**
+ * The offset of the pointer to struct fsverity_info in the
+ * filesystem-specific part of the inode, relative to the beginning of
+ * the common part of the inode (the 'struct inode').
+ */
+ ptrdiff_t inode_info_offs;
+
+ /**
+ * Begin enabling verity on the given file.
+ *
+ * @filp: a readonly file descriptor for the file
+ *
+ * The filesystem must do any needed filesystem-specific preparations
+ * for enabling verity, e.g. evicting inline data. It also must return
+ * -EBUSY if verity is already being enabled on the given file.
+ *
+ * i_rwsem is held for write.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+ int (*begin_enable_verity)(struct file *filp);
+
+ /**
+ * End enabling verity on the given file.
+ *
+ * @filp: a readonly file descriptor for the file
+ * @desc: the verity descriptor to write, or NULL on failure
+ * @desc_size: size of verity descriptor, or 0 on failure
+ * @merkle_tree_size: total bytes the Merkle tree took up
+ *
+	 * If desc == NULL, then enabling verity failed and the filesystem must
+	 * only do any necessary cleanups. Else, it must also store the given
+ * verity descriptor to a fs-specific location associated with the inode
+ * and do any fs-specific actions needed to mark the inode as a verity
+ * inode, e.g. setting a bit in the on-disk inode. The filesystem is
+ * also responsible for setting the S_VERITY flag in the VFS inode.
+ *
+ * i_rwsem is held for write, but it may have been dropped between
+ * ->begin_enable_verity() and ->end_enable_verity().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+ int (*end_enable_verity)(struct file *filp, const void *desc,
+ size_t desc_size, u64 merkle_tree_size);
+
+ /**
+ * Get the verity descriptor of the given inode.
+ *
+ * @inode: an inode with the S_VERITY flag set
+ * @buf: buffer in which to place the verity descriptor
+ * @bufsize: size of @buf, or 0 to retrieve the size only
+ *
+ * If bufsize == 0, then the size of the verity descriptor is returned.
+ * Otherwise the verity descriptor is written to 'buf' and its actual
+ * size is returned; -ERANGE is returned if it's too large. This may be
+ * called by multiple processes concurrently on the same inode.
+ *
+ * Return: the size on success, -errno on failure
+ */
+ int (*get_verity_descriptor)(struct inode *inode, void *buf,
+ size_t bufsize);
+
+ /**
+ * Read a Merkle tree page of the given inode.
+ *
+ * @inode: the inode
+ * @index: 0-based index of the page within the Merkle tree
+ * @num_ra_pages: The number of Merkle tree pages that should be
+ * prefetched starting at @index if the page at @index
+ * isn't already cached. Implementations may ignore this
+ * argument; it's only a performance optimization.
+ *
+ * This can be called at any time on an open verity file. It may be
+ * called by multiple processes concurrently, even with the same page.
+ *
+ * Note that this must retrieve a *page*, not necessarily a *block*.
+ *
+ * Return: the page on success, ERR_PTR() on failure
+ */
+ struct page *(*read_merkle_tree_page)(struct inode *inode,
+ pgoff_t index,
+ unsigned long num_ra_pages);
+
+ /**
+ * Write a Merkle tree block to the given inode.
+ *
+ * @inode: the inode for which the Merkle tree is being built
+ * @buf: the Merkle tree block to write
+ * @pos: the position of the block in the Merkle tree (in bytes)
+ * @size: the Merkle tree block size (in bytes)
+ *
+ * This is only called between ->begin_enable_verity() and
+ * ->end_enable_verity().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+ int (*write_merkle_tree_block)(struct inode *inode, const void *buf,
+ u64 pos, unsigned int size);
+};
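
A hedged sketch of how a filesystem might instantiate these operations; the
"myfs_*" helpers and "struct myfs_inode_info" (with an embedded "vfs_inode"
and an "i_verity_info" pointer) are assumptions, not part of this header:

	static const struct fsverity_operations myfs_verityops = {
		/* offset of the verity info pointer relative to 'struct inode' */
		.inode_info_offs = offsetof(struct myfs_inode_info, i_verity_info) -
				   offsetof(struct myfs_inode_info, vfs_inode),
		.begin_enable_verity	 = myfs_begin_enable_verity,
		.end_enable_verity	 = myfs_end_enable_verity,
		.get_verity_descriptor	 = myfs_get_verity_descriptor,
		.read_merkle_tree_page	 = myfs_read_merkle_tree_page,
		.write_merkle_tree_block = myfs_write_merkle_tree_block,
	};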
+
+#ifdef CONFIG_FS_VERITY
+
+/*
+ * Returns the address of the verity info pointer within the filesystem-specific
+ * part of the inode. (To save memory on filesystems that don't support
+ * fsverity, a field in 'struct inode' itself is no longer used.)
+ */
+static inline struct fsverity_info **
+fsverity_info_addr(const struct inode *inode)
+{
+ VFS_WARN_ON_ONCE(inode->i_sb->s_vop->inode_info_offs == 0);
+ return (void *)inode + inode->i_sb->s_vop->inode_info_offs;
+}
+
+static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
+{
+ /*
+ * Since this function can be called on inodes belonging to filesystems
+ * that don't support fsverity at all, and fsverity_info_addr() doesn't
+ * work on such filesystems, we have to start with an IS_VERITY() check.
+ * Checking IS_VERITY() here is also useful to minimize the overhead of
+ * fsverity_active() on non-verity files.
+ */
+ if (!IS_VERITY(inode))
+ return NULL;
+
+ /*
+ * Pairs with the cmpxchg_release() in fsverity_set_info(). I.e.,
+ * another task may publish the inode's verity info concurrently,
+ * executing a RELEASE barrier. Use smp_load_acquire() here to safely
+ * ACQUIRE the memory the other task published.
+ */
+ return smp_load_acquire(fsverity_info_addr(inode));
+}
+
+/* enable.c */
+
+int fsverity_ioctl_enable(struct file *filp, const void __user *arg);
+
+/* measure.c */
+
+int fsverity_ioctl_measure(struct file *filp, void __user *arg);
+int fsverity_get_digest(struct inode *inode,
+ u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE],
+ u8 *alg, enum hash_algo *halg);
+
+/* open.c */
+
+int __fsverity_file_open(struct inode *inode, struct file *filp);
+int __fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
+void __fsverity_cleanup_inode(struct inode *inode);
+
+/**
+ * fsverity_cleanup_inode() - free the inode's verity info, if present
+ * @inode: an inode being evicted
+ *
+ * Filesystems must call this on inode eviction to free the inode's verity info.
+ */
+static inline void fsverity_cleanup_inode(struct inode *inode)
+{
+ /*
+ * Only IS_VERITY() inodes can have verity info, so start by checking
+ * for IS_VERITY() (which is faster than retrieving the pointer to the
+ * verity info). This minimizes overhead for non-verity inodes.
+ */
+ if (IS_VERITY(inode))
+ __fsverity_cleanup_inode(inode);
+ else
+ VFS_WARN_ON_ONCE(*fsverity_info_addr(inode) != NULL);
+}
+
+/* read_metadata.c */
+
+int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
+
+/* verify.c */
+
+bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
+void fsverity_verify_bio(struct bio *bio);
+void fsverity_enqueue_verify_work(struct work_struct *work);
+
+#else /* !CONFIG_FS_VERITY */
+
+static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
+{
+ return NULL;
+}
+
+/* enable.c */
+
+static inline int fsverity_ioctl_enable(struct file *filp,
+ const void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+/* measure.c */
+
+static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fsverity_get_digest(struct inode *inode,
+ u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE],
+ u8 *alg, enum hash_algo *halg)
+{
+ /*
+ * fsverity is not enabled in the kernel configuration, so always report
+ * that the file doesn't have fsverity enabled (digest size 0).
+ */
+ return 0;
+}
+
+/* open.c */
+
+static inline int __fsverity_file_open(struct inode *inode, struct file *filp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fsverity_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fsverity_cleanup_inode(struct inode *inode)
+{
+}
+
+/* read_metadata.c */
+
+static inline int fsverity_ioctl_read_metadata(struct file *filp,
+ const void __user *uarg)
+{
+ return -EOPNOTSUPP;
+}
+
+/* verify.c */
+
+static inline bool fsverity_verify_blocks(struct folio *folio, size_t len,
+ size_t offset)
+{
+ WARN_ON_ONCE(1);
+ return false;
+}
+
+static inline void fsverity_verify_bio(struct bio *bio)
+{
+ WARN_ON_ONCE(1);
+}
+
+static inline void fsverity_enqueue_verify_work(struct work_struct *work)
+{
+ WARN_ON_ONCE(1);
+}
+
+#endif /* !CONFIG_FS_VERITY */
+
+static inline bool fsverity_verify_folio(struct folio *folio)
+{
+ return fsverity_verify_blocks(folio, folio_size(folio), 0);
+}
+
+static inline bool fsverity_verify_page(struct page *page)
+{
+ return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);
+}
+
+/**
+ * fsverity_active() - do reads from the inode need to go through fs-verity?
+ * @inode: inode to check
+ *
+ * This checks whether the inode's verity info has been set.
+ *
+ * Filesystems call this from ->readahead() to check whether the pages need to
+ * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
+ * a race condition where the file is being read concurrently with
+ * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before the verity info.)
+ *
+ * Return: true if reads need to go through fs-verity, otherwise false
+ */
+static inline bool fsverity_active(const struct inode *inode)
+{
+ return fsverity_get_info(inode) != NULL;
+}
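
A short, hedged sketch of the read-path check described above ("myfs_folio_ok"
is illustrative; only helpers from this header are used):

	static bool myfs_folio_ok(struct inode *inode, struct folio *folio)
	{
		/* Use fsverity_active(), not IS_VERITY(), to avoid the enable race */
		return !fsverity_active(inode) || fsverity_verify_folio(folio);
	}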
+
+/**
+ * fsverity_file_open() - prepare to open a verity file
+ * @inode: the inode being opened
+ * @filp: the struct file being set up
+ *
+ * When opening a verity file, deny the open if it is for writing. Otherwise,
+ * set up the inode's verity info if not already done.
+ *
+ * When combined with fscrypt, this must be called after fscrypt_file_open().
+ * Otherwise, we won't have the key set up to decrypt the verity metadata.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fsverity_file_open(struct inode *inode, struct file *filp)
+{
+ if (IS_VERITY(inode))
+ return __fsverity_file_open(inode, filp);
+ return 0;
+}
+
+/**
+ * fsverity_prepare_setattr() - prepare to change a verity inode's attributes
+ * @dentry: dentry through which the inode is being changed
+ * @attr: attributes to change
+ *
+ * Verity files are immutable, so deny truncates. This isn't covered by the
+ * open-time check because sys_truncate() takes a path, not a file descriptor.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fsverity_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ if (IS_VERITY(d_inode(dentry)))
+ return __fsverity_prepare_setattr(dentry, attr);
+ return 0;
+}
+
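A hedged sketch of an ->open hook chaining this check ("myfs_open" and the
generic_file_open() fallback are assumptions about the calling filesystem):

	static int myfs_open(struct inode *inode, struct file *filp)
	{
		int err;

		/* Denies writes to verity files; sets up verity info if needed */
		err = fsverity_file_open(inode, filp);
		if (err)
			return err;
		return generic_file_open(inode, filp);
	}
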
+#endif /* _LINUX_FSVERITY_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6383115e9d2c..770f0dc993cc 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,12 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Ftrace header. For implementation details beyond the random comments
- * scattered below, see: Documentation/trace/ftrace-design.txt
+ * scattered below, see: Documentation/trace/ftrace-design.rst
*/
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
+#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
+#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
@@ -28,16 +31,45 @@
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
+#ifdef CONFIG_TRACING
+extern void ftrace_boot_snapshot(void);
+#else
+static inline void ftrace_boot_snapshot(void) { }
+#endif
+
+struct ftrace_ops;
+struct ftrace_regs;
+struct dyn_ftrace;
+
+char *arch_ftrace_match_adjust(char *str, const char *search);
+
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
+unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
+#else
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
- * does. Or at least does enough to prevent any unwelcomed side effects.
+ * does. Or at least does enough to prevent any unwelcome side effects.
+ *
+ * Also define the function prototype that these architectures use
+ * to call the ftrace_ops_list_func().
*/
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
+extern const struct ftrace_ops ftrace_nop_ops;
+extern const struct ftrace_ops ftrace_list_ops;
+struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
+#endif /* CONFIG_FUNCTION_TRACER */
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
@@ -51,25 +83,207 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
+ defined(CONFIG_DYNAMIC_FTRACE)
+int
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char **modname, char *sym);
+#else
+static inline int
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char **modname, char *sym)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported);
+#else
+static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported)
+{
+ return -1;
+}
+#endif
+
#ifdef CONFIG_FUNCTION_TRACER
+#include <linux/ftrace_regs.h>
+
extern int ftrace_enabled;
-extern int
-ftrace_enable_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-struct ftrace_ops;
+/**
+ * ftrace_regs - ftrace partial/optimal register set
+ *
+ * ftrace_regs represents a group of registers which is used at the
+ * function entry and exit. There are three types of registers.
+ *
+ * - Registers for passing the parameters to callee, including the stack
+ * pointer. (e.g. rcx, rdx, rdi, rsi, r8, r9 and rsp on x86_64)
+ * - Registers for passing the return values to caller.
+ * (e.g. rax and rdx on x86_64)
+ * - Registers for hooking the function call and return including the
+ * frame pointer (the frame pointer is architecture/config dependent)
+ * (e.g. rip, rbp and rsp for x86_64)
+ *
+ * Also, architecture dependent fields can be used for internal processing.
+ * (e.g. orig_ax on x86_64)
+ *
+ * Basically, ftrace_regs stores the registers related to the context.
+ * On function entry, registers for function parameters and hooking the
+ * function call are stored, and on function exit, registers for function
+ * return value and frame pointers are stored.
+ *
+ * Which registers are restored from the ftrace_regs also depends on the
+ * context.
+ * On function entry, those registers will be restored except for the
+ * stack pointer, so that the user can change the function parameters
+ * and the instruction pointer (e.g. for live patching).
+ * On function exit, only the registers used for return values are
+ * restored.
+ *
+ * NOTE: users *must not* access regs directly, only via the APIs, because
+ * the members can change depending on the architecture.
+ * This is why the structure is empty here, so that nothing accesses
+ * the ftrace_regs directly.
+ */
+struct ftrace_regs {
+ /* Nothing to see here, use the accessor functions! */
+};
+
+#define ftrace_regs_size() sizeof(struct __arch_ftrace_regs)
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+/*
+ * Architectures that define HAVE_DYNAMIC_FTRACE_WITH_ARGS must define their own
+ * arch_ftrace_get_regs() where it only returns pt_regs *if* it is fully
+ * populated. It should return NULL otherwise.
+ */
+static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ return &arch_ftrace_regs(fregs)->regs;
+}
+
+/*
+ * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
+ * if it allows setting the instruction pointer from the ftrace_regs when
+ * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
+ */
+#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
+#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+
+#ifdef CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS
+
+static_assert(sizeof(struct pt_regs) == ftrace_regs_size());
+
+#endif /* CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
+
+static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ if (!fregs)
+ return NULL;
+
+ return arch_ftrace_get_regs(fregs);
+}
+
+#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \
+ defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS)
+
+#ifndef arch_ftrace_partial_regs
+#define arch_ftrace_partial_regs(regs) do {} while (0)
+#endif
+
+static __always_inline struct pt_regs *
+ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ /*
+	 * If CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS=y, the ftrace_regs memory
+	 * layout includes pt_regs, so always return that address.
+	 * Since arch_ftrace_get_regs() will check some members and may return
+	 * NULL, we cannot use it here.
+ */
+ regs = &arch_ftrace_regs(fregs)->regs;
+
+ /* Allow arch specific updates to regs. */
+ arch_ftrace_partial_regs(regs);
+ return regs;
+}
+
+#endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
+
+#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
+/*
+ * Please define an arch-dependent pt_regs which is compatible with
+ * perf_arch_fetch_caller_regs(), but based on ftrace_regs.
+ * This requires that
+ * - user_mode(_regs) returns false (always kernel mode), and
+ * - _regs is usable for a stack trace.
+ */
+#ifndef arch_ftrace_fill_perf_regs
+/* Same as perf_arch_fetch_caller_regs(): do nothing by default */
+#define arch_ftrace_fill_perf_regs(fregs, _regs) do {} while (0)
+#endif
+
+static __always_inline struct pt_regs *
+ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ arch_ftrace_fill_perf_regs(fregs, regs);
+ return regs;
+}
+
+#else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+
+static __always_inline struct pt_regs *
+ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ return &arch_ftrace_regs(fregs)->regs;
+}
+
+#endif
+
+/*
+ * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
+ * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
+ */
+static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
+{
+ if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
+ return true;
+
+ return ftrace_get_regs(fregs) != NULL;
+}
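
A hedged sketch of a callback honoring the rules above: it only touches fregs
through the accessor API, and only when ftrace_regs_has_args() says the
argument accessors are valid ("my_callback" is illustrative):

	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct ftrace_regs *fregs)
	{
		if (!ftrace_regs_has_args(fregs))
			return;
		/* First argument of the traced function, via the accessor only */
		pr_debug("%pS arg0=%lx\n", (void *)ip,
			 ftrace_regs_get_argument(fregs, 0));
	}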
+
+#ifdef CONFIG_HAVE_REGS_AND_STACK_ACCESS_API
+static __always_inline unsigned long
+ftrace_regs_get_kernel_stack_nth(struct ftrace_regs *fregs, unsigned int nth)
+{
+ unsigned long *stackp;
+
+ stackp = (unsigned long *)ftrace_regs_get_stack_pointer(fregs);
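+	/* Only dereference if slot 'nth' is still within this task's stack */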
+ if (((unsigned long)(stackp + nth) & ~(THREAD_SIZE - 1)) ==
+ ((unsigned long)stackp & ~(THREAD_SIZE - 1)))
+ return *(stackp + nth);
+
+ return 0;
+}
+#else /* !CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
+#define ftrace_regs_get_kernel_stack_nth(fregs, nth) (0L)
+#endif /* CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
- * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
+ * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
* IPMODIFY are a kind of attribute flags which can be set only before
* registering the ftrace_ops, and can not be modified while registered.
* Changing those attribute flags after registering ftrace_ops will
@@ -78,10 +292,6 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ENABLED - set/unset when ftrace_ops is registered/unregistered
* DYNAMIC - set when ftrace_ops is registered to denote dynamically
* allocated ftrace_ops which need special care
- * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
- * could be controlled by following calls:
- * ftrace_function_local_enable
- * ftrace_function_local_disable
* SAVE_REGS - The ftrace_ops wants regs saved at each function called
* and passed to the callback. If this flag is set, but the
* architecture does not support passing regs
@@ -96,10 +306,10 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* passing regs to the handler.
* Note, if this flag is set, the SAVE_REGS flag will automatically
* get set upon registering the ftrace_ops, if the arch supports it.
- * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
- * that the call back has its own recursion protection. If it does
- * not set this, then the ftrace infrastructure will add recursion
- * protection for the caller.
+ * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
+ *            that the callback needs recursion protection. If it does
+ * not set this, then the ftrace infrastructure will assume
+ * that the callback can handle recursion on its own.
 * STUB - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *              register_ftrace_function() is called, it will initialize the ops)
@@ -121,27 +331,80 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* PID - Is affected by set_ftrace_pid (allows filtering on those pids)
* RCU - Set when the ops can only be called when RCU is watching.
* TRACE_ARRAY - The ops->private points to a trace_array descriptor.
+ * PERMANENT - Set when the ops is permanent and should not be affected by
+ * ftrace_enabled.
+ * DIRECT - Used by the direct ftrace_ops helper for direct functions
+ * (internal ftrace only, should not be used by others)
+ * SUBOP - Is controlled by another op in field managed.
+ * GRAPH - Is a component of the fgraph_ops structure
*/
enum {
- FTRACE_OPS_FL_ENABLED = 1 << 0,
- FTRACE_OPS_FL_DYNAMIC = 1 << 1,
- FTRACE_OPS_FL_PER_CPU = 1 << 2,
- FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
- FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
- FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
- FTRACE_OPS_FL_STUB = 1 << 6,
- FTRACE_OPS_FL_INITIALIZED = 1 << 7,
- FTRACE_OPS_FL_DELETED = 1 << 8,
- FTRACE_OPS_FL_ADDING = 1 << 9,
- FTRACE_OPS_FL_REMOVING = 1 << 10,
- FTRACE_OPS_FL_MODIFYING = 1 << 11,
- FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
- FTRACE_OPS_FL_IPMODIFY = 1 << 13,
- FTRACE_OPS_FL_PID = 1 << 14,
- FTRACE_OPS_FL_RCU = 1 << 15,
- FTRACE_OPS_FL_TRACE_ARRAY = 1 << 16,
+ FTRACE_OPS_FL_ENABLED = BIT(0),
+ FTRACE_OPS_FL_DYNAMIC = BIT(1),
+ FTRACE_OPS_FL_SAVE_REGS = BIT(2),
+ FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
+ FTRACE_OPS_FL_RECURSION = BIT(4),
+ FTRACE_OPS_FL_STUB = BIT(5),
+ FTRACE_OPS_FL_INITIALIZED = BIT(6),
+ FTRACE_OPS_FL_DELETED = BIT(7),
+ FTRACE_OPS_FL_ADDING = BIT(8),
+ FTRACE_OPS_FL_REMOVING = BIT(9),
+ FTRACE_OPS_FL_MODIFYING = BIT(10),
+ FTRACE_OPS_FL_ALLOC_TRAMP = BIT(11),
+ FTRACE_OPS_FL_IPMODIFY = BIT(12),
+ FTRACE_OPS_FL_PID = BIT(13),
+ FTRACE_OPS_FL_RCU = BIT(14),
+ FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
+ FTRACE_OPS_FL_PERMANENT = BIT(16),
+ FTRACE_OPS_FL_DIRECT = BIT(17),
+ FTRACE_OPS_FL_SUBOP = BIT(18),
+ FTRACE_OPS_FL_GRAPH = BIT(19),
+ FTRACE_OPS_FL_JMP = BIT(20),
};
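
A hedged registration sketch using the attribute flags documented above
("my_ops" and "my_callback" are illustrative; attribute flags must be set
before register_ftrace_function() and not modified afterwards):

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
	};

	/* ... later, e.g. in module init: register_ftrace_function(&my_ops); */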
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+#define FTRACE_OPS_FL_SAVE_ARGS FTRACE_OPS_FL_SAVE_REGS
+#else
+#define FTRACE_OPS_FL_SAVE_ARGS 0
+#endif
+
+/*
+ * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
+ * to a ftrace_ops. Note, the requests may fail.
+ *
+ * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the DIRECT ops is being registered.
+ * This is called with both direct_mutex and
+ *                               ftrace_lock locked.
+ *
+ * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the other ops (the one with IPMODIFY)
+ * is being registered.
+ * This is called with direct_mutex locked.
+ *
+ * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the other ops (the one with IPMODIFY)
+ * is being unregistered.
+ * This is called with direct_mutex locked.
+ */
+enum ftrace_ops_cmd {
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
+ FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
+};
+
+/*
+ * For most ftrace_ops_cmd commands,
+ * Returns:
+ *        0 - Success.
+ *        Negative on failure. The exact return value is dependent on the
+ *        callback.
+ */
+typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
+
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
@@ -151,8 +414,13 @@ struct ftrace_ops_hash {
};
void ftrace_free_init_mem(void);
+void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
-static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_init_mem(void)
+{
+ ftrace_boot_snapshot();
+}
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif
/*
@@ -172,16 +440,45 @@ struct ftrace_ops {
unsigned long flags;
void *private;
ftrace_func_t saved_func;
- int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
struct ftrace_ops_hash old_hash;
unsigned long trampoline;
unsigned long trampoline_size;
+ struct list_head list;
+ struct list_head subop_list;
+ ftrace_ops_func_t ops_func;
+ struct ftrace_ops *managed;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ unsigned long direct_call;
+#endif
#endif
};
+extern struct ftrace_ops __rcu *ftrace_ops_list;
+extern struct ftrace_ops ftrace_list_end;
+
+/*
+ * Traverse the ftrace_ops_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw_check() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw_check() calls are needed to handle
+ * concurrent insertions into the ftrace_ops_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list) \
+ op = rcu_dereference_raw_check(list); \
+ do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op) \
+ while (likely(op = rcu_dereference_raw_check((op)->next)) && \
+ unlikely((op) != &ftrace_list_end))
+
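A hedged sketch of the intended pairing of these macros (mirroring how the
core iterates; "op" is a local struct ftrace_ops pointer):

	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		pr_info("ops %p flags %lx\n", op, op->flags);
	} while_for_each_ftrace_op(op);
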
/*
* Type of the current tracing.
*/
@@ -202,93 +499,121 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
*/
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
-void clear_ftrace_function(void);
-/**
- * ftrace_function_local_enable - enable ftrace_ops on current cpu
- *
- * This function enables tracing on current cpu by decreasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+extern void ftrace_stub(unsigned long a0, unsigned long a1,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+
+
+int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
+#else /* !CONFIG_FUNCTION_TRACER */
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
*/
-static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void ftrace_kill(void) { }
+static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
+static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
- if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
- return;
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_FUNCTION_TRACER */
+
+struct ftrace_func_entry {
+ struct hlist_node hlist;
+ unsigned long ip;
+ unsigned long direct; /* for direct lookup only */
+};
- (*this_cpu_ptr(ops->disabled))--;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+unsigned long ftrace_find_rec_direct(unsigned long ip);
+int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters);
+int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
+
+void ftrace_stub_direct_tramp(void);
+
+#else
+struct ftrace_ops;
+static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
+{
+ return 0;
+}
+static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters)
+{
+ return -ENODEV;
+}
+static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
}
-/**
- * ftrace_function_local_disable - disable ftrace_ops on current cpu
+/*
+ * This must be implemented by the architecture.
+ * It is the way the ftrace direct_ops helper, when called
+ * via ftrace (because there are other callbacks besides the
+ * direct call), can inform the architecture's trampoline that this
+ * routine has a direct caller, and what the caller is.
*
- * This function disables tracing on current cpu by increasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ * For example, in x86, it returns the direct caller
+ * callback function via the regs->orig_ax parameter.
+ * Then in the ftrace trampoline, if this is set, it makes
+ * the return from the trampoline jump to the direct caller
+ * instead of going back to the function it just traced.
*/
-static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
+ unsigned long addr) { }
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_JMP
+static inline bool ftrace_is_jmp(unsigned long addr)
{
- if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
- return;
+ return addr & 1;
+}
- (*this_cpu_ptr(ops->disabled))++;
+static inline unsigned long ftrace_jmp_set(unsigned long addr)
+{
+ return addr | 1UL;
}
-/**
- * ftrace_function_local_disabled - returns ftrace_ops disabled value
- * on current cpu
- *
- * This function returns value of ftrace_ops::disabled on current cpu.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+static inline unsigned long ftrace_jmp_get(unsigned long addr)
+{
+ return addr & ~1UL;
+}
+#else
+static inline bool ftrace_is_jmp(unsigned long addr)
{
- WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
- return *this_cpu_ptr(ops->disabled);
+ return false;
}
-extern void ftrace_stub(unsigned long a0, unsigned long a1,
- struct ftrace_ops *op, struct pt_regs *regs);
+static inline unsigned long ftrace_jmp_set(unsigned long addr)
+{
+ return addr;
+}
-#else /* !CONFIG_FUNCTION_TRACER */
-/*
- * (un)register_ftrace_function must be a macro since the ops parameter
- * must not be evaluated.
- */
-#define register_ftrace_function(ops) ({ 0; })
-#define unregister_ftrace_function(ops) ({ 0; })
-static inline int ftrace_nr_registered_ops(void)
+static inline unsigned long ftrace_jmp_get(unsigned long addr)
{
- return 0;
+ return addr;
}
-static inline void clear_ftrace_function(void) { }
-static inline void ftrace_kill(void) { }
-static inline void ftrace_free_init_mem(void) { }
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_JMP */
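
A hedged round-trip sketch of the LSB tagging above (assuming
CONFIG_DYNAMIC_FTRACE_WITH_JMP; call addresses are at least 2-byte aligned,
so bit 0 is free to carry the "jmp" flag; "my_normalize" is illustrative):

	static inline unsigned long my_normalize(unsigned long addr)
	{
		unsigned long tagged = ftrace_jmp_set(addr);	/* addr | 1 */

		WARN_ON_ONCE(!ftrace_is_jmp(tagged));		/* bit 0 now set */
		return ftrace_jmp_get(tagged);			/* flag masked off */
	}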
#ifdef CONFIG_STACK_TRACER
-#define STACK_TRACE_ENTRIES 500
-
-struct stack_trace;
-
-extern unsigned stack_trace_index[];
-extern struct stack_trace stack_trace_max;
-extern unsigned long stack_trace_max_size;
-extern arch_spinlock_t stack_trace_max_lock;
-
-extern int stack_tracer_enabled;
-void stack_trace_print(void);
-int
-stack_trace_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
@@ -306,8 +631,8 @@ DECLARE_PER_CPU(int, disable_stack_tracer);
*/
static inline void stack_tracer_disable(void)
{
- /* Preemption or interupts must be disabled */
- if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+ /* Preemption or interrupts must be disabled */
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
this_cpu_inc(disable_stack_tracer);
}
@@ -320,7 +645,7 @@ static inline void stack_tracer_disable(void)
*/
static inline void stack_tracer_enable(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
this_cpu_dec(disable_stack_tracer);
}
@@ -329,12 +654,34 @@ static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
-#ifdef CONFIG_DYNAMIC_FTRACE
+enum {
+ FTRACE_UPDATE_CALLS = (1 << 0),
+ FTRACE_DISABLE_CALLS = (1 << 1),
+ FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+ FTRACE_START_FUNC_RET = (1 << 3),
+ FTRACE_STOP_FUNC_RET = (1 << 4),
+ FTRACE_MAY_SLEEP = (1 << 5),
+};
-int ftrace_arch_code_modify_prepare(void);
-int ftrace_arch_code_modify_post_process(void);
+/* Arches can override ftrace_get_symaddr() to convert fentry_ip to symaddr. */
+#ifndef ftrace_get_symaddr
+/**
+ * ftrace_get_symaddr - return the symbol address from fentry_ip
+ * @fentry_ip: the address of ftrace location
+ *
+ * Get the symbol address from @fentry_ip (fast path). If there is no fast
+ * search path, this returns 0.
+ * Users may need to use the kallsyms API to find the symbol address.
+ */
+#define ftrace_get_symaddr(fentry_ip) (0)
+#endif
-struct dyn_ftrace;
+void ftrace_sync_ipi(void *data);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+void ftrace_arch_code_modify_prepare(void);
+void ftrace_arch_code_modify_post_process(void);
enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
@@ -357,7 +704,7 @@ struct seq_file;
extern int ftrace_text_reserved(const void *start, const void *end);
-extern int ftrace_nr_registered_ops(void);
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
bool is_ftrace_trampoline(unsigned long addr);
@@ -373,9 +720,14 @@ bool is_ftrace_trampoline(unsigned long addr);
* REGS_EN - the function is set up to save regs.
* IPMODIFY - the record allows for the IP address to be changed.
* DISABLED - the record is not ready to be touched yet
+ * DIRECT - there is a direct function to call
+ * CALL_OPS - the record can use callsite-specific ops
+ * CALL_OPS_EN - the function is set up to use callsite-specific ops
+ * TOUCHED - A callback was added since boot up
+ * MODIFIED - The function had IPMODIFY or DIRECT attached to it
*
* When a new ftrace_ops is registered and wants a function to save
- * pt_regs, the rec->flag REGS is set. When the function has been
+ * pt_regs, the rec->flags REGS is set. When the function has been
* set up to save regs, the REG_EN flag is set. Once a function
* starts saving regs it will do so until all ftrace_ops are removed
* from tracing that function.
@@ -388,15 +740,18 @@ enum {
FTRACE_FL_TRAMP_EN = (1UL << 27),
FTRACE_FL_IPMODIFY = (1UL << 26),
FTRACE_FL_DISABLED = (1UL << 25),
+ FTRACE_FL_DIRECT = (1UL << 24),
+ FTRACE_FL_DIRECT_EN = (1UL << 23),
+ FTRACE_FL_CALL_OPS = (1UL << 22),
+ FTRACE_FL_CALL_OPS_EN = (1UL << 21),
+ FTRACE_FL_TOUCHED = (1UL << 20),
+ FTRACE_FL_MODIFIED = (1UL << 19),
};
-#define FTRACE_REF_MAX_SHIFT 25
-#define FTRACE_FL_BITS 7
-#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
-#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
+#define FTRACE_REF_MAX_SHIFT 19
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
-#define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK)
+#define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
struct dyn_ftrace {
unsigned long ip; /* address of mcount call-site */
@@ -404,9 +759,10 @@ struct dyn_ftrace {
struct dyn_arch_ftrace arch;
};
-int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
+int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -416,14 +772,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
-enum {
- FTRACE_UPDATE_CALLS = (1 << 0),
- FTRACE_DISABLE_CALLS = (1 << 1),
- FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
- FTRACE_START_FUNC_RET = (1 << 3),
- FTRACE_STOP_FUNC_RET = (1 << 4),
-};
-
/*
* The FTRACE_UPDATE_* enum is used to pass information back
* from the ftrace_update_record() and ftrace_test_record()
@@ -450,9 +798,14 @@ enum {
FTRACE_ITER_PROBE = (1 << 4),
FTRACE_ITER_MOD = (1 << 5),
FTRACE_ITER_ENABLED = (1 << 6),
+ FTRACE_ITER_TOUCHED = (1 << 7),
+ FTRACE_ITER_ADDRS = (1 << 8),
};
void arch_ftrace_update_code(int command);
+void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
+void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
+void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
struct ftrace_rec_iter;
@@ -466,8 +819,8 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
iter = ftrace_rec_iter_next(iter))
-int ftrace_update_record(struct dyn_ftrace *rec, int enable);
-int ftrace_test_record(struct dyn_ftrace *rec, int enable);
+int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
+int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
@@ -488,7 +841,6 @@ void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
/* defined in arch */
-extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
@@ -538,7 +890,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
/**
* ftrace_make_nop - convert code into nop
* @mod: module structure if called by module load initialization
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @addr: the address that the call site should be calling
*
* This is a very sensitive operation and great care needs
@@ -560,8 +912,53 @@ extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);
/**
+ * ftrace_need_init_nop - return whether nop call sites should be initialized
+ *
+ * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
+ * need to call ftrace_init_nop() if the code is built with that flag.
+ * Architectures where this is not always the case may define their own
+ * condition.
+ *
+ * Return must be:
+ * 0 if ftrace_init_nop() should be called
+ * Nonzero if ftrace_init_nop() should not be called
+ */
+
+#ifndef ftrace_need_init_nop
+#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
+#endif
+
+/**
+ * ftrace_init_nop - initialize a nop call site
+ * @mod: module structure if called by module load initialization
+ * @rec: the call site record (e.g. mcount/fentry)
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should contain the contents created by
+ * the compiler
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+#ifndef ftrace_init_nop
+static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+}
+#endif
+
+/**
* ftrace_make_call - convert a nop call site into a call to addr
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @addr: the address that the call site should call
*
* This is a very sensitive operation and great care needs
@@ -581,10 +978,12 @@ extern int ftrace_make_nop(struct module *mod,
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
+ defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) || \
+ defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)
/**
* ftrace_modify_call - convert from one addr to another (no nop)
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @old_addr: the address expected to be currently called to
* @addr: the address to change to
*
@@ -594,6 +993,9 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
+ * When using call ops, this is called when the associated ops change, even
+ * when (addr == old_addr).
+ *
* The code segment at @rec->ip should be a caller to @old_addr
*
* Return must be:
@@ -614,21 +1016,12 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
}
#endif
-/* May be defined in arch */
-extern int ftrace_arch_read_dyn_info(char *buf, int size);
-
extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
-
-extern void ftrace_disable_daemon(void);
-extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline int ftrace_force_update(void) { return 0; }
-static inline void ftrace_disable_daemon(void) { }
-static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
@@ -649,6 +1042,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
+#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
@@ -667,6 +1061,15 @@ static inline bool is_ftrace_trampoline(unsigned long addr)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef ftrace_graph_func
+#define ftrace_graph_func ftrace_stub
+#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
+#else
+#define FTRACE_OPS_GRAPH_STUB 0
+#endif
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);
@@ -722,7 +1125,7 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
-static inline unsigned long get_lock_parent_ip(void)
+static __always_inline unsigned long get_lock_parent_ip(void)
{
unsigned long addr = CALLER_ADDR0;
@@ -734,15 +1137,7 @@ static inline unsigned long get_lock_parent_ip(void)
return CALLER_ADDR2;
}
-#ifdef CONFIG_IRQSOFF_TRACER
- extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
- extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
-#else
- static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
- static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
-#endif
-
-#ifdef CONFIG_PREEMPT_TRACER
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
@@ -754,8 +1149,13 @@ static inline unsigned long get_lock_parent_ip(void)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_init(void);
+#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
+#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
+#else
+#define FTRACE_CALLSITE_SECTION "__mcount_loc"
+#endif
#else
static inline void ftrace_init(void) { }
#endif
@@ -767,7 +1167,15 @@ static inline void ftrace_init(void) { }
*/
struct ftrace_graph_ent {
unsigned long func; /* Current function */
- int depth;
+ unsigned long depth;
+} __packed;
+
+/*
+ * Structure that defines an entry function trace with retaddr.
+ */
+struct fgraph_retaddr_ent {
+ struct ftrace_graph_ent ent;
+ unsigned long retaddr; /* Return address */
} __packed;
/*
@@ -777,21 +1185,43 @@ struct ftrace_graph_ent {
*/
struct ftrace_graph_ret {
unsigned long func; /* Current function */
- /* Number of functions that overran the depth limit for current task */
- unsigned long overrun;
- unsigned long long calltime;
- unsigned long long rettime;
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+ unsigned long retval;
+#endif
int depth;
+ /* Number of functions that overran the depth limit for current task */
+ unsigned int overrun;
} __packed;
+struct fgraph_ops;
+
/* Type of the callback handlers for tracing function graph */
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
+ struct fgraph_ops *,
+ struct ftrace_regs *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
+ struct fgraph_ops *,
+ struct ftrace_regs *); /* entry */
+
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
+ struct fgraph_ops *gops,
+ struct ftrace_regs *fregs);
+bool ftrace_pids_enabled(struct ftrace_ops *ops);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-/* for init task */
-#define INIT_FTRACE_GRAPH .ret_stack = NULL,
+struct fgraph_ops {
+ trace_func_graph_ent_t entryfunc;
+ trace_func_graph_ret_t retfunc;
+ struct ftrace_ops ops; /* for the hash lists */
+ void *private;
+ trace_func_graph_ent_t saved_func;
+ int idx;
+};
+
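A hedged sketch of a graph tracer built on this structure ("my_entry" and
"my_return" are illustrative handlers matching the typedefs above):

	static int my_entry(struct ftrace_graph_ent *trace,
			    struct fgraph_ops *gops, struct ftrace_regs *fregs)
	{
		return 1;	/* nonzero: also trace this function's return */
	}

	static void my_return(struct ftrace_graph_ret *trace,
			      struct fgraph_ops *gops, struct ftrace_regs *fregs)
	{
	}

	static struct fgraph_ops my_gops = {
		.entryfunc = my_entry,
		.retfunc   = my_return,
	};

	/* register_ftrace_graph(&my_gops) / unregister_ftrace_graph(&my_gops) */
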
+void *fgraph_reserve_data(int idx, int size_bytes);
+void *fgraph_retrieve_data(int idx, int *size_bytes);
+void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth);
/*
* Stack of return addresses for functions
@@ -801,16 +1231,10 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
- unsigned long long calltime;
-#ifdef CONFIG_FUNCTION_PROFILER
- unsigned long long subtime;
-#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
unsigned long fp;
#endif
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long *retp;
-#endif
};
/*
@@ -821,11 +1245,23 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
- unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter_regs(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp,
+ struct ftrace_regs *fregs);
+
+static inline int function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long fp, unsigned long *retp)
+{
+ return function_graph_enter_regs(ret, func, fp, retp, NULL);
+}
+
+struct ftrace_ret_stack *
+ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
+unsigned long ftrace_graph_top_ret_addr(struct task_struct *task);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
+unsigned long *fgraph_get_task_var(struct fgraph_ops *gops);
/*
* Sometimes we don't want to trace a function with the function
@@ -834,29 +1270,38 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
*/
#define __notrace_funcgraph notrace
-#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
-extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- trace_func_graph_ent_t entryfunc);
-extern bool ftrace_graph_is_dead(void);
+extern int register_ftrace_graph(struct fgraph_ops *ops);
+extern void unregister_ftrace_graph(struct fgraph_ops *ops);
+
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * function graph tracing. This function is called by the critical
+ * paths of function graph to keep those paths from doing any more harm.
+ */
+DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
+
+static inline bool ftrace_graph_is_dead(void)
+{
+ return static_branch_unlikely(&kill_ftrace_graph);
+}
+
extern void ftrace_graph_stop(void);
/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;
-extern void unregister_ftrace_graph(void);
-
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
-static inline int task_curr_ret_stack(struct task_struct *t)
-{
- return t->curr_ret_stack;
-}
+/* Used by assembly, but to quiet sparse warnings */
+extern struct ftrace_ops *function_trace_op;
static inline void pause_graph_tracing(void)
{
@@ -870,23 +1315,14 @@ static inline void unpause_graph_tracing(void)
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#define __notrace_funcgraph
-#define INIT_FTRACE_GRAPH
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
-static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- trace_func_graph_ent_t entryfunc)
-{
- return -1;
-}
-static inline void unregister_ftrace_graph(void) { }
-
-static inline int task_curr_ret_stack(struct task_struct *tsk)
-{
- return -1;
-}
+/* Define as macros since fgraph_ops may not be defined */
+#define register_ftrace_graph(ops) ({ -1; })
+#define unregister_ftrace_graph(ops) do { } while (0)
static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
@@ -900,71 +1336,16 @@ static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING
-
-/* flags for current->trace */
-enum {
- TSK_TRACE_FL_TRACE_BIT = 0,
- TSK_TRACE_FL_GRAPH_BIT = 1,
-};
-enum {
- TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
- TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
-};
-
-static inline void set_tsk_trace_trace(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_trace(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_trace(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_TRACE;
-}
-
-static inline void set_tsk_trace_graph(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_graph(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_graph(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_GRAPH;
-}
-
enum ftrace_dump_mode;
-extern enum ftrace_dump_mode ftrace_dump_on_oops;
-extern int tracepoint_printk;
+extern int ftrace_dump_on_oops_enabled(void);
extern void disable_trace_on_warning(void);
-extern int __disable_trace_on_warning;
-
-#ifdef CONFIG_PREEMPT
-#define INIT_TRACE_RECURSION .trace_recursion = 0,
-#endif
-
-int tracepoint_printk_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
-#ifndef INIT_TRACE_RECURSION
-#define INIT_TRACE_RECURSION
-#endif
-
#ifdef CONFIG_FTRACE_SYSCALLS
unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 4ec2c9b205f2..f6faa31289ba 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -1,36 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FTRACE_IRQ_H
#define _LINUX_FTRACE_IRQ_H
-
-#ifdef CONFIG_FTRACE_NMI_ENTER
-extern void arch_ftrace_nmi_enter(void);
-extern void arch_ftrace_nmi_exit(void);
-#else
-static inline void arch_ftrace_nmi_enter(void) { }
-static inline void arch_ftrace_nmi_exit(void) { }
-#endif
-
#ifdef CONFIG_HWLAT_TRACER
extern bool trace_hwlat_callback_enabled;
extern void trace_hwlat_callback(bool enter);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+extern bool trace_osnoise_callback_enabled;
+extern void trace_osnoise_callback(bool enter);
+#endif
+
static inline void ftrace_nmi_enter(void)
{
#ifdef CONFIG_HWLAT_TRACER
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(true);
#endif
- arch_ftrace_nmi_enter();
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(true);
+#endif
}
static inline void ftrace_nmi_exit(void)
{
- arch_ftrace_nmi_exit();
#ifdef CONFIG_HWLAT_TRACER
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(false);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(false);
+#endif
}
#endif /* _LINUX_FTRACE_IRQ_H */
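
For context, a sketch of how an architecture's NMI path is expected to bracket its handler with these helpers; the entry-point name is hypothetical:

/* Hypothetical arch NMI entry point. */
void sketch_arch_nmi(void)
{
	ftrace_nmi_enter();	/* hwlat/osnoise record the NMI entry */

	/* ... service the NMI source ... */

	ftrace_nmi_exit();	/* hwlat/osnoise record the NMI exit */
}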
diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h
new file mode 100644
index 000000000000..15627ceea9bc
--- /dev/null
+++ b/include/linux/ftrace_regs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FTRACE_REGS_H
+#define _LINUX_FTRACE_REGS_H
+
+/*
+ * Architectures whose ftrace_regs simply wrap a pt_regs can use this default.
+ * An architecture that does not use pt_regs must define all of the accessor
+ * functions below.
+ */
+#ifndef HAVE_ARCH_FTRACE_REGS
+struct __arch_ftrace_regs {
+ struct pt_regs regs;
+};
+
+#define arch_ftrace_regs(fregs) ((struct __arch_ftrace_regs *)(fregs))
+
+struct ftrace_regs;
+
+#define ftrace_regs_get_instruction_pointer(fregs) \
+ instruction_pointer(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(&arch_ftrace_regs(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_get_return_value(fregs) \
+ regs_return_value(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(&arch_ftrace_regs(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+#define ftrace_regs_get_frame_pointer(fregs) \
+ frame_pointer(&arch_ftrace_regs(fregs)->regs)
+
+#endif /* HAVE_ARCH_FTRACE_REGS */
+
+/* This can be overridden by the architectures */
+#ifndef FTRACE_REGS_MAX_ARGS
+# define FTRACE_REGS_MAX_ARGS 6
+#endif
+
+#endif /* _LINUX_FTRACE_REGS_H */
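
A minimal sketch of how a function-trace callback might consume these accessors; the callback follows the usual ftrace_func_t shape, and the registration glue is omitted:

#include <linux/ftrace.h>
#include <linux/ftrace_regs.h>

/* Hypothetical callback: uses only the wrappers, never the arch struct. */
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long pc = ftrace_regs_get_instruction_pointer(fregs);
	unsigned long sp = ftrace_regs_get_stack_pointer(fregs);
	unsigned long a0 = ftrace_regs_get_argument(fregs, 0);

	pr_debug("%pS: pc=%lx sp=%lx arg0=%lx\n", (void *)ip, pc, sp, a0);
}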
diff --git a/include/linux/futex.h b/include/linux/futex.h
index f36bfd26f998..9e9750f04980 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,19 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H
+#include <linux/sched.h>
#include <linux/ktime.h>
+#include <linux/mm_types.h>
+
#include <uapi/linux/futex.h>
struct inode;
-struct mm_struct;
struct task_struct;
-long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
- u32 __user *uaddr2, u32 val2, u32 val3);
-
-extern int
-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
-
/*
* Futexes are matched on equal values of this key.
* The key type depends on whether it's a shared or private mapping.
@@ -34,43 +31,90 @@ handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
union futex_key {
struct {
+ u64 i_seq;
unsigned long pgoff;
- struct inode *inode;
- int offset;
+ unsigned int offset;
+ /* unsigned int node; */
} shared;
struct {
+ union {
+ struct mm_struct *mm;
+ u64 __tmp;
+ };
unsigned long address;
- struct mm_struct *mm;
- int offset;
+ unsigned int offset;
+ /* unsigned int node; */
} private;
struct {
+ u64 ptr;
unsigned long word;
- void *ptr;
- int offset;
+ unsigned int offset;
+ unsigned int node; /* NOT hashed! */
} both;
};
-#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
#ifdef CONFIG_FUTEX
-extern void exit_robust_list(struct task_struct *curr);
-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
-#define futex_cmpxchg_enabled 1
-#else
-extern int futex_cmpxchg_enabled;
-#endif
-#else
-static inline void exit_robust_list(struct task_struct *curr)
+enum {
+ FUTEX_STATE_OK,
+ FUTEX_STATE_EXITING,
+ FUTEX_STATE_DEAD,
+};
+
+static inline void futex_init_task(struct task_struct *tsk)
{
-}
+ tsk->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+ tsk->compat_robust_list = NULL;
#endif
+ INIT_LIST_HEAD(&tsk->pi_state_list);
+ tsk->pi_state_cache = NULL;
+ tsk->futex_state = FUTEX_STATE_OK;
+ mutex_init(&tsk->futex_exit_mutex);
+}
+
+void futex_exit_recursive(struct task_struct *tsk);
+void futex_exit_release(struct task_struct *tsk);
+void futex_exec_release(struct task_struct *tsk);
-#ifdef CONFIG_FUTEX_PI
-extern void exit_pi_state_list(struct task_struct *curr);
-#else
-static inline void exit_pi_state_list(struct task_struct *curr)
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
+int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4);
+
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+int futex_hash_allocate_default(void);
+void futex_hash_free(struct mm_struct *mm);
+int futex_mm_init(struct mm_struct *mm);
+
+#else /* !CONFIG_FUTEX_PRIVATE_HASH */
+static inline int futex_hash_allocate_default(void) { return 0; }
+static inline void futex_hash_free(struct mm_struct *mm) { }
+static inline int futex_mm_init(struct mm_struct *mm) { return 0; }
+#endif /* CONFIG_FUTEX_PRIVATE_HASH */
+
+#else /* !CONFIG_FUTEX */
+static inline void futex_init_task(struct task_struct *tsk) { }
+static inline void futex_exit_recursive(struct task_struct *tsk) { }
+static inline void futex_exit_release(struct task_struct *tsk) { }
+static inline void futex_exec_release(struct task_struct *tsk) { }
+static inline long do_futex(u32 __user *uaddr, int op, u32 val,
+ ktime_t *timeout, u32 __user *uaddr2,
+ u32 val2, u32 val3)
{
+ return -EINVAL;
}
+static inline int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
+{
+ return -EINVAL;
+}
+static inline int futex_hash_allocate_default(void)
+{
+ return 0;
+}
+static inline void futex_hash_free(struct mm_struct *mm) { }
+static inline int futex_mm_init(struct mm_struct *mm) { return 0; }
+
#endif
#endif
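
A sketch of the task-lifecycle pairing these helpers imply; the copy_process()/do_exit() call sites named here are the conventional ones but are an assumption as far as this hunk goes:

/* Hypothetical fork-side fragment: start the child with clean futex state. */
static void sketch_copy_process(struct task_struct *child)
{
	futex_init_task(child);	/* empty robust list, OK state, no PI chain */
}

/* Hypothetical exit-side fragment: release robust/PI state exactly once,
 * serialized against concurrent lookups by tsk->futex_exit_mutex. */
static void sketch_do_exit(struct task_struct *tsk)
{
	futex_exit_release(tsk);
}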
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
new file mode 100644
index 000000000000..9bd605b87c4c
--- /dev/null
+++ b/include/linux/fw_table.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * fw_table.h - Parsing support for ACPI and ACPI-like tables provided by
+ * platform or device firmware
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2023 Intel Corp.
+ */
+#ifndef _FW_TABLE_H_
+#define _FW_TABLE_H_
+
+union acpi_subtable_headers;
+
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
+ const unsigned long end);
+
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+ void *arg, const unsigned long end);
+
+struct acpi_subtable_proc {
+ int id;
+ acpi_tbl_entry_handler handler;
+ acpi_tbl_entry_handler_arg handler_arg;
+ void *arg;
+ int count;
+};
+
+union fw_table_header {
+ struct acpi_table_header acpi;
+ struct acpi_table_cdat cdat;
+};
+
+union acpi_subtable_headers {
+ struct acpi_subtable_header common;
+ struct acpi_hmat_structure hmat;
+ struct acpi_prmt_module_header prmt;
+ struct acpi_cedt_header cedt;
+ struct acpi_cdat_header cdat;
+};
+
+int acpi_parse_entries_array(char *id, unsigned long table_size,
+ union fw_table_header *table_header,
+ unsigned long max_length,
+ struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
+
+int cdat_table_parse(enum acpi_cdat_type type,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg,
+ struct acpi_table_cdat *table_header,
+ unsigned long length);
+
+/* CXL is the only non-ACPI consumer of the FIRMWARE_TABLE library */
+#if IS_ENABLED(CONFIG_ACPI) && !IS_ENABLED(CONFIG_CXL_BUS)
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_fwtbl_lib __init_or_acpilib
+#else
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "CXL")
+#define __init_or_fwtbl_lib
+#endif
+
+#endif
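
A sketch of driving acpi_parse_entries_array(); the table signature, subtable id and handler body are hypothetical placeholders:

#include <linux/fw_table.h>

/* Hypothetical handler: called once per matching subtable. */
static int sketch_handler(union acpi_subtable_headers *header,
			  const unsigned long end)
{
	return 0;	/* zero or positive: keep walking the table */
}

static int sketch_parse(union fw_table_header *table, unsigned long len)
{
	struct acpi_subtable_proc proc = {
		.id	 = 0,	/* hypothetical subtable type */
		.handler = sketch_handler,
	};

	/* Returns the number of matched entries or a negative errno. */
	return acpi_parse_entries_array("XXXX", sizeof(struct acpi_table_header),
					table, len, &proc, 1, 0);
}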
diff --git a/include/linux/fwctl.h b/include/linux/fwctl.h
new file mode 100644
index 000000000000..5d61fc8a6871
--- /dev/null
+++ b/include/linux/fwctl.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __LINUX_FWCTL_H
+#define __LINUX_FWCTL_H
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/cleanup.h>
+#include <uapi/fwctl/fwctl.h>
+
+struct fwctl_device;
+struct fwctl_uctx;
+
+/**
+ * struct fwctl_ops - Driver provided operations
+ *
+ * fwctl_unregister() will wait until all executing ops are completed before it
+ * returns. Drivers should be mindful not to let their ops run for too long, as
+ * doing so will block device hot unplug and module unloading.
+ */
+struct fwctl_ops {
+ /**
+	 * @device_type: The driver's assigned device_type number. This is uABI.
+ */
+ enum fwctl_device_type device_type;
+ /**
+ * @uctx_size: The size of the fwctl_uctx struct to allocate. The first
+ * bytes of this memory will be a fwctl_uctx. The driver can use the
+ * remaining bytes as its private memory.
+ */
+ size_t uctx_size;
+ /**
+ * @open_uctx: Called when a file descriptor is opened before the uctx
+ * is ever used.
+ */
+ int (*open_uctx)(struct fwctl_uctx *uctx);
+ /**
+ * @close_uctx: Called when the uctx is destroyed, usually when the FD
+ * is closed.
+ */
+ void (*close_uctx)(struct fwctl_uctx *uctx);
+ /**
+	 * @info: Implement FWCTL_INFO. Return kmalloc()'d memory that is copied
+	 * to out_device_data. On input, length indicates the size of the user
+	 * buffer; on output, it indicates the size of the returned memory. The
+	 * driver can ignore the input length; the core code will handle everything.
+ */
+ void *(*info)(struct fwctl_uctx *uctx, size_t *length);
+ /**
+ * @fw_rpc: Implement FWCTL_RPC. Deliver rpc_in/in_len to the FW and
+ * return the response and set out_len. rpc_in can be returned as the
+ * response pointer. Otherwise the returned pointer is freed with
+ * kvfree().
+ */
+ void *(*fw_rpc)(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+ void *rpc_in, size_t in_len, size_t *out_len);
+};
+
+/**
+ * struct fwctl_device - Per-driver registration struct
+ * @dev: The sysfs (class/fwctl/fwctlXX) device
+ *
+ * Each driver instance will have one of these structs with the driver private
+ * data following immediately after. This struct is refcounted; it is freed by
+ * calling fwctl_put().
+ */
+struct fwctl_device {
+ struct device dev;
+ /* private: */
+ struct cdev cdev;
+
+ /* Protect uctx_list */
+ struct mutex uctx_list_lock;
+ struct list_head uctx_list;
+ /*
+ * Protect ops, held for write when ops becomes NULL during unregister,
+ * held for read whenever ops is loaded or an ops function is running.
+ */
+ struct rw_semaphore registration_lock;
+ const struct fwctl_ops *ops;
+};
+
+struct fwctl_device *_fwctl_alloc_device(struct device *parent,
+ const struct fwctl_ops *ops,
+ size_t size);
+/**
+ * fwctl_alloc_device - Allocate a fwctl
+ * @parent: Physical device that provides the FW interface
+ * @ops: Driver ops to register
+ * @drv_struct: 'struct driver_fwctl' that holds the struct fwctl_device
+ * @member: Name of the struct fwctl_device in @drv_struct
+ *
+ * This allocates and initializes the fwctl_device embedded in the drv_struct.
+ * Upon success the pointer must be freed via fwctl_put(). Returns a 'drv_struct
+ * \*' on success, NULL on error.
+ */
+#define fwctl_alloc_device(parent, ops, drv_struct, member) \
+ ({ \
+ static_assert(__same_type(struct fwctl_device, \
+ ((drv_struct *)NULL)->member)); \
+ static_assert(offsetof(drv_struct, member) == 0); \
+ (drv_struct *)_fwctl_alloc_device(parent, ops, \
+ sizeof(drv_struct)); \
+ })
+
+static inline struct fwctl_device *fwctl_get(struct fwctl_device *fwctl)
+{
+ get_device(&fwctl->dev);
+ return fwctl;
+}
+static inline void fwctl_put(struct fwctl_device *fwctl)
+{
+ put_device(&fwctl->dev);
+}
+DEFINE_FREE(fwctl, struct fwctl_device *, if (_T) fwctl_put(_T));
+
+int fwctl_register(struct fwctl_device *fwctl);
+void fwctl_unregister(struct fwctl_device *fwctl);
+
+/**
+ * struct fwctl_uctx - Per user FD context
+ * @fwctl: fwctl instance that owns the context
+ *
+ * Every FD opened by userspace will get a unique context allocation. Any driver
+ * private data will follow immediately after.
+ */
+struct fwctl_uctx {
+ struct fwctl_device *fwctl;
+ /* private: */
+ /* Head at fwctl_device::uctx_list */
+ struct list_head uctx_list_entry;
+};
+
+#endif
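
Pulling the pieces above together, a hedged sketch of a driver embedding and registering a fwctl device; struct my_fwctl, my_ops and the device_type value are hypothetical:

#include <linux/fwctl.h>

/* Hypothetical driver struct; the fwctl_device must be the first member,
 * which fwctl_alloc_device() enforces with static_assert(). */
struct my_fwctl {
	struct fwctl_device fwctl;
	u32 flags;		/* driver private data follows the core struct */
};

static const struct fwctl_ops my_ops = {
	.device_type = FWCTL_DEVICE_TYPE_MLX5,	/* assumed placeholder value */
	.uctx_size   = sizeof(struct fwctl_uctx),
	/* .open_uctx, .close_uctx, .info and .fw_rpc would be filled in here */
};

static int sketch_probe(struct device *parent)
{
	struct my_fwctl *mf;
	int rc;

	mf = fwctl_alloc_device(parent, &my_ops, struct my_fwctl, fwctl);
	if (!mf)
		return -ENOMEM;

	rc = fwctl_register(&mf->fwctl);
	if (rc) {
		fwctl_put(&mf->fwctl);	/* drops the only reference */
		return rc;
	}
	return 0;
}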
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 0c35b6caf0f6..097be89487bf 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -1,24 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* fwnode.h - Firmware device node object handle type definition.
*
+ * This header file provides low-level data types and definitions for firmware
+ * and device property providers. The respective API header files supplied by
+ * those providers should contain all of the requisite data types and
+ * definitions for end users, who should not need to include this header directly.
+ *
* Copyright (C) 2015, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_FWNODE_H_
#define _LINUX_FWNODE_H_
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/list.h>
#include <linux/types.h>
+enum dev_dma_attr {
+ DEV_DMA_NOT_SUPPORTED,
+ DEV_DMA_NON_COHERENT,
+ DEV_DMA_COHERENT,
+};
+
struct fwnode_operations;
+struct device;
+
+/*
+ * fwnode flags
+ *
+ * LINKS_ADDED: The fwnode has already been parsed to add fwnode links.
+ * NOT_DEVICE: The fwnode will never be populated as a struct device.
+ * INITIALIZED: The hardware corresponding to fwnode has been initialized.
+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
+ * driver needs its child devices to be bound with
+ * their respective drivers as soon as they are
+ * added.
+ * BEST_EFFORT: The fwnode/device needs to probe early and might be missing some
+ * suppliers. Only enforce ordering with suppliers that have
+ * drivers.
+ */
+#define FWNODE_FLAG_LINKS_ADDED BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE BIT(1)
+#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
+#define FWNODE_FLAG_BEST_EFFORT BIT(4)
+#define FWNODE_FLAG_VISITED BIT(5)
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
+
+	/* The fields below are used solely by device links; don't use them otherwise */
+ struct device *dev;
+ struct list_head suppliers;
+ struct list_head consumers;
+ u8 flags;
+};
+
+/*
+ * fwnode link flags
+ *
+ * CYCLE: The fwnode link is part of a cycle. Don't defer probe.
+ * IGNORE: Completely ignore this link, even during cycle detection.
+ */
+#define FWLINK_FLAG_CYCLE BIT(0)
+#define FWLINK_FLAG_IGNORE BIT(1)
+
+struct fwnode_link {
+ struct fwnode_handle *supplier;
+ struct list_head s_hook;
+ struct fwnode_handle *consumer;
+ struct list_head c_hook;
+ u8 flags;
};
/**
@@ -33,7 +89,14 @@ struct fwnode_endpoint {
const struct fwnode_handle *local_fwnode;
};
-#define NR_FWNODE_REFERENCE_ARGS 8
+/*
+ * ports and endpoints defined as software_nodes should all follow a common
+ * naming scheme; use these macros to ensure commonality.
+ */
+#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u"
+#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
+
+#define NR_FWNODE_REFERENCE_ARGS 16
/**
* struct fwnode_reference_args - Fwnode reference with additional arguments
@@ -44,19 +107,23 @@ struct fwnode_endpoint {
struct fwnode_reference_args {
struct fwnode_handle *fwnode;
unsigned int nargs;
- unsigned int args[NR_FWNODE_REFERENCE_ARGS];
+ u64 args[NR_FWNODE_REFERENCE_ARGS];
};
/**
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
* @put: Put a reference to an fwnode.
+ * @device_is_available: Return true if the device is available.
+ * @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
- * @property_read_integer_array: Read an array of integer properties. Return
- * zero on success, a negative error code
- * otherwise.
+ * @property_read_bool: Return a boolean property value.
+ * @property_read_int_array: Read an array of integer properties. Return zero on
+ * success, a negative error code otherwise.
* @property_read_string_array: Read an array of string properties. Return zero
* on success, a negative error code otherwise.
+ * @get_name: Return the name of an fwnode.
+ * @get_name_prefix: Get a prefix for a node (for printing purposes).
* @get_parent: Return the parent of an fwnode.
* @get_next_child_node: Return the next child node in an iteration.
* @get_named_child_node: Return a child node with a given name.
@@ -66,13 +133,22 @@ struct fwnode_reference_args {
* endpoint node.
* @graph_get_port_parent: Return the parent node of a port node.
* @graph_parse_endpoint: Parse endpoint for port and endpoint id.
+ * @add_links: Create fwnode links to all the suppliers of the fwnode. Return
+ * zero on success, a negative error code otherwise.
*/
struct fwnode_operations {
- void (*get)(struct fwnode_handle *fwnode);
+ struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
bool (*device_is_available)(const struct fwnode_handle *fwnode);
+ const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
+ const struct device *dev);
+ bool (*device_dma_supported)(const struct fwnode_handle *fwnode);
+ enum dev_dma_attr
+ (*device_get_dma_attr)(const struct fwnode_handle *fwnode);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
+ bool (*property_read_bool)(const struct fwnode_handle *fwnode,
+ const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
@@ -81,6 +157,8 @@ struct fwnode_operations {
(*property_read_string_array)(const struct fwnode_handle *fwnode_handle,
const char *propname, const char **val,
size_t nval);
+ const char *(*get_name)(const struct fwnode_handle *fwnode);
+ const char *(*get_name_prefix)(const struct fwnode_handle *fwnode);
struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*get_next_child_node)(const struct fwnode_handle *fwnode,
@@ -101,18 +179,22 @@ struct fwnode_operations {
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+ void __iomem *(*iomap)(struct fwnode_handle *fwnode, int index);
+ int (*irq_get)(const struct fwnode_handle *fwnode, unsigned int index);
+ int (*add_links)(struct fwnode_handle *fwnode);
};
-#define fwnode_has_op(fwnode, op) \
- ((fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+#define fwnode_has_op(fwnode, op) \
+ (!IS_ERR_OR_NULL(fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+
#define fwnode_call_int_op(fwnode, op, ...) \
- (fwnode ? (fwnode_has_op(fwnode, op) ? \
- (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \
- -EINVAL)
-#define fwnode_call_bool_op(fwnode, op, ...) \
- (fwnode ? (fwnode_has_op(fwnode, op) ? \
- (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) : \
- false)
+ (fwnode_has_op(fwnode, op) ? \
+ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : (IS_ERR_OR_NULL(fwnode) ? -EINVAL : -ENXIO))
+
+#define fwnode_call_bool_op(fwnode, op, ...) \
+ (fwnode_has_op(fwnode, op) ? \
+ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false)
+
#define fwnode_call_ptr_op(fwnode, op, ...) \
(fwnode_has_op(fwnode, op) ? \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL)
@@ -122,4 +204,30 @@ struct fwnode_operations {
(fwnode)->ops->op(fwnode, ## __VA_ARGS__); \
} while (false)
+static inline void fwnode_init(struct fwnode_handle *fwnode,
+ const struct fwnode_operations *ops)
+{
+ fwnode->ops = ops;
+ INIT_LIST_HEAD(&fwnode->consumers);
+ INIT_LIST_HEAD(&fwnode->suppliers);
+}
+
+static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
+ bool initialized)
+{
+ if (IS_ERR_OR_NULL(fwnode))
+ return;
+
+ if (initialized)
+ fwnode->flags |= FWNODE_FLAG_INITIALIZED;
+ else
+ fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
+}
+
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
+ u8 flags);
+void fwnode_links_purge(struct fwnode_handle *fwnode);
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode);
+bool fw_devlink_is_strict(void);
+
#endif
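
A sketch of a minimal fwnode provider built on fwnode_init() and the call wrappers above; the ops implementation is hypothetical:

#include <linux/fwnode.h>

/* Hypothetical provider that only implements property_present. */
static bool my_property_present(const struct fwnode_handle *fwnode,
				const char *propname)
{
	return false;
}

static const struct fwnode_operations my_fwnode_ops = {
	.property_present = my_property_present,
};

static struct fwnode_handle my_node;

static void sketch_init(void)
{
	/* Sets ops and initializes the supplier/consumer link lists. */
	fwnode_init(&my_node, &my_fwnode_ops);

	/* Missing ops degrade gracefully through the wrappers:
	 * fwnode_call_bool_op(&my_node, device_is_available) -> false
	 * fwnode_call_int_op(&my_node, irq_get, 0) -> -ENXIO
	 */
}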
diff --git a/include/linux/fwnode_mdio.h b/include/linux/fwnode_mdio.h
new file mode 100644
index 000000000000..faf603c48c86
--- /dev/null
+++ b/include/linux/fwnode_mdio.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * FWNODE helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_FWNODE_MDIO_H
+#define __LINUX_FWNODE_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_FWNODE_MDIO)
+int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr);
+
+int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child, u32 addr);
+
+#else /* CONFIG_FWNODE_MDIO */
+static inline int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+						     struct phy_device *phy,
+						     struct fwnode_handle *child,
+						     u32 addr)
+{
+ return -EINVAL;
+}
+
+static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child,
+ u32 addr)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_FWNODE_MDIO_H */
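
A sketch of a bus driver scanning its fwnode for PHY children; fwnode_for_each_child_node(), fwnode_property_read_u32() and fwnode_handle_put() come from <linux/property.h> and are assumed available at the call site:

/* Hypothetical MDIO controller fragment. */
static int sketch_scan_bus(struct mii_bus *bus, struct fwnode_handle *node)
{
	struct fwnode_handle *child;
	u32 addr;
	int rc;

	fwnode_for_each_child_node(node, child) {
		if (fwnode_property_read_u32(child, "reg", &addr))
			continue;	/* skip children without an address */
		rc = fwnode_mdiobus_register_phy(bus, child, addr);
		if (rc) {
			fwnode_handle_put(child);	/* drop iterator ref */
			return rc;
		}
	}
	return 0;
}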
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index bb7de09e8d57..86d62fdafd7a 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 1999-2002 Vojtech Pavlik
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _GAMEPORT_H
#define _GAMEPORT_H
-#include <asm/io.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -62,12 +58,12 @@ struct gameport_driver {
bool ignore;
};
-#define to_gameport_driver(d) container_of(d, struct gameport_driver, driver)
+#define to_gameport_driver(d) container_of_const(d, struct gameport_driver, driver)
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
void gameport_close(struct gameport *gameport);
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
void __gameport_register_port(struct gameport *gameport, struct module *owner);
/* use a define to avoid include chaining to get THIS_MODULE */
@@ -113,7 +109,7 @@ static inline void gameport_free_port(struct gameport *gameport)
static inline void gameport_set_name(struct gameport *gameport, const char *name)
{
- strlcpy(gameport->name, name, sizeof(gameport->name));
+ strscpy(gameport->name, name, sizeof(gameport->name));
}
/*
@@ -168,18 +164,12 @@ void gameport_unregister_driver(struct gameport_driver *drv);
static inline void gameport_trigger(struct gameport *gameport)
{
- if (gameport->trigger)
- gameport->trigger(gameport);
- else
- outb(0xff, gameport->io);
+ gameport->trigger(gameport);
}
static inline unsigned char gameport_read(struct gameport *gameport)
{
- if (gameport->read)
- return gameport->read(gameport);
- else
- return inb(gameport->io);
+ return gameport->read(gameport);
}
static inline int gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
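
Because the inb()/outb() fallback above is gone, every port must now provide both callbacks (and pull in <asm/io.h> itself if it does raw port I/O). A sketch of what a legacy-style port driver supplies before registering:

#include <asm/io.h>
#include <linux/gameport.h>

/* Hypothetical ISA-style port: the callbacks do what the old fallback did. */
static void my_trigger(struct gameport *gameport)
{
	outb(0xff, gameport->io);
}

static unsigned char my_read(struct gameport *gameport)
{
	return inb(gameport->io);
}

static void sketch_register(struct gameport *gameport)
{
	gameport->trigger = my_trigger;
	gameport->read = my_read;
	gameport_register_port(gameport);
}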
diff --git a/include/linux/gcd.h b/include/linux/gcd.h
index 69f5e8a01bad..616e81a7f7e3 100644
--- a/include/linux/gcd.h
+++ b/include/linux/gcd.h
@@ -1,7 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GCD_H
#define _GCD_H
#include <linux/compiler.h>
+#include <linux/jump_label.h>
+
+DECLARE_STATIC_KEY_TRUE(efficient_ffs_key);
unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__;
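
A tiny usage sketch, with arbitrary values:

#include <linux/gcd.h>

/* Reduce the audio rate ratio 48000:44100; gcd() returns 300 here. */
static void sketch_ratio(void)
{
	unsigned long num = 48000, den = 44100;
	unsigned long g = gcd(num, den);

	num /= g;	/* 160 */
	den /= g;	/* 147 */
}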
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 6dfec4d638df..0bd581003cd5 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Basic general purpose allocator for managing special purpose
* memory, for example, memory that is not managed by the regular
@@ -21,9 +22,6 @@
* the allocator can NOT be used in NMI handler. So code uses the
* allocator in NMI handler should depend on
* CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
@@ -32,6 +30,7 @@
#include <linux/types.h>
#include <linux/spinlock_types.h>
+#include <linux/atomic.h>
struct device;
struct device_node;
@@ -50,7 +49,8 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool,
+ unsigned long start_addr);
/*
* General purpose special memory pool descriptor.
@@ -71,11 +71,12 @@ struct gen_pool {
*/
struct gen_pool_chunk {
struct list_head next_chunk; /* next chunk in pool */
- atomic_t avail;
+ atomic_long_t avail;
phys_addr_t phys_addr; /* physical starting address of memory chunk */
+ void *owner; /* private data to retrieve at alloc time */
unsigned long start_addr; /* start address of memory chunk */
unsigned long end_addr; /* end address of memory chunk (inclusive) */
- unsigned long bits[0]; /* bitmap for allocating memory chunk */
+ unsigned long bits[]; /* bitmap for allocating memory chunk */
};
/*
@@ -94,8 +95,15 @@ struct genpool_data_fixed {
extern struct gen_pool *gen_pool_create(int, int);
extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
-extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
- size_t, int);
+extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t,
+ size_t, int, void *);
+
+static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
+ phys_addr_t phys, size_t size, int nid)
+{
+ return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
+}
+
/**
* gen_pool_add - add a new chunk of special memory to the pool
* @pool: pool to add new memory chunk to
@@ -114,12 +122,56 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
return gen_pool_add_virt(pool, addr, -1, size, nid);
}
extern void gen_pool_destroy(struct gen_pool *);
-extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
-extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t,
- genpool_algo_t algo, void *data);
+unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
+ genpool_algo_t algo, void *data, void **owner);
+
+static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
+ size_t size, void **owner)
+{
+ return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
+ owner);
+}
+
+static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
+ size_t size, genpool_algo_t algo, void *data)
+{
+ return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL);
+}
+
+/**
+ * gen_pool_alloc - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+{
+ return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
+}
+
extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
dma_addr_t *dma);
-extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data);
+extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align);
+extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma);
+extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data);
+extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align);
+extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr,
+ size_t size, void **owner);
+static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr,
+ size_t size)
+{
+ gen_pool_free_owner(pool, addr, size, NULL);
+}
+
extern void gen_pool_for_each_chunk(struct gen_pool *,
void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
extern size_t gen_pool_avail(struct gen_pool *);
@@ -130,31 +182,31 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
int min_alloc_order, int nid, const char *name);
extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
-bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+extern bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
size_t size);
#ifdef CONFIG_OF
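
A sketch of the owner-tracking allocation path added above; the chunk address, sizes and owner cookie are made up:

#include <linux/genalloc.h>
#include <linux/sizes.h>

static int sketch_pool(void)
{
	struct gen_pool *pool;
	unsigned long addr;
	void *owner;

	pool = gen_pool_create(8, NUMA_NO_NODE);	/* 256-byte granules */
	if (!pool)
		return -ENOMEM;

	/* Hypothetical 1 MiB SRAM chunk, tagged with an owner cookie. */
	if (gen_pool_add_owner(pool, 0x80000000UL, -1, SZ_1M,
			       NUMA_NO_NODE, (void *)0x1234)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	addr = gen_pool_alloc_owner(pool, SZ_4K, &owner);
	if (addr)		/* owner now holds the chunk's cookie */
		gen_pool_free_owner(pool, addr, SZ_4K, &owner);

	gen_pool_destroy(pool);
	return 0;
}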
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
new file mode 100644
index 000000000000..5b51c3d582d6
--- /dev/null
+++ b/include/linux/generic-radix-tree.h
@@ -0,0 +1,402 @@
+#ifndef _LINUX_GENERIC_RADIX_TREE_H
+#define _LINUX_GENERIC_RADIX_TREE_H
+
+/**
+ * DOC: Generic radix trees/sparse arrays
+ *
+ * Very simple and minimalistic, supporting arbitrarily sized entries of up to
+ * GENRADIX_NODE_SIZE.
+ *
+ * A genradix is defined with the type it will store, like so:
+ *
+ * static GENRADIX(struct foo) foo_genradix;
+ *
+ * The main operations are:
+ *
+ * - genradix_init(radix) - initialize an empty genradix
+ *
+ * - genradix_free(radix) - free all memory owned by the genradix and
+ * reinitialize it
+ *
+ * - genradix_ptr(radix, idx) - gets a pointer to the entry at idx, returning
+ * NULL if that entry does not exist
+ *
+ * - genradix_ptr_alloc(radix, idx, gfp) - gets a pointer to an entry,
+ * allocating it if necessary
+ *
+ * - genradix_for_each(radix, iter, p) - iterate over each entry in a genradix
+ *
+ * The radix tree allocates one node of entries at a time, so entries may exist
+ * that were never explicitly allocated - they will be initialized to all
+ * zeroes.
+ *
+ * Internally, a genradix is just a radix tree of nodes, and indexing works in
+ * terms of byte offsets. The wrappers in this header file use sizeof on the
+ * type the radix contains to calculate a byte offset from the index - see
+ * __idx_to_offset.
+ */
+
+#include <asm/page.h>
+#include <linux/bug.h>
+#include <linux/limits.h>
+#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct genradix_root;
+
+#define GENRADIX_NODE_SHIFT 9
+#define GENRADIX_NODE_SIZE (1U << GENRADIX_NODE_SHIFT)
+
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
+
+/* depth that's needed for a genradix that can address up to ULONG_MAX: */
+#define GENRADIX_MAX_DEPTH \
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
+
+#define GENRADIX_DEPTH_MASK \
+ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
+
+static inline int genradix_depth_shift(unsigned depth)
+{
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+}
+
+/*
+ * Returns size (of data, in bytes) that a tree of a given depth holds:
+ */
+static inline size_t genradix_depth_size(unsigned depth)
+{
+ return 1UL << genradix_depth_shift(depth);
+}
+
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
+{
+ return (unsigned long) r & GENRADIX_DEPTH_MASK;
+}
+
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+{
+ return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
+}
+
+struct __genradix {
+ struct genradix_root *root;
+};
+
+struct genradix_node {
+ union {
+ /* Interior node: */
+ struct genradix_node *children[GENRADIX_ARY];
+
+ /* Leaf: */
+ u8 data[GENRADIX_NODE_SIZE];
+ };
+};
+
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kfree(node);
+}
+
+/*
+ * NOTE: currently, sizeof(_type) must not be larger than GENRADIX_NODE_SIZE:
+ */
+
+#define __GENRADIX_INITIALIZER \
+ { \
+ .tree = { \
+ .root = NULL, \
+ } \
+ }
+
+/*
+ * We use a 0 size array to stash the type we're storing without taking any
+ * space at runtime - then the various accessor macros can use typeof() to get
+ * to it for casts/sizeof - we also force the alignment so that storing a type
+ * with a ridiculous alignment doesn't blow up the alignment or size of the
+ * genradix.
+ */
+
+#define GENRADIX(_type) \
+struct { \
+ struct __genradix tree; \
+ _type type[0] __aligned(1); \
+}
+
+#define DEFINE_GENRADIX(_name, _type) \
+ GENRADIX(_type) _name = __GENRADIX_INITIALIZER
+
+/**
+ * genradix_init - initialize a genradix
+ * @_radix: genradix to initialize
+ *
+ * Does not fail
+ */
+#define genradix_init(_radix) \
+do { \
+ *(_radix) = (typeof(*_radix)) __GENRADIX_INITIALIZER; \
+} while (0)
+
+void __genradix_free(struct __genradix *);
+
+/**
+ * genradix_free: free all memory owned by a genradix
+ * @_radix: the genradix to free
+ *
+ * After freeing, @_radix will be reinitialized and empty
+ */
+#define genradix_free(_radix) __genradix_free(&(_radix)->tree)
+
+static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
+{
+ if (__builtin_constant_p(obj_size))
+ BUILD_BUG_ON(obj_size > GENRADIX_NODE_SIZE);
+ else
+ BUG_ON(obj_size > GENRADIX_NODE_SIZE);
+
+ if (!is_power_of_2(obj_size)) {
+ size_t objs_per_page = GENRADIX_NODE_SIZE / obj_size;
+
+ return (idx / objs_per_page) * GENRADIX_NODE_SIZE +
+ (idx % objs_per_page) * obj_size;
+ } else {
+ return idx * obj_size;
+ }
+}
+
+#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *)
+#define __genradix_obj_size(_radix) sizeof((_radix)->type[0])
+#define __genradix_objs_per_page(_radix) \
+ (GENRADIX_NODE_SIZE / sizeof((_radix)->type[0]))
+#define __genradix_page_remainder(_radix) \
+ (GENRADIX_NODE_SIZE % sizeof((_radix)->type[0]))
+
+#define __genradix_idx_to_offset(_radix, _idx) \
+ __idx_to_offset(_idx, __genradix_obj_size(_radix))
+
+static inline void *__genradix_ptr_inlined(struct __genradix *radix, size_t offset)
+{
+ struct genradix_root *r = READ_ONCE(radix->root);
+ struct genradix_node *n = genradix_root_to_node(r);
+ unsigned level = genradix_root_to_depth(r);
+ unsigned shift = genradix_depth_shift(level);
+
+ if (unlikely(ilog2(offset) >= genradix_depth_shift(level)))
+ return NULL;
+
+ while (n && shift > GENRADIX_NODE_SHIFT) {
+ shift -= GENRADIX_ARY_SHIFT;
+ n = n->children[offset >> shift];
+ offset &= (1UL << shift) - 1;
+ }
+
+ return n ? &n->data[offset] : NULL;
+}
+
+#define genradix_ptr_inlined(_radix, _idx) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)))
+
+void *__genradix_ptr(struct __genradix *, size_t);
+
+/**
+ * genradix_ptr - get a pointer to a genradix entry
+ * @_radix: genradix to access
+ * @_idx: index to fetch
+ *
+ * Returns a pointer to entry at @_idx, or NULL if that entry does not exist.
+ */
+#define genradix_ptr(_radix, _idx) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)))
+
+void *__genradix_ptr_alloc(struct __genradix *, size_t,
+ struct genradix_node **, gfp_t);
+
+#define genradix_ptr_alloc_inlined(_radix, _idx, _gfp) \
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ NULL, _gfp)))
+
+#define genradix_ptr_alloc_preallocated_inlined(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp)))
+
+/**
+ * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it
+ * if necessary
+ * @_radix: genradix to access
+ * @_idx: index to fetch
+ * @_gfp: gfp mask
+ *
+ * Returns a pointer to entry at @_idx, or NULL on allocation failure
+ */
+#define genradix_ptr_alloc(_radix, _idx, _gfp) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ NULL, _gfp))
+
+#define genradix_ptr_alloc_preallocated(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp))
+
+struct genradix_iter {
+ size_t offset;
+ size_t pos;
+};
+
+/**
+ * genradix_iter_init - initialize a genradix_iter
+ * @_radix: genradix that will be iterated over
+ * @_idx: index to start iterating from
+ */
+#define genradix_iter_init(_radix, _idx) \
+ ((struct genradix_iter) { \
+ .pos = (_idx), \
+ .offset = __genradix_idx_to_offset((_radix), (_idx)),\
+ })
+
+void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
+
+/**
+ * genradix_iter_peek - get first entry at or above iterator's current
+ * position
+ * @_iter: a genradix_iter
+ * @_radix: genradix being iterated over
+ *
+ * If no more entries exist at or above @_iter's current position, returns NULL
+ */
+#define genradix_iter_peek(_iter, _radix) \
+ (__genradix_cast(_radix) \
+ __genradix_iter_peek(_iter, &(_radix)->tree, \
+ __genradix_objs_per_page(_radix)))
+
+void *__genradix_iter_peek_prev(struct genradix_iter *, struct __genradix *,
+ size_t, size_t);
+
+/**
+ * genradix_iter_peek_prev - get first entry at or below iterator's current
+ * position
+ * @_iter: a genradix_iter
+ * @_radix: genradix being iterated over
+ *
+ * If no more entries exist at or below @_iter's current position, returns NULL
+ */
+#define genradix_iter_peek_prev(_iter, _radix) \
+ (__genradix_cast(_radix) \
+ __genradix_iter_peek_prev(_iter, &(_radix)->tree, \
+ __genradix_objs_per_page(_radix), \
+ __genradix_obj_size(_radix) + \
+ __genradix_page_remainder(_radix)))
+
+static inline void __genradix_iter_advance(struct genradix_iter *iter,
+ size_t obj_size)
+{
+ if (iter->offset + obj_size < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return;
+ }
+
+ iter->offset += obj_size;
+
+ if (!is_power_of_2(obj_size) &&
+ (iter->offset & (GENRADIX_NODE_SIZE - 1)) + obj_size > GENRADIX_NODE_SIZE)
+ iter->offset = round_up(iter->offset, GENRADIX_NODE_SIZE);
+
+ iter->pos++;
+}
+
+#define genradix_iter_advance(_iter, _radix) \
+ __genradix_iter_advance(_iter, __genradix_obj_size(_radix))
+
+static inline void __genradix_iter_rewind(struct genradix_iter *iter,
+ size_t obj_size)
+{
+ if (iter->offset == 0 ||
+ iter->offset == SIZE_MAX) {
+ iter->offset = SIZE_MAX;
+ return;
+ }
+
+ if ((iter->offset & (GENRADIX_NODE_SIZE - 1)) == 0)
+ iter->offset -= GENRADIX_NODE_SIZE % obj_size;
+
+ iter->offset -= obj_size;
+ iter->pos--;
+}
+
+#define genradix_iter_rewind(_iter, _radix) \
+ __genradix_iter_rewind(_iter, __genradix_obj_size(_radix))
+
+#define genradix_for_each_from(_radix, _iter, _p, _start) \
+ for (_iter = genradix_iter_init(_radix, _start); \
+ (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \
+ genradix_iter_advance(&_iter, _radix))
+
+/**
+ * genradix_for_each - iterate over entry in a genradix
+ * @_radix: genradix to iterate over
+ * @_iter: a genradix_iter to track current position
+ * @_p: pointer to genradix entry type
+ *
+ * On every iteration, @_p will point to the current entry, and @_iter.pos
+ * will be the current entry's index.
+ */
+#define genradix_for_each(_radix, _iter, _p) \
+ genradix_for_each_from(_radix, _iter, _p, 0)
+
+#define genradix_last_pos(_radix) \
+ (SIZE_MAX / GENRADIX_NODE_SIZE * __genradix_objs_per_page(_radix) - 1)
+
+/**
+ * genradix_for_each_reverse - iterate over entry in a genradix, reverse order
+ * @_radix: genradix to iterate over
+ * @_iter: a genradix_iter to track current position
+ * @_p: pointer to genradix entry type
+ *
+ * On every iteration, @_p will point to the current entry, and @_iter.pos
+ * will be the current entry's index.
+ */
+#define genradix_for_each_reverse(_radix, _iter, _p) \
+ for (_iter = genradix_iter_init(_radix, genradix_last_pos(_radix));\
+ (_p = genradix_iter_peek_prev(&_iter, _radix)) != NULL;\
+ genradix_iter_rewind(&_iter, _radix))
+
+int __genradix_prealloc(struct __genradix *, size_t, gfp_t);
+
+/**
+ * genradix_prealloc - preallocate entries in a generic radix tree
+ * @_radix: genradix to preallocate
+ * @_nr: number of entries to preallocate
+ * @_gfp: gfp mask
+ *
+ * Returns 0 on success, -ENOMEM on failure
+ */
+#define genradix_prealloc(_radix, _nr, _gfp) \
+ __genradix_prealloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _nr + 1),\
+ _gfp)
+
+
+#endif /* _LINUX_GENERIC_RADIX_TREE_H */
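
A short usage sketch of the API documented above:

#include <linux/generic-radix-tree.h>

struct foo {
	u64 seq;
};

static GENRADIX(struct foo) foo_genradix;

static int sketch_genradix(void)
{
	struct genradix_iter iter;
	struct foo *f;

	genradix_init(&foo_genradix);

	/* Allocates intermediate nodes as needed; NULL on -ENOMEM. */
	f = genradix_ptr_alloc(&foo_genradix, 42, GFP_KERNEL);
	if (!f)
		return -ENOMEM;
	f->seq = 1;

	/* Walks only entries in allocated nodes; untouched ones read 0. */
	genradix_for_each(&foo_genradix, iter, f)
		pr_info("idx %zu seq %llu\n", iter.pos, f->seq);

	genradix_free(&foo_genradix);
	return 0;
}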
diff --git a/include/linux/generic_pt/common.h b/include/linux/generic_pt/common.h
new file mode 100644
index 000000000000..6a9a1acb5aad
--- /dev/null
+++ b/include/linux/generic_pt/common.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __GENERIC_PT_COMMON_H
+#define __GENERIC_PT_COMMON_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+#include <linux/bits.h>
+
+/**
+ * DOC: Generic Radix Page Table
+ *
+ * Generic Radix Page Table is a set of functions and helpers to efficiently
+ * parse radix style page tables typically seen in HW implementations. The
+ * interface is built to deliver similar code generation as the mm's pte/pmd/etc
+ * system by fully inlining the exact code required to handle each table level.
+ *
+ * Like the mm subsystem each format contributes its parsing implementation
+ * under common names and the common code implements the required algorithms.
+ *
+ * The system is divided into three logical levels:
+ *
+ * - The page table format and its manipulation functions
+ * - Generic helpers to give a consistent API regardless of underlying format
+ * - An algorithm implementation (e.g. IOMMU/DRM/KVM/MM)
+ *
+ * Multiple implementations are supported. The intention is to have the generic
+ * format code be re-usable for whatever specialized implementation is required.
+ * The generic code is solely about the format of the radix tree; it does not
+ * include memory allocation or higher level decisions that are left for the
+ * implementation.
+ *
+ * The generic framework supports a superset of functions across many HW
+ * implementations:
+ *
+ * - Entries comprised of contiguous blocks of IO PTEs for larger page sizes
+ * - Multi-level tables, up to 6 levels. Runtime selected top level
+ * - Runtime variable table level size (ARM's concatenated tables)
+ * - Expandable top level allowing dynamic sizing of table levels
+ * - Optional leaf entries at any level
+ * - 32-bit/64-bit virtual and output addresses, using every address bit
+ * - Dirty tracking
+ * - Sign extended addressing
+ */
+
+/**
+ * struct pt_common - struct for all page table implementations
+ */
+struct pt_common {
+ /**
+ * @top_of_table: Encodes the table top pointer and the top level in a
+ * single value. Must use READ_ONCE/WRITE_ONCE to access it. The lower
+ * bits of the aligned table pointer are used for the level.
+ */
+ uintptr_t top_of_table;
+ /**
+ * @max_oasz_lg2: Maximum number of bits the OA can contain. Upper bits
+ * must be zero. This may be less than what the page table format
+ * supports, but must not be more.
+ */
+ u8 max_oasz_lg2;
+ /**
+ * @max_vasz_lg2: Maximum number of bits the VA can contain. Upper bits
+ * are 0 or 1 depending on pt_full_va_prefix(). This may be less than
+ * what the page table format supports, but must not be more. When
+ * PT_FEAT_DYNAMIC_TOP is set this reflects the maximum VA capability.
+ */
+ u8 max_vasz_lg2;
+ /**
+ * @features: Bitmap of `enum pt_features`
+ */
+ unsigned int features;
+};
+
+/* Encoding parameters for top_of_table */
+enum {
+ PT_TOP_LEVEL_BITS = 3,
+ PT_TOP_LEVEL_MASK = GENMASK(PT_TOP_LEVEL_BITS - 1, 0),
+};
+
+/**
+ * enum pt_features - Features turned on in the table. Each symbol is a bit
+ * position.
+ */
+enum pt_features {
+ /**
+ * @PT_FEAT_DMA_INCOHERENT: Cache flush page table memory before
+	 * assuming the HW can read it. Otherwise an SMP release is sufficient
+ * for HW to read it.
+ */
+ PT_FEAT_DMA_INCOHERENT,
+ /**
+ * @PT_FEAT_FULL_VA: The table can span the full VA range from 0 to
+ * PT_VADDR_MAX.
+ */
+ PT_FEAT_FULL_VA,
+ /**
+ * @PT_FEAT_DYNAMIC_TOP: The table's top level can be increased
+ * dynamically during map. This requires HW support for atomically
+ * setting both the table top pointer and the starting table level.
+ */
+ PT_FEAT_DYNAMIC_TOP,
+ /**
+ * @PT_FEAT_SIGN_EXTEND: The top most bit of the valid VA range sign
+ * extends up to the full pt_vaddr_t. This divides the page table into
+ * three VA ranges::
+ *
+ * 0 -> 2^N - 1 Lower
+ * 2^N -> (MAX - 2^N - 1) Non-Canonical
+ * MAX - 2^N -> MAX Upper
+ *
+ * In this mode pt_common::max_vasz_lg2 includes the sign bit and the
+ * upper bits that don't fall within the translation are just validated.
+ *
+ * If not set there is no sign extension and valid VA goes from 0 to 2^N
+ * - 1.
+ */
+ PT_FEAT_SIGN_EXTEND,
+ /**
+ * @PT_FEAT_FLUSH_RANGE: IOTLB maintenance is done by flushing IOVA
+ * ranges which will clean out any walk cache or any IOPTE fully
+ * contained by the range. The optimization objective is to minimize the
+ * number of flushes even if ranges include IOVA gaps that do not need
+ * to be flushed.
+ */
+ PT_FEAT_FLUSH_RANGE,
+ /**
+ * @PT_FEAT_FLUSH_RANGE_NO_GAPS: Like PT_FEAT_FLUSH_RANGE except that
+ * the optimization objective is to only flush IOVA that has been
+ * changed. This mode is suitable for cases like hypervisor shadowing
+ * where flushing unchanged ranges may cause the hypervisor to reparse
+ * significant amount of page table.
+ */
+ PT_FEAT_FLUSH_RANGE_NO_GAPS,
+ /* private: */
+ PT_FEAT_FMT_START,
+};
+
+struct pt_amdv1 {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * The memory backing the tables is encrypted. Use __sme_set() to adjust
+ * the page table pointers in the tree. This only works with
+ * CONFIG_AMD_MEM_ENCRYPT.
+ */
+ PT_FEAT_AMDV1_ENCRYPT_TABLES = PT_FEAT_FMT_START,
+ /*
+ * The PTEs are set to prevent cache incoherent traffic, such as PCI no
+ * snoop. This is set either at creation time or before the first map
+ * operation.
+ */
+ PT_FEAT_AMDV1_FORCE_COHERENCE,
+};
+
+struct pt_vtdss {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * The PTEs are set to prevent cache incoherent traffic, such as PCI no
+ * snoop. This is set either at creation time or before the first map
+ * operation.
+ */
+ PT_FEAT_VTDSS_FORCE_COHERENCE = PT_FEAT_FMT_START,
+ /*
+ * Prevent creating read-only PTEs. Used to work around HW errata
+ * ERRATA_772415_SPR17.
+ */
+ PT_FEAT_VTDSS_FORCE_WRITEABLE,
+};
+
+struct pt_x86_64 {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * The memory backing the tables is encrypted. Use __sme_set() to adjust
+ * the page table pointers in the tree. This only works with
+ * CONFIG_AMD_MEM_ENCRYPT.
+ */
+ PT_FEAT_X86_64_AMD_ENCRYPT_TABLES = PT_FEAT_FMT_START,
+};
+
+#endif
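
For illustration, a sketch of unpacking top_of_table with the constants above; this mirrors what the format code does internally:

#include <linux/generic_pt/common.h>

/* Split the packed word into the starting level and the table pointer. */
static void sketch_decode_top(struct pt_common *common,
			      unsigned int *level, void **table)
{
	uintptr_t top = READ_ONCE(common->top_of_table);

	*level = top & PT_TOP_LEVEL_MASK;
	*table = (void *)(top & ~(uintptr_t)PT_TOP_LEVEL_MASK);
}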
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
new file mode 100644
index 000000000000..9eefbb74efd0
--- /dev/null
+++ b/include/linux/generic_pt/iommu.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __GENERIC_PT_IOMMU_H
+#define __GENERIC_PT_IOMMU_H
+
+#include <linux/generic_pt/common.h>
+#include <linux/iommu.h>
+#include <linux/mm_types.h>
+
+struct iommu_iotlb_gather;
+struct pt_iommu_ops;
+struct pt_iommu_driver_ops;
+struct iommu_dirty_bitmap;
+
+/**
+ * DOC: IOMMU Radix Page Table
+ *
+ * The IOMMU implementation of the Generic Page Table provides an ops struct
+ * that is useful to go with an iommu_domain to serve the DMA API, IOMMUFD and
+ * the generic map/unmap interface.
+ *
+ * This interface uses caller-provided locking. The caller must have a VA range
+ * lock that prevents concurrent threads from calling ops on the same VA range.
+ * Generally the range lock must cover at least the full range of a single map
+ * call.
+ */
+
+/**
+ * struct pt_iommu - Base structure for IOMMU page tables
+ *
+ * The format-specific struct will include this as the first member.
+ */
+struct pt_iommu {
+ /**
+ * @domain: The core IOMMU domain. The driver should use a union to
+ * overlay this memory with its previously existing domain struct to
+ * create an alias.
+ */
+ struct iommu_domain domain;
+
+ /**
+ * @ops: Function pointers to access the API
+ */
+ const struct pt_iommu_ops *ops;
+
+ /**
+ * @driver_ops: Function pointers provided by the HW driver to help
+ * manage HW details like caches.
+ */
+ const struct pt_iommu_driver_ops *driver_ops;
+
+ /**
+ * @nid: Node ID to use for table memory allocations. The IOMMU driver
+ * may want to set the NID to the device's NID, if there are multiple
+ * table walkers.
+ */
+ int nid;
+
+ /**
+ * @iommu_device: Device pointer used for any DMA cache flushing when
+	 * PT_FEAT_DMA_INCOHERENT. This is the iommu device that created the
+	 * page table; it must have DMA ops that perform cache flushing.
+ */
+ struct device *iommu_device;
+};
+
+/**
+ * struct pt_iommu_info - Details about the IOMMU page table
+ *
+ * Returned from pt_iommu_ops->get_info()
+ */
+struct pt_iommu_info {
+ /**
+ * @pgsize_bitmap: A bitmask where each set bit indicates
+ * a page size that can be natively stored in the page table.
+ */
+ u64 pgsize_bitmap;
+};
+
+struct pt_iommu_ops {
+ /**
+ * @set_dirty: Make the iova write dirty
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ *
+ * This is only used by iommufd testing. It makes the iova dirty so that
+	 * read_and_clear_dirty() will see it as dirty. Unlike all the other ops,
+	 * this one is safe to call without holding any locks. It may return
+	 * -EAGAIN if there is a race.
+ */
+ int (*set_dirty)(struct pt_iommu *iommu_table, dma_addr_t iova);
+
+ /**
+ * @get_info: Return the pt_iommu_info structure
+ * @iommu_table: Table to query
+ *
+ * Return some basic static information about the page table.
+ */
+ void (*get_info)(struct pt_iommu *iommu_table,
+ struct pt_iommu_info *info);
+
+ /**
+ * @deinit: Undo a format specific init operation
+ * @iommu_table: Table to destroy
+ *
+ * Release all of the memory. The caller must have already removed the
+ * table from all HW access and all caches.
+ */
+ void (*deinit)(struct pt_iommu *iommu_table);
+};
+
+/**
+ * struct pt_iommu_driver_ops - HW IOTLB cache flushing operations
+ *
+ * The IOMMU driver should implement these using container_of(iommu_table) to
+ * get to its iommu_domain derived structure. All ops can be called in atomic
+ * contexts as they are buried under DMA API calls.
+ */
+struct pt_iommu_driver_ops {
+ /**
+ * @change_top: Update the top of table pointer
+ * @iommu_table: Table to operate on
+ * @top_paddr: New CPU physical address of the top pointer
+ * @top_level: IOMMU PT level of the new top
+ *
+	 * Called under the get_top_lock() spinlock. The driver must update all
+	 * HW references to this domain with the new top address and
+	 * configuration. On return, mappings placed in the new top must be
+	 * reachable by the HW.
+	 *
+	 * top_level encodes the level in IOMMU PT format: level 0 is the
+	 * smallest page size, increasing from there. This has to be translated
+	 * to the HW specific format. During this call the new top will not be
+	 * visible to any other API.
+ *
+ * This op is only used by PT_FEAT_DYNAMIC_TOP, and is required if
+ * enabled.
+ */
+ void (*change_top)(struct pt_iommu *iommu_table, phys_addr_t top_paddr,
+ unsigned int top_level);
+
+ /**
+ * @get_top_lock: lock to hold when changing the table top
+ * @iommu_table: Table to operate on
+ *
+	 * Return the lock to hold when changing the top of the page table
+	 * stored in HW. The lock will be held prior to calling
+ * change_top() and released once the top is fully visible.
+ *
+ * Typically this would be a lock that protects the iommu_domain's
+ * attachment list.
+ *
+ * This op is only used by PT_FEAT_DYNAMIC_TOP, and is required if
+ * enabled.
+ */
+ spinlock_t *(*get_top_lock)(struct pt_iommu *iommu_table);
+};
+
+static inline void pt_iommu_deinit(struct pt_iommu *iommu_table)
+{
+ /*
+ * It is safe to call pt_iommu_deinit() before an init, or if init
+ * fails. The ops pointer will only become non-NULL if deinit needs to be
+ * run.
+ */
+ if (iommu_table->ops)
+ iommu_table->ops->deinit(iommu_table);
+}
+
+/**
+ * struct pt_iommu_cfg - Common configuration values for all formats
+ */
+struct pt_iommu_cfg {
+ /**
+ * @features: Features required. Only these features will be turned on.
+ * The feature list should reflect what the IOMMU HW is capable of.
+ */
+ unsigned int features;
+ /**
+ * @hw_max_vasz_lg2: Maximum VA the IOMMU HW can support. This will
+ * imply the top level of the table.
+ */
+ u8 hw_max_vasz_lg2;
+ /**
+ * @hw_max_oasz_lg2: Maximum OA the IOMMU HW can support. The format
+ * might select a lower maximum OA.
+ */
+ u8 hw_max_oasz_lg2;
+};
+
+/* Generate the exported function signatures from iommu_pt.h */
+#define IOMMU_PROTOTYPES(fmt) \
+ phys_addr_t pt_iommu_##fmt##_iova_to_phys(struct iommu_domain *domain, \
+ dma_addr_t iova); \
+ int pt_iommu_##fmt##_map_pages(struct iommu_domain *domain, \
+ unsigned long iova, phys_addr_t paddr, \
+ size_t pgsize, size_t pgcount, \
+ int prot, gfp_t gfp, size_t *mapped); \
+ size_t pt_iommu_##fmt##_unmap_pages( \
+ struct iommu_domain *domain, unsigned long iova, \
+ size_t pgsize, size_t pgcount, \
+ struct iommu_iotlb_gather *iotlb_gather); \
+ int pt_iommu_##fmt##_read_and_clear_dirty( \
+ struct iommu_domain *domain, unsigned long iova, size_t size, \
+ unsigned long flags, struct iommu_dirty_bitmap *dirty); \
+ int pt_iommu_##fmt##_init(struct pt_iommu_##fmt *table, \
+ const struct pt_iommu_##fmt##_cfg *cfg, \
+ gfp_t gfp); \
+ void pt_iommu_##fmt##_hw_info(struct pt_iommu_##fmt *table, \
+ struct pt_iommu_##fmt##_hw_info *info)
+#define IOMMU_FORMAT(fmt, member) \
+ struct pt_iommu_##fmt { \
+ struct pt_iommu iommu; \
+ struct pt_##fmt member; \
+ }; \
+ IOMMU_PROTOTYPES(fmt)
+
+/*
+ * A driver uses IOMMU_PT_DOMAIN_OPS to populate the iommu_domain_ops for the
+ * iommu_pt.
+ */
+#define IOMMU_PT_DOMAIN_OPS(fmt) \
+ .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \
+ .map_pages = &pt_iommu_##fmt##_map_pages, \
+ .unmap_pages = &pt_iommu_##fmt##_unmap_pages
+#define IOMMU_PT_DIRTY_OPS(fmt) \
+ .read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty
+
+/*
+ * The driver should set up its domain struct like
+ *	union {
+ *		struct iommu_domain domain;
+ *		struct pt_iommu_xxx xx;
+ *	};
+ *	PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, xx.iommu, domain);
+ *
+ * This creates an alias between driver_domain.domain and
+ * driver_domain.xx.iommu.domain, which avoids a mass rename of existing
+ * driver_domain.domain users.
+ */
+#define PT_IOMMU_CHECK_DOMAIN(s, pt_iommu_memb, domain_memb) \
+ static_assert(offsetof(s, pt_iommu_memb.domain) == \
+ offsetof(s, domain_memb))
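+
+/*
+ * A worked sketch of the pattern above using the amdv1 format declared
+ * below; mock_iommu_domain is a hypothetical name.
+ *
+ *	struct mock_iommu_domain {
+ *		union {
+ *			struct iommu_domain domain;
+ *			struct pt_iommu_amdv1 amdv1;
+ *		};
+ *	};
+ *	PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, amdv1.iommu, domain);
+ *
+ *	static const struct iommu_domain_ops mock_domain_ops = {
+ *		IOMMU_PT_DOMAIN_OPS(amdv1),
+ *	};
+ *
+ * plus any driver-specific entries such as .attach_dev.
+ */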
+
+struct pt_iommu_amdv1_cfg {
+ struct pt_iommu_cfg common;
+ unsigned int starting_level;
+};
+
+struct pt_iommu_amdv1_hw_info {
+ u64 host_pt_root;
+ u8 mode;
+};
+
+IOMMU_FORMAT(amdv1, amdpt);
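+
+/*
+ * For reference, IOMMU_FORMAT(amdv1, amdpt) expands to a wrapper struct
+ * plus the pt_iommu_amdv1_*() prototypes, roughly:
+ *
+ *	struct pt_iommu_amdv1 {
+ *		struct pt_iommu iommu;
+ *		struct pt_amdv1 amdpt;
+ *	};
+ *
+ * and a hedged init sketch (field values illustrative, not taken from any
+ * real driver):
+ *
+ *	struct pt_iommu_amdv1_cfg cfg = {
+ *		.common = {
+ *			.features = 0,
+ *			.hw_max_vasz_lg2 = 48,
+ *			.hw_max_oasz_lg2 = 48,
+ *		},
+ *		.starting_level = 2,
+ *	};
+ *	ret = pt_iommu_amdv1_init(&dom->amdv1, &cfg, GFP_KERNEL);
+ */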
+
+/* amdv1_mock is used by the iommufd selftest */
+#define pt_iommu_amdv1_mock pt_iommu_amdv1
+#define pt_iommu_amdv1_mock_cfg pt_iommu_amdv1_cfg
+struct pt_iommu_amdv1_mock_hw_info;
+IOMMU_PROTOTYPES(amdv1_mock);
+
+struct pt_iommu_vtdss_cfg {
+ struct pt_iommu_cfg common;
+	/* 4 is a 57-bit, 5-level table */
+ unsigned int top_level;
+};
+
+struct pt_iommu_vtdss_hw_info {
+ u64 ssptptr;
+ u8 aw;
+};
+
+IOMMU_FORMAT(vtdss, vtdss_pt);
+
+struct pt_iommu_x86_64_cfg {
+ struct pt_iommu_cfg common;
+	/* 4 is a 57-bit, 5-level table */
+ unsigned int top_level;
+};
+
+struct pt_iommu_x86_64_hw_info {
+ u64 gcr3_pt;
+ u8 levels;
+};
+
+IOMMU_FORMAT(x86_64, x86_64_pt);
+
+#undef IOMMU_PROTOTYPES
+#undef IOMMU_FORMAT
+#endif
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
deleted file mode 100644
index a4c61cbce777..000000000000
--- a/include/linux/genetlink.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __LINUX_GENERIC_NETLINK_H
-#define __LINUX_GENERIC_NETLINK_H
-
-#include <uapi/linux/genetlink.h>
-
-
-/* All generic netlink requests are serialized by a global lock. */
-extern void genl_lock(void);
-extern void genl_unlock(void);
-#ifdef CONFIG_LOCKDEP
-extern bool lockdep_genl_is_held(void);
-#endif
-
-/* for synchronisation between af_netlink and genetlink */
-extern atomic_t genl_sk_destructing_cnt;
-extern wait_queue_head_t genl_sk_destructing_waitq;
-
-/**
- * rcu_dereference_genl - rcu_dereference with debug checking
- * @p: The pointer to read, prior to dereferencing
- *
- * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
- * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
- */
-#define rcu_dereference_genl(p) \
- rcu_dereference_check(p, lockdep_genl_is_held())
-
-/**
- * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
- * @p: The pointer to read, prior to dereferencing
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
- * caller holds genl mutex.
- */
-#define genl_dereference(p) \
- rcu_dereference_protected(p, lockdep_genl_is_held())
-
-#define MODULE_ALIAS_GENL_FAMILY(family)\
- MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
-
-#endif /* __LINUX_GENERIC_NETLINK_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
deleted file mode 100644
index e619fae2f037..000000000000
--- a/include/linux/genhd.h
+++ /dev/null
@@ -1,735 +0,0 @@
-#ifndef _LINUX_GENHD_H
-#define _LINUX_GENHD_H
-
-/*
- * genhd.h Copyright (C) 1992 Drew Eckhardt
- * Generic hard disk header file by
- * Drew Eckhardt
- *
- * <drew@colorado.edu>
- */
-
-#include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/rcupdate.h>
-#include <linux/slab.h>
-#include <linux/percpu-refcount.h>
-#include <linux/uuid.h>
-
-#ifdef CONFIG_BLOCK
-
-#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
-#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
-#define disk_to_dev(disk) (&(disk)->part0.__dev)
-#define part_to_dev(part) (&((part)->__dev))
-
-extern struct device_type part_type;
-extern struct kobject *block_depr;
-extern struct class block_class;
-
-enum {
-/* These three have identical behaviour; use the second one if DOS FDISK gets
- confused about extended/logical partitions starting past cylinder 1023. */
- DOS_EXTENDED_PARTITION = 5,
- LINUX_EXTENDED_PARTITION = 0x85,
- WIN98_EXTENDED_PARTITION = 0x0f,
-
- SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION,
-
- LINUX_SWAP_PARTITION = 0x82,
- LINUX_DATA_PARTITION = 0x83,
- LINUX_LVM_PARTITION = 0x8e,
- LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */
-
- SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION,
- NEW_SOLARIS_X86_PARTITION = 0xbf,
-
- DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */
- DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */
- DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */
- EZD_PARTITION = 0x55, /* EZ-DRIVE */
-
- FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */
- OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */
- NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */
- BSDI_PARTITION = 0xb7, /* BSDI Partition ID */
- MINIX_PARTITION = 0x81, /* Minix Partition ID */
- UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
-};
-
-#define DISK_MAX_PARTS 256
-#define DISK_NAME_LEN 32
-
-#include <linux/major.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/workqueue.h>
-
-struct partition {
- unsigned char boot_ind; /* 0x80 - active */
- unsigned char head; /* starting head */
- unsigned char sector; /* starting sector */
- unsigned char cyl; /* starting cylinder */
- unsigned char sys_ind; /* What partition type */
- unsigned char end_head; /* end head */
- unsigned char end_sector; /* end sector */
- unsigned char end_cyl; /* end cylinder */
- __le32 start_sect; /* starting sector counting from 0 */
- __le32 nr_sects; /* nr of sectors in partition */
-} __attribute__((packed));
-
-struct disk_stats {
- unsigned long sectors[2]; /* READs and WRITEs */
- unsigned long ios[2];
- unsigned long merges[2];
- unsigned long ticks[2];
- unsigned long io_ticks;
- unsigned long time_in_queue;
-};
-
-#define PARTITION_META_INFO_VOLNAMELTH 64
-/*
- * Enough for the string representation of any kind of UUID plus NULL.
- * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
- */
-#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
-
-struct partition_meta_info {
- char uuid[PARTITION_META_INFO_UUIDLTH];
- u8 volname[PARTITION_META_INFO_VOLNAMELTH];
-};
-
-struct hd_struct {
- sector_t start_sect;
- /*
- * nr_sects is protected by sequence counter. One might extend a
- * partition while IO is happening to it and update of nr_sects
- * can be non-atomic on 32bit machines with 64bit sector_t.
- */
- sector_t nr_sects;
- seqcount_t nr_sects_seq;
- sector_t alignment_offset;
- unsigned int discard_alignment;
- struct device __dev;
- struct kobject *holder_dir;
- int policy, partno;
- struct partition_meta_info *info;
-#ifdef CONFIG_FAIL_MAKE_REQUEST
- int make_it_fail;
-#endif
- unsigned long stamp;
- atomic_t in_flight[2];
-#ifdef CONFIG_SMP
- struct disk_stats __percpu *dkstats;
-#else
- struct disk_stats dkstats;
-#endif
- struct percpu_ref ref;
- struct rcu_head rcu_head;
-};
-
-#define GENHD_FL_REMOVABLE 1
-/* 2 is unused */
-#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4
-#define GENHD_FL_CD 8
-#define GENHD_FL_UP 16
-#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
-#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
-#define GENHD_FL_NATIVE_CAPACITY 128
-#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
-#define GENHD_FL_NO_PART_SCAN 512
-
-enum {
- DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
- DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
-};
-
-struct disk_part_tbl {
- struct rcu_head rcu_head;
- int len;
- struct hd_struct __rcu *last_lookup;
- struct hd_struct __rcu *part[];
-};
-
-struct disk_events;
-struct badblocks;
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-struct blk_integrity {
- const struct blk_integrity_profile *profile;
- unsigned char flags;
- unsigned char tuple_size;
- unsigned char interval_exp;
- unsigned char tag_size;
-};
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
-struct gendisk {
- /* major, first_minor and minors are input parameters only,
- * don't use directly. Use disk_devt() and disk_max_parts().
- */
- int major; /* major number of driver */
- int first_minor;
- int minors; /* maximum number of minors, =1 for
- * disks that can't be partitioned. */
-
- char disk_name[DISK_NAME_LEN]; /* name of major driver */
- char *(*devnode)(struct gendisk *gd, umode_t *mode);
-
- unsigned int events; /* supported events */
- unsigned int async_events; /* async events, subset of all */
-
- /* Array of pointers to partitions indexed by partno.
- * Protected with matching bdev lock but stat and other
- * non-critical accesses use RCU. Always access through
- * helpers.
- */
- struct disk_part_tbl __rcu *part_tbl;
- struct hd_struct part0;
-
- const struct block_device_operations *fops;
- struct request_queue *queue;
- void *private_data;
-
- int flags;
- struct kobject *slave_dir;
-
- struct timer_rand_state *random;
- atomic_t sync_io; /* RAID */
- struct disk_events *ev;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct kobject integrity_kobj;
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
- int node_id;
- struct badblocks *bb;
-};
-
-static inline struct gendisk *part_to_disk(struct hd_struct *part)
-{
- if (likely(part)) {
- if (part->partno)
- return dev_to_disk(part_to_dev(part)->parent);
- else
- return dev_to_disk(part_to_dev(part));
- }
- return NULL;
-}
-
-static inline int disk_max_parts(struct gendisk *disk)
-{
- if (disk->flags & GENHD_FL_EXT_DEVT)
- return DISK_MAX_PARTS;
- return disk->minors;
-}
-
-static inline bool disk_part_scan_enabled(struct gendisk *disk)
-{
- return disk_max_parts(disk) > 1 &&
- !(disk->flags & GENHD_FL_NO_PART_SCAN);
-}
-
-static inline dev_t disk_devt(struct gendisk *disk)
-{
- return disk_to_dev(disk)->devt;
-}
-
-static inline dev_t part_devt(struct hd_struct *part)
-{
- return part_to_dev(part)->devt;
-}
-
-extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
-
-static inline void disk_put_part(struct hd_struct *part)
-{
- if (likely(part))
- put_device(part_to_dev(part));
-}
-
-/*
- * Smarter partition iterator without context limits.
- */
-#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
-#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
-#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
-#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
-
-struct disk_part_iter {
- struct gendisk *disk;
- struct hd_struct *part;
- int idx;
- unsigned int flags;
-};
-
-extern void disk_part_iter_init(struct disk_part_iter *piter,
- struct gendisk *disk, unsigned int flags);
-extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
-extern void disk_part_iter_exit(struct disk_part_iter *piter);
-
-extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
- sector_t sector);
-
-/*
- * Macros to operate on percpu disk statistics:
- *
- * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
- * and should be called between disk_stat_lock() and
- * disk_stat_unlock().
- *
- * part_stat_read() can be called at any time.
- *
- * part_stat_{add|set_all}() and {init|free}_part_stats are for
- * internal use only.
- */
-#ifdef CONFIG_SMP
-#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
-#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
-
-#define __part_stat_add(cpu, part, field, addnd) \
- (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
-
-#define part_stat_read(part, field) \
-({ \
- typeof((part)->dkstats->field) res = 0; \
- unsigned int _cpu; \
- for_each_possible_cpu(_cpu) \
- res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
- res; \
-})
-
-static inline void part_stat_set_all(struct hd_struct *part, int value)
-{
- int i;
-
- for_each_possible_cpu(i)
- memset(per_cpu_ptr(part->dkstats, i), value,
- sizeof(struct disk_stats));
-}
-
-static inline int init_part_stats(struct hd_struct *part)
-{
- part->dkstats = alloc_percpu(struct disk_stats);
- if (!part->dkstats)
- return 0;
- return 1;
-}
-
-static inline void free_part_stats(struct hd_struct *part)
-{
- free_percpu(part->dkstats);
-}
-
-#else /* !CONFIG_SMP */
-#define part_stat_lock() ({ rcu_read_lock(); 0; })
-#define part_stat_unlock() rcu_read_unlock()
-
-#define __part_stat_add(cpu, part, field, addnd) \
- ((part)->dkstats.field += addnd)
-
-#define part_stat_read(part, field) ((part)->dkstats.field)
-
-static inline void part_stat_set_all(struct hd_struct *part, int value)
-{
- memset(&part->dkstats, value, sizeof(struct disk_stats));
-}
-
-static inline int init_part_stats(struct hd_struct *part)
-{
- return 1;
-}
-
-static inline void free_part_stats(struct hd_struct *part)
-{
-}
-
-#endif /* CONFIG_SMP */
-
-#define part_stat_add(cpu, part, field, addnd) do { \
- __part_stat_add((cpu), (part), field, addnd); \
- if ((part)->partno) \
- __part_stat_add((cpu), &part_to_disk((part))->part0, \
- field, addnd); \
-} while (0)
-
-#define part_stat_dec(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, -1)
-#define part_stat_inc(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, 1)
-#define part_stat_sub(cpu, gendiskp, field, subnd) \
- part_stat_add(cpu, gendiskp, field, -subnd)
-
-static inline void part_inc_in_flight(struct hd_struct *part, int rw)
-{
- atomic_inc(&part->in_flight[rw]);
- if (part->partno)
- atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
-}
-
-static inline void part_dec_in_flight(struct hd_struct *part, int rw)
-{
- atomic_dec(&part->in_flight[rw]);
- if (part->partno)
- atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
-}
-
-static inline int part_in_flight(struct hd_struct *part)
-{
- return atomic_read(&part->in_flight[0]) + atomic_read(&part->in_flight[1]);
-}
-
-static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)
-{
- if (disk)
- return kzalloc_node(sizeof(struct partition_meta_info),
- GFP_KERNEL, disk->node_id);
- return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL);
-}
-
-static inline void free_part_info(struct hd_struct *part)
-{
- kfree(part->info);
-}
-
-/* block/blk-core.c */
-extern void part_round_stats(int cpu, struct hd_struct *part);
-
-/* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk);
-static inline void add_disk(struct gendisk *disk)
-{
- device_add_disk(NULL, disk);
-}
-
-extern void del_gendisk(struct gendisk *gp);
-extern struct gendisk *get_gendisk(dev_t dev, int *partno);
-extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
-
-extern void set_device_ro(struct block_device *bdev, int flag);
-extern void set_disk_ro(struct gendisk *disk, int flag);
-
-static inline int get_disk_ro(struct gendisk *disk)
-{
- return disk->part0.policy;
-}
-
-extern void disk_block_events(struct gendisk *disk);
-extern void disk_unblock_events(struct gendisk *disk);
-extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
-extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
-
-/* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
-extern void rand_initialize_disk(struct gendisk *disk);
-
-static inline sector_t get_start_sect(struct block_device *bdev)
-{
- return bdev->bd_part->start_sect;
-}
-static inline sector_t get_capacity(struct gendisk *disk)
-{
- return disk->part0.nr_sects;
-}
-static inline void set_capacity(struct gendisk *disk, sector_t size)
-{
- disk->part0.nr_sects = size;
-}
-
-#ifdef CONFIG_SOLARIS_X86_PARTITION
-
-#define SOLARIS_X86_NUMSLICE 16
-#define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL)
-
-struct solaris_x86_slice {
- __le16 s_tag; /* ID tag of partition */
- __le16 s_flag; /* permission flags */
- __le32 s_start; /* start sector no of partition */
- __le32 s_size; /* # of blocks in partition */
-};
-
-struct solaris_x86_vtoc {
- unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */
- __le32 v_sanity; /* to verify vtoc sanity */
- __le32 v_version; /* layout version */
- char v_volume[8]; /* volume name */
- __le16 v_sectorsz; /* sector size in bytes */
- __le16 v_nparts; /* number of partitions */
- unsigned int v_reserved[10]; /* free space */
- struct solaris_x86_slice
- v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */
- unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */
- char v_asciilabel[128]; /* for compatibility */
-};
-
-#endif /* CONFIG_SOLARIS_X86_PARTITION */
-
-#ifdef CONFIG_BSD_DISKLABEL
-/*
- * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
- * updated by Marc Espie <Marc.Espie@openbsd.org>
- */
-
-/* check against BSD src/sys/sys/disklabel.h for consistency */
-
-#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
-#define BSD_MAXPARTITIONS 16
-#define OPENBSD_MAXPARTITIONS 16
-#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
-struct bsd_disklabel {
- __le32 d_magic; /* the magic number */
- __s16 d_type; /* drive type */
- __s16 d_subtype; /* controller/d_type specific */
- char d_typename[16]; /* type name, e.g. "eagle" */
- char d_packname[16]; /* pack identifier */
- __u32 d_secsize; /* # of bytes per sector */
- __u32 d_nsectors; /* # of data sectors per track */
- __u32 d_ntracks; /* # of tracks per cylinder */
- __u32 d_ncylinders; /* # of data cylinders per unit */
- __u32 d_secpercyl; /* # of data sectors per cylinder */
- __u32 d_secperunit; /* # of data sectors per unit */
- __u16 d_sparespertrack; /* # of spare sectors per track */
- __u16 d_sparespercyl; /* # of spare sectors per cylinder */
- __u32 d_acylinders; /* # of alt. cylinders per unit */
- __u16 d_rpm; /* rotational speed */
- __u16 d_interleave; /* hardware sector interleave */
- __u16 d_trackskew; /* sector 0 skew, per track */
- __u16 d_cylskew; /* sector 0 skew, per cylinder */
- __u32 d_headswitch; /* head switch time, usec */
- __u32 d_trkseek; /* track-to-track seek, usec */
- __u32 d_flags; /* generic flags */
-#define NDDATA 5
- __u32 d_drivedata[NDDATA]; /* drive-type specific information */
-#define NSPARE 5
- __u32 d_spare[NSPARE]; /* reserved for future use */
- __le32 d_magic2; /* the magic number (again) */
- __le16 d_checksum; /* xor of data incl. partitions */
-
- /* filesystem and partition information: */
- __le16 d_npartitions; /* number of partitions in following */
- __le32 d_bbsize; /* size of boot area at sn0, bytes */
- __le32 d_sbsize; /* max size of fs superblock, bytes */
- struct bsd_partition { /* the partition table */
- __le32 p_size; /* number of sectors in partition */
- __le32 p_offset; /* starting sector */
- __le32 p_fsize; /* filesystem basic fragment size */
- __u8 p_fstype; /* filesystem type, see below */
- __u8 p_frag; /* filesystem fragments per block */
- __le16 p_cpg; /* filesystem cylinders per group */
- } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
-};
-
-#endif /* CONFIG_BSD_DISKLABEL */
-
-#ifdef CONFIG_UNIXWARE_DISKLABEL
-/*
- * Unixware slices support by Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
- * and Krzysztof G. Baranowski <kgb@knm.org.pl>
- */
-
-#define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */
-#define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */
-#define UNIXWARE_NUMSLICE 16
-#define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */
-
-struct unixware_slice {
- __le16 s_label; /* label */
- __le16 s_flags; /* permission flags */
- __le32 start_sect; /* starting sector */
- __le32 nr_sects; /* number of sectors in slice */
-};
-
-struct unixware_disklabel {
- __le32 d_type; /* drive type */
- __le32 d_magic; /* the magic number */
- __le32 d_version; /* version number */
- char d_serial[12]; /* serial number of the device */
- __le32 d_ncylinders; /* # of data cylinders per device */
- __le32 d_ntracks; /* # of tracks per cylinder */
- __le32 d_nsectors; /* # of data sectors per track */
- __le32 d_secsize; /* # of bytes per sector */
- __le32 d_part_start; /* # of first sector of this partition */
- __le32 d_unknown1[12]; /* ? */
- __le32 d_alt_tbl; /* byte offset of alternate table */
- __le32 d_alt_len; /* byte length of alternate table */
- __le32 d_phys_cyl; /* # of physical cylinders per device */
- __le32 d_phys_trk; /* # of physical tracks per cylinder */
- __le32 d_phys_sec; /* # of physical sectors per track */
- __le32 d_phys_bytes; /* # of physical bytes per sector */
- __le32 d_unknown2; /* ? */
- __le32 d_unknown3; /* ? */
- __le32 d_pad[8]; /* pad */
-
- struct unixware_vtoc {
- __le32 v_magic; /* the magic number */
- __le32 v_version; /* version number */
- char v_name[8]; /* volume name */
- __le16 v_nslices; /* # of slices */
- __le16 v_unknown1; /* ? */
- __le32 v_reserved[10]; /* reserved */
- struct unixware_slice
- v_slice[UNIXWARE_NUMSLICE]; /* slice headers */
- } vtoc;
-
-}; /* 408 */
-
-#endif /* CONFIG_UNIXWARE_DISKLABEL */
-
-#ifdef CONFIG_MINIX_SUBPARTITION
-# define MINIX_NR_SUBPARTITIONS 4
-#endif /* CONFIG_MINIX_SUBPARTITION */
-
-#define ADDPART_FLAG_NONE 0
-#define ADDPART_FLAG_RAID 1
-#define ADDPART_FLAG_WHOLEDISK 2
-
-extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
-extern void blk_free_devt(dev_t devt);
-extern dev_t blk_lookup_devt(const char *name, int partno);
-extern char *disk_name (struct gendisk *hd, int partno, char *buf);
-
-extern int disk_expand_part_tbl(struct gendisk *disk, int target);
-extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
-extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
-extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
- int partno, sector_t start,
- sector_t len, int flags,
- struct partition_meta_info
- *info);
-extern void __delete_partition(struct percpu_ref *);
-extern void delete_partition(struct gendisk *, int);
-extern void printk_all_partitions(void);
-
-extern struct gendisk *alloc_disk_node(int minors, int node_id);
-extern struct gendisk *alloc_disk(int minors);
-extern struct kobject *get_disk(struct gendisk *disk);
-extern void put_disk(struct gendisk *disk);
-extern void blk_register_region(dev_t devt, unsigned long range,
- struct module *module,
- struct kobject *(*probe)(dev_t, int *, void *),
- int (*lock)(dev_t, void *),
- void *data);
-extern void blk_unregister_region(dev_t devt, unsigned long range);
-
-extern ssize_t part_size_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_stat_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_inflight_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-#ifdef CONFIG_FAIL_MAKE_REQUEST
-extern ssize_t part_fail_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_fail_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-#endif /* CONFIG_FAIL_MAKE_REQUEST */
-
-static inline int hd_ref_init(struct hd_struct *part)
-{
- if (percpu_ref_init(&part->ref, __delete_partition, 0,
- GFP_KERNEL))
- return -ENOMEM;
- return 0;
-}
-
-static inline void hd_struct_get(struct hd_struct *part)
-{
- percpu_ref_get(&part->ref);
-}
-
-static inline int hd_struct_try_get(struct hd_struct *part)
-{
- return percpu_ref_tryget_live(&part->ref);
-}
-
-static inline void hd_struct_put(struct hd_struct *part)
-{
- percpu_ref_put(&part->ref);
-}
-
-static inline void hd_struct_kill(struct hd_struct *part)
-{
- percpu_ref_kill(&part->ref);
-}
-
-static inline void hd_free_part(struct hd_struct *part)
-{
- free_part_stats(part);
- free_part_info(part);
- percpu_ref_exit(&part->ref);
-}
-
-/*
- * Any access of part->nr_sects which is not protected by partition
- * bd_mutex or gendisk bdev bd_mutex, should be done using this
- * accessor function.
- *
- * Code written along the lines of i_size_read() and i_size_write().
- * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption
- * on.
- */
-static inline sector_t part_nr_sects_read(struct hd_struct *part)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
- sector_t nr_sects;
- unsigned seq;
- do {
- seq = read_seqcount_begin(&part->nr_sects_seq);
- nr_sects = part->nr_sects;
- } while (read_seqcount_retry(&part->nr_sects_seq, seq));
- return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
- sector_t nr_sects;
-
- preempt_disable();
- nr_sects = part->nr_sects;
- preempt_enable();
- return nr_sects;
-#else
- return part->nr_sects;
-#endif
-}
-
-/*
- * Should be called with mutex lock held (typically bd_mutex) of partition
- * to provide mutual exlusion among writers otherwise seqcount might be
- * left in wrong state leaving the readers spinning infinitely.
- */
-static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
- write_seqcount_begin(&part->nr_sects_seq);
- part->nr_sects = size;
- write_seqcount_end(&part->nr_sects_seq);
-#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
- preempt_disable();
- part->nr_sects = size;
- preempt_enable();
-#else
- part->nr_sects = size;
-#endif
-}
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-extern void blk_integrity_add(struct gendisk *);
-extern void blk_integrity_del(struct gendisk *);
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static inline void blk_integrity_add(struct gendisk *disk) { }
-static inline void blk_integrity_del(struct gendisk *disk) { }
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
-#else /* CONFIG_BLOCK */
-
-static inline void printk_all_partitions(void) { }
-
-static inline dev_t blk_lookup_devt(const char *name, int partno)
-{
- dev_t devt = MKDEV(0, 0);
- return devt;
-}
-#endif /* CONFIG_BLOCK */
-
-#endif /* _LINUX_GENHD_H */
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 377257d8f7e3..d4da060b7532 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef GENL_MAGIC_FUNC_H
#define GENL_MAGIC_FUNC_H
+#include <linux/args.h>
+#include <linux/build_bug.h>
#include <linux/genl_magic_struct.h>
/*
@@ -21,7 +24,7 @@
#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
[tag_name] = { .type = NLA_NESTED },
-static struct nla_policy CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
+static struct nla_policy CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -131,17 +134,6 @@ static void dprint_array(const char *dir, int nla_type,
* use one static buffer for parsing of nested attributes */
static struct nlattr *nested_attr_tb[128];
-#ifndef BUILD_BUG_ON
-/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
-/* Force a compilation error if condition is true, but also produce a
- result (of value 0 and type size_t), so the expression can be used
- e.g. in a structure initializer (or where-ever else comma expressions
- aren't permitted). */
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
-#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
-#endif
-
#undef GENL_struct
#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
/* *_from_attrs functions are static, but potentially unused */ \
@@ -218,7 +210,7 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
* Magic: define op number to op name mapping {{{1
* {{{2
*/
-const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+static const char *CONCATENATE(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
{
switch (cmd) {
#undef GENL_op
@@ -242,10 +234,9 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
{ \
handler \
.cmd = op_name, \
- .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy), \
},
-#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
+#define ZZZ_genl_ops CONCATENATE(GENL_MAGIC_FAMILY, _genl_ops)
static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -258,32 +249,32 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
* and provide register/unregister functions.
* {{{2
*/
-#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
+#define ZZZ_genl_family CONCATENATE(GENL_MAGIC_FAMILY, _genl_family)
static struct genl_family ZZZ_genl_family;
/*
* Magic: define multicast groups
* Magic: define multicast group registration helper
*/
-#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps)
+#define ZZZ_genl_mcgrps CONCATENATE(GENL_MAGIC_FAMILY, _genl_mcgrps)
static const struct genl_multicast_group ZZZ_genl_mcgrps[] = {
#undef GENL_mc_group
#define GENL_mc_group(group) { .name = #group, },
#include GENL_MAGIC_INCLUDE_FILE
};
-enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) {
+enum CONCATENATE(GENL_MAGIC_FAMILY, group_ids) {
#undef GENL_mc_group
-#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group),
+#define GENL_mc_group(group) CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group),
#include GENL_MAGIC_INCLUDE_FILE
};
#undef GENL_mc_group
#define GENL_mc_group(group) \
-static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
+static int CONCATENATE(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
struct sk_buff *skb, gfp_t flags) \
{ \
unsigned int group_id = \
- CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \
+ CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group); \
return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \
group_id, flags); \
}
@@ -299,20 +290,22 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
#ifdef GENL_MAGIC_FAMILY_HDRSZ
.hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
#endif
- .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
+ .maxattr = ARRAY_SIZE(CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
+ .policy = CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy),
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
+ .resv_start_op = 42, /* drbd is currently the only user */
.n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
.module = THIS_MODULE,
};
-int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
+int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void)
{
return genl_register_family(&ZZZ_genl_family);
}
-void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
+void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void)
{
genl_unregister_family(&ZZZ_genl_family);
}
@@ -413,4 +406,3 @@ s_fields \
/* }}}1 */
#endif /* GENL_MAGIC_FUNC_H */
-/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */
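
For orientation: CONCATENATE() from <linux/args.h> is the generic token-paste
helper that replaces the local CONCAT_()/CONCAT__() pair removed from
genl_magic_struct.h below. A minimal sketch of the pattern (paraphrased, not
the verbatim kernel source):

	#define __CONCATENATE(a, b)	a ## b
	#define CONCATENATE(a, b)	__CONCATENATE(a, b)

	/* with GENL_MAGIC_FAMILY defined as drbd, this declares
	 * drbd_genl_register() */
	int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void);
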
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index 6270a56e5edc..621b87a87d74 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef GENL_MAGIC_STRUCT_H
#define GENL_MAGIC_STRUCT_H
@@ -13,14 +14,12 @@
# error "you need to define GENL_MAGIC_INCLUDE_FILE before inclusion"
#endif
-#include <linux/genetlink.h>
+#include <linux/args.h>
#include <linux/types.h>
+#include <net/genetlink.h>
-#define CONCAT__(a,b) a ## b
-#define CONCAT_(a,b) CONCAT__(a,b)
-
-extern int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void);
-extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
+extern int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void);
+extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void);
/*
* Extension of genl attribute validation policies {{{2
@@ -88,7 +87,7 @@ static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
nla_get_u64, nla_put_u64_0pad, false)
#define __str_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
- nla_strlcpy, nla_put, false)
+ nla_strscpy, nla_put, false)
#define __bin_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \
nla_memcpy, nla_put, false)
@@ -190,6 +189,7 @@ static inline void ct_assert_unique_operations(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -208,6 +208,7 @@ static inline void ct_assert_unique_top_level_attributes(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -217,7 +218,8 @@ static inline void ct_assert_unique_top_level_attributes(void)
static inline void ct_assert_unique_ ## s_name ## _attributes(void) \
{ \
switch (0) { \
- s_fields \
+ s_fields \
+ case 0: \
; \
} \
}
@@ -279,4 +281,3 @@ enum { \
/* }}}1 */
#endif /* GENL_MAGIC_STRUCT_H */
-/* vim: set foldmethod=marker nofoldenable : */
diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h
index c7372d7a97be..c304dcdb4eac 100644
--- a/include/linux/getcpu.h
+++ b/include/linux/getcpu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_GETCPU_H
#define _LINUX_GETCPU_H 1
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bcfb9f7c46f5..b155929af5b1 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -1,319 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H
-#include <linux/mmdebug.h>
+#include <linux/gfp_types.h>
+
#include <linux/mmzone.h>
-#include <linux/stddef.h>
-#include <linux/linkage.h>
#include <linux/topology.h>
+#include <linux/alloc_tag.h>
+#include <linux/cleanup.h>
+#include <linux/sched.h>
struct vm_area_struct;
-
-/*
- * In case of changes, please don't forget to update
- * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
- */
-
-/* Plain integer GFP bitmasks. Do not use this directly. */
-#define ___GFP_DMA 0x01u
-#define ___GFP_HIGHMEM 0x02u
-#define ___GFP_DMA32 0x04u
-#define ___GFP_MOVABLE 0x08u
-#define ___GFP_RECLAIMABLE 0x10u
-#define ___GFP_HIGH 0x20u
-#define ___GFP_IO 0x40u
-#define ___GFP_FS 0x80u
-#define ___GFP_COLD 0x100u
-#define ___GFP_NOWARN 0x200u
-#define ___GFP_RETRY_MAYFAIL 0x400u
-#define ___GFP_NOFAIL 0x800u
-#define ___GFP_NORETRY 0x1000u
-#define ___GFP_MEMALLOC 0x2000u
-#define ___GFP_COMP 0x4000u
-#define ___GFP_ZERO 0x8000u
-#define ___GFP_NOMEMALLOC 0x10000u
-#define ___GFP_HARDWALL 0x20000u
-#define ___GFP_THISNODE 0x40000u
-#define ___GFP_ATOMIC 0x80000u
-#define ___GFP_ACCOUNT 0x100000u
-#define ___GFP_NOTRACK 0x200000u
-#define ___GFP_DIRECT_RECLAIM 0x400000u
-#define ___GFP_WRITE 0x800000u
-#define ___GFP_KSWAPD_RECLAIM 0x1000000u
-#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x2000000u
-#else
-#define ___GFP_NOLOCKDEP 0
-#endif
-/* If the above are modified, __GFP_BITS_SHIFT may need updating */
-
-/*
- * Physical address zone modifiers (see linux/mmzone.h - low four bits)
- *
- * Do not put any conditional on these. If necessary modify the definitions
- * without the underscores and use them consistently. The definitions here may
- * be used in bit comparisons.
- */
-#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
-#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
-#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
-/*
- * Page mobility and placement hints
- *
- * These flags provide hints about how mobile the page is. Pages with similar
- * mobility are placed within the same pageblocks to minimise problems due
- * to external fragmentation.
- *
- * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
- * moved by page migration during memory compaction or can be reclaimed.
- *
- * __GFP_RECLAIMABLE is used for slab allocations that specify
- * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
- *
- * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
- * these pages will be spread between local zones to avoid all the dirty
- * pages being in one zone (fair zone allocation policy).
- *
- * __GFP_HARDWALL enforces the cpuset memory allocation policy.
- *
- * __GFP_THISNODE forces the allocation to be satisified from the requested
- * node with no fallbacks or placement policy enforcements.
- *
- * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
- */
-#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
-#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
-#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
-#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
-#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
-
-/*
- * Watermark modifiers -- controls access to emergency reserves
- *
- * __GFP_HIGH indicates that the caller is high-priority and that granting
- * the request is necessary before the system can make forward progress.
- * For example, creating an IO context to clean pages.
- *
- * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
- * high priority. Users are typically interrupt handlers. This may be
- * used in conjunction with __GFP_HIGH
- *
- * __GFP_MEMALLOC allows access to all memory. This should only be used when
- * the caller guarantees the allocation will allow more memory to be freed
- * very shortly e.g. process exiting or swapping. Users either should
- * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
- *
- * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
- * This takes precedence over the __GFP_MEMALLOC flag if both are set.
- */
-#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
-#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
-#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
-
-/*
- * Reclaim modifiers
- *
- * __GFP_IO can start physical IO.
- *
- * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
- * allocator recursing into the filesystem which might already be holding
- * locks.
- *
- * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
- * This flag can be cleared to avoid unnecessary delays when a fallback
- * option is available.
- *
- * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
- * the low watermark is reached and have it reclaim pages until the high
- * watermark is reached. A caller may wish to clear this flag when fallback
- * options are available and the reclaim is likely to disrupt the system. The
- * canonical example is THP allocation where a fallback is cheap but
- * reclaim/compaction may cause indirect stalls.
- *
- * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
- *
- * The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER).
- * !costly allocations are too essential to fail so they are implicitly
- * non-failing by default (with some exceptions like OOM victims might fail so
- * the caller still has to check for failures) while costly requests try to be
- * not disruptive and back off even without invoking the OOM killer.
- * The following three modifiers might be used to override some of these
- * implicit rules
- *
- * __GFP_NORETRY: The VM implementation will try only very lightweight
- * memory direct reclaim to get some memory under memory pressure (thus
- * it can sleep). It will avoid disruptive actions like OOM killer. The
- * caller must handle the failure which is quite likely to happen under
- * heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
- *
- * __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
- * procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
- * compaction (which removes fragmentation) and page-out.
- * There is still a definite limit to the number of retries, but it is
- * a larger limit than with __GFP_NORETRY.
- * Allocations with this flag may fail, but only when there is
- * genuinely little unused memory. While these allocations do not
- * directly trigger the OOM killer, their failure indicates that
- * the system is likely to need to use the OOM killer soon. The
- * caller must handle failure, but can reasonably do so by failing
- * a higher-level request, or completing it only in a much less
- * efficient manner.
- * If the allocation does fail, and the caller is in a position to
- * free some non-essential memory, doing so could benefit the system
- * as a whole.
- *
- * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. The allocation could block
- * indefinitely but will never return with failure. Testing for
- * failure is pointless.
- * New users should be evaluated carefully (and the flag should be
- * used only when there is no reasonable failure policy) but it is
- * definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
- */
-#define __GFP_IO ((__force gfp_t)___GFP_IO)
-#define __GFP_FS ((__force gfp_t)___GFP_FS)
-#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
-#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
-#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
-#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
-#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
-#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
-
-/*
- * Action modifiers
- *
- * __GFP_COLD indicates that the caller does not expect to be used in the near
- * future. Where possible, a cache-cold page will be returned.
- *
- * __GFP_NOWARN suppresses allocation failure reports.
- *
- * __GFP_COMP address compound page metadata.
- *
- * __GFP_ZERO returns a zeroed page on success.
- *
- * __GFP_NOTRACK avoids tracking with kmemcheck.
- *
- * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
- * distinguishing in the source between false positives and allocations that
- * cannot be supported (e.g. page tables).
- */
-#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
-#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
-#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
-#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
-#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
-#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-
-/* Disable lockdep for GFP context tracking */
-#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
-
-/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
-#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-/*
- * Useful GFP flag combinations that are commonly used. It is recommended
- * that subsystems start with one of these combinations and then set/clear
- * __GFP_FOO flags as necessary.
- *
- * GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
- * watermark is applied to allow access to "atomic reserves"
- *
- * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
- * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
- *
- * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
- * accounted to kmemcg.
- *
- * GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
- *
- * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
- * that do not require the starting of any physical IO.
- * Please try to avoid using this flag directly and instead use
- * memalloc_noio_{save,restore} to mark the whole scope which cannot
- * perform any IO with a short explanation why. All allocation requests
- * will inherit GFP_NOIO implicitly.
- *
- * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
- * Please try to avoid using this flag directly and instead use
- * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
- * recurse into the FS layer with a short explanation why. All allocation
- * requests will inherit GFP_NOFS implicitly.
- *
- * GFP_USER is for userspace allocations that also need to be directly
- * accessibly by the kernel or hardware. It is typically used by hardware
- * for buffers that are mapped to userspace (e.g. graphics) that hardware
- * still must DMA to. cpuset limits are enforced for these allocations.
- *
- * GFP_DMA exists for historical reasons and should be avoided where possible.
- * The flags indicates that the caller requires that the lowest zone be
- * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
- * it would require careful auditing as some users really require it and
- * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
- * lowest zone as a type of emergency reserve.
- *
- * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
- * address.
- *
- * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
- * do not need to be directly accessible by the kernel but that cannot
- * move once in use. An example may be a hardware allocation that maps
- * data directly into userspace but has no addressing limitations.
- *
- * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
- * need direct access to but can use kmap() when access is required. They
- * are expected to be movable via page reclaim or page migration. Typically,
- * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
- *
- * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are
- * compound allocations that will generally fail quickly if memory is not
- * available and will not wake kswapd/kcompactd on failure. The _LIGHT
- * version does not attempt reclaim/compaction at all and is by default used
- * in page fault path, while the non-light is used by khugepaged.
- */
-#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO (__GFP_RECLAIM)
-#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
-#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
- __GFP_RECLAIMABLE)
-#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
-#define GFP_DMA __GFP_DMA
-#define GFP_DMA32 __GFP_DMA32
-#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
-#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
-#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+struct mempolicy;
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
-static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
+static inline int gfp_migratetype(const gfp_t gfp_flags)
{
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
+ BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
+ BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
+ GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
- return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+ return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
@@ -323,6 +40,25 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
+static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags)
+{
+ /*
+	 * !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed.
+ * !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd.
+ * All GFP_* flags including GFP_NOWAIT use one or both flags.
+ * alloc_pages_nolock() is the only API that doesn't specify either flag.
+ *
+ * This is stronger than GFP_NOWAIT or GFP_ATOMIC because
+ * those are guaranteed to never block on a sleeping lock.
+ * Here we are enforcing that the allocation doesn't ever spin
+ * on any locks (i.e. only trylocks). There is no high level
+ * GFP_$FOO flag for this use in alloc_pages_nolock() as the
+ * regular page allocator doesn't fully support this
+ * allocation mode.
+ */
+ return !!(gfp_flags & __GFP_RECLAIM);
+}
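+
+/*
+ * Hedged usage sketch (mock_pool is hypothetical): callers that may run
+ * where even spinning is forbidden should take a trylock-only path.
+ *
+ *	if (gfpflags_allow_spinning(gfp))
+ *		spin_lock(&mock_pool->lock);
+ *	else if (!spin_trylock(&mock_pool->lock))
+ *		return NULL;
+ */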
+
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
@@ -358,7 +94,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x1 => DMA or NORMAL
* 0x2 => HIGHMEM or NORMAL
* 0x3 => BAD (DMA+HIGHMEM)
- * 0x4 => DMA32 or DMA or NORMAL
+ * 0x4 => DMA32 or NORMAL
* 0x5 => BAD (DMA+DMA32)
* 0x6 => BAD (HIGHMEM+DMA32)
* 0x7 => BAD (HIGHMEM+DMA32+DMA)
@@ -366,7 +102,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x9 => DMA or NORMAL (MOVABLE+DMA)
* 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
* 0xb => BAD (MOVABLE+HIGHMEM+DMA)
- * 0xc => DMA32 (MOVABLE+DMA32)
+ * 0xc => DMA32 or NORMAL (MOVABLE+DMA32)
* 0xd => BAD (MOVABLE+DMA32+DMA)
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
@@ -441,13 +177,38 @@ static inline int gfp_zonelist(gfp_t flags)
}
/*
+ * gfp flag masking for nested internal allocations.
+ *
+ * For code that needs to do allocations inside the public allocation API (e.g.
+ * memory allocation tracking code) the allocations need to obey the caller's
+ * allocation context constraints to prevent allocation context mismatches
+ * (e.g. GFP_KERNEL allocations in GFP_NOFS contexts) from causing potential
+ * deadlocks.
+ *
+ * It is also assumed that these nested allocations are for internal kernel
+ * object storage purposes only and are not going to be used for DMA, etc. Hence
+ * we strip out all the zone information and leave just the context information
+ * intact.
+ *
+ * Further, internal allocations must fail before the higher level allocation
+ * can fail, so we must make them fail faster and fail silently. We also don't
+ * want them to deplete emergency reserves. Hence callers must be prepared for
+ * these nested allocations to fail.
+ */
+static inline gfp_t gfp_nested_mask(gfp_t flags)
+{
+ return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
+ (__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
+}
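+
+/*
+ * Hedged usage sketch (mock_tag is a hypothetical type): a tracking
+ * object allocated from inside the allocator inherits only the caller's
+ * context bits and must tolerate failure.
+ *
+ *	tag = kmalloc(sizeof(struct mock_tag), gfp_nested_mask(caller_gfp));
+ *	if (!tag)
+ *		return;
+ */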
+
+/*
* We get the zone list from the current node and the gfp_mask.
- * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
* There are two zonelists per node, one for all zones with memory and
* one containing just zones from the node the zonelist belongs to.
*
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
+ * For the case of non-NUMA systems the NODE_DATA() gets optimized to
+ * &contig_page_data at compile-time.
*/
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
@@ -461,14 +222,54 @@ static inline void arch_free_page(struct page *page, int order) { }
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
- nodemask_t *nodemask);
+struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
+#define __alloc_pages(...) alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))
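+
+/*
+ * The _noprof/alloc_hooks() pairing is the memory allocation profiling
+ * pattern from <linux/alloc_tag.h>: the wrapper macro tags the call site
+ * and then invokes the _noprof implementation. Simplified sketch, not the
+ * verbatim macro:
+ *
+ *	#define alloc_hooks(_do_alloc)				\
+ *	({							\
+ *		DEFINE_ALLOC_TAG(_alloc_tag);			\
+ *		alloc_hooks_tag(&_alloc_tag, _do_alloc);	\
+ *	})
+ */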
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
+struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
+#define __folio_alloc(...) alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))
+
+unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
+ nodemask_t *nodemask, int nr_pages,
+ struct page **page_array);
+#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
+
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
+ unsigned long nr_pages,
+ struct page **page_array);
+#define alloc_pages_bulk_mempolicy(...) \
+ alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))
+
+/* Bulk allocate order-0 pages */
+#define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \
+ __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
+
+static inline unsigned long
+alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
+ struct page **page_array)
+{
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
+}
+
+#define alloc_pages_bulk_node(...) \
+ alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))
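+
+/*
+ * Hedged usage sketch: bulk-allocate up to 16 order-0 pages in one call;
+ * the return value is how many entries of the array were populated.
+ *
+ *	struct page *pages[16] = {};
+ *	unsigned long filled = alloc_pages_bulk(GFP_KERNEL, 16, pages);
+ */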
+
+static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
- return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
+ gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
+
+ if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
+ return;
+
+ if (node_online(this_node))
+ return;
+
+ pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
+ dump_stack();
}
/*
@@ -476,88 +277,123 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
* online. For more general interface, see alloc_pages_node().
*/
static inline struct page *
-__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
+__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON(!node_online(nid));
+ warn_if_node_offline(nid, gfp_mask);
- return __alloc_pages(gfp_mask, order, nid);
+ return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
}
+#define __alloc_pages_node(...) alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))
+
+static inline
+struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
+{
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ warn_if_node_offline(nid, gfp);
+
+ return __folio_alloc_noprof(gfp, order, nid, NULL);
+}
+
+#define __folio_alloc_node(...) alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))
+
/*
* Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
* prefer the current CPU's closest node. Otherwise node must be valid and
* online.
*/
-static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
- unsigned int order)
+static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
+ unsigned int order)
{
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
- return __alloc_pages_node(nid, gfp_mask, order);
+ return __alloc_pages_node_noprof(nid, gfp_mask, order);
}
-#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
+#define alloc_pages_node(...) alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
-static inline struct page *
-alloc_pages(gfp_t gfp_mask, unsigned int order)
+#ifdef CONFIG_NUMA
+struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid);
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long addr);
+#else
+static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
- return alloc_pages_current(gfp_mask, order);
+ return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
- struct vm_area_struct *vma, unsigned long addr,
- int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
-#else
-#define alloc_pages(gfp_mask, order) \
- alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
- alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages(gfp_mask, order)
+static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
+{
+ return __folio_alloc_node_noprof(gfp, order, numa_node_id());
+}
+static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid)
+{
+ return folio_alloc_noprof(gfp, order);
+}
+#define vma_alloc_folio_noprof(gfp, order, vma, addr) \
+ folio_alloc_noprof(gfp, order)
#endif
+
+#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
+#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
+#define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
+#define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
+
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
-#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
-extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
-extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);
+
+ return &folio->page;
+}
+#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
+
+struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
+#define alloc_pages_nolock(...) alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__))
+
+extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
+#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
+
+extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
+#define get_zeroed_page(...) alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))
+
+void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
+#define alloc_pages_exact(...) alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
-#define __get_free_page(gfp_mask) \
- __get_free_pages((gfp_mask), 0)
+__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
+#define alloc_pages_exact_nid(...) \
+ alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))
+
+#define __get_free_page(gfp_mask) \
+ __get_free_pages((gfp_mask), 0)
-#define __get_dma_pages(gfp_mask, order) \
- __get_free_pages((gfp_mask) | GFP_DMA, (order))
+#define __get_dma_pages(gfp_mask, order) \
+ __get_free_pages((gfp_mask) | GFP_DMA, (order))
extern void __free_pages(struct page *page, unsigned int order);
+extern void free_pages_nolock(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_hot_cold_page(struct page *page, bool cold);
-extern void free_hot_cold_page_list(struct list_head *list, bool cold);
-
-struct page_frag_cache;
-extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
-extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
-void page_alloc_init(void);
+void page_alloc_init_cpuhp(void);
+bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
void page_alloc_init_late(void);
+void setup_pcp_cacheinfo(unsigned int cpu);
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
@@ -571,28 +407,63 @@ extern gfp_t gfp_allowed_mask;
/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
-extern void pm_restrict_gfp_mask(void);
-extern void pm_restore_gfp_mask(void);
+static inline bool gfp_has_io_fs(gfp_t gfp)
+{
+ return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
+}
-#ifdef CONFIG_PM_SLEEP
-extern bool pm_suspended_storage(void);
-#else
-static inline bool pm_suspended_storage(void)
+/*
+ * Check if the gfp flags allow compaction - GFP_NOIO is a really
+ * tricky context because the migration might require IO.
+ */
+static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
- return false;
+ return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}
-#endif /* CONFIG_PM_SLEEP */
-#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
+extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
+
+#ifdef CONFIG_CONTIG_ALLOC
+
+typedef unsigned int __bitwise acr_flags_t;
+#define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request
+#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
+
/* The below functions must be run on a range from a single zone. */
-extern int alloc_contig_range(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask);
-extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
+#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
+
+extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask);
+#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
+
#endif
+void free_contig_range(unsigned long pfn, unsigned long nr_pages);
+
+#ifdef CONFIG_CONTIG_ALLOC
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ struct page *page;
+
+ if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+ return NULL;
-#ifdef CONFIG_CMA
-/* CMA stuff */
-extern void init_cma_reserved_pageblock(struct page *page);
+ page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
+
+ return page ? page_folio(page) : NULL;
+}
+#else
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ return NULL;
+}
#endif
+/* This should be paired with folio_put() rather than free_contig_range(). */
+#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
+
+DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
#endif /* __LINUX_GFP_H */
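
A hedged sketch of a caller using the renamed entry points above through their alloc_hooks() macro wrappers, so allocations are attributed to the call site when memory allocation profiling is enabled; my_alloc_buf(), my_free_buf() and MY_ORDER are hypothetical names.

#include <linux/gfp.h>
#include <linux/mm.h>

#define MY_ORDER 2	/* hypothetical: 1 << 2 contiguous pages */

static void *my_alloc_buf(int nid)
{
	struct page *page;

	/* NUMA_NO_NODE makes the helper fall back to numa_mem_id() */
	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, MY_ORDER);
	if (!page)
		return NULL;

	return page_address(page);
}

static void my_free_buf(void *buf)
{
	free_pages((unsigned long)buf, MY_ORDER);
}

The DEFINE_FREE(free_page, ...) clause at the end of the header additionally lets a pointer to an order-0 page be annotated with the cleanup.h __free(free_page) attribute, releasing it automatically when the variable goes out of scope.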
diff --git a/include/linux/gfp_api.h b/include/linux/gfp_api.h
new file mode 100644
index 000000000000..5a05a2764a86
--- /dev/null
+++ b/include/linux/gfp_api.h
@@ -0,0 +1 @@
+#include <linux/gfp.h>
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
new file mode 100644
index 000000000000..3de43b12209e
--- /dev/null
+++ b/include/linux/gfp_types.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_TYPES_H
+#define __LINUX_GFP_TYPES_H
+
+#include <linux/bits.h>
+
+/* The typedef is in types.h but we want the documentation here */
+#if 0
+/**
+ * typedef gfp_t - Memory allocation flags.
+ *
+ * GFP flags are commonly used throughout Linux to indicate how memory
+ * should be allocated. The GFP acronym stands for get_free_pages(),
+ * the underlying memory allocation function. Not every GFP flag is
+ * supported by every function which may allocate memory. Most users
+ * will want to use a plain ``GFP_KERNEL``.
+ */
+typedef unsigned int __bitwise gfp_t;
+#endif
+
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
+enum {
+ ___GFP_DMA_BIT,
+ ___GFP_HIGHMEM_BIT,
+ ___GFP_DMA32_BIT,
+ ___GFP_MOVABLE_BIT,
+ ___GFP_RECLAIMABLE_BIT,
+ ___GFP_HIGH_BIT,
+ ___GFP_IO_BIT,
+ ___GFP_FS_BIT,
+ ___GFP_ZERO_BIT,
+ ___GFP_UNUSED_BIT, /* 0x200u unused */
+ ___GFP_DIRECT_RECLAIM_BIT,
+ ___GFP_KSWAPD_RECLAIM_BIT,
+ ___GFP_WRITE_BIT,
+ ___GFP_NOWARN_BIT,
+ ___GFP_RETRY_MAYFAIL_BIT,
+ ___GFP_NOFAIL_BIT,
+ ___GFP_NORETRY_BIT,
+ ___GFP_MEMALLOC_BIT,
+ ___GFP_COMP_BIT,
+ ___GFP_NOMEMALLOC_BIT,
+ ___GFP_HARDWALL_BIT,
+ ___GFP_THISNODE_BIT,
+ ___GFP_ACCOUNT_BIT,
+ ___GFP_ZEROTAGS_BIT,
+#ifdef CONFIG_KASAN_HW_TAGS
+ ___GFP_SKIP_ZERO_BIT,
+ ___GFP_SKIP_KASAN_BIT,
+#endif
+#ifdef CONFIG_LOCKDEP
+ ___GFP_NOLOCKDEP_BIT,
+#endif
+ ___GFP_NO_OBJ_EXT_BIT,
+ ___GFP_LAST_BIT
+};
+
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA BIT(___GFP_DMA_BIT)
+#define ___GFP_HIGHMEM BIT(___GFP_HIGHMEM_BIT)
+#define ___GFP_DMA32 BIT(___GFP_DMA32_BIT)
+#define ___GFP_MOVABLE BIT(___GFP_MOVABLE_BIT)
+#define ___GFP_RECLAIMABLE BIT(___GFP_RECLAIMABLE_BIT)
+#define ___GFP_HIGH BIT(___GFP_HIGH_BIT)
+#define ___GFP_IO BIT(___GFP_IO_BIT)
+#define ___GFP_FS BIT(___GFP_FS_BIT)
+#define ___GFP_ZERO BIT(___GFP_ZERO_BIT)
+/* 0x200u unused */
+#define ___GFP_DIRECT_RECLAIM BIT(___GFP_DIRECT_RECLAIM_BIT)
+#define ___GFP_KSWAPD_RECLAIM BIT(___GFP_KSWAPD_RECLAIM_BIT)
+#define ___GFP_WRITE BIT(___GFP_WRITE_BIT)
+#define ___GFP_NOWARN BIT(___GFP_NOWARN_BIT)
+#define ___GFP_RETRY_MAYFAIL BIT(___GFP_RETRY_MAYFAIL_BIT)
+#define ___GFP_NOFAIL BIT(___GFP_NOFAIL_BIT)
+#define ___GFP_NORETRY BIT(___GFP_NORETRY_BIT)
+#define ___GFP_MEMALLOC BIT(___GFP_MEMALLOC_BIT)
+#define ___GFP_COMP BIT(___GFP_COMP_BIT)
+#define ___GFP_NOMEMALLOC BIT(___GFP_NOMEMALLOC_BIT)
+#define ___GFP_HARDWALL BIT(___GFP_HARDWALL_BIT)
+#define ___GFP_THISNODE BIT(___GFP_THISNODE_BIT)
+#define ___GFP_ACCOUNT BIT(___GFP_ACCOUNT_BIT)
+#define ___GFP_ZEROTAGS BIT(___GFP_ZEROTAGS_BIT)
+#ifdef CONFIG_KASAN_HW_TAGS
+#define ___GFP_SKIP_ZERO BIT(___GFP_SKIP_ZERO_BIT)
+#define ___GFP_SKIP_KASAN BIT(___GFP_SKIP_KASAN_BIT)
+#else
+#define ___GFP_SKIP_ZERO 0
+#define ___GFP_SKIP_KASAN 0
+#endif
+#ifdef CONFIG_LOCKDEP
+#define ___GFP_NOLOCKDEP BIT(___GFP_NOLOCKDEP_BIT)
+#else
+#define ___GFP_NOLOCKDEP 0
+#endif
+#define ___GFP_NO_OBJ_EXT BIT(___GFP_NO_OBJ_EXT_BIT)
+
+/*
+ * Physical address zone modifiers (see linux/mmzone.h - low four bits)
+ *
+ * Do not put any conditional on these. If necessary modify the definitions
+ * without the underscores and use them consistently. The definitions here may
+ * be used in bit comparisons.
+ */
+#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+
+/**
+ * DOC: Page mobility and placement hints
+ *
+ * Page mobility and placement hints
+ * ---------------------------------
+ *
+ * These flags provide hints about how mobile the page is. Pages with similar
+ * mobility are placed within the same pageblocks to minimise problems due
+ * to external fragmentation.
+ *
+ * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
+ *
+ * %__GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ *
+ * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
+ *
+ * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
+ *
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
+ *
+ * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ *
+ * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
+#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
+#define __GFP_NO_OBJ_EXT ((__force gfp_t)___GFP_NO_OBJ_EXT)
+
+/**
+ * DOC: Watermark modifiers
+ *
+ * Watermark modifiers -- controls access to emergency reserves
+ * ------------------------------------------------------------
+ *
+ * %__GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example, creating an IO context to clean pages, or allocation
+ * requests made from atomic context.
+ *
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly, e.g. the process is exiting or swapping. Users should
+ * either be the MM or coordinate closely with the VM (e.g. swap over NFS).
+ * Users of this flag have to be extremely careful to not deplete the reserve
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory.
+ * Usage of a pre-allocated pool (e.g. mempool) should be always considered
+ * before using this flag.
+ *
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
+ */
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+
+/**
+ * DOC: Reclaim modifiers
+ *
+ * Reclaim modifiers
+ * -----------------
+ * Please note that all the following flags are only applicable to sleepable
+ * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
+ *
+ * %__GFP_IO can start physical IO.
+ *
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ *
+ * The default allocator behavior depends on the request size. We have a concept
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * !costly allocations are too essential to fail so they are implicitly
+ * non-failing by default (with some exceptions like OOM victims might fail so
+ * the caller still has to check for failures) while costly requests try to be
+ * not disruptive and back off even without invoking the OOM killer.
+ * The following three modifiers might be used to override some of these
+ * implicit rules. Please note that all of them must be used along with
+ * %__GFP_DIRECT_RECLAIM flag.
+ *
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like the OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput.
+ *
+ * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with %__GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
+ *
+ * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * It _must_ be blockable and used together with __GFP_DIRECT_RECLAIM.
+ * It should _never_ be used in non-sleepable contexts.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy) but it is
+ * definitely preferable to use the flag rather than open-code an endless
+ * loop around the allocator.
+ * Allocating pages from the buddy with __GFP_NOFAIL and order > 1 is
+ * not supported. Please consider using kvmalloc() instead.
+ */
+#define __GFP_IO ((__force gfp_t)___GFP_IO)
+#define __GFP_FS ((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
+
+/**
+ * DOC: Action modifiers
+ *
+ * Action modifiers
+ * ----------------
+ *
+ * %__GFP_NOWARN suppresses allocation failure reports.
+ *
+ * %__GFP_COMP requests compound page metadata for the allocated pages.
+ *
+ * %__GFP_ZERO returns a zeroed page on success.
+ *
+ * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
+ * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
+ * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
+ * memory tags at the same time as zeroing memory has minimal additional
+ * performance impact.
+ *
+ * %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
+ * Used for userspace and vmalloc pages; the latter are unpoisoned by
+ * kasan_unpoison_vmalloc instead. For userspace pages, results in
+ * poisoning being skipped as well, see should_skip_kasan_poison for
+ * details. Only effective in HW_TAGS mode.
+ */
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
+#define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)
+
+/* Disable lockdep for GFP context tracking */
+#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT ___GFP_LAST_BIT
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+/**
+ * DOC: Useful GFP flag combinations
+ *
+ * Useful GFP flag combinations
+ * ----------------------------
+ *
+ * Useful GFP flag combinations that are commonly used. It is recommended
+ * that subsystems start with one of these combinations and then set/clear
+ * %__GFP_FOO flags as necessary.
+ *
+ * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves".
+ * The current implementation doesn't support NMI and a few other strict
+ * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ *
+ * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+ *
+ * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+ * accounted to kmemcg.
+ *
+ * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback. It is very
+ * likely to fail to allocate memory, even for very small allocations.
+ *
+ * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
+ *
+ * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
+ *
+ * %GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used by hardware
+ * for buffers that are mapped to userspace (e.g. graphics) that hardware
+ * still must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * %GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+ * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
+ * because the DMA32 kmalloc cache array is not implemented.
+ * (Reason: there are no such users in the kernel.)
+ *
+ * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+ *
+ * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+ * are compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is by default used
+ * in the page fault path, while the non-light version is used by khugepaged.
+ */
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)
+#define GFP_NOIO (__GFP_RECLAIM)
+#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
+#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_DMA __GFP_DMA
+#define GFP_DMA32 __GFP_DMA32
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | __GFP_SKIP_KASAN)
+#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+
+#endif /* __LINUX_GFP_TYPES_H */
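
A hedged sketch of how a caller composes these combinations; my_get_page() and its parameters are hypothetical. The base combination encodes the context (can it sleep? may it recurse into IO or the FS?), and individual %__GFP_FOO modifiers are OR'd on top.

#include <linux/gfp.h>

static struct page *my_get_page(bool atomic_ctx, bool fs_locks_held)
{
	gfp_t gfp;

	if (atomic_ctx)
		gfp = GFP_ATOMIC;	/* no direct reclaim, taps atomic reserves */
	else if (fs_locks_held)
		gfp = GFP_NOFS;		/* reclaim allowed, but no FS recursion */
	else
		gfp = GFP_KERNEL;	/* full reclaim, IO and FS callbacks */

	/* per-request modifiers are OR'd on top of the base combination */
	return alloc_page(gfp | __GFP_ZERO | __GFP_NOWARN);
}

In real code the base combination is usually fixed per call site rather than chosen at runtime; better still, GFP_NOFS/GFP_NOIO contexts are marked with the memalloc_nofs/noio scope APIs as the documentation above recommends.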
diff --git a/include/linux/glob.h b/include/linux/glob.h
index 861d8347d08e..861327b33e41 100644
--- a/include/linux/glob.h
+++ b/include/linux/glob.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_GLOB_H
#define _LINUX_GLOB_H
diff --git a/include/linux/gnss.h b/include/linux/gnss.h
new file mode 100644
index 000000000000..36968a0f33e8
--- /dev/null
+++ b/include/linux/gnss.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GNSS receiver support
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#ifndef _LINUX_GNSS_H
+#define _LINUX_GNSS_H
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct gnss_device;
+
+enum gnss_type {
+ GNSS_TYPE_NMEA = 0,
+ GNSS_TYPE_SIRF,
+ GNSS_TYPE_UBX,
+ GNSS_TYPE_MTK,
+
+ GNSS_TYPE_COUNT
+};
+
+struct gnss_operations {
+ int (*open)(struct gnss_device *gdev);
+ void (*close)(struct gnss_device *gdev);
+ int (*write_raw)(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count);
+};
+
+struct gnss_device {
+ struct device dev;
+ struct cdev cdev;
+ int id;
+
+ enum gnss_type type;
+ unsigned long flags;
+
+ struct rw_semaphore rwsem;
+ const struct gnss_operations *ops;
+ unsigned int count;
+ unsigned int disconnected:1;
+
+ struct mutex read_mutex;
+ struct kfifo read_fifo;
+ wait_queue_head_t read_queue;
+
+ struct mutex write_mutex;
+ char *write_buf;
+};
+
+struct gnss_device *gnss_allocate_device(struct device *parent);
+void gnss_put_device(struct gnss_device *gdev);
+int gnss_register_device(struct gnss_device *gdev);
+void gnss_deregister_device(struct gnss_device *gdev);
+
+int gnss_insert_raw(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count);
+
+static inline void gnss_set_drvdata(struct gnss_device *gdev, void *data)
+{
+ dev_set_drvdata(&gdev->dev, data);
+}
+
+static inline void *gnss_get_drvdata(struct gnss_device *gdev)
+{
+ return dev_get_drvdata(&gdev->dev);
+}
+
+#endif /* _LINUX_GNSS_H */
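
A hedged sketch of the registration flow this API implies; the my_* names are hypothetical and the hardware plumbing is elided. Received bytes would be pushed to readers with gnss_insert_raw() from the driver's receive path.

#include <linux/errno.h>
#include <linux/gnss.h>

static int my_gnss_open(struct gnss_device *gdev)
{
	/* power up the receiver, start the underlying serial port, ... */
	return 0;
}

static void my_gnss_close(struct gnss_device *gdev)
{
	/* stop the port, power the receiver down */
}

static int my_gnss_write_raw(struct gnss_device *gdev,
			     const unsigned char *buf, size_t count)
{
	/* push configuration bytes to the hardware; return bytes written */
	return count;
}

static const struct gnss_operations my_gnss_ops = {
	.open		= my_gnss_open,
	.close		= my_gnss_close,
	.write_raw	= my_gnss_write_raw,
};

static int my_gnss_probe(struct device *parent)
{
	struct gnss_device *gdev;
	int ret;

	gdev = gnss_allocate_device(parent);
	if (!gdev)
		return -ENOMEM;

	gdev->type = GNSS_TYPE_NMEA;
	gdev->ops = &my_gnss_ops;

	ret = gnss_register_device(gdev);
	if (ret)
		gnss_put_device(gdev);	/* drops the allocation reference */
	return ret;
}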
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
index 93e080b39cf6..bcc17f95b906 100644
--- a/include/linux/goldfish.h
+++ b/include/linux/goldfish.h
@@ -1,14 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GOLDFISH_H
#define __LINUX_GOLDFISH_H
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
/* Helpers for Goldfish virtual platform */
+#ifndef gf_ioread32
+#define gf_ioread32 ioread32
+#endif
+#ifndef gf_iowrite32
+#define gf_iowrite32 iowrite32
+#endif
+
static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
void __iomem *porth)
{
- writel((u32)(unsigned long)ptr, portl);
+ const unsigned long addr = (unsigned long)ptr;
+
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_64BIT
- writel((unsigned long)ptr >> 32, porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
@@ -16,9 +30,9 @@ static inline void gf_write_dma_addr(const dma_addr_t addr,
void __iomem *portl,
void __iomem *porth)
{
- writel((u32)addr, portl);
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(addr >> 32, porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
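
A sketch of the converted helper in use; my_kick_dma() and the MY_DMA_ADDR_* register offsets are hypothetical.

#include <linux/goldfish.h>

#define MY_DMA_ADDR_LOW		0x10	/* hypothetical register offsets */
#define MY_DMA_ADDR_HIGH	0x14

static void my_kick_dma(void __iomem *base, dma_addr_t addr)
{
	/*
	 * The helper splits the address with lower/upper_32_bits() and
	 * writes through gf_iowrite32(), honouring any arch override.
	 */
	gf_write_dma_addr(addr, base + MY_DMA_ADDR_LOW,
			  base + MY_DMA_ADDR_HIGH);
}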
diff --git a/include/linux/gpio-fan.h b/include/linux/gpio-fan.h
deleted file mode 100644
index 096659169215..000000000000
--- a/include/linux/gpio-fan.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * include/linux/gpio-fan.h
- *
- * Platform data structure for GPIO fan driver
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LINUX_GPIO_FAN_H
-#define __LINUX_GPIO_FAN_H
-
-struct gpio_fan_alarm {
- unsigned gpio;
- unsigned active_low;
-};
-
-struct gpio_fan_speed {
- int rpm;
- int ctrl_val;
-};
-
-struct gpio_fan_platform_data {
- int num_ctrl;
- unsigned *ctrl; /* fan control GPIOs. */
- struct gpio_fan_alarm *alarm; /* fan alarm GPIO. */
- /*
- * Speed conversion array: rpm from/to GPIO bit field.
- * This array _must_ be sorted in ascending rpm order.
- */
- int num_speed;
- struct gpio_fan_speed *speed;
-};
-
-#endif /* __LINUX_GPIO_FAN_H */
diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h
index d90ebbe02ca4..1e1fa0160480 100644
--- a/include/linux/gpio-pxa.h
+++ b/include/linux/gpio-pxa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GPIO_PXA_H
#define __GPIO_PXA_H
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index d12b5d566e4b..8f85ddb26429 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,100 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NOTE: This header *must not* be included in new code.
+ *
+ * This is the LEGACY GPIO bulk include file, including legacy APIs. It is
+ * kept only for GPIO drivers still referencing the global GPIO numberspace.
+ *
+ * If you're implementing a GPIO driver, only include <linux/gpio/driver.h>
+ * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h>
+ */
#ifndef __LINUX_GPIO_H
#define __LINUX_GPIO_H
-#include <linux/errno.h>
-
-/* see Documentation/gpio/gpio-legacy.txt */
-
-/* make these flag values available regardless of GPIO kconfig options */
-#define GPIOF_DIR_OUT (0 << 0)
-#define GPIOF_DIR_IN (1 << 0)
-
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH (1 << 1)
-
-#define GPIOF_IN (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
-
-/* Gpio pin is active-low */
-#define GPIOF_ACTIVE_LOW (1 << 2)
-
-/* Gpio pin is open drain */
-#define GPIOF_OPEN_DRAIN (1 << 3)
+#include <linux/types.h>
+#ifdef CONFIG_GPIOLIB
+#include <linux/gpio/consumer.h>
+#endif
-/* Gpio pin is open source */
-#define GPIOF_OPEN_SOURCE (1 << 4)
+#ifdef CONFIG_GPIOLIB_LEGACY
-#define GPIOF_EXPORT (1 << 5)
-#define GPIOF_EXPORT_CHANGEABLE (1 << 6)
-#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
-#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
+struct device;
-/**
- * struct gpio - a structure describing a GPIO with configuration
- * @gpio: the GPIO number
- * @flags: GPIO configuration as specified by GPIOF_*
- * @label: a literal description string of this GPIO
- */
-struct gpio {
- unsigned gpio;
- unsigned long flags;
- const char *label;
-};
+/* make these flag values available regardless of GPIO kconfig options */
+#define GPIOF_IN ((1 << 0))
+#define GPIOF_OUT_INIT_LOW ((0 << 0) | (0 << 1))
+#define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1))
#ifdef CONFIG_GPIOLIB
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request(). Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+static inline bool gpio_is_valid(int number)
+{
+ /* only non-negative numbers are valid */
+ return number >= 0;
+}
-#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
-#include <asm/gpio.h>
-#else
+/*
+ * Platforms may implement their GPIO interface with library code,
+ * at a small performance cost for non-inlined operations and some
+ * extra memory (for code and for per-GPIO table entries).
+ */
-#include <asm-generic/gpio.h>
+/*
+ * Always use the library code for GPIO management calls, or when
+ * sleeping may be involved.
+ */
+int gpio_request(unsigned gpio, const char *label);
+void gpio_free(unsigned gpio);
-static inline int gpio_get_value(unsigned int gpio)
+static inline int gpio_direction_input(unsigned gpio)
{
- return __gpio_get_value(gpio);
+ return gpiod_direction_input(gpio_to_desc(gpio));
}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
+static inline int gpio_direction_output(unsigned gpio, int value)
{
- __gpio_set_value(gpio, value);
+ return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
}
-static inline int gpio_cansleep(unsigned int gpio)
+static inline int gpio_get_value_cansleep(unsigned gpio)
{
- return __gpio_cansleep(gpio);
+ return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
}
-
-static inline int gpio_to_irq(unsigned int gpio)
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
{
- return __gpio_to_irq(gpio);
+ gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
}
-static inline int irq_to_gpio(unsigned int irq)
+static inline int gpio_get_value(unsigned gpio)
{
- return -EINVAL;
+ return gpiod_get_raw_value(gpio_to_desc(gpio));
+}
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ gpiod_set_raw_value(gpio_to_desc(gpio), value);
}
-#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
-
-/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
+static inline int gpio_to_irq(unsigned gpio)
+{
+ return gpiod_to_irq(gpio_to_desc(gpio));
+}
-struct device;
+int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label);
-void devm_gpio_free(struct device *dev, unsigned int gpio);
#else /* ! CONFIG_GPIOLIB */
#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/bug.h>
-#include <linux/pinctrl/pinctrl.h>
-struct device;
-struct gpio_chip;
+#include <asm/bug.h>
+#include <asm/errno.h>
static inline bool gpio_is_valid(int number)
{
@@ -112,11 +113,6 @@ static inline int gpio_request_one(unsigned gpio,
return -ENOSYS;
}
-static inline int gpio_request_array(const struct gpio *array, size_t num)
-{
- return -ENOSYS;
-}
-
static inline void gpio_free(unsigned gpio)
{
might_sleep();
@@ -125,14 +121,6 @@ static inline void gpio_free(unsigned gpio)
WARN_ON(1);
}
-static inline void gpio_free_array(const struct gpio *array, size_t num)
-{
- might_sleep();
-
- /* GPIO can never have been requested */
- WARN_ON(1);
-}
-
static inline int gpio_direction_input(unsigned gpio)
{
return -ENOSYS;
@@ -143,11 +131,6 @@ static inline int gpio_direction_output(unsigned gpio, int value)
return -ENOSYS;
}
-static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
-{
- return -ENOSYS;
-}
-
static inline int gpio_get_value(unsigned gpio)
{
/* GPIO can never have been requested or set as {in,out}put */
@@ -161,13 +144,6 @@ static inline void gpio_set_value(unsigned gpio, int value)
WARN_ON(1);
}
-static inline int gpio_cansleep(unsigned gpio)
-{
- /* GPIO can never have been requested or set as {in,out}put */
- WARN_ON(1);
- return 0;
-}
-
static inline int gpio_get_value_cansleep(unsigned gpio)
{
/* GPIO can never have been requested or set as {in,out}put */
@@ -181,27 +157,6 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
WARN_ON(1);
}
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
- /* GPIO can never have been requested or set as {in,out}put */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio)
-{
- /* GPIO can never have been exported */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
- /* GPIO can never have been exported */
- WARN_ON(1);
-}
-
static inline int gpio_to_irq(unsigned gpio)
{
/* GPIO can never have been requested or set as input */
@@ -209,57 +164,6 @@ static inline int gpio_to_irq(unsigned gpio)
return -EINVAL;
}
-static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
- unsigned int offset)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
- unsigned int offset)
-{
- WARN_ON(1);
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
- /* irq can never have been returned from gpio_to_irq() */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
- WARN_ON(1);
-}
-
-static inline int devm_gpio_request(struct device *dev, unsigned gpio,
- const char *label)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label)
{
@@ -267,11 +171,6 @@ static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
return -EINVAL;
}
-static inline void devm_gpio_free(struct device *dev, unsigned int gpio)
-{
- WARN_ON(1);
-}
-
#endif /* ! CONFIG_GPIOLIB */
-
+#endif /* CONFIG_GPIOLIB_LEGACY */
#endif /* __LINUX_GPIO_H */
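
For contrast with the descriptor API, a sketch of the legacy numberspace interface that this header now implements as thin gpiod_* wrappers; MY_LED_GPIO and the my_* functions are hypothetical, and new code should use <linux/gpio/consumer.h> instead.

#include <linux/gpio.h>

#define MY_LED_GPIO	42	/* hypothetical global GPIO number */

static int my_led_init(void)
{
	int ret;

	/* request and configure in one call, as the legacy API allows */
	ret = gpio_request_one(MY_LED_GPIO, GPIOF_OUT_INIT_LOW, "my-led");
	if (ret)
		return ret;

	gpio_set_value(MY_LED_GPIO, 1);	/* now a gpiod_set_raw_value() wrapper */
	return 0;
}

static void my_led_exit(void)
{
	gpio_free(MY_LED_GPIO);
}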
diff --git a/include/linux/gpio/aspeed.h b/include/linux/gpio/aspeed.h
new file mode 100644
index 000000000000..9a547e66c8c4
--- /dev/null
+++ b/include/linux/gpio/aspeed.h
@@ -0,0 +1,19 @@
+#ifndef __GPIO_ASPEED_H
+#define __GPIO_ASPEED_H
+
+#include <linux/types.h>
+
+struct gpio_desc;
+
+struct aspeed_gpio_copro_ops {
+ int (*request_access)(void *data);
+ int (*release_access)(void *data);
+};
+
+int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
+ u16 *vreg_offset, u16 *dreg_offset, u8 *bit);
+int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc);
+int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data);
+
+
+#endif /* __GPIO_ASPEED_H */
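
A hedged sketch of handing a line over to the coprocessor with the API above; the my_* names and setup flow are hypothetical, and the comments reflect one reading of the callbacks rather than documented semantics.

#include <linux/gpio/aspeed.h>
#include <linux/gpio/consumer.h>

static int my_request_access(void *data)
{
	/* assumption: quiesce the coprocessor before the kernel touches the bank */
	return 0;
}

static int my_release_access(void *data)
{
	/* assumption: let the coprocessor resume driving the line */
	return 0;
}

static const struct aspeed_gpio_copro_ops my_copro_ops = {
	.request_access	= my_request_access,
	.release_access	= my_release_access,
};

static int my_setup(struct gpio_desc *desc, void *my_data)
{
	u16 vreg, dreg;
	u8 bit;
	int ret;

	ret = aspeed_gpio_copro_set_ops(&my_copro_ops, my_data);
	if (ret)
		return ret;

	/* reports the value/direction register offsets and bit to use */
	return aspeed_gpio_copro_grab_gpio(desc, &vreg, &dreg, &bit);
}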
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 8f702fcbe485..cafeb7a40ad1 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -1,26 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GPIO_CONSUMER_H
#define __LINUX_GPIO_CONSUMER_H
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/err.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
+struct acpi_device;
struct device;
+struct fwnode_handle;
-/**
- * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
- * preferable to the old integer-based handles.
- *
- * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
- * until the GPIO is released.
- */
+struct gpio_array;
struct gpio_desc;
/**
- * Struct containing an array of descriptors that can be obtained using
- * gpiod_get_array().
+ * struct gpio_descs - Struct containing an array of descriptors that can be
+ * obtained using gpiod_get_array()
+ *
+ * @info: Pointer to the opaque gpio_array structure
+ * @ndescs: Number of held descriptors
+ * @desc: Array of pointers to GPIO descriptors
*/
struct gpio_descs {
+ struct gpio_array *info;
unsigned int ndescs;
struct gpio_desc *desc[];
};
@@ -28,10 +30,21 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
+#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
+/* GPIOD_FLAGS_BIT_NONEXCLUSIVE is DEPRECATED, don't use in new code. */
+#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
- * Optional flags that can be passed to one of gpiod_* to configure direction
- * and output value. These values cannot be OR'd.
+ * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to
+ * configure direction and output value. These values
+ * cannot be OR'd.
+ *
+ * @GPIOD_ASIS: Don't change anything
+ * @GPIOD_IN: Set lines to input mode
+ * @GPIOD_OUT_LOW: Set lines to output and drive them low
+ * @GPIOD_OUT_HIGH: Set lines to output and drive them high
+ * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low
+ * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high
*/
enum gpiod_flags {
GPIOD_ASIS = 0,
@@ -39,6 +52,8 @@ enum gpiod_flags {
GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
GPIOD_FLAGS_BIT_DIR_VAL,
+ GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_OPEN_DRAIN,
+ GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN,
};
#ifdef CONFIG_GPIOLIB
@@ -90,6 +105,7 @@ struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc);
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
int gpiod_get_direction(struct gpio_desc *desc);
@@ -99,53 +115,84 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
-void gpiod_set_value(struct gpio_desc *desc, int value);
-void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+int gpiod_get_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
+int gpiod_set_value(struct gpio_desc *desc, int value);
+int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_value(const struct gpio_desc *desc);
-void gpiod_set_raw_value(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+int gpiod_get_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
+int gpiod_set_raw_value(struct gpio_desc *desc, int value);
+int gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
-void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+int gpiod_get_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
+int gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
+int gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
-void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
-
-int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
+int gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
+int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
+
+int gpiod_set_config(struct gpio_desc *desc, unsigned long config);
+int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce);
+void gpiod_toggle_active_low(struct gpio_desc *desc);
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
int gpiod_to_irq(const struct gpio_desc *desc);
+int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
+
+bool gpiod_is_shared(const struct gpio_desc *desc);
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
-/* Child properties interface */
-struct fwnode_handle;
+int gpiod_hwgpio(const struct gpio_desc *desc);
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
const char *label);
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label);
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *child,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label);
+
+bool gpiod_is_equal(const struct gpio_desc *desc,
+ const struct gpio_desc *other);
#else /* CONFIG_GPIOLIB */
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
static inline int gpiod_count(struct device *dev, const char *con_id)
{
return 0;
@@ -199,7 +246,16 @@ static inline void gpiod_put(struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+}
+
+static inline void devm_gpiod_unhinge(struct device *dev,
+ struct gpio_desc *desc)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
}
static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -207,7 +263,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(descs);
}
static inline struct gpio_desc *__must_check
@@ -260,7 +316,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline void devm_gpiod_put_array(struct device *dev,
@@ -269,185 +325,341 @@ static inline void devm_gpiod_put_array(struct device *dev,
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(descs);
}
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_input(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
-
-
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
-static inline void gpiod_set_value(struct gpio_desc *desc, int value)
+static inline int gpiod_get_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
+ return 0;
}
-static inline void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline int gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return 0;
+}
+static inline int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc_array);
+ return 0;
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return 0;
+}
+static inline int gpiod_get_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc_array);
return 0;
}
-static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+static inline int gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return 0;
}
-static inline void gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline int gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
+ return 0;
}
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
-static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
+ return 0;
}
-static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
+static inline int gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+ return 0;
+}
+static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
+ return 0;
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
-static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
- int value)
+static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
+ return 0;
}
-static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+static inline int gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
+ int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+ return 0;
+}
+static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc_array);
+ return 0;
+}
+
+static inline int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return -ENOSYS;
}
-static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
+static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
+static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+}
+
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_to_irq(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return -EINVAL;
+}
+
+static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
+ const char *name)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
return -EINVAL;
}
+static inline bool gpiod_is_shared(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+ return false;
+}
+
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
- return ERR_PTR(-EINVAL);
+ return NULL;
}
static inline int desc_to_gpio(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -EINVAL;
}
-/* Child properties interface */
-struct fwnode_handle;
-
static inline
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
const char *label)
{
return ERR_PTR(-ENOSYS);
}
static inline
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
{
return ERR_PTR(-ENOSYS);
}
+static inline bool
+gpiod_is_equal(const struct gpio_desc *desc, const struct gpio_desc *other)
+{
+ WARN_ON(desc || other);
+ return false;
+}
+
#endif /* CONFIG_GPIOLIB */
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_HTE)
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+#else
+
+#include <linux/bug.h>
+
+static inline int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ if (!IS_ENABLED(CONFIG_GPIOLIB))
+ WARN_ON(desc);
+
+ return -ENOSYS;
+}
+static inline int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ if (!IS_ENABLED(CONFIG_GPIOLIB))
+ WARN_ON(desc);
+
+ return -ENOSYS;
+}
+#endif /* CONFIG_GPIOLIB && CONFIG_HTE */
+
static inline
-struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
- const char *con_id,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
+struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ enum gpiod_flags flags,
+ const char *label)
{
- return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 0, child,
- flags, label);
+ return devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0,
+ flags, label);
}
+struct acpi_gpio_params {
+ unsigned int crs_entry_index;
+ unsigned short line_index;
+ bool active_low;
+};
+
+struct acpi_gpio_mapping {
+ const char *name;
+ const struct acpi_gpio_params *data;
+ unsigned int size;
+
+/* Ignore IoRestriction field */
+#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0)
+/*
+ * When ACPI GPIO mapping table is in use the index parameter inside it
+ * refers to the GPIO resource in _CRS method. That index has no
+ * distinction of actual type of the resource. When consumer wants to
+ * get GpioIo type explicitly, this quirk may be used.
+ */
+#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1)
+/* Use given pin as an absolute GPIO number in the system */
+#define ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER BIT(2)
+
+ unsigned int quirks;
+};
+
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)
+
+int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+ const struct acpi_gpio_mapping *gpios);
+void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
+
+int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios);
+
+#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
+
+static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ return -ENXIO;
+}
+static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+
+static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ return -ENXIO;
+}
+
+#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
+
+
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
@@ -475,4 +687,14 @@ static inline void gpiod_unexport(struct gpio_desc *desc)
#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+static inline int gpiod_multi_set_value_cansleep(struct gpio_descs *descs,
+ unsigned long *value_bitmap)
+{
+ if (IS_ERR_OR_NULL(descs))
+ return PTR_ERR_OR_ZERO(descs);
+
+ return gpiod_set_array_value_cansleep(descs->ndescs, descs->desc,
+ descs->info, value_bitmap);
+}
+
#endif
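
A hedged sketch of the bitmap-based multi-line set that gpiod_multi_set_value_cansleep() wraps; the "mux" con_id and my_set_mux() are hypothetical, and the sketch assumes at most BITS_PER_LONG lines in the array.

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_set_mux(struct device *dev, unsigned long val)
{
	struct gpio_descs *descs;

	descs = devm_gpiod_get_array(dev, "mux", GPIOD_OUT_LOW);
	if (IS_ERR(descs))
		return PTR_ERR(descs);

	/* bit n of val drives descs->desc[n]; the sleeping variant is safe here */
	return gpiod_multi_set_value_cansleep(descs, &val);
}

Passing the whole struct gpio_descs lets the helper reuse the opaque array_info so drivers that can set several lines in one register write get to do so.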
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c97f8325e8bf..fabe2baf7b50 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -1,23 +1,325 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GPIO_DRIVER_H
#define __LINUX_GPIO_DRIVER_H
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/irq.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqhandler.h>
#include <linux/lockdep.h>
-#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/util_macros.h>
-struct gpio_desc;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+#include <asm/msi.h>
+#endif
+
+struct device;
+struct irq_chip;
+struct irq_data;
+struct module;
struct of_phandle_args;
-struct device_node;
+struct pinctrl_dev;
struct seq_file;
+
+struct gpio_chip;
+struct gpio_desc;
struct gpio_device;
-struct module;
-#ifdef CONFIG_GPIOLIB
+enum gpio_lookup_flags;
+enum gpiod_flags;
+
+union gpio_irq_fwspec {
+ struct irq_fwspec fwspec;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ msi_alloc_info_t msiinfo;
+#endif
+};
+
+#define GPIO_LINE_DIRECTION_IN 1
+#define GPIO_LINE_DIRECTION_OUT 0
+
+/**
+ * struct gpio_irq_chip - GPIO interrupt controller
+ */
+struct gpio_irq_chip {
+ /**
+ * @chip:
+ *
+ * GPIO IRQ chip implementation, provided by GPIO driver.
+ */
+ struct irq_chip *chip;
+
+ /**
+ * @domain:
+ *
+ * Interrupt translation domain; responsible for mapping between GPIO
+ * hwirq number and Linux IRQ number.
+ */
+ struct irq_domain *domain;
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ /**
+ * @fwnode:
+ *
+ * Firmware node corresponding to this gpiochip/irqchip, necessary
+ * for hierarchical irqdomain support.
+ */
+ struct fwnode_handle *fwnode;
+
+ /**
+ * @parent_domain:
+ *
+ * If non-NULL, will be set as the parent of this GPIO interrupt
+ * controller's IRQ domain to establish a hierarchical interrupt
+ * domain. The presence of this will activate the hierarchical
+ * interrupt support.
+ */
+ struct irq_domain *parent_domain;
+
+ /**
+ * @child_to_parent_hwirq:
+ *
+ * This callback translates a child hardware IRQ offset to a parent
+ * hardware IRQ offset on a hierarchical interrupt chip. The child
+ * hardware IRQs correspond to the GPIO index 0..ngpio-1 (see the
+ * ngpio field of struct gpio_chip) and the corresponding parent
+ * hardware IRQ and type (such as IRQ_TYPE_*) shall be returned by
+ * the driver. The driver can calculate this from an offset or using
+ * a lookup table or whatever method is best for this chip. Return
+ * 0 on successful translation in the driver.
+ *
+ * If some ranges of hardware IRQs do not have a corresponding parent
+ * HWIRQ, return -EINVAL, but also make sure to fill in @valid_mask and
+ * @need_valid_mask to make these GPIO lines unavailable for
+ * translation.
+ */
+ int (*child_to_parent_hwirq)(struct gpio_chip *gc,
+ unsigned int child_hwirq,
+ unsigned int child_type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type);
+
+ /**
+	 * @populate_parent_alloc_arg:
+ *
+ * This optional callback allocates and populates the specific struct
+ * for the parent's IRQ domain. If this is not specified, then
+ * &gpiochip_populate_parent_fwspec_twocell will be used. A four-cell
+ * variant named &gpiochip_populate_parent_fwspec_fourcell is also
+ * available.
+ */
+ int (*populate_parent_alloc_arg)(struct gpio_chip *gc,
+ union gpio_irq_fwspec *fwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
+
+ /**
+ * @child_offset_to_irq:
+ *
+ * This optional callback is used to translate the child's GPIO line
+ * offset on the GPIO chip to an IRQ number for the GPIO to_irq()
+ * callback. If this is not specified, then a default callback will be
+ * provided that returns the line offset.
+ */
+ unsigned int (*child_offset_to_irq)(struct gpio_chip *gc,
+ unsigned int pin);
+
+ /**
+ * @child_irq_domain_ops:
+ *
+ * The IRQ domain operations that will be used for this GPIO IRQ
+ * chip. If no operations are provided, then default callbacks will
+ * be populated to setup the IRQ hierarchy. Some drivers need to
+ * supply their own translate function.
+ */
+ struct irq_domain_ops child_irq_domain_ops;
+#endif
+
+ /**
+ * @handler:
+ *
+ * The IRQ handler to use (often a predefined IRQ core function) for
+ * GPIO IRQs, provided by GPIO driver.
+ */
+ irq_flow_handler_t handler;
+
+ /**
+ * @default_type:
+ *
+ * Default IRQ triggering type applied during GPIO driver
+ * initialization, provided by GPIO driver.
+ */
+ unsigned int default_type;
+
+ /**
+ * @lock_key:
+ *
+ * Per GPIO IRQ chip lockdep class for IRQ lock.
+ */
+ struct lock_class_key *lock_key;
+
+ /**
+ * @request_key:
+ *
+ * Per GPIO IRQ chip lockdep class for IRQ request.
+ */
+ struct lock_class_key *request_key;
+
+ /**
+ * @parent_handler:
+ *
+ * The interrupt handler for the GPIO chip's parent interrupts, may be
+ * NULL if the parent interrupts are nested rather than cascaded.
+ */
+ irq_flow_handler_t parent_handler;
+
+ union {
+ /**
+ * @parent_handler_data:
+ *
+ * If @per_parent_data is false, @parent_handler_data is a
+ * single pointer used as the data associated with every
+ * parent interrupt.
+ */
+ void *parent_handler_data;
+
+ /**
+ * @parent_handler_data_array:
+ *
+ * If @per_parent_data is true, @parent_handler_data_array is
+ * an array of @num_parents pointers, and is used to associate
+ * different data for each parent. This cannot be NULL if
+ * @per_parent_data is true.
+ */
+ void **parent_handler_data_array;
+ };
+
+ /**
+ * @num_parents:
+ *
+ * The number of interrupt parents of a GPIO chip.
+ */
+ unsigned int num_parents;
+
+ /**
+ * @parents:
+ *
+ * A list of interrupt parents of a GPIO chip. This is owned by the
+ * driver, so the core will only reference this list, not modify it.
+ */
+ unsigned int *parents;
+
+ /**
+ * @map:
+ *
+ * A list of interrupt parents for each line of a GPIO chip.
+ */
+ unsigned int *map;
+
+ /**
+ * @threaded:
+ *
+	 * If set, the interrupt handling uses nested threads.
+ */
+ bool threaded;
+
+ /**
+ * @per_parent_data:
+ *
+ * True if parent_handler_data_array describes a @num_parents
+ * sized array to be used as parent data.
+ */
+ bool per_parent_data;
+
+ /**
+ * @initialized:
+ *
+ * Flag to track GPIO chip irq member's initialization.
+ * This flag will make sure GPIO chip irq members are not used
+ * before they are initialized.
+ */
+ bool initialized;
+
+ /**
+ * @domain_is_allocated_externally:
+ *
+	 * True if the irq_domain was allocated outside of gpiolib, in which
+ * case gpiolib won't free the irq_domain itself.
+ */
+ bool domain_is_allocated_externally;
+
+ /**
+ * @init_hw: optional routine to initialize hardware before
+ * an IRQ chip will be added. This is quite useful when
+ * a particular driver wants to clear IRQ related registers
+ * in order to avoid undesired events.
+ */
+ int (*init_hw)(struct gpio_chip *gc);
+
+ /**
+ * @init_valid_mask: optional routine to initialize @valid_mask, to be
+ * used if not all GPIO lines are valid interrupts. Sometimes some
+ * lines just cannot fire interrupts, and this routine, when defined,
+ * is passed a bitmap in "valid_mask" and it will have ngpios
+ * bits from 0..(ngpios-1) set to "1" as in valid. The callback can
+ * then directly set some bits to "0" if they cannot be used for
+ * interrupts.
+ */
+ void (*init_valid_mask)(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios);
+
+ /**
+ * @valid_mask:
+ *
+ * If not %NULL, holds bitmask of GPIOs which are valid to be included
+ * in IRQ domain of the chip.
+ */
+ unsigned long *valid_mask;
+
+ /**
+ * @first:
+ *
+ * Required for static IRQ allocation. If set,
+ * irq_domain_create_simple() will allocate and map all IRQs
+ * during initialization.
+ */
+ unsigned int first;
+
+ /**
+ * @irq_enable:
+ *
+ * Store old irq_chip irq_enable callback
+ */
+ void (*irq_enable)(struct irq_data *data);
+
+ /**
+ * @irq_disable:
+ *
+ * Store old irq_chip irq_disable callback
+ */
+ void (*irq_disable)(struct irq_data *data);
+ /**
+ * @irq_unmask:
+ *
+ * Store old irq_chip irq_unmask callback
+ */
+ void (*irq_unmask)(struct irq_data *data);
+
+ /**
+ * @irq_mask:
+ *
+ * Store old irq_chip irq_mask callback
+ */
+ void (*irq_mask)(struct irq_data *data);
+};
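Editor's note: a worked illustration of the translation contract described for @child_to_parent_hwirq above, as a hedged sketch for a hypothetical chip whose GPIO lines map 1:1 onto parent hwirqs at a fixed offset of 32. Both the offset and the fixed level trigger are assumptions.

#include <linux/irq.h>

static int example_child_to_parent_hwirq(struct gpio_chip *gc,
					 unsigned int child_hwirq,
					 unsigned int child_type,
					 unsigned int *parent_hwirq,
					 unsigned int *parent_type)
{
	*parent_hwirq = child_hwirq + 32;	/* assumed fixed offset */
	*parent_type = IRQ_TYPE_LEVEL_HIGH;	/* parent only does level */
	return 0;
}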
/**
* struct gpio_chip - abstract a GPIO controller
@@ -25,25 +327,47 @@ struct module;
* number or the name of the SoC IP-block implementing it.
* @gpiodev: the internal state holder, opaque struct
* @parent: optional parent device providing the GPIOs
+ * @fwnode: optional fwnode providing this controller's properties
* @owner: helps prevent removal of modules exporting active GPIOs
* @request: optional hook for chip-specific activation, such as
- * enabling module power and clock; may sleep
+ * enabling module power and clock; may sleep; must return 0 on success
+ * or negative error number on failure
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
- * (same as GPIOF_DIR_XXX), or negative error
- * @direction_input: configures signal "offset" as input, or returns error
- * @direction_output: configures signal "offset" as output, or returns error
+ * (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN),
+ * or negative error. It is recommended to always implement this
+ * function, even on input-only or output-only gpio chips.
+ * @direction_input: configures signal "offset" as input, returns 0 on success
+ * or a negative error number. This can be omitted on input-only or
+ * output-only gpio chips.
+ * @direction_output: configures signal "offset" as output, returns 0 on
+ * success or a negative error number. This can be omitted on input-only
+ * or output-only gpio chips.
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
- * @set: assigns output value for signal "offset"
- * @set_multiple: assigns output values for multiple signals defined by "mask"
+ * @get_multiple: reads values for multiple signals defined by "mask" and
+ * stores them in "bits", returns 0 on success or negative error
+ * @set: assigns output value for signal "offset", returns 0 on success or
+ * negative error value
+ * @set_multiple: assigns output values for multiple signals defined by
+ * "mask", returns 0 on success or negative error value
* @set_config: optional hook for all kinds of settings. Uses the same
- * packed config format as generic pinconf.
- * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ * packed config format as generic pinconf. Must return 0 on success and
+ * a negative error number on failure.
+ * @to_irq: optional hook supporting non-static gpiod_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
* state (such as pullup/pulldown configuration).
+ * @init_valid_mask: optional routine to initialize @valid_mask, to be used if
+ * not all GPIOs are valid.
+ * @add_pin_ranges: optional routine to initialize pin ranges, to be used when
+ *	the platform requires special mapping of the pins that provide GPIO
+ *	functionality. It is called after adding the GPIO chip and before
+ *	adding the IRQ chip.
+ * @en_hw_timestamp: Dependent on the GPIO chip, an optional routine to
+ *	enable hardware timestamping.
+ * @dis_hw_timestamp: Dependent on the GPIO chip, an optional routine to
+ *	disable hardware timestamping.
* @base: identifies the first GPIO number handled by this chip;
* or, if negative during registration, requests dynamic ID allocation.
* DEPRECATION: providing anything non-negative and nailing the base
@@ -52,54 +376,21 @@ struct module;
* get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
+ * @offset: when multiple gpio chips belong to the same device this
+ * can be used as offset within the device so friendly names can
+ * be properly assigned.
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
- * array must be @ngpio entries long. A name can include a single printk
- * format specifier for an unsigned int. It is substituted by the actual
- * number of the gpio.
+ * array must be @ngpio entries long.
* @can_sleep: flag must be set iff get()/set() methods sleep, as they
* must while accessing GPIO expander chips over I2C or SPI. This
* implies that if the chip supports IRQs, these IRQs need to be threaded
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
- * @read_reg: reader function for generic GPIO
- * @write_reg: writer function for generic GPIO
- * @pin2mask: some generic GPIO controllers work with the big-endian bits
- * notation, e.g. in a 8-bits register, GPIO7 is the least significant
- * bit. This callback assigns the right bit mask.
- * @reg_dat: data (in) register for generic GPIO
- * @reg_set: output set register (out=high) for generic GPIO
- * @reg_clr: output clear register (out=low) for generic GPIO
- * @reg_dir: direction setting register for generic GPIO
- * @bgpio_bits: number of register bits used for a generic GPIO i.e.
- * <register width> * 8
- * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
- * shadowed and real data registers writes together.
- * @bgpio_data: shadowed data register for generic GPIO to clear/set bits
- * safely.
- * @bgpio_dir: shadowed direction register for generic GPIO to clear/set
- * direction safely.
- * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
- * @irqdomain: Interrupt translation domain; responsible for mapping
- * between GPIO hwirq number and linux irq number
- * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
- * @irq_handler: the irq handler to use (often a predefined irq core function)
- * for GPIO IRQs, provided by GPIO driver
- * @irq_default_type: default IRQ triggering type applied during GPIO driver
- * initialization, provided by GPIO driver
- * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number,
- * provided by GPIO driver for chained interrupt (not for nested
- * interrupts).
- * @irq_nested: True if set the interrupt handling is nested.
- * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all
- * bits set to one
- * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to
- * be included in IRQ domain of the chip
- * @lock_key: per GPIO IRQ chip lockdep class
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
- * they can all be accessed through a common programing interface.
+ * they can all be accessed through a common programming interface.
* Example sources would be SOC controllers, FPGAs, multifunction
* chips, dedicated GPIO expanders, and so on.
*
@@ -112,88 +403,105 @@ struct gpio_chip {
const char *label;
struct gpio_device *gpiodev;
struct device *parent;
+ struct fwnode_handle *fwnode;
struct module *owner;
- int (*request)(struct gpio_chip *chip,
- unsigned offset);
- void (*free)(struct gpio_chip *chip,
- unsigned offset);
- int (*get_direction)(struct gpio_chip *chip,
- unsigned offset);
- int (*direction_input)(struct gpio_chip *chip,
- unsigned offset);
- int (*direction_output)(struct gpio_chip *chip,
- unsigned offset, int value);
- int (*get)(struct gpio_chip *chip,
- unsigned offset);
- void (*set)(struct gpio_chip *chip,
- unsigned offset, int value);
- void (*set_multiple)(struct gpio_chip *chip,
+ int (*request)(struct gpio_chip *gc,
+ unsigned int offset);
+ void (*free)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*get_direction)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*direction_input)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*direction_output)(struct gpio_chip *gc,
+ unsigned int offset, int value);
+ int (*get)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*get_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
- int (*set_config)(struct gpio_chip *chip,
- unsigned offset,
+ int (*set)(struct gpio_chip *gc,
+ unsigned int offset, int value);
+ int (*set_multiple)(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits);
+ int (*set_config)(struct gpio_chip *gc,
+ unsigned int offset,
unsigned long config);
- int (*to_irq)(struct gpio_chip *chip,
- unsigned offset);
+ int (*to_irq)(struct gpio_chip *gc,
+ unsigned int offset);
void (*dbg_show)(struct seq_file *s,
- struct gpio_chip *chip);
+ struct gpio_chip *gc);
+
+ int (*init_valid_mask)(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios);
+
+ int (*add_pin_ranges)(struct gpio_chip *gc);
+
+ int (*en_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
+ int (*dis_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
int base;
u16 ngpio;
+ u16 offset;
const char *const *names;
bool can_sleep;
-#if IS_ENABLED(CONFIG_GPIO_GENERIC)
- unsigned long (*read_reg)(void __iomem *reg);
- void (*write_reg)(void __iomem *reg, unsigned long data);
- unsigned long (*pin2mask)(struct gpio_chip *gc, unsigned int pin);
- void __iomem *reg_dat;
- void __iomem *reg_set;
- void __iomem *reg_clr;
- void __iomem *reg_dir;
- int bgpio_bits;
- spinlock_t bgpio_lock;
- unsigned long bgpio_data;
- unsigned long bgpio_dir;
-#endif
-
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
* With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
* to handle IRQs for most practical cases.
*/
- struct irq_chip *irqchip;
- struct irq_domain *irqdomain;
- unsigned int irq_base;
- irq_flow_handler_t irq_handler;
- unsigned int irq_default_type;
- unsigned int irq_chained_parent;
- bool irq_nested;
- bool irq_need_valid_mask;
- unsigned long *irq_valid_mask;
- struct lock_class_key *lock_key;
-#endif
+
+ /**
+ * @irq:
+ *
+ * Integrates interrupt chip functionality with the GPIO chip. Can be
+ * used to handle IRQs for most practical cases.
+ */
+ struct gpio_irq_chip irq;
+#endif /* CONFIG_GPIOLIB_IRQCHIP */
#if defined(CONFIG_OF_GPIO)
/*
- * If CONFIG_OF is enabled, then all GPIO controllers described in the
- * device tree automatically may have an OF translation
+ * If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in
+	 * the device tree may automatically have an OF translation
*/
/**
- * @of_node:
+ * @of_gpio_n_cells:
+ *
+ * Number of cells used to form the GPIO specifier. The standard is 2
+ * cells:
+ *
+ * gpios = <&gpio offset flags>;
+ *
+ * some complex GPIO controllers instantiate more than one chip per
+ * device tree node and have 3 cells:
+ *
+ * gpios = <&gpio instance offset flags>;
*
- * Pointer to a device tree node representing this GPIO controller.
+ * Legacy GPIO controllers may even have 1 cell:
+ *
+ * gpios = <&gpio offset>;
*/
- struct device_node *of_node;
+ unsigned int of_gpio_n_cells;
/**
- * @of_gpio_n_cells:
+ * @of_node_instance_match:
*
- * Number of cells used to form the GPIO specifier.
+ * Determine if a chip is the right instance. Must be implemented by
+ * any driver using more than one gpio_chip per device tree node.
+ * Returns true if gc is the instance indicated by i (which is the
+ * first cell in the phandles for GPIO lines and gpio-ranges).
*/
- unsigned int of_gpio_n_cells;
+ bool (*of_node_instance_match)(struct gpio_chip *gc, unsigned int i);
/**
* @of_xlate:
@@ -203,147 +511,199 @@ struct gpio_chip {
*/
int (*of_xlate)(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags);
-#endif
+#endif /* CONFIG_OF_GPIO */
};
-extern const char *gpiochip_is_requested(struct gpio_chip *chip,
- unsigned offset);
+char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset);
-/* add/remove chips */
-extern int gpiochip_add_data(struct gpio_chip *chip, void *data);
-static inline int gpiochip_add(struct gpio_chip *chip)
-{
- return gpiochip_add_data(chip, NULL);
-}
-extern void gpiochip_remove(struct gpio_chip *chip);
-extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
- void *data);
-extern void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip);
-extern struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *chip, void *data));
+struct _gpiochip_for_each_data {
+ const char **label;
+ unsigned int *i;
+};
-/* lock/unlock as IRQ */
-int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
-bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
+DEFINE_CLASS(_gpiochip_for_each_data,
+ struct _gpiochip_for_each_data,
+ if (*_T.label) kfree(*_T.label),
+ ({
+ struct _gpiochip_for_each_data _data = { label, i };
+ *_data.i = 0;
+ _data;
+ }),
+	     const char **label, unsigned int *i)
-/* Line status inquiry for drivers */
-bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
-bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset);
+/**
+ * for_each_hwgpio_in_range - Iterates over all GPIOs in a given range
+ * @_chip: Chip to iterate over.
+ * @_i: Loop counter.
+ * @_base: First GPIO in the range.
+ * @_size: Amount of GPIOs to check starting from @base.
+ * @_label: Place to store the address of the label if the GPIO is requested.
+ * Set to NULL for unused GPIOs.
+ */
+#define for_each_hwgpio_in_range(_chip, _i, _base, _size, _label) \
+ for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \
+ _i < _size; \
+ _i++, kfree(_label), _label = NULL) \
+ for_each_if(!IS_ERR(_label = gpiochip_dup_line_label(_chip, _base + _i)))
-/* Sleep persistence inquiry for drivers */
-bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset);
+/**
+ * for_each_hwgpio - Iterates over all GPIOs for given chip.
+ * @_chip: Chip to iterate over.
+ * @_i: Loop counter.
+ * @_label: Place to store the address of the label if the GPIO is requested.
+ * Set to NULL for unused GPIOs.
+ */
+#define for_each_hwgpio(_chip, _i, _label) \
+ for_each_hwgpio_in_range(_chip, _i, 0, _chip->ngpio, _label)
-/* get driver data */
-void *gpiochip_get_data(struct gpio_chip *chip);
+/**
+ * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
+ * @_chip: the chip to query
+ * @_i: loop variable
+ * @_base: first GPIO in the range
+ * @_size: amount of GPIOs to check starting from @base
+ * @_label: label of current GPIO
+ */
+#define for_each_requested_gpio_in_range(_chip, _i, _base, _size, _label) \
+ for_each_hwgpio_in_range(_chip, _i, _base, _size, _label) \
+ for_each_if(_label)
-struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+/* Iterates over all requested GPIO of the given @chip */
+#define for_each_requested_gpio(chip, i, label) \
+ for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label)
-struct bgpio_pdata {
- const char *label;
- int base;
- int ngpio;
-};
+/* add/remove chips */
+int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
-#if IS_ENABLED(CONFIG_GPIO_GENERIC)
+/**
+ * gpiochip_add_data() - register a gpio_chip
+ * @gc: the chip to register, with gc->base initialized
+ * @data: driver-private data associated with this chip
+ *
+ * Context: potentially before irqs will work
+ *
+ * When gpiochip_add_data() is called very early during boot, so that GPIOs
+ * can be freely used, the gc->parent device must be registered before
+ * the gpio framework's arch_initcall(). Otherwise sysfs initialization
+ * for GPIOs will fail rudely.
+ *
+ * gpiochip_add_data() must only be called after gpiolib initialization,
+ * i.e. after core_initcall().
+ *
+ * If gc->base is negative, this requests dynamic assignment of
+ * a range of valid GPIOs.
+ *
+ * Returns:
+ * A negative errno if the chip can't be registered, such as because the
+ * gc->base is invalid or already associated with a different chip.
+ * Otherwise it returns zero as a success code.
+ */
+#ifdef CONFIG_LOCKDEP
+#define gpiochip_add_data(gc, data) ({ \
+ static struct lock_class_key lock_key; \
+ static struct lock_class_key request_key; \
+ gpiochip_add_data_with_key(gc, data, &lock_key, \
+ &request_key); \
+ })
+#define devm_gpiochip_add_data(dev, gc, data) ({ \
+ static struct lock_class_key lock_key; \
+ static struct lock_class_key request_key; \
+ devm_gpiochip_add_data_with_key(dev, gc, data, &lock_key, \
+ &request_key); \
+ })
+#else
+#define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL)
+#define devm_gpiochip_add_data(dev, gc, data) \
+ devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL)
+#endif /* CONFIG_LOCKDEP */
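Editor's note: a hedged sketch of minimal registration from a hypothetical platform driver's probe(). The callbacks (example_get/example_set) are assumed to exist elsewhere and the line count is illustrative.

#include <linux/gpio/driver.h>
#include <linux/platform_device.h>

static int example_gpio_probe(struct platform_device *pdev)
{
	struct gpio_chip *gc;

	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
	if (!gc)
		return -ENOMEM;

	gc->label = dev_name(&pdev->dev);
	gc->parent = &pdev->dev;
	gc->owner = THIS_MODULE;
	gc->base = -1;			/* request dynamic GPIO numbers */
	gc->ngpio = 8;			/* assumed line count */
	gc->get = example_get;		/* assumed callbacks */
	gc->set = example_set;
	gc->can_sleep = false;

	return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}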
-int bgpio_init(struct gpio_chip *gc, struct device *dev,
- unsigned long sz, void __iomem *dat, void __iomem *set,
- void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- unsigned long flags);
+void gpiochip_remove(struct gpio_chip *gc);
+int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
+ void *data, struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
-#define BGPIOF_BIG_ENDIAN BIT(0)
-#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
-#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
-#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
-#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
-#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
+struct gpio_device *gpio_device_find(const void *data,
+ int (*match)(struct gpio_chip *gc,
+ const void *data));
-#endif
+struct gpio_device *gpio_device_get(struct gpio_device *gdev);
+void gpio_device_put(struct gpio_device *gdev);
-#ifdef CONFIG_GPIOLIB_IRQCHIP
+DEFINE_FREE(gpio_device_put, struct gpio_device *,
+ if (!IS_ERR_OR_NULL(_T)) gpio_device_put(_T))
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int parent_irq,
- irq_flow_handler_t parent_handler);
+struct device *gpio_device_to_device(struct gpio_device *gdev);
-void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int parent_irq);
+bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
+int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset);
-int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type,
- bool nested,
- struct lock_class_key *lock_key);
+/* irq_data versions of the above */
+int gpiochip_irq_reqres(struct irq_data *data);
+void gpiochip_irq_relres(struct irq_data *data);
-#ifdef CONFIG_LOCKDEP
+/* Paste this in your irq_chip structure */
+#define GPIOCHIP_IRQ_RESOURCE_HELPERS \
+ .irq_request_resources = gpiochip_irq_reqres, \
+ .irq_release_resources = gpiochip_irq_relres
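Editor's note: a hedged sketch of an immutable irq_chip carrying these helpers. example_irq_mask()/example_irq_unmask() are assumed to exist and to call gpiochip_disable_irq()/gpiochip_enable_irq() respectively, as immutable chips must.

static const struct irq_chip example_irq_chip = {
	.name		= "example-gpio",
	.irq_mask	= example_irq_mask,	/* assumed, calls gpiochip_disable_irq() */
	.irq_unmask	= example_irq_unmask,	/* assumed, calls gpiochip_enable_irq() */
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};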
-/*
- * Lockdep requires that each irqchip instance be created with a
- * unique key so as to avoid unnecessary warnings. This upfront
- * boilerplate static inlines provides such a key for each
- * unique instance.
- */
-static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
+static inline void gpio_irq_chip_set_chip(struct gpio_irq_chip *girq,
+ const struct irq_chip *chip)
{
- static struct lock_class_key key;
-
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, false, &key);
+ /* Yes, dropping const is ugly, but it isn't like we have a choice */
+ girq->chip = (struct irq_chip *)chip;
}
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
+/* Line status inquiry for drivers */
+bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset);
+bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset);
- static struct lock_class_key key;
+/* Sleep persistence inquiry for drivers */
+bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset);
+bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset);
+const unsigned long *gpiochip_query_valid_mask(const struct gpio_chip *gc);
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, true, &key);
-}
+/* get driver data */
+void *gpiochip_get_data(struct gpio_chip *gc);
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+
+int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
+int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
+
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain);
#else
-static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, false, NULL);
-}
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
+#include <asm/bug.h>
+
+static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain)
{
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, true, NULL);
+ WARN_ON(1);
+ return -EINVAL;
}
-#endif /* CONFIG_LOCKDEP */
-
-#endif /* CONFIG_GPIOLIB_IRQCHIP */
+#endif
-int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
-void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
-int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config);
-#ifdef CONFIG_PINCTRL
-
/**
* struct gpio_pin_range - pin range controlled by a gpio chip
* @node: list for maintaining set of pin ranges, used internally
@@ -356,25 +716,72 @@ struct gpio_pin_range {
struct pinctrl_gpio_range range;
};
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins);
-int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+#ifdef CONFIG_PINCTRL
+
+int gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int const *pins,
+ unsigned int npins);
+int gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group);
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+void gpiochip_remove_pin_ranges(struct gpio_chip *gc);
-#else
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int npins)
+{
+ return gpiochip_add_pin_range_with_pins(gc, pinctl_name, gpio_offset,
+ pin_offset, NULL, npins);
+}
+
+static inline int
+gpiochip_add_sparse_pin_range(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int const *pins,
+ unsigned int npins)
+{
+ return gpiochip_add_pin_range_with_pins(gc, pinctl_name, gpio_offset, 0,
+ pins, npins);
+}
+#else /* ! CONFIG_PINCTRL */
+
+static inline int
+gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+				 unsigned int pin_offset,
+				 unsigned int const *pins,
+				 unsigned int npins)
+{
+ return 0;
+}
static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
return 0;
}
+
+static inline int
+gpiochip_add_sparse_pin_range(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int const *pins,
+ unsigned int npins)
+{
+ return 0;
+}
+
static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
+gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group)
{
@@ -382,18 +789,44 @@ gpiochip_add_pingroup_range(struct gpio_chip *chip,
}
static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+gpiochip_remove_pin_ranges(struct gpio_chip *gc)
{
}
#endif /* CONFIG_PINCTRL */
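Editor's note: a hedged sketch of the companion add_pin_ranges() hook from struct gpio_chip using this helper. The pin controller name and the offsets are illustrative assumptions.

static int example_add_pin_ranges(struct gpio_chip *gc)
{
	return gpiochip_add_pin_range(gc, "pinctrl-example",
				      0,	/* first GPIO offset */
				      32,	/* first pin offset */
				      16);	/* number of pins */
}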
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
- const char *label);
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
+ unsigned int hwnum,
+ const char *label,
+ enum gpio_lookup_flags lflags,
+ enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
+struct gpio_desc *
+gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum);
+
+struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev);
+
+#ifdef CONFIG_GPIOLIB
+
+/* lock/unlock as IRQ */
+int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset);
+
+struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc);
+
+/* struct gpio_device getters */
+int gpio_device_get_base(struct gpio_device *gdev);
+const char *gpio_device_get_label(struct gpio_device *gdev);
+
+struct gpio_device *gpio_device_find_by_label(const char *label);
+struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode);
+
#else /* CONFIG_GPIOLIB */
+#include <asm/bug.h>
+
static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -401,6 +834,73 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
return ERR_PTR(-ENODEV);
}
+static inline struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
+{
+ WARN_ON(1);
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int gpio_device_get_base(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return -ENODEV;
+}
+
+static inline const char *gpio_device_get_label(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline struct gpio_device *gpio_device_find_by_label(const char *label)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ WARN_ON(1);
+}
#endif /* CONFIG_GPIOLIB */
-#endif
+#define for_each_gpiochip_node(dev, child) \
+ device_for_each_child_node(dev, child) \
+ for_each_if(fwnode_property_present(child, "gpio-controller"))
+
+static inline unsigned int gpiochip_node_count(struct device *dev)
+{
+ struct fwnode_handle *child;
+ unsigned int count = 0;
+
+ for_each_gpiochip_node(dev, child)
+ count++;
+
+ return count;
+}
+
+static inline struct fwnode_handle *gpiochip_node_get_first(struct device *dev)
+{
+ struct fwnode_handle *fwnode;
+
+ for_each_gpiochip_node(dev, fwnode)
+ return fwnode;
+
+ return NULL;
+}
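Editor's note: a hedged sketch of the node helpers above in a probe() path that registers one chip per "gpio-controller" child node; the surrounding driver structure is assumed.

static int example_multi_bank_probe(struct device *dev)
{
	struct fwnode_handle *child;
	unsigned int nbanks = gpiochip_node_count(dev);

	if (!nbanks)
		return -ENODEV;

	for_each_gpiochip_node(dev, child) {
		/* allocate and register one gpio_chip per bank here */
	}

	return 0;
}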
+
+#endif /* __LINUX_GPIO_DRIVER_H */
diff --git a/include/linux/gpio/forwarder.h b/include/linux/gpio/forwarder.h
new file mode 100644
index 000000000000..ee5d8355f735
--- /dev/null
+++ b/include/linux/gpio/forwarder.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GPIO_FORWARDER_H
+#define __LINUX_GPIO_FORWARDER_H
+
+struct gpio_desc;
+struct gpio_chip;
+struct gpiochip_fwd;
+
+struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
+ unsigned int ngpios);
+int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd,
+ struct gpio_desc *desc, unsigned int offset);
+void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset);
+int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data);
+
+struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd);
+
+void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd);
+
+int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset);
+int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd,
+ unsigned int offset);
+int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd,
+ unsigned int offset);
+int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd,
+ unsigned int offset,
+ int value);
+int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset);
+int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd,
+ unsigned long *mask,
+ unsigned long *bits);
+int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset,
+ int value);
+int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd,
+ unsigned long *mask,
+ unsigned long *bits);
+int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
+ unsigned long config);
+int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset);
+
+#endif
diff --git a/include/linux/gpio/generic.h b/include/linux/gpio/generic.h
new file mode 100644
index 000000000000..ff566dc9c3cb
--- /dev/null
+++ b/include/linux/gpio/generic.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_GPIO_GENERIC_H
+#define __LINUX_GPIO_GENERIC_H
+
+#include <linux/cleanup.h>
+#include <linux/gpio/driver.h>
+#include <linux/spinlock.h>
+
+struct device;
+
+#define GPIO_GENERIC_BIG_ENDIAN BIT(0)
+#define GPIO_GENERIC_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
+#define GPIO_GENERIC_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
+#define GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER BIT(3)
+#define GPIO_GENERIC_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
+#define GPIO_GENERIC_NO_OUTPUT BIT(5) /* only input */
+#define GPIO_GENERIC_NO_SET_ON_INPUT BIT(6)
+#define GPIO_GENERIC_PINCTRL_BACKEND BIT(7) /* Call pinctrl direction setters */
+#define GPIO_GENERIC_NO_INPUT BIT(8) /* only output */
+
+/**
+ * struct gpio_generic_chip_config - Generic GPIO chip configuration data
+ * @dev: Parent device of the new GPIO chip (compulsory).
+ * @sz: Size (width) of the MMIO registers in bytes, typically 1, 2 or 4.
+ * @dat: MMIO address for the register to READ the value of the GPIO lines, it
+ * is expected that a 1 in the corresponding bit in this register means
+ * the line is asserted.
+ * @set: MMIO address for the register to SET the value of the GPIO lines, it
+ * is expected that we write the line with 1 in this register to drive
+ * the GPIO line high.
+ * @clr: MMIO address for the register to CLEAR the value of the GPIO lines,
+ * it is expected that we write the line with 1 in this register to
+ * drive the GPIO line low. It is allowed to leave this address as NULL,
+ * in that case the SET register will be assumed to also clear the GPIO
+ * lines, by actively writing the line with 0.
+ * @dirout: MMIO address for the register to set the line as OUTPUT. It is
+ * assumed that setting a line to 1 in this register will turn that
+ * line into an output line. Conversely, setting the line to 0 will
+ * turn that line into an input.
+ * @dirin: MMIO address for the register to set this line as INPUT. It is
+ * assumed that setting a line to 1 in this register will turn that
+ * line into an input line. Conversely, setting the line to 0 will
+ * turn that line into an output.
+ * @flags: Different flags that will affect the behaviour of the device, such
+ * as endianness etc.
+ */
+struct gpio_generic_chip_config {
+ struct device *dev;
+ unsigned long sz;
+ void __iomem *dat;
+ void __iomem *set;
+ void __iomem *clr;
+ void __iomem *dirout;
+ void __iomem *dirin;
+ unsigned long flags;
+};
+
+/**
+ * struct gpio_generic_chip - Generic GPIO chip implementation.
+ * @gc: The underlying struct gpio_chip object, implementing low-level GPIO
+ * chip routines.
+ * @read_reg: reader function for generic GPIO
+ * @write_reg: writer function for generic GPIO
+ * @be_bits: if the generic GPIO has big endian bit order (bit 31 is
+ * representing line 0, bit 30 is line 1 ... bit 0 is line 31) this
+ * is set to true by the generic GPIO core. It is for internal
+ * housekeeping only.
+ * @reg_dat: data (in) register for generic GPIO
+ * @reg_set: output set register (out=high) for generic GPIO
+ * @reg_clr: output clear register (out=low) for generic GPIO
+ * @reg_dir_out: direction out setting register for generic GPIO
+ * @reg_dir_in: direction in setting register for generic GPIO
+ * @dir_unreadable: indicates that the direction register(s) cannot be read and
+ *	we need to rely on our internal state tracking.
+ * @pinctrl: the generic GPIO uses a pin control backend.
+ * @bits: number of register bits used for a generic GPIO
+ * i.e. <register width> * 8
+ * @lock: used to lock chip->sdata. Also, this is needed to keep
+ * shadowed and real data registers writes together.
+ * @sdata: shadowed data register for generic GPIO to clear/set bits safely.
+ * @sdir: shadowed direction register for generic GPIO to clear/set direction
+ * safely. A "1" in this word means the line is set as output.
+ */
+struct gpio_generic_chip {
+ struct gpio_chip gc;
+ unsigned long (*read_reg)(void __iomem *reg);
+ void (*write_reg)(void __iomem *reg, unsigned long data);
+ bool be_bits;
+ void __iomem *reg_dat;
+ void __iomem *reg_set;
+ void __iomem *reg_clr;
+ void __iomem *reg_dir_out;
+ void __iomem *reg_dir_in;
+ bool dir_unreadable;
+ bool pinctrl;
+ int bits;
+ raw_spinlock_t lock;
+ unsigned long sdata;
+ unsigned long sdir;
+};
+
+static inline struct gpio_generic_chip *
+to_gpio_generic_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct gpio_generic_chip, gc);
+}
+
+int gpio_generic_chip_init(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg);
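Editor's note: a hedged sketch of filling in the config for a chip with separate set/clear and direction-out registers. The register offsets are illustrative assumptions; "base" is the ioremapped controller base address.

static int example_init_generic(struct device *dev, void __iomem *base,
				struct gpio_generic_chip *chip)
{
	struct gpio_generic_chip_config cfg = {
		.dev	= dev,
		.sz	= 4,			/* 32-bit registers */
		.dat	= base + 0x00,
		.set	= base + 0x04,
		.clr	= base + 0x08,
		.dirout	= base + 0x0c,
		.flags	= 0,
	};

	return gpio_generic_chip_init(chip, &cfg);
}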
+
+/**
+ * gpio_generic_chip_set() - Set the GPIO line value of the generic GPIO chip.
+ * @chip: Generic GPIO chip to use.
+ * @offset: Hardware offset of the line to set.
+ * @value: New GPIO line value.
+ *
+ * Some modules using the generic GPIO chip need to set line values in their
+ * direction setters but they don't have access to the gpio-mmio symbols so
+ * they use the function pointer in struct gpio_chip directly. This is not
+ * optimal and can lead to crashes at run-time in some instances. This wrapper
+ * provides a safe interface for users.
+ *
+ * Returns: 0 on success, negative error number on failure.
+ */
+static inline int
+gpio_generic_chip_set(struct gpio_generic_chip *chip, unsigned int offset,
+ int value)
+{
+ if (WARN_ON(!chip->gc.set))
+ return -EOPNOTSUPP;
+
+ return chip->gc.set(&chip->gc, offset, value);
+}
+
+/**
+ * gpio_generic_read_reg() - Read a register using the underlying callback.
+ * @chip: Generic GPIO chip to use.
+ * @reg: Register to read.
+ *
+ * Returns: value read from register.
+ */
+static inline unsigned long
+gpio_generic_read_reg(struct gpio_generic_chip *chip, void __iomem *reg)
+{
+ if (WARN_ON(!chip->read_reg))
+ return 0;
+
+ return chip->read_reg(reg);
+}
+
+/**
+ * gpio_generic_write_reg() - Write a register using the underlying callback.
+ * @chip: Generic GPIO chip to use.
+ * @reg: Register to write to.
+ * @val: New value to write.
+ */
+static inline void gpio_generic_write_reg(struct gpio_generic_chip *chip,
+ void __iomem *reg, unsigned long val)
+{
+ if (WARN_ON(!chip->write_reg))
+ return;
+
+ chip->write_reg(reg, val);
+}
+
+#define gpio_generic_chip_lock(gen_gc) \
+ raw_spin_lock(&(gen_gc)->lock)
+
+#define gpio_generic_chip_unlock(gen_gc) \
+ raw_spin_unlock(&(gen_gc)->lock)
+
+#define gpio_generic_chip_lock_irqsave(gen_gc, flags) \
+ raw_spin_lock_irqsave(&(gen_gc)->lock, flags)
+
+#define gpio_generic_chip_unlock_irqrestore(gen_gc, flags) \
+ raw_spin_unlock_irqrestore(&(gen_gc)->lock, flags)
+
+DEFINE_LOCK_GUARD_1(gpio_generic_lock,
+ struct gpio_generic_chip,
+ gpio_generic_chip_lock(_T->lock),
+ gpio_generic_chip_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(gpio_generic_lock_irqsave,
+ struct gpio_generic_chip,
+ gpio_generic_chip_lock_irqsave(_T->lock, _T->flags),
+ gpio_generic_chip_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
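Editor's note: a hedged sketch using the scoped guard defined above to update the shadow register under the chip lock, assuming a single read/write data register; the guard releases the lock and restores interrupt flags automatically when the function returns.

static void example_set_bit(struct gpio_generic_chip *chip, unsigned int bit)
{
	guard(gpio_generic_lock_irqsave)(chip);

	chip->sdata |= BIT(bit);
	/* Assumes reg_dat is both readable and writable on this chip. */
	gpio_generic_write_reg(chip, chip->reg_dat, chip->sdata);
}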
+
+#endif /* __LINUX_GPIO_GENERIC_H */
diff --git a/include/linux/gpio/gpio-nomadik.h b/include/linux/gpio/gpio-nomadik.h
new file mode 100644
index 000000000000..592a774a53cd
--- /dev/null
+++ b/include/linux/gpio/gpio-nomadik.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GPIO_NOMADIK_H
+#define __LINUX_GPIO_NOMADIK_H
+
+struct fwnode_handle;
+
+/* Package definitions */
+#define PINCTRL_NMK_STN8815 0
+#define PINCTRL_NMK_DB8500 1
+
+#define GPIO_BLOCK_SHIFT 5
+#define NMK_GPIO_PER_CHIP BIT(GPIO_BLOCK_SHIFT)
+#define NMK_MAX_BANKS DIV_ROUND_UP(512, NMK_GPIO_PER_CHIP)
+
+/* Register in the logic block */
+#define NMK_GPIO_DAT 0x00
+#define NMK_GPIO_DATS 0x04
+#define NMK_GPIO_DATC 0x08
+#define NMK_GPIO_PDIS 0x0c
+#define NMK_GPIO_DIR 0x10
+#define NMK_GPIO_DIRS 0x14
+#define NMK_GPIO_DIRC 0x18
+#define NMK_GPIO_SLPC 0x1c
+#define NMK_GPIO_AFSLA 0x20
+#define NMK_GPIO_AFSLB 0x24
+#define NMK_GPIO_LOWEMI 0x28
+
+#define NMK_GPIO_RIMSC 0x40
+#define NMK_GPIO_FIMSC 0x44
+#define NMK_GPIO_IS 0x48
+#define NMK_GPIO_IC 0x4c
+#define NMK_GPIO_RWIMSC 0x50
+#define NMK_GPIO_FWIMSC 0x54
+#define NMK_GPIO_WKS 0x58
+/* These appear in DB8540 and later ASICs */
+#define NMK_GPIO_EDGELEVEL 0x5C
+#define NMK_GPIO_LEVEL 0x60
+
+/* Pull up/down values */
+enum nmk_gpio_pull {
+ NMK_GPIO_PULL_NONE,
+ NMK_GPIO_PULL_UP,
+ NMK_GPIO_PULL_DOWN,
+};
+
+/* Sleep mode */
+enum nmk_gpio_slpm {
+ NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_NOCHANGE,
+ NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
+};
+
+struct nmk_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *addr;
+ struct clk *clk;
+ unsigned int bank;
+ void (*set_ioforce)(bool enable);
+ spinlock_t lock;
+ bool sleepmode;
+ bool is_mobileye_soc;
+ /* Keep track of configured edges */
+ u32 edge_rising;
+ u32 edge_falling;
+ u32 real_wake;
+ u32 rwimsc;
+ u32 fwimsc;
+ u32 rimsc;
+ u32 fimsc;
+ u32 pull_up;
+ u32 lowemi;
+};
+
+/* Alternate functions: function C is set in hw by setting both A and B */
+#define NMK_GPIO_ALT_GPIO 0
+#define NMK_GPIO_ALT_A 1
+#define NMK_GPIO_ALT_B 2
+#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
+
+#define NMK_GPIO_ALT_CX_SHIFT 2
+#define NMK_GPIO_ALT_C1 ((1<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C2 ((2<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C3 ((3<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C4 ((4<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+
+#define PRCM_GPIOCR_ALTCX(pin_num,\
+ altc1_used, altc1_ri, altc1_cb,\
+ altc2_used, altc2_ri, altc2_cb,\
+ altc3_used, altc3_ri, altc3_cb,\
+ altc4_used, altc4_ri, altc4_cb)\
+{\
+ .pin = pin_num,\
+ .altcx[PRCM_IDX_GPIOCR_ALTC1] = {\
+ .used = altc1_used,\
+ .reg_index = altc1_ri,\
+ .control_bit = altc1_cb\
+ },\
+ .altcx[PRCM_IDX_GPIOCR_ALTC2] = {\
+ .used = altc2_used,\
+ .reg_index = altc2_ri,\
+ .control_bit = altc2_cb\
+ },\
+ .altcx[PRCM_IDX_GPIOCR_ALTC3] = {\
+ .used = altc3_used,\
+ .reg_index = altc3_ri,\
+ .control_bit = altc3_cb\
+ },\
+ .altcx[PRCM_IDX_GPIOCR_ALTC4] = {\
+ .used = altc4_used,\
+ .reg_index = altc4_ri,\
+ .control_bit = altc4_cb\
+ },\
+}
+
+/**
+ * enum prcm_gpiocr_reg_index
+ * Used to reference a PRCM GPIOCR register address.
+ */
+enum prcm_gpiocr_reg_index {
+ PRCM_IDX_GPIOCR1,
+ PRCM_IDX_GPIOCR2,
+ PRCM_IDX_GPIOCR3
+};
+/**
+ * enum prcm_gpiocr_altcx_index
+ * Used to reference an Other alternate-C function.
+ */
+enum prcm_gpiocr_altcx_index {
+ PRCM_IDX_GPIOCR_ALTC1,
+ PRCM_IDX_GPIOCR_ALTC2,
+ PRCM_IDX_GPIOCR_ALTC3,
+ PRCM_IDX_GPIOCR_ALTC4,
+ PRCM_IDX_GPIOCR_ALTC_MAX,
+};
+
+/**
+ * struct prcm_gpiocr_altcx - Other alternate-C function
+ * @used: other alternate-C function availability
+ * @reg_index: PRCM GPIOCR register index used to control the function
+ * @control_bit: PRCM GPIOCR bit used to control the function
+ */
+struct prcm_gpiocr_altcx {
+ bool used:1;
+ u8 reg_index:2;
+ u8 control_bit:5;
+} __packed;
+
+/**
+ * struct prcm_gpiocr_altcx_pin_desc - Other alternate-C pin
+ * @pin: The pin number
+ * @altcx: array of other alternate-C[1-4] functions
+ */
+struct prcm_gpiocr_altcx_pin_desc {
+ unsigned short pin;
+ struct prcm_gpiocr_altcx altcx[PRCM_IDX_GPIOCR_ALTC_MAX];
+};
+
+/**
+ * struct nmk_function - Nomadik pinctrl mux function
+ * @name: The name of the function, exported to pinctrl core.
+ * @groups: An array of pin groups that may select this function.
+ * @ngroups: The number of entries in @groups.
+ */
+struct nmk_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct nmk_pingroup - describes a Nomadik pin group
+ * @grp: Generic data of the pin group (name and pins)
+ * @altsetting: the altsetting to apply to all pins in this group to
+ * configure them to be used by a function
+ */
+struct nmk_pingroup {
+ struct pingroup grp;
+ int altsetting;
+};
+
+#define NMK_PIN_GROUP(a, b) \
+ { \
+ .grp = PINCTRL_PINGROUP(#a, a##_pins, ARRAY_SIZE(a##_pins)), \
+ .altsetting = b, \
+ }
+
+/**
+ * struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
+ * @pins: An array describing all pins the pin controller affects.
+ * All pins which are also GPIOs must be listed first within the
+ * array, and be numbered identically to the GPIO controller's
+ * numbering.
+ * @npins: The number of entries in @pins.
+ * @functions: The functions supported on this SoC.
+ * @nfunction: The number of entries in @functions.
+ * @groups: An array describing all pin groups the SoC supports.
+ * @ngroups: The number of entries in @groups.
+ * @altcx_pins: The pins that support Other alternate-C function on this SoC
+ * @npins_altcx: The number of Other alternate-C pins
+ * @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
+ */
+struct nmk_pinctrl_soc_data {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ const struct nmk_function *functions;
+ unsigned int nfunctions;
+ const struct nmk_pingroup *groups;
+ unsigned int ngroups;
+ const struct prcm_gpiocr_altcx_pin_desc *altcx_pins;
+ unsigned int npins_altcx;
+ const u16 *prcm_gpiocr_registers;
+};
+
+#ifdef CONFIG_PINCTRL_STN8815
+
+void nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_DB8500
+
+void nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_DB8540
+
+void nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+struct platform_device;
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Symbols declared in gpio-nomadik used by pinctrl-nomadik. If pinctrl-nomadik
+ * is enabled, then gpio-nomadik is enabled as well; the reverse is not always
+ * true.
+ */
+void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip, unsigned int offset);
+
+#else
+
+static inline void nmk_gpio_dbg_show_one(struct seq_file *s,
+ struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip,
+ unsigned int offset)
+{
+}
+
+#endif
+
+void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned int offset, int val);
+void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip, unsigned int offset,
+ enum nmk_gpio_slpm mode);
+struct nmk_gpio_chip *nmk_gpio_populate_chip(struct fwnode_handle *fwnode,
+ struct platform_device *pdev);
+
+/* Symbols declared in pinctrl-nomadik used by gpio-nomadik. */
+#ifdef CONFIG_PINCTRL_NOMADIK
+extern struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
+extern spinlock_t nmk_gpio_slpm_lock;
+int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev,
+ int gpio);
+#endif
+
+#endif /* __LINUX_GPIO_NOMADIK_H */
diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h
index 90e0b9060e6d..3913b6660ed1 100644
--- a/include/linux/gpio/gpio-reg.h
+++ b/include/linux/gpio/gpio-reg.h
@@ -1,13 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef GPIO_REG_H
#define GPIO_REG_H
+#include <linux/types.h>
+
struct device;
struct irq_domain;
+struct gpio_chip;
+
struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
int base, int num, const char *label, u32 direction, u32 def_out,
const char *const *names, struct irq_domain *irqdom, const int *irqs);
int gpio_reg_resume(struct gpio_chip *gc);
-#endif
+#endif /* GPIO_REG_H */
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index ba4ccfd900f9..44e5f162973e 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -1,35 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GPIO_MACHINE_H
#define __LINUX_GPIO_MACHINE_H
#include <linux/types.h>
-#include <linux/list.h>
enum gpio_lookup_flags {
- GPIO_ACTIVE_HIGH = (0 << 0),
- GPIO_ACTIVE_LOW = (1 << 0),
- GPIO_OPEN_DRAIN = (1 << 1),
- GPIO_OPEN_SOURCE = (1 << 2),
- GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3),
- GPIO_SLEEP_MAY_LOOSE_VALUE = (1 << 3),
+ GPIO_ACTIVE_HIGH = (0 << 0),
+ GPIO_ACTIVE_LOW = (1 << 0),
+ GPIO_OPEN_DRAIN = (1 << 1),
+ GPIO_OPEN_SOURCE = (1 << 2),
+ GPIO_PERSISTENT = (0 << 3),
+ GPIO_TRANSITORY = (1 << 3),
+ GPIO_PULL_UP = (1 << 4),
+ GPIO_PULL_DOWN = (1 << 5),
+ GPIO_PULL_DISABLE = (1 << 6),
+
+ GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT,
};
/**
* struct gpiod_lookup - lookup table
- * @chip_label: name of the chip the GPIO belongs to
- * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @key: either the name of the chip the GPIO belongs to, or the GPIO line name
+ * Note that GPIO line names are not guaranteed to be globally unique,
+ * so this will use the first match found!
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO, or
+ * U16_MAX to indicate that @key is a GPIO line name
* @con_id: name of the GPIO from the device's point of view
* @idx: index of the GPIO in case several GPIOs share the same name
- * @flags: mask of GPIO_* values
+ * @flags: bitmask of gpio_lookup_flags GPIO_* values
*
* gpiod_lookup is a lookup table for associating GPIOs to specific devices and
* functions using platform data.
*/
struct gpiod_lookup {
- const char *chip_label;
+ const char *key;
u16 chip_hwnum;
const char *con_id;
unsigned int idx;
- enum gpio_lookup_flags flags;
+ unsigned long flags;
};
struct gpiod_lookup_table {
@@ -38,37 +46,82 @@ struct gpiod_lookup_table {
struct gpiod_lookup table[];
};
+/**
+ * struct gpiod_hog - GPIO line hog table
+ * @chip_label: name of the chip the GPIO belongs to
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @line_name: consumer name for the hogged line
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values
+ * @dflags: GPIO flags used to specify the direction and value
+ */
+struct gpiod_hog {
+ struct list_head list;
+ const char *chip_label;
+ u16 chip_hwnum;
+ const char *line_name;
+ unsigned long lflags;
+ int dflags;
+};
+
+/*
+ * Helper for lookup tables with just one single lookup for a device.
+ */
+#define GPIO_LOOKUP_SINGLE(_name, _dev_id, _key, _chip_hwnum, _con_id, _flags) \
+static struct gpiod_lookup_table _name = { \
+ .dev_id = _dev_id, \
+ .table = { \
+ GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags), \
+ {}, \
+ }, \
+}
+
/*
* Simple definition of a single GPIO under a con_id
*/
-#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \
- GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags)
+#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \
+ GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, 0, _flags)
/*
* Use this macro if you need to have several GPIOs under the same con_id.
* Each GPIO needs to use a different index and can be accessed using
* gpiod_get_index()
*/
-#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \
-{ \
- .chip_label = _chip_label, \
+#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \
+(struct gpiod_lookup) { \
+ .key = _key, \
.chip_hwnum = _chip_hwnum, \
.con_id = _con_id, \
.idx = _idx, \
.flags = _flags, \
}
+/*
+ * Simple definition of a single GPIO hog in an array.
+ */
+#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \
+(struct gpiod_hog) { \
+ .chip_label = _chip_label, \
+ .chip_hwnum = _chip_hwnum, \
+ .line_name = _line_name, \
+ .lflags = _lflags, \
+ .dflags = _dflags, \
+}
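
As a usage sketch, assuming a chip labelled "gpio-foo", line 3, and GPIOD_OUT_HIGH from <linux/gpio/consumer.h> (all placeholders), a board file could hog a line with gpiod_add_hogs() declared below:

static struct gpiod_hog example_hogs[] = {
	/* drive line 3 of "gpio-foo" high at chip registration */
	GPIO_HOG("gpio-foo", 3, "vbus-enable", GPIO_ACTIVE_HIGH,
		 GPIOD_OUT_HIGH),
	{ },	/* sentinel: a NULL chip_label terminates the array */
};

gpiod_add_hogs(example_hogs);	/* from board init code */
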
+
#ifdef CONFIG_GPIOLIB
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
-#else
+void gpiod_add_hogs(struct gpiod_hog *hogs);
+void gpiod_remove_hogs(struct gpiod_hog *hogs);
+#else /* ! CONFIG_GPIOLIB */
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
static inline
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
-#endif
+static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {}
+static inline void gpiod_remove_hogs(struct gpiod_hog *hogs) {}
+#endif /* CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_MACHINE_H */
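
For completeness, a minimal board-code sketch tying the lookup macros together; the "gpio-foo" chip label, "foo-device.0" dev_id and "reset" con_id are placeholders:

static struct gpiod_lookup_table example_gpios_table = {
	.dev_id = "foo-device.0",
	.table = {
		/* line 15 of "gpio-foo" becomes the device's "reset" GPIO */
		GPIO_LOOKUP_IDX("gpio-foo", 15, "reset", 0, GPIO_ACTIVE_LOW),
		{ },
	},
};

gpiod_add_lookup_table(&example_gpios_table);	/* from board init code */
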
diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h
new file mode 100644
index 000000000000..0d2209308002
--- /dev/null
+++ b/include/linux/gpio/property.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+#ifndef __LINUX_GPIO_PROPERTY_H
+#define __LINUX_GPIO_PROPERTY_H
+
+#include <linux/property.h>
+
+struct software_node;
+
+#define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
+ PROPERTY_ENTRY_REF(_name_, _chip_node_, _idx_, _flags_)
+
+extern const struct software_node swnode_gpio_undefined;
+
+#endif /* __LINUX_GPIO_PROPERTY_H */
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
new file mode 100644
index 000000000000..12d154732ca9
--- /dev/null
+++ b/include/linux/gpio/regmap.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_GPIO_REGMAP_H
+#define _LINUX_GPIO_REGMAP_H
+
+struct device;
+struct fwnode_handle;
+struct gpio_regmap;
+struct gpio_chip;
+struct irq_domain;
+struct regmap;
+
+#define GPIO_REGMAP_ADDR_ZERO ((unsigned int)(-1))
+#define GPIO_REGMAP_ADDR(addr) ((addr) ? : GPIO_REGMAP_ADDR_ZERO)
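
An unset base-address field is simply left at 0, so GPIO_REGMAP_ADDR() exists to let a register that really lives at offset 0 be described anyway; a sketch:

config.reg_dat_base = GPIO_REGMAP_ADDR(0x0);	/* mapped to GPIO_REGMAP_ADDR_ZERO */
config.reg_set_base = GPIO_REGMAP_ADDR(0x4);	/* stays 0x4 */
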
+
+/**
+ * struct gpio_regmap_config - Description of a generic regmap gpio_chip.
+ * @parent: The parent device
+ * @regmap: The regmap used to access the registers
+ * @fwnode: (Optional) The firmware node.
+ * If not given, the fwnode of the parent is used.
+ * @label: (Optional) Descriptive name for GPIO controller.
+ * If not given, the name of the device is used.
+ * @ngpio: (Optional) Number of GPIOs
+ * @names: (Optional) Array of names for gpios
+ * @reg_dat_base: (Optional) (in) register base address
+ * @reg_set_base: (Optional) set register base address
+ * @reg_clr_base: (Optional) clear register base address
+ * @reg_dir_in_base: (Optional) direction-in setting register base address
+ * @reg_dir_out_base: (Optional) direction-out setting register base address
+ * @reg_stride: (Optional) May be set if the registers (of the
+ * same type, dat, set, etc) are not consecutive.
+ * @ngpio_per_reg: (Optional) Number of GPIOs per register
+ * @irq_domain: (Optional) IRQ domain if the controller is
+ * interrupt-capable
+ * @reg_mask_xlate: (Optional) Translates base address and GPIO
+ * offset to a register/bitmask pair. If not
+ * given the default gpio_regmap_simple_xlate()
+ * is used.
+ * @fixed_direction_output:
+ * (Optional) Bitmap representing the fixed direction of
+ * the GPIO lines. Useful when there are GPIO lines with a
+ * fixed direction mixed together in the same register.
+ * @drvdata: (Optional) Pointer to driver specific data which is
+ * not used by gpio-regmap but is provided "as is" to the
+ * driver callback(s).
+ * @init_valid_mask: (Optional) Routine to initialize @valid_mask, to be used
+ * if not all GPIOs are valid.
+ * @regmap_irq_chip: (Optional) Pointer to a regmap_irq_chip structure. If
+ * set, a regmap-irq device will be created and the IRQ
+ * domain will be set accordingly.
+ * @regmap_irq_line: (Optional) The IRQ the device uses to signal interrupts.
+ * @regmap_irq_flags: (Optional) The IRQF_ flags to use for the interrupt.
+ *
+ * The ->reg_mask_xlate translates a given base address and GPIO offset to
+ * register and mask pair. The base address is one of the given register
+ * base addresses in this structure.
+ *
+ * Although all register base addresses are marked as optional, there are
+ * several rules:
+ * 1. if you only have @reg_dat_base set, then it is input-only
+ * 2. if you only have @reg_set_base set, then it is output-only
+ * 3. if you have either @reg_dir_in_base or @reg_dir_out_base set, then
+ * you have to set both @reg_dat_base and @reg_set_base
+ * 4. if you have @reg_set_base set, you may also set @reg_clr_base to have
+ * two different registers for setting and clearing the output. This is
+ * also valid for the output-only case.
+ * 5. @reg_dir_in_base and @reg_dir_out_base are mutually exclusive; they
+ *    encode the same direction information, so no hardware should need both
+ *
+ * Note: All base addresses may have the special value %GPIO_REGMAP_ADDR_ZERO
+ * which forces the address to the value 0.
+ */
+struct gpio_regmap_config {
+ struct device *parent;
+ struct regmap *regmap;
+ struct fwnode_handle *fwnode;
+
+ const char *label;
+ int ngpio;
+ const char *const *names;
+
+ unsigned int reg_dat_base;
+ unsigned int reg_set_base;
+ unsigned int reg_clr_base;
+ unsigned int reg_dir_in_base;
+ unsigned int reg_dir_out_base;
+ int reg_stride;
+ int ngpio_per_reg;
+ struct irq_domain *irq_domain;
+ unsigned long *fixed_direction_output;
+
+#ifdef CONFIG_REGMAP_IRQ
+ struct regmap_irq_chip *regmap_irq_chip;
+ int regmap_irq_line;
+ unsigned long regmap_irq_flags;
+#endif
+
+ int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask);
+
+ int (*init_valid_mask)(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios);
+
+ void *drvdata;
+};
+
+struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config);
+void gpio_regmap_unregister(struct gpio_regmap *gpio);
+struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
+ const struct gpio_regmap_config *config);
+void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio);
+
+#endif /* _LINUX_GPIO_REGMAP_H */
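
A minimal registration sketch, assuming a platform device whose parent provides the regmap and illustrative register offsets (per rules 3-4 above, both @reg_dat_base and @reg_set_base are set alongside @reg_dir_out_base):

static int example_gpio_probe(struct platform_device *pdev)
{
	struct gpio_regmap_config config = {
		.parent		  = &pdev->dev,
		.regmap		  = dev_get_regmap(pdev->dev.parent, NULL),
		.ngpio		  = 16,
		.reg_dat_base	  = GPIO_REGMAP_ADDR(0x00),
		.reg_set_base	  = GPIO_REGMAP_ADDR(0x04),
		.reg_dir_out_base = GPIO_REGMAP_ADDR(0x08),
	};

	/* unregisters automatically when the device goes away */
	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(&pdev->dev, &config));
}
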
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 0b71024c082c..80fa930b04c6 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GPIO_KEYS_H
#define _GPIO_KEYS_H
+#include <linux/types.h>
+
struct device;
/**
@@ -12,11 +15,13 @@ struct device;
* @desc: label that will be attached to button's gpio
* @type: input event type (%EV_KEY, %EV_SW, %EV_ABS)
* @wakeup: configure the button as a wake-up source
+ * @wakeup_event_action: event action to trigger wakeup
* @debounce_interval: debounce ticks interval in msecs
* @can_disable: %true indicates that userspace is allowed to
* disable button via sysfs
* @value: axis value for %EV_ABS
* @irq: Irq number in case of interrupt keys
+ * @wakeirq: Optional dedicated wake-up interrupt
*/
struct gpio_keys_button {
unsigned int code;
@@ -25,10 +30,12 @@ struct gpio_keys_button {
const char *desc;
unsigned int type;
int wakeup;
+ int wakeup_event_action;
int debounce_interval;
bool can_disable;
int value;
unsigned int irq;
+ unsigned int wakeirq;
};
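
A sketch of one wakeup-capable button, with illustrative values; KEY_POWER and EV_KEY come from <linux/input.h>:

static struct gpio_keys_button example_buttons[] = {
	{
		.code			= KEY_POWER,
		.desc			= "power",
		.type			= EV_KEY,
		.wakeup			= 1,
		.debounce_interval	= 20,	/* ms */
	},
};
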
/**
diff --git a/include/linux/gpio_mouse.h b/include/linux/gpio_mouse.h
deleted file mode 100644
index 44ed7aa14d85..000000000000
--- a/include/linux/gpio_mouse.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Driver for simulating a mouse on GPIO lines.
- *
- * Copyright (C) 2007 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _GPIO_MOUSE_H
-#define _GPIO_MOUSE_H
-
-#define GPIO_MOUSE_POLARITY_ACT_HIGH 0x00
-#define GPIO_MOUSE_POLARITY_ACT_LOW 0x01
-
-#define GPIO_MOUSE_PIN_UP 0
-#define GPIO_MOUSE_PIN_DOWN 1
-#define GPIO_MOUSE_PIN_LEFT 2
-#define GPIO_MOUSE_PIN_RIGHT 3
-#define GPIO_MOUSE_PIN_BLEFT 4
-#define GPIO_MOUSE_PIN_BMIDDLE 5
-#define GPIO_MOUSE_PIN_BRIGHT 6
-#define GPIO_MOUSE_PIN_MAX 7
-
-/**
- * struct gpio_mouse_platform_data
- * @scan_ms: integer in ms specifying the scan periode.
- * @polarity: Pin polarity, active high or low.
- * @up: GPIO line for up value.
- * @down: GPIO line for down value.
- * @left: GPIO line for left value.
- * @right: GPIO line for right value.
- * @bleft: GPIO line for left button.
- * @bmiddle: GPIO line for middle button.
- * @bright: GPIO line for right button.
- *
- * This struct must be added to the platform_device in the board code.
- * It is used by the gpio_mouse driver to setup GPIO lines and to
- * calculate mouse movement.
- */
-struct gpio_mouse_platform_data {
- int scan_ms;
- int polarity;
-
- union {
- struct {
- int up;
- int down;
- int left;
- int right;
-
- int bleft;
- int bmiddle;
- int bright;
- };
- int pins[GPIO_MOUSE_PIN_MAX];
- };
-};
-
-#endif /* _GPIO_MOUSE_H */
diff --git a/include/linux/greybus.h b/include/linux/greybus.h
new file mode 100644
index 000000000000..4d58e27ceaf6
--- /dev/null
+++ b/include/linux/greybus.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus driver and device API
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#ifndef __LINUX_GREYBUS_H
+#define __LINUX_GREYBUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/idr.h>
+
+#include <linux/greybus/greybus_id.h>
+#include <linux/greybus/greybus_manifest.h>
+#include <linux/greybus/greybus_protocols.h>
+#include <linux/greybus/manifest.h>
+#include <linux/greybus/hd.h>
+#include <linux/greybus/svc.h>
+#include <linux/greybus/control.h>
+#include <linux/greybus/module.h>
+#include <linux/greybus/interface.h>
+#include <linux/greybus/bundle.h>
+#include <linux/greybus/connection.h>
+#include <linux/greybus/operation.h>
+
+/* Matches up with the Greybus Protocol specification document */
+#define GREYBUS_VERSION_MAJOR 0x00
+#define GREYBUS_VERSION_MINOR 0x01
+
+#define GREYBUS_ID_MATCH_DEVICE \
+ (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT)
+
+#define GREYBUS_DEVICE(v, p) \
+ .match_flags = GREYBUS_ID_MATCH_DEVICE, \
+ .vendor = (v), \
+ .product = (p),
+
+#define GREYBUS_DEVICE_CLASS(c) \
+ .match_flags = GREYBUS_ID_MATCH_CLASS, \
+ .class = (c),
+
+/* Maximum number of CPorts */
+#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */
+#define CPORT_ID_BAD U16_MAX
+
+struct greybus_driver {
+ const char *name;
+
+ int (*probe)(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id);
+ void (*disconnect)(struct gb_bundle *bundle);
+
+ const struct greybus_bundle_id *id_table;
+
+ struct device_driver driver;
+};
+#define to_greybus_driver(d) container_of_const(d, struct greybus_driver, driver)
+
+static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
+{
+ dev_set_drvdata(&bundle->dev, data);
+}
+
+static inline void *greybus_get_drvdata(struct gb_bundle *bundle)
+{
+ return dev_get_drvdata(&bundle->dev);
+}
+
+/* Don't call these directly, use the module_greybus_driver() macro instead */
+int greybus_register_driver(struct greybus_driver *driver,
+ struct module *module, const char *mod_name);
+void greybus_deregister_driver(struct greybus_driver *driver);
+
+/* define to get proper THIS_MODULE and KBUILD_MODNAME values */
+#define greybus_register(driver) \
+ greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+#define greybus_deregister(driver) \
+ greybus_deregister_driver(driver)
+
+/**
+ * module_greybus_driver() - Helper macro for registering a Greybus driver
+ * @__greybus_driver: greybus_driver structure
+ *
+ * Helper macro for Greybus drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_greybus_driver(__greybus_driver) \
+ module_driver(__greybus_driver, greybus_register, greybus_deregister)
+
+int greybus_disabled(void);
+
+void gb_debugfs_init(void);
+void gb_debugfs_cleanup(void);
+struct dentry *gb_debugfs_get(void);
+
+extern const struct bus_type greybus_bus_type;
+
+extern const struct device_type greybus_hd_type;
+extern const struct device_type greybus_module_type;
+extern const struct device_type greybus_interface_type;
+extern const struct device_type greybus_control_type;
+extern const struct device_type greybus_bundle_type;
+extern const struct device_type greybus_svc_type;
+
+static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
+{
+ return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_GREYBUS_H */
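
Tying the pieces together, a minimal bundle driver sketch; the "example" naming is a placeholder and GREYBUS_CLASS_VIBRATOR comes from greybus_manifest.h below:

static int example_probe(struct gb_bundle *bundle,
			 const struct greybus_bundle_id *id)
{
	/* set up connections for the bundle's CPorts here */
	return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
}

static const struct greybus_bundle_id example_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
	{ }
};

static struct greybus_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};
module_greybus_driver(example_driver);
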
diff --git a/include/linux/greybus/bundle.h b/include/linux/greybus/bundle.h
new file mode 100644
index 000000000000..df8d88424cb7
--- /dev/null
+++ b/include/linux/greybus/bundle.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus bundles
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __BUNDLE_H
+#define __BUNDLE_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/pm_runtime.h>
+#include <linux/device.h>
+
+#define BUNDLE_ID_NONE U8_MAX
+
+/* Greybus "public" definitions" */
+struct gb_bundle {
+ struct device dev;
+ struct gb_interface *intf;
+
+ u8 id;
+ u8 class;
+ u8 class_major;
+ u8 class_minor;
+
+ size_t num_cports;
+ struct greybus_descriptor_cport *cport_desc;
+
+ struct list_head connections;
+ u8 *state;
+
+ struct list_head links; /* interface->bundles */
+};
+#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
+
+/* Greybus "private" definitions" */
+struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
+ u8 class);
+int gb_bundle_add(struct gb_bundle *bundle);
+void gb_bundle_destroy(struct gb_bundle *bundle);
+
+/* Bundle Runtime PM wrappers */
+#ifdef CONFIG_PM
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{
+ int retval;
+
+ retval = pm_runtime_get_sync(&bundle->dev);
+ if (retval < 0) {
+ dev_err(&bundle->dev,
+ "pm_runtime_get_sync failed: %d\n", retval);
+ pm_runtime_put_noidle(&bundle->dev);
+ return retval;
+ }
+
+ return 0;
+}
+
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{
+ int retval;
+
+ pm_runtime_mark_last_busy(&bundle->dev);
+ retval = pm_runtime_put_autosuspend(&bundle->dev);
+
+ return retval;
+}
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
+{
+ pm_runtime_get_noresume(&bundle->dev);
+}
+
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
+{
+ pm_runtime_put_noidle(&bundle->dev);
+}
+
+#else
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{ return 0; }
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{ return 0; }
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
+#endif
+
+#endif /* __BUNDLE_H */
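
The wrappers above are meant to bracket bundle activity; a sketch of the typical pattern in an operation path:

static int example_do_work(struct gb_bundle *bundle)
{
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);	/* resume, or bail out */
	if (ret)
		return ret;

	/* ... issue Greybus operations here ... */

	gb_pm_runtime_put_autosuspend(bundle);	/* arm delayed suspend */
	return 0;
}
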
diff --git a/include/linux/greybus/connection.h b/include/linux/greybus/connection.h
new file mode 100644
index 000000000000..d59b7fc1de3e
--- /dev/null
+++ b/include/linux/greybus/connection.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus connections
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __CONNECTION_H
+#define __CONNECTION_H
+
+#include <linux/bits.h>
+#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+
+#define GB_CONNECTION_FLAG_CSD BIT(0)
+#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1)
+#define GB_CONNECTION_FLAG_OFFLOADED BIT(2)
+#define GB_CONNECTION_FLAG_CDSI1 BIT(3)
+#define GB_CONNECTION_FLAG_CONTROL BIT(4)
+#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5)
+
+#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL
+
+enum gb_connection_state {
+ GB_CONNECTION_STATE_DISABLED = 0,
+ GB_CONNECTION_STATE_ENABLED_TX = 1,
+ GB_CONNECTION_STATE_ENABLED = 2,
+ GB_CONNECTION_STATE_DISCONNECTING = 3,
+};
+
+struct gb_operation;
+
+typedef int (*gb_request_handler_t)(struct gb_operation *);
+
+struct gb_connection {
+ struct gb_host_device *hd;
+ struct gb_interface *intf;
+ struct gb_bundle *bundle;
+ struct kref kref;
+ u16 hd_cport_id;
+ u16 intf_cport_id;
+
+ struct list_head hd_links;
+ struct list_head bundle_links;
+
+ gb_request_handler_t handler;
+ unsigned long flags;
+
+ struct mutex mutex;
+ spinlock_t lock;
+ enum gb_connection_state state;
+ struct list_head operations;
+
+ char name[16];
+ struct workqueue_struct *wq;
+
+ atomic_t op_cycle;
+
+ void *private;
+
+ bool mode_switch;
+};
+
+struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
+ u16 hd_cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
+struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
+ u16 cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
+ u16 cport_id, gb_request_handler_t handler,
+ unsigned long flags);
+struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
+ u16 cport_id, unsigned long flags);
+void gb_connection_destroy(struct gb_connection *connection);
+
+static inline bool gb_connection_is_static(struct gb_connection *connection)
+{
+ return !connection->intf;
+}
+
+int gb_connection_enable(struct gb_connection *connection);
+int gb_connection_enable_tx(struct gb_connection *connection);
+void gb_connection_disable_rx(struct gb_connection *connection);
+void gb_connection_disable(struct gb_connection *connection);
+void gb_connection_disable_forced(struct gb_connection *connection);
+
+void gb_connection_mode_switch_prepare(struct gb_connection *connection);
+void gb_connection_mode_switch_complete(struct gb_connection *connection);
+
+void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
+ u8 *data, size_t length);
+
+void gb_connection_latency_tag_enable(struct gb_connection *connection);
+void gb_connection_latency_tag_disable(struct gb_connection *connection);
+
+static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
+{
+ return !(connection->flags & GB_CONNECTION_FLAG_CSD);
+}
+
+static inline bool
+gb_connection_flow_control_disabled(struct gb_connection *connection)
+{
+ return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
+}
+
+static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
+{
+ return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
+}
+
+static inline bool gb_connection_is_control(struct gb_connection *connection)
+{
+ return connection->flags & GB_CONNECTION_FLAG_CONTROL;
+}
+
+static inline void *gb_connection_get_data(struct gb_connection *connection)
+{
+ return connection->private;
+}
+
+static inline void gb_connection_set_data(struct gb_connection *connection,
+ void *data)
+{
+ connection->private = data;
+}
+
+#endif /* __CONNECTION_H */
diff --git a/include/linux/greybus/control.h b/include/linux/greybus/control.h
new file mode 100644
index 000000000000..da11fe871653
--- /dev/null
+++ b/include/linux/greybus/control.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus CPort control protocol
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#ifndef __CONTROL_H
+#define __CONTROL_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct gb_control {
+ struct device dev;
+ struct gb_interface *intf;
+
+ struct gb_connection *connection;
+
+ u8 protocol_major;
+ u8 protocol_minor;
+
+ bool has_bundle_activate;
+ bool has_bundle_version;
+
+ char *vendor_string;
+ char *product_string;
+};
+#define to_gb_control(d) container_of(d, struct gb_control, dev)
+
+struct gb_control *gb_control_create(struct gb_interface *intf);
+int gb_control_enable(struct gb_control *control);
+void gb_control_disable(struct gb_control *control);
+int gb_control_suspend(struct gb_control *control);
+int gb_control_resume(struct gb_control *control);
+int gb_control_add(struct gb_control *control);
+void gb_control_del(struct gb_control *control);
+struct gb_control *gb_control_get(struct gb_control *control);
+void gb_control_put(struct gb_control *control);
+
+int gb_control_get_bundle_versions(struct gb_control *control);
+int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnecting_operation(struct gb_control *control,
+ u16 cport_id);
+int gb_control_mode_switch_operation(struct gb_control *control);
+void gb_control_mode_switch_prepare(struct gb_control *control);
+void gb_control_mode_switch_complete(struct gb_control *control);
+int gb_control_get_manifest_size_operation(struct gb_interface *intf);
+int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
+ size_t size);
+int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
+int gb_control_interface_suspend_prepare(struct gb_control *control);
+int gb_control_interface_deactivate_prepare(struct gb_control *control);
+int gb_control_interface_hibernate_abort(struct gb_control *control);
+#endif /* __CONTROL_H */
diff --git a/include/linux/greybus/greybus_id.h b/include/linux/greybus/greybus_id.h
new file mode 100644
index 000000000000..f4c8440093e4
--- /dev/null
+++ b/include/linux/greybus/greybus_id.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* FIXME
+ * move this to include/linux/mod_devicetable.h when merging
+ */
+
+#ifndef __LINUX_GREYBUS_ID_H
+#define __LINUX_GREYBUS_ID_H
+
+#include <linux/types.h>
+#include <linux/mod_devicetable.h>
+
+
+struct greybus_bundle_id {
+ __u16 match_flags;
+ __u32 vendor;
+ __u32 product;
+ __u8 class;
+
+ kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t));
+};
+
+/* Used to match the greybus_bundle_id */
+#define GREYBUS_ID_MATCH_VENDOR BIT(0)
+#define GREYBUS_ID_MATCH_PRODUCT BIT(1)
+#define GREYBUS_ID_MATCH_CLASS BIT(2)
+
+#endif /* __LINUX_GREYBUS_ID_H */
diff --git a/include/linux/greybus/greybus_manifest.h b/include/linux/greybus/greybus_manifest.h
new file mode 100644
index 000000000000..bef9eb2093e9
--- /dev/null
+++ b/include/linux/greybus/greybus_manifest.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus manifest definition
+ *
+ * See "Greybus Application Protocol" document (version 0.1) for
+ * details on these values and structures.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 and BSD licenses.
+ */
+
+#ifndef __GREYBUS_MANIFEST_H
+#define __GREYBUS_MANIFEST_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+enum greybus_descriptor_type {
+ GREYBUS_TYPE_INVALID = 0x00,
+ GREYBUS_TYPE_INTERFACE = 0x01,
+ GREYBUS_TYPE_STRING = 0x02,
+ GREYBUS_TYPE_BUNDLE = 0x03,
+ GREYBUS_TYPE_CPORT = 0x04,
+};
+
+enum greybus_protocol {
+ GREYBUS_PROTOCOL_CONTROL = 0x00,
+ /* 0x01 is unused */
+ GREYBUS_PROTOCOL_GPIO = 0x02,
+ GREYBUS_PROTOCOL_I2C = 0x03,
+ GREYBUS_PROTOCOL_UART = 0x04,
+ GREYBUS_PROTOCOL_HID = 0x05,
+ GREYBUS_PROTOCOL_USB = 0x06,
+ GREYBUS_PROTOCOL_SDIO = 0x07,
+ GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08,
+ GREYBUS_PROTOCOL_PWM = 0x09,
+ /* 0x0a is unused */
+ GREYBUS_PROTOCOL_SPI = 0x0b,
+ GREYBUS_PROTOCOL_DISPLAY = 0x0c,
+ GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d,
+ GREYBUS_PROTOCOL_SENSOR = 0x0e,
+ GREYBUS_PROTOCOL_LIGHTS = 0x0f,
+ GREYBUS_PROTOCOL_VIBRATOR = 0x10,
+ GREYBUS_PROTOCOL_LOOPBACK = 0x11,
+ GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12,
+ GREYBUS_PROTOCOL_AUDIO_DATA = 0x13,
+ GREYBUS_PROTOCOL_SVC = 0x14,
+ GREYBUS_PROTOCOL_BOOTROM = 0x15,
+ GREYBUS_PROTOCOL_CAMERA_DATA = 0x16,
+ GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17,
+ GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18,
+ GREYBUS_PROTOCOL_AUTHENTICATION = 0x19,
+ GREYBUS_PROTOCOL_LOG = 0x1a,
+ /* ... */
+ GREYBUS_PROTOCOL_RAW = 0xfe,
+ GREYBUS_PROTOCOL_VENDOR = 0xff,
+};
+
+enum greybus_class_type {
+ GREYBUS_CLASS_CONTROL = 0x00,
+ /* 0x01 is unused */
+ /* 0x02 is unused */
+ /* 0x03 is unused */
+ /* 0x04 is unused */
+ GREYBUS_CLASS_HID = 0x05,
+ /* 0x06 is unused */
+ /* 0x07 is unused */
+ GREYBUS_CLASS_POWER_SUPPLY = 0x08,
+ /* 0x09 is unused */
+ GREYBUS_CLASS_BRIDGED_PHY = 0x0a,
+ /* 0x0b is unused */
+ GREYBUS_CLASS_DISPLAY = 0x0c,
+ GREYBUS_CLASS_CAMERA = 0x0d,
+ GREYBUS_CLASS_SENSOR = 0x0e,
+ GREYBUS_CLASS_LIGHTS = 0x0f,
+ GREYBUS_CLASS_VIBRATOR = 0x10,
+ GREYBUS_CLASS_LOOPBACK = 0x11,
+ GREYBUS_CLASS_AUDIO = 0x12,
+ /* 0x13 is unused */
+ /* 0x14 is unused */
+ GREYBUS_CLASS_BOOTROM = 0x15,
+ GREYBUS_CLASS_FW_MANAGEMENT = 0x16,
+ GREYBUS_CLASS_LOG = 0x17,
+ /* ... */
+ GREYBUS_CLASS_RAW = 0xfe,
+ GREYBUS_CLASS_VENDOR = 0xff,
+};
+
+enum {
+ GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
+};
+
+/*
+ * The string in a string descriptor is not NUL-terminated. The
+ * size of the descriptor will be rounded up to a multiple of 4
+ * bytes, by padding the string with 0x00 bytes if necessary.
+ */
+struct greybus_descriptor_string {
+ __u8 length;
+ __u8 id;
+ __u8 string[];
+} __packed;
+
+/*
+ * An interface descriptor describes information about an interface as a whole,
+ * *not* the functions within it.
+ */
+struct greybus_descriptor_interface {
+ __u8 vendor_stringid;
+ __u8 product_stringid;
+ __u8 features;
+ __u8 pad;
+} __packed;
+
+/*
+ * A bundle descriptor defines an identification number and a class for
+ * each bundle.
+ *
+ * @id: Uniquely identifies a bundle within an interface; its sole purpose
+ * is to allow CPort descriptors to specify which bundle they are associated
+ * with. The first bundle has id 0, the second id 1, and so on.
+ *
+ * The largest CPort id associated with a bundle (defined by a
+ * CPort descriptor in the manifest) is used to determine how to
+ * encode the device id and module number in UniPro packets
+ * that use the bundle.
+ *
+ * @class: Used by the kernel to know the functionality provided by the
+ * bundle; it is matched against a driver's declared functionality when
+ * probing the greybus driver. It must contain one of the values defined
+ * in 'enum greybus_class_type'.
+ */
+struct greybus_descriptor_bundle {
+ __u8 id; /* interface-relative id (0..) */
+ __u8 class;
+ __u8 pad[2];
+} __packed;
+
+/*
+ * A CPort descriptor indicates the id of the bundle within the
+ * module it's associated with, along with the CPort id used to
+ * address the CPort. The protocol id defines the format of messages
+ * exchanged using the CPort.
+ */
+struct greybus_descriptor_cport {
+ __le16 id;
+ __u8 bundle;
+ __u8 protocol_id; /* enum greybus_protocol */
+} __packed;
+
+struct greybus_descriptor_header {
+ __le16 size;
+ __u8 type; /* enum greybus_descriptor_type */
+ __u8 pad;
+} __packed;
+
+struct greybus_descriptor {
+ struct greybus_descriptor_header header;
+ union {
+ struct greybus_descriptor_string string;
+ struct greybus_descriptor_interface interface;
+ struct greybus_descriptor_bundle bundle;
+ struct greybus_descriptor_cport cport;
+ };
+} __packed;
+
+struct greybus_manifest_header {
+ __le16 size;
+ __u8 version_major;
+ __u8 version_minor;
+} __packed;
+
+struct greybus_manifest {
+ struct greybus_manifest_header header;
+ struct greybus_descriptor descriptors[];
+} __packed;
+
+#endif /* __GREYBUS_MANIFEST_H */
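
Since every descriptor carries its own little-endian size (header included), a parser can walk the blob one header at a time; a sketch with an illustrative function name:

static void example_walk_manifest(struct greybus_manifest *m)
{
	u8 *p = (u8 *)m->descriptors;
	u8 *end = (u8 *)m + le16_to_cpu(m->header.size);

	while (p + sizeof(struct greybus_descriptor_header) <= end) {
		struct greybus_descriptor *desc = (void *)p;
		u16 size = le16_to_cpu(desc->header.size);

		if (size < sizeof(desc->header) || p + size > end)
			break;		/* malformed manifest */
		/* dispatch on desc->header.type here */
		p += size;
	}
}
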
diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h
new file mode 100644
index 000000000000..820134b0105c
--- /dev/null
+++ b/include/linux/greybus/greybus_protocols.h
@@ -0,0 +1,2174 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
+ * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __GREYBUS_PROTOCOLS_H
+#define __GREYBUS_PROTOCOLS_H
+
+#include <linux/types.h>
+
+/* Fixed IDs for control/svc protocols */
+
+/* SVC switch-port device ids */
+#define GB_SVC_DEVICE_ID_SVC 0
+#define GB_SVC_DEVICE_ID_AP 1
+#define GB_SVC_DEVICE_ID_MIN 2
+#define GB_SVC_DEVICE_ID_MAX 31
+
+#define GB_SVC_CPORT_ID 0
+#define GB_CONTROL_BUNDLE_ID 0
+#define GB_CONTROL_CPORT_ID 0
+
+
+/*
+ * All operation messages (both requests and responses) begin with
+ * a header that encodes the size of the message (header included).
+ * This header also contains a unique identifier, that associates a
+ * response message with its operation. The header contains an
+ * operation type field, whose interpretation is dependent on what
+ * type of protocol is used over the connection. The high bit
+ * (0x80) of the operation type field is used to indicate whether
+ * the message is a request (clear) or a response (set).
+ *
+ * Response messages include an additional result byte, which
+ * communicates the result of the corresponding request. A zero
+ * result value means the operation completed successfully. Any
+ * other value indicates an error; in this case, the payload of the
+ * response message (if any) is ignored. The result byte must be
+ * zero in the header for a request message.
+ *
+ * The wire format for all numeric fields in the header is little
+ * endian. Any operation-specific data begins immediately after the
+ * header.
+ */
+struct gb_operation_msg_hdr {
+ __le16 size; /* Size in bytes of header + payload */
+ __le16 operation_id; /* Operation unique id */
+ __u8 type; /* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
+ __u8 result; /* Result of request (in responses only) */
+ __u8 pad[2]; /* must be zero (ignore when read) */
+} __packed;
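
A sketch of the request/response convention just described; the 0x80 mask mirrors the comment above and the names are illustrative:

#define EXAMPLE_TYPE_RESPONSE	0x80	/* high bit set in responses */

static inline bool example_hdr_is_response(const struct gb_operation_msg_hdr *hdr)
{
	return hdr->type & EXAMPLE_TYPE_RESPONSE;
}
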
+
+
+/* Generic request types */
+#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00
+#define GB_REQUEST_TYPE_INVALID 0x7f
+
+struct gb_cport_shutdown_request {
+ __u8 phase;
+} __packed;
+
+
+/* Control Protocol */
+
+/* Greybus control request types */
+#define GB_CONTROL_TYPE_VERSION 0x01
+#define GB_CONTROL_TYPE_PROBE_AP 0x02
+#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03
+#define GB_CONTROL_TYPE_GET_MANIFEST 0x04
+#define GB_CONTROL_TYPE_CONNECTED 0x05
+#define GB_CONTROL_TYPE_DISCONNECTED 0x06
+#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07
+#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08
+#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09
+/* Unused 0x0a */
+#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b
+#define GB_CONTROL_TYPE_DISCONNECTING 0x0c
+#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d
+#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e
+#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f
+#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10
+#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11
+#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12
+#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13
+#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14
+#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15
+
+struct gb_control_version_request {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_control_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_control_bundle_version_request {
+ __u8 bundle_id;
+} __packed;
+
+struct gb_control_bundle_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+/* Control protocol manifest get size request has no payload */
+struct gb_control_get_manifest_size_response {
+ __le16 size;
+} __packed;
+
+/* Control protocol manifest get request has no payload */
+struct gb_control_get_manifest_response {
+ __u8 data[0];
+} __packed;
+
+/* Control protocol [dis]connected request */
+struct gb_control_connected_request {
+ __le16 cport_id;
+} __packed;
+
+struct gb_control_disconnecting_request {
+ __le16 cport_id;
+} __packed;
+/* disconnecting response has no payload */
+
+struct gb_control_disconnected_request {
+ __le16 cport_id;
+} __packed;
+/* Control protocol [dis]connected response has no payload */
+
+/*
+ * All Bundle power management operations use the same request and response
+ * layout and status codes.
+ */
+
+#define GB_CONTROL_BUNDLE_PM_OK 0x00
+#define GB_CONTROL_BUNDLE_PM_INVAL 0x01
+#define GB_CONTROL_BUNDLE_PM_BUSY 0x02
+#define GB_CONTROL_BUNDLE_PM_FAIL 0x03
+#define GB_CONTROL_BUNDLE_PM_NA 0x04
+
+struct gb_control_bundle_pm_request {
+ __u8 bundle_id;
+} __packed;
+
+struct gb_control_bundle_pm_response {
+ __u8 status;
+} __packed;
+
+/*
+ * Interface Suspend Prepare and Deactivate Prepare operations use the same
+ * response layout and error codes. Define a single response structure and reuse
+ * it. Both operations have no payload.
+ */
+
+#define GB_CONTROL_INTF_PM_OK 0x00
+#define GB_CONTROL_INTF_PM_BUSY 0x01
+#define GB_CONTROL_INTF_PM_NA 0x02
+
+struct gb_control_intf_pm_response {
+ __u8 status;
+} __packed;
+
+/* APBridge protocol */
+
+/* request APB1 log */
+#define GB_APB_REQUEST_LOG 0x02
+
+/* request to map a cport to bulk in and bulk out endpoints */
+#define GB_APB_REQUEST_EP_MAPPING 0x03
+
+/* request to get the number of cports available */
+#define GB_APB_REQUEST_CPORT_COUNT 0x04
+
+/* request to reset a cport state */
+#define GB_APB_REQUEST_RESET_CPORT 0x05
+
+/* request to time the latency of messages on a given cport */
+#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06
+#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07
+
+/* request to control the CSI transmitter */
+#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08
+
+/* request to control audio streaming */
+#define GB_APB_REQUEST_AUDIO_CONTROL 0x09
+
+/* TimeSync requests */
+#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d
+#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e
+#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f
+#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10
+
+/* requests to set Greybus CPort flags */
+#define GB_APB_REQUEST_CPORT_FLAGS 0x11
+
+/* ARPC request */
+#define GB_APB_REQUEST_ARPC_RUN 0x12
+
+struct gb_apb_request_cport_flags {
+ __le32 flags;
+#define GB_APB_CPORT_FLAG_CONTROL 0x01
+#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02
+} __packed;
+
+
+/* Firmware Download Protocol */
+
+/* Request Types */
+#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01
+#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02
+#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03
+
+#define GB_FIRMWARE_TAG_MAX_SIZE 10
+
+/* firmware download find firmware request/response */
+struct gb_fw_download_find_firmware_request {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+
+struct gb_fw_download_find_firmware_response {
+ __u8 firmware_id;
+ __le32 size;
+} __packed;
+
+/* firmware download fetch firmware request/response */
+struct gb_fw_download_fetch_firmware_request {
+ __u8 firmware_id;
+ __le32 offset;
+ __le32 size;
+} __packed;
+
+/* gb_fw_download_fetch_firmware_response contains no other data */
+
+/* firmware download release firmware request */
+struct gb_fw_download_release_firmware_request {
+ __u8 firmware_id;
+} __packed;
+/* firmware download release firmware response has no payload */
+
+
+/* Firmware Management Protocol */
+
+/* Request Types */
+#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01
+#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02
+#define GB_FW_MGMT_TYPE_LOADED_FW 0x03
+#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04
+#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05
+#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06
+
+#define GB_FW_LOAD_METHOD_UNIPRO 0x01
+#define GB_FW_LOAD_METHOD_INTERNAL 0x02
+
+#define GB_FW_LOAD_STATUS_FAILED 0x00
+#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01
+#define GB_FW_LOAD_STATUS_VALIDATED 0x02
+#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03
+
+#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03
+#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04
+#define GB_FW_BACKEND_FW_STATUS_INT 0x05
+#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06
+#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
+
+#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
+#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04
+#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05
+
+/* firmware management interface firmware version request has no payload */
+struct gb_fw_mgmt_interface_fw_version_response {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+ __le16 major;
+ __le16 minor;
+} __packed;
+
+/* firmware management load and validate firmware request/response */
+struct gb_fw_mgmt_load_and_validate_fw_request {
+ __u8 request_id;
+ __u8 load_method;
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+/* firmware management load and validate firmware response has no payload */
+
+/* firmware management loaded firmware request */
+struct gb_fw_mgmt_loaded_fw_request {
+ __u8 request_id;
+ __u8 status;
+ __le16 major;
+ __le16 minor;
+} __packed;
+/* firmware management loaded firmware response has no payload */
+
+/* firmware management backend firmware version request/response */
+struct gb_fw_mgmt_backend_fw_version_request {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+
+struct gb_fw_mgmt_backend_fw_version_response {
+ __le16 major;
+ __le16 minor;
+ __u8 status;
+} __packed;
+
+/* firmware management backend firmware update request */
+struct gb_fw_mgmt_backend_fw_update_request {
+ __u8 request_id;
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+/* firmware management backend firmware update response has no payload */
+
+/* firmware management backend firmware updated request */
+struct gb_fw_mgmt_backend_fw_updated_request {
+ __u8 request_id;
+ __u8 status;
+} __packed;
+/* firmware management backend firmware updated response has no payload */
+
+
+/* Component Authentication Protocol (CAP) */
+
+/* Request Types */
+#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01
+#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02
+#define GB_CAP_TYPE_AUTHENTICATE 0x03
+
+/* CAP get endpoint uid request has no payload */
+struct gb_cap_get_endpoint_uid_response {
+ __u8 uid[8];
+} __packed;
+
+/* CAP get endpoint ims certificate request/response */
+struct gb_cap_get_ims_certificate_request {
+ __le32 certificate_class;
+ __le32 certificate_id;
+} __packed;
+
+struct gb_cap_get_ims_certificate_response {
+ __u8 result_code;
+ __u8 certificate[];
+} __packed;
+
+/* CAP authenticate request/response */
+struct gb_cap_authenticate_request {
+ __le32 auth_type;
+ __u8 uid[8];
+ __u8 challenge[32];
+} __packed;
+
+struct gb_cap_authenticate_response {
+ __u8 result_code;
+ __u8 response[64];
+ __u8 signature[];
+} __packed;
+
+
+/* Bootrom Protocol */
+
+/* Version of the Greybus bootrom protocol we support */
+#define GB_BOOTROM_VERSION_MAJOR 0x00
+#define GB_BOOTROM_VERSION_MINOR 0x01
+
+/* Greybus bootrom request types */
+#define GB_BOOTROM_TYPE_VERSION 0x01
+#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02
+#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03
+#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04
+#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no payload */
+#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no payload */
+
+/* Greybus bootrom boot stages */
+#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */
+#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */
+#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */
+
+/* Greybus bootrom ready to boot status */
+#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */
+#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */
+#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob is valid and secure */
+
+/* Max bootrom data fetch size in bytes */
+#define GB_BOOTROM_FETCH_MAX 2000
+
+struct gb_bootrom_version_request {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_bootrom_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+/* Bootrom protocol firmware size request/response */
+struct gb_bootrom_firmware_size_request {
+ __u8 stage;
+} __packed;
+
+struct gb_bootrom_firmware_size_response {
+ __le32 size;
+} __packed;
+
+/* Bootrom protocol get firmware request/response */
+struct gb_bootrom_get_firmware_request {
+ __le32 offset;
+ __le32 size;
+} __packed;
+
+/* gb_bootrom_get_firmware_response contains no other data */
+
+/* Bootrom protocol Ready to boot request */
+struct gb_bootrom_ready_to_boot_request {
+ __u8 status;
+} __packed;
+/* Bootrom protocol Ready to boot response has no payload */
+
+/* Bootrom protocol get VID/PID request has no payload */
+struct gb_bootrom_get_vid_pid_response {
+ __le32 vendor_id;
+ __le32 product_id;
+} __packed;
+
+
+/* Power Supply */
+
+/* Greybus power supply request types */
+#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02
+#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03
+#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04
+#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05
+#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06
+#define GB_POWER_SUPPLY_TYPE_EVENT 0x07
+
+/* Greybus power supply battery technologies types */
+#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_TECH_NiMH 0x0001
+#define GB_POWER_SUPPLY_TECH_LION 0x0002
+#define GB_POWER_SUPPLY_TECH_LIPO 0x0003
+#define GB_POWER_SUPPLY_TECH_LiFe 0x0004
+#define GB_POWER_SUPPLY_TECH_NiCd 0x0005
+#define GB_POWER_SUPPLY_TECH_LiMn 0x0006
+
+/* Greybus power supply types */
+#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000
+#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001
+#define GB_POWER_SUPPLY_UPS_TYPE 0x0002
+#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003
+#define GB_POWER_SUPPLY_USB_TYPE 0x0004
+#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005
+#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006
+#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007
+
+/* Greybus power supply health values */
+#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001
+#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002
+#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003
+#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004
+#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005
+#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006
+#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007
+#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008
+
+/* Greybus power supply status values */
+#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001
+#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002
+#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003
+#define GB_POWER_SUPPLY_STATUS_FULL 0x0004
+
+/* Greybus power supply capacity level values */
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005
+
+/* Greybus power supply scope values */
+#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001
+#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002
+
+struct gb_power_supply_get_supplies_response {
+ __u8 supplies_count;
+} __packed;
+
+struct gb_power_supply_get_description_request {
+ __u8 psy_id;
+} __packed;
+
+struct gb_power_supply_get_description_response {
+ __u8 manufacturer[32];
+ __u8 model[32];
+ __u8 serial_number[32];
+ __le16 type;
+ __u8 properties_count;
+} __packed;
+
+struct gb_power_supply_props_desc {
+ __u8 property;
+#define GB_POWER_SUPPLY_PROP_STATUS 0x00
+#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01
+#define GB_POWER_SUPPLY_PROP_HEALTH 0x02
+#define GB_POWER_SUPPLY_PROP_PRESENT 0x03
+#define GB_POWER_SUPPLY_PROP_ONLINE 0x04
+#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05
+#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06
+#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F
+#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10
+#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11
+#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12
+#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13
+#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14
+#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15
+#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16
+#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17
+#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18
+#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19
+#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A
+#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B
+#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20
+#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21
+#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22
+#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23
+#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24
+#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25
+#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26
+#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27
+#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28
+#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29
+#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A
+#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B
+#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C
+#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D
+#define GB_POWER_SUPPLY_PROP_TEMP 0x2E
+#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F
+#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30
+#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31
+#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35
+#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36
+#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37
+#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38
+#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39
+#define GB_POWER_SUPPLY_PROP_TYPE 0x3A
+#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B
+#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C
+#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D
+ __u8 is_writeable;
+} __packed;
+
+struct gb_power_supply_get_property_descriptors_request {
+ __u8 psy_id;
+} __packed;
+
+struct gb_power_supply_get_property_descriptors_response {
+ __u8 properties_count;
+ struct gb_power_supply_props_desc props[];
+} __packed;
+
+struct gb_power_supply_get_property_request {
+ __u8 psy_id;
+ __u8 property;
+} __packed;
+
+struct gb_power_supply_get_property_response {
+ __le32 prop_val;
+};
+
+struct gb_power_supply_set_property_request {
+ __u8 psy_id;
+ __u8 property;
+ __le32 prop_val;
+} __packed;
+
+struct gb_power_supply_event_request {
+ __u8 psy_id;
+ __u8 event;
+#define GB_POWER_SUPPLY_UPDATE 0x01
+} __packed;
+
+
+/* HID */
+
+/* Greybus HID operation types */
+#define GB_HID_TYPE_GET_DESC 0x02
+#define GB_HID_TYPE_GET_REPORT_DESC 0x03
+#define GB_HID_TYPE_PWR_ON 0x04
+#define GB_HID_TYPE_PWR_OFF 0x05
+#define GB_HID_TYPE_GET_REPORT 0x06
+#define GB_HID_TYPE_SET_REPORT 0x07
+#define GB_HID_TYPE_IRQ_EVENT 0x08
+
+/* Report type */
+#define GB_HID_INPUT_REPORT 0
+#define GB_HID_OUTPUT_REPORT 1
+#define GB_HID_FEATURE_REPORT 2
+
+/* Different request/response structures */
+/* HID get descriptor response */
+struct gb_hid_desc_response {
+ __u8 bLength;
+ __le16 wReportDescLength;
+ __le16 bcdHID;
+ __le16 wProductID;
+ __le16 wVendorID;
+ __u8 bCountryCode;
+} __packed;
+
+/* HID get report request/response */
+struct gb_hid_get_report_request {
+ __u8 report_type;
+ __u8 report_id;
+} __packed;
+
+/* HID set report request */
+struct gb_hid_set_report_request {
+ __u8 report_type;
+ __u8 report_id;
+ __u8 report[];
+} __packed;
+
+/* HID input report request, via interrupt pipe */
+struct gb_hid_input_report_request {
+ __u8 report[0];
+} __packed;
+
+
+/* I2C */
+
+/* Greybus i2c request types */
+#define GB_I2C_TYPE_FUNCTIONALITY 0x02
+#define GB_I2C_TYPE_TRANSFER 0x05
+
+/* functionality request has no payload */
+struct gb_i2c_functionality_response {
+ __le32 functionality;
+} __packed;
+
+/*
+ * Outgoing data immediately follows the op count and ops array.
+ * The data for each write (master -> slave) op in the array is sent
+ * in order, with no (e.g. pad) bytes separating them.
+ *
+ * Short reads cause the entire transfer request to fail, so the
+ * response payload consists only of the bytes read, and the number of
+ * bytes is exactly what was specified in the corresponding op. Like
+ * outgoing data, the incoming data is in order and contiguous.
+ */
+struct gb_i2c_transfer_op {
+ __le16 addr;
+ __le16 flags;
+ __le16 size;
+} __packed;
+
+struct gb_i2c_transfer_request {
+ __le16 op_count;
+ struct gb_i2c_transfer_op ops[]; /* op_count of these */
+} __packed;
+struct gb_i2c_transfer_response {
+ __u8 data[0]; /* inbound data */
+} __packed;
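
Because write payloads are concatenated directly after the ops array, the wire size of a request is straightforward to compute; a sketch (helper name illustrative):

static size_t example_i2c_request_size(u16 op_count, size_t write_bytes)
{
	return sizeof(struct gb_i2c_transfer_request) +
	       op_count * sizeof(struct gb_i2c_transfer_op) +
	       write_bytes;	/* outgoing data for the write ops */
}
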
+
+
+/* GPIO */
+
+/* Greybus GPIO request types */
+#define GB_GPIO_TYPE_LINE_COUNT 0x02
+#define GB_GPIO_TYPE_ACTIVATE 0x03
+#define GB_GPIO_TYPE_DEACTIVATE 0x04
+#define GB_GPIO_TYPE_GET_DIRECTION 0x05
+#define GB_GPIO_TYPE_DIRECTION_IN 0x06
+#define GB_GPIO_TYPE_DIRECTION_OUT 0x07
+#define GB_GPIO_TYPE_GET_VALUE 0x08
+#define GB_GPIO_TYPE_SET_VALUE 0x09
+#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a
+#define GB_GPIO_TYPE_IRQ_TYPE 0x0b
+#define GB_GPIO_TYPE_IRQ_MASK 0x0c
+#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d
+#define GB_GPIO_TYPE_IRQ_EVENT 0x0e
+
+#define GB_GPIO_IRQ_TYPE_NONE 0x00
+#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01
+#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
+#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
+#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
+#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
+
+/* line count request has no payload */
+struct gb_gpio_line_count_response {
+ __u8 count;
+} __packed;
+
+struct gb_gpio_activate_request {
+ __u8 which;
+} __packed;
+/* activate response has no payload */
+
+struct gb_gpio_deactivate_request {
+ __u8 which;
+} __packed;
+/* deactivate response has no payload */
+
+struct gb_gpio_get_direction_request {
+ __u8 which;
+} __packed;
+struct gb_gpio_get_direction_response {
+ __u8 direction;
+} __packed;
+
+struct gb_gpio_direction_in_request {
+ __u8 which;
+} __packed;
+/* direction in response has no payload */
+
+struct gb_gpio_direction_out_request {
+ __u8 which;
+ __u8 value;
+} __packed;
+/* direction out response has no payload */
+
+struct gb_gpio_get_value_request {
+ __u8 which;
+} __packed;
+struct gb_gpio_get_value_response {
+ __u8 value;
+} __packed;
+
+struct gb_gpio_set_value_request {
+ __u8 which;
+ __u8 value;
+} __packed;
+/* set value response has no payload */
+
+struct gb_gpio_set_debounce_request {
+ __u8 which;
+ __le16 usec;
+} __packed;
+/* debounce response has no payload */
+
+struct gb_gpio_irq_type_request {
+ __u8 which;
+ __u8 type;
+} __packed;
+/* irq type response has no payload */
+
+struct gb_gpio_irq_mask_request {
+ __u8 which;
+} __packed;
+/* irq mask response has no payload */
+
+struct gb_gpio_irq_unmask_request {
+ __u8 which;
+} __packed;
+/* irq unmask response has no payload */
+
+/* irq event requests originate on another module and are handled on the AP */
+struct gb_gpio_irq_event_request {
+ __u8 which;
+} __packed;
+/* irq event has no response */
+
+
+/* PWM */
+
+/* Greybus PWM operation types */
+#define GB_PWM_TYPE_PWM_COUNT 0x02
+#define GB_PWM_TYPE_ACTIVATE 0x03
+#define GB_PWM_TYPE_DEACTIVATE 0x04
+#define GB_PWM_TYPE_CONFIG 0x05
+#define GB_PWM_TYPE_POLARITY 0x06
+#define GB_PWM_TYPE_ENABLE 0x07
+#define GB_PWM_TYPE_DISABLE 0x08
+
+/* pwm count request has no payload */
+struct gb_pwm_count_response {
+ __u8 count;
+} __packed;
+
+struct gb_pwm_activate_request {
+ __u8 which;
+} __packed;
+
+struct gb_pwm_deactivate_request {
+ __u8 which;
+} __packed;
+
+struct gb_pwm_config_request {
+ __u8 which;
+ __le32 duty;
+ __le32 period;
+} __packed;
+
+struct gb_pwm_polarity_request {
+ __u8 which;
+ __u8 polarity;
+} __packed;
+
+struct gb_pwm_enable_request {
+ __u8 which;
+} __packed;
+
+struct gb_pwm_disable_request {
+ __u8 which;
+} __packed;
+
+/* SPI */
+
+/* Should match up with modes in linux/spi/spi.h */
+#define GB_SPI_MODE_CPHA 0x01 /* clock phase */
+#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */
+#define GB_SPI_MODE_MODE_0 (0 | 0) /* (original MicroWire) */
+#define GB_SPI_MODE_MODE_1 (0 | GB_SPI_MODE_CPHA)
+#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL | 0)
+#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL | GB_SPI_MODE_CPHA)
+#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */
+#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */
+#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */
+#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */
+#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */
+#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */
+
+/* Should match up with flags in linux/spi/spi.h */
+#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */
+#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */
+#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */
+
+/* Greybus spi operation types */
+#define GB_SPI_TYPE_MASTER_CONFIG 0x02
+#define GB_SPI_TYPE_DEVICE_CONFIG 0x03
+#define GB_SPI_TYPE_TRANSFER 0x04
+
+/* mode request has no payload */
+struct gb_spi_master_config_response {
+ __le32 bits_per_word_mask;
+ __le32 min_speed_hz;
+ __le32 max_speed_hz;
+ __le16 mode;
+ __le16 flags;
+ __u8 num_chipselect;
+} __packed;
+
+struct gb_spi_device_config_request {
+ __u8 chip_select;
+} __packed;
+
+struct gb_spi_device_config_response {
+ __le16 mode;
+ __u8 bits_per_word;
+ __le32 max_speed_hz;
+ __u8 device_type;
+#define GB_SPI_SPI_DEV 0x00
+#define GB_SPI_SPI_NOR 0x01
+#define GB_SPI_SPI_MODALIAS 0x02
+ __u8 name[32];
+} __packed;
+
+/**
+ * struct gb_spi_transfer - a read/write buffer pair
+ * @speed_hz: Select a speed other than the device default for this transfer. If
+ * 0 the default (from @spi_device) is used.
+ * @len: size of rx and tx buffers (in bytes)
+ * @delay_usecs: microseconds to delay after this transfer before (optionally)
+ * changing the chipselect status, then starting the next transfer or
+ * completing this spi_message.
+ * @cs_change: affects chipselect after this transfer completes
+ * @bits_per_word: select a bits_per_word other than the device default for this
+ * transfer. If 0 the default (from @spi_device) is used.
+ */
+struct gb_spi_transfer {
+ __le32 speed_hz;
+ __le32 len;
+ __le16 delay_usecs;
+ __u8 cs_change;
+ __u8 bits_per_word;
+ __u8 xfer_flags;
+#define GB_SPI_XFER_READ 0x01
+#define GB_SPI_XFER_WRITE 0x02
+#define GB_SPI_XFER_INPROGRESS 0x04
+} __packed;
+
+struct gb_spi_transfer_request {
+ __u8 chip_select; /* of the spi device */
+ __u8 mode; /* of the spi device */
+ __le16 count;
+ struct gb_spi_transfer transfers[]; /* count of these */
+} __packed;
+
+struct gb_spi_transfer_response {
+ __u8 data[0]; /* inbound data */
+} __packed;
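
A sketch of one transfer entry for a 4-byte full-duplex exchange, with illustrative values; note every multi-byte field is little-endian on the wire:

struct gb_spi_transfer xfer = {
	.speed_hz	= cpu_to_le32(1000000),	/* 1 MHz for this transfer */
	.len		= cpu_to_le32(4),
	.xfer_flags	= GB_SPI_XFER_READ | GB_SPI_XFER_WRITE,
};
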
+
+/* Version of the Greybus SVC protocol we support */
+#define GB_SVC_VERSION_MAJOR 0x00
+#define GB_SVC_VERSION_MINOR 0x01
+
+/* Greybus SVC request types */
+#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01
+#define GB_SVC_TYPE_SVC_HELLO 0x02
+#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03
+#define GB_SVC_TYPE_INTF_RESET 0x06
+#define GB_SVC_TYPE_CONN_CREATE 0x07
+#define GB_SVC_TYPE_CONN_DESTROY 0x08
+#define GB_SVC_TYPE_DME_PEER_GET 0x09
+#define GB_SVC_TYPE_DME_PEER_SET 0x0a
+#define GB_SVC_TYPE_ROUTE_CREATE 0x0b
+#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c
+#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d
+#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e
+#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f
+#define GB_SVC_TYPE_INTF_SET_PWRM 0x10
+#define GB_SVC_TYPE_INTF_EJECT 0x11
+#define GB_SVC_TYPE_KEY_EVENT 0x12
+#define GB_SVC_TYPE_PING 0x13
+#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14
+#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15
+#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16
+#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17
+#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18
+#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19
+#define GB_SVC_TYPE_TIMESYNC_PING 0x1a
+#define GB_SVC_TYPE_MODULE_INSERTED 0x1f
+#define GB_SVC_TYPE_MODULE_REMOVED 0x20
+#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21
+#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22
+#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23
+#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24
+#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25
+#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26
+#define GB_SVC_TYPE_INTF_ACTIVATE 0x27
+#define GB_SVC_TYPE_INTF_RESUME 0x28
+#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29
+#define GB_SVC_TYPE_INTF_OOPS 0x2a
+
+/* Greybus SVC protocol status values */
+#define GB_SVC_OP_SUCCESS 0x00
+#define GB_SVC_OP_UNKNOWN_ERROR 0x01
+#define GB_SVC_INTF_NOT_DETECTED 0x02
+#define GB_SVC_INTF_NO_UPRO_LINK 0x03
+#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04
+#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05
+#define GB_SVC_INTF_NO_V_SYS 0x06
+#define GB_SVC_INTF_V_CHG 0x07
+#define GB_SVC_INTF_WAKE_BUSY 0x08
+#define GB_SVC_INTF_NO_REFCLK 0x09
+#define GB_SVC_INTF_RELEASING 0x0a
+#define GB_SVC_INTF_NO_ORDER 0x0b
+#define GB_SVC_INTF_MBOX_SET 0x0c
+#define GB_SVC_INTF_BAD_MBOX 0x0d
+#define GB_SVC_INTF_OP_TIMEOUT 0x0e
+#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f
+
+struct gb_svc_version_request {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_svc_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+/* SVC protocol hello request */
+struct gb_svc_hello_request {
+ __le16 endo_id;
+ __u8 interface_id;
+} __packed;
+/* hello response has no payload */
+
+struct gb_svc_intf_device_id_request {
+ __u8 intf_id;
+ __u8 device_id;
+} __packed;
+/* device id response has no payload */
+
+struct gb_svc_intf_reset_request {
+ __u8 intf_id;
+} __packed;
+/* interface reset response has no payload */
+
+struct gb_svc_intf_eject_request {
+ __u8 intf_id;
+} __packed;
+/* interface eject response has no payload */
+
+struct gb_svc_conn_create_request {
+ __u8 intf1_id;
+ __le16 cport1_id;
+ __u8 intf2_id;
+ __le16 cport2_id;
+ __u8 tc;
+ __u8 flags;
+} __packed;
+/* connection create response has no payload */
+
+struct gb_svc_conn_destroy_request {
+ __u8 intf1_id;
+ __le16 cport1_id;
+ __u8 intf2_id;
+ __le16 cport2_id;
+} __packed;
+/* connection destroy response has no payload */
+
+struct gb_svc_dme_peer_get_request {
+ __u8 intf_id;
+ __le16 attr;
+ __le16 selector;
+} __packed;
+
+struct gb_svc_dme_peer_get_response {
+ __le16 result_code;
+ __le32 attr_value;
+} __packed;
+
+struct gb_svc_dme_peer_set_request {
+ __u8 intf_id;
+ __le16 attr;
+ __le16 selector;
+ __le32 value;
+} __packed;
+
+struct gb_svc_dme_peer_set_response {
+ __le16 result_code;
+} __packed;
+
+/* Greybus init-status values, currently retrieved using DME peer gets. */
+#define GB_INIT_SPI_BOOT_STARTED 0x02
+#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03
+#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04
+#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06
+#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09
+#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D
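+
+/*
+ * Illustrative sketch (an editor's addition): the init status arrives as
+ * the attr_value of a DME peer get response, so checking for a finished
+ * SPI boot is a simple comparison. The helper name is hypothetical.
+ */
+#if 0
+static bool example_spi_boot_finished(const struct gb_svc_dme_peer_get_response *resp)
+{
+ u32 status = le32_to_cpu(resp->attr_value);
+
+ return status == GB_INIT_TRUSTED_SPI_BOOT_FINISHED ||
+ status == GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED;
+}
+#endif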
+
+struct gb_svc_route_create_request {
+ __u8 intf1_id;
+ __u8 dev1_id;
+ __u8 intf2_id;
+ __u8 dev2_id;
+} __packed;
+/* route create response has no payload */
+
+struct gb_svc_route_destroy_request {
+ __u8 intf1_id;
+ __u8 intf2_id;
+} __packed;
+/* route destroy response has no payload */
+
+/* used for svc_intf_vsys_{enable,disable} */
+struct gb_svc_intf_vsys_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_vsys_response {
+ __u8 result_code;
+#define GB_SVC_INTF_VSYS_OK 0x00
+ /* 0x01 is reserved */
+#define GB_SVC_INTF_VSYS_FAIL 0x02
+} __packed;
+
+/* used for svc_intf_refclk_{enable,disable} */
+struct gb_svc_intf_refclk_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_refclk_response {
+ __u8 result_code;
+#define GB_SVC_INTF_REFCLK_OK 0x00
+ /* 0x01 is reserved */
+#define GB_SVC_INTF_REFCLK_FAIL 0x02
+} __packed;
+
+/* used for svc_intf_unipro_{enable,disable} */
+struct gb_svc_intf_unipro_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_unipro_response {
+ __u8 result_code;
+#define GB_SVC_INTF_UNIPRO_OK 0x00
+ /* 0x01 is reserved */
+#define GB_SVC_INTF_UNIPRO_FAIL 0x02
+#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03
+} __packed;
+
+#define GB_SVC_UNIPRO_FAST_MODE 0x01
+#define GB_SVC_UNIPRO_SLOW_MODE 0x02
+#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04
+#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05
+#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07
+#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11
+#define GB_SVC_UNIPRO_OFF_MODE 0x12
+
+#define GB_SVC_SMALL_AMPLITUDE 0x01
+#define GB_SVC_LARGE_AMPLITUDE 0x02
+
+#define GB_SVC_NO_DE_EMPHASIS 0x00
+#define GB_SVC_SMALL_DE_EMPHASIS 0x01
+#define GB_SVC_LARGE_DE_EMPHASIS 0x02
+
+#define GB_SVC_PWRM_RXTERMINATION 0x01
+#define GB_SVC_PWRM_TXTERMINATION 0x02
+#define GB_SVC_PWRM_LINE_RESET 0x04
+#define GB_SVC_PWRM_SCRAMBLING 0x20
+
+#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001
+
+#define GB_SVC_UNIPRO_HS_SERIES_A 0x01
+#define GB_SVC_UNIPRO_HS_SERIES_B 0x02
+
+#define GB_SVC_SETPWRM_PWR_OK 0x00
+#define GB_SVC_SETPWRM_PWR_LOCAL 0x01
+#define GB_SVC_SETPWRM_PWR_REMOTE 0x02
+#define GB_SVC_SETPWRM_PWR_BUSY 0x03
+#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04
+#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05
+
+struct gb_svc_l2_timer_cfg {
+ __le16 tsb_fc0_protection_timeout;
+ __le16 tsb_tc0_replay_timeout;
+ __le16 tsb_afc0_req_timeout;
+ __le16 tsb_fc1_protection_timeout;
+ __le16 tsb_tc1_replay_timeout;
+ __le16 tsb_afc1_req_timeout;
+ __le16 reserved_for_tc2[3];
+ __le16 reserved_for_tc3[3];
+} __packed;
+
+struct gb_svc_intf_set_pwrm_request {
+ __u8 intf_id;
+ __u8 hs_series;
+ __u8 tx_mode;
+ __u8 tx_gear;
+ __u8 tx_nlanes;
+ __u8 tx_amplitude;
+ __u8 tx_hs_equalizer;
+ __u8 rx_mode;
+ __u8 rx_gear;
+ __u8 rx_nlanes;
+ __u8 flags;
+ __le32 quirks;
+ struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata;
+} __packed;
+
+struct gb_svc_intf_set_pwrm_response {
+ __u8 result_code;
+} __packed;
+
+struct gb_svc_key_event_request {
+ __le16 key_code;
+#define GB_KEYCODE_ARA 0x00
+
+ __u8 key_event;
+#define GB_SVC_KEY_RELEASED 0x00
+#define GB_SVC_KEY_PRESSED 0x01
+} __packed;
+
+#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254
+
+struct gb_svc_pwrmon_rail_count_get_response {
+ __u8 rail_count;
+} __packed;
+
+#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32
+
+struct gb_svc_pwrmon_rail_names_get_response {
+ __u8 status;
+ __u8 name[][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+} __packed;
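+
+/*
+ * Illustrative sketch (an editor's addition): the response above carries no
+ * explicit name count, so a receiver derives it from the payload size. The
+ * helper name and the payload_size parameter are hypothetical.
+ */
+#if 0
+static unsigned int example_rail_name_count(size_t payload_size)
+{
+ const size_t hdr = sizeof(struct gb_svc_pwrmon_rail_names_get_response);
+
+ if (payload_size < hdr)
+ return 0;
+ return (payload_size - hdr) / GB_SVC_PWRMON_RAIL_NAME_BUFSIZE;
+}
+#endif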
+
+#define GB_SVC_PWRMON_TYPE_CURR 0x01
+#define GB_SVC_PWRMON_TYPE_VOL 0x02
+#define GB_SVC_PWRMON_TYPE_PWR 0x03
+
+#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00
+#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01
+#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02
+#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03
+
+struct gb_svc_pwrmon_sample_get_request {
+ __u8 rail_id;
+ __u8 measurement_type;
+} __packed;
+
+struct gb_svc_pwrmon_sample_get_response {
+ __u8 result;
+ __le32 measurement;
+} __packed;
+
+struct gb_svc_pwrmon_intf_sample_get_request {
+ __u8 intf_id;
+ __u8 measurement_type;
+} __packed;
+
+struct gb_svc_pwrmon_intf_sample_get_response {
+ __u8 result;
+ __le32 measurement;
+} __packed;
+
+#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001
+
+struct gb_svc_module_inserted_request {
+ __u8 primary_intf_id;
+ __u8 intf_count;
+ __le16 flags;
+} __packed;
+/* module_inserted response has no payload */
+
+struct gb_svc_module_removed_request {
+ __u8 primary_intf_id;
+} __packed;
+/* module_removed response has no payload */
+
+struct gb_svc_intf_activate_request {
+ __u8 intf_id;
+} __packed;
+
+#define GB_SVC_INTF_TYPE_UNKNOWN 0x00
+#define GB_SVC_INTF_TYPE_DUMMY 0x01
+#define GB_SVC_INTF_TYPE_UNIPRO 0x02
+#define GB_SVC_INTF_TYPE_GREYBUS 0x03
+
+struct gb_svc_intf_activate_response {
+ __u8 status;
+ __u8 intf_type;
+} __packed;
+
+struct gb_svc_intf_resume_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_resume_response {
+ __u8 status;
+} __packed;
+
+#define GB_SVC_INTF_MAILBOX_NONE 0x00
+#define GB_SVC_INTF_MAILBOX_AP 0x01
+#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02
+
+struct gb_svc_intf_mailbox_event_request {
+ __u8 intf_id;
+ __le16 result_code;
+ __le32 mailbox;
+} __packed;
+/* intf_mailbox_event response has no payload */
+
+struct gb_svc_intf_oops_request {
+ __u8 intf_id;
+ __u8 reason;
+} __packed;
+/* intf_oops response has no payload */
+
+
+/* RAW */
+
+/* Greybus raw request types */
+#define GB_RAW_TYPE_SEND 0x02
+
+struct gb_raw_send_request {
+ __le32 len;
+ __u8 data[];
+} __packed;
+
+
+/* UART */
+
+/* Greybus UART operation types */
+#define GB_UART_TYPE_SEND_DATA 0x02
+#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */
+#define GB_UART_TYPE_SET_LINE_CODING 0x04
+#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05
+#define GB_UART_TYPE_SEND_BREAK 0x06
+#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */
+#define GB_UART_TYPE_RECEIVE_CREDITS 0x08
+#define GB_UART_TYPE_FLUSH_FIFOS 0x09
+
+/* Represents data from AP -> Module */
+struct gb_uart_send_data_request {
+ __le16 size;
+ __u8 data[];
+} __packed;
+
+/* recv-data-request flags */
+#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */
+#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */
+#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */
+#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */
+
+/* Represents data from Module -> AP */
+struct gb_uart_recv_data_request {
+ __le16 size;
+ __u8 flags;
+ __u8 data[];
+} __packed;
+
+struct gb_uart_receive_credits_request {
+ __le16 count;
+} __packed;
+
+struct gb_uart_set_line_coding_request {
+ __le32 rate;
+ __u8 format;
+#define GB_SERIAL_1_STOP_BITS 0
+#define GB_SERIAL_1_5_STOP_BITS 1
+#define GB_SERIAL_2_STOP_BITS 2
+
+ __u8 parity;
+#define GB_SERIAL_NO_PARITY 0
+#define GB_SERIAL_ODD_PARITY 1
+#define GB_SERIAL_EVEN_PARITY 2
+#define GB_SERIAL_MARK_PARITY 3
+#define GB_SERIAL_SPACE_PARITY 4
+
+ __u8 data_bits;
+
+ __u8 flow_control;
+#define GB_SERIAL_AUTO_RTSCTS_EN 0x1
+} __packed;
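+
+/*
+ * Illustrative sketch (an editor's addition): encoding a 115200 8N1 line
+ * configuration with hardware flow control. The helper name is
+ * hypothetical; the fields and GB_SERIAL_* values are from this header.
+ */
+#if 0
+static void example_fill_line_coding(struct gb_uart_set_line_coding_request *req)
+{
+ req->rate = cpu_to_le32(115200);
+ req->format = GB_SERIAL_1_STOP_BITS;
+ req->parity = GB_SERIAL_NO_PARITY;
+ req->data_bits = 8;
+ req->flow_control = GB_SERIAL_AUTO_RTSCTS_EN;
+}
+#endif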
+
+/* output control lines */
+#define GB_UART_CTRL_DTR 0x01
+#define GB_UART_CTRL_RTS 0x02
+
+struct gb_uart_set_control_line_state_request {
+ __u8 control;
+} __packed;
+
+struct gb_uart_set_break_request {
+ __u8 state;
+} __packed;
+
+/* input control lines and line errors */
+#define GB_UART_CTRL_DCD 0x01
+#define GB_UART_CTRL_DSR 0x02
+#define GB_UART_CTRL_RI 0x04
+
+struct gb_uart_serial_state_request {
+ __u8 control;
+} __packed;
+
+struct gb_uart_serial_flush_request {
+ __u8 flags;
+#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01
+#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02
+} __packed;
+
+/* Loopback */
+
+/* Greybus loopback request types */
+#define GB_LOOPBACK_TYPE_PING 0x02
+#define GB_LOOPBACK_TYPE_TRANSFER 0x03
+#define GB_LOOPBACK_TYPE_SINK 0x04
+
+/*
+ * Loopback request/response header format should be identical
+ * to simplify bandwidth and data movement analysis.
+ */
+struct gb_loopback_transfer_request {
+ __le32 len;
+ __le32 reserved0;
+ __le32 reserved1;
+ __u8 data[];
+} __packed;
+
+struct gb_loopback_transfer_response {
+ __le32 len;
+ __le32 reserved0;
+ __le32 reserved1;
+ __u8 data[];
+} __packed;
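+
+/*
+ * Illustrative check (an editor's addition): the identical-layout
+ * requirement stated above can be asserted at compile time.
+ */
+#if 0
+static_assert(sizeof(struct gb_loopback_transfer_request) ==
+ sizeof(struct gb_loopback_transfer_response),
+ "loopback request/response headers must match");
+#endif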
+
+/* SDIO */
+/* Greybus SDIO operation types */
+#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02
+#define GB_SDIO_TYPE_SET_IOS 0x03
+#define GB_SDIO_TYPE_COMMAND 0x04
+#define GB_SDIO_TYPE_TRANSFER 0x05
+#define GB_SDIO_TYPE_EVENT 0x06
+
+/* get caps response: request has no payload */
+struct gb_sdio_get_caps_response {
+ __le32 caps;
+#define GB_SDIO_CAP_NONREMOVABLE 0x00000001
+#define GB_SDIO_CAP_4_BIT_DATA 0x00000002
+#define GB_SDIO_CAP_8_BIT_DATA 0x00000004
+#define GB_SDIO_CAP_MMC_HS 0x00000008
+#define GB_SDIO_CAP_SD_HS 0x00000010
+#define GB_SDIO_CAP_ERASE 0x00000020
+#define GB_SDIO_CAP_1_2V_DDR 0x00000040
+#define GB_SDIO_CAP_1_8V_DDR 0x00000080
+#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100
+#define GB_SDIO_CAP_UHS_SDR12 0x00000200
+#define GB_SDIO_CAP_UHS_SDR25 0x00000400
+#define GB_SDIO_CAP_UHS_SDR50 0x00000800
+#define GB_SDIO_CAP_UHS_SDR104 0x00001000
+#define GB_SDIO_CAP_UHS_DDR50 0x00002000
+#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000
+#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000
+#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000
+#define GB_SDIO_CAP_HS200_1_2V 0x00020000
+#define GB_SDIO_CAP_HS200_1_8V 0x00040000
+#define GB_SDIO_CAP_HS400_1_2V 0x00080000
+#define GB_SDIO_CAP_HS400_1_8V 0x00100000
+
+ /* OCR bitmask: see the GB_SDIO_VDD_* values at vdd below */
+ __le32 ocr;
+ __le32 f_min;
+ __le32 f_max;
+ __le16 max_blk_count;
+ __le16 max_blk_size;
+} __packed;
+
+/* set ios request: response has no payload */
+struct gb_sdio_set_ios_request {
+ __le32 clock;
+ __le32 vdd;
+#define GB_SDIO_VDD_165_195 0x00000001
+#define GB_SDIO_VDD_20_21 0x00000002
+#define GB_SDIO_VDD_21_22 0x00000004
+#define GB_SDIO_VDD_22_23 0x00000008
+#define GB_SDIO_VDD_23_24 0x00000010
+#define GB_SDIO_VDD_24_25 0x00000020
+#define GB_SDIO_VDD_25_26 0x00000040
+#define GB_SDIO_VDD_26_27 0x00000080
+#define GB_SDIO_VDD_27_28 0x00000100
+#define GB_SDIO_VDD_28_29 0x00000200
+#define GB_SDIO_VDD_29_30 0x00000400
+#define GB_SDIO_VDD_30_31 0x00000800
+#define GB_SDIO_VDD_31_32 0x00001000
+#define GB_SDIO_VDD_32_33 0x00002000
+#define GB_SDIO_VDD_33_34 0x00004000
+#define GB_SDIO_VDD_34_35 0x00008000
+#define GB_SDIO_VDD_35_36 0x00010000
+
+ __u8 bus_mode;
+#define GB_SDIO_BUSMODE_OPENDRAIN 0x00
+#define GB_SDIO_BUSMODE_PUSHPULL 0x01
+
+ __u8 power_mode;
+#define GB_SDIO_POWER_OFF 0x00
+#define GB_SDIO_POWER_UP 0x01
+#define GB_SDIO_POWER_ON 0x02
+#define GB_SDIO_POWER_UNDEFINED 0x03
+
+ __u8 bus_width;
+#define GB_SDIO_BUS_WIDTH_1 0x00
+#define GB_SDIO_BUS_WIDTH_4 0x02
+#define GB_SDIO_BUS_WIDTH_8 0x03
+
+ __u8 timing;
+#define GB_SDIO_TIMING_LEGACY 0x00
+#define GB_SDIO_TIMING_MMC_HS 0x01
+#define GB_SDIO_TIMING_SD_HS 0x02
+#define GB_SDIO_TIMING_UHS_SDR12 0x03
+#define GB_SDIO_TIMING_UHS_SDR25 0x04
+#define GB_SDIO_TIMING_UHS_SDR50 0x05
+#define GB_SDIO_TIMING_UHS_SDR104 0x06
+#define GB_SDIO_TIMING_UHS_DDR50 0x07
+#define GB_SDIO_TIMING_MMC_DDR52 0x08
+#define GB_SDIO_TIMING_MMC_HS200 0x09
+#define GB_SDIO_TIMING_MMC_HS400 0x0A
+
+ __u8 signal_voltage;
+#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00
+#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01
+#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02
+
+ __u8 drv_type;
+#define GB_SDIO_SET_DRIVER_TYPE_B 0x00
+#define GB_SDIO_SET_DRIVER_TYPE_A 0x01
+#define GB_SDIO_SET_DRIVER_TYPE_C 0x02
+#define GB_SDIO_SET_DRIVER_TYPE_D 0x03
+} __packed;
+
+/* command request */
+struct gb_sdio_command_request {
+ __u8 cmd;
+ __u8 cmd_flags;
+#define GB_SDIO_RSP_NONE 0x00
+#define GB_SDIO_RSP_PRESENT 0x01
+#define GB_SDIO_RSP_136 0x02
+#define GB_SDIO_RSP_CRC 0x04
+#define GB_SDIO_RSP_BUSY 0x08
+#define GB_SDIO_RSP_OPCODE 0x10
+
+ __u8 cmd_type;
+#define GB_SDIO_CMD_AC 0x00
+#define GB_SDIO_CMD_ADTC 0x01
+#define GB_SDIO_CMD_BC 0x02
+#define GB_SDIO_CMD_BCR 0x03
+
+ __le32 cmd_arg;
+ __le16 data_blocks;
+ __le16 data_blksz;
+} __packed;
+
+struct gb_sdio_command_response {
+ __le32 resp[4];
+} __packed;
+
+/* transfer request */
+struct gb_sdio_transfer_request {
+ __u8 data_flags;
+#define GB_SDIO_DATA_WRITE 0x01
+#define GB_SDIO_DATA_READ 0x02
+#define GB_SDIO_DATA_STREAM 0x04
+
+ __le16 data_blocks;
+ __le16 data_blksz;
+ __u8 data[];
+} __packed;
+
+struct gb_sdio_transfer_response {
+ __le16 data_blocks;
+ __le16 data_blksz;
+ __u8 data[];
+} __packed;
+
+/* event request: generated by module and is defined as unidirectional */
+struct gb_sdio_event_request {
+ __u8 event;
+#define GB_SDIO_CARD_INSERTED 0x01
+#define GB_SDIO_CARD_REMOVED 0x02
+#define GB_SDIO_WP 0x04
+} __packed;
+
+/* Camera */
+
+/* Greybus Camera request types */
+#define GB_CAMERA_TYPE_CAPABILITIES 0x02
+#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03
+#define GB_CAMERA_TYPE_CAPTURE 0x04
+#define GB_CAMERA_TYPE_FLUSH 0x05
+#define GB_CAMERA_TYPE_METADATA 0x06
+
+#define GB_CAMERA_MAX_STREAMS 4
+#define GB_CAMERA_MAX_SETTINGS_SIZE 8192
+
+/* Greybus Camera Configure Streams request payload */
+struct gb_camera_stream_config_request {
+ __le16 width;
+ __le16 height;
+ __le16 format;
+ __le16 padding;
+} __packed;
+
+struct gb_camera_configure_streams_request {
+ __u8 num_streams;
+ __u8 flags;
+#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01
+ __le16 padding;
+ struct gb_camera_stream_config_request config[];
+} __packed;
+
+/* Greybus Camera Configure Streams response payload */
+struct gb_camera_stream_config_response {
+ __le16 width;
+ __le16 height;
+ __le16 format;
+ __u8 virtual_channel;
+ __u8 data_type[2];
+ __le16 max_pkt_size;
+ __u8 padding;
+ __le32 max_size;
+} __packed;
+
+struct gb_camera_configure_streams_response {
+ __u8 num_streams;
+#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01
+ __u8 flags;
+ __u8 padding[2];
+ __le32 data_rate;
+ struct gb_camera_stream_config_response config[];
+} __packed;
+
+/* Greybus Camera Capture request payload - response has no payload */
+struct gb_camera_capture_request {
+ __le32 request_id;
+ __u8 streams;
+ __u8 padding;
+ __le16 num_frames;
+ __u8 settings[];
+} __packed;
+
+/* Greybus Camera Flush response payload - request has no payload */
+struct gb_camera_flush_response {
+ __le32 request_id;
+} __packed;
+
+/* Greybus Camera Metadata request payload - operation has no response */
+struct gb_camera_metadata_request {
+ __le32 request_id;
+ __le16 frame_number;
+ __u8 stream;
+ __u8 padding;
+ __u8 metadata[];
+} __packed;
+
+/* Lights */
+
+/* Greybus Lights request types */
+#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02
+#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03
+#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04
+#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05
+#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06
+#define GB_LIGHTS_TYPE_SET_BLINK 0x07
+#define GB_LIGHTS_TYPE_SET_COLOR 0x08
+#define GB_LIGHTS_TYPE_SET_FADE 0x09
+#define GB_LIGHTS_TYPE_EVENT 0x0A
+#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B
+#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C
+#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D
+#define GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E
+
+/* Greybus Light modes */
+
+/*
+ * If you add any specific mode below, also update the
+ * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly.
+ */
+#define GB_CHANNEL_MODE_NONE 0x00000000
+#define GB_CHANNEL_MODE_BATTERY 0x00000001
+#define GB_CHANNEL_MODE_POWER 0x00000002
+#define GB_CHANNEL_MODE_WIRELESS 0x00000004
+#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008
+#define GB_CHANNEL_MODE_KEYBOARD 0x00000010
+#define GB_CHANNEL_MODE_BUTTONS 0x00000020
+#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040
+#define GB_CHANNEL_MODE_ATTENTION 0x00000080
+#define GB_CHANNEL_MODE_FLASH 0x00000100
+#define GB_CHANNEL_MODE_TORCH 0x00000200
+#define GB_CHANNEL_MODE_INDICATOR 0x00000400
+
+/* Lights Mode valid bit values */
+#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF
+#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000
+
+/* Greybus Light Channels Flags */
+#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001
+#define GB_LIGHT_CHANNEL_FADER 0x00000002
+#define GB_LIGHT_CHANNEL_BLINK 0x00000004
+
+/* get count of lights in module */
+struct gb_lights_get_lights_response {
+ __u8 lights_count;
+} __packed;
+
+/* light config request payload */
+struct gb_lights_get_light_config_request {
+ __u8 id;
+} __packed;
+
+/* light config response payload */
+struct gb_lights_get_light_config_response {
+ __u8 channel_count;
+ __u8 name[32];
+} __packed;
+
+/* channel config request payload */
+struct gb_lights_get_channel_config_request {
+ __u8 light_id;
+ __u8 channel_id;
+} __packed;
+
+/* channel flash config request payload */
+struct gb_lights_get_channel_flash_config_request {
+ __u8 light_id;
+ __u8 channel_id;
+} __packed;
+
+/* channel config response payload */
+struct gb_lights_get_channel_config_response {
+ __u8 max_brightness;
+ __le32 flags;
+ __le32 color;
+ __u8 color_name[32];
+ __le32 mode;
+ __u8 mode_name[32];
+} __packed;
+
+/* channel flash config response payload */
+struct gb_lights_get_channel_flash_config_response {
+ __le32 intensity_min_uA;
+ __le32 intensity_max_uA;
+ __le32 intensity_step_uA;
+ __le32 timeout_min_us;
+ __le32 timeout_max_us;
+ __le32 timeout_step_us;
+} __packed;
+
+/* blink request payload: response has no payload */
+struct gb_lights_blink_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le16 time_on_ms;
+ __le16 time_off_ms;
+} __packed;
+
+/* set brightness request payload: response has no payload */
+struct gb_lights_set_brightness_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __u8 brightness;
+} __packed;
+
+/* set color request payload: response has no payload */
+struct gb_lights_set_color_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le32 color;
+} __packed;
+
+/* set fade request payload: response has no payload */
+struct gb_lights_set_fade_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __u8 fade_in;
+ __u8 fade_out;
+} __packed;
+
+/* event request: generated by module */
+struct gb_lights_event_request {
+ __u8 light_id;
+ __u8 event;
+#define GB_LIGHTS_LIGHT_CONFIG 0x01
+} __packed;
+
+/* set flash intensity request payload: response has no payload */
+struct gb_lights_set_flash_intensity_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le32 intensity_uA;
+} __packed;
+
+/* set flash strobe state request payload: response has no payload */
+struct gb_lights_set_flash_strobe_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __u8 state;
+} __packed;
+
+/* set flash timeout request payload: response has no payload */
+struct gb_lights_set_flash_timeout_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le32 timeout_us;
+} __packed;
+
+/* get flash fault request payload */
+struct gb_lights_get_flash_fault_request {
+ __u8 light_id;
+ __u8 channel_id;
+} __packed;
+
+/* get flash fault response payload */
+struct gb_lights_get_flash_fault_response {
+ __le32 fault;
+#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000
+#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001
+#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002
+#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004
+#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008
+#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010
+#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020
+#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040
+#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080
+} __packed;
+
+/* Audio */
+
+#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02
+#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03
+#define GB_AUDIO_TYPE_GET_CONTROL 0x04
+#define GB_AUDIO_TYPE_SET_CONTROL 0x05
+#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06
+#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07
+#define GB_AUDIO_TYPE_GET_PCM 0x08
+#define GB_AUDIO_TYPE_SET_PCM 0x09
+#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a
+ /* 0x0b unused */
+#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c
+#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d
+#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e
+ /* 0x0f unused */
+#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10
+#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11
+#define GB_AUDIO_TYPE_JACK_EVENT 0x12
+#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13
+#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14
+#define GB_AUDIO_TYPE_SEND_DATA 0x15
+
+/* Module must be able to buffer 10ms of audio data, minimum */
+#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000
+
+#define GB_AUDIO_PCM_NAME_MAX 32
+#define AUDIO_DAI_NAME_MAX 32
+#define AUDIO_CONTROL_NAME_MAX 32
+#define AUDIO_CTL_ELEM_NAME_MAX 44
+#define AUDIO_ENUM_NAME_MAX 64
+#define AUDIO_WIDGET_NAME_MAX 32
+
+/* See SNDRV_PCM_FMTBIT_* in Linux source */
+#define GB_AUDIO_PCM_FMT_S8 BIT(0)
+#define GB_AUDIO_PCM_FMT_U8 BIT(1)
+#define GB_AUDIO_PCM_FMT_S16_LE BIT(2)
+#define GB_AUDIO_PCM_FMT_S16_BE BIT(3)
+#define GB_AUDIO_PCM_FMT_U16_LE BIT(4)
+#define GB_AUDIO_PCM_FMT_U16_BE BIT(5)
+#define GB_AUDIO_PCM_FMT_S24_LE BIT(6)
+#define GB_AUDIO_PCM_FMT_S24_BE BIT(7)
+#define GB_AUDIO_PCM_FMT_U24_LE BIT(8)
+#define GB_AUDIO_PCM_FMT_U24_BE BIT(9)
+#define GB_AUDIO_PCM_FMT_S32_LE BIT(10)
+#define GB_AUDIO_PCM_FMT_S32_BE BIT(11)
+#define GB_AUDIO_PCM_FMT_U32_LE BIT(12)
+#define GB_AUDIO_PCM_FMT_U32_BE BIT(13)
+
+/* See SNDRV_PCM_RATE_* in Linux source */
+#define GB_AUDIO_PCM_RATE_5512 BIT(0)
+#define GB_AUDIO_PCM_RATE_8000 BIT(1)
+#define GB_AUDIO_PCM_RATE_11025 BIT(2)
+#define GB_AUDIO_PCM_RATE_16000 BIT(3)
+#define GB_AUDIO_PCM_RATE_22050 BIT(4)
+#define GB_AUDIO_PCM_RATE_32000 BIT(5)
+#define GB_AUDIO_PCM_RATE_44100 BIT(6)
+#define GB_AUDIO_PCM_RATE_48000 BIT(7)
+#define GB_AUDIO_PCM_RATE_64000 BIT(8)
+#define GB_AUDIO_PCM_RATE_88200 BIT(9)
+#define GB_AUDIO_PCM_RATE_96000 BIT(10)
+#define GB_AUDIO_PCM_RATE_176400 BIT(11)
+#define GB_AUDIO_PCM_RATE_192000 BIT(12)
+
+#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1
+#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2
+
+#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0)
+#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1)
+
+/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */
+#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01
+#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02
+#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03
+#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06
+
+/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */
+#define GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00
+#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01
+#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02
+#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03
+#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04
+#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05
+#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06
+
+/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */
+#define GB_AUDIO_ACCESS_READ BIT(0)
+#define GB_AUDIO_ACCESS_WRITE BIT(1)
+#define GB_AUDIO_ACCESS_VOLATILE BIT(2)
+#define GB_AUDIO_ACCESS_TIMESTAMP BIT(3)
+#define GB_AUDIO_ACCESS_TLV_READ BIT(4)
+#define GB_AUDIO_ACCESS_TLV_WRITE BIT(5)
+#define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6)
+#define GB_AUDIO_ACCESS_INACTIVE BIT(7)
+#define GB_AUDIO_ACCESS_LOCK BIT(8)
+#define GB_AUDIO_ACCESS_OWNER BIT(9)
+
+/* enum snd_soc_dapm_type */
+#define GB_AUDIO_WIDGET_TYPE_INPUT 0x0
+#define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1
+#define GB_AUDIO_WIDGET_TYPE_MUX 0x2
+#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3
+#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4
+#define GB_AUDIO_WIDGET_TYPE_MIXER 0x5
+#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6
+#define GB_AUDIO_WIDGET_TYPE_PGA 0x7
+#define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8
+#define GB_AUDIO_WIDGET_TYPE_ADC 0x9
+#define GB_AUDIO_WIDGET_TYPE_DAC 0xa
+#define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb
+#define GB_AUDIO_WIDGET_TYPE_MIC 0xc
+#define GB_AUDIO_WIDGET_TYPE_HP 0xd
+#define GB_AUDIO_WIDGET_TYPE_SPK 0xe
+#define GB_AUDIO_WIDGET_TYPE_LINE 0xf
+#define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10
+#define GB_AUDIO_WIDGET_TYPE_VMID 0x11
+#define GB_AUDIO_WIDGET_TYPE_PRE 0x12
+#define GB_AUDIO_WIDGET_TYPE_POST 0x13
+#define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14
+#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15
+#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16
+#define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17
+#define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18
+#define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19
+#define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a
+#define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b
+#define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c
+
+#define GB_AUDIO_WIDGET_STATE_DISABLED 0x01
+#define GB_AUDIO_WIDGET_STATE_ENABLED 0x02
+
+#define GB_AUDIO_JACK_EVENT_INSERTION 0x1
+#define GB_AUDIO_JACK_EVENT_REMOVAL 0x2
+
+#define GB_AUDIO_BUTTON_EVENT_PRESS 0x1
+#define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2
+
+#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1
+#define GB_AUDIO_STREAMING_EVENT_HALT 0x2
+#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3
+#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4
+#define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5
+#define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6
+#define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7
+#define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8
+#define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9
+
+#define GB_AUDIO_INVALID_INDEX 0xff
+
+/* enum snd_jack_types */
+#define GB_AUDIO_JACK_HEADPHONE 0x0000001
+#define GB_AUDIO_JACK_MICROPHONE 0x0000002
+#define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \
+ GB_AUDIO_JACK_MICROPHONE)
+#define GB_AUDIO_JACK_LINEOUT 0x0000004
+#define GB_AUDIO_JACK_MECHANICAL 0x0000008
+#define GB_AUDIO_JACK_VIDEOOUT 0x0000010
+#define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \
+ GB_AUDIO_JACK_VIDEOOUT)
+#define GB_AUDIO_JACK_LINEIN 0x0000020
+#define GB_AUDIO_JACK_OC_HPHL 0x0000040
+#define GB_AUDIO_JACK_OC_HPHR 0x0000080
+#define GB_AUDIO_JACK_MICROPHONE2 0x0000200
+#define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \
+ GB_AUDIO_JACK_MICROPHONE | \
+ GB_AUDIO_JACK_MICROPHONE2)
+/* Kept separate from switches to facilitate implementation */
+#define GB_AUDIO_JACK_BTN_0 0x4000000
+#define GB_AUDIO_JACK_BTN_1 0x2000000
+#define GB_AUDIO_JACK_BTN_2 0x1000000
+#define GB_AUDIO_JACK_BTN_3 0x0800000
+
+struct gb_audio_pcm {
+ __u8 stream_name[GB_AUDIO_PCM_NAME_MAX];
+ __le32 formats; /* GB_AUDIO_PCM_FMT_* */
+ __le32 rates; /* GB_AUDIO_PCM_RATE_* */
+ __u8 chan_min;
+ __u8 chan_max;
+ __u8 sig_bits; /* number of bits of content */
+} __packed;
+
+struct gb_audio_dai {
+ __u8 name[AUDIO_DAI_NAME_MAX];
+ __le16 data_cport;
+ struct gb_audio_pcm capture;
+ struct gb_audio_pcm playback;
+} __packed;
+
+struct gb_audio_integer {
+ __le32 min;
+ __le32 max;
+ __le32 step;
+} __packed;
+
+struct gb_audio_integer64 {
+ __le64 min;
+ __le64 max;
+ __le64 step;
+} __packed;
+
+struct gb_audio_enumerated {
+ __le32 items;
+ __le16 names_length;
+ __u8 names[];
+} __packed;
+
+struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
+ __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */
+ __le16 dimen[4];
+ union {
+ struct gb_audio_integer integer;
+ struct gb_audio_integer64 integer64;
+ struct gb_audio_enumerated enumerated;
+ } value;
+} __packed;
+
+struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */
+ __le64 timestamp; /* XXX needed? */
+ union {
+ __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */
+ __le64 integer64_value[2];
+ __le32 enumerated_item[2];
+ } value;
+} __packed;
+
+struct gb_audio_control {
+ __u8 name[AUDIO_CONTROL_NAME_MAX];
+ __u8 id; /* 0-63 */
+ __u8 iface; /* GB_AUDIO_CTL_ELEM_IFACE_* */
+ __le16 data_cport;
+ __le32 access; /* GB_AUDIO_ACCESS_* */
+ __u8 count; /* count of same elements */
+ __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */
+ struct gb_audio_ctl_elem_info info;
+} __packed;
+
+struct gb_audio_widget {
+ __u8 name[AUDIO_WIDGET_NAME_MAX];
+ __u8 sname[AUDIO_WIDGET_NAME_MAX];
+ __u8 id;
+ __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */
+ __u8 state; /* GB_AUDIO_WIDGET_STATE_* */
+ __u8 ncontrols;
+ struct gb_audio_control ctl[]; /* 'ncontrols' entries */
+} __packed;
+
+struct gb_audio_route {
+ __u8 source_id; /* widget id */
+ __u8 destination_id; /* widget id */
+ __u8 control_id; /* 0-63 */
+ __u8 index; /* Selection within the control */
+} __packed;
+
+struct gb_audio_topology {
+ __u8 num_dais;
+ __u8 num_controls;
+ __u8 num_widgets;
+ __u8 num_routes;
+ __le32 size_dais;
+ __le32 size_controls;
+ __le32 size_widgets;
+ __le32 size_routes;
+ __le32 jack_type;
+ /*
+ * struct gb_audio_dai dai[num_dais];
+ * struct gb_audio_control controls[num_controls];
+ * struct gb_audio_widget widgets[num_widgets];
+ * struct gb_audio_route routes[num_routes];
+ */
+ __u8 data[];
+} __packed;
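+
+/*
+ * Illustrative sketch (an editor's addition): locating the four
+ * variable-length regions inside the topology blob. The size_* fields are
+ * used rather than the element counts, since widgets (and their embedded
+ * controls) are themselves variable length. The helper name is hypothetical.
+ */
+#if 0
+static void example_walk_topology(struct gb_audio_topology *topo)
+{
+ struct gb_audio_dai *dais = (struct gb_audio_dai *)topo->data;
+ struct gb_audio_control *controls = (struct gb_audio_control *)
+ (topo->data + le32_to_cpu(topo->size_dais));
+ struct gb_audio_widget *widgets = (struct gb_audio_widget *)
+ ((__u8 *)controls + le32_to_cpu(topo->size_controls));
+ struct gb_audio_route *routes = (struct gb_audio_route *)
+ ((__u8 *)widgets + le32_to_cpu(topo->size_widgets));
+
+ /* dais, controls, widgets and routes each point at their region */
+}
+#endif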
+
+struct gb_audio_get_topology_size_response {
+ __le16 size;
+} __packed;
+
+struct gb_audio_get_topology_response {
+ struct gb_audio_topology topology;
+} __packed;
+
+struct gb_audio_get_control_request {
+ __u8 control_id;
+ __u8 index;
+} __packed;
+
+struct gb_audio_get_control_response {
+ struct gb_audio_ctl_elem_value value;
+} __packed;
+
+struct gb_audio_set_control_request {
+ __u8 control_id;
+ __u8 index;
+ struct gb_audio_ctl_elem_value value;
+} __packed;
+
+struct gb_audio_enable_widget_request {
+ __u8 widget_id;
+} __packed;
+
+struct gb_audio_disable_widget_request {
+ __u8 widget_id;
+} __packed;
+
+struct gb_audio_get_pcm_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_get_pcm_response {
+ __le32 format;
+ __le32 rate;
+ __u8 channels;
+ __u8 sig_bits;
+} __packed;
+
+struct gb_audio_set_pcm_request {
+ __le16 data_cport;
+ __le32 format;
+ __le32 rate;
+ __u8 channels;
+ __u8 sig_bits;
+} __packed;
+
+struct gb_audio_set_tx_data_size_request {
+ __le16 data_cport;
+ __le16 size;
+} __packed;
+
+struct gb_audio_activate_tx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_deactivate_tx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_set_rx_data_size_request {
+ __le16 data_cport;
+ __le16 size;
+} __packed;
+
+struct gb_audio_activate_rx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_deactivate_rx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_jack_event_request {
+ __u8 widget_id;
+ __u8 jack_attribute;
+ __u8 event;
+} __packed;
+
+struct gb_audio_button_event_request {
+ __u8 widget_id;
+ __u8 button_id;
+ __u8 event;
+} __packed;
+
+struct gb_audio_streaming_event_request {
+ __le16 data_cport;
+ __u8 event;
+} __packed;
+
+struct gb_audio_send_data_request {
+ __le64 timestamp;
+ __u8 data[];
+} __packed;
+
+
+/* Log */
+
+/* operations */
+#define GB_LOG_TYPE_SEND_LOG 0x02
+
+/* length */
+#define GB_LOG_MAX_LEN 1024
+
+struct gb_log_send_log_request {
+ __le16 len;
+ __u8 msg[];
+} __packed;
+
+#endif /* __GREYBUS_PROTOCOLS_H */
+
diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h
new file mode 100644
index 000000000000..718e2857054e
--- /dev/null
+++ b/include/linux/greybus/hd.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Host Device
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#ifndef __HD_H
+#define __HD_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct gb_host_device;
+struct gb_message;
+
+struct gb_hd_driver {
+ size_t hd_priv_size;
+
+ int (*cport_allocate)(struct gb_host_device *hd, int cport_id,
+ unsigned long flags);
+ void (*cport_release)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_enable)(struct gb_host_device *hd, u16 cport_id,
+ unsigned long flags);
+ int (*cport_disable)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_connected)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_flush)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id,
+ u8 phase, unsigned int timeout);
+ int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id,
+ size_t peer_space, unsigned int timeout);
+ int (*cport_clear)(struct gb_host_device *hd, u16 cport_id);
+
+ int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id,
+ struct gb_message *message, gfp_t gfp_mask);
+ void (*message_cancel)(struct gb_message *message);
+ int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id);
+ int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
+ int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+ bool async);
+};
+
+struct gb_host_device {
+ struct device dev;
+ int bus_id;
+ const struct gb_hd_driver *driver;
+
+ struct list_head modules;
+ struct list_head connections;
+ struct ida cport_id_map;
+
+ /* Number of CPorts supported by the UniPro IP */
+ size_t num_cports;
+
+ /* Host device buffer constraints */
+ size_t buffer_size_max;
+
+ struct gb_svc *svc;
+ /* Private data for the host driver */
+ unsigned long hd_priv[] __aligned(sizeof(s64));
+};
+#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
+
+int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id);
+void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id);
+int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
+ unsigned long flags);
+void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id);
+
+struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
+ struct device *parent,
+ size_t buffer_size_max,
+ size_t num_cports);
+int gb_hd_add(struct gb_host_device *hd);
+void gb_hd_del(struct gb_host_device *hd);
+void gb_hd_shutdown(struct gb_host_device *hd);
+void gb_hd_put(struct gb_host_device *hd);
+int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+ bool in_irq);
+
+int gb_hd_init(void);
+void gb_hd_exit(void);
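+
+/*
+ * Illustrative sketch (an editor's addition): how a host-controller driver
+ * might reach its private data at the end of gb_host_device. "struct
+ * example_hd" is hypothetical; hd_priv_size in gb_hd_driver would be set
+ * to sizeof(struct example_hd).
+ */
+#if 0
+struct example_hd {
+ int irq;
+};
+
+static struct example_hd *example_hd_priv(struct gb_host_device *hd)
+{
+ return (struct example_hd *)&hd->hd_priv;
+}
+#endif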
+
+#endif /* __HD_H */
diff --git a/include/linux/greybus/interface.h b/include/linux/greybus/interface.h
new file mode 100644
index 000000000000..ce4def881e6f
--- /dev/null
+++ b/include/linux/greybus/interface.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Interface Block code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __INTERFACE_H
+#define __INTERFACE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum gb_interface_type {
+ GB_INTERFACE_TYPE_INVALID = 0,
+ GB_INTERFACE_TYPE_UNKNOWN,
+ GB_INTERFACE_TYPE_DUMMY,
+ GB_INTERFACE_TYPE_UNIPRO,
+ GB_INTERFACE_TYPE_GREYBUS,
+};
+
+#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0)
+#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1)
+#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2)
+#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3)
+#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4)
+#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5)
+#define GB_INTERFACE_QUIRK_NO_PM BIT(6)
+
+struct gb_interface {
+ struct device dev;
+ struct gb_control *control;
+
+ struct list_head bundles;
+ struct list_head module_node;
+ struct list_head manifest_descs;
+ u8 interface_id; /* Physical location within the Endo */
+ u8 device_id;
+ u8 features; /* Feature flags set in the manifest */
+
+ enum gb_interface_type type;
+
+ u32 ddbl1_manufacturer_id;
+ u32 ddbl1_product_id;
+ u32 vendor_id;
+ u32 product_id;
+ u64 serial_number;
+
+ struct gb_host_device *hd;
+ struct gb_module *module;
+
+ unsigned long quirks;
+
+ struct mutex mutex;
+
+ bool disconnected;
+
+ bool ejected;
+ bool removed;
+ bool active;
+ bool enabled;
+ bool mode_switch;
+ bool dme_read;
+
+ struct work_struct mode_switch_work;
+ struct completion mode_switch_completion;
+};
+#define to_gb_interface(d) container_of(d, struct gb_interface, dev)
+
+struct gb_interface *gb_interface_create(struct gb_module *module,
+ u8 interface_id);
+int gb_interface_activate(struct gb_interface *intf);
+void gb_interface_deactivate(struct gb_interface *intf);
+int gb_interface_enable(struct gb_interface *intf);
+void gb_interface_disable(struct gb_interface *intf);
+int gb_interface_add(struct gb_interface *intf);
+void gb_interface_del(struct gb_interface *intf);
+void gb_interface_put(struct gb_interface *intf);
+void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
+ u32 mailbox);
+
+int gb_interface_request_mode_switch(struct gb_interface *intf);
+
+#endif /* __INTERFACE_H */
diff --git a/include/linux/greybus/manifest.h b/include/linux/greybus/manifest.h
new file mode 100644
index 000000000000..830301b7a8bc
--- /dev/null
+++ b/include/linux/greybus/manifest.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus manifest parsing
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __MANIFEST_H
+#define __MANIFEST_H
+
+#include <linux/types.h>
+
+struct gb_interface;
+bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);
+
+#endif /* __MANIFEST_H */
diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h
new file mode 100644
index 000000000000..3efe2133acfd
--- /dev/null
+++ b/include/linux/greybus/module.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Module code
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+
+#ifndef __MODULE_H
+#define __MODULE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct gb_module {
+ struct device dev;
+ struct gb_host_device *hd;
+
+ struct list_head hd_node;
+
+ u8 module_id;
+ size_t num_interfaces;
+
+ bool disconnected;
+
+ struct gb_interface *interfaces[];
+};
+#define to_gb_module(d) container_of(d, struct gb_module, dev)
+
+struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
+ size_t num_interfaces);
+int gb_module_add(struct gb_module *module);
+void gb_module_del(struct gb_module *module);
+void gb_module_put(struct gb_module *module);
+
+#endif /* __MODULE_H */
diff --git a/include/linux/greybus/operation.h b/include/linux/greybus/operation.h
new file mode 100644
index 000000000000..cb8e4ef45222
--- /dev/null
+++ b/include/linux/greybus/operation.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus operations
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __OPERATION_H
+#define __OPERATION_H
+
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct gb_host_device;
+struct gb_operation;
+
+/* The default amount of time a request is given to complete */
+#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */
+
+/*
+ * The top bit of the type in an operation message header indicates
+ * whether the message is a request (bit clear) or response (bit set)
+ */
+#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80)
+
+enum gb_operation_result {
+ GB_OP_SUCCESS = 0x00,
+ GB_OP_INTERRUPTED = 0x01,
+ GB_OP_TIMEOUT = 0x02,
+ GB_OP_NO_MEMORY = 0x03,
+ GB_OP_PROTOCOL_BAD = 0x04,
+ GB_OP_OVERFLOW = 0x05,
+ GB_OP_INVALID = 0x06,
+ GB_OP_RETRY = 0x07,
+ GB_OP_NONEXISTENT = 0x08,
+ GB_OP_UNKNOWN_ERROR = 0xfe,
+ GB_OP_MALFUNCTION = 0xff,
+};
+
+#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr)
+#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX
+
+/*
+ * Protocol code should only examine the payload and payload_size fields, and
+ * host-controller drivers may use the hcpriv field. All other fields are
+ * intended to be private to the operations core code.
+ */
+struct gb_message {
+ struct gb_operation *operation;
+ struct gb_operation_msg_hdr *header;
+
+ void *payload;
+ size_t payload_size;
+
+ void *buffer;
+
+ void *hcpriv;
+};
+
+#define GB_OPERATION_FLAG_INCOMING BIT(0)
+#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1)
+#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2)
+#define GB_OPERATION_FLAG_CORE BIT(3)
+
+#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \
+ GB_OPERATION_FLAG_UNIDIRECTIONAL)
+
+/*
+ * A Greybus operation is a remote procedure call performed over a
+ * connection between two UniPro interfaces.
+ *
+ * Every operation consists of a request message sent to the other
+ * end of the connection coupled with a reply message returned to
+ * the sender. Every operation has a type, whose interpretation is
+ * dependent on the protocol associated with the connection.
+ *
+ * Only four things in an operation structure are intended to be
+ * directly usable by protocol handlers: the operation's connection
+ * pointer; the operation type; the request message payload (and
+ * size); and the response message payload (and size). Note that a
+ * message with a 0-byte payload has a null message payload pointer.
+ *
+ * In addition, every operation has a result, which is an errno
+ * value. Protocol handlers access the operation result using
+ * gb_operation_result().
+ */
+typedef void (*gb_operation_callback)(struct gb_operation *);
+struct gb_operation {
+ struct gb_connection *connection;
+ struct gb_message *request;
+ struct gb_message *response;
+
+ unsigned long flags;
+ u8 type;
+ u16 id;
+ int errno; /* Operation result */
+
+ struct work_struct work;
+ gb_operation_callback callback;
+ struct completion completion;
+ struct timer_list timer;
+
+ struct kref kref;
+ atomic_t waiters;
+
+ int active;
+ struct list_head links; /* connection->operations */
+
+ void *private;
+};
+
+static inline bool
+gb_operation_is_incoming(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_INCOMING;
+}
+
+static inline bool
+gb_operation_is_unidirectional(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;
+}
+
+static inline bool
+gb_operation_short_response_allowed(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;
+}
+
+static inline bool gb_operation_is_core(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_CORE;
+}
+
+void gb_connection_recv(struct gb_connection *connection,
+ void *data, size_t size);
+
+int gb_operation_result(struct gb_operation *operation);
+
+size_t gb_operation_get_payload_size_max(struct gb_connection *connection);
+struct gb_operation *
+gb_operation_create_flags(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, unsigned long flags,
+ gfp_t gfp);
+
+static inline struct gb_operation *
+gb_operation_create(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, gfp_t gfp)
+{
+ return gb_operation_create_flags(connection, type, request_size,
+ response_size, 0, gfp);
+}
+
+struct gb_operation *
+gb_operation_create_core(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, unsigned long flags,
+ gfp_t gfp);
+
+void gb_operation_get(struct gb_operation *operation);
+void gb_operation_put(struct gb_operation *operation);
+
+bool gb_operation_response_alloc(struct gb_operation *operation,
+ size_t response_size, gfp_t gfp);
+
+int gb_operation_request_send(struct gb_operation *operation,
+ gb_operation_callback callback,
+ unsigned int timeout,
+ gfp_t gfp);
+int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
+ unsigned int timeout);
+static inline int
+gb_operation_request_send_sync(struct gb_operation *operation)
+{
+ return gb_operation_request_send_sync_timeout(operation,
+ GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+void gb_operation_cancel(struct gb_operation *operation, int errno);
+void gb_operation_cancel_incoming(struct gb_operation *operation, int errno);
+
+void greybus_message_sent(struct gb_host_device *hd,
+ struct gb_message *message, int status);
+
+int gb_operation_sync_timeout(struct gb_connection *connection, int type,
+ void *request, int request_size,
+ void *response, int response_size,
+ unsigned int timeout);
+int gb_operation_unidirectional_timeout(struct gb_connection *connection,
+ int type, void *request, int request_size,
+ unsigned int timeout);
+
+static inline int gb_operation_sync(struct gb_connection *connection, int type,
+ void *request, int request_size,
+ void *response, int response_size)
+{
+ return gb_operation_sync_timeout(connection, type,
+ request, request_size, response, response_size,
+ GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+static inline int gb_operation_unidirectional(struct gb_connection *connection,
+ int type, void *request, int request_size)
+{
+ return gb_operation_unidirectional_timeout(connection, type,
+ request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+static inline void *gb_operation_get_data(struct gb_operation *operation)
+{
+ return operation->private;
+}
+
+static inline void gb_operation_set_data(struct gb_operation *operation,
+ void *data)
+{
+ operation->private = data;
+}
+
+int gb_operation_init(void);
+void gb_operation_exit(void);
+
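+/*
+ * Illustrative sketch (an editor's addition): a protocol handler issuing a
+ * synchronous operation. EXAMPLE_TYPE_ECHO and the helper are hypothetical;
+ * gb_operation_sync() above is real.
+ */
+#if 0
+#define EXAMPLE_TYPE_ECHO 0x02 /* hypothetical operation type */
+
+static int example_echo(struct gb_connection *connection, __le32 *value)
+{
+ __le32 request = *value;
+ __le32 response;
+ int ret;
+
+ ret = gb_operation_sync(connection, EXAMPLE_TYPE_ECHO,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret)
+ return ret; /* negative errno from the operation core */
+
+ *value = response;
+ return 0;
+}
+#endif
+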
+#endif /* !__OPERATION_H */
diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h
new file mode 100644
index 000000000000..da547fb9071b
--- /dev/null
+++ b/include/linux/greybus/svc.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus SVC code
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#ifndef __SVC_H
+#define __SVC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct gb_svc_l2_timer_cfg;
+
+#define GB_SVC_CPORT_FLAG_E2EFC BIT(0)
+#define GB_SVC_CPORT_FLAG_CSD_N BIT(1)
+#define GB_SVC_CPORT_FLAG_CSV_N BIT(2)
+
+enum gb_svc_state {
+ GB_SVC_STATE_RESET,
+ GB_SVC_STATE_PROTOCOL_VERSION,
+ GB_SVC_STATE_SVC_HELLO,
+};
+
+enum gb_svc_watchdog_bite {
+ GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
+ GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
+};
+
+struct gb_svc_watchdog;
+
+struct svc_debugfs_pwrmon_rail {
+ u8 id;
+ struct gb_svc *svc;
+};
+
+struct gb_svc {
+ struct device dev;
+
+ struct gb_host_device *hd;
+ struct gb_connection *connection;
+ enum gb_svc_state state;
+ struct ida device_id_map;
+ struct workqueue_struct *wq;
+
+ u16 endo_id;
+ u8 ap_intf_id;
+
+ u8 protocol_major;
+ u8 protocol_minor;
+
+ struct gb_svc_watchdog *watchdog;
+ enum gb_svc_watchdog_bite action;
+
+ struct dentry *debugfs_dentry;
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails;
+};
+#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
+
+struct gb_svc *gb_svc_create(struct gb_host_device *hd);
+int gb_svc_add(struct gb_svc *svc);
+void gb_svc_del(struct gb_svc *svc);
+void gb_svc_put(struct gb_svc *svc);
+
+int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
+ u8 measurement_type, u32 *value);
+int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
+int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
+ u8 intf2_id, u8 dev2_id);
+void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
+int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+ u8 intf2_id, u16 cport2_id, u8 cport_flags);
+void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+ u8 intf2_id, u16 cport2_id);
+int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
+int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
+int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
+
+int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+ u32 *value);
+int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+ u32 value);
+int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
+ u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
+ u8 tx_amplitude, u8 tx_hs_equalizer,
+ u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
+ u8 flags, u32 quirks,
+ struct gb_svc_l2_timer_cfg *local,
+ struct gb_svc_l2_timer_cfg *remote);
+int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
+int gb_svc_ping(struct gb_svc *svc);
+int gb_svc_watchdog_create(struct gb_svc *svc);
+void gb_svc_watchdog_destroy(struct gb_svc *svc);
+bool gb_svc_watchdog_enabled(struct gb_svc *svc);
+int gb_svc_watchdog_enable(struct gb_svc *svc);
+int gb_svc_watchdog_disable(struct gb_svc *svc);
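+
+/*
+ * Illustrative sketch (an editor's addition): creating a connection with
+ * end-to-end flow control enabled. The helper name and the id values are
+ * placeholders; the flags and gb_svc_connection_create() are from this
+ * header.
+ */
+#if 0
+static int example_connect(struct gb_svc *svc)
+{
+ u8 cport_flags = GB_SVC_CPORT_FLAG_E2EFC |
+ GB_SVC_CPORT_FLAG_CSD_N |
+ GB_SVC_CPORT_FLAG_CSV_N;
+
+ /* AP cport 0 to interface 1, cport 2 (placeholder ids) */
+ return gb_svc_connection_create(svc, svc->ap_intf_id, 0, 1, 2,
+ cport_flags);
+}
+#endif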
+
+#endif /* __SVC_H */
diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
new file mode 100644
index 000000000000..9d4e5ab6c314
--- /dev/null
+++ b/include/linux/group_cpus.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+
+#ifndef __LINUX_GROUP_CPUS_H
+#define __LINUX_GROUP_CPUS_H
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+
+struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks);
+
+#endif
diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h
new file mode 100644
index 000000000000..45f181bcf890
--- /dev/null
+++ b/include/linux/habanalabs/cpucp_if.h
@@ -0,0 +1,1437 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020-2023 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef CPUCP_IF_H
+#define CPUCP_IF_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "hl_boot_if.h"
+
+#define NUM_HBM_PSEUDO_CH 2
+#define NUM_HBM_CH_PER_DEV 8
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK 0x00000001
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT 1
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK 0x00000002
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT 2
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK 0x00000004
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT 3
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK 0x00000008
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT 4
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK 0x00000010
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT 5
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK 0x00000020
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0
+
+#define PLL_MAP_MAX_BITS 128
+#define PLL_MAP_LEN (PLL_MAP_MAX_BITS / 8)
+
+enum eq_event_id {
+ EQ_EVENT_NIC_STS_REQUEST = 0,
+ EQ_EVENT_PWR_MODE_0,
+ EQ_EVENT_PWR_MODE_1,
+ EQ_EVENT_PWR_MODE_2,
+ EQ_EVENT_PWR_MODE_3,
+ EQ_EVENT_PWR_BRK_ENTRY,
+ EQ_EVENT_PWR_BRK_EXIT,
+ EQ_EVENT_HEARTBEAT,
+ EQ_EVENT_CPLD_RESET_REASON,
+ EQ_EVENT_CPLD_SHUTDOWN,
+ EQ_EVENT_POWER_EVT_START,
+ EQ_EVENT_POWER_EVT_END,
+ EQ_EVENT_THERMAL_EVT_START,
+ EQ_EVENT_THERMAL_EVT_END,
+};
+
+/*
+ * Info on the packet queue pointers at the first async occurrence
+ */
+struct cpucp_pkt_sync_err {
+ __le32 pi;
+ __le32 ci;
+};
+
+struct hl_eq_hbm_ecc_data {
+ /* SERR counter */
+ __le32 sec_cnt;
+ /* DERR counter */
+ __le32 dec_cnt;
+ /* Supplemental Information according to the mask bits */
+ __le32 hbm_ecc_info;
+ /* Address in hbm where the ecc happened */
+ __le32 first_addr;
+ /* SERR continuous address counter */
+ __le32 sec_cont_cnt;
+ __le32 pad;
+};
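+
+/*
+ * Illustrative sketch (an editor's addition): decoding hbm_ecc_info with
+ * the CPUCP_PKT_HBM_ECC_INFO_* shift/mask pairs above. The helper name is
+ * hypothetical.
+ */
+#if 0
+static u32 example_hbm_ecc_channel(const struct hl_eq_hbm_ecc_data *data)
+{
+ u32 info = le32_to_cpu(data->hbm_ecc_info);
+
+ return (info & CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK) >>
+ CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT;
+}
+#endif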
+
+/*
+ * EVENT QUEUE
+ */
+
+struct hl_eq_header {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct hl_eq_ecc_data {
+ __le64 ecc_address;
+ __le64 ecc_syndrome;
+ __u8 memory_wrapper_idx;
+ __u8 is_critical;
+ __le16 block_id;
+ __u8 pad[4];
+};
+
+enum hl_sm_sei_cause {
+ SM_SEI_SO_OVERFLOW,
+ SM_SEI_LBW_4B_UNALIGNED,
+ SM_SEI_AXI_RESPONSE_ERR
+};
+
+struct hl_eq_sm_sei_data {
+ __le32 sei_log;
+ /* enum hl_sm_sei_cause */
+ __u8 sei_cause;
+ __u8 pad[3];
+};
+
+enum hl_fw_alive_severity {
+ FW_ALIVE_SEVERITY_MINOR,
+ FW_ALIVE_SEVERITY_CRITICAL
+};
+
+struct hl_eq_fw_alive {
+ __le64 uptime_seconds;
+ __le32 process_id;
+ __le32 thread_id;
+ /* enum hl_fw_alive_severity */
+ __u8 severity;
+ __u8 pad[7];
+};
+
+struct hl_eq_intr_cause {
+ __le64 intr_cause_data;
+};
+
+struct hl_eq_pcie_drain_ind_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 drain_wr_addr_lbw;
+ __le64 drain_rd_addr_lbw;
+ __le64 drain_wr_addr_hbw;
+ __le64 drain_rd_addr_hbw;
+};
+
+struct hl_eq_razwi_lbw_info_regs {
+ __le32 rr_aw_razwi_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+struct hl_eq_razwi_hbw_info_regs {
+ __le32 rr_aw_razwi_hi_reg;
+ __le32 rr_aw_razwi_lo_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_hi_reg;
+ __le32 rr_ar_razwi_lo_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+/* razwi_happened masks */
+#define RAZWI_HAPPENED_HBW 0x1
+#define RAZWI_HAPPENED_LBW 0x2
+#define RAZWI_HAPPENED_AW 0x4
+#define RAZWI_HAPPENED_AR 0x8
+
+struct hl_eq_razwi_info {
+ __le32 razwi_happened_mask;
+ union {
+ struct hl_eq_razwi_lbw_info_regs lbw;
+ struct hl_eq_razwi_hbw_info_regs hbw;
+ };
+ __le32 pad;
+};
+
+struct hl_eq_razwi_with_intr_cause {
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_intr_cause intr_cause;
+};
+
+#define HBM_CA_ERR_CMD_LIFO_LEN 8
+#define HBM_RD_ERR_DATA_LIFO_LEN 8
+#define HBM_WR_PAR_CMD_LIFO_LEN 11
+
+enum hl_hbm_sei_cause {
+ /* Command/address parity error event is split into 2 events due to
+ * size limitation: ODD suffix for odd HBM CK_t cycles and EVEN suffix
+ * for even HBM CK_t cycles
+ */
+ HBM_SEI_CMD_PARITY_EVEN,
+ HBM_SEI_CMD_PARITY_ODD,
+ /* Read errors can be reflected as a combination of SERR/DERR/parity
+ * errors. Therefore, we define one event for all read error types.
+ * LKD will perform further processing.
+ */
+ HBM_SEI_READ_ERR,
+ HBM_SEI_WRITE_DATA_PARITY_ERR,
+ HBM_SEI_CATTRIP,
+ HBM_SEI_MEM_BIST_FAIL,
+ HBM_SEI_DFI,
+ HBM_SEI_INV_TEMP_READ_OUT,
+ HBM_SEI_BIST_FAIL,
+};
+
+/* Masks for parsing hl_hbm_sei_header fields */
+#define HBM_ECC_SERR_CNTR_MASK 0xFF
+#define HBM_ECC_DERR_CNTR_MASK 0xFF00
+#define HBM_RD_PARITY_CNTR_MASK 0xFF0000
+
+/* HBM index and MC index are known by the event_id */
+struct hl_hbm_sei_header {
+ union {
+ /* relevant only in case of HBM read error */
+ struct {
+ __u8 ecc_serr_cnt;
+ __u8 ecc_derr_cnt;
+ __u8 read_par_cnt;
+ __u8 reserved;
+ };
+ /* All other cases */
+ __le32 cnt;
+ };
+ __u8 sei_cause; /* enum hl_hbm_sei_cause */
+ __u8 mc_channel; /* range: 0-3 */
+ __u8 mc_pseudo_channel; /* range: 0-7 */
+ __u8 is_critical;
+};
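A minimal sketch (hypothetical helper) of parsing the packed counters in
hl_hbm_sei_header with the masks above, assuming the read-error layout:

static void parse_hbm_sei_hdr(const struct hl_hbm_sei_header *hdr)
{
	u32 cnt = le32_to_cpu(hdr->cnt);

	if (hdr->sei_cause == HBM_SEI_READ_ERR)
		pr_info("serr %u derr %u rd_par %u\n",
			cnt & HBM_ECC_SERR_CNTR_MASK,
			(cnt & HBM_ECC_DERR_CNTR_MASK) >> 8,
			(cnt & HBM_RD_PARITY_CNTR_MASK) >> 16);
}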
+
+#define HBM_RD_ADDR_SID_SHIFT 0
+#define HBM_RD_ADDR_SID_MASK 0x1
+#define HBM_RD_ADDR_BG_SHIFT 1
+#define HBM_RD_ADDR_BG_MASK 0x6
+#define HBM_RD_ADDR_BA_SHIFT 3
+#define HBM_RD_ADDR_BA_MASK 0x18
+#define HBM_RD_ADDR_COL_SHIFT 5
+#define HBM_RD_ADDR_COL_MASK 0x7E0
+#define HBM_RD_ADDR_ROW_SHIFT 11
+#define HBM_RD_ADDR_ROW_MASK 0x3FFF800
+
+struct hbm_rd_addr {
+ union {
+ /* bit fields are only for FW use */
+ struct {
+ u32 dbg_rd_err_addr_sid:1;
+ u32 dbg_rd_err_addr_bg:2;
+ u32 dbg_rd_err_addr_ba:2;
+ u32 dbg_rd_err_addr_col:6;
+ u32 dbg_rd_err_addr_row:15;
+ u32 reserved:6;
+ };
+ __le32 rd_addr_val;
+ };
+};
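Since the bit fields are for FW use only, host-side code would presumably decode
rd_addr_val with the shift/mask macros above; a minimal sketch (hypothetical
helper):

static u32 hbm_rd_addr_row(const struct hbm_rd_addr *addr)
{
	u32 val = le32_to_cpu(addr->rd_addr_val);

	return (val & HBM_RD_ADDR_ROW_MASK) >> HBM_RD_ADDR_ROW_SHIFT;
}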
+
+#define HBM_RD_ERR_BEAT_SHIFT 2
+/* dbg_rd_err_misc fields: */
+/* Read parity is calculated per DW on every beat */
+#define HBM_RD_ERR_PAR_ERR_BEAT0_SHIFT 0
+#define HBM_RD_ERR_PAR_ERR_BEAT0_MASK 0x3
+#define HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT 8
+#define HBM_RD_ERR_PAR_DATA_BEAT0_MASK 0x300
+/* ECC is calculated per PC on every beat */
+#define HBM_RD_ERR_SERR_BEAT0_SHIFT 16
+#define HBM_RD_ERR_SERR_BEAT0_MASK 0x10000
+#define HBM_RD_ERR_DERR_BEAT0_SHIFT 24
+#define HBM_RD_ERR_DERR_BEAT0_MASK 0x100000
+
+struct hl_eq_hbm_sei_read_err_intr_info {
+ /* DFI_RD_ERR_REP_ADDR */
+ struct hbm_rd_addr dbg_rd_err_addr;
+ /* DFI_RD_ERR_REP_ERR */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 dbg_rd_err_par:8;
+ u32 dbg_rd_err_par_data:8;
+ u32 dbg_rd_err_serr:4;
+ u32 dbg_rd_err_derr:4;
+ u32 reserved:8;
+ };
+ __le32 dbg_rd_err_misc;
+ };
+ /* DFI_RD_ERR_REP_DM */
+ __le32 dbg_rd_err_dm;
+ /* DFI_RD_ERR_REP_SYNDROME */
+ __le32 dbg_rd_err_syndrome;
+ /* DFI_RD_ERR_REP_DATA */
+ __le32 dbg_rd_err_data[HBM_RD_ERR_DATA_LIFO_LEN];
+};
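A sketch under the assumption that the per-beat parity fields repeat every
HBM_RD_ERR_BEAT_SHIFT bits above the BEAT0 position (hypothetical helper):

static u32 hbm_rd_par_err_beat(u32 dbg_rd_err_misc, unsigned int beat)
{
	/* 2-bit parity error field for beat N, N in 0..3 */
	return (dbg_rd_err_misc >> (beat * HBM_RD_ERR_BEAT_SHIFT)) &
	       HBM_RD_ERR_PAR_ERR_BEAT0_MASK;
}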
+
+struct hl_eq_hbm_sei_ca_par_intr_info {
+ /* 14 LSBs */
+ __le16 dbg_row[HBM_CA_ERR_CMD_LIFO_LEN];
+ /* 18 LSBs */
+ __le32 dbg_col[HBM_CA_ERR_CMD_LIFO_LEN];
+};
+
+#define WR_PAR_LAST_CMD_COL_SHIFT 0
+#define WR_PAR_LAST_CMD_COL_MASK 0x3F
+#define WR_PAR_LAST_CMD_BG_SHIFT 6
+#define WR_PAR_LAST_CMD_BG_MASK 0xC0
+#define WR_PAR_LAST_CMD_BA_SHIFT 8
+#define WR_PAR_LAST_CMD_BA_MASK 0x300
+#define WR_PAR_LAST_CMD_SID_SHIFT 10
+#define WR_PAR_LAST_CMD_SID_MASK 0x400
+
+/* Row address isn't latched */
+struct hbm_sei_wr_cmd_address {
+ /* DFI_DERR_LAST_CMD */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 col:6;
+ u32 bg:2;
+ u32 ba:2;
+ u32 sid:1;
+ u32 reserved:21;
+ };
+ __le32 dbg_wr_cmd_addr;
+ };
+};
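A matching sketch (hypothetical helper) for decoding one latched WR command
address with the macros above:

static void parse_wr_cmd_addr(const struct hbm_sei_wr_cmd_address *cmd)
{
	u32 val = le32_to_cpu(cmd->dbg_wr_cmd_addr);

	pr_info("col %u bg %u ba %u sid %u\n",
		(val & WR_PAR_LAST_CMD_COL_MASK) >> WR_PAR_LAST_CMD_COL_SHIFT,
		(val & WR_PAR_LAST_CMD_BG_MASK) >> WR_PAR_LAST_CMD_BG_SHIFT,
		(val & WR_PAR_LAST_CMD_BA_MASK) >> WR_PAR_LAST_CMD_BA_SHIFT,
		(val & WR_PAR_LAST_CMD_SID_MASK) >> WR_PAR_LAST_CMD_SID_SHIFT);
}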
+
+struct hl_eq_hbm_sei_wr_par_intr_info {
+ /* entry 0: WR command address from the 1st cycle prior to the error
+ * entry 1: WR command address from the 2nd cycle prior to the error
+ * and so on...
+ */
+ struct hbm_sei_wr_cmd_address dbg_last_wr_cmds[HBM_WR_PAR_CMD_LIFO_LEN];
+ /* derr[0:1] - 1st HBM cycle DERR output
+ * derr[2:3] - 2nd HBM cycle DERR output
+ */
+ __u8 dbg_derr;
+ /* extend to reach 8B */
+ __u8 pad[3];
+};
+
+/*
+ * This struct represents the following SEI causes:
+ * command parity, ECC double error, ECC single error, DFI error, cattrip,
+ * temperature read-out, read parity error and write parity error.
+ * Some causes use only the header while others carry extra data.
+ */
+struct hl_eq_hbm_sei_data {
+ struct hl_hbm_sei_header hdr;
+ union {
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_even_info;
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_odd_info;
+ struct hl_eq_hbm_sei_read_err_intr_info read_err_info;
+ struct hl_eq_hbm_sei_wr_par_intr_info wr_parity_info;
+ };
+};
+
+/* Engine/farm arc interrupt type */
+enum hl_engine_arc_interrupt_type {
+ /* Qman/farm ARC DCCM QUEUE FULL interrupt type */
+ ENGINE_ARC_DCCM_QUEUE_FULL_IRQ = 1
+};
+
+/* Data structure specifies details of payload of DCCM QUEUE FULL interrupt */
+struct hl_engine_arc_dccm_queue_full_irq {
+ /* Queue index value which caused DCCM QUEUE FULL */
+ __le32 queue_index;
+ __le32 pad;
+};
+
+/* Data structure specifies details of QM/FARM ARC interrupt */
+struct hl_eq_engine_arc_intr_data {
+ /* ARC engine id e.g. DCORE0_TPC0_QM_ARC, DCORE0_TCP1_QM_ARC */
+ __le32 engine_id;
+ __le32 intr_type; /* enum hl_engine_arc_interrupt_type */
+ /* More info related to the interrupt e.g. queue index
+ * in case of DCCM_QUEUE_FULL interrupt.
+ */
+ __le64 payload;
+ __le64 pad[5];
+};
+
+#define ADDR_DEC_ADDRESS_COUNT_MAX 4
+
+/* Data structure specifies details of ADDR_DEC interrupt */
+struct hl_eq_addr_dec_intr_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX];
+ __u8 addr_cnt;
+ __u8 pad[7];
+};
+
+struct hl_eq_entry {
+ struct hl_eq_header hdr;
+ union {
+ __le64 data_placeholder;
+ struct hl_eq_ecc_data ecc_data;
+ struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Obsolete */
+ struct hl_eq_sm_sei_data sm_sei_data;
+ struct cpucp_pkt_sync_err pkt_sync_err;
+ struct hl_eq_fw_alive fw_alive;
+ struct hl_eq_intr_cause intr_cause;
+ struct hl_eq_pcie_drain_ind_data pcie_drain_ind_data;
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause;
+ struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */
+ struct hl_eq_engine_arc_intr_data arc_data;
+ struct hl_eq_addr_dec_intr_data addr_dec;
+ __le64 data[7];
+ };
+};
+
+#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
+
+#define EQ_CTL_READY_SHIFT 31
+#define EQ_CTL_READY_MASK 0x80000000
+
+#define EQ_CTL_EVENT_MODE_SHIFT 28
+#define EQ_CTL_EVENT_MODE_MASK 0x70000000
+
+#define EQ_CTL_EVENT_TYPE_SHIFT 16
+#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000
+
+#define EQ_CTL_INDEX_SHIFT 0
+#define EQ_CTL_INDEX_MASK 0x0000FFFF
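A minimal sketch (hypothetical helper) of unpacking the EQ entry ctl word with
the masks above:

static void parse_eq_ctl(const struct hl_eq_entry *entry)
{
	u32 ctl = le32_to_cpu(entry->hdr.ctl);

	pr_info("eq entry: ready %u type %u index %u\n",
		(ctl & EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT,
		(ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT,
		(ctl & EQ_CTL_INDEX_MASK) >> EQ_CTL_INDEX_SHIFT);
}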
+
+enum pq_init_status {
+ PQ_INIT_STATUS_NA = 0,
+ PQ_INIT_STATUS_READY_FOR_CP,
+ PQ_INIT_STATUS_READY_FOR_HOST,
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI,
+ PQ_INIT_STATUS_LEN_NOT_POWER_OF_TWO_ERR,
+ PQ_INIT_STATUS_ILLEGAL_Q_ADDR_ERR
+};
+
+/*
+ * CpuCP Primary Queue Packets
+ *
+ * During normal operation, the host's kernel driver needs to send various
+ * messages to CpuCP, usually either to SET some value in a H/W peripheral or
+ * to GET the current value of some H/W peripheral. For example, SET the
+ * frequency of MME/TPC and GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by the
+ * host's driver itself, e.g. power management code. In either case, the
+ * communication from the host's driver to CpuCP will *always* be in
+ * synchronous mode, meaning that the host will send a single message and poll
+ * until the message is acknowledged and the results are ready (if results are
+ * needed).
+ *
+ * This means that only a single message can be sent at a time and the host's
+ * driver must wait for its result before sending the next message. Having said
+ * that, because these are control messages which are sent in a relatively low
+ * frequency, this limitation seems acceptable. It's important to note that
+ * in case of multiple devices, messages to different devices *can* be sent
+ * at the same time.
+ *
+ * The message, inputs/outputs (if relevant) and fence object will be located
+ * on the device DDR at an address that will be determined by the host's driver.
+ * During the device initialization phase, the host will pass that address to CpuCP.
+ * Most of the message types will contain inputs/outputs inside the message
+ * itself. The common part of each message will contain the opcode of the
+ * message (its type) and a field representing a fence object.
+ *
+ * When the host's driver wishes to send a message to CpuCP, it will write the
+ * message contents to the device DDR, clear the fence object and then write to
+ * the PSOC_ARC1_AUX_SW_INTR, to issue interrupt 121 to ARC Management CPU.
+ *
+ * Upon receiving the interrupt (#121), CpuCP will read the message from the
+ * DDR. In case the message is a SET operation, CpuCP will first perform the
+ * operation and then write to the fence object on the device DDR. In case the
+ * message is a GET operation, CpuCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, CpuCP
+ * will fill the rc field with the right error code.
+ *
+ * In the meantime, the host's driver will poll on the fence object. Once the
+ * host sees that the fence object is signaled, it will read the results from
+ * the device DDR (if relevant) and resume the code execution in the host's
+ * driver.
+ *
+ * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
+ * so the value being put by the host's driver matches the value read by CpuCP.
+ *
+ * Non-QMAN packets should be limited to values 1 through (2^8 - 1)
+ *
+ * Detailed description:
+ *
+ * CPUCP_PACKET_DISABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU must NOT issue PCI
+ * transactions (read/write) towards the Host CPU. This also includes
+ * sending MSI-X interrupts.
+ * This packet is usually sent before the device is moved to D3Hot state.
+ *
+ * CPUCP_PACKET_ENABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU is allowed to issue PCI
+ * transactions towards the Host CPU, including sending MSI-X interrupts.
+ * This packet is usually sent after the device is moved to D0 state.
+ *
+ * CPUCP_PACKET_TEMPERATURE_GET -
+ * Fetch the current temperature / Max / Max Hyst / Critical /
+ * Critical Hyst of a specified thermal sensor. The packet's
+ * arguments specify the desired sensor and the field to get.
+ *
+ * CPUCP_PACKET_VOLTAGE_GET -
+ * Fetch the voltage / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_CURRENT_GET -
+ * Fetch the current / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_FAN_SPEED_GET -
+ * Fetch the speed / Max / Min of a specified fan. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_GET -
+ * Fetch the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_SET -
+ * Set the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor, type and value.
+ *
+ * CPUCP_PACKET_FREQUENCY_SET -
+ * Set the frequency of a specified PLL. The packet's arguments specify
+ * the PLL and the desired frequency. The actual frequency in the device
+ * might differ from the requested frequency.
+ *
+ * CPUCP_PACKET_FREQUENCY_GET -
+ * Fetch the frequency of a specified PLL. The packet's arguments specify
+ * the PLL.
+ *
+ * CPUCP_PACKET_LED_SET -
+ * Set the state of a specified led. The packet's arguments
+ * specify the led and the desired state.
+ *
+ * CPUCP_PACKET_I2C_WR -
+ * Write 32-bit value to I2C device. The packet's arguments specify the
+ * I2C bus, address and value.
+ *
+ * CPUCP_PACKET_I2C_RD -
+ * Read 32-bit value from I2C device. The packet's arguments specify the
+ * I2C bus and address.
+ *
+ * CPUCP_PACKET_INFO_GET -
+ * Fetch information from the device as specified in the packet's
+ * structure. The host's driver passes the max size it allows the CpuCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ -
+ * Unmask the given IRQ. The IRQ number is specified in the value field.
+ * The packet is sent after receiving an interrupt and printing its
+ * relevant information.
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
+ * Unmask the given IRQs. The IRQs numbers are specified in an array right
+ * after the cpucp_packet structure, where its first element is the array
+ * length. The packet is sent after a soft reset was done in order to
+ * handle any interrupts that were sent during the reset process.
+ *
+ * CPUCP_PACKET_TEST -
+ * Test packet for CpuCP connectivity. The CPU will put the fence value
+ * in the result field.
+ *
+ * CPUCP_PACKET_FREQUENCY_CURR_GET -
+ * Fetch the current frequency of a specified PLL. The packet's arguments
+ * specify the PLL.
+ *
+ * CPUCP_PACKET_MAX_POWER_GET -
+ * Fetch the maximal power of the device.
+ *
+ * CPUCP_PACKET_MAX_POWER_SET -
+ * Set the maximal power of the device. The packet's arguments specify
+ * the power.
+ *
+ * CPUCP_PACKET_EEPROM_DATA_GET -
+ * Get EEPROM data from the CpuCP kernel. The buffer is specified in the
+ * addr field. The CPU will put the returned data size in the result
+ * field. In addition, the host's driver passes the max size it allows the
+ * CpuCP to write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_NIC_INFO_GET -
+ * Fetch information from the device regarding the NIC. The host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to
+ * prevent data corruption in case of mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_TEMPERATURE_SET -
+ * Set the value of the offset property of a specified thermal sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_VOLTAGE_SET -
+ * Trigger the reset_history property of a specified voltage sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_CURRENT_SET -
+ * Trigger the reset_history property of a specified current sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET -
+ * Get throughput of PCIe.
+ * The packet's arguments specify the transaction direction (TX/RX).
+ * The window measurement is 10[msec], and the return value is in KB/sec.
+ *
+ * CPUCP_PACKET_PCIE_REPLAY_CNT_GET -
+ * Replay count measures the number of "replay" events, which is basically
+ * the number of retries done by PCIe.
+ *
+ * CPUCP_PACKET_TOTAL_ENERGY_GET -
+ * Total Energy is a measurement of energy from the time the FW Linux
+ * is loaded. It is calculated by multiplying the average power
+ * by the time passed since armcp start. The units are millijoules.
+ *
+ * CPUCP_PACKET_PLL_INFO_GET -
+ * Fetch the frequencies of a PLL from the required PLL IP.
+ * The packet's arguments specify the device PLL type;
+ * the PLL type is taken from the device's pll_index enum.
+ * The result is composed of 4 outputs, each a 16-bit
+ * frequency in MHz.
+ *
+ * CPUCP_PACKET_POWER_GET -
+ * Fetch the present power consumption of the device (Current * Voltage).
+ *
+ * CPUCP_PACKET_NIC_PFC_SET -
+ * Enable/Disable the NIC PFC feature. The packet's arguments specify the
+ * NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_FAULT_GET -
+ * Fetch the current indication for local/remote faults from the NIC MAC.
+ * The result is 32-bit value of the relevant register.
+ *
+ * CPUCP_PACKET_NIC_LPBK_SET -
+ * Enable/Disable the MAC loopback feature. The packet's arguments specify
+ * the NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_MAC_INIT -
+ * Configure the NIC MAC channels. The packet's arguments specify the
+ * NIC port and the speed.
+ *
+ * CPUCP_PACKET_MSI_INFO_SET -
+ * Set the index number for each supported MSI type going from
+ * host to device.
+ *
+ * CPUCP_PACKET_NIC_XPCS91_REGS_GET -
+ * Fetch the un/correctable counters values from the NIC MAC.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_GET -
+ * Fetch various NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_CLR -
+ * Clear the various NIC MAC counters in the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET -
+ * Fetch all NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_IS_IDLE_CHECK -
+ * Check if the device is IDLE in regard to the DMA/compute engines
+ * and QMANs. The f/w will return a bitmask where each bit represents
+ * a different engine or QMAN according to enum cpucp_idle_mask.
+ * The bit will be 1 if the engine is NOT idle.
+ *
+ * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET -
+ * Fetch all HBM replaced rows and pending-to-be-replaced rows data.
+ *
+ * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS -
+ * Fetch the status of HBM rows that are pending replacement and need a
+ * reboot to be replaced.
+ *
+ * CPUCP_PACKET_POWER_SET -
+ * Resets power history of device to 0
+ *
+ * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
+ * Packet to perform engine core ASID configuration
+ *
+ * CPUCP_PACKET_SEC_ATTEST_GET -
+ * Get the attestation data that is collected during various stages of the
+ * boot sequence. The attestation data is also hashed with a unique
+ * number (nonce) provided by the host to prevent replay attacks.
+ * A public key and certificate are also provided as part of the FW response.
+ *
+ * CPUCP_PACKET_INFO_SIGNED_GET -
+ * Get the device information signed by the Trusted Platform device.
+ * The device info data is also hashed with a unique number (nonce) provided
+ * by the host to prevent replay attacks. A public key and certificate are
+ * also provided as part of the FW response.
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ * Get a dump of the monitor registers from the CpuCP kernel.
+ * The CPU will put the registers dump in a buffer allocated by the driver,
+ * whose address is passed via the CpuCP packet. In addition, the host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to prevent
+ * data corruption in case of mismatched driver/FW versions.
+ * Obsolete.
+ *
+ * CPUCP_PACKET_GENERIC_PASSTHROUGH -
+ * Generic opcode for all firmware info that is only passed to host
+ * through the LKD, without getting parsed there.
+ *
+ * CPUCP_PACKET_ACTIVE_STATUS_SET -
+ * LKD sends the FW an indication of whether the device is free or in use;
+ * this indication is also reported to the BMC.
+ *
+ * CPUCP_PACKET_SOFT_RESET -
+ * Packet to perform soft-reset.
+ *
+ * CPUCP_PACKET_INTS_REGISTER -
+ * Packet to inform FW that queues have been established and LKD is ready to receive
+ * EQ events.
+ */
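The synchronous flow described above can be summarized in a short sketch;
"pkt" is assumed to point at the message location in device DDR, and
ring_cpucp_doorbell() is a hypothetical stand-in for the write to
PSOC_ARC1_AUX_SW_INTR (real driver code differs):

static u64 cpucp_send_sync(struct cpucp_packet *pkt)
{
	pkt->fence = 0;			/* clear the fence object */
	ring_cpucp_doorbell();		/* issue interrupt 121 to the ARC CPU */

	/* poll until CpuCP signals the fence object */
	while (le32_to_cpu(pkt->fence) != CPUCP_PACKET_FENCE_VAL)
		usleep_range(100, 200);

	return le64_to_cpu(pkt->result);	/* relevant for GET packets */
}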
+
+enum cpucp_packet_id {
+ CPUCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
+ CPUCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
+ CPUCP_PACKET_TEMPERATURE_GET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_GET, /* sysfs */
+ CPUCP_PACKET_CURRENT_GET, /* sysfs */
+ CPUCP_PACKET_FAN_SPEED_GET, /* sysfs */
+ CPUCP_PACKET_PWM_GET, /* sysfs */
+ CPUCP_PACKET_PWM_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_GET, /* sysfs */
+ CPUCP_PACKET_LED_SET, /* debugfs */
+ CPUCP_PACKET_I2C_WR, /* debugfs */
+ CPUCP_PACKET_I2C_RD, /* debugfs */
+ CPUCP_PACKET_INFO_GET, /* IOCTL */
+ CPUCP_PACKET_FLASH_PROGRAM_REMOVED,
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
+ CPUCP_PACKET_TEST, /* internal */
+ CPUCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_SET, /* sysfs */
+ CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+ CPUCP_PACKET_NIC_INFO_GET, /* internal */
+ CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_SET, /* sysfs */
+ CPUCP_PACKET_CURRENT_SET, /* sysfs */
+ CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */
+ CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */
+ CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */
+ CPUCP_PACKET_PLL_INFO_GET, /* internal */
+ CPUCP_PACKET_NIC_STATUS, /* internal */
+ CPUCP_PACKET_POWER_GET, /* internal */
+ CPUCP_PACKET_NIC_PFC_SET, /* internal */
+ CPUCP_PACKET_NIC_FAULT_GET, /* internal */
+ CPUCP_PACKET_NIC_LPBK_SET, /* internal */
+ CPUCP_PACKET_NIC_MAC_CFG, /* internal */
+ CPUCP_PACKET_MSI_INFO_SET, /* internal */
+ CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
+ CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
+ CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */
+ CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */
+ CPUCP_PACKET_POWER_SET, /* internal */
+ CPUCP_PACKET_RESERVED, /* not used */
+ CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
+ CPUCP_PACKET_RESERVED2, /* not used */
+ CPUCP_PACKET_SEC_ATTEST_GET, /* internal */
+ CPUCP_PACKET_INFO_SIGNED_GET, /* internal */
+ CPUCP_PACKET_RESERVED4, /* not used */
+ CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
+ CPUCP_PACKET_RESERVED5, /* not used */
+ CPUCP_PACKET_RESERVED6, /* not used */
+ CPUCP_PACKET_RESERVED7, /* not used */
+ CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */
+ CPUCP_PACKET_RESERVED8, /* not used */
+ CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */
+ CPUCP_PACKET_RESERVED9, /* not used */
+ CPUCP_PACKET_RESERVED10, /* not used */
+ CPUCP_PACKET_RESERVED11, /* not used */
+ CPUCP_PACKET_RESERVED12, /* internal */
+ CPUCP_PACKET_RESERVED13, /* internal */
+ CPUCP_PACKET_SOFT_RESET, /* internal */
+ CPUCP_PACKET_INTS_REGISTER, /* internal */
+ CPUCP_PACKET_ID_MAX /* must be last */
+};
+
+#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
+
+#define CPUCP_PKT_CTL_RC_SHIFT 12
+#define CPUCP_PKT_CTL_RC_MASK 0x0000F000
+
+#define CPUCP_PKT_CTL_OPCODE_SHIFT 16
+#define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
+
+#define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000ull
+#define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32
+#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000ull
+#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48
+#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull
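A minimal sketch of splitting the 64-bit result of CPUCP_PACKET_PLL_INFO_GET
into its four 16-bit MHz frequencies (hypothetical helper):

static void unpack_pll_freqs(u64 result, u16 freq[4])
{
	freq[0] = (result & CPUCP_PKT_RES_PLL_OUT0_MASK) >> CPUCP_PKT_RES_PLL_OUT0_SHIFT;
	freq[1] = (result & CPUCP_PKT_RES_PLL_OUT1_MASK) >> CPUCP_PKT_RES_PLL_OUT1_SHIFT;
	freq[2] = (result & CPUCP_PKT_RES_PLL_OUT2_MASK) >> CPUCP_PKT_RES_PLL_OUT2_SHIFT;
	freq[3] = (result & CPUCP_PKT_RES_PLL_OUT3_MASK) >> CPUCP_PKT_RES_PLL_OUT3_SHIFT;
}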
+
+#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull
+
+#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_PFC_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_LPBK_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_LPBK_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull
+
+/* heartbeat status bits */
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
+
+struct cpucp_packet {
+ union {
+ __le64 value; /* For SET packets */
+ __le64 result; /* For GET packets */
+ __le64 addr; /* For PQ */
+ };
+
+ __le32 ctl;
+
+ __le32 fence; /* Signal to host that message is completed */
+
+ union {
+ struct {/* For temperature/current/voltage/fan/pwm get/set */
+ __le16 sensor_index;
+ __le16 type;
+ };
+
+ struct { /* For I2C read/write */
+ __u8 i2c_bus;
+ __u8 i2c_addr;
+ __u8 i2c_reg;
+ /*
+ * In legacy implementations, i2c_len was not present;
+ * it was unused and just added as pad.
+ * So if i2c_len is 0, it is treated as legacy
+ * and 1 byte is read/written, else if i2c_len is specified,
+ * it is treated as the new multi-byte r/w support.
+ */
+ __u8 i2c_len;
+ };
+
+ struct {/* For PLL info fetch */
+ __le16 pll_type;
+ /* TODO pll_reg is kept temporary before removal */
+ __le16 pll_reg;
+ };
+
+ /* For any general request */
+ __le32 index;
+
+ /* For frequency get/set */
+ __le32 pll_index;
+
+ /* For led set */
+ __le32 led_index;
+
+ /* For get CpuCP info/EEPROM data/NIC info */
+ __le32 data_max_size;
+
+ /*
+ * For any general status bitmask. Shall be used whenever the
+ * result cannot be used to hold general purpose data.
+ */
+ __le32 status_mask;
+ };
+
+ union {
+ /* For NIC requests */
+ __le32 port_index;
+
+ /* For Generic packet sub index */
+ __le32 pkt_subidx;
+
+ /* random, used-once number (nonce), for security packets */
+ __le32 nonce;
+ };
+};
+
+struct cpucp_unmask_irq_arr_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 irqs[];
+};
+
+struct cpucp_nic_status_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+struct cpucp_array_data_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+enum cpucp_led_index {
+ CPUCP_LED0_INDEX = 0,
+ CPUCP_LED1_INDEX,
+ CPUCP_LED2_INDEX,
+ CPUCP_LED_MAX_INDEX = CPUCP_LED2_INDEX
+};
+
+/*
+ * enum cpucp_packet_rc - Error return code
+ * @cpucp_packet_success -> in case of success.
+ * @cpucp_packet_invalid -> this is to support first generation platforms.
+ * @cpucp_packet_fault -> in case of processing error like failing to
+ * get device binding or semaphore etc.
+ * @cpucp_packet_invalid_pkt -> when the cpucp packet is unsupported.
+ * @cpucp_packet_invalid_params -> when a parameter check fails, e.g. on the
+ * length of a buffer or an attribute value.
+ * @cpucp_packet_rc_max -> indicates the size of the enum, so it must be last.
+ */
+enum cpucp_packet_rc {
+ cpucp_packet_success,
+ cpucp_packet_invalid,
+ cpucp_packet_fault,
+ cpucp_packet_invalid_pkt,
+ cpucp_packet_invalid_params,
+ cpucp_packet_rc_max
+};
+
+/*
+ * cpucp_temp_type should adhere to hwmon_temp_attributes
+ * defined in Linux kernel hwmon.h file
+ */
+enum cpucp_temp_type {
+ cpucp_temp_input,
+ cpucp_temp_min = 4,
+ cpucp_temp_min_hyst,
+ cpucp_temp_max = 6,
+ cpucp_temp_max_hyst,
+ cpucp_temp_crit,
+ cpucp_temp_crit_hyst,
+ cpucp_temp_offset = 19,
+ cpucp_temp_lowest = 21,
+ cpucp_temp_highest = 22,
+ cpucp_temp_reset_history = 23,
+ cpucp_temp_warn = 24,
+ cpucp_temp_max_crit = 25,
+ cpucp_temp_max_warn = 26,
+};
+
+enum cpucp_in_attributes {
+ cpucp_in_input,
+ cpucp_in_min,
+ cpucp_in_max,
+ cpucp_in_lowest = 6,
+ cpucp_in_highest = 7,
+ cpucp_in_reset_history,
+ cpucp_in_intr_alarm_a,
+ cpucp_in_intr_alarm_b,
+};
+
+enum cpucp_curr_attributes {
+ cpucp_curr_input,
+ cpucp_curr_min,
+ cpucp_curr_max,
+ cpucp_curr_lowest = 6,
+ cpucp_curr_highest = 7,
+ cpucp_curr_reset_history
+};
+
+enum cpucp_fan_attributes {
+ cpucp_fan_input,
+ cpucp_fan_min = 2,
+ cpucp_fan_max
+};
+
+enum cpucp_pwm_attributes {
+ cpucp_pwm_input,
+ cpucp_pwm_enable
+};
+
+enum cpucp_pcie_throughput_attributes {
+ cpucp_pcie_throughput_tx,
+ cpucp_pcie_throughput_rx
+};
+
+/* TODO temporary kept before removal */
+enum cpucp_pll_reg_attributes {
+ cpucp_pll_nr_reg,
+ cpucp_pll_nf_reg,
+ cpucp_pll_od_reg,
+ cpucp_pll_div_factor_reg,
+ cpucp_pll_div_sel_reg
+};
+
+/* TODO temporary kept before removal */
+enum cpucp_pll_type_attributes {
+ cpucp_pll_cpu,
+ cpucp_pll_pci,
+};
+
+/*
+ * cpucp_power_type aligns with hwmon_power_attributes
+ * defined in Linux kernel hwmon.h file
+ */
+enum cpucp_power_type {
+ CPUCP_POWER_INPUT = 8,
+ CPUCP_POWER_INPUT_HIGHEST = 9,
+ CPUCP_POWER_RESET_INPUT_HISTORY = 11
+};
+
+/*
+ * MSI type enumeration table for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum cpucp_msi_type {
+ CPUCP_EVENT_QUEUE_MSI_TYPE,
+ CPUCP_NIC_PORT1_MSI_TYPE,
+ CPUCP_NIC_PORT3_MSI_TYPE,
+ CPUCP_NIC_PORT5_MSI_TYPE,
+ CPUCP_NIC_PORT7_MSI_TYPE,
+ CPUCP_NIC_PORT9_MSI_TYPE,
+ CPUCP_EVENT_QUEUE_ERR_MSI_TYPE,
+ CPUCP_NUM_OF_MSI_TYPES
+};
+
+/*
+ * PLL enumeration table used for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table.
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum pll_index {
+ CPU_PLL = 0,
+ PCI_PLL = 1,
+ NIC_PLL = 2,
+ DMA_PLL = 3,
+ MESH_PLL = 4,
+ MME_PLL = 5,
+ TPC_PLL = 6,
+ IF_PLL = 7,
+ SRAM_PLL = 8,
+ NS_PLL = 9,
+ HBM_PLL = 10,
+ MSS_PLL = 11,
+ DDR_PLL = 12,
+ VID_PLL = 13,
+ BANK_PLL = 14,
+ MMU_PLL = 15,
+ IC_PLL = 16,
+ MC_PLL = 17,
+ EMMC_PLL = 18,
+ D2D_PLL = 19,
+ CS_PLL = 20,
+ C2C_PLL = 21,
+ NCH_PLL = 22,
+ C2M_PLL = 23,
+ PLL_MAX
+};
+
+enum rl_index {
+ TPC_RL = 0,
+ MME_RL,
+ EDMA_RL,
+};
+
+enum pvt_index {
+ PVT_SW,
+ PVT_SE,
+ PVT_NW,
+ PVT_NE
+};
+
+/* Event Queue Packets */
+
+struct eq_generic_event {
+ __le64 data[7];
+};
+
+/*
+ * CpuCP info
+ */
+
+#define CARD_NAME_MAX_LEN 16
+#define CPUCP_MAX_SENSORS 128
+#define CPUCP_MAX_NICS 128
+#define CPUCP_LANES_PER_NIC 4
+#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN 1024
+#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC)
+#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64)
+#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64)
+#define CPUCP_HBM_ROW_REPLACE_MAX 32
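The mask arrays pack one bit per NIC (or lane) into __le64 words, hence the
(x + 63) / 64 sizing above; a sketch of testing one NIC's bit (hypothetical
helper):

static bool nic_link_up(const __le64 *link_mask, unsigned int nic)
{
	return le64_to_cpu(link_mask[nic / 64]) & BIT_ULL(nic % 64);
}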
+
+struct cpucp_sensor {
+ __le32 type;
+ __le32 flags;
+};
+
+/**
+ * enum cpucp_card_types - ASIC card type.
+ * @cpucp_card_type_pci: PCI card.
+ * @cpucp_card_type_pmc: PCI Mezzanine Card.
+ */
+enum cpucp_card_types {
+ cpucp_card_type_pci,
+ cpucp_card_type_pmc
+};
+
+#define CPUCP_SEC_CONF_ENABLED_SHIFT 0
+#define CPUCP_SEC_CONF_ENABLED_MASK 0x00000001
+
+#define CPUCP_SEC_CONF_FLASH_WP_SHIFT 1
+#define CPUCP_SEC_CONF_FLASH_WP_MASK 0x00000002
+
+#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT 2
+#define CPUCP_SEC_CONF_EEPROM_WP_MASK 0x00000004
+
+/**
+ * struct cpucp_security_info - Security information.
+ * @config: configuration bit field
+ * @keys_num: number of stored keys
+ * @revoked_keys: revoked keys bit field
+ * @min_svn: minimal security version
+ */
+struct cpucp_security_info {
+ __u8 config;
+ __u8 keys_num;
+ __u8 revoked_keys;
+ __u8 min_svn;
+};
+
+/**
+ * struct cpucp_info - Info from CpuCP that is necessary to the host's driver
+ * @sensors: available sensors description.
+ * @kernel_version: CpuCP linux kernel version.
+ * @reserved1: reserved field.
+ * @card_type: card configuration type.
+ * @card_location: in a server, each card has a different connection topology
+ * depending on its location (relevant for PMC card type)
+ * @cpld_version: CPLD programmed F/W version.
+ * @infineon_version: Infineon main DC-DC version.
+ * @fuse_version: silicon production FUSE information.
+ * @thermal_version: thermald S/W version.
+ * @cpucp_version: CpuCP S/W version.
+ * @infineon_second_stage_version: Infineon 2nd stage DC-DC version.
+ * @dram_size: available DRAM size.
+ * @card_name: card name that will be displayed in HWMON subsystem on the host
+ * @tpc_binning_mask: TPC binning mask, 1 bit per TPC instance
+ * (0 = functional, 1 = binned)
+ * @decoder_binning_mask: Decoder binning mask, 1 bit per decoder instance
+ * (0 = functional, 1 = binned), maximum 1 per dcore
+ * @sram_binning: Categorize SRAM functionality
+ * (0 = fully functional, 1 = lower-half is not functional,
+ * 2 = upper-half is not functional)
+ * @sec_info: security information
+ * @cpld_timestamp: CPLD programmed F/W timestamp.
+ * @pll_map: Bit map of supported PLLs for current ASIC version.
+ * @mme_binning_mask: MME binning mask,
+ * bits [0:6] <==> dcore0 mme fma
+ * bits [7:13] <==> dcore1 mme fma
+ * bits [14:20] <==> dcore0 mme ima
+ * bits [21:27] <==> dcore1 mme ima
+ * For each group, if the 6th bit is set then the first 5 bits
+ * represent the column index [0-31]; otherwise these bits are
+ * ignored and column index 32 is binned. The 7th bit is don't-care.
+ * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
+ * (0 = functional 1 = binned)
+ * @memory_repair_flag: eFuse flag indicating memory repair
+ * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance
+ * (0 = functional 1 = binned)
+ * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance
+ * (0 = functional 1 = binned)
+ * @interposer_version: Interposer version programmed in eFuse
+ * @substrate_version: Substrate version programmed in eFuse
+ * @eq_health_check_supported: eq health check feature supported in FW.
+ * @fw_hbm_region_size: Size in bytes of FW reserved region in HBM.
+ * @fw_os_version: Firmware OS Version
+ */
+struct cpucp_info {
+ struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
+ __u8 kernel_version[VERSION_MAX_LEN];
+ __le32 reserved1;
+ __le32 card_type;
+ __le32 card_location;
+ __le32 cpld_version;
+ __le32 infineon_version;
+ __u8 fuse_version[VERSION_MAX_LEN];
+ __u8 thermal_version[VERSION_MAX_LEN];
+ __u8 cpucp_version[VERSION_MAX_LEN];
+ __le32 infineon_second_stage_version;
+ __le64 dram_size;
+ char card_name[CARD_NAME_MAX_LEN];
+ __le64 tpc_binning_mask;
+ __le64 decoder_binning_mask;
+ __u8 sram_binning;
+ __u8 dram_binning_mask;
+ __u8 memory_repair_flag;
+ __u8 edma_binning_mask;
+ __u8 xbar_binning_mask;
+ __u8 interposer_version;
+ __u8 substrate_version;
+ __u8 eq_health_check_supported;
+ struct cpucp_security_info sec_info;
+ __le32 cpld_timestamp;
+ __u8 pll_map[PLL_MAP_LEN];
+ __le64 mme_binning_mask;
+ __u8 fw_os_version[VERSION_MAX_LEN];
+};
+
+struct cpucp_mac_addr {
+ __u8 mac_addr[ETH_ALEN];
+};
+
+enum cpucp_serdes_type {
+ TYPE_1_SERDES_TYPE,
+ TYPE_2_SERDES_TYPE,
+ HLS1_SERDES_TYPE,
+ HLS1H_SERDES_TYPE,
+ HLS2_SERDES_TYPE,
+ HLS2_TYPE_1_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE, /* number of types */
+ UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */
+};
+
+struct cpucp_nic_info {
+ struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS];
+ __le64 link_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
+ __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le16 serdes_type; /* enum cpucp_serdes_type */
+ __le16 tx_swap_map[CPUCP_MAX_NICS];
+ __u8 reserved[6];
+};
+
+#define PAGE_DISCARD_MAX 64
+
+struct page_discard_info {
+ __u8 num_entries;
+ __u8 reserved[7];
+ __le32 mmu_page_idx[PAGE_DISCARD_MAX];
+};
+
+/*
+ * struct frac_val - fractional value represented by "integer.frac".
+ * @integer: the integer part of the fractional value;
+ * @frac: the fractional part of the fractional value.
+ */
+struct frac_val {
+ union {
+ struct {
+ __le16 integer;
+ __le16 frac;
+ };
+ __le32 val;
+ };
+};
+
+/*
+ * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
+ * @integer: the integer part of the SER value;
+ * @exp: the exponent part of the SER value.
+ */
+struct ser_val {
+ __le16 integer;
+ __le16 exp;
+};
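Both wrappers decode with simple arithmetic: frac_val reads as "integer.frac"
and SER = integer * 10^-exp. A sketch (hypothetical helper):

static void print_ser(const struct ser_val *v)
{
	pr_info("SER: %ue-%u\n", le16_to_cpu(v->integer), le16_to_cpu(v->exp));
}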
+
+/*
+ * struct cpucp_nic_status - describes the status of a NIC port.
+ * @port: NIC port index.
+ * @bad_format_cnt: e.g. CRC.
+ * @responder_out_of_sequence_psn_cnt: e.g NAK.
+ * @high_ber_reinit: link reinit counter due to high BER.
+ * @correctable_err_cnt: e.g. bit-flip.
+ * @uncorrectable_err_cnt: e.g. MAC errors.
+ * @retraining_cnt: re-training counter.
+ * @up: is port up.
+ * @pcs_link: has PCS link.
+ * @phy_ready: is PHY ready.
+ * @auto_neg: is Autoneg enabled.
+ * @timeout_retransmission_cnt: timeout retransmission events.
+ * @high_ber_cnt: high ber events.
+ * @pre_fec_ser: pre FEC SER value.
+ * @post_fec_ser: post FEC SER value.
+ * @bandwidth: measured bandwidth.
+ * @lat: measured latency.
+ */
+struct cpucp_nic_status {
+ __le32 port;
+ __le32 bad_format_cnt;
+ __le32 responder_out_of_sequence_psn_cnt;
+ __le32 high_ber_reinit;
+ __le32 correctable_err_cnt;
+ __le32 uncorrectable_err_cnt;
+ __le32 retraining_cnt;
+ __u8 up;
+ __u8 pcs_link;
+ __u8 phy_ready;
+ __u8 auto_neg;
+ __le32 timeout_retransmission_cnt;
+ __le32 high_ber_cnt;
+ struct ser_val pre_fec_ser;
+ struct ser_val post_fec_ser;
+ struct frac_val bandwidth;
+ struct frac_val lat;
+};
+
+enum cpucp_hbm_row_replace_cause {
+ REPLACE_CAUSE_DOUBLE_ECC_ERR,
+ REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR,
+};
+
+struct cpucp_hbm_row_info {
+ __u8 hbm_idx;
+ __u8 pc;
+ __u8 sid;
+ __u8 bank_idx;
+ __le16 row_addr;
+ __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */
+ __u8 pad;
+};
+
+struct cpucp_hbm_row_replaced_rows_info {
+ __le16 num_replaced_rows;
+ __u8 pad[6];
+ struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
+};
+
+enum cpu_reset_status {
+ CPU_RST_STATUS_NA = 0,
+ CPU_RST_STATUS_SOFT_RST_DONE = 1,
+};
+
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+
+/*
+ * struct cpucp_sec_attest_info - attestation report of the boot
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @nonce: number used only once; a random number provided by the host. It is
+ * also passed to the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pcr_quote: attestation report data structure
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @quote_sig: signature structure of the attestation report
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the attestation signing key
+ */
+struct cpucp_sec_attest_info {
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __le16 pad0;
+ __le32 nonce;
+ __le16 pcr_quote_len;
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig_len;
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+/*
+ * struct cpucp_dev_info_signed - device information signed by a secured device
+ * @info: device information structure as defined above
+ * @nonce: number used only once; a random number provided by the host. This
+ * number is hashed and signed along with the device information.
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @info_sig: signature of the info + nonce data.
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed info data
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the signing key
+ */
+struct cpucp_dev_info_signed {
+ struct cpucp_info info; /* assumed to be 64bit aligned */
+ __le32 nonce;
+ __le32 pad0;
+ __u8 info_sig_len;
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+#define DCORE_MON_REGS_SZ 512
+/*
+ * struct dcore_monitor_regs_data - DCORE monitor regs data.
+ * The structure follows the sync manager block layout. Obsolete.
+ * @mon_pay_addrl: array of payload address low bits.
+ * @mon_pay_addrh: array of payload address high bits.
+ * @mon_pay_data: array of payload data.
+ * @mon_arm: array of monitor arm.
+ * @mon_status: array of monitor status.
+ */
+struct dcore_monitor_regs_data {
+ __le32 mon_pay_addrl[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_addrh[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_data[DCORE_MON_REGS_SZ];
+ __le32 mon_arm[DCORE_MON_REGS_SZ];
+ __le32 mon_status[DCORE_MON_REGS_SZ];
+};
+
+/* contains SM data for each SYNC_MNGR (Obsolete) */
+struct cpucp_monitor_dump {
+ struct dcore_monitor_regs_data sync_mngr_w_s;
+ struct dcore_monitor_regs_data sync_mngr_e_s;
+ struct dcore_monitor_regs_data sync_mngr_w_n;
+ struct dcore_monitor_regs_data sync_mngr_e_n;
+};
+
+/*
+ * The type of the generic request (and other input arguments) is fetched from the user by
+ * reading the "pkt_subidx" field in struct cpucp_packet.
+ *
+ * HL_PASSTHROUGH_VERSIONS - Fetch all firmware versions.
+ * HL_GET_ERR_COUNTERS_CMD - Command to get error counters
+ * HL_GET_P_STATE - get performance state
+ */
+enum hl_passthrough_type {
+ HL_PASSTHROUGH_VERSIONS,
+ HL_GET_ERR_COUNTERS_CMD,
+ HL_GET_P_STATE,
+};
+
+#endif /* CPUCP_IF_H */
diff --git a/include/linux/habanalabs/hl_boot_if.h b/include/linux/habanalabs/hl_boot_if.h
new file mode 100644
index 000000000000..af5fb4ad77eb
--- /dev/null
+++ b/include/linux/habanalabs/hl_boot_if.h
@@ -0,0 +1,807 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2023 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HL_BOOT_IF_H
+#define HL_BOOT_IF_H
+
+#define LKD_HARD_RESET_MAGIC 0xED7BD694 /* deprecated - do not use */
+#define HL_POWER9_HOST_MAGIC 0x1DA30009
+
+#define BOOT_FIT_SRAM_OFFSET 0x200000
+
+#define VERSION_MAX_LEN 128
+
+enum cpu_boot_err {
+ CPU_BOOT_ERR_DRAM_INIT_FAIL = 0,
+ CPU_BOOT_ERR_FIT_CORRUPTED = 1,
+ CPU_BOOT_ERR_TS_INIT_FAIL = 2,
+ CPU_BOOT_ERR_DRAM_SKIPPED = 3,
+ CPU_BOOT_ERR_BMC_WAIT_SKIPPED = 4,
+ CPU_BOOT_ERR_NIC_DATA_NOT_RDY = 5,
+ CPU_BOOT_ERR_NIC_FW_FAIL = 6,
+ CPU_BOOT_ERR_SECURITY_NOT_RDY = 7,
+ CPU_BOOT_ERR_SECURITY_FAIL = 8,
+ CPU_BOOT_ERR_EFUSE_FAIL = 9,
+ CPU_BOOT_ERR_PRI_IMG_VER_FAIL = 10,
+ CPU_BOOT_ERR_SEC_IMG_VER_FAIL = 11,
+ CPU_BOOT_ERR_PLL_FAIL = 12,
+ CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13,
+ CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18,
+ CPU_BOOT_ERR_BINNING_FAIL = 19,
+ CPU_BOOT_ERR_TPM_FAIL = 20,
+ CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21,
+ CPU_BOOT_ERR_EEPROM_FAIL = 22,
+ CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL = 23,
+ CPU_BOOT_ERR_ENABLED = 31,
+ CPU_BOOT_ERR_SCND_EN = 63,
+ CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * Mask for fatal failures
+ * This mask contains all possible fatal failures, and dynamic code
+ * will clear the non-relevant ones.
+ */
+#define CPU_BOOT_ERR_FATAL_MASK \
+ ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \
+ (1 << CPU_BOOT_ERR_PLL_FAIL) | \
+ (1 << CPU_BOOT_ERR_BINNING_FAIL) | \
+ (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
+ (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \
+ (1 << CPU_BOOT_ERR_EEPROM_FAIL))
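A sketch (hypothetical helper) of classifying a boot error word with the fatal
mask; the bits are meaningful only once CPU_BOOT_ERR_ENABLED is set:

static bool boot_err_is_fatal(u32 err0)
{
	if (!(err0 & (1 << CPU_BOOT_ERR_ENABLED)))
		return false;	/* error bits are not populated yet */

	return !!(err0 & CPU_BOOT_ERR_FATAL_MASK);
}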
+
+/*
+ * CPU error bits in BOOT_ERROR registers
+ *
+ * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
+ * DRAM is not reliable to use.
+ *
+ * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the
+ * image provided by the host has failed.
+ *
+ * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed.
+ * Boot continues as usual, but keep in
+ * mind this is a warning.
+ *
+ * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped.
+ * Skipping DRAM initialization has been
+ * requested (e.g. strap, command, etc.)
+ * and FW skipped the DRAM initialization.
+ * Host can initialize the DRAM.
+ *
+ * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped.
+ * Meaning the BMC data might not be
+ * available until reset.
+ *
+ * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready.
+ * BMC has not provided the NIC data yet.
+ * Once provided this bit will be cleared.
+ *
+ * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed.
+ * The NIC FW loading and initialization
+ * failed. This means NICs are not usable.
+ *
+ * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been
+ * started, but is not ready yet - chip
+ * cannot be accessed.
+ *
+ * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed.
+ * The tasks are security init (root of
+ * trust), boot authentication (chain of
+ * trust), data packets authentication.
+ *
+ * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed.
+ * The PCI device ID might be wrong.
+ *
+ * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL Verification of primary image failed.
+ * It means that the ppboot checksum
+ * verification for the preboot primary
+ * image has failed to match the expected
+ * checksum. Trying to program the image again
+ * might solve this.
+ *
+ * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL Verification of secondary image failed.
+ * It means that the ppboot checksum
+ * verification for the preboot secondary
+ * image has failed to match the expected
+ * checksum. Trying to program the image again
+ * might solve this.
+ *
+ * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one
+ * of the PLLs remains in REF_CLK
+ *
+ * CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
+ * should be contacted.
+ *
+ * CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR Critical error was detected during
+ * the execution of ppboot or preboot.
+ * for example: stack overflow.
+ *
+ * CPU_BOOT_ERR0_BINNING_FAIL Binning settings failed, meaning
+ * malfunctioning components might still be
+ * in use.
+ *
+ * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed.
+ *
+ * CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL Failed to set threshold for temperature
+ * sensor.
+ *
+ * CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults
+ * are used.
+ *
+ * CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL Failed scrubbing the Engines/ARCFarm
+ * memories. Boot disabled until reset.
+ *
+ * CPU_BOOT_ERR0_ENABLED Error registers enabled.
+ * This is a main indication that the
+ * running FW populates the error
+ * registers. Meaning the error bits are
+ * not garbage, but actual error statuses.
+ */
+#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << CPU_BOOT_ERR_DRAM_INIT_FAIL)
+#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << CPU_BOOT_ERR_FIT_CORRUPTED)
+#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << CPU_BOOT_ERR_TS_INIT_FAIL)
+#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << CPU_BOOT_ERR_DRAM_SKIPPED)
+#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << CPU_BOOT_ERR_BMC_WAIT_SKIPPED)
+#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << CPU_BOOT_ERR_NIC_DATA_NOT_RDY)
+#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << CPU_BOOT_ERR_NIC_FW_FAIL)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << CPU_BOOT_ERR_SECURITY_NOT_RDY)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << CPU_BOOT_ERR_SECURITY_FAIL)
+#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << CPU_BOOT_ERR_EFUSE_FAIL)
+#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << CPU_BOOT_ERR_PRI_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << CPU_BOOT_ERR_SEC_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_PLL_FAIL (1 << CPU_BOOT_ERR_PLL_FAIL)
+#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL)
+#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR)
+#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
+#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
+#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL)
+#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL)
+#define CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL)
+#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+
+enum cpu_boot_dev_sts {
+ CPU_BOOT_DEV_STS_SECURITY_EN = 0,
+ CPU_BOOT_DEV_STS_DEBUG_EN = 1,
+ CPU_BOOT_DEV_STS_WATCHDOG_EN = 2,
+ CPU_BOOT_DEV_STS_DRAM_INIT_EN = 3,
+ CPU_BOOT_DEV_STS_BMC_WAIT_EN = 4,
+ CPU_BOOT_DEV_STS_E2E_CRED_EN = 5,
+ CPU_BOOT_DEV_STS_HBM_CRED_EN = 6,
+ CPU_BOOT_DEV_STS_RL_EN = 7,
+ CPU_BOOT_DEV_STS_SRAM_SCR_EN = 8,
+ CPU_BOOT_DEV_STS_DRAM_SCR_EN = 9,
+ CPU_BOOT_DEV_STS_FW_HARD_RST_EN = 10,
+ CPU_BOOT_DEV_STS_PLL_INFO_EN = 11,
+ CPU_BOOT_DEV_STS_SP_SRAM_EN = 12,
+ CPU_BOOT_DEV_STS_CLK_GATE_EN = 13,
+ CPU_BOOT_DEV_STS_HBM_ECC_EN = 14,
+ CPU_BOOT_DEV_STS_PKT_PI_ACK_EN = 15,
+ CPU_BOOT_DEV_STS_FW_LD_COM_EN = 16,
+ CPU_BOOT_DEV_STS_FW_IATU_CONF_EN = 17,
+ CPU_BOOT_DEV_STS_FW_NIC_MAC_EN = 18,
+ CPU_BOOT_DEV_STS_DYN_PLL_EN = 19,
+ CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN = 20,
+ CPU_BOOT_DEV_STS_EQ_INDEX_EN = 21,
+ CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN = 22,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN = 23,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24,
+ CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25,
+ CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26,
+ CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN = 27,
+ CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN = 28,
+ CPU_BOOT_DEV_STS_ENABLED = 31,
+ CPU_BOOT_DEV_STS_SCND_EN = 63,
+ CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers
+ *
+ * CPU_BOOT_DEV_STS0_SECURITY_EN Security is Enabled.
+ * This is an indication for security
+ * enabled in FW, which means that
+ * all conditions for security are met:
+ * device is indicated as security enabled,
+ * registers are protected, and device
+ * uses keys for image verification.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled.
+ * Enabled when JTAG or DEBUG is enabled
+ * in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled.
+ * Watchdog is enabled in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled.
+ * DRAM initialization has been done in FW.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_BMC_WAIT_EN Waiting for BMC data enabled.
+ * If set, it means that during boot,
+ * FW waited for BMC data.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_E2E_CRED_EN E2E credits initialized.
+ * FW initialized E2E credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_CRED_EN HBM credits initialized.
+ * FW initialized HBM credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_RL_EN Rate limiter initialized.
+ * FW initialized rate limiter.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_SRAM_SCR_EN SRAM scrambler enabled.
+ * FW initialized SRAM scrambler.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_SCR_EN DRAM scrambler enabled.
+ * FW initialized DRAM scrambler.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN FW hard reset procedure is enabled.
+ * FW has the hard reset procedure
+ * implemented. This means that FW will
+ * perform hard reset procedure on
+ * receiving the halt-machine event.
+ * Initialized in: preboot, u-boot, linux
+ *
+ * CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_SP_SRAM_EN SP SRAM is initialized and available
+ * for use.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_ECC_EN HBM ECC handling Enabled.
+ * FW handles HBM ECC indications.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN Packets ack value used in the armcpd
+ * is set to the PI counter.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_LD_COM_EN Flexible FW loading communication
+ * protocol is enabled.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN FW iATU configuration is enabled.
+ * If this bit is set, the iATU has been
+ * configured and is ready for use.
+ * Initialized in: ppboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN NIC MAC channels init is done by FW and
+ * any access to them is done via the FW.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled.
+ * FW sends to host a bitmap of supported
+ * PLLs.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN GIC access permission only from
+ * privileged entity. FW sets this status
+ * bit for the host. If this bit is set then
+ * the GIC cannot be accessed from the host.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_EQ_INDEX_EN Event Queue (EQ) index is a running
+ * index for each new event sent to host.
+ * This is used as a method in host to
+ * identify that the waiting event in
+ * queue is actually a new event which
+ * was not served before.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
+ * prevent IRQs overriding each other.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN
+ * NIC STAT and XPCS91 access is restricted
+ * and is done via FW only.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN
+ * NIC STAT get all is supported.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN
+ * F/W checks if the device is idle by reading a defined set
+ * of registers. It returns a bitmask of all the engines,
+ * where a bit is set if the engine is not idle.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MAP_HWMON_EN
+ * If set, means f/w supports proprietary
+ * HWMON enum mapping to cpucp enums.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN
+ * If set, means f/w supports NIC HBM memory clear and
+ * TMR/TXS HBM memory init.
+ * Initialized in: zephyr-mgmt
+ *
+ * CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN
+ * MMU page tables are located in DRAM.
+ * F/W initializes security settings for MMU
+ * page tables to reside in DRAM.
+ * Initialized in: zephyr-mgmt
+ *
+ * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
+ * This is a main indication that the
+ * running FW populates the device status
+ * register. Meaning the device status
+ * bits are not garbage, but actual
+ * statuses.
+ * Initialized in: preboot
+ *
+ */
+#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << CPU_BOOT_DEV_STS_SECURITY_EN)
+#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << CPU_BOOT_DEV_STS_DEBUG_EN)
+#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << CPU_BOOT_DEV_STS_WATCHDOG_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << CPU_BOOT_DEV_STS_DRAM_INIT_EN)
+#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << CPU_BOOT_DEV_STS_BMC_WAIT_EN)
+#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << CPU_BOOT_DEV_STS_E2E_CRED_EN)
+#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << CPU_BOOT_DEV_STS_HBM_CRED_EN)
+#define CPU_BOOT_DEV_STS0_RL_EN (1 << CPU_BOOT_DEV_STS_RL_EN)
+#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_SRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_DRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << CPU_BOOT_DEV_STS_FW_HARD_RST_EN)
+#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << CPU_BOOT_DEV_STS_PLL_INFO_EN)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << CPU_BOOT_DEV_STS_SP_SRAM_EN)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << CPU_BOOT_DEV_STS_CLK_GATE_EN)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << CPU_BOOT_DEV_STS_HBM_ECC_EN)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << CPU_BOOT_DEV_STS_PKT_PI_ACK_EN)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << CPU_BOOT_DEV_STS_FW_LD_COM_EN)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << CPU_BOOT_DEV_STS_FW_IATU_CONF_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_MAC_EN)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << CPU_BOOT_DEV_STS_DYN_PLL_EN)
+#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << CPU_BOOT_DEV_STS_EQ_INDEX_EN)
+#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
+#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN (1 << CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN)
+#define CPU_BOOT_DEV_STS0_MMU_PGTBL_DRAM_EN (1 << CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN)
+#define CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+#define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+
+enum cpu_boot_status {
+ CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
+ CPU_BOOT_STATUS_IN_WFE = 1,
+ CPU_BOOT_STATUS_DRAM_RDY = 2,
+ CPU_BOOT_STATUS_SRAM_AVAIL = 3,
+ CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */
+ CPU_BOOT_STATUS_IN_PREBOOT = 5,
+ CPU_BOOT_STATUS_IN_SPL, /* deprecated - not reported */
+ CPU_BOOT_STATUS_IN_UBOOT = 7,
+ CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */
+ /* U-Boot console prompt activated, commands are not processed */
+ CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+ /* Finished NICs init, reported after DRAM and NICs */
+ CPU_BOOT_STATUS_NIC_FW_RDY = 11,
+ CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+ /* Last boot loader progress status, ready to receive commands */
+ CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ /* Internal Boot finished, ready for boot-fit */
+ CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
+ /* Internal Security has been initialized, device can be accessed */
+ CPU_BOOT_STATUS_SECURITY_READY = 17,
+ /* FW component is preparing to shut down and communication with host is not available */
+ CPU_BOOT_STATUS_FW_SHUTDOWN_PREP = 18,
+};
+
+enum kmd_msg {
+ KMD_MSG_NA = 0,
+ KMD_MSG_GOTO_WFE,
+ KMD_MSG_FIT_RDY,
+ KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
+ KMD_MSG_LAST
+};
+
+enum cpu_msg_status {
+ CPU_MSG_CLR = 0,
+ CPU_MSG_OK,
+ CPU_MSG_ERR,
+};
+
+/* communication registers mapping - consider ABI when changing */
+struct cpu_dyn_regs {
+ __le32 cpu_pq_base_addr_low;
+ __le32 cpu_pq_base_addr_high;
+ __le32 cpu_pq_length;
+ __le32 cpu_pq_init_status;
+ __le32 cpu_eq_base_addr_low;
+ __le32 cpu_eq_base_addr_high;
+ __le32 cpu_eq_length;
+ __le32 cpu_eq_ci;
+ __le32 cpu_cq_base_addr_low;
+ __le32 cpu_cq_base_addr_high;
+ __le32 cpu_cq_length;
+ __le32 cpu_pf_pq_pi;
+ __le32 cpu_boot_dev_sts0;
+ __le32 cpu_boot_dev_sts1;
+ __le32 cpu_boot_err0;
+ __le32 cpu_boot_err1;
+ __le32 cpu_boot_status;
+ __le32 fw_upd_sts;
+ __le32 fw_upd_cmd;
+ __le32 fw_upd_pending_sts;
+ __le32 fuse_ver_offset;
+ __le32 preboot_ver_offset;
+ __le32 uboot_ver_offset;
+ __le32 hw_state;
+ __le32 kmd_msg_to_cpu;
+ __le32 cpu_cmd_status_to_host;
+ __le32 gic_host_pi_upd_irq;
+ __le32 gic_tpc_qm_irq_ctrl;
+ __le32 gic_mme_qm_irq_ctrl;
+ __le32 gic_dma_qm_irq_ctrl;
+ __le32 gic_nic_qm_irq_ctrl;
+ __le32 gic_dma_core_irq_ctrl;
+ __le32 gic_host_halt_irq;
+ __le32 gic_host_ints_irq;
+ __le32 reserved0;
+ __le32 gic_rot_qm_irq_ctrl;
+ __le32 reserved1;
+ __le32 eng_arc_irq_ctrl;
+ __le32 reserved2[20]; /* reserve for future use */
+};
+
+/* TODO: remove the desc magic after the code is updated to use message */
+/* HCDM - Habana Communications Descriptor Magic */
+#define HL_COMMS_DESC_MAGIC 0x4843444D
+#define HL_COMMS_DESC_VER 3
+
+/* HCMv - Habana Communications Message + header version */
+#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00
+#define HL_COMMS_MSG_MAGIC_MASK 0xFFFFFF00
+#define HL_COMMS_MSG_MAGIC_VER_MASK 0xFF
+
+#define HL_COMMS_MSG_MAGIC_VER(ver) (HL_COMMS_MSG_MAGIC_VALUE | \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC
+#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1)
+#define HL_COMMS_MSG_MAGIC_V2 HL_COMMS_MSG_MAGIC_VER(2)
+#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3)
+
+#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \
+ (((magic) & HL_COMMS_MSG_MAGIC_MASK) == \
+ HL_COMMS_MSG_MAGIC_VALUE)
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_VERSION(magic, ver) \
+ (((magic) & HL_COMMS_MSG_MAGIC_VER_MASK) >= \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE(magic, ver) \
+ (HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC((magic)) && \
+ HL_COMMS_MSG_MAGIC_VALIDATE_VERSION((magic), (ver)))
+
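Taken together, HL_COMMS_MSG_MAGIC_VALIDATE() accepts a magic whose upper 24 bits carry the HCM value and whose low version byte is at least the required one. A usage sketch, assuming msg points at a received message:

    u32 magic = le32_to_cpu(msg->header.magic);

    if (!HL_COMMS_MSG_MAGIC_VALIDATE(magic, HL_COMMS_MSG_MAGIC_V2))
            return -EINVAL; /* not a comms message, or older than v2 */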
+enum comms_msg_type {
+ HL_COMMS_DESC_TYPE = 0,
+ HL_COMMS_RESET_CAUSE_TYPE = 1,
+ HL_COMMS_FW_CFG_SKIP_TYPE = 2,
+ HL_COMMS_BINNING_CONF_TYPE = 3,
+};
+
+/*
+ * Binning information shared between LKD and FW
+ * @tpc_mask_l - TPC binning information lower 64 bit
+ * @dec_mask - Decoder binning information
+ * @dram_mask - DRAM binning information
+ * @edma_mask - EDMA binning information
+ * @mme_mask_l - MME binning information lower 32 bits
+ * @mme_mask_h - MME binning information upper 32 bits
+ * @rot_mask - Rotator binning information
+ * @xbar_mask - xBAR binning information
+ * @reserved - reserved field for future binning info w/o ABI change
+ * @tpc_mask_h - TPC binning information upper 64 bit
+ * @nic_mask - NIC binning information
+ */
+struct lkd_fw_binning_info {
+ __le64 tpc_mask_l;
+ __le32 dec_mask;
+ __le32 dram_mask;
+ __le32 edma_mask;
+ __le32 mme_mask_l;
+ __le32 mme_mask_h;
+ __le32 rot_mask;
+ __le32 xbar_mask;
+ __le32 reserved0;
+ __le64 tpc_mask_h;
+ __le64 nic_mask;
+ __le32 reserved1[8];
+};
+
+/* TODO: remove this struct after the code is updated to use message */
+/* this is the comms descriptor header - meta data */
+struct comms_desc_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the descriptor w/o header */
+ __le16 size; /* size of the descriptor w/o header */
+ __u8 version; /* descriptor version */
+ __u8 reserved[5]; /* pad to 64 bit */
+};
+
+/* this is the comms message header - meta data */
+struct comms_msg_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the message w/o header */
+ __le16 size; /* size of the message w/o header */
+ __u8 version; /* message payload version */
+ __u8 type; /* message type */
+ __u8 reserved[4]; /* pad to 64 bit */
+};
+
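Both headers carry a size and a CRC32 that cover the payload only, so a receiver can bound-check and checksum the body before parsing it. A sketch under assumptions (the CRC polynomial and seed expected by the FW are not specified in this header; the kernel's crc32() and a ~0 seed are used as stand-ins):

    static int comms_msg_validate(const struct comms_msg_header *hdr,
                                  size_t buf_len)
    {
            u16 size = le16_to_cpu(hdr->size);

            if (buf_len < sizeof(*hdr) + size)
                    return -EINVAL;
            /* CRC seed is an assumption */
            if (le32_to_cpu(hdr->crc32) != crc32(~0, hdr + 1, size))
                    return -EBADMSG;
            return 0;
    }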
+enum lkd_fw_ascii_msg_lvls {
+ LKD_FW_ASCII_MSG_ERR = 0,
+ LKD_FW_ASCII_MSG_WRN = 1,
+ LKD_FW_ASCII_MSG_INF = 2,
+ LKD_FW_ASCII_MSG_DBG = 3,
+};
+
+#define LKD_FW_ASCII_MSG_MAX_LEN 128
+#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */
+#define LKD_FW_ASCII_MSG_MIN_DESC_VERSION 3
+
+struct lkd_fw_ascii_msg {
+ __u8 valid;
+ __u8 msg_lvl;
+ __u8 reserved[6];
+ char msg[LKD_FW_ASCII_MSG_MAX_LEN];
+};
+
+/* this is the main FW descriptor - consider ABI when changing */
+struct lkd_fw_comms_desc {
+ struct comms_desc_header header;
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ __le64 img_addr; /* address for next FW component load */
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ __le32 rsvd_mem_size_mb; /* reserved memory size [MB] for FW/SVE */
+ char reserved1[4];
+};
+
+enum comms_reset_cause {
+ HL_RESET_CAUSE_UNKNOWN = 0,
+ HL_RESET_CAUSE_HEARTBEAT = 1,
+ HL_RESET_CAUSE_TDR = 2,
+};
+
+/* TODO: remove define after struct name is aligned on all projects */
+#define lkd_msg_comms lkd_fw_comms_msg
+
+/* this is the comms message descriptor */
+struct lkd_fw_comms_msg {
+ struct comms_msg_header header;
+ /* union for future expansions of new messages */
+ union {
+ struct {
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ /* address for next FW component load */
+ __le64 img_addr;
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ /* reserved memory size [MB] for FW/SVE */
+ __le32 rsvd_mem_size_mb;
+ char reserved1[4];
+ };
+ struct {
+ __u8 reset_cause;
+ };
+ struct {
+ __u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
+ };
+ struct lkd_fw_binning_info binning_conf;
+ };
+};
+
+/*
+ * LKD commands:
+ *
+ * COMMS_NOOP Used to clear the command register and no actual
+ * command is sent.
+ *
+ * COMMS_CLR_STS Clear status command - FW should clear the
+ * status register. Used for synchronization
+ * between the commands as part of the race free
+ * protocol.
+ *
+ * COMMS_RST_STATE Reset the current communication state which is
+ * kept by FW for proper responses.
+ * Should be used in the beginning of the
+ * communication cycle to clean any leftovers from
+ * previous communication attempts.
+ *
+ * COMMS_PREP_DESC Prepare descriptor for setting up the
+ * communication and other dynamic data:
+ * struct lkd_fw_comms_desc.
+ * This command has a parameter stating the next FW
+ * component size, so the FW can actually prepare a
+ * space for it and in the status response provide
+ * the descriptor offset. The offset of the next FW
+ * data component is a part of the descriptor
+ * structure.
+ *
+ * COMMS_DATA_RDY The FW data has been uploaded and is ready for
+ * validation.
+ *
+ * COMMS_EXEC Execute the next FW component.
+ *
+ * COMMS_RST_DEV Reset the device.
+ *
+ * COMMS_GOTO_WFE Execute WFE command. Allowed only on non-secure
+ * devices.
+ *
+ * COMMS_SKIP_BMC Perform actions required for BMC-less servers.
+ * Do not wait for BMC response.
+ *
+ * COMMS_PREP_DESC_ELBI Same as COMMS_PREP_DESC, except that the memory
+ * space is allocated in an ELBI-access-only
+ * address range.
+ *
+ */
+enum comms_cmd {
+ COMMS_NOOP = 0,
+ COMMS_CLR_STS = 1,
+ COMMS_RST_STATE = 2,
+ COMMS_PREP_DESC = 3,
+ COMMS_DATA_RDY = 4,
+ COMMS_EXEC = 5,
+ COMMS_RST_DEV = 6,
+ COMMS_GOTO_WFE = 7,
+ COMMS_SKIP_BMC = 8,
+ COMMS_PREP_DESC_ELBI = 10,
+ COMMS_INVLD_LAST
+};
+
+#define COMMS_COMMAND_SIZE_SHIFT 0
+#define COMMS_COMMAND_SIZE_MASK 0x1FFFFFF
+#define COMMS_COMMAND_CMD_SHIFT 27
+#define COMMS_COMMAND_CMD_MASK 0xF8000000
+
+/*
+ * LKD command to FW register structure
+ * @size - FW component size
+ * @cmd - command from enum comms_cmd
+ */
+struct comms_command {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 size :25; /* 32MB max. */
+ u32 reserved :2;
+ enum comms_cmd cmd :5; /* 32 commands */
+ };
+ __le32 val;
+ };
+};
+
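The bit fields in the union are for the FW side only; the host composes the register value with the shift/mask pairs above. A minimal sketch:

    static u32 comms_command_pack(enum comms_cmd cmd, u32 size)
    {
            return ((cmd << COMMS_COMMAND_CMD_SHIFT) &
                    COMMS_COMMAND_CMD_MASK) |
                   ((size << COMMS_COMMAND_SIZE_SHIFT) &
                    COMMS_COMMAND_SIZE_MASK);
    }

    /* e.g. announce a 4 KB FW component before uploading it */
    u32 val = comms_command_pack(COMMS_PREP_DESC, SZ_4K);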
+/*
+ * FW status
+ *
+ * COMMS_STS_NOOP Used to clear the status register and no actual
+ * status is provided.
+ *
+ * COMMS_STS_ACK Command has been received and recognized.
+ *
+ * COMMS_STS_OK Command execution has finished successfully.
+ *
+ * COMMS_STS_ERR Command execution was unsuccessful and resulted
+ * in error.
+ *
+ * COMMS_STS_VALID_ERR FW validation has failed.
+ *
+ * COMMS_STS_TIMEOUT_ERR Command execution has timed out.
+ */
+enum comms_sts {
+ COMMS_STS_NOOP = 0,
+ COMMS_STS_ACK = 1,
+ COMMS_STS_OK = 2,
+ COMMS_STS_ERR = 3,
+ COMMS_STS_VALID_ERR = 4,
+ COMMS_STS_TIMEOUT_ERR = 5,
+ COMMS_STS_INVLD_LAST
+};
+
+/* RAM types for FW components loading - defines the base address */
+enum comms_ram_types {
+ COMMS_SRAM = 0,
+ COMMS_DRAM = 1,
+};
+
+#define COMMS_STATUS_OFFSET_SHIFT 0
+#define COMMS_STATUS_OFFSET_MASK 0x03FFFFFF
+#define COMMS_STATUS_OFFSET_ALIGN_SHIFT 2
+#define COMMS_STATUS_RAM_TYPE_SHIFT 26
+#define COMMS_STATUS_RAM_TYPE_MASK 0x0C000000
+#define COMMS_STATUS_STATUS_SHIFT 28
+#define COMMS_STATUS_STATUS_MASK 0xF0000000
+
+/*
+ * FW status to LKD register structure
+ * @offset - an offset from the base of the ram_type shifted right by
+ * 2 bits (always aligned to 32 bits).
+ * Allows a maximum addressable offset of 256MB from RAM base.
+ * Example: for real offset in RAM of 0x800000 (8MB), the value
+ * in offset field is (0x800000 >> 2) = 0x200000.
+ * @ram_type - the RAM type that should be used for offset from
+ * enum comms_ram_types
+ * @status - status from enum comms_sts
+ */
+struct comms_status {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 offset :26;
+ enum comms_ram_types ram_type :2;
+ enum comms_sts status :4; /* 16 statuses */
+ };
+ __le32 val;
+ };
+};
+
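Decoding reverses the packing; the offset field is shifted left by COMMS_STATUS_OFFSET_ALIGN_SHIFT to recover the byte offset. A sketch, with val assumed to be the raw register value:

    u32 sts = (val & COMMS_STATUS_STATUS_MASK) >> COMMS_STATUS_STATUS_SHIFT;
    u32 ram = (val & COMMS_STATUS_RAM_TYPE_MASK) >> COMMS_STATUS_RAM_TYPE_SHIFT;
    u64 off = (u64)((val & COMMS_STATUS_OFFSET_MASK) >>
                    COMMS_STATUS_OFFSET_SHIFT) << COMMS_STATUS_OFFSET_ALIGN_SHIFT;

    if (sts == COMMS_STS_OK && ram == COMMS_DRAM)
            /* the descriptor lives at DRAM base + off */;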
+#define NAME_MAX_LEN 32 /* bytes */
+struct hl_module_data {
+ __u8 name[NAME_MAX_LEN];
+ __u8 version[VERSION_MAX_LEN];
+};
+
+/**
+ * struct hl_component_versions - versions associated with hl component.
+ * @struct_size: size of the whole struct (including the dynamic size of @modules).
+ * @modules_offset: offset of the modules field in this struct.
+ * @component: version of the component itself.
+ * @fw_os: Firmware OS Version.
+ * @comp_name: Name of the component.
+ * @modules_counter: number of set bits in modules_mask.
+ * @reserved: reserved for future use.
+ * @modules: versions of the component's modules. Elaborated explanation in
+ * struct cpucp_versions.
+ */
+struct hl_component_versions {
+ __le16 struct_size;
+ __le16 modules_offset;
+ __u8 component[VERSION_MAX_LEN];
+ __u8 fw_os[VERSION_MAX_LEN];
+ __u8 comp_name[NAME_MAX_LEN];
+ __u8 modules_counter;
+ __u8 reserved[3];
+ struct hl_module_data modules[];
+};
+
+/* Maximum size of the FW versions FIT */
+#define HL_FW_VERSIONS_FIT_SIZE 4096
+
+#endif /* HL_BOOT_IF_H */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index c683996110b1..d57cab4d4c06 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -1,31 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H
+#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
+#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>
-
extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);
-#if defined(CONFIG_TINY_RCU)
-
-static inline void rcu_nmi_enter(void)
-{
-}
+#ifdef CONFIG_NO_HZ_FULL
+void __rcu_irq_enter_check_tick(void);
+#else
+static inline void __rcu_irq_enter_check_tick(void) { }
+#endif
-static inline void rcu_nmi_exit(void)
+static __always_inline void rcu_irq_enter_check_tick(void)
{
+ if (context_tracking_enabled())
+ __rcu_irq_enter_check_tick();
}
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
-
/*
* It is safe to do non-atomic ops on ->hardirq_context,
* because NMI handlers may not preempt and the ops are
@@ -34,51 +33,111 @@ extern void rcu_nmi_exit(void);
*/
#define __irq_enter() \
do { \
- account_irq_enter_time(current); \
preempt_count_add(HARDIRQ_OFFSET); \
- trace_hardirq_enter(); \
+ lockdep_hardirq_enter(); \
+ account_hardirq_enter(current); \
+ } while (0)
+
+/*
+ * Like __irq_enter() without time accounting for fast
+ * interrupts, e.g. reschedule IPI where time accounting
+ * is more expensive than the actual interrupt.
+ */
+#define __irq_enter_raw() \
+ do { \
+ preempt_count_add(HARDIRQ_OFFSET); \
+ lockdep_hardirq_enter(); \
} while (0)
/*
* Enter irq context (on NO_HZ, update jiffies):
*/
-extern void irq_enter(void);
+void irq_enter(void);
+/*
+ * Like irq_enter(), but RCU is already watching.
+ */
+void irq_enter_rcu(void);
/*
* Exit irq context without processing softirqs:
*/
#define __irq_exit() \
do { \
- trace_hardirq_exit(); \
- account_irq_exit_time(current); \
+ account_hardirq_exit(current); \
+ lockdep_hardirq_exit(); \
+ preempt_count_sub(HARDIRQ_OFFSET); \
+ } while (0)
+
+/*
+ * Like __irq_exit() without time accounting
+ */
+#define __irq_exit_raw() \
+ do { \
+ lockdep_hardirq_exit(); \
preempt_count_sub(HARDIRQ_OFFSET); \
} while (0)
/*
* Exit irq context and process softirqs if needed:
*/
-extern void irq_exit(void);
+void irq_exit(void);
-#define nmi_enter() \
+/*
+ * Like irq_exit(), but return with RCU watching.
+ */
+void irq_exit_rcu(void);
+
+#ifndef arch_nmi_enter
+#define arch_nmi_enter() do { } while (0)
+#define arch_nmi_exit() do { } while (0)
+#endif
+
+/*
+ * NMI vs Tracing
+ * --------------
+ *
+ * We must not land in a tracer until (or after) we've changed preempt_count
+ * such that in_nmi() becomes true. To that effect all NMI C entry points must
+ * be marked 'notrace' and call nmi_enter() as soon as possible.
+ */
+
+/*
+ * nmi_enter() can nest up to 15 times; see NMI_BITS.
+ */
+#define __nmi_enter() \
do { \
- printk_nmi_enter(); \
lockdep_off(); \
+ arch_nmi_enter(); \
+ BUG_ON(in_nmi() == NMI_MASK); \
+ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
+ } while (0)
+
+#define nmi_enter() \
+ do { \
+ __nmi_enter(); \
+ lockdep_hardirq_enter(); \
+ ct_nmi_enter(); \
+ instrumentation_begin(); \
ftrace_nmi_enter(); \
- BUG_ON(in_nmi()); \
- preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
- rcu_nmi_enter(); \
- trace_hardirq_enter(); \
+ instrumentation_end(); \
} while (0)
-#define nmi_exit() \
+#define __nmi_exit() \
do { \
- trace_hardirq_exit(); \
- rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
- preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
- ftrace_nmi_exit(); \
+ __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
+ arch_nmi_exit(); \
lockdep_on(); \
- printk_nmi_exit(); \
+ } while (0)
+
+#define nmi_exit() \
+ do { \
+ instrumentation_begin(); \
+ ftrace_nmi_exit(); \
+ instrumentation_end(); \
+ ct_nmi_exit(); \
+ lockdep_hardirq_exit(); \
+ __nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index ad6fa21d977b..38edaa08f862 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -62,10 +62,7 @@ static inline u32 __hash_32_generic(u32 val)
return val * GOLDEN_RATIO_32;
}
-#ifndef HAVE_ARCH_HASH_32
-#define hash_32 hash_32_generic
-#endif
-static inline u32 hash_32_generic(u32 val, unsigned int bits)
+static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
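With the arch override removed, hash_32() is always the multiplicative hash above, and callers select the table size through the bits argument:

    /* index into a 2^8 = 256 entry table; result is in [0, 255] */
    unsigned int idx = hash_32(key, 8);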
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 082dc1bd0801..f6c666730b8c 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Statically sized hash table implementation
* (C) 2012 Sasha Levin <levinsasha928@gmail.com>
@@ -144,7 +145,7 @@ static inline void hash_del_rcu(struct hlist_node *node)
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
- * @tmp: a &struct used for temporary storage
+ * @tmp: a &struct hlist_node used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
@@ -172,9 +173,9 @@ static inline void hash_del_rcu(struct hlist_node *node)
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
-#define hash_for_each_possible_rcu(name, obj, member, key) \
+#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
- member)
+ member, ## cond)
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
@@ -196,7 +197,7 @@ static inline void hash_del_rcu(struct hlist_node *node)
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
- * @tmp: a &struct used for temporary storage
+ * @tmp: a &struct hlist_node used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
diff --git a/include/linux/hashtable_api.h b/include/linux/hashtable_api.h
new file mode 100644
index 000000000000..c268ac2c5c0e
--- /dev/null
+++ b/include/linux/hashtable_api.h
@@ -0,0 +1 @@
+#include <linux/hashtable.h>
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 97585d9679f3..630a388035f1 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Generic HDLC support routines for Linux
*
* Copyright (C) 1999-2005 Krzysztof Halasa <khc@pm.waw.pl>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
*/
#ifndef __HDLC_H
#define __HDLC_H
@@ -25,7 +22,7 @@ struct hdlc_proto {
void (*start)(struct net_device *dev); /* if open & DCD */
void (*stop)(struct net_device *dev); /* if open & !DCD */
void (*detach)(struct net_device *dev);
- int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
+ int (*ioctl)(struct net_device *dev, struct if_settings *ifs);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
int (*netif_rx)(struct sk_buff *skb);
netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
@@ -57,7 +54,7 @@ typedef struct hdlc_device {
/* Exported from hdlc module */
/* Called by hardware driver when a user requests HDLC service */
-int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs);
/* Must be used by hardware driver on module startup/exit */
#define register_hdlc_device(dev) register_netdev(dev)
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h
index be3be25bb898..5d70c3f98f5b 100644
--- a/include/linux/hdlcdrv.h
+++ b/include/linux/hdlcdrv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* hdlcdrv.h -- HDLC packet radio network driver.
* The Linux soundcard driver for 1200 baud and 9600 baud packet radio
@@ -78,7 +79,7 @@ struct hdlcdrv_ops {
*/
int (*open)(struct net_device *);
int (*close)(struct net_device *);
- int (*ioctl)(struct net_device *, struct ifreq *,
+ int (*ioctl)(struct net_device *, void __user *,
struct hdlcdrv_ioctl *, int);
};
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index d271ff23984f..96bda41d9148 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -27,11 +27,27 @@
#include <linux/types.h>
#include <linux/device.h>
+enum hdmi_packet_type {
+ HDMI_PACKET_TYPE_NULL = 0x00,
+ HDMI_PACKET_TYPE_AUDIO_CLOCK_REGEN = 0x01,
+ HDMI_PACKET_TYPE_AUDIO_SAMPLE = 0x02,
+ HDMI_PACKET_TYPE_GENERAL_CONTROL = 0x03,
+ HDMI_PACKET_TYPE_ACP = 0x04,
+ HDMI_PACKET_TYPE_ISRC1 = 0x05,
+ HDMI_PACKET_TYPE_ISRC2 = 0x06,
+ HDMI_PACKET_TYPE_ONE_BIT_AUDIO_SAMPLE = 0x07,
+ HDMI_PACKET_TYPE_DST_AUDIO = 0x08,
+ HDMI_PACKET_TYPE_HBR_AUDIO_STREAM = 0x09,
+ HDMI_PACKET_TYPE_GAMUT_METADATA = 0x0a,
+ /* + enum hdmi_infoframe_type */
+};
+
enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
HDMI_INFOFRAME_TYPE_AVI = 0x82,
HDMI_INFOFRAME_TYPE_SPD = 0x83,
HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
+ HDMI_INFOFRAME_TYPE_DRM = 0x87,
};
#define HDMI_IEEE_OUI 0x000c03
@@ -40,6 +56,17 @@ enum hdmi_infoframe_type {
#define HDMI_AVI_INFOFRAME_SIZE 13
#define HDMI_SPD_INFOFRAME_SIZE 25
#define HDMI_AUDIO_INFOFRAME_SIZE 10
+#define HDMI_DRM_INFOFRAME_SIZE 26
+#define HDMI_VENDOR_INFOFRAME_SIZE 4
+
+/*
+ * HDMI 1.3a table 5-14 states that the largest InfoFrame_length is 27,
+ * not including the packet header or checksum byte. We include the
+ * checksum byte in HDMI_INFOFRAME_HEADER_SIZE, so this should allow
+ * HDMI_INFOFRAME_SIZE(MAX) to be the largest buffer we could ever need
+ * for any HDMI infoframe.
+ */
+#define HDMI_MAX_INFOFRAME_SIZE 27
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
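Since the macro pastes the type token, HDMI_INFOFRAME_SIZE(MAX) expands to HDMI_INFOFRAME_HEADER_SIZE + HDMI_MAX_INFOFRAME_SIZE, a buffer large enough for any infoframe. A usage sketch (field values are illustrative; note that hdmi_avi_infoframe_init() returns void after the change further below):

    struct hdmi_avi_infoframe frame;
    u8 buf[HDMI_INFOFRAME_SIZE(MAX)]; /* header + 27 payload bytes */
    ssize_t len;

    hdmi_avi_infoframe_init(&frame);
    len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
    if (len < 0)
            return len; /* frame failed validation */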
@@ -101,8 +128,8 @@ enum hdmi_extended_colorimetry {
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
- HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
- HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
+ HDMI_EXTENDED_COLORIMETRY_OPYCC_601,
+ HDMI_EXTENDED_COLORIMETRY_OPRGB,
/* The following EC values are only defined in CEA-861-F. */
HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
@@ -137,32 +164,73 @@ enum hdmi_content_type {
HDMI_CONTENT_TYPE_GAME,
};
+enum hdmi_metadata_type {
+ HDMI_STATIC_METADATA_TYPE1 = 0,
+};
+
+enum hdmi_eotf {
+ HDMI_EOTF_TRADITIONAL_GAMMA_SDR,
+ HDMI_EOTF_TRADITIONAL_GAMMA_HDR,
+ HDMI_EOTF_SMPTE_ST2084,
+ HDMI_EOTF_BT_2100_HLG,
+};
+
struct hdmi_avi_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
unsigned char length;
+ bool itc;
+ unsigned char pixel_repeat;
enum hdmi_colorspace colorspace;
enum hdmi_scan_mode scan_mode;
enum hdmi_colorimetry colorimetry;
enum hdmi_picture_aspect picture_aspect;
enum hdmi_active_aspect active_aspect;
- bool itc;
enum hdmi_extended_colorimetry extended_colorimetry;
enum hdmi_quantization_range quantization_range;
enum hdmi_nups nups;
unsigned char video_code;
enum hdmi_ycc_quantization_range ycc_quantization_range;
enum hdmi_content_type content_type;
- unsigned char pixel_repeat;
unsigned short top_bar;
unsigned short bottom_bar;
unsigned short left_bar;
unsigned short right_bar;
};
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+/* DRM Infoframe as per CTA 861.G spec */
+struct hdmi_drm_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ enum hdmi_eotf eotf;
+ enum hdmi_metadata_type metadata_type;
+ struct {
+ u16 x, y;
+ } display_primaries[3];
+ struct {
+ u16 x, y;
+ } white_point;
+ u16 max_display_mastering_luminance;
+ u16 min_display_mastering_luminance;
+ u16 max_cll;
+ u16 max_fall;
+};
+
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
+int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame);
+ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame);
+int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame,
+ const void *buffer, size_t size);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -194,6 +262,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product);
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame);
enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_STREAM,
@@ -272,6 +343,14 @@ struct hdmi_audio_infoframe {
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame);
+
+struct dp_sdp;
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
@@ -296,9 +375,39 @@ struct hdmi_vendor_infoframe {
unsigned int s3d_ext_data;
};
+/* HDR Metadata as per 861.G spec */
+struct hdr_static_metadata {
+ __u8 eotf;
+ __u8 metadata_type;
+ __u16 max_cll;
+ __u16 max_fall;
+ __u16 min_cll;
+};
+
+/**
+ * struct hdr_sink_metadata - HDR sink metadata
+ *
+ * Metadata Information read from Sink's EDID
+ */
+struct hdr_sink_metadata {
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u32 metadata_type;
+ /**
+ * @hdmi_type1: HDR Metadata Infoframe.
+ */
+ union {
+ struct hdr_static_metadata hdmi_type1;
+ };
+};
+
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);
union hdmi_vendor_any_infoframe {
struct {
@@ -317,6 +426,7 @@ union hdmi_vendor_any_infoframe {
* @spd: spd infoframe
* @vendor: union of all vendor infoframes
* @audio: audio infoframe
+ * @drm: Dynamic Range and Mastering infoframe
*
* This is used by the generic pack function. This works since all infoframes
* have the same header which also indicates which type of infoframe should be
@@ -328,12 +438,16 @@ union hdmi_infoframe {
struct hdmi_spd_infoframe spd;
union hdmi_vendor_any_infoframe vendor;
struct hdmi_audio_infoframe audio;
+ struct hdmi_drm_infoframe drm;
};
-ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
- union hdmi_infoframe *frame);
+ const union hdmi_infoframe *frame);
#endif /* _DRM_HDMI_H */
diff --git a/include/linux/hex.h b/include/linux/hex.h
new file mode 100644
index 000000000000..2618382e5b0c
--- /dev/null
+++ b/include/linux/hex.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HEX_H
+#define _LINUX_HEX_H
+
+#include <linux/types.h>
+
+extern const char hex_asc[];
+#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
+#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_hi(byte);
+ *buf++ = hex_asc_lo(byte);
+ return buf;
+}
+
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_upper_hi(byte);
+ *buf++ = hex_asc_upper_lo(byte);
+ return buf;
+}
+
+extern int hex_to_bin(unsigned char ch);
+extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
+extern char *bin2hex(char *dst, const void *src, size_t count);
+
+bool mac_pton(const char *s, u8 *mac);
+
+#endif
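A short usage sketch of the helpers collected in this new header (the MAC string is illustrative):

    u8 mac[6];
    char str[2 * sizeof(mac) + 1], *p = str;
    int i;

    if (!mac_pton("00:1a:2b:3c:4d:5e", mac))
            return -EINVAL;
    for (i = 0; i < 6; i++)
            p = hex_byte_pack(p, mac[i]); /* lower-case hex */
    *p = '\0'; /* str == "001a2b3c4d5e" */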
diff --git a/include/linux/hfs_common.h b/include/linux/hfs_common.h
new file mode 100644
index 000000000000..dadb5e0aa8a3
--- /dev/null
+++ b/include/linux/hfs_common.h
@@ -0,0 +1,653 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HFS/HFS+ common definitions, inline functions,
+ * and shared functionality.
+ */
+
+#ifndef _HFS_COMMON_H_
+#define _HFS_COMMON_H_
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define hfs_dbg(fmt, ...) \
+ pr_debug("pid %d:%s:%d %s(): " fmt, \
+ current->pid, __FILE__, __LINE__, __func__, ##__VA_ARGS__)
+
+/*
+ * Format of structures on disk
+ * Information taken from Apple Technote #1150 (HFS Plus Volume Format)
+ */
+
+/* offsets to various blocks */
+#define HFS_DD_BLK 0 /* Driver Descriptor block */
+#define HFS_PMAP_BLK 1 /* First block of partition map */
+#define HFS_MDB_BLK 2 /* Block (w/i partition) of MDB */
+
+/* magic numbers for various disk blocks */
+#define HFS_DRVR_DESC_MAGIC 0x4552 /* "ER": driver descriptor map */
+#define HFS_OLD_PMAP_MAGIC 0x5453 /* "TS": old-type partition map */
+#define HFS_NEW_PMAP_MAGIC 0x504D /* "PM": new-type partition map */
+#define HFS_SUPER_MAGIC 0x4244 /* "BD": HFS MDB (super block) */
+#define HFS_MFS_SUPER_MAGIC 0xD2D7 /* MFS MDB (super block) */
+
+#define HFSPLUS_VOLHEAD_SIG 0x482b
+#define HFSPLUS_VOLHEAD_SIGX 0x4858
+#define HFSPLUS_SUPER_MAGIC 0x482b
+
+#define HFSP_WRAP_MAGIC 0x4244
+#define HFSP_WRAP_ATTRIB_SLOCK 0x8000
+#define HFSP_WRAP_ATTRIB_SPARED 0x0200
+
+#define HFSP_WRAPOFF_SIG 0x00
+#define HFSP_WRAPOFF_ATTRIB 0x0A
+#define HFSP_WRAPOFF_ABLKSIZE 0x14
+#define HFSP_WRAPOFF_ABLKSTART 0x1C
+#define HFSP_WRAPOFF_EMBEDSIG 0x7C
+#define HFSP_WRAPOFF_EMBEDEXT 0x7E
+
+#define HFSP_HARDLINK_TYPE 0x686c6e6b /* 'hlnk' */
+#define HFSP_HFSPLUS_CREATOR 0x6866732b /* 'hfs+' */
+
+#define HFSP_SYMLINK_TYPE 0x736c6e6b /* 'slnk' */
+#define HFSP_SYMLINK_CREATOR 0x72686170 /* 'rhap' */
+
+#define HFSP_MOUNT_VERSION 0x482b4c78 /* 'H+Lx' */
+
+#define HFSP_HIDDENDIR_NAME \
+ "\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80HFS+ Private Data"
+
+/* various FIXED size parameters */
+#define HFS_SECTOR_SIZE 512 /* size of an HFS sector */
+#define HFS_SECTOR_SIZE_BITS 9 /* log_2(HFS_SECTOR_SIZE) */
+#define HFS_MAX_VALENCE 32767U
+
+#define HFSPLUS_SECTOR_SIZE HFS_SECTOR_SIZE
+#define HFSPLUS_SECTOR_SHIFT HFS_SECTOR_SIZE_BITS
+#define HFSPLUS_VOLHEAD_SECTOR 2
+#define HFSPLUS_MIN_VERSION 4
+#define HFSPLUS_CURRENT_VERSION 5
+
+#define HFS_NAMELEN 31 /* maximum length of an HFS filename */
+#define HFS_MAX_NAMELEN 128
+
+#define HFSPLUS_MAX_STRLEN 255
+#define HFSPLUS_ATTR_MAX_STRLEN 127
+
+/* Meanings of the drAtrb field of the MDB,
+ * Reference: _Inside Macintosh: Files_ p. 2-61
+ */
+#define HFS_SB_ATTRIB_HLOCK (1 << 7)
+#define HFS_SB_ATTRIB_UNMNT (1 << 8)
+#define HFS_SB_ATTRIB_SPARED (1 << 9)
+#define HFS_SB_ATTRIB_INCNSTNT (1 << 11)
+#define HFS_SB_ATTRIB_SLOCK (1 << 15)
+
+/* values for hfs_cat_rec.cdrType */
+#define HFS_CDR_DIR 0x01 /* folder (directory) */
+#define HFS_CDR_FIL 0x02 /* file */
+#define HFS_CDR_THD 0x03 /* folder (directory) thread */
+#define HFS_CDR_FTH 0x04 /* file thread */
+
+/* legal values for hfs_ext_key.FkType and hfs_file.fork */
+#define HFS_FK_DATA 0x00
+#define HFS_FK_RSRC 0xFF
+
+/* bits in hfs_fil_entry.Flags */
+#define HFS_FIL_LOCK 0x01 /* locked */
+#define HFS_FIL_THD 0x02 /* file thread */
+#define HFS_FIL_DOPEN 0x04 /* data fork open */
+#define HFS_FIL_ROPEN 0x08 /* resource fork open */
+#define HFS_FIL_DIR 0x10 /* directory (always clear) */
+#define HFS_FIL_NOCOPY 0x40 /* copy-protected file */
+#define HFS_FIL_USED 0x80 /* open */
+
+/* bits in hfs_dir_entry.Flags. dirflags is 16 bits. */
+#define HFS_DIR_LOCK 0x01 /* locked */
+#define HFS_DIR_THD 0x02 /* directory thread */
+#define HFS_DIR_INEXPFOLDER 0x04 /* in a shared area */
+#define HFS_DIR_MOUNTED 0x08 /* mounted */
+#define HFS_DIR_DIR 0x10 /* directory (always set) */
+#define HFS_DIR_EXPFOLDER 0x20 /* share point */
+
+/* bits hfs_finfo.fdFlags */
+#define HFS_FLG_INITED 0x0100
+#define HFS_FLG_LOCKED 0x1000
+#define HFS_FLG_INVISIBLE 0x4000
+
+/* Some special File ID numbers */
+#define HFS_POR_CNID 1 /* Parent Of the Root */
+#define HFSPLUS_POR_CNID HFS_POR_CNID
+#define HFS_ROOT_CNID 2 /* ROOT directory */
+#define HFSPLUS_ROOT_CNID HFS_ROOT_CNID
+#define HFS_EXT_CNID 3 /* EXTents B-tree */
+#define HFSPLUS_EXT_CNID HFS_EXT_CNID
+#define HFS_CAT_CNID 4 /* CATalog B-tree */
+#define HFSPLUS_CAT_CNID HFS_CAT_CNID
+#define HFS_BAD_CNID 5 /* BAD blocks file */
+#define HFSPLUS_BAD_CNID HFS_BAD_CNID
+#define HFS_ALLOC_CNID 6 /* ALLOCation file (HFS+) */
+#define HFSPLUS_ALLOC_CNID HFS_ALLOC_CNID
+#define HFS_START_CNID 7 /* STARTup file (HFS+) */
+#define HFSPLUS_START_CNID HFS_START_CNID
+#define HFS_ATTR_CNID 8 /* ATTRibutes file (HFS+) */
+#define HFSPLUS_ATTR_CNID HFS_ATTR_CNID
+#define HFS_EXCH_CNID 15 /* ExchangeFiles temp id */
+#define HFSPLUS_EXCH_CNID HFS_EXCH_CNID
+#define HFS_FIRSTUSER_CNID 16 /* first available user id */
+#define HFSPLUS_FIRSTUSER_CNID HFS_FIRSTUSER_CNID
+
+/*======== HFS/HFS+ structures as they appear on the disk ========*/
+
+typedef __be32 hfsplus_cnid;
+typedef __be16 hfsplus_unichr;
+
+/* Pascal-style string of up to 31 characters */
+struct hfs_name {
+ u8 len;
+ u8 name[HFS_NAMELEN];
+} __packed;
+
+/* A "string" as used in filenames, etc. */
+struct hfsplus_unistr {
+ __be16 length;
+ hfsplus_unichr unicode[HFSPLUS_MAX_STRLEN];
+} __packed;
+
+/*
+ * A "string" is used in attributes file
+ * for name of extended attribute
+ */
+struct hfsplus_attr_unistr {
+ __be16 length;
+ hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
+} __packed;
+
+struct hfs_extent {
+ __be16 block;
+ __be16 count;
+};
+typedef struct hfs_extent hfs_extent_rec[3];
+
+/* A single contiguous area of a file */
+struct hfsplus_extent {
+ __be32 start_block;
+ __be32 block_count;
+} __packed;
+typedef struct hfsplus_extent hfsplus_extent_rec[8];
+
+/* Information for a "Fork" in a file */
+struct hfsplus_fork_raw {
+ __be64 total_size;
+ __be32 clump_size;
+ __be32 total_blocks;
+ hfsplus_extent_rec extents;
+} __packed;
+
+struct hfs_mdb {
+ __be16 drSigWord; /* Signature word indicating fs type */
+ __be32 drCrDate; /* fs creation date/time */
+ __be32 drLsMod; /* fs modification date/time */
+ __be16 drAtrb; /* fs attributes */
+ __be16 drNmFls; /* number of files in root directory */
+ __be16 drVBMSt; /* location (in 512-byte blocks)
+ of the volume bitmap */
+ __be16 drAllocPtr; /* location (in allocation blocks)
+ to begin next allocation search */
+ __be16 drNmAlBlks; /* number of allocation blocks */
+ __be32 drAlBlkSiz; /* bytes in an allocation block */
+ __be32 drClpSiz; /* clumpsize, the number of bytes to
+ allocate when extending a file */
+ __be16 drAlBlSt; /* location (in 512-byte blocks)
+ of the first allocation block */
+ __be32 drNxtCNID; /* CNID to assign to the next
+ file or directory created */
+ __be16 drFreeBks; /* number of free allocation blocks */
+ u8 drVN[28]; /* the volume label */
+ __be32 drVolBkUp; /* fs backup date/time */
+ __be16 drVSeqNum; /* backup sequence number */
+ __be32 drWrCnt; /* fs write count */
+ __be32 drXTClpSiz; /* clumpsize for the extents B-tree */
+ __be32 drCTClpSiz; /* clumpsize for the catalog B-tree */
+ __be16 drNmRtDirs; /* number of directories in
+ the root directory */
+ __be32 drFilCnt; /* number of files in the fs */
+ __be32 drDirCnt; /* number of directories in the fs */
+ u8 drFndrInfo[32]; /* data used by the Finder */
+ __be16 drEmbedSigWord; /* embedded volume signature */
+ __be32 drEmbedExtent; /* starting block number (xdrStABN)
+ and number of allocation blocks
+ (xdrNumABlks) occupied by embedded
+ volume */
+ __be32 drXTFlSize; /* bytes in the extents B-tree */
+ hfs_extent_rec drXTExtRec; /* extents B-tree's first 3 extents */
+ __be32 drCTFlSize; /* bytes in the catalog B-tree */
+ hfs_extent_rec drCTExtRec; /* catalog B-tree's first 3 extents */
+} __packed;
+
+/* HFS+ Volume Header */
+struct hfsplus_vh {
+ __be16 signature;
+ __be16 version;
+ __be32 attributes;
+ __be32 last_mount_vers;
+ u32 reserved;
+
+ __be32 create_date;
+ __be32 modify_date;
+ __be32 backup_date;
+ __be32 checked_date;
+
+ __be32 file_count;
+ __be32 folder_count;
+
+ __be32 blocksize;
+ __be32 total_blocks;
+ __be32 free_blocks;
+
+ __be32 next_alloc;
+ __be32 rsrc_clump_sz;
+ __be32 data_clump_sz;
+ hfsplus_cnid next_cnid;
+
+ __be32 write_count;
+ __be64 encodings_bmp;
+
+ u32 finder_info[8];
+
+ struct hfsplus_fork_raw alloc_file;
+ struct hfsplus_fork_raw ext_file;
+ struct hfsplus_fork_raw cat_file;
+ struct hfsplus_fork_raw attr_file;
+ struct hfsplus_fork_raw start_file;
+} __packed;
+
+/* HFS+ volume attributes */
+#define HFSPLUS_VOL_UNMNT (1 << 8)
+#define HFSPLUS_VOL_SPARE_BLK (1 << 9)
+#define HFSPLUS_VOL_NOCACHE (1 << 10)
+#define HFSPLUS_VOL_INCNSTNT (1 << 11)
+#define HFSPLUS_VOL_NODEID_REUSED (1 << 12)
+#define HFSPLUS_VOL_JOURNALED (1 << 13)
+#define HFSPLUS_VOL_SOFTLOCK (1 << 15)
+#define HFSPLUS_VOL_UNUSED_NODE_FIX (1 << 31)
+
+struct hfs_point {
+ __be16 v;
+ __be16 h;
+} __packed;
+
+typedef struct hfs_point hfsp_point;
+
+struct hfs_rect {
+ __be16 top;
+ __be16 left;
+ __be16 bottom;
+ __be16 right;
+} __packed;
+
+typedef struct hfs_rect hfsp_rect;
+
+struct hfs_finfo {
+ __be32 fdType;
+ __be32 fdCreator;
+ __be16 fdFlags;
+ struct hfs_point fdLocation;
+ __be16 fdFldr;
+} __packed;
+
+typedef struct hfs_finfo FInfo;
+
+struct hfs_fxinfo {
+ __be16 fdIconID;
+ u8 fdUnused[8];
+ __be16 fdComment;
+ __be32 fdPutAway;
+} __packed;
+
+typedef struct hfs_fxinfo FXInfo;
+
+struct hfs_dinfo {
+ struct hfs_rect frRect;
+ __be16 frFlags;
+ struct hfs_point frLocation;
+ __be16 frView;
+} __packed;
+
+typedef struct hfs_dinfo DInfo;
+
+struct hfs_dxinfo {
+ struct hfs_point frScroll;
+ __be32 frOpenChain;
+ __be16 frUnused;
+ __be16 frComment;
+ __be32 frPutAway;
+} __packed;
+
+typedef struct hfs_dxinfo DXInfo;
+
+union hfs_finder_info {
+ struct {
+ struct hfs_finfo finfo;
+ struct hfs_fxinfo fxinfo;
+ } file;
+ struct {
+ struct hfs_dinfo dinfo;
+ struct hfs_dxinfo dxinfo;
+ } dir;
+} __packed;
+
+/* The key used in the catalog b-tree: */
+struct hfs_cat_key {
+ u8 key_len; /* number of bytes in the key */
+ u8 reserved; /* padding */
+ __be32 ParID; /* CNID of the parent dir */
+ struct hfs_name CName; /* The filename of the entry */
+} __packed;
+
+/* HFS+ catalog entry key */
+struct hfsplus_cat_key {
+ __be16 key_len;
+ hfsplus_cnid parent;
+ struct hfsplus_unistr name;
+} __packed;
+
+#define HFSPLUS_CAT_KEYLEN (sizeof(struct hfsplus_cat_key))
+
+/* The key used in the extents b-tree: */
+struct hfs_ext_key {
+ u8 key_len; /* number of bytes in the key */
+ u8 FkType; /* HFS_FK_{DATA,RSRC} */
+ __be32 FNum; /* The File ID of the file */
+ __be16 FABN; /* allocation block number */
+} __packed;
+
+/* HFS+ extents tree key */
+struct hfsplus_ext_key {
+ __be16 key_len;
+ u8 fork_type;
+ u8 pad;
+ hfsplus_cnid cnid;
+ __be32 start_block;
+} __packed;
+
+#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key)
+
+typedef union hfs_btree_key {
+ u8 key_len; /* number of bytes in the key */
+ struct hfs_cat_key cat;
+ struct hfs_ext_key ext;
+} hfs_btree_key;
+
+#define HFS_MAX_CAT_KEYLEN (sizeof(struct hfs_cat_key) - sizeof(u8))
+#define HFS_MAX_EXT_KEYLEN (sizeof(struct hfs_ext_key) - sizeof(u8))
+
+typedef union hfs_btree_key btree_key;
+
+/* The catalog record for a file */
+struct hfs_cat_file {
+ s8 type; /* The type of entry */
+ u8 reserved;
+ u8 Flags; /* Flags such as read-only */
+ s8 Typ; /* file version number = 0 */
+ struct hfs_finfo UsrWds; /* data used by the Finder */
+ __be32 FlNum; /* The CNID */
+ __be16 StBlk; /* obsolete */
+ __be32 LgLen; /* The logical EOF of the data fork*/
+ __be32 PyLen; /* The physical EOF of the data fork */
+ __be16 RStBlk; /* obsolete */
+ __be32 RLgLen; /* The logical EOF of the rsrc fork */
+ __be32 RPyLen; /* The physical EOF of the rsrc fork */
+ __be32 CrDat; /* The creation date */
+ __be32 MdDat; /* The modified date */
+ __be32 BkDat; /* The last backup date */
+ struct hfs_fxinfo FndrInfo; /* more data for the Finder */
+ __be16 ClpSize; /* number of bytes to allocate
+ when extending files */
+ hfs_extent_rec ExtRec; /* first extent record
+ for the data fork */
+ hfs_extent_rec RExtRec; /* first extent record
+ for the resource fork */
+ u32 Resrv; /* reserved by Apple */
+} __packed;
+
+/* the catalog record for a directory */
+struct hfs_cat_dir {
+ s8 type; /* The type of entry */
+ u8 reserved;
+ __be16 Flags; /* flags */
+ __be16 Val; /* Valence: number of files and
+ dirs in the directory */
+ __be32 DirID; /* The CNID */
+ __be32 CrDat; /* The creation date */
+ __be32 MdDat; /* The modification date */
+ __be32 BkDat; /* The last backup date */
+ struct hfs_dinfo UsrInfo; /* data used by the Finder */
+ struct hfs_dxinfo FndrInfo; /* more data used by Finder */
+ u8 Resrv[16]; /* reserved by Apple */
+} __packed;
+
+/* the catalog record for a thread */
+struct hfs_cat_thread {
+ s8 type; /* The type of entry */
+ u8 reserved[9]; /* reserved by Apple */
+ __be32 ParID; /* CNID of parent directory */
+ struct hfs_name CName; /* The name of this entry */
+} __packed;
+
+/* A catalog tree record */
+typedef union hfs_cat_rec {
+ s8 type; /* The type of entry */
+ struct hfs_cat_file file;
+ struct hfs_cat_dir dir;
+ struct hfs_cat_thread thread;
+} hfs_cat_rec;
+
+/* POSIX permissions */
+struct hfsplus_perm {
+ __be32 owner;
+ __be32 group;
+ u8 rootflags;
+ u8 userflags;
+ __be16 mode;
+ __be32 dev;
+} __packed;
+
+#define HFSPLUS_FLG_NODUMP 0x01
+#define HFSPLUS_FLG_IMMUTABLE 0x02
+#define HFSPLUS_FLG_APPEND 0x04
+
+/* HFS/HFS+ BTree node descriptor */
+struct hfs_bnode_desc {
+ __be32 next; /* (V) Number of the next node at this level */
+ __be32 prev; /* (V) Number of the prev node at this level */
+ u8 type; /* (F) The type of node */
+ u8 height; /* (F) The level of this node (leaves=1) */
+ __be16 num_recs; /* (V) The number of records in this node */
+ u16 reserved;
+} __packed;
+
+/* HFS/HFS+ BTree node types */
+#define HFS_NODE_INDEX 0x00 /* An internal (index) node */
+#define HFS_NODE_HEADER 0x01 /* The tree header node (node 0) */
+#define HFS_NODE_MAP 0x02 /* Holds part of the bitmap of used nodes */
+#define HFS_NODE_LEAF 0xFF /* A leaf (ndNHeight==1) node */
+
+/* HFS/HFS+ BTree header */
+struct hfs_btree_header_rec {
+ __be16 depth; /* (V) The number of levels in this B-tree */
+ __be32 root; /* (V) The node number of the root node */
+ __be32 leaf_count; /* (V) The number of leaf records */
+ __be32 leaf_head; /* (V) The number of the first leaf node */
+ __be32 leaf_tail; /* (V) The number of the last leaf node */
+ __be16 node_size; /* (F) The number of bytes in a node (=512) */
+ __be16 max_key_len; /* (F) The length of a key in an index node */
+ __be32 node_count; /* (V) The total number of nodes */
+ __be32 free_nodes; /* (V) The number of unused nodes */
+ u16 reserved1;
+ __be32 clump_size; /* (F) clump size. not usually used. */
+ u8 btree_type; /* (F) BTree type */
+ u8 key_type;
+ __be32 attributes; /* (F) attributes */
+ u32 reserved3[16];
+} __packed;
+
+/* BTree attributes */
+#define BTREE_ATTR_BADCLOSE 0x00000001 /* b-tree not closed properly. not
+ used by hfsplus. */
+#define HFS_TREE_BIGKEYS 0x00000002 /* key length is u16 instead of u8.
+ used by hfsplus. */
+#define HFS_TREE_VARIDXKEYS 0x00000004 /* variable key length instead of
+ max key length. used in catalog
+ b-tree but not in extents
+ b-tree (hfsplus). */
+
+/* HFS+ BTree misc info */
+#define HFSPLUS_TREE_HEAD 0
+#define HFSPLUS_NODE_MXSZ 32768
+#define HFSPLUS_ATTR_TREE_NODE_SIZE 8192
+#define HFSPLUS_BTREE_HDR_NODE_RECS_COUNT 3
+#define HFSPLUS_BTREE_HDR_USER_BYTES 128
+
+/* btree key type */
+#define HFSPLUS_KEY_CASEFOLDING 0xCF /* case-insensitive */
+#define HFSPLUS_KEY_BINARY 0xBC /* case-sensitive */
+
+/* HFS+ folder data (part of an hfsplus_cat_entry) */
+struct hfsplus_cat_folder {
+ __be16 type;
+ __be16 flags;
+ __be32 valence;
+ hfsplus_cnid id;
+ __be32 create_date;
+ __be32 content_mod_date;
+ __be32 attribute_mod_date;
+ __be32 access_date;
+ __be32 backup_date;
+ struct hfsplus_perm permissions;
+ struct_group_attr(info, __packed,
+ DInfo user_info;
+ DXInfo finder_info;
+ );
+ __be32 text_encoding;
+ __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */
+} __packed;
+
+/* HFS+ file data (part of a cat_entry) */
+struct hfsplus_cat_file {
+ __be16 type;
+ __be16 flags;
+ u32 reserved1;
+ hfsplus_cnid id;
+ __be32 create_date;
+ __be32 content_mod_date;
+ __be32 attribute_mod_date;
+ __be32 access_date;
+ __be32 backup_date;
+ struct hfsplus_perm permissions;
+ struct_group_attr(info, __packed,
+ FInfo user_info;
+ FXInfo finder_info;
+ );
+ __be32 text_encoding;
+ u32 reserved2;
+
+ struct hfsplus_fork_raw data_fork;
+ struct hfsplus_fork_raw rsrc_fork;
+} __packed;
+
+/* File and folder flag bits */
+#define HFSPLUS_FILE_LOCKED 0x0001
+#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
+#define HFSPLUS_XATTR_EXISTS 0x0004
+#define HFSPLUS_ACL_EXISTS 0x0008
+#define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count
+ * (HFSX only) */
+
+/* HFS+ catalog thread (part of a cat_entry) */
+struct hfsplus_cat_thread {
+ __be16 type;
+ s16 reserved;
+ hfsplus_cnid parentID;
+ struct hfsplus_unistr nodeName;
+} __packed;
+
+#define HFSPLUS_MIN_THREAD_SZ 10
+
+/* A data record in the catalog tree */
+typedef union {
+ __be16 type;
+ struct hfsplus_cat_folder folder;
+ struct hfsplus_cat_file file;
+ struct hfsplus_cat_thread thread;
+} __packed hfsplus_cat_entry;
+
+/* HFS+ catalog entry type */
+#define HFSPLUS_FOLDER 0x0001
+#define HFSPLUS_FILE 0x0002
+#define HFSPLUS_FOLDER_THREAD 0x0003
+#define HFSPLUS_FILE_THREAD 0x0004
+
+#define HFSPLUS_XATTR_FINDER_INFO_NAME "com.apple.FinderInfo"
+#define HFSPLUS_XATTR_ACL_NAME "com.apple.system.Security"
+
+#define HFSPLUS_ATTR_INLINE_DATA 0x10
+#define HFSPLUS_ATTR_FORK_DATA 0x20
+#define HFSPLUS_ATTR_EXTENTS 0x30
+
+/* HFS+ attributes tree key */
+struct hfsplus_attr_key {
+ __be16 key_len;
+ __be16 pad;
+ hfsplus_cnid cnid;
+ __be32 start_block;
+ struct hfsplus_attr_unistr key_name;
+} __packed;
+
+#define HFSPLUS_ATTR_KEYLEN sizeof(struct hfsplus_attr_key)
+
+/* HFS+ fork data attribute */
+struct hfsplus_attr_fork_data {
+ __be32 record_type;
+ __be32 reserved;
+ struct hfsplus_fork_raw the_fork;
+} __packed;
+
+/* HFS+ extension attribute */
+struct hfsplus_attr_extents {
+ __be32 record_type;
+ __be32 reserved;
+ struct hfsplus_extent extents;
+} __packed;
+
+#define HFSPLUS_MAX_INLINE_DATA_SIZE 3802
+
+/* HFS+ attribute inline data */
+struct hfsplus_attr_inline_data {
+ __be32 record_type;
+ __be32 reserved1;
+ u8 reserved2[6];
+ __be16 length;
+ u8 raw_bytes[HFSPLUS_MAX_INLINE_DATA_SIZE];
+} __packed;
+
+/* A data record in the attributes tree */
+typedef union {
+ __be32 record_type;
+ struct hfsplus_attr_fork_data fork_data;
+ struct hfsplus_attr_extents extents;
+ struct hfsplus_attr_inline_data inline_data;
+} __packed hfsplus_attr_entry;
+
+/* HFS+ generic BTree key */
+typedef union {
+ __be16 key_len;
+ struct hfsplus_cat_key cat;
+ struct hfsplus_ext_key ext;
+ struct hfsplus_attr_key attr;
+} __packed hfsplus_btree_key;
+
+#endif /* _HFS_COMMON_H_ */
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8663f216c563..ea7b23d13bfd 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __HID_DEBUG_H
#define __HID_DEBUG_H
@@ -6,25 +7,14 @@
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifdef CONFIG_DEBUG_FS
+#include <linux/kfifo.h>
+
#define HID_DEBUG_BUFSIZE 512
+#define HID_DEBUG_FIFOSIZE 512
void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +27,8 @@ void hid_debug_init(void);
void hid_debug_exit(void);
void hid_debug_event(struct hid_device *, char *);
-
struct hid_debug_list {
- char *hid_debug_buf;
- int head;
- int tail;
+ DECLARE_KFIFO_PTR(hid_debug_fifo, char);
struct fasync_struct *fasync;
struct hid_device *hdev;
struct list_head node;
@@ -64,4 +51,3 @@ struct hid_debug_list {
#endif
#endif
-
diff --git a/include/linux/hid-over-i2c.h b/include/linux/hid-over-i2c.h
new file mode 100644
index 000000000000..3b1a0208a6b8
--- /dev/null
+++ b/include/linux/hid-over-i2c.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2024 Intel Corporation */
+
+#include <linux/bits.h>
+
+#ifndef _HID_OVER_I2C_H_
+#define _HID_OVER_I2C_H_
+
+#define HIDI2C_REG_LEN sizeof(__le16)
+
+/* Input report type definition in HIDI2C protocol */
+enum hidi2c_report_type {
+ HIDI2C_RESERVED = 0,
+ HIDI2C_INPUT,
+ HIDI2C_OUTPUT,
+ HIDI2C_FEATURE,
+};
+
+/* Power state type definition in HIDI2C protocol */
+enum hidi2c_power_state {
+ HIDI2C_ON,
+ HIDI2C_SLEEP,
+};
+
+/* Opcode type definition in HIDI2C protocol */
+enum hidi2c_opcode {
+ HIDI2C_RESET = 1,
+ HIDI2C_GET_REPORT,
+ HIDI2C_SET_REPORT,
+ HIDI2C_GET_IDLE,
+ HIDI2C_SET_IDLE,
+ HIDI2C_GET_PROTOCOL,
+ HIDI2C_SET_PROTOCOL,
+ HIDI2C_SET_POWER,
+};
+
+/**
+ * struct hidi2c_report_packet - Report packet definition in HIDI2C protocol
+ * @len: data field length
+ * @data: HIDI2C report packet data
+ */
+struct hidi2c_report_packet {
+ __le16 len;
+ u8 data[];
+} __packed;
+
+#define HIDI2C_LENGTH_LEN sizeof(__le16)
+
+#define HIDI2C_PACKET_LEN(data_len) ((data_len) + HIDI2C_LENGTH_LEN)
+#define HIDI2C_DATA_LEN(pkt_len) ((pkt_len) - HIDI2C_LENGTH_LEN)
+
+#define HIDI2C_CMD_MAX_RI 0x0F
+
+/**
+ * HIDI2C command data packet - Command packet definition in HIDI2C protocol
+ * @report_id: [0:3] report id (<15) for features or output reports
+ * @report_type: [4:5] indicate report type, reference to hidi2c_report_type
+ * @reserved0: [6:7] reserved bits
+ * @opcode: [8:11] command operation code, reference to hidi2c_opcode
+ * @reserved1: [12:15] reserved bits
+ * @report_id_optional: [23:16] appended 3rd byte.
+ * If the report_id in the low byte is set to the
+ * sentinel value (HIDI2C_CMD_MAX_RI), then this
+ * optional third byte represents the report id (>=15).
+ * Otherwise, this third byte is not present.
+ */
+
+#define HIDI2C_CMD_LEN sizeof(__le16)
+#define HIDI2C_CMD_LEN_OPT (sizeof(__le16) + 1)
+#define HIDI2C_CMD_REPORT_ID GENMASK(3, 0)
+#define HIDI2C_CMD_REPORT_TYPE GENMASK(5, 4)
+#define HIDI2C_CMD_OPCODE GENMASK(11, 8)
+#define HIDI2C_CMD_3RD_BYTE GENMASK(23, 16)
+
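FIELD_PREP() from <linux/bitfield.h> pairs naturally with these GENMASK() definitions. A sketch building a 2-byte GET_REPORT command for feature report id 2 (no optional third byte is needed since 2 < HIDI2C_CMD_MAX_RI):

    u16 cmd = FIELD_PREP(HIDI2C_CMD_REPORT_ID, 2) |
              FIELD_PREP(HIDI2C_CMD_REPORT_TYPE, HIDI2C_FEATURE) |
              FIELD_PREP(HIDI2C_CMD_OPCODE, HIDI2C_GET_REPORT);
    __le16 wire = cpu_to_le16(cmd); /* fields are little-endian on the wire */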
+#define HIDI2C_HID_DESC_BCDVERSION 0x100
+
+/**
+ * struct hidi2c_dev_descriptor - HIDI2C device descriptor definition
+ * @dev_desc_len: The length of the complete device descriptor, fixed to 0x1E (30).
+ * @bcd_ver: The version number of the HIDI2C protocol supported.
+ * In binary coded decimal (BCD) format.
+ * @report_desc_len: The length of the report descriptor
+ * @report_desc_reg: The register address to retrieve report descriptor
+ * @input_reg: the register address to retrieve input report
+ * @max_input_len: The length of the largest possible HID input (or feature) report
+ * @output_reg: the register address to send output report
+ * @max_output_len: The length of the largest output (or feature) report
+ * @cmd_reg: the register address to send command
+ * @data_reg: the register address to send command data
+ * @vendor_id: Device manufacturers vendor ID
+ * @product_id: Device unique model/product ID
+ * @version_id: Device's unique version
+ * @reserved0: Reserved and should be 0
+ * @reserved1: Reserved and should be 0
+ */
+struct hidi2c_dev_descriptor {
+ __le16 dev_desc_len;
+ __le16 bcd_ver;
+ __le16 report_desc_len;
+ __le16 report_desc_reg;
+ __le16 input_reg;
+ __le16 max_input_len;
+ __le16 output_reg;
+ __le16 max_output_len;
+ __le16 cmd_reg;
+ __le16 data_reg;
+ __le16 vendor_id;
+ __le16 product_id;
+ __le16 version_id;
+ __le16 reserved0;
+ __le16 reserved1;
+} __packed;
+
+#define HIDI2C_DEV_DESC_LEN sizeof(struct hidi2c_dev_descriptor)
+
+#endif /* _HID_OVER_I2C_H_ */
diff --git a/include/linux/hid-over-spi.h b/include/linux/hid-over-spi.h
new file mode 100644
index 000000000000..da5a14b5e89b
--- /dev/null
+++ b/include/linux/hid-over-spi.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2024 Intel Corporation */
+
+#ifndef _HID_OVER_SPI_H_
+#define _HID_OVER_SPI_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Input report type definition in HIDSPI protocol */
+enum input_report_type {
+ INVALID_INPUT_REPORT_TYPE_0 = 0,
+ DATA = 1,
+ INVALID_TYPE_2 = 2,
+ RESET_RESPONSE = 3,
+ COMMAND_RESPONSE = 4,
+ GET_FEATURE_RESPONSE = 5,
+ INVALID_TYPE_6 = 6,
+ DEVICE_DESCRIPTOR_RESPONSE = 7,
+ REPORT_DESCRIPTOR_RESPONSE = 8,
+ SET_FEATURE_RESPONSE = 9,
+ OUTPUT_REPORT_RESPONSE = 10,
+ GET_INPUT_REPORT_RESPONSE = 11,
+ INVALID_INPUT_REPORT_TYPE = 0xF,
+};
+
+/* Output report type definition in HIDSPI protocol */
+enum output_report_type {
+ INVALID_OUTPUT_REPORT_TYPE_0 = 0,
+ DEVICE_DESCRIPTOR = 1,
+ REPORT_DESCRIPTOR = 2,
+ SET_FEATURE = 3,
+ GET_FEATURE = 4,
+ OUTPUT_REPORT = 5,
+ GET_INPUT_REPORT = 6,
+ COMMAND_CONTENT = 7,
+};
+
+/* Set power command ID for output report */
+#define HIDSPI_SET_POWER_CMD_ID 1
+
+/* Power state definition in HIDSPI protocol */
+enum hidspi_power_state {
+ HIDSPI_ON = 1,
+ HIDSPI_SLEEP = 2,
+ HIDSPI_OFF = 3,
+};
+
+/**
+ * Input report header definition in HIDSPI protocol
+ * The report header is 32 bits and includes:
+ * protocol_ver: [0:3] Currently supported HIDSPI protocol version, must be 0x3
+ * reserved0: [4:7] Reserved bits
+ * input_report_len: [8:21] Input report length in number of bytes divided by 4
+ * last_frag_flag: [22] Indicates whether this packet is the last fragment.
+ * 1 - last fragment
+ * 0 - additional fragments follow
+ * reserved1: [23] Reserved bits
+ * sync_const: [24:31] Used to validate the input report header, must be 0x5A
+ */
+#define HIDSPI_INPUT_HEADER_SIZE sizeof(u32)
+#define HIDSPI_INPUT_HEADER_VER GENMASK(3, 0)
+#define HIDSPI_INPUT_HEADER_REPORT_LEN GENMASK(21, 8)
+#define HIDSPI_INPUT_HEADER_LAST_FLAG BIT(22)
+#define HIDSPI_INPUT_HEADER_SYNC GENMASK(31, 24)
+
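FIELD_GET() decodes the packed header; note that the length field counts 4-byte units. A sketch, with hdr32 assumed to be the first four bytes of the transfer already converted to CPU order:

    if (FIELD_GET(HIDSPI_INPUT_HEADER_SYNC, hdr32) != 0x5A ||
        FIELD_GET(HIDSPI_INPUT_HEADER_VER, hdr32) != 0x3)
            return -EINVAL;

    report_len = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, hdr32) * 4;
    is_last    = FIELD_GET(HIDSPI_INPUT_HEADER_LAST_FLAG, hdr32);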
+/**
+ * struct input_report_body_header - Input report body header definition in HIDSPI protocol
+ * @input_report_type: indicate input report type, reference to enum input_report_type
+ * @content_len: this input report body packet length
+ * @content_id: indicate this input report's report id
+ */
+struct input_report_body_header {
+ u8 input_report_type;
+ __le16 content_len;
+ u8 content_id;
+} __packed;
+
+#define HIDSPI_INPUT_BODY_HEADER_SIZE sizeof(struct input_report_body_header)
+
+/**
+ * struct input_report_body - Input report body definition in HIDSPI protocol
+ * @body_hdr: input report body header
+ * @content: input report body content
+ */
+struct input_report_body {
+ struct input_report_body_header body_hdr;
+ u8 content[];
+} __packed;
+
+#define HIDSPI_INPUT_BODY_SIZE(content_len) ((content_len) + HIDSPI_INPUT_BODY_HEADER_SIZE)
+
+/**
+ * struct output_report_header - Output report header definition in HIDSPI protocol
+ * @report_type: output report type, reference to enum output_report_type
+ * @content_len: length of content
+ * @content_id: 0x00 - descriptors
+ * report id - Set/Feature feature or Input/Output Reports
+ * command opcode - for commands
+ */
+struct output_report_header {
+ u8 report_type;
+ __le16 content_len;
+ u8 content_id;
+} __packed;
+
+#define HIDSPI_OUTPUT_REPORT_HEADER_SIZE sizeof(struct output_report_header)
+
+/**
+ * struct output_report - Output report definition in HIDSPI protocol
+ * @output_hdr: output report header
+ * @content: output report content
+ */
+struct output_report {
+ struct output_report_header output_hdr;
+ u8 content[];
+} __packed;
+
+#define HIDSPI_OUTPUT_REPORT_SIZE(content_len) ((content_len) + HIDSPI_OUTPUT_REPORT_HEADER_SIZE)
+
+/**
+ * struct hidspi_dev_descriptor - HIDSPI device descriptor definition
+ * @dev_desc_len: The length of the complete device descriptor, fixed to 0x18 (24).
+ * @bcd_ver: The version number of the HIDSPI protocol supported.
+ * In binary coded decimal (BCD) format. Must be fixed to 0x0300.
+ * @rep_desc_len: The length of the report descriptor
+ * @max_input_len: The length of the largest possible HID input (or feature) report
+ * @max_output_len: The length of the largest output (or feature) report
+ * @max_frag_len: The length of the largest fragment, where a fragment represents
+ * the body of an input report.
+ * @vendor_id: Device manufacturer's vendor ID
+ * @product_id: Device unique model/product ID
+ * @version_id: Device's unique version
+ * @flags: Specify flags for the device's operation
+ * @reserved: Reserved and should be 0
+ */
+struct hidspi_dev_descriptor {
+ __le16 dev_desc_len;
+ __le16 bcd_ver;
+ __le16 rep_desc_len;
+ __le16 max_input_len;
+ __le16 max_output_len;
+ __le16 max_frag_len;
+ __le16 vendor_id;
+ __le16 product_id;
+ __le16 version_id;
+ __le16 flags;
+ __le32 reserved;
+};
+
+#define HIDSPI_DEVICE_DESCRIPTOR_SIZE sizeof(struct hidspi_dev_descriptor)
+#define HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE \
+ (HIDSPI_INPUT_BODY_HEADER_SIZE + HIDSPI_DEVICE_DESCRIPTOR_SIZE)
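+
+/*
+ * Editorial sketch: sanity-checking a received device descriptor against
+ * the fixed values documented above (length 0x18, BCD version 0x0300).
+ */
+static inline int hidspi_check_dev_descriptor(const struct hidspi_dev_descriptor *desc)
+{
+ if (le16_to_cpu(desc->dev_desc_len) != HIDSPI_DEVICE_DESCRIPTOR_SIZE)
+ return -EINVAL;
+ if (le16_to_cpu(desc->bcd_ver) != 0x0300)
+ return -EPROTO;
+ return 0;
+}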
+
+#endif /* _HID_OVER_SPI_H_ */
diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h
index 24e1ca01f9a0..753654fff07f 100644
--- a/include/linux/hid-roccat.h
+++ b/include/linux/hid-roccat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __HID_ROCCAT_H
#define __HID_ROCCAT_H
@@ -6,10 +7,6 @@
*/
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#include <linux/hid.h>
@@ -19,7 +16,7 @@
#ifdef __KERNEL__
-int roccat_connect(struct class *klass, struct hid_device *hid,
+int roccat_connect(const struct class *klass, struct hid_device *hid,
int report_size);
void roccat_disconnect(int minor);
int roccat_report_event(int minor, u8 const *data);
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index fc7aae64dcde..e71056553108 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* HID Sensors Driver
* Copyright (c) 2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef _HID_SENSORS_HUB_H
#define _HID_SENSORS_HUB_H
@@ -30,7 +17,7 @@
* @attrib_id: Attribute id for this attribute.
* @report_id: Report id in which this information resides.
* @index: Field index in the report.
- * @units: Measurment unit for this attribute.
+ * @units: Measurement unit for this attribute.
* @unit_expo: Exponent used in the data.
* @size: Size in bytes for data size.
* @logical_minimum: Logical minimum value for this attribute.
@@ -52,8 +39,8 @@ struct hid_sensor_hub_attribute_info {
* struct sensor_hub_pending - Synchronous read pending information
* @status: Pending status true/false.
* @ready: Completion synchronization data.
- * @usage_id: Usage id for physical device, E.g. Gyro usage id.
- * @attr_usage_id: Usage Id of a field, E.g. X-AXIS for a gyro.
+ * @usage_id: Usage id for physical device, e.g. gyro usage id.
+ * @attr_usage_id: Usage Id of a field, e.g. X-axis for a gyro.
* @raw_size: Response size for a read request.
* @raw_data: Place holder for received response.
*/
@@ -117,10 +104,10 @@ struct hid_sensor_hub_callbacks {
int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev);
/**
-* sensor_hub_device_clode() - Close hub device
+* sensor_hub_device_close() - Close hub device
* @hsdev: Hub device instance.
*
-* Used to clode hid device for sensor hub.
+* Used to close hid device for sensor hub.
*/
void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev);
@@ -141,12 +128,13 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
struct hid_sensor_hub_callbacks *usage_callback);
/**
-* sensor_hub_remove_callback() - Remove client callbacks
+* sensor_hub_remove_callback() - Remove client callback
* @hsdev: Hub device instance.
-* @usage_id: Usage id of the client (E.g. 0x200076 for Gyro).
+* @usage_id: Usage id of the client (e.g. 0x200076 for gyro).
*
-* If there is a callback registred, this call will remove that
-* callbacks, so that it will stop data and event notifications.
+* Removes a previously registered callback for the given usage_id
+* and hsdev. Once removed, the client will no longer receive data or
+* event notifications.
*/
int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
u32 usage_id);
@@ -163,7 +151,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
* @info: return information about attribute after parsing report
*
* Parses report and returns the attribute information such as report id,
-* field index, units and exponet etc.
+* field index, units and exponent etc.
*/
int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
u8 type,
@@ -177,9 +165,10 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @attr_usage_id: Attribute usage id as per spec
* @report_id: Report id to look for
* @flag: Synchronous or asynchronous read
+* @is_signed: If true, fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
-* Returns data upto 32 bits.
+* Return: data up to 32 bits.
*/
enum sensor_hub_read_flags {
@@ -190,7 +179,8 @@ enum sensor_hub_read_flags {
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
u32 attr_usage_id, u32 report_id,
- enum sensor_hub_read_flags flag
+ enum sensor_hub_read_flags flag,
+ bool is_signed
);
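+
+/*
+ * Editorial sketch (not part of this header): a synchronous read using the
+ * new is_signed parameter. The gyro usage (from hid-sensor-ids.h) is
+ * illustrative; x_attr is assumed to have been filled in earlier by
+ * sensor_hub_input_get_attribute_info().
+ */
+static inline int example_read_gyro_x(struct hid_sensor_hub_device *hsdev,
+ struct hid_sensor_hub_attribute_info *x_attr)
+{
+ return sensor_hub_input_attr_get_raw_value(hsdev,
+ HID_USAGE_SENSOR_GYRO_3D, /* usage id of the client */
+ x_attr->attrib_id, /* e.g. the X-axis usage */
+ x_attr->report_id,
+ SENSOR_HUB_SYNC,
+ true); /* sign-extend fields < 32 bits */
+}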
/**
@@ -216,8 +206,9 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
* @buffer: buffer to copy output
*
* Used to get a field in feature report. For example this can get polling
-* interval, sensitivity, activate/deactivate state. On success it returns
-* number of bytes copied to buffer. On failure, it returns value < 0.
+* interval, sensitivity, activate/deactivate state.
+* Return: On success, it returns the number of bytes copied to the buffer.
+* On failure, it returns a negative value.
*/
int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
u32 field_index, int buffer_size, void *buffer);
@@ -231,6 +222,7 @@ struct hid_sensor_common {
unsigned usage_id;
atomic_t data_ready;
atomic_t user_requested_state;
+ atomic_t runtime_pm_enable;
int poll_interval;
int raw_hystersis;
int latency_ms;
@@ -240,6 +232,7 @@ struct hid_sensor_common {
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
struct hid_sensor_hub_attribute_info sensitivity;
+ struct hid_sensor_hub_attribute_info sensitivity_rel;
struct hid_sensor_hub_attribute_info report_latency;
struct work_struct work;
};
@@ -257,11 +250,17 @@ static inline int hid_sensor_convert_exponent(int unit_expo)
int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
- struct hid_sensor_common *st);
+ struct hid_sensor_common *st,
+ const u32 *sensitivity_addresses,
+ u32 sensitivity_addresses_len);
int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
int val1, int val2);
+int hid_sensor_write_raw_hyst_rel_value(struct hid_sensor_common *st, int val1,
+ int val2);
int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
int *val1, int *val2);
+int hid_sensor_read_raw_hyst_rel_value(struct hid_sensor_common *st,
+ int *val1, int *val2);
int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int val1, int val2);
int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 76033e0420a7..8a03d9696b1c 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* HID Sensors Driver
* Copyright (c) 2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef _HID_SENSORS_IDS_H
#define _HID_SENSORS_IDS_H
@@ -34,11 +21,17 @@
#define HID_USAGE_SENSOR_ALS 0x200041
#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
+#define HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE 0x2004d2
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY 0x2004d3
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X 0x2004d4
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y 0x2004d5
/* PROX (200011) */
#define HID_USAGE_SENSOR_PROX 0x200011
#define HID_USAGE_SENSOR_DATA_PRESENCE 0x2004b0
#define HID_USAGE_SENSOR_HUMAN_PRESENCE 0x2004b1
+#define HID_USAGE_SENSOR_HUMAN_PROXIMITY 0x2004b2
+#define HID_USAGE_SENSOR_HUMAN_ATTENTION 0x2004bd
/* Pressure (200031) */
#define HID_USAGE_SENSOR_PRESSURE 0x200031
@@ -141,6 +134,11 @@
#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15
/* Common selectors */
+#define HID_USAGE_SENSOR_PROP_DESC 0x200300
+#define HID_USAGE_SENSOR_PROP_FRIENDLY_NAME 0x200301
+#define HID_USAGE_SENSOR_PROP_SERIAL_NUM 0x200307
+#define HID_USAGE_SENSOR_PROP_MANUFACTURER 0x200305
+#define HID_USAGE_SENSOR_PROP_MODEL 0x200306
#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310
@@ -158,6 +156,7 @@
/* Per data field properties */
#define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00
#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000
+#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_REL_PCT 0xE000
/* Power state enumerations */
#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850
@@ -171,4 +170,14 @@
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841
+/* Custom Sensor (2000e1) */
+#define HID_USAGE_SENSOR_HINGE 0x20020B
+#define HID_USAGE_SENSOR_DATA_FIELD_LOCATION 0x200400
+#define HID_USAGE_SENSOR_DATA_FIELE_TIME_SINCE_SYS_BOOT 0x20052B
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_USAGE 0x200541
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE 0x200543
+/* Custom Sensor data: 0 <= x <= 28 */
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(x) \
+ (HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE + (x))
+
#endif
diff --git a/include/linux/hid.h b/include/linux/hid.h
index ab05a86269dc..dce862cafbbd 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2001 Vojtech Pavlik
* Copyright (c) 2006-2007 Jiri Kosina
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
@@ -26,6 +14,7 @@
#define __HID_H
+#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -37,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <uapi/linux/hid.h>
+#include <linux/hid_bpf.h>
/*
* We parse each description item into this structure. Short items data
@@ -56,7 +46,7 @@ struct hid_item {
__s16 s16;
__u32 u32;
__s32 s32;
- __u8 *longdata;
+ const __u8 *longdata;
} data;
};
@@ -91,6 +81,8 @@ struct hid_item {
#define HID_MAIN_ITEM_TAG_FEATURE 11
#define HID_MAIN_ITEM_TAG_BEGIN_COLLECTION 10
#define HID_MAIN_ITEM_TAG_END_COLLECTION 12
+#define HID_MAIN_ITEM_TAG_RESERVED_MIN 13
+#define HID_MAIN_ITEM_TAG_RESERVED_MAX 15
/*
* HID report descriptor main item contents
@@ -113,6 +105,7 @@ struct hid_item {
#define HID_COLLECTION_PHYSICAL 0
#define HID_COLLECTION_APPLICATION 1
#define HID_COLLECTION_LOGICAL 2
+#define HID_COLLECTION_NAMED_ARRAY 4
/*
* HID report descriptor global item tags
@@ -163,7 +156,10 @@ struct hid_item {
#define HID_UP_TELEPHONY 0x000b0000
#define HID_UP_CONSUMER 0x000c0000
#define HID_UP_DIGITIZER 0x000d0000
+#define HID_UP_HAPTIC 0x000e0000
#define HID_UP_PID 0x000f0000
+#define HID_UP_BATTERY 0x00850000
+#define HID_UP_CAMERA 0x00900000
#define HID_UP_HPVENDOR 0xff7f0000
#define HID_UP_HPVENDOR2 0xff010000
#define HID_UP_MSVENDOR 0xff000000
@@ -174,6 +170,7 @@ struct hid_item {
#define HID_UP_LNVENDOR 0xffa00000
#define HID_UP_SENSOR 0x00200000
#define HID_UP_ASUSVENDOR 0xff310000
+#define HID_UP_GOOGLEVENDOR 0xffd10000
#define HID_USAGE 0x0000ffff
@@ -189,6 +186,12 @@ struct hid_item {
* http://www.usb.org/developers/hidpage/HUTRR40RadioHIDUsagesFinal.pdf
*/
#define HID_GD_WIRELESS_RADIO_CTLS 0x0001000c
+/*
+ * System Multi-Axis, see:
+ * http://www.usb.org/developers/hidpage/HUTRR62_-_Generic_Desktop_CA_for_System_Multi-Axis_Controllers.txt
+ */
+#define HID_GD_SYSTEM_MULTIAXIS 0x0001000e
+
#define HID_GD_X 0x00010030
#define HID_GD_Y 0x00010031
#define HID_GD_Z 0x00010032
@@ -212,11 +215,13 @@ struct hid_item {
#define HID_GD_VBRZ 0x00010045
#define HID_GD_VNO 0x00010046
#define HID_GD_FEATURE 0x00010047
+#define HID_GD_RESOLUTION_MULTIPLIER 0x00010048
#define HID_GD_SYSTEM_CONTROL 0x00010080
#define HID_GD_UP 0x00010090
#define HID_GD_DOWN 0x00010091
#define HID_GD_RIGHT 0x00010092
#define HID_GD_LEFT 0x00010093
+#define HID_GD_DO_NOT_DISTURB 0x0001009b
/* Microsoft Win8 Wireless Radio Controls CA usage codes */
#define HID_GD_RFKILL_BTN 0x000100c6
#define HID_GD_RFKILL_LED 0x000100c7
@@ -225,12 +230,14 @@ struct hid_item {
#define HID_DC_BATTERYSTRENGTH 0x00060020
#define HID_CP_CONSUMER_CONTROL 0x000c0001
+#define HID_CP_AC_PAN 0x000c0238
#define HID_DG_DIGITIZER 0x000d0001
#define HID_DG_PEN 0x000d0002
#define HID_DG_LIGHTPEN 0x000d0003
#define HID_DG_TOUCHSCREEN 0x000d0004
#define HID_DG_TOUCHPAD 0x000d0005
+#define HID_DG_WHITEBOARD 0x000d0006
#define HID_DG_STYLUS 0x000d0020
#define HID_DG_PUCK 0x000d0021
#define HID_DG_FINGER 0x000d0022
@@ -240,6 +247,7 @@ struct hid_item {
#define HID_DG_TOUCH 0x000d0033
#define HID_DG_UNTOUCH 0x000d0034
#define HID_DG_TAP 0x000d0035
+#define HID_DG_TRANSDUCER_INDEX 0x000d0038
#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
#define HID_DG_BATTERYSTRENGTH 0x000d003b
@@ -252,6 +260,15 @@ struct hid_item {
#define HID_DG_BARRELSWITCH 0x000d0044
#define HID_DG_ERASER 0x000d0045
#define HID_DG_TABLETPICK 0x000d0046
+#define HID_DG_PEN_COLOR 0x000d005c
+#define HID_DG_PEN_LINE_WIDTH 0x000d005e
+#define HID_DG_PEN_LINE_STYLE 0x000d0070
+#define HID_DG_PEN_LINE_STYLE_INK 0x000d0072
+#define HID_DG_PEN_LINE_STYLE_PENCIL 0x000d0073
+#define HID_DG_PEN_LINE_STYLE_HIGHLIGHTER 0x000d0074
+#define HID_DG_PEN_LINE_STYLE_CHISEL_MARKER 0x000d0075
+#define HID_DG_PEN_LINE_STYLE_BRUSH 0x000d0076
+#define HID_DG_PEN_LINE_STYLE_NO_PREFERENCE 0x000d0077
#define HID_CP_CONSUMERCONTROL 0x000c0001
#define HID_CP_NUMERICKEYPAD 0x000c0002
@@ -263,6 +280,8 @@ struct hid_item {
#define HID_CP_SELECTION 0x000c0080
#define HID_CP_MEDIASELECTION 0x000c0087
#define HID_CP_SELECTDISC 0x000c00ba
+#define HID_CP_VOLUMEUP 0x000c00e9
+#define HID_CP_VOLUMEDOWN 0x000c00ea
#define HID_CP_PLAYBACKSPEED 0x000c00f1
#define HID_CP_PROXIMITY 0x000c0109
#define HID_CP_SPEAKERSYSTEM 0x000c0160
@@ -281,6 +300,7 @@ struct hid_item {
#define HID_DG_DEVICECONFIG 0x000d000e
#define HID_DG_DEVICESETTINGS 0x000d0023
+#define HID_DG_AZIMUTH 0x000d003f
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -289,32 +309,52 @@ struct hid_item {
#define HID_DG_DEVICEINDEX 0x000d0053
#define HID_DG_CONTACTCOUNT 0x000d0054
#define HID_DG_CONTACTMAX 0x000d0055
+#define HID_DG_SCANTIME 0x000d0056
+#define HID_DG_SURFACESWITCH 0x000d0057
+#define HID_DG_BUTTONSWITCH 0x000d0058
#define HID_DG_BUTTONTYPE 0x000d0059
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
+#define HID_DG_LATENCYMODE 0x000d0060
+
+#define HID_HP_SIMPLECONTROLLER 0x000e0001
+#define HID_HP_WAVEFORMLIST 0x000e0010
+#define HID_HP_DURATIONLIST 0x000e0011
+#define HID_HP_AUTOTRIGGER 0x000e0020
+#define HID_HP_MANUALTRIGGER 0x000e0021
+#define HID_HP_AUTOTRIGGERASSOCIATEDCONTROL 0x000e0022
+#define HID_HP_INTENSITY 0x000e0023
+#define HID_HP_REPEATCOUNT 0x000e0024
+#define HID_HP_RETRIGGERPERIOD 0x000e0025
+#define HID_HP_WAVEFORMVENDORPAGE 0x000e0026
+#define HID_HP_WAVEFORMVENDORID 0x000e0027
+#define HID_HP_WAVEFORMCUTOFFTIME 0x000e0028
+#define HID_HP_WAVEFORMNONE 0x000e1001
+#define HID_HP_WAVEFORMSTOP 0x000e1002
+#define HID_HP_WAVEFORMCLICK 0x000e1003
+#define HID_HP_WAVEFORMBUZZCONTINUOUS 0x000e1004
+#define HID_HP_WAVEFORMRUMBLECONTINUOUS 0x000e1005
+#define HID_HP_WAVEFORMPRESS 0x000e1006
+#define HID_HP_WAVEFORMRELEASE 0x000e1007
+#define HID_HP_VENDORWAVEFORMMIN 0x000e2001
+#define HID_HP_VENDORWAVEFORMMAX 0x000e2fff
+
+#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065
+#define HID_BAT_CHARGING 0x00850044
#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
-/*
- * HID report types --- Ouch! HID spec says 1 2 3!
- */
-
-#define HID_INPUT_REPORT 0
-#define HID_OUTPUT_REPORT 1
-#define HID_FEATURE_REPORT 2
-
-#define HID_REPORT_TYPES 3
/*
* HID connect requests
*/
-#define HID_CONNECT_HIDINPUT 0x01
-#define HID_CONNECT_HIDINPUT_FORCE 0x02
-#define HID_CONNECT_HIDRAW 0x04
-#define HID_CONNECT_HIDDEV 0x08
-#define HID_CONNECT_HIDDEV_FORCE 0x10
-#define HID_CONNECT_FF 0x20
-#define HID_CONNECT_DRIVER 0x40
+#define HID_CONNECT_HIDINPUT BIT(0)
+#define HID_CONNECT_HIDINPUT_FORCE BIT(1)
+#define HID_CONNECT_HIDRAW BIT(2)
+#define HID_CONNECT_HIDDEV BIT(3)
+#define HID_CONNECT_HIDDEV_FORCE BIT(4)
+#define HID_CONNECT_FF BIT(5)
+#define HID_CONNECT_DRIVER BIT(6)
#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \
HID_CONNECT_HIDDEV|HID_CONNECT_FF)
@@ -322,29 +362,64 @@ struct hid_item {
* HID device quirks.
*/
-/*
+/*
* Increase this if you need to configure more HID quirks at module load time
*/
#define MAX_USBHID_BOOT_QUIRKS 4
-#define HID_QUIRK_INVERT 0x00000001
-#define HID_QUIRK_NOTOUCH 0x00000002
-#define HID_QUIRK_IGNORE 0x00000004
-#define HID_QUIRK_NOGET 0x00000008
-#define HID_QUIRK_HIDDEV_FORCE 0x00000010
-#define HID_QUIRK_BADPAD 0x00000020
-#define HID_QUIRK_MULTI_INPUT 0x00000040
-#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
-#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
-/* 0x00000200 reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
-#define HID_QUIRK_ALWAYS_POLL 0x00000400
-#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
-#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
-#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
-#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
-#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
-#define HID_QUIRK_NO_IGNORE 0x40000000
-#define HID_QUIRK_NO_INPUT_SYNC 0x80000000
+/**
+ * DOC: HID quirks
+ * | @HID_QUIRK_NOTOUCH:
+ * | @HID_QUIRK_IGNORE: ignore this device
+ * | @HID_QUIRK_NOGET:
+ * | @HID_QUIRK_HIDDEV_FORCE:
+ * | @HID_QUIRK_BADPAD:
+ * | @HID_QUIRK_MULTI_INPUT:
+ * | @HID_QUIRK_HIDINPUT_FORCE:
+ * | @HID_QUIRK_ALWAYS_POLL:
+ * | @HID_QUIRK_INPUT_PER_APP:
+ * | @HID_QUIRK_X_INVERT:
+ * | @HID_QUIRK_Y_INVERT:
+ * | @HID_QUIRK_IGNORE_MOUSE:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORTS:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORT_ID:
+ * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP:
+ * | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
+ * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
+ * | @HID_QUIRK_NOINVERT:
+ * | @HID_QUIRK_IGNORE_SPECIAL_DRIVER:
+ * | @HID_QUIRK_POWER_ON_AFTER_BACKLIGHT:
+ * | @HID_QUIRK_FULLSPEED_INTERVAL:
+ * | @HID_QUIRK_NO_INIT_REPORTS:
+ * | @HID_QUIRK_NO_IGNORE:
+ * | @HID_QUIRK_NO_INPUT_SYNC:
+ */
+/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */
+#define HID_QUIRK_NOTOUCH BIT(1)
+#define HID_QUIRK_IGNORE BIT(2)
+#define HID_QUIRK_NOGET BIT(3)
+#define HID_QUIRK_HIDDEV_FORCE BIT(4)
+#define HID_QUIRK_BADPAD BIT(5)
+#define HID_QUIRK_MULTI_INPUT BIT(6)
+#define HID_QUIRK_HIDINPUT_FORCE BIT(7)
+/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */
+/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
+#define HID_QUIRK_ALWAYS_POLL BIT(10)
+#define HID_QUIRK_INPUT_PER_APP BIT(11)
+#define HID_QUIRK_X_INVERT BIT(12)
+#define HID_QUIRK_Y_INVERT BIT(13)
+#define HID_QUIRK_IGNORE_MOUSE BIT(14)
+#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
+#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
+#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
+#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
+#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
+#define HID_QUIRK_NOINVERT BIT(21)
+#define HID_QUIRK_IGNORE_SPECIAL_DRIVER BIT(22)
+#define HID_QUIRK_POWER_ON_AFTER_BACKLIGHT BIT(23)
+#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
+#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
+#define HID_QUIRK_NO_IGNORE BIT(30)
+#define HID_QUIRK_NO_INPUT_SYNC BIT(31)
/*
* HID device groups
@@ -363,6 +438,9 @@ struct hid_item {
#define HID_GROUP_RMI 0x0100
#define HID_GROUP_WACOM 0x0101
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
+#define HID_GROUP_STEAM 0x0103
+#define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104
+#define HID_GROUP_VIVALDI 0x0105
/*
* HID protocol status
@@ -371,6 +449,12 @@ struct hid_item {
#define HID_BOOT_PROTOCOL 0
/*
+ * HID units
+ */
+#define HID_UNIT_GRAM 0x0101
+#define HID_UNIT_NEWTON 0xe111
+
+/*
* This is the global environment of the parser. This information is
* persistent for main-items. The global environment can be saved and
* restored with PUSH/POP statements.
@@ -398,6 +482,7 @@ struct hid_global {
struct hid_local {
unsigned usage[HID_MAX_USAGES]; /* usage array */
+ u8 usage_size[HID_MAX_USAGES]; /* usage size array */
unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
unsigned usage_index;
unsigned usage_minimum;
@@ -411,6 +496,7 @@ struct hid_local {
*/
struct hid_collection {
+ int parent_idx; /* device->collection */
unsigned type;
unsigned usage;
unsigned level;
@@ -420,12 +506,16 @@ struct hid_usage {
unsigned hid; /* hid usage code */
unsigned collection_index; /* index into collection array */
unsigned usage_index; /* index into usage array */
+ __s8 resolution_multiplier;/* Effective Resolution Multiplier
+ (HUT v1.12, 4.3.1), default: 1 */
/* hidinput data */
+ __s8 wheel_factor; /* 120/resolution_multiplier */
__u16 code; /* input driver code */
__u8 type; /* input driver type */
- __s8 hat_min; /* hat switch fun */
- __s8 hat_max; /* ditto */
- __s8 hat_dir; /* ditto */
+ __s16 hat_min; /* hat switch fun */
+ __s16 hat_max; /* ditto */
+ __s16 hat_dir; /* ditto */
+ __s16 wheel_accumulated; /* hi-res wheel */
};
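+
+/*
+ * Editorial sketch of how the hi-res wheel fields above are consumed,
+ * modelled on hid-input's scroll handling: hi-res movement is scaled by
+ * wheel_factor (120/resolution_multiplier), accumulated, and one legacy
+ * REL_WHEEL detent is emitted per 120 accumulated units.
+ */
+static void example_hires_scroll(struct input_dev *input,
+ struct hid_usage *usage, __s32 value)
+{
+ int hi_res = value * usage->wheel_factor;
+
+ usage->wheel_accumulated += hi_res;
+ input_event(input, EV_REL, REL_WHEEL_HI_RES, hi_res);
+ input_event(input, EV_REL, REL_WHEEL, usage->wheel_accumulated / 120);
+ usage->wheel_accumulated %= 120;
+}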
struct hid_input;
@@ -442,29 +532,50 @@ struct hid_field {
unsigned report_count; /* number of this field in the report */
unsigned report_type; /* (input,output,feature) */
__s32 *value; /* last known value(s) */
+ __s32 *new_value; /* newly read value(s) */
+ __s32 *usages_priorities; /* priority of each usage when reading the report
+ * bits 8-16 are reserved for hid-input usage
+ */
__s32 logical_minimum;
__s32 logical_maximum;
__s32 physical_minimum;
__s32 physical_maximum;
__s32 unit_exponent;
unsigned unit;
+ bool ignored; /* this field is ignored in this event */
struct hid_report *report; /* associated report */
unsigned index; /* index into report->field[] */
/* hidinput data */
struct hid_input *hidinput; /* associated input structure */
__u16 dpad; /* dpad input code */
+ unsigned int slot_idx; /* slot index in a report */
};
#define HID_MAX_FIELDS 256
+struct hid_field_entry {
+ struct list_head list;
+ struct hid_field *field;
+ unsigned int index;
+ __s32 priority;
+};
+
struct hid_report {
struct list_head list;
- unsigned id; /* id of this report */
- unsigned type; /* report type */
+ struct list_head hidinput_list;
+ struct list_head field_entry_list; /* ordered list of input fields */
+ unsigned int id; /* id of this report */
+ enum hid_report_type type; /* report type */
+ unsigned int application; /* application usage for this report */
struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
+ struct hid_field_entry *field_entries; /* allocated memory of input field_entry */
unsigned maxfield; /* maximum valid field index */
unsigned size; /* size of the report (bits) */
struct hid_device *device; /* associated device */
+
+ /* tool related state */
+ bool tool_active; /* whether the current tool is active */
+ unsigned int tool; /* BTN_TOOL_* */
};
#define HID_MAX_IDS 256
@@ -476,7 +587,7 @@ struct hid_report_enum {
};
#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
-#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
+#define HID_MAX_BUFFER_SIZE 16384 /* 16kb */
#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
#define HID_OUTPUT_FIFO_SIZE 64
@@ -491,18 +602,23 @@ struct hid_output_fifo {
char *raw_report;
};
-#define HID_CLAIMED_INPUT 1
-#define HID_CLAIMED_HIDDEV 2
-#define HID_CLAIMED_HIDRAW 4
-#define HID_CLAIMED_DRIVER 8
+#define HID_CLAIMED_INPUT BIT(0)
+#define HID_CLAIMED_HIDDEV BIT(1)
+#define HID_CLAIMED_HIDRAW BIT(2)
+#define HID_CLAIMED_DRIVER BIT(3)
-#define HID_STAT_ADDED 1
-#define HID_STAT_PARSED 2
+#define HID_STAT_ADDED BIT(0)
+#define HID_STAT_PARSED BIT(1)
+#define HID_STAT_DUP_DETECTED BIT(2)
+#define HID_STAT_REPROBED BIT(3)
struct hid_input {
struct list_head list;
struct hid_report *report;
struct input_dev *input;
+ const char *name;
+ struct list_head reports; /* the list of reports */
+ unsigned int application; /* application usage for this input */
bool registered;
};
@@ -512,18 +628,26 @@ enum hid_type {
HID_TYPE_USBNONE
};
+enum hid_battery_status {
+ HID_BATTERY_UNKNOWN = 0,
+ HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */
+ HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */
+};
+
struct hid_driver;
struct hid_ll_driver;
-struct hid_device { /* device report descriptor */
- __u8 *dev_rdesc;
- unsigned dev_rsize;
- __u8 *rdesc;
- unsigned rsize;
+struct hid_device {
+ const __u8 *dev_rdesc; /* device report descriptor */
+ const __u8 *bpf_rdesc; /* bpf modified report descriptor, if any */
+ const __u8 *rdesc; /* currently used report descriptor */
+ unsigned int dev_rsize;
+ unsigned int bpf_rsize;
+ unsigned int rsize;
+ unsigned int collection_size; /* Number of allocated hid_collections */
struct hid_collection *collection; /* List of HID collections */
- unsigned collection_size; /* Number of allocated hid_collections */
- unsigned maxcollection; /* Number of parsed collections */
- unsigned maxapplication; /* Number of applications */
+ unsigned int maxcollection; /* Number of parsed collections */
+ unsigned int maxapplication; /* Number of applications */
__u16 bus; /* BUS ID */
__u16 group; /* Report group */
__u32 vendor; /* Vendor ID */
@@ -537,8 +661,9 @@ struct hid_device { /* device report descriptor */
struct semaphore driver_input_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
+ void *devres_group_id; /* ID of probe devres group */
- struct hid_ll_driver *ll_driver;
+ const struct hid_ll_driver *ll_driver;
struct mutex ll_open_lock;
unsigned int ll_open_count;
@@ -554,12 +679,16 @@ struct hid_device { /* device report descriptor */
__s32 battery_max;
__s32 battery_report_type;
__s32 battery_report_id;
- bool battery_reported;
+ __s32 battery_charge_status;
+ enum hid_battery_status battery_status;
+ bool battery_avoid_query;
+ ktime_t battery_ratelimit_time;
#endif
- unsigned int status; /* see STAT flags above */
+ unsigned long status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
+ unsigned initial_quirks; /* Initial set of quirks supplied when creating device */
bool io_started; /* If IO has started */
struct list_head inputs; /* The list of inputs */
@@ -590,8 +719,17 @@ struct hid_device { /* device report descriptor */
struct list_head debug_list;
spinlock_t debug_list_lock;
wait_queue_head_t debug_wait;
+ struct kref ref;
+
+ unsigned int id; /* system unique id */
+
+#ifdef CONFIG_HID_BPF
+ struct hid_bpf bpf; /* hid-bpf data */
+#endif /* CONFIG_HID_BPF */
};
+void hiddev_free(struct kref *ref);
+
#define to_hid_device(pdev) \
container_of(pdev, struct hid_device, dev)
@@ -615,12 +753,13 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
struct hid_parser {
struct hid_global global;
struct hid_global global_stack[HID_GLOBAL_STACK_SIZE];
- unsigned global_stack_ptr;
+ unsigned int global_stack_ptr;
struct hid_local local;
- unsigned collection_stack[HID_COLLECTION_STACK_SIZE];
- unsigned collection_stack_ptr;
+ unsigned int *collection_stack;
+ unsigned int collection_stack_ptr;
+ unsigned int collection_stack_size;
struct hid_device *device;
- unsigned scan_flags;
+ unsigned int scan_flags;
};
struct hid_class_descriptor {
@@ -634,8 +773,9 @@ struct hid_descriptor {
__le16 bcdHID;
__u8 bCountryCode;
__u8 bNumDescriptors;
+ struct hid_class_descriptor rpt_desc;
- struct hid_class_descriptor desc[1];
+ struct hid_class_descriptor opt_descs[];
} __attribute__ ((packed));
#define HID_DEVICE(b, g, ven, prod) \
@@ -670,6 +810,7 @@ struct hid_usage_id {
* to be called)
* @dyn_list: list of dynamically added device ids
* @dyn_lock: lock protecting @dyn_list
+ * @match: check if the given device is handled by this driver
* @probe: new device inserted
* @remove: device removed (NULL if not a hot-plug capable driver)
* @report_table: on which reports to call raw_event (NULL means all)
@@ -685,13 +826,15 @@ struct hid_usage_id {
* @suspend: invoked on suspend (NULL means nop)
* @resume: invoked on resume if device was not reset (NULL means nop)
* @reset_resume: invoked on resume if device was reset (NULL means nop)
+ * @on_hid_hw_open: invoked when hid core opens first instance (NULL means nop)
+ * @on_hid_hw_close: invoked when hid core closes last instance (NULL means nop)
*
* probe should return -errno on error, or 0 on success. During probe,
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
- * raw_event and event should return 0 on no action performed, 1 when no
- * further processing should be done and negative on error
+ * raw_event and event should return negative on error; any other value
+ * passes the event on to .event(). Typically return 0 for success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
@@ -704,12 +847,13 @@ struct hid_usage_id {
* zero from them.
*/
struct hid_driver {
- char *name;
+ const char *name;
const struct hid_device_id *id_table;
struct list_head dyn_list;
spinlock_t dyn_lock;
+ bool (*match)(struct hid_device *dev, bool ignore_special_driver);
int (*probe)(struct hid_device *dev, const struct hid_device_id *id);
void (*remove)(struct hid_device *dev);
@@ -721,7 +865,7 @@ struct hid_driver {
struct hid_usage *usage, __s32 value);
void (*report)(struct hid_device *hdev, struct hid_report *report);
- __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
+ const __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
unsigned int *size);
int (*input_mapping)(struct hid_device *hdev,
@@ -735,11 +879,13 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
-#ifdef CONFIG_PM
+
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
int (*reset_resume)(struct hid_device *hdev);
-#endif
+ void (*on_hid_hw_open)(struct hid_device *hdev);
+ void (*on_hid_hw_close)(struct hid_device *hdev);
+
/* private: */
struct device_driver driver;
};
@@ -748,11 +894,12 @@ struct hid_driver {
container_of(pdrv, struct hid_driver, driver)
/**
- * hid_ll_driver - low level driver callbacks
+ * struct hid_ll_driver - low level driver callbacks
* @start: called on probe to start the device
* @stop: called on remove
* @open: called by input layer on open
* @close: called by input layer on close
+ * @power: request underlying hardware to enter requested power mode
* @parse: this method is called only once to parse the device data,
* shouldn't allocate anything to not leak memory
* @request: send report request to device (e.g. feature report)
@@ -760,6 +907,8 @@ struct hid_driver {
* @raw_request: send raw report request to device (e.g. feature report)
* @output_report: send output report to device
* @idle: send idle request to device
+ * @may_wakeup: return whether the device may act as a wakeup source during system-suspend
+ * @max_buffer_size: over-ride maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
*/
struct hid_ll_driver {
int (*start)(struct hid_device *hdev);
@@ -784,34 +933,32 @@ struct hid_ll_driver {
int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
-};
+ bool (*may_wakeup)(struct hid_device *hdev);
-extern struct hid_ll_driver i2c_hid_ll_driver;
-extern struct hid_ll_driver hidp_hid_driver;
-extern struct hid_ll_driver uhid_hid_driver;
-extern struct hid_ll_driver usb_hid_driver;
+ unsigned int max_buffer_size;
+};
-static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
- struct hid_ll_driver *driver)
-{
- return hdev->ll_driver == driver;
-}
+extern bool hid_is_usb(const struct hid_device *hdev);
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
/* We ignore a few input applications that are not widely used */
-#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006)))
+#define IS_INPUT_APPLICATION(a) \
+ (((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \
+ || ((a >= HID_DG_DIGITIZER) && (a <= HID_DG_WHITEBOARD)) \
+ || (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \
+ || (a == HID_GD_WIRELESS_RADIO_CTLS))
/* HID core API */
-extern int hid_debug;
-
extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
+extern const struct bus_type hid_bus_type;
+
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
@@ -837,37 +984,59 @@ extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct h
extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report);
extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
+void hidinput_reset_resume(struct hid_device *hid);
+struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
+ unsigned int application, unsigned int usage);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
-int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
-void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+int __hid_request(struct hid_device *hid, struct hid_report *rep, enum hid_class_request reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
-struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
-int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+struct hid_report *hid_register_report(struct hid_device *device,
+ enum hid_report_type type, unsigned int id,
+ unsigned int application);
+int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int field_index,
unsigned int report_counts);
+
+void hid_setup_resolution_multiplier(struct hid_device *hid);
int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
void hid_disconnect(struct hid_device *hid);
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id);
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id);
-s32 hid_snto32(__u32 value, unsigned n);
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv);
+bool hid_compare_device_paths(struct hid_device *hdev_a,
+ struct hid_device *hdev_b, char separator);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
+#ifdef CONFIG_PM
+int hid_driver_suspend(struct hid_device *hdev, pm_message_t state);
+int hid_driver_reset_resume(struct hid_device *hdev);
+int hid_driver_resume(struct hid_device *hdev);
+#else
+static inline int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) { return 0; }
+static inline int hid_driver_reset_resume(struct hid_device *hdev) { return 0; }
+static inline int hid_driver_resume(struct hid_device *hdev) { return 0; }
+#endif
+
/**
* hid_device_io_start - enable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* This should only be called during probe or remove and only be
* called by the thread calling probe or remove. It will allow
@@ -885,7 +1054,7 @@ static inline void hid_device_io_start(struct hid_device *hid) {
/**
* hid_device_io_stop - disable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* Should only be called after hid_device_io_start. It will prevent
* incoming packets from going to the driver for the duration of
@@ -911,39 +1080,65 @@ static inline void hid_device_io_stop(struct hid_device *hid) {
* @max: maximal valid usage->code to consider later (out parameter)
* @type: input event type (EV_KEY, EV_REL, ...)
* @c: code which corresponds to this usage and type
+ *
+ * The value pointed to by @bit will be set to NULL if either @type is
+ * an unhandled event type, or if @c is out of range for @type. This
+ * can be used as an error condition.
*/
static inline void hid_map_usage(struct hid_input *hidinput,
struct hid_usage *usage, unsigned long **bit, int *max,
- __u8 type, __u16 c)
+ __u8 type, unsigned int c)
{
struct input_dev *input = hidinput->input;
-
- usage->type = type;
- usage->code = c;
+ unsigned long *bmap = NULL;
+ unsigned int limit = 0;
switch (type) {
case EV_ABS:
- *bit = input->absbit;
- *max = ABS_MAX;
+ bmap = input->absbit;
+ limit = ABS_MAX;
break;
case EV_REL:
- *bit = input->relbit;
- *max = REL_MAX;
+ bmap = input->relbit;
+ limit = REL_MAX;
break;
case EV_KEY:
- *bit = input->keybit;
- *max = KEY_MAX;
+ bmap = input->keybit;
+ limit = KEY_MAX;
break;
case EV_LED:
- *bit = input->ledbit;
- *max = LED_MAX;
+ bmap = input->ledbit;
+ limit = LED_MAX;
+ break;
+ case EV_MSC:
+ bmap = input->mscbit;
+ limit = MSC_MAX;
break;
}
+
+ if (unlikely(c > limit || !bmap)) {
+ pr_warn_ratelimited("%s: Invalid code %d type %d\n",
+ input->name, c, type);
+ *bit = NULL;
+ return;
+ }
+
+ usage->type = type;
+ usage->code = c;
+ *max = limit;
+ *bit = bmap;
}
/**
* hid_map_usage_clear - map usage input bits and clear the input bit
*
+ * @hidinput: hidinput which we are interested in
+ * @usage: usage to fill in
+ * @bit: pointer to input->{}bit (out parameter)
+ * @max: maximal valid usage->code to consider later (out parameter)
+ * @type: input event type (EV_KEY, EV_REL, ...)
+ * @c: code which corresponds to this usage and type
+ *
* The same as hid_map_usage, except the @c bit is also cleared in supported
* bits (@bit).
*/
@@ -952,7 +1147,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput,
__u8 type, __u16 c)
{
hid_map_usage(hidinput, usage, bit, max, type, c);
- clear_bit(c, *bit);
+ if (*bit)
+ clear_bit(usage->code, *bit);
}
/**
@@ -974,6 +1170,20 @@ int __must_check hid_hw_start(struct hid_device *hdev,
void hid_hw_stop(struct hid_device *hdev);
int __must_check hid_hw_open(struct hid_device *hdev);
void hid_hw_close(struct hid_device *hdev);
+void hid_hw_request(struct hid_device *hdev,
+ struct hid_report *report, enum hid_class_request reqtype);
+int __hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ __u64 source, bool from_bpf);
+int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, __u64 source,
+ bool from_bpf);
+int hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype);
+int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len);
/**
* hid_hw_power - requests underlying HW to go into given power mode
@@ -992,82 +1202,36 @@ static inline int hid_hw_power(struct hid_device *hdev, int level)
/**
- * hid_hw_request - send report request to device
+ * hid_hw_idle - send idle request to device
*
* @hdev: hid device
- * @report: report to send
+ * @report: report to control
+ * @idle: idle state
* @reqtype: hid request type
*/
-static inline void hid_hw_request(struct hid_device *hdev,
- struct hid_report *report, int reqtype)
-{
- if (hdev->ll_driver->request)
- return hdev->ll_driver->request(hdev, report, reqtype);
-
- __hid_request(hdev, report, reqtype);
-}
-
-/**
- * hid_hw_raw_request - send report request to device
- *
- * @hdev: hid device
- * @reportnum: report ID
- * @buf: in/out data to transfer
- * @len: length of buf
- * @rtype: HID report type
- * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
- *
- * @return: count of data transfered, negative if error
- *
- * Same behavior as hid_hw_request, but with raw buffers instead.
- */
-static inline int hid_hw_raw_request(struct hid_device *hdev,
- unsigned char reportnum, __u8 *buf,
- size_t len, unsigned char rtype, int reqtype)
+static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
+ enum hid_class_request reqtype)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
+ if (hdev->ll_driver->idle)
+ return hdev->ll_driver->idle(hdev, report, idle, reqtype);
- return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
- rtype, reqtype);
+ return 0;
}
/**
- * hid_hw_output_report - send output report to device
+ * hid_hw_may_wakeup - return whether the hid device may act as a wakeup source during system-suspend
*
* @hdev: hid device
- * @buf: raw data to transfer
- * @len: length of buf
- *
- * @return: count of data transfered, negative if error
*/
-static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf,
- size_t len)
+static inline bool hid_hw_may_wakeup(struct hid_device *hdev)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
-
- if (hdev->ll_driver->output_report)
- return hdev->ll_driver->output_report(hdev, buf, len);
-
- return -ENOSYS;
-}
+ if (hdev->ll_driver->may_wakeup)
+ return hdev->ll_driver->may_wakeup(hdev);
-/**
- * hid_hw_idle - send idle request to device
- *
- * @hdev: hid device
- * @report: report to control
- * @idle: idle state
- * @reqtype: hid request type
- */
-static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
- int reqtype)
-{
- if (hdev->ll_driver->idle)
- return hdev->ll_driver->idle(hdev, report, idle, reqtype);
+ if (hdev->dev.parent)
+ return device_may_wakeup(hdev->dev.parent);
- return 0;
+ return false;
}
/**
@@ -1084,51 +1248,60 @@ static inline void hid_hw_wait(struct hid_device *hdev)
/**
* hid_report_len - calculate the report length
*
- * @report: the report we want to know the length
+ * @report: the report whose length we want to know
+ *
+ * The length counts the report ID byte, but only if the ID is nonzero
+ * and therefore is included in the report. Reports whose ID is zero
+ * never include an ID byte.
*/
-static inline int hid_report_len(struct hid_report *report)
+static inline u32 hid_report_len(struct hid_report *report)
{
- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
}
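+
+/*
+ * Worked example: a 12-bit report with a nonzero ID occupies
+ * DIV_ROUND_UP(12, 8) + 1 = 3 bytes on the wire (the ID byte plus two
+ * data bytes), while the same report with ID 0 occupies only 2 bytes.
+ */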
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
- int interrupt);
+int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
/* HID quirks API */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
-int usbhid_quirks_init(char **quirks_param);
-void usbhid_quirks_exit(void);
-
-#ifdef CONFIG_HID_PID
-int hid_pidff_init(struct hid_device *hid);
-#else
-#define hid_pidff_init NULL
-#endif
-
-#define dbg_hid(format, arg...) \
-do { \
- if (hid_debug) \
- printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \
-} while (0)
-
-#define hid_printk(level, hid, fmt, arg...) \
- dev_printk(level, &(hid)->dev, fmt, ##arg)
-#define hid_emerg(hid, fmt, arg...) \
- dev_emerg(&(hid)->dev, fmt, ##arg)
-#define hid_crit(hid, fmt, arg...) \
- dev_crit(&(hid)->dev, fmt, ##arg)
-#define hid_alert(hid, fmt, arg...) \
- dev_alert(&(hid)->dev, fmt, ##arg)
-#define hid_err(hid, fmt, arg...) \
- dev_err(&(hid)->dev, fmt, ##arg)
-#define hid_notice(hid, fmt, arg...) \
- dev_notice(&(hid)->dev, fmt, ##arg)
-#define hid_warn(hid, fmt, arg...) \
- dev_warn(&(hid)->dev, fmt, ##arg)
-#define hid_info(hid, fmt, arg...) \
- dev_info(&(hid)->dev, fmt, ##arg)
-#define hid_dbg(hid, fmt, arg...) \
- dev_dbg(&(hid)->dev, fmt, ##arg)
+unsigned long hid_lookup_quirk(const struct hid_device *hdev);
+int hid_quirks_init(char **quirks_param, __u16 bus, int count);
+void hid_quirks_exit(__u16 bus);
+
+#define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__)
+
+#define hid_err(hid, fmt, ...) \
+ dev_err(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_notice(hid, fmt, ...) \
+ dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn(hid, fmt, ...) \
+ dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn_ratelimited(hid, fmt, ...) \
+ dev_warn_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_info(hid, fmt, ...) \
+ dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_dbg(hid, fmt, ...) \
+ dev_dbg(&(hid)->dev, fmt, ##__VA_ARGS__)
+
+#define hid_err_once(hid, fmt, ...) \
+ dev_err_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_notice_once(hid, fmt, ...) \
+ dev_notice_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn_once(hid, fmt, ...) \
+ dev_warn_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_info_once(hid, fmt, ...) \
+ dev_info_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_dbg_once(hid, fmt, ...) \
+ dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+
+#define hid_err_ratelimited(hid, fmt, ...) \
+ dev_err_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_notice_ratelimited(hid, fmt, ...) \
+ dev_notice_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn_ratelimited(hid, fmt, ...) \
+ dev_warn_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_info_ratelimited(hid, fmt, ...) \
+ dev_info_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_dbg_ratelimited(hid, fmt, ...) \
+ dev_dbg_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
#endif
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
new file mode 100644
index 000000000000..a2e47dbcf82c
--- /dev/null
+++ b/include/linux/hid_bpf.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __HID_BPF_H
+#define __HID_BPF_H
+
+#include <linux/bpf.h>
+#include <linux/mutex.h>
+#include <linux/srcu.h>
+#include <uapi/linux/hid.h>
+
+struct hid_device;
+
+/*
+ * The following is the user-facing HID BPF API.
+ *
+ * Extra care should be taken when editing this part, as
+ * it might break existing out-of-tree BPF programs.
+ */
+
+/**
+ * struct hid_bpf_ctx - User accessible data for all HID programs
+ *
+ * ``data`` is not directly accessible from the context. We need to issue
+ * a call to hid_bpf_get_data() in order to get a pointer to that field.
+ *
+ * @hid: the &struct hid_device representing the device itself
+ * @allocated_size: Allocated size of data.
+ *
+ * This is how much memory is available and can be requested
+ * by the HID program.
+ * Note that for ``HID_BPF_RDESC_FIXUP``, that memory is set to
+ * ``4096`` (4 KB).
+ * @size: Valid data in the data field.
+ *
+ * Programs can get the available valid size in data by fetching this field.
+ * Programs can also change this value by returning a positive number in the
+ * program.
+ * To discard the event, return a negative error code.
+ *
+ * ``size`` must always be less than or equal to ``allocated_size`` (it is enforced
+ * once all BPF programs have been run).
+ * @retval: Return value of the previous program.
+ *
+ * ``hid`` and ``allocated_size`` are read-only, ``size`` and ``retval`` are read-write.
+ */
+struct hid_bpf_ctx {
+ struct hid_device *hid;
+ __u32 allocated_size;
+ union {
+ __s32 retval;
+ __s32 size;
+ };
+};
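+
+/*
+ * Editorial sketch of a minimal HID-BPF program consuming this context
+ * (BPF-side C, compiled against vmlinux.h; the struct_ops map declaration
+ * and license lines are omitted). hid_bpf_get_data() is the kfunc
+ * mentioned above.
+ */
+SEC("struct_ops/hid_device_event")
+int BPF_PROG(example_event, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4 /* size */);
+
+ if (!data)
+ return 0; /* unable to access the data: leave the event alone */
+
+ data[1] &= 0xfe; /* illustrative: mask out one button bit */
+ return 0; /* keep processing with the modified data */
+}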
+
+/*
+ * Below is HID internal
+ */
+
+#define HID_BPF_MAX_PROGS_PER_DEV 64
+#define HID_BPF_FLAG_MASK (((HID_BPF_FLAG_MAX - 1) << 1) - 1)
+
+
+struct hid_report_enum;
+
+struct hid_ops {
+ struct hid_report *(*hid_get_report)(struct hid_report_enum *report_enum, const u8 *data);
+ int (*hid_hw_raw_request)(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf);
+ int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len,
+ u64 source, bool from_bpf);
+ int (*hid_input_report)(struct hid_device *hid, enum hid_report_type type,
+ u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
+ bool lock_already_taken);
+ struct module *owner;
+ const struct bus_type *bus_type;
+};
+
+extern const struct hid_ops *hid_ops;
+
+/**
+ * struct hid_bpf_ops - A BPF struct_ops of callbacks allowing to attach HID-BPF
+ * programs to a HID device
+ * @hid_id: the HID unique ID to attach to. This is writeable before ``load()``, and
+ * cannot be changed after
+ * @flags: flags used while attaching the struct_ops to the device. Currently
+ * the only supported values are %0 and ``BPF_F_BEFORE``.
+ * Writeable only before ``load()``
+ */
+struct hid_bpf_ops {
+ /* hid_id needs to stay first so we can easily change it
+ * from userspace.
+ */
+ int hid_id;
+ u32 flags;
+
+ /* private: do not show up in the docs */
+ struct list_head list;
+
+ /* public: rest should show up in the docs */
+
+ /**
+ * @hid_device_event: called whenever an event is coming in from the device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * Return: %0 on success and keep processing; a positive
+ * value to change the size of the incoming buffer; a negative
+ * error code to interrupt the processing of this event
+ *
+ * Context: Interrupt context.
+ */
+ int (*hid_device_event)(struct hid_bpf_ctx *ctx, enum hid_report_type report_type,
+ u64 source);
+
+ /**
+ * @hid_rdesc_fixup: called when the probe function parses the report descriptor
+ * of the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * Return: %0 on success and keep processing; a positive
+ * value to change the size of the incoming buffer; a negative
+ * error code to interrupt the processing of this device
+ */
+ int (*hid_rdesc_fixup)(struct hid_bpf_ctx *ctx);
+
+ /**
+ * @hid_hw_request: called whenever a hid_hw_raw_request() call is emitted
+ * on the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * ``reportnum``: the report number, as in hid_hw_raw_request()
+ *
+ * ``rtype``: the report type (``HID_INPUT_REPORT``, ``HID_FEATURE_REPORT``,
+ * ``HID_OUTPUT_REPORT``)
+ *
+ * ``reqtype``: the request
+ *
+ * ``source``: a u64 referring to a unique but identifiable source. If %0, the
+ * kernel itself emitted that call. For hidraw, ``source`` is set
+ * to the associated ``struct file *``.
+ *
+ * Return: %0 to keep processing the request by hid-core; any other value
+ * stops hid-core from processing that event. A positive value should be
+ * returned with the number of bytes returned in the incoming buffer; a
+ * negative error code interrupts the processing of this call.
+ */
+ int (*hid_hw_request)(struct hid_bpf_ctx *ctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype,
+ u64 source);
+
+ /**
+ * @hid_hw_output_report: called whenever a hid_hw_output_report() call is emitted
+ * on the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * ``source``: a u64 referring to a unique but identifiable source. If %0, the
+ * kernel itself emitted that call. For hidraw, ``source`` is set
+ * to the associated ``struct file *``.
+ *
+ * Return: %0 to keep processing the request by hid-core; any other value
+ * stops hid-core from processing that event. A positive value should be
+ * returned with the number of bytes written to the device; a negative error
+ * code interrupts the processing of this call.
+ */
+ int (*hid_hw_output_report)(struct hid_bpf_ctx *ctx, u64 source);
+
+
+ /* private: do not show up in the docs */
+ struct hid_device *hdev;
+};
+
+/* stored in each device */
+struct hid_bpf {
+ u8 *device_data; /* allocated when a bpf program of type
+ * SEC(f.../hid_bpf_device_event) has been attached
+ * to this HID device
+ */
+ u32 allocated_data;
+ bool destroyed; /* prevents the assignment of any progs */
+
+ struct hid_bpf_ops *rdesc_ops;
+ struct list_head prog_list;
+ struct mutex prog_list_lock; /* protects prog_list update */
+ struct srcu_struct srcu; /* protects prog_list read-only access */
+};
+
+#ifdef CONFIG_HID_BPF
+u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
+ u32 *size, int interrupt, u64 source, bool from_bpf);
+int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ u32 size, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf);
+int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size,
+ u64 source, bool from_bpf);
+int hid_bpf_connect_device(struct hid_device *hdev);
+void hid_bpf_disconnect_device(struct hid_device *hdev);
+void hid_bpf_destroy_device(struct hid_device *hid);
+int hid_bpf_device_init(struct hid_device *hid);
+const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size);
+#else /* CONFIG_HID_BPF */
+static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
+ u8 *data, u32 *size, int interrupt,
+ u64 source, bool from_bpf) { return data; }
+static inline int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ unsigned char reportnum, u8 *buf,
+ u32 size, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf) { return 0; }
+static inline int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size,
+ u64 source, bool from_bpf) { return 0; }
+static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
+static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
+static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
+static inline int hid_bpf_device_init(struct hid_device *hid) { return 0; }
+static inline const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc,
+ unsigned int *size) { return rdesc; }
+
+#endif /* CONFIG_HID_BPF */
+
+#endif /* __HID_BPF_H */
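
The ops above are normally implemented from a BPF object and attached as a
struct_ops map. A minimal, hedged sketch of the BPF side, assuming the in-tree
HID-BPF conventions and the hid_bpf_get_data() kfunc (all program and variable
names below are illustrative, not part of this header):

	// SPDX-License-Identifier: GPL-2.0
	// Hedged BPF-side sketch; offsets and names are illustrative only.
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
				      unsigned int offset,
				      const size_t rdwr_buf_size) __ksym;

	SEC("struct_ops/hid_device_event")
	int BPF_PROG(invert_x, struct hid_bpf_ctx *hctx,
		     enum hid_report_type type, __u64 source)
	{
		/* request a read/write view of the first 4 bytes of the event */
		__u8 *data = hid_bpf_get_data(hctx, 0, 4);

		if (!data)
			return 0; /* a negative value would abort the event */
		data[1] = -data[1]; /* assumption: byte 1 carries the X axis */
		return 0;
	}

	SEC(".struct_ops.link")
	struct hid_bpf_ops invert_ops = {
		.hid_device_event = (void *)invert_x,
	};

	char LICENSE[] SEC("license") = "GPL";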
diff --git a/include/linux/hidden.h b/include/linux/hidden.h
new file mode 100644
index 000000000000..49a17b6b5962
--- /dev/null
+++ b/include/linux/hidden.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * When building position independent code with GCC using the -fPIC option,
+ * (or even the -fPIE one on older versions), it will assume that we are
+ * building a dynamic object (either a shared library or an executable) that
+ * may have symbol references that can only be resolved at load time. For a
+ * variety of reasons (ELF symbol preemption, the CoW footprint of the section
+ * that is modified by the loader), this results in all references to symbols
+ * with external linkage going via entries in the Global Offset Table (GOT),
+ * which carry absolute addresses that need to be fixed up when the
+ * executable image is loaded at an offset which is different from its link
+ * time offset.
+ *
+ * Fortunately, there is a way to inform the compiler that such symbol
+ * references will be satisfied at link time rather than at load time, by
+ * giving them 'hidden' visibility.
+ */
+
+#pragma GCC visibility push(hidden)
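
As a hedged illustration of the effect (the file and symbol names here are
hypothetical): once the pragma is in force, an external reference compiles
to a direct PC-relative access instead of a load through the GOT:

	/* sketch: a translation unit built with -fPIC */
	#include <linux/hidden.h>

	extern unsigned long boot_offset;	/* resolved at link time */

	unsigned long read_offset(void)
	{
		/* hidden visibility: PC-relative access, no GOT entry emitted */
		return boot_offset;
	}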
diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h
index 921622222957..2164c03d2c72 100644
--- a/include/linux/hiddev.h
+++ b/include/linux/hiddev.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 1999-2000 Vojtech Pavlik
*
* Sponsored by SuSE
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index ddf52612eed8..18fd30a288de 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2007 Jiri Kosina
*/
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
#ifndef _HIDRAW_H
#define _HIDRAW_H
@@ -40,6 +32,7 @@ struct hidraw_list {
struct hidraw *hidraw;
struct list_head node;
struct mutex read_mutex;
+ bool revoked;
};
#ifdef CONFIG_HIDRAW
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
new file mode 100644
index 000000000000..0574c21ca45d
--- /dev/null
+++ b/include/linux/highmem-internal.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HIGHMEM_INTERNAL_H
+#define _LINUX_HIGHMEM_INTERNAL_H
+
+/*
+ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
+ */
+#ifdef CONFIG_KMAP_LOCAL
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot);
+void kunmap_local_indexed(const void *vaddr);
+void kmap_local_fork(struct task_struct *tsk);
+void __kmap_local_sched_out(void);
+void __kmap_local_sched_in(void);
+static inline void kmap_assert_nomap(void)
+{
+ DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
+}
+#else
+static inline void kmap_local_fork(struct task_struct *tsk) { }
+static inline void kmap_assert_nomap(void) { }
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+void kunmap_high(const struct page *page);
+void __kmap_flush_unused(void);
+struct page *__kmap_to_page(void *addr);
+
+static inline void *kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ addr = page_address(page);
+ else
+ addr = kmap_high(page);
+ kmap_flush_tlb((unsigned long)addr);
+ return addr;
+}
+
+static inline void kunmap(const struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return __kmap_to_page(addr);
+}
+
+static inline void kmap_flush_unused(void)
+{
+ __kmap_flush_unused();
+}
+
+static inline void *kmap_local_page(const struct page *page)
+{
+ return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
+{
+ if (!PageHighMem(page))
+ return page_address(page);
+ /* If the page is in HighMem, it's not safe to kmap it. */
+ return NULL;
+}
+
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
+{
+ const struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
+}
+
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
+{
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(const void *vaddr)
+{
+ kunmap_local_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
+ pagefault_disable();
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_atomic(const struct page *page)
+{
+ return kmap_atomic_prot(page, kmap_prot);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
+ pagefault_disable();
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_atomic(const void *addr)
+{
+ kunmap_local_indexed(addr);
+ pagefault_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
+}
+
+unsigned long __nr_free_highpages(void);
+unsigned long __totalhigh_pages(void);
+
+static inline unsigned long nr_free_highpages(void)
+{
+ return __nr_free_highpages();
+}
+
+static inline unsigned long totalhigh_pages(void)
+{
+ return __totalhigh_pages();
+}
+
+static inline bool is_kmap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
+ (addr >= __fix_to_virt(FIX_KMAP_END) &&
+ addr < __fix_to_virt(FIX_KMAP_BEGIN));
+}
+#else /* CONFIG_HIGHMEM */
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return virt_to_page(addr);
+}
+
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ return page_address(page);
+}
+
+static inline void kunmap_high(const struct page *page) { }
+static inline void kmap_flush_unused(void) { }
+
+static inline void kunmap(const struct page *page)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(page_address(page));
+#endif
+}
+
+static inline void *kmap_local_page(const struct page *page)
+{
+ return page_address(page);
+}
+
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
+{
+ return page_address(page);
+}
+
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
+{
+ return folio_address(folio) + offset;
+}
+
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
+{
+ return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(const void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
+#endif
+}
+
+static inline void *kmap_atomic(const struct page *page)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+}
+
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
+{
+ return kmap_atomic(page);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ return kmap_atomic(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_atomic(const void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
+#endif
+ pagefault_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
+}
+
+static inline unsigned long nr_free_highpages(void) { return 0; }
+static inline unsigned long totalhigh_pages(void) { return 0; }
+
+static inline bool is_kmap_addr(const void *x)
+{
+ return false;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
+ * @__addr: Virtual address to be unmapped
+ *
+ * Unmaps an address previously mapped by kmap_atomic() and re-enables
+ * pagefaults. Depending on PREEMP_RT configuration, re-enables also
+ * migration and preemption. Users should not count on these side effects.
+ *
+ * Mappings should be unmapped in the reverse order that they were mapped.
+ * See kmap_local_page() for details on nesting.
+ *
+ * @__addr can be any address within the mapped page, so there is no need
+ * to subtract any offset that has been added. In contrast to kunmap(),
+ * this function takes the address returned from kmap_atomic(), not the
+ * page passed to it. The compiler will warn you if you pass the page.
+ */
+#define kunmap_atomic(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_atomic(__addr); \
+} while (0)
+
+/**
+ * kunmap_local - Unmap a page mapped via kmap_local_page().
+ * @__addr: An address within the page mapped
+ *
+ * @__addr can be any address within the mapped page. Commonly it is the
+ * address returned from kmap_local_page(), but it can also include offsets.
+ *
+ * Unmapping should be done in the reverse order of the mapping. See
+ * kmap_local_page() for details.
+ */
+#define kunmap_local(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_local(__addr); \
+} while (0)
+
+#endif
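
Taken together, the local-mapping API above follows a strict map/use/unmap
discipline. A minimal usage sketch (the function is illustrative, not part of
this header):

	/* hedged sketch: copy a range out of a possibly-highmem page */
	static void read_page_sketch(struct page *page, void *dst,
				     size_t off, size_t len)
	{
		char *vaddr = kmap_local_page(page); /* valid in this context only */

		memcpy(dst, vaddr + off, len);
		kunmap_local(vaddr);		/* unmap in reverse map order */
	}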
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index bb3f3297062a..abc20f9810fd 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -1,14 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <asm/cacheflush.h>
+#include "highmem-internal.h"
+
+/**
+ * kmap - Map a page for long term usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can only be invoked from preemptible task context because on 32bit
+ * systems with CONFIG_HIGHMEM enabled this function might sleep.
+ *
+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
+ * this returns the virtual address of the direct kernel mapping.
+ *
+ * The returned virtual address is globally visible and valid up to the
+ * point where it is unmapped via kunmap(). The pointer can be handed to
+ * other contexts.
+ *
+ * For highmem pages on 32bit systems this can be slow as the mapping space
+ * is limited and protected by a global lock. In case that there is no
+ * mapping slot available the function blocks until a slot is released via
+ * kunmap().
+ */
+static inline void *kmap(struct page *page);
+
+/**
+ * kunmap - Unmap the virtual address mapped by kmap()
+ * @page: Pointer to the page which was mapped by kmap()
+ *
+ * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
+ * pages in the low memory area.
+ */
+static inline void kunmap(const struct page *page);
+
+/**
+ * kmap_to_page - Get the page for a kmap'ed address
+ * @addr: The address to look up
+ *
+ * Returns: The page which is mapped to @addr.
+ */
+static inline struct page *kmap_to_page(void *addr);
+
+/**
+ * kmap_flush_unused - Flush all unused kmap mappings in order to
+ * remove stray mappings
+ */
+static inline void kmap_flush_unused(void);
+
+/**
+ * kmap_local_page - Map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can be invoked from any context, including interrupts.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation::
+ *
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While kmap_local_page() is significantly faster than kmap() for the highmem
+ * case it comes with restrictions about the pointer validity.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(const struct page *page);
+
+/**
+ * kmap_local_folio - Map a page in this folio for temporary usage
+ * @folio: The folio containing the page.
+ * @offset: The byte offset within the folio which identifies the page.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation::
+ *
+ * addr1 = kmap_local_folio(folio1, offset1);
+ * addr2 = kmap_local_folio(folio2, offset2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_folio() can rely on this side effect.
+ *
+ * Context: Can be invoked from any context.
+ * Return: The virtual address of @offset.
+ */
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * In fact a wrapper around kmap_local_page() which also disables pagefaults
+ * and, depending on PREEMPT_RT configuration, also CPU migration and
+ * preemption. Therefore users should not count on the latter two side effects.
+ *
+ * Mappings should always be released by kunmap_atomic().
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
+ *
+ * It is used in atomic context when code wants to access the contents of a
+ * page that might be allocated from high memory (see __GFP_HIGHMEM), for
+ * example a page in the pagecache. The API has two functions, and they
+ * can be used in a manner similar to the following::
+ *
+ * // Find the page of interest.
+ * struct page *page = find_get_page(mapping, offset);
+ *
+ * // Gain access to the contents of that page.
+ * void *vaddr = kmap_atomic(page);
+ *
+ * // Do something to the contents of that page.
+ * memset(vaddr, 0, PAGE_SIZE);
+ *
+ * // Unmap that page.
+ * kunmap_atomic(vaddr);
+ *
+ * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
+ * call, not the argument.
+ *
+ * If you need to map two pages because you want to copy from one page to
+ * another you need to keep the kmap_atomic calls strictly nested, like::
+ *
+ * vaddr1 = kmap_atomic(page1);
+ * vaddr2 = kmap_atomic(page2);
+ *
+ * memcpy(vaddr1, vaddr2, PAGE_SIZE);
+ *
+ * kunmap_atomic(vaddr2);
+ * kunmap_atomic(vaddr1);
+ */
+static inline void *kmap_atomic(const struct page *page);
+
+/* Highmem related interfaces for management code */
+static inline unsigned long nr_free_highpages(void);
+static inline unsigned long totalhigh_pages(void);
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -16,10 +188,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page
}
#endif
-#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
@@ -28,223 +197,491 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
}
#endif
-#include <asm/kmap_types.h>
+/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+#ifndef clear_user_highpage
+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ void *addr = kmap_local_page(page);
+ clear_user_page(addr, vaddr, page);
+ kunmap_local(addr);
+}
+#endif
-#ifdef CONFIG_HIGHMEM
-#include <asm/highmem.h>
+#ifndef vma_alloc_zeroed_movable_folio
+/**
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
+ * @vma: The VMA the page is to be allocated for.
+ * @vaddr: The virtual address the page will be inserted into.
+ *
+ * This function will allocate a page suitable for inserting into this
+ * VMA at this virtual address. It may be allocated from highmem or
+ * the movable zone. An architecture may provide its own implementation.
+ *
+ * Return: A folio containing one allocated and zeroed page or NULL if
+ * we are out of memory.
+ */
+static inline
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ struct folio *folio;
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-extern unsigned long totalhigh_pages;
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
+ if (folio && user_alloc_needs_zeroing())
+ clear_user_highpage(&folio->page, vaddr);
-void kmap_flush_unused(void);
+ return folio;
+}
+#endif
-struct page *kmap_to_page(void *addr);
+static inline void clear_highpage(struct page *page)
+{
+ void *kaddr = kmap_local_page(page);
+ clear_page(kaddr);
+ kunmap_local(kaddr);
+}
+
+static inline void clear_highpage_kasan_tagged(struct page *page)
+{
+ void *kaddr = kmap_local_page(page);
-#else /* CONFIG_HIGHMEM */
+ clear_page(kasan_reset_tag(kaddr));
+ kunmap_local(kaddr);
+}
-static inline unsigned int nr_free_highpages(void) { return 0; }
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
-static inline struct page *kmap_to_page(void *addr)
+/* Return false to let people know we did not initialize the pages */
+static inline bool tag_clear_highpages(struct page *page, int numpages)
{
- return virt_to_page(addr);
+ return false;
}
-#define totalhigh_pages 0UL
+#endif
-#ifndef ARCH_HAS_KMAP
-static inline void *kmap(struct page *page)
+/*
+ * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
+ * If we pass in a head page, we can zero up to the size of the compound page.
+ */
+#ifdef CONFIG_HIGHMEM
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2);
+#else
+static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
{
- might_sleep();
- return page_address(page);
+ void *kaddr = kmap_local_page(page);
+ unsigned int i;
+
+ BUG_ON(end1 > page_size(page) || end2 > page_size(page));
+
+ if (end1 > start1)
+ memset(kaddr + start1, 0, end1 - start1);
+
+ if (end2 > start2)
+ memset(kaddr + start2, 0, end2 - start2);
+
+ kunmap_local(kaddr);
+ for (i = 0; i < compound_nr(page); i++)
+ flush_dcache_page(page + i);
}
+#endif
-static inline void kunmap(struct page *page)
+static inline void zero_user_segment(struct page *page,
+ unsigned start, unsigned end)
{
+ zero_user_segments(page, start, end, 0, 0);
}
-static inline void *kmap_atomic(struct page *page)
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+static inline void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
{
- preempt_disable();
- pagefault_disable();
- return page_address(page);
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_user_page(vto, vfrom, vaddr, to);
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
-#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-static inline void __kunmap_atomic(void *addr)
+#endif
+
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE
+
+static inline void copy_highpage(struct page *to, struct page *from)
{
- pagefault_enable();
- preempt_enable();
-}
+ char *vfrom, *vto;
-#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_page(vto, vfrom);
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+}
-#define kmap_flush_unused() do {} while(0)
#endif
-#endif /* CONFIG_HIGHMEM */
+#ifdef copy_mc_to_kernel
+/*
+ * If the architecture supports machine check exception handling, define the
+ * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
+ * page with #MC in source page (@from) handled, and return the number
+ * of bytes not copied if there was a #MC, otherwise 0 for success.
+ */
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ unsigned long ret;
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ if (!ret)
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+ if (ret)
+ memory_failure_queue(page_to_pfn(from), 0);
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
+ return ret;
+}
-static inline int kmap_atomic_idx_push(void)
+static inline int copy_mc_highpage(struct page *to, struct page *from)
{
- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+ unsigned long ret;
+ char *vfrom, *vto;
-#ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(in_irq() && !irqs_disabled());
- BUG_ON(idx >= KM_TYPE_NR);
-#endif
- return idx;
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ if (!ret)
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+
+ if (ret)
+ memory_failure_queue(page_to_pfn(from), 0);
+
+ return ret;
+}
+#else
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ copy_user_highpage(to, from, vaddr, vma);
+ return 0;
}
-static inline int kmap_atomic_idx(void)
+static inline int copy_mc_highpage(struct page *to, struct page *from)
{
- return __this_cpu_read(__kmap_atomic_idx) - 1;
+ copy_highpage(to, from);
+ return 0;
}
+#endif
-static inline void kmap_atomic_idx_pop(void)
+static inline void memcpy_page(struct page *dst_page, size_t dst_off,
+ struct page *src_page, size_t src_off,
+ size_t len)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+ char *dst = kmap_local_page(dst_page);
+ char *src = kmap_local_page(src_page);
- BUG_ON(idx < 0);
-#else
- __this_cpu_dec(__kmap_atomic_idx);
-#endif
+ VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
+ memcpy(dst + dst_off, src + src_off, len);
+ kunmap_local(src);
+ kunmap_local(dst);
}
-#endif
+static inline void memcpy_folio(struct folio *dst_folio, size_t dst_off,
+ struct folio *src_folio, size_t src_off, size_t len)
+{
+ VM_BUG_ON(dst_off + len > folio_size(dst_folio));
+ VM_BUG_ON(src_off + len > folio_size(src_folio));
+
+ do {
+ char *dst = kmap_local_folio(dst_folio, dst_off);
+ const char *src = kmap_local_folio(src_folio, src_off);
+ size_t chunk = len;
+
+ if (folio_test_highmem(dst_folio) &&
+ chunk > PAGE_SIZE - offset_in_page(dst_off))
+ chunk = PAGE_SIZE - offset_in_page(dst_off);
+ if (folio_test_highmem(src_folio) &&
+ chunk > PAGE_SIZE - offset_in_page(src_off))
+ chunk = PAGE_SIZE - offset_in_page(src_off);
+ memcpy(dst, src, chunk);
+ kunmap_local(src);
+ kunmap_local(dst);
+
+ dst_off += chunk;
+ src_off += chunk;
+ len -= chunk;
+ } while (len > 0);
+}
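
The chunked loop above exists because a highmem folio can only be mapped one
page at a time; usage itself stays simple. A hedged one-liner, assuming the
destination folio is at least as large as the source:

	/* hedged sketch: duplicate one folio's contents into another */
	memcpy_folio(dst_folio, 0, src_folio, 0, folio_size(src_folio));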
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
- */
-#define kunmap_atomic(addr) \
-do { \
- BUILD_BUG_ON(__same_type((addr), struct page *)); \
- __kunmap_atomic(addr); \
-} while (0)
+static inline void memset_page(struct page *page, size_t offset, int val,
+ size_t len)
+{
+ char *addr = kmap_local_page(page);
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memset(addr + offset, val, len);
+ kunmap_local(addr);
+}
-/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
-#ifndef clear_user_highpage
-static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+static inline void memcpy_from_page(char *to, struct page *page,
+ size_t offset, size_t len)
{
- void *addr = kmap_atomic(page);
- clear_user_page(addr, vaddr, page);
- kunmap_atomic(addr);
+ char *from = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to, from + offset, len);
+ kunmap_local(from);
}
-#endif
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-/**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
- * implementation.
- */
-static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
- struct vm_area_struct *vma,
- unsigned long vaddr)
+static inline void memcpy_to_page(struct page *page, size_t offset,
+ const char *from, size_t len)
{
- struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
- vma, vaddr);
+ char *to = kmap_local_page(page);
- if (page)
- clear_user_highpage(page, vaddr);
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to + offset, from, len);
+ flush_dcache_page(page);
+ kunmap_local(to);
+}
- return page;
+static inline void memzero_page(struct page *page, size_t offset, size_t len)
+{
+ char *addr = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memset(addr + offset, 0, len);
+ flush_dcache_page(page);
+ kunmap_local(addr);
}
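
The single-page helpers above share one pattern: map locally, bounds-check,
operate, flush the dcache when the page contents changed, unmap. A hedged
sketch combining two of them (hdr and hdr_len are illustrative):

	/* hedged sketch: write a header, then clear the rest of the page */
	static void fill_page_sketch(struct page *page, const char *hdr,
				     size_t hdr_len)
	{
		memcpy_to_page(page, 0, hdr, hdr_len);	/* flushes the dcache */
		memzero_page(page, hdr_len, PAGE_SIZE - hdr_len);
	}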
-#endif
/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
+ * memcpy_from_folio - Copy a range of bytes from a folio.
+ * @to: The memory to copy to.
+ * @folio: The folio to read from.
+ * @offset: The first byte in the folio to read.
+ * @len: The number of bytes to copy.
*/
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
- unsigned long vaddr)
+static inline void memcpy_from_folio(char *to, struct folio *folio,
+ size_t offset, size_t len)
{
- return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ const char *from = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_partial_kmap(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(from);
+
+ to += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
}
-static inline void clear_highpage(struct page *page)
+/**
+ * memcpy_to_folio - Copy a range of bytes to a folio.
+ * @folio: The folio to write to.
+ * @offset: The first byte in the folio to store to.
+ * @from: The memory to copy from.
+ * @len: The number of bytes to copy.
+ */
+static inline void memcpy_to_folio(struct folio *folio, size_t offset,
+ const char *from, size_t len)
{
- void *kaddr = kmap_atomic(page);
- clear_page(kaddr);
- kunmap_atomic(kaddr);
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ char *to = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_partial_kmap(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(to);
+
+ from += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+
+ flush_dcache_folio(folio);
}
-static inline void zero_user_segments(struct page *page,
- unsigned start1, unsigned end1,
- unsigned start2, unsigned end2)
+/**
+ * folio_zero_tail - Zero the tail of a folio.
+ * @folio: The folio to zero.
+ * @offset: The byte offset in the folio to start zeroing at.
+ * @kaddr: The address the folio is currently mapped to.
+ *
+ * If you have already used kmap_local_folio() to map a folio, written
+ * some data to it and now need to zero the end of the folio (and flush
+ * the dcache), you can use this function. If you do not have the
+ * folio kmapped (e.g. the folio has been partially populated by DMA),
+ * use folio_zero_range() or folio_zero_segment() instead.
+ *
+ * Return: An address which can be passed to kunmap_local().
+ */
+static inline __must_check void *folio_zero_tail(struct folio *folio,
+ size_t offset, void *kaddr)
{
- void *kaddr = kmap_atomic(page);
+ size_t len = folio_size(folio) - offset;
- BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+ if (folio_test_partial_kmap(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
- if (end1 > start1)
- memset(kaddr + start1, 0, end1 - start1);
+ while (len > max) {
+ memset(kaddr, 0, max);
+ kunmap_local(kaddr);
+ len -= max;
+ offset += max;
+ max = PAGE_SIZE;
+ kaddr = kmap_local_folio(folio, offset);
+ }
+ }
- if (end2 > start2)
- memset(kaddr + start2, 0, end2 - start2);
+ memset(kaddr, 0, len);
+ flush_dcache_folio(folio);
- kunmap_atomic(kaddr);
- flush_dcache_page(page);
+ return kaddr;
}
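
A hedged sketch of the call sequence the kernel-doc above describes
(inline_data and inline_len are illustrative names, not from this header):

	/* hedged sketch: copy inline data, then zero and flush the tail */
	void *kaddr = kmap_local_folio(folio, 0);

	memcpy(kaddr, inline_data, inline_len);
	kaddr = folio_zero_tail(folio, inline_len, kaddr + inline_len);
	kunmap_local(kaddr);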
-static inline void zero_user_segment(struct page *page,
- unsigned start, unsigned end)
+/**
+ * folio_fill_tail - Copy some data to a folio and pad with zeroes.
+ * @folio: The destination folio.
+ * @offset: The offset into @folio at which to start copying.
+ * @from: The data to copy.
+ * @len: How many bytes of data to copy.
+ *
+ * This function is most useful for filesystems which support inline data.
+ * When they want to copy data from the inode into the page cache, this
+ * function does everything for them. It supports large folios even on
+ * HIGHMEM configurations.
+ */
+static inline void folio_fill_tail(struct folio *folio, size_t offset,
+ const char *from, size_t len)
{
- zero_user_segments(page, start, end, 0, 0);
+ char *to = kmap_local_folio(folio, offset);
+
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ if (folio_test_partial_kmap(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memcpy(to, from, max);
+ kunmap_local(to);
+ len -= max;
+ from += max;
+ offset += max;
+ max = PAGE_SIZE;
+ to = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memcpy(to, from, len);
+ to = folio_zero_tail(folio, offset + len, to + len);
+ kunmap_local(to);
}
-static inline void zero_user(struct page *page,
- unsigned start, unsigned size)
+/**
+ * memcpy_from_file_folio - Copy some bytes from a file folio.
+ * @to: The destination buffer.
+ * @folio: The folio to copy from.
+ * @pos: The position in the file.
+ * @len: The maximum number of bytes to copy.
+ *
+ * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
+ * if the folio comes from HIGHMEM, and by the size of the folio.
+ *
+ * Return: The number of bytes copied from the folio.
+ */
+static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
+ loff_t pos, size_t len)
{
- zero_user_segments(page, start, start + size, 0, 0);
-}
+ size_t offset = offset_in_folio(folio, pos);
+ char *from = kmap_local_folio(folio, offset);
-#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
+ if (folio_test_partial_kmap(folio)) {
+ offset = offset_in_page(offset);
+ len = min_t(size_t, len, PAGE_SIZE - offset);
+ } else
+ len = min(len, folio_size(folio) - offset);
-static inline void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- char *vfrom, *vto;
+ memcpy(to, from, len);
+ kunmap_local(from);
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
- copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ return len;
}
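
Because the copy may come up short of @len (a page boundary in a highmem
folio, or the end of the folio), callers are expected to loop. A hedged
sketch, assuming pos stays within the folio:

	/* hedged sketch: drain a folio into buf, tolerating short copies */
	while (len) {
		size_t n = memcpy_from_file_folio(buf, folio, pos, len);

		buf += n;
		pos += n;
		len -= n;
	}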
-#endif
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+ size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+ zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
-static inline void copy_highpage(struct page *to, struct page *from)
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+ size_t start, size_t xend)
{
- char *vfrom, *vto;
+ zero_user_segments(&folio->page, start, xend, 0, 0);
+}
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
- copy_page(vto, vfrom);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+ size_t start, size_t length)
+{
+ zero_user_segments(&folio->page, start, start + length, 0, 0);
}
+/**
+ * folio_release_kmap - Unmap a folio and drop a refcount.
+ * @folio: The folio to release.
+ * @addr: The address previously returned by a call to kmap_local_folio().
+ *
+ * It is common, e.g. in directory handling, to kmap a folio. This function
+ * unmaps the folio and drops the refcount that was being held to keep the
+ * folio alive while we accessed it.
+ */
+static inline void folio_release_kmap(struct folio *folio, void *addr)
+{
+ kunmap_local(addr);
+ folio_put(folio);
+}
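
A hedged sketch of the directory-handling pattern the kernel-doc alludes to:

	/* hedged sketch: hold the folio only as long as we read from it */
	void *kaddr = kmap_local_folio(folio, 0);

	/* ... parse directory entries at kaddr ... */
	folio_release_kmap(folio, kaddr);	/* kunmap_local() + folio_put() */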
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/highuid.h b/include/linux/highuid.h
index 434e56246f67..50d383fd674d 100644
--- a/include/linux/highuid.h
+++ b/include/linux/highuid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHUID_H
#define _LINUX_HIGHUID_H
diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h
index 394a8405dd74..369221fd5518 100644
--- a/include/linux/hil_mlc.h
+++ b/include/linux/hil_mlc.h
@@ -103,7 +103,7 @@ struct hilse_node {
/* Methods for back-end drivers, e.g. hp_sdc_mlc */
typedef int (hil_mlc_cts) (hil_mlc *mlc);
-typedef void (hil_mlc_out) (hil_mlc *mlc);
+typedef int (hil_mlc_out) (hil_mlc *mlc);
typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout);
struct hil_mlc_devinfo {
@@ -144,12 +144,12 @@ struct hil_mlc {
hil_packet ipacket[16];
hil_packet imatch;
int icount;
- struct timeval instart;
- suseconds_t intimeout;
+ unsigned long instart;
+ unsigned long intimeout;
int ddi; /* Last operational device id */
int lcv; /* LCV to throttle loops */
- struct timeval lcv_tv; /* Time loop was started */
+ time64_t lcv_time; /* Time loop was started */
int di_map[7]; /* Maps below items to live devs */
struct hil_mlc_devinfo di[HIL_MLC_DEVMEM];
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index 402f99e328d4..07414c241e65 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -14,11 +15,6 @@
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Lawrence V. Stefani, <stefani@lkg.dec.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_HIPPIDEVICE_H
#define _LINUX_HIPPIDEVICE_H
@@ -27,6 +23,10 @@
#ifdef __KERNEL__
+struct neigh_parms;
+struct net_device;
+struct sk_buff;
+
struct hippi_cb {
__u32 ifield;
};
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
new file mode 100644
index 000000000000..ca1ec437a3ca
--- /dev/null
+++ b/include/linux/hisi_acc_qm.h
@@ -0,0 +1,604 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+#ifndef HISI_ACC_QM_H
+#define HISI_ACC_QM_H
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define QM_QNUM_V1 4096
+#define QM_QNUM_V2 1024
+#define QM_MAX_VFS_NUM_V2 63
+
+/* qm user domain */
+#define QM_ARUSER_M_CFG_1 0x100088
+#define AXUSER_SNOOP_ENABLE BIT(30)
+#define AXUSER_CMD_TYPE GENMASK(14, 12)
+#define AXUSER_CMD_SMMU_NORMAL 1
+#define AXUSER_NS BIT(6)
+#define AXUSER_NO BIT(5)
+#define AXUSER_FP BIT(4)
+#define AXUSER_SSV BIT(0)
+#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \
+ FIELD_PREP(AXUSER_CMD_TYPE, \
+ AXUSER_CMD_SMMU_NORMAL) | \
+ AXUSER_NS | AXUSER_NO | AXUSER_FP)
+#define QM_ARUSER_M_CFG_ENABLE 0x100090
+#define ARUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_AWUSER_M_CFG_1 0x100098
+#define QM_AWUSER_M_CFG_ENABLE 0x1000a0
+#define AWUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_WUSER_M_CFG_ENABLE 0x1000a8
+#define WUSER_M_CFG_ENABLE 0xffffffff
+
+/* mailbox */
+#define QM_MB_CMD_SQC 0x0
+#define QM_MB_CMD_CQC 0x1
+#define QM_MB_CMD_EQC 0x2
+#define QM_MB_CMD_AEQC 0x3
+#define QM_MB_CMD_SQC_BT 0x4
+#define QM_MB_CMD_CQC_BT 0x5
+#define QM_MB_CMD_SQC_VFT_V2 0x6
+#define QM_MB_CMD_STOP_QP 0x8
+#define QM_MB_CMD_FLUSH_QM 0x9
+#define QM_MB_CMD_SRC 0xc
+#define QM_MB_CMD_DST 0xd
+
+#define QM_MB_CMD_SEND_BASE 0x300
+#define QM_MB_EVENT_SHIFT 8
+#define QM_MB_BUSY_SHIFT 13
+#define QM_MB_OP_SHIFT 14
+#define QM_MB_CMD_DATA_ADDR_L 0x304
+#define QM_MB_CMD_DATA_ADDR_H 0x308
+#define QM_MB_MAX_WAIT_CNT 6000
+
+/* doorbell */
+#define QM_DOORBELL_CMD_SQ 0
+#define QM_DOORBELL_CMD_CQ 1
+#define QM_DOORBELL_CMD_EQ 2
+#define QM_DOORBELL_CMD_AEQ 3
+
+#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
+#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
+#define QM_QP_MAX_NUM_SHIFT 11
+#define QM_DB_CMD_SHIFT_V2 12
+#define QM_DB_RAND_SHIFT_V2 16
+#define QM_DB_INDEX_SHIFT_V2 32
+#define QM_DB_PRIORITY_SHIFT_V2 48
+#define QM_VF_STATE 0x60
+
+/* qm cache */
+#define QM_CACHE_CTL 0x100050
+#define SQC_CACHE_ENABLE BIT(0)
+#define CQC_CACHE_ENABLE BIT(1)
+#define SQC_CACHE_WB_ENABLE BIT(4)
+#define SQC_CACHE_WB_THRD GENMASK(10, 5)
+#define CQC_CACHE_WB_ENABLE BIT(11)
+#define CQC_CACHE_WB_THRD GENMASK(17, 12)
+#define QM_AXI_M_CFG 0x1000ac
+#define AXI_M_CFG 0xffff
+#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
+#define AXI_M_CFG_ENABLE 0xffffffff
+#define QM_PEH_AXUSER_CFG 0x1000cc
+#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
+#define PEH_AXUSER_CFG 0x401001
+#define PEH_AXUSER_CFG_ENABLE 0xffffffff
+
+#define QM_MIN_QNUM 2
+#define HISI_ACC_SGL_SGE_NR_MAX 255
+#define QM_SHAPER_CFG 0x100164
+#define QM_SHAPER_ENABLE BIT(30)
+#define QM_SHAPER_TYPE1_OFFSET 10
+
+/* page number for queue file region */
+#define QM_DOORBELL_PAGE_NR 1
+
+#define QM_DEV_ALG_MAX_LEN 256
+
+#define QM_MIG_REGION_SEL 0x100198
+#define QM_MIG_REGION_EN BIT(0)
+
+/* uacce mode of the driver */
+#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
+#define UACCE_MODE_SVA 1 /* use uacce sva mode */
+#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce"
+
+#define QM_ECC_MBIT BIT(2)
+
+enum qm_stop_reason {
+ QM_NORMAL,
+ QM_SOFT_RESET,
+ QM_DOWN,
+};
+
+enum qm_state {
+ QM_WORK = 0,
+ QM_STOP,
+};
+
+enum qp_state {
+ QP_START = 1,
+ QP_STOP,
+};
+
+enum qm_hw_ver {
+ QM_HW_V1 = 0x20,
+ QM_HW_V2 = 0x21,
+ QM_HW_V3 = 0x30,
+ QM_HW_V4 = 0x50,
+ QM_HW_V5 = 0x51,
+};
+
+enum qm_fun_type {
+ QM_HW_PF,
+ QM_HW_VF,
+};
+
+enum qm_debug_file {
+ CURRENT_QM,
+ CURRENT_Q,
+ CLEAR_ENABLE,
+ DEBUG_FILE_NUM,
+};
+
+enum qm_vf_state {
+ QM_READY = 0,
+ QM_NOT_READY,
+};
+
+enum qm_misc_ctl_bits {
+ QM_DRIVER_REMOVING = 0x0,
+ QM_RST_SCHED,
+ QM_RESETTING,
+ QM_MODULE_PARAM,
+};
+
+enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+ QM_SUPPORT_STOP_QP,
+ QM_SUPPORT_STOP_FUNC,
+ QM_SUPPORT_MB_COMMAND,
+ QM_SUPPORT_SVA_PREFETCH,
+ QM_SUPPORT_RPM,
+ QM_SUPPORT_DAE,
+};
+
+struct qm_dev_alg {
+ u64 alg_msk;
+ const char *alg;
+};
+
+struct qm_dev_dfx {
+ u32 dev_state;
+ u32 dev_timeout;
+};
+
+struct dfx_diff_registers {
+ u32 *regs;
+ u32 reg_offset;
+ u32 reg_len;
+};
+
+struct qm_dfx {
+ atomic64_t err_irq_cnt;
+ atomic64_t aeq_irq_cnt;
+ atomic64_t abnormal_irq_cnt;
+ atomic64_t create_qp_err_cnt;
+ atomic64_t mb_err_cnt;
+};
+
+struct debugfs_file {
+ enum qm_debug_file index;
+ struct mutex lock;
+ struct qm_debug *debug;
+};
+
+struct qm_debug {
+ u32 curr_qm_qp_num;
+ u32 sqe_mask_offset;
+ u32 sqe_mask_len;
+ struct qm_dfx dfx;
+ struct dentry *debug_root;
+ struct dentry *qm_d;
+ struct debugfs_file files[DEBUG_FILE_NUM];
+ struct qm_dev_dfx dev_dfx;
+ unsigned int *qm_last_words;
+ /* ACC engines recording last regs */
+ unsigned int *last_words;
+ struct dfx_diff_registers *qm_diff_regs;
+ struct dfx_diff_registers *acc_diff_regs;
+};
+
+struct qm_shaper_factor {
+ u32 func_qos;
+ u64 cir_b;
+ u64 cir_u;
+ u64 cir_s;
+ u64 cbs_s;
+};
+
+struct qm_dma {
+ void *va;
+ dma_addr_t dma;
+ size_t size;
+};
+
+struct hisi_qm_status {
+ u32 eq_head;
+ bool eqc_phase;
+ u32 aeq_head;
+ bool aeqc_phase;
+ atomic_t flags;
+ int stop_reason;
+};
+
+struct hisi_qm;
+
+enum acc_err_result {
+ ACC_ERR_NONE,
+ ACC_ERR_NEED_RESET,
+ ACC_ERR_RECOVERED,
+};
+
+struct hisi_qm_err_mask {
+ u32 ecc_2bits_mask;
+ u32 shutdown_mask;
+ u32 reset_mask;
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+};
+
+struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ struct hisi_qm_err_mask qm_err;
+ struct hisi_qm_err_mask dev_err;
+};
+
+struct hisi_qm_err_status {
+ u32 is_qm_ecc_mbit;
+ u32 is_dev_ecc_mbit;
+};
+
+struct hisi_qm_err_ini {
+ int (*hw_init)(struct hisi_qm *qm);
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ void (*open_sva_prefetch)(struct hisi_qm *qm);
+ void (*close_sva_prefetch)(struct hisi_qm *qm);
+ void (*show_last_dfx_regs)(struct hisi_qm *qm);
+ void (*err_info_init)(struct hisi_qm *qm);
+ enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
+ bool (*dev_is_abnormal)(struct hisi_qm *qm);
+ int (*set_priv_status)(struct hisi_qm *qm);
+ void (*disable_axi_error)(struct hisi_qm *qm);
+ void (*enable_axi_error)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_cap_info {
+ u32 type;
+ /* Register offset */
+ u32 offset;
+ /* Bit offset in register */
+ u32 shift;
+ u32 mask;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
+struct hisi_qm_cap_query_info {
+ u32 type;
+ const char *name;
+ u32 offset;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
+struct hisi_qm_cap_record {
+ u32 type;
+ const char *name;
+ u32 cap_val;
+};
+
+struct hisi_qm_cap_tables {
+ u32 qm_cap_size;
+ struct hisi_qm_cap_record *qm_cap_table;
+ u32 dev_cap_size;
+ struct hisi_qm_cap_record *dev_cap_table;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+ int (*register_to_crypto)(struct hisi_qm *qm);
+ void (*unregister_from_crypto)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_poll_data {
+ struct hisi_qm *qm;
+ struct work_struct work;
+ u16 *qp_finish_id;
+ u16 eqe_num;
+};
+
+/**
+ * struct qm_err_isolate - device error isolation control
+ * @isolate_lock: protects device error log
+ * @err_threshold: user config error threshold which triggers isolation
+ * @is_isolate: device isolation state
+ * @qm_hw_errs: list of logged qm device errors
+ */
+struct qm_err_isolate {
+ struct mutex isolate_lock;
+ u32 err_threshold;
+ bool is_isolate;
+ struct list_head qm_hw_errs;
+};
+
+struct qm_rsv_buf {
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqc *eqc;
+ struct qm_aeqc *aeqc;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqc_dma;
+ dma_addr_t aeqc_dma;
+ struct qm_dma qcdma;
+};
+
+struct hisi_qm {
+ enum qm_hw_ver ver;
+ enum qm_fun_type fun_type;
+ const char *dev_name;
+ struct pci_dev *pdev;
+ void __iomem *io_base;
+ void __iomem *db_io_base;
+
+ /* Capability version, 0: not supported */
+ u32 cap_ver;
+ u32 sqe_size;
+ u32 qp_base;
+ u32 qp_num;
+ u32 qp_in_used;
+ u32 ctrl_qp_num;
+ u32 max_qp_num;
+ u32 vfs_num;
+ u32 db_interval;
+ u16 eq_depth;
+ u16 aeq_depth;
+ struct list_head list;
+ struct hisi_qm_list *qm_list;
+
+ struct qm_dma qdma;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqe *eqe;
+ struct qm_aeqe *aeqe;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqe_dma;
+ dma_addr_t aeqe_dma;
+ struct qm_rsv_buf xqc_buf;
+
+ struct hisi_qm_status status;
+ const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_info err_info;
+ struct hisi_qm_err_status err_status;
+ /* driver removing and reset sched */
+ unsigned long misc_ctl;
+ /* Device capability bit */
+ unsigned long caps;
+
+ struct rw_semaphore qps_lock;
+ struct idr qp_idr;
+ struct hisi_qp *qp_array;
+ struct hisi_qm_poll_data *poll_data;
+
+ struct mutex mailbox_lock;
+
+ struct mutex ifc_lock;
+
+ const struct hisi_qm_hw_ops *ops;
+
+ struct qm_debug debug;
+
+ u32 error_mask;
+
+ struct workqueue_struct *wq;
+ struct work_struct rst_work;
+ struct work_struct cmd_process;
+
+ bool use_sva;
+
+ resource_size_t phys_base;
+ resource_size_t db_phys_base;
+ struct uacce_device *uacce;
+ int mode;
+ struct qm_shaper_factor *factor;
+ u32 mb_qos;
+ u32 type_rate;
+ struct qm_err_isolate isolate_data;
+
+ struct hisi_qm_cap_tables cap_tables;
+};
+
+struct hisi_qp_status {
+ atomic_t used;
+ u16 sq_tail;
+ u16 cq_head;
+ bool cqc_phase;
+ atomic_t flags;
+};
+
+struct hisi_qp_ops {
+ int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
+};
+
+struct hisi_qp {
+ u32 qp_id;
+ u16 sq_depth;
+ u16 cq_depth;
+ u8 alg_type;
+ u8 req_type;
+
+ struct qm_dma qdma;
+ void *sqe;
+ struct qm_cqe *cqe;
+ dma_addr_t sqe_dma;
+ dma_addr_t cqe_dma;
+
+ struct hisi_qp_status qp_status;
+ struct hisi_qp_ops *hw_ops;
+ void *qp_ctx;
+ void (*req_cb)(struct hisi_qp *qp, void *data);
+ void (*event_cb)(struct hisi_qp *qp);
+
+ struct hisi_qm *qm;
+ bool is_resetting;
+ bool is_in_kernel;
+ u16 pasid;
+ struct uacce_queue *uacce_q;
+};
+
+static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM_V2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_SVA &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
+}
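
These setters are meant to be plugged into the module parameter machinery;
a hedged sketch of how a driver might wire one up (the variable name and
description text are illustrative):

	/* hedged sketch: expose vfs_num as a validated module parameter */
	static const struct kernel_param_ops vfs_num_ops = {
		.set = vfs_num_set,
		.get = param_get_int,
	};

	static u32 vfs_num;
	module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
	MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (0-63), 0 (default)");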
+
+static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
+{
+ INIT_LIST_HEAD(&qm_list->list);
+ mutex_init(&qm_list->lock);
+}
+
+static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
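
A hedged probe-time sketch showing how the list helpers combine with the
lifecycle functions declared below (error handling is trimmed and
zip_devices is an illustrative name):

	/* hedged sketch: init, start, then publish the qm on a driver list */
	static struct hisi_qm_list zip_devices;

	static int probe_sketch(struct hisi_qm *qm)
	{
		int ret;

		ret = hisi_qm_init(qm);
		if (ret)
			return ret;

		ret = hisi_qm_start(qm);
		if (ret) {
			hisi_qm_uninit(qm);
			return ret;
		}

		hisi_qm_add_list(qm, &zip_devices);
		return 0;
	}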
+
+int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device);
+int hisi_qm_init(struct hisi_qm *qm);
+void hisi_qm_uninit(struct hisi_qm *qm);
+int hisi_qm_start(struct hisi_qm *qm);
+int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
+int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
+void hisi_qm_stop_qp(struct hisi_qp *qp);
+int hisi_qp_send(struct hisi_qp *qp, const void *msg);
+void hisi_qm_debug_init(struct hisi_qm *qm);
+void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, u32 reg_len);
+void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
+void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+ struct dfx_diff_registers *dregs, u32 regs_len);
+
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
+
+int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
+int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
+ bool op);
+
+struct hisi_acc_sgl_pool;
+struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
+ struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma, enum dma_data_direction dir);
+void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir);
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr);
+void hisi_acc_free_sgl_pool(struct device *dev,
+ struct hisi_acc_sgl_pool *pool);
+int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
+ u8 alg_type, int node, struct hisi_qp **qps);
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
+int hisi_qm_resume(struct device *dev);
+int hisi_qm_suspend(struct device *dev);
+void hisi_qm_pm_uninit(struct hisi_qm *qm);
+void hisi_qm_pm_init(struct hisi_qm *qm);
+int hisi_qm_get_dfx_access(struct hisi_qm *qm);
+void hisi_qm_put_dfx_access(struct hisi_qm *qm);
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read);
+u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
+ const struct hisi_qm_cap_query_info *info_table,
+ u32 index, bool is_read);
+int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
+ u32 dev_algs_size);
+
+/* Used by VFIO ACC live migration driver */
+struct pci_driver *hisi_sec_get_pf_driver(void);
+struct pci_driver *hisi_hpre_get_pf_driver(void);
+struct pci_driver *hisi_zip_get_pf_driver(void);
+#endif
diff --git a/include/linux/hmm-dma.h b/include/linux/hmm-dma.h
new file mode 100644
index 000000000000..f58b9fc71999
--- /dev/null
+++ b/include/linux/hmm-dma.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+#ifndef LINUX_HMM_DMA_H
+#define LINUX_HMM_DMA_H
+
+#include <linux/dma-mapping.h>
+
+struct dma_iova_state;
+struct pci_p2pdma_map_state;
+
+/*
+ * struct hmm_dma_map - array of PFNs and DMA addresses
+ *
+ * @state: DMA IOVA state
+ * @pfn_list: array of PFNs
+ * @dma_list: array of DMA addresses
+ * @dma_entry_size: size of each DMA entry in the array
+ */
+struct hmm_dma_map {
+ struct dma_iova_state state;
+ unsigned long *pfn_list;
+ dma_addr_t *dma_list;
+ size_t dma_entry_size;
+};
+
+int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
+ size_t nr_entries, size_t dma_entry_size);
+void hmm_dma_map_free(struct device *dev, struct hmm_dma_map *map);
+dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
+ size_t idx,
+ struct pci_p2pdma_map_state *p2pdma_state);
+bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx);
+#endif /* LINUX_HMM_DMA_H */
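
A hedged sketch of the alloc/map/free lifecycle these four entry points
imply (dev and nr_pages are assumed, and the hmm_range_fault() step that
fills the PFN list is elided):

	/* hedged lifecycle sketch; fault and unwind logic are elided */
	static int dma_map_sketch(struct device *dev, size_t nr_pages)
	{
		struct pci_p2pdma_map_state p2p = {};
		struct hmm_dma_map map;
		size_t i;
		int ret;

		ret = hmm_dma_map_alloc(dev, &map, nr_pages, sizeof(dma_addr_t));
		if (ret)
			return ret;

		/* ... hmm_range_fault() fills map.pfn_list here ... */

		for (i = 0; i < nr_pages; i++) {
			dma_addr_t addr = hmm_dma_map_pfn(dev, &map, i, &p2p);

			if (dma_mapping_error(dev, addr))
				break;	/* real code would unwind properly */
		}

		for (i = 0; i < nr_pages; i++)
			hmm_dma_unmap_pfn(dev, &map, i);
		hmm_dma_map_free(dev, &map);
		return 0;
	}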
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
new file mode 100644
index 000000000000..db75ffc949a7
--- /dev/null
+++ b/include/linux/hmm.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Authors: Jérôme Glisse <jglisse@redhat.com>
+ *
+ * See Documentation/mm/hmm.rst for reasons and overview of what HMM is.
+ */
+#ifndef LINUX_HMM_H
+#define LINUX_HMM_H
+
+#include <linux/mm.h>
+
+struct mmu_interval_notifier;
+
+/*
+ * On output:
+ * 0 - The page is faultable and a future call with
+ * HMM_PFN_REQ_FAULT could succeed.
+ * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at
+ * least readable. If dev_private_owner is !NULL then this could
+ * point at a DEVICE_PRIVATE page.
+ * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
+ * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
+ * fail, i.e. poisoned memory, special pages, no vma, etc.
+ * HMM_PFN_P2PDMA - P2P page
+ * HMM_PFN_P2PDMA_BUS - Bus mapped P2P transfer
+ * HMM_PFN_DMA_MAPPED - Flag preserved on input-to-output transformation
+ * to mark that the page is already DMA mapped
+ *
+ * On input:
+ * 0 - Return the current state of the page, do not fault it.
+ * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault()
+ * will fail
+ * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault()
+ * will fail. Must be combined with HMM_PFN_REQ_FAULT.
+ */
+enum hmm_pfn_flags {
+ /* Output fields and flags */
+ HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
+ HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
+ HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
+ /*
+ * Sticky flags, carried from input to output,
+ * don't forget to update HMM_PFN_INOUT_FLAGS
+ */
+ HMM_PFN_DMA_MAPPED = 1UL << (BITS_PER_LONG - 4),
+ HMM_PFN_P2PDMA = 1UL << (BITS_PER_LONG - 5),
+ HMM_PFN_P2PDMA_BUS = 1UL << (BITS_PER_LONG - 6),
+
+ HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 11),
+
+ /* Input flags */
+ HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
+ HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
+
+ HMM_PFN_FLAGS = ~((1UL << HMM_PFN_ORDER_SHIFT) - 1),
+};
+
+/*
+ * hmm_pfn_to_page() - return struct page pointed to by a device entry
+ *
+ * This must be called under the caller's 'user_lock' after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
+ */
+static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
+{
+ return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
+}
+
+/*
+ * hmm_pfn_to_phys() - return physical address pointed to by a device entry
+ */
+static inline phys_addr_t hmm_pfn_to_phys(unsigned long hmm_pfn)
+{
+ return __pfn_to_phys(hmm_pfn & ~HMM_PFN_FLAGS);
+}
+
+/*
+ * hmm_pfn_to_map_order() - return the CPU mapping size order
+ *
+ * This is optionally useful to optimize processing of the pfn result
+ * array. It indicates that the page starts at the order aligned VA and is
+ * 1<<order bytes long. Every pfn within a high order page will have the
+ * same pfn flags, both access protections and the map_order. The caller must
+ * be careful with edge cases as the start and end VA of the given page may
+ * extend past the range used with hmm_range_fault().
+ *
+ * This must be called under the caller's 'user_lock' after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
+ */
+static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
+{
+ return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
+}
+
+/*
+ * struct hmm_range - track invalidation lock on virtual address range
+ *
+ * @notifier: a mmu_interval_notifier that includes the start/end
+ * @notifier_seq: result of mmu_interval_read_begin()
+ * @start: range virtual start address (inclusive)
+ * @end: range virtual end address (exclusive)
+ * @hmm_pfns: array of pfns (big enough for the range)
+ * @default_flags: default flags for the range (write, read, ... see hmm doc)
+ * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
+ * @dev_private_owner: owner of device private pages
+ */
+struct hmm_range {
+ struct mmu_interval_notifier *notifier;
+ unsigned long notifier_seq;
+ unsigned long start;
+ unsigned long end;
+ unsigned long *hmm_pfns;
+ unsigned long default_flags;
+ unsigned long pfn_flags_mask;
+ void *dev_private_owner;
+};
+
+/*
+ * Please see Documentation/mm/hmm.rst for how to use the range API.
+ */
+int hmm_range_fault(struct hmm_range *range);
+
+/*
+ * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
+ *
+ * When waiting for mmu notifiers we need some kind of timeout, otherwise
+ * we could potentially wait forever; 1000ms, i.e. 1s, is already a long
+ * time to wait.
+ */
+#define HMM_RANGE_DEFAULT_TIMEOUT 1000
+
+#endif /* LINUX_HMM_H */
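
hmm_range_fault() is only declared here; the retry loop it expects (see Documentation/mm/hmm.rst) looks roughly like the sketch below. Device page-table locking is elided and the single-page range is illustrative:

#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

static int example_fault_page(struct mmu_interval_notifier *ni,
			      unsigned long addr)
{
	unsigned long pfns[1];
	struct hmm_range range = {
		.notifier	= ni,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.hmm_pfns	= pfns,
		.default_flags	= HMM_PFN_REQ_FAULT,
	};
	int ret;

	do {
		range.notifier_seq = mmu_interval_read_begin(ni);
		mmap_read_lock(ni->mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(ni->mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;	/* collided, retry */
			return ret;
		}
		/*
		 * Take the device page-table lock here; only commit pfns[]
		 * to the device if the retry check below passes.
		 */
	} while (mmu_interval_read_retry(ni, range.notifier_seq));

	return 0;
}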
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 630b1a98ab58..9fa9c30a34e6 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -1,62 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H
#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
enum host1x_class {
HOST1X_CLASS_HOST1X = 0x1,
+ HOST1X_CLASS_NVJPG1 = 0x7,
+ HOST1X_CLASS_NVENC = 0x21,
+ HOST1X_CLASS_NVENC1 = 0x22,
HOST1X_CLASS_GR2D = 0x51,
HOST1X_CLASS_GR2D_SB = 0x52,
HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
+ HOST1X_CLASS_NVJPG = 0xC0,
+ HOST1X_CLASS_NVDEC = 0xF0,
+ HOST1X_CLASS_NVDEC1 = 0xF5,
+ HOST1X_CLASS_OFA = 0xF8,
};
+struct host1x;
struct host1x_client;
+struct iommu_group;
+
+u64 host1x_get_dma_mask(struct host1x *host1x);
+
+/**
+ * struct host1x_bo_cache - host1x buffer object cache
+ * @mappings: list of mappings
+ * @lock: synchronizes accesses to the list of mappings
+ *
+ * Note that entries are not periodically evicted from this cache and instead need to be
+ * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
+ * released when the last reference to a buffer object represented by a mapping in this
+ * cache is dropped.
+ */
+struct host1x_bo_cache {
+ struct list_head mappings;
+ struct mutex lock;
+};
+
+static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
+{
+ INIT_LIST_HEAD(&cache->mappings);
+ mutex_init(&cache->lock);
+}
+
+static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
+{
+ /* XXX warn if not empty? */
+ mutex_destroy(&cache->lock);
+}
/**
* struct host1x_client_ops - host1x client operations
+ * @early_init: host1x client early initialization code
* @init: host1x client initialization code
* @exit: host1x client tear down code
+ * @late_exit: host1x client late tear down code
+ * @suspend: host1x client suspend code
+ * @resume: host1x client resume code
*/
struct host1x_client_ops {
+ int (*early_init)(struct host1x_client *client);
int (*init)(struct host1x_client *client);
int (*exit)(struct host1x_client *client);
+ int (*late_exit)(struct host1x_client *client);
+ int (*suspend)(struct host1x_client *client);
+ int (*resume)(struct host1x_client *client);
};
/**
* struct host1x_client - host1x client structure
* @list: list node for the host1x client
- * @parent: pointer to struct device representing the host1x controller
+ * @host: pointer to struct device representing the host1x controller
* @dev: pointer to struct device backing this host1x client
+ * @group: IOMMU group that this client is a member of
* @ops: host1x client operations
* @class: host1x class represented by this client
* @channel: host1x channel associated with this client
* @syncpts: array of syncpoints requested for this client
* @num_syncpts: number of syncpoints requested for this client
+ * @parent: pointer to parent structure
+ * @usecount: reference count for this structure
+ * @lock: mutex for mutually exclusive concurrency
+ * @cache: host1x buffer object cache
*/
struct host1x_client {
struct list_head list;
- struct device *parent;
+ struct device *host;
struct device *dev;
+ struct iommu_group *group;
const struct host1x_client_ops *ops;
@@ -65,6 +107,12 @@ struct host1x_client {
struct host1x_syncpt **syncpts;
unsigned int num_syncpts;
+
+ struct host1x_client *parent;
+ unsigned int usecount;
+ struct mutex lock;
+
+ struct host1x_bo_cache cache;
};
/*
@@ -74,24 +122,48 @@ struct host1x_client {
struct host1x_bo;
struct sg_table;
+struct host1x_bo_mapping {
+ struct kref ref;
+ struct dma_buf_attachment *attach;
+ enum dma_data_direction direction;
+ struct list_head list;
+ struct host1x_bo *bo;
+ struct sg_table *sgt;
+ unsigned int chunks;
+ struct device *dev;
+ dma_addr_t phys;
+ size_t size;
+
+ struct host1x_bo_cache *cache;
+ struct list_head entry;
+};
+
+static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
+{
+ return container_of(ref, struct host1x_bo_mapping, ref);
+}
+
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
- dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
- void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+ struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir);
+ void (*unpin)(struct host1x_bo_mapping *map);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
- void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
- void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
};
struct host1x_bo {
const struct host1x_bo_ops *ops;
+ struct list_head mappings;
+ spinlock_t lock;
};
static inline void host1x_bo_init(struct host1x_bo *bo,
const struct host1x_bo_ops *ops)
{
+ INIT_LIST_HEAD(&bo->mappings);
+ spin_lock_init(&bo->lock);
bo->ops = ops;
}
@@ -105,16 +177,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
- struct sg_table **sgt)
-{
- return bo->ops->pin(bo, sgt);
-}
-
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
-{
- bo->ops->unpin(bo, sgt);
-}
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir,
+ struct host1x_bo_cache *cache);
+void host1x_bo_unpin(struct host1x_bo_mapping *map);
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
@@ -126,17 +192,6 @@ static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
bo->ops->munmap(bo, addr);
}
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
- return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
- unsigned int pagenum, void *addr)
-{
- bo->ops->kunmap(bo, pagenum, addr);
-}
-
/*
* host1x syncpoints
*/
@@ -148,7 +203,9 @@ struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
@@ -157,13 +214,23 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
u32 *value);
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags);
-void host1x_syncpt_free(struct host1x_syncpt *sp);
+void host1x_syncpt_put(struct host1x_syncpt *sp);
+struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+ unsigned long flags,
+ const char *name);
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
+void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
+ u32 syncpt_id);
+
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
+ bool timeout);
+void host1x_fence_cancel(struct dma_fence *fence);
+
/*
* host1x channel
*/
@@ -171,8 +238,9 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
struct host1x_channel;
struct host1x_job;
-struct host1x_channel *host1x_channel_request(struct device *dev);
+struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
@@ -180,6 +248,9 @@ int host1x_job_submit(struct host1x_job *job);
* host1x job
*/
+#define HOST1X_RELOC_READ (1 << 0)
+#define HOST1X_RELOC_WRITE (1 << 1)
+
struct host1x_reloc {
struct {
struct host1x_bo *bo;
@@ -190,13 +261,7 @@ struct host1x_reloc {
unsigned long offset;
} target;
unsigned long shift;
-};
-
-struct host1x_waitchk {
- struct host1x_bo *bo;
- u32 offset;
- u32 syncpt_id;
- u32 thresh;
+ unsigned long flags;
};
struct host1x_job {
@@ -209,19 +274,15 @@ struct host1x_job {
/* Channel where job is submitted to */
struct host1x_channel *channel;
- u32 client;
+ /* client where the job originated */
+ struct host1x_client *client;
/* Gathers and their memory */
- struct host1x_job_gather *gathers;
- unsigned int num_gathers;
-
- /* Wait checks to be processed at submit time */
- struct host1x_waitchk *waitchk;
- unsigned int num_waitchk;
- u32 waitchk_mask;
+ struct host1x_job_cmd *cmds;
+ unsigned int num_cmds;
/* Array of handles to be pinned & unpinned */
- struct host1x_reloc *relocarray;
+ struct host1x_reloc *relocs;
unsigned int num_relocs;
struct host1x_job_unpin_data *unpins;
unsigned int num_unpins;
@@ -231,13 +292,20 @@ struct host1x_job {
dma_addr_t *reloc_addr_phys;
/* Sync point id, number of increments and end related to the submit */
- u32 syncpt_id;
+ struct host1x_syncpt *syncpt;
u32 syncpt_incrs;
u32 syncpt_end;
+ /* Completion fence for job tracking */
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+
/* Maximum time to wait for this job */
unsigned int timeout;
+ /* Job has timed out and should be released */
+ bool cancelled;
+
/* Index and number of slots used in the push buffer */
unsigned int first_get;
unsigned int num_slots;
@@ -258,13 +326,33 @@ struct host1x_job {
/* Add a channel wait for previous ops to complete */
bool serialize;
+
+ /* Fast-forward syncpoint increments on job timeout */
+ bool syncpt_recovery;
+
+ /* Callback called when job is freed */
+ void (*release)(struct host1x_job *job);
+ void *user_data;
+
+ /* Whether the host1x-side firewall should be run for this job or not */
+ bool enable_firewall;
+
+ /* Options for configuring engine data stream ID */
+ /* Context device to use for job */
+ struct host1x_memory_context *memory_context;
+ /* Stream ID to use if context isolation is disabled (!memory_context) */
+ u32 engine_fallback_streamid;
+ /* Engine offset to program stream ID to */
+ u32 engine_streamid_offset;
};
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
u32 num_cmdbufs, u32 num_relocs,
- u32 num_waitchks);
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
- u32 words, u32 offset);
+ bool skip_firewall);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
+ unsigned int words, unsigned int offset);
+void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
+ bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
@@ -322,6 +410,8 @@ struct host1x_device {
struct list_head clients;
bool registered;
+
+ struct device_dma_parameters dma_parms;
};
static inline struct host1x_device *to_host1x_device(struct device *dev)
@@ -332,15 +422,82 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
-int host1x_client_register(struct host1x_client *client);
-int host1x_client_unregister(struct host1x_client *client);
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ __host1x_client_register(client); \
+ })
+
+void host1x_client_unregister(struct host1x_client *client);
+
+int host1x_client_suspend(struct host1x_client *client);
+int host1x_client_resume(struct host1x_client *client);
struct tegra_mipi_device;
-struct tegra_mipi_device *tegra_mipi_request(struct device *device);
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+ struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
-int tegra_mipi_calibrate(struct tegra_mipi_device *device);
+int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
+int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
+
+/* host1x memory contexts */
+
+struct host1x_memory_context {
+ struct host1x *host;
+
+ refcount_t ref;
+ struct pid *owner;
+
+ struct device_dma_parameters dma_parms;
+ struct device dev;
+ u64 dma_mask;
+ u32 stream_id;
+};
+
+#ifdef CONFIG_IOMMU_API
+struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct device *dev,
+ struct pid *pid);
+void host1x_memory_context_get(struct host1x_memory_context *cd);
+void host1x_memory_context_put(struct host1x_memory_context *cd);
+#else
+static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct device *dev,
+ struct pid *pid)
+{
+ return NULL;
+}
+
+static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
+{
+}
+
+static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
+{
+}
+#endif
#endif
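
The registration contract above reduces to a probe-time pattern along these lines; the exam_* names and the empty ops bodies are placeholders, not part of the patch:

#include <linux/host1x.h>
#include <linux/platform_device.h>

static int exam_init(struct host1x_client *client)
{
	/* a real driver acquires channels/syncpoints here */
	return 0;
}

static int exam_exit(struct host1x_client *client)
{
	return 0;
}

static const struct host1x_client_ops exam_ops = {
	.init = exam_init,
	.exit = exam_exit,
};

static int exam_probe(struct platform_device *pdev)
{
	struct host1x_client *client;

	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->ops = &exam_ops;
	client->dev = &pdev->dev;

	/* the macro both initializes and registers (see the note above) */
	return host1x_client_register(client);
}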
diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h
new file mode 100644
index 000000000000..c928cb432680
--- /dev/null
+++ b/include/linux/host1x_context_bus.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ */
+
+#ifndef __LINUX_HOST1X_CONTEXT_BUS_H
+#define __LINUX_HOST1X_CONTEXT_BUS_H
+
+#include <linux/device.h>
+
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+extern const struct bus_type host1x_context_device_bus_type;
+#endif
+
+#endif
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
index d392975d8887..9be8704e2d38 100644
--- a/include/linux/hp_sdc.h
+++ b/include/linux/hp_sdc.h
@@ -180,7 +180,7 @@ switch (val) { \
#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */
-/* The documents provided do not explicitly state that all registers betweem
+/* The documents provided do not explicitly state that all registers between
* 0x01 and 0x1f inclusive can be read by sending their register index as a
* command, but this is implied and appears to be the case.
*/
@@ -281,7 +281,7 @@ typedef struct {
hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */
int rcurr, rqty; /* Current read transact in process */
- struct timeval rtv; /* Time when current read started */
+ ktime_t rtime; /* Time when current read started */
int wcurr; /* Current write transact in process */
int dev_err; /* carries status from registration */
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 9427ab4e01c3..21e69eaf7a36 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HPET__
#define __HPET__ 1
@@ -29,7 +30,7 @@ struct hpet {
unsigned long _hpet_compare;
} _u1;
u64 hpet_fsb[2]; /* FSB route */
- } hpet_timers[1];
+ } hpet_timers[];
};
#define hpet_mc _u0._hpet_mc
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 012c37fdb688..2cf1bf65b225 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * include/linux/hrtimer.h
- *
* hrtimers - High-resolution kernel timers
*
* Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
@@ -9,40 +8,51 @@
* data type definitions, declarations, prototypes
*
* Started by: Thomas Gleixner and Ingo Molnar
- *
- * For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H
-#include <linux/rbtree.h>
-#include <linux/ktime.h>
+#include <linux/hrtimer_defs.h>
+#include <linux/hrtimer_types.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/percpu.h>
+#include <linux/percpu-defs.h>
+#include <linux/rbtree.h>
#include <linux/timer.h>
-#include <linux/timerqueue.h>
-
-struct hrtimer_clock_base;
-struct hrtimer_cpu_base;
/*
* Mode arguments of xxx_hrtimer functions:
+ *
+ * HRTIMER_MODE_ABS - Time value is absolute
+ * HRTIMER_MODE_REL - Time value is relative to now
+ * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
+ * when starting the timer)
+ * HRTIMER_MODE_SOFT - Timer callback function will be executed in
+ * soft irq context
+ * HRTIMER_MODE_HARD - Timer callback function will be executed in
+ * hard irq context even on PREEMPT_RT.
*/
enum hrtimer_mode {
- HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
- HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
- HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
- HRTIMER_MODE_ABS_PINNED = 0x02,
- HRTIMER_MODE_REL_PINNED = 0x03,
-};
+ HRTIMER_MODE_ABS = 0x00,
+ HRTIMER_MODE_REL = 0x01,
+ HRTIMER_MODE_PINNED = 0x02,
+ HRTIMER_MODE_SOFT = 0x04,
+ HRTIMER_MODE_HARD = 0x08,
-/*
- * Return values for the callback function
- */
-enum hrtimer_restart {
- HRTIMER_NORESTART, /* Timer is not restarted */
- HRTIMER_RESTART, /* Timer must be restarted */
+ HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+
+ HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
+
+ HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
+
+ HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
+ HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD,
+
+ HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
+ HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
/*
@@ -73,33 +83,6 @@ enum hrtimer_restart {
#define HRTIMER_STATE_ENQUEUED 0x01
/**
- * struct hrtimer - the basic hrtimer structure
- * @node: timerqueue node, which also manages node.expires,
- * the absolute expiry time in the hrtimers internal
- * representation. The time is related to the clock on
- * which the timer is based. Is setup by adding
- * slack to the _softexpires value. For non range timers
- * identical to _softexpires.
- * @_softexpires: the absolute earliest expiry time of the hrtimer.
- * The time which was given as expiry time when the timer
- * was armed.
- * @function: timer expiry callback function
- * @base: pointer to the timer base (per cpu and per clock)
- * @state: state information (See bit values above)
- * @is_rel: Set if the timer was armed relative
- *
- * The hrtimer structure must be initialized by hrtimer_init()
- */
-struct hrtimer {
- struct timerqueue_node node;
- ktime_t _softexpires;
- enum hrtimer_restart (*function)(struct hrtimer *);
- struct hrtimer_clock_base *base;
- u8 state;
- u8 is_rel;
-};
-
-/**
* struct hrtimer_sleeper - simple sleeper structure
* @timer: embedded timer structure
* @task: task to wake up
@@ -111,93 +94,8 @@ struct hrtimer_sleeper {
struct task_struct *task;
};
-#ifdef CONFIG_64BIT
-# define HRTIMER_CLOCK_BASE_ALIGN 64
-#else
-# define HRTIMER_CLOCK_BASE_ALIGN 32
-#endif
-
-/**
- * struct hrtimer_clock_base - the timer base for a specific clock
- * @cpu_base: per cpu clock base
- * @index: clock type index for per_cpu support when moving a
- * timer to a base on another cpu.
- * @clockid: clock id for per_cpu support
- * @active: red black tree root node for the active timers
- * @get_time: function to retrieve the current time of the clock
- * @offset: offset of this clock to the monotonic base
- */
-struct hrtimer_clock_base {
- struct hrtimer_cpu_base *cpu_base;
- int index;
- clockid_t clockid;
- struct timerqueue_head active;
- ktime_t (*get_time)(void);
- ktime_t offset;
-} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
-
-enum hrtimer_base_type {
- HRTIMER_BASE_MONOTONIC,
- HRTIMER_BASE_REALTIME,
- HRTIMER_BASE_BOOTTIME,
- HRTIMER_BASE_TAI,
- HRTIMER_MAX_CLOCK_BASES,
-};
-
-/*
- * struct hrtimer_cpu_base - the per cpu clock bases
- * @lock: lock protecting the base and associated clock bases
- * and timers
- * @seq: seqcount around __run_hrtimer
- * @running: pointer to the currently running hrtimer
- * @cpu: cpu number
- * @active_bases: Bitfield to mark bases with active timers
- * @clock_was_set_seq: Sequence counter of clock was set events
- * @migration_enabled: The migration of hrtimers to other cpus is enabled
- * @nohz_active: The nohz functionality is enabled
- * @expires_next: absolute time of the next event which was scheduled
- * via clock_set_next_event()
- * @next_timer: Pointer to the first expiring timer
- * @in_hrtirq: hrtimer_interrupt() is currently executing
- * @hres_active: State of high resolution mode
- * @hang_detected: The last hrtimer interrupt detected a hang
- * @nr_events: Total number of hrtimer interrupt events
- * @nr_retries: Total number of hrtimer interrupt retries
- * @nr_hangs: Total number of hrtimer interrupt hangs
- * @max_hang_time: Maximum time spent in hrtimer_interrupt
- * @clock_base: array of clock bases for this cpu
- *
- * Note: next_timer is just an optimization for __remove_hrtimer().
- * Do not dereference the pointer because it is not reliable on
- * cross cpu removals.
- */
-struct hrtimer_cpu_base {
- raw_spinlock_t lock;
- seqcount_t seq;
- struct hrtimer *running;
- unsigned int cpu;
- unsigned int active_bases;
- unsigned int clock_was_set_seq;
- bool migration_enabled;
- bool nohz_active;
-#ifdef CONFIG_HIGH_RES_TIMERS
- unsigned int in_hrtirq : 1,
- hres_active : 1,
- hang_detected : 1;
- ktime_t expires_next;
- struct hrtimer *next_timer;
- unsigned int nr_events;
- unsigned int nr_retries;
- unsigned int nr_hangs;
- unsigned int max_hang_time;
-#endif
- struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-} ____cacheline_aligned;
-
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
- BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
-
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -256,14 +154,17 @@ static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
return ktime_to_ns(timer->node.expires);
}
+ktime_t hrtimer_cb_get_time(const struct hrtimer *timer);
+
static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
- return ktime_sub(timer->node.expires, timer->base->get_time());
+ return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
}
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
- return timer->base->get_time();
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ timer->base->cpu_base->hres_active : 0;
}
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -271,40 +172,12 @@ struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return timer->base->cpu_base->hres_active;
-}
-
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-# define HIGH_RES_NSEC 1
-# define KTIME_HIGH_RES (HIGH_RES_NSEC)
-# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
-
-extern void clock_was_set_delayed(void);
-
extern unsigned int hrtimer_resolution;
#else
-# define MONOTONIC_RES_NSEC LOW_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_LOW_RES
-
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return 0;
-}
-
-static inline void clock_was_set_delayed(void) { }
-
#endif
static inline ktime_t
@@ -324,39 +197,47 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
- return __hrtimer_expires_remaining_adjusted(timer,
- timer->base->get_time());
+ return __hrtimer_expires_remaining_adjusted(timer, hrtimer_cb_get_time(timer));
}
-extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
+extern void timerfd_resume(void);
#else
static inline void timerfd_clock_was_set(void) { }
+static inline void timerfd_resume(void) { }
#endif
-extern void hrtimers_resume(void);
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+#ifdef CONFIG_PREEMPT_RT
+void hrtimer_cancel_wait_running(const struct hrtimer *timer);
+#else
+static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
+{
+ cpu_relax();
+}
+#endif
+
+static inline enum hrtimer_restart hrtimer_dummy_timeout(struct hrtimer *unused)
+{
+ return HRTIMER_NORESTART;
+}
/* Exported timer functions: */
/* Initialize timers: */
-extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
- enum hrtimer_mode mode);
+extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_on_stack(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id,
+ enum hrtimer_mode mode);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
- enum hrtimer_mode mode);
-
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
-static inline void hrtimer_init_on_stack(struct hrtimer *timer,
- clockid_t which_clock,
- enum hrtimer_mode mode)
-{
- hrtimer_init(timer, which_clock, mode);
-}
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
@@ -365,11 +246,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
/**
- * hrtimer_start - (re)start an hrtimer on the current CPU
+ * hrtimer_start - (re)start an hrtimer
* @timer: the timer to be added
* @tim: expiry time
- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
- * relative (HRTIMER_MODE_REL)
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ * softirq based mode is considered for debug purposes only!
*/
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
@@ -391,6 +273,9 @@ static inline void hrtimer_start_expires(struct hrtimer *timer,
hrtimer_start_range_ns(timer, soft, delta, mode);
}
+void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
+ enum hrtimer_mode mode);
+
static inline void hrtimer_restart(struct hrtimer *timer)
{
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
@@ -399,21 +284,32 @@ static inline void hrtimer_restart(struct hrtimer *timer)
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+/**
+ * hrtimer_get_remaining - get remaining time for the timer
+ * @timer: the timer to read
+ */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
}
extern u64 hrtimer_get_next_event(void);
+extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
-/*
- * Helper function to check, whether the timer is on one of the queues
+/**
+ * hrtimer_is_queued - check, whether the timer is on one of the queues
+ * @timer: Timer to check
+ *
+ * Returns: True if the timer is queued, false otherwise
+ *
+ * The function can be used lockless, but it gives only a current snapshot.
*/
-static inline int hrtimer_is_queued(struct hrtimer *timer)
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_ENQUEUED;
+ /* The READ_ONCE pairs with the update functions of timer->state */
+ return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}
/*
@@ -422,7 +318,30 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
- return timer->base->cpu_base->running == timer;
+ return timer->base->running == timer;
+}
+
+/**
+ * hrtimer_update_function - Update the timer's callback function
+ * @timer: Timer to update
+ * @function: New callback function
+ *
+ * Only safe to call if the timer is not enqueued. Can be called in the callback function if the
+ * timer is not enqueued at the same time (see the comments above HRTIMER_STATE_ENQUEUED).
+ */
+static inline void hrtimer_update_function(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *))
+{
+#ifdef CONFIG_PROVE_LOCKING
+ guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock);
+
+ if (WARN_ON_ONCE(hrtimer_is_queued(timer)))
+ return;
+
+ if (WARN_ON_ONCE(!function))
+ return;
+#endif
+ ACCESS_PRIVATE(timer, function) = function;
}
/* Forward a hrtimer so it expires after now: */
@@ -430,43 +349,31 @@ extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
/**
- * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * hrtimer_forward_now() - forward the timer expiry so it expires after now
* @timer: hrtimer to forward
* @interval: the interval to forward
*
- * Forward the timer expiry so it will expire after the current time
- * of the hrtimer clock base. Returns the number of overruns.
- *
- * Can be safely called from the callback function of @timer. If
- * called from other contexts @timer must neither be enqueued nor
- * running the callback and the caller needs to take care of
- * serialization.
- *
- * Note: This only updates the timer expiry value and does not requeue
- * the timer.
+ * It is a variant of hrtimer_forward(). The timer will expire after the current
+ * time of the hrtimer clock base. See hrtimer_forward() for details.
*/
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
- return hrtimer_forward(timer, timer->base->get_time(), interval);
+ return hrtimer_forward(timer, hrtimer_cb_get_time(timer), interval);
}
/* Precise sleep: */
extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
-extern long hrtimer_nanosleep(const struct timespec64 *rqtp,
- const enum hrtimer_mode mode,
+extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
const clockid_t clockid);
-extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
- struct task_struct *tsk);
-
extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
- const enum hrtimer_mode mode);
+ const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
- int clock);
+ clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
@@ -479,10 +386,11 @@ extern void __init hrtimers_init(void);
extern void sysrq_timer_list_show(void);
int hrtimers_prepare_cpu(unsigned int cpu);
+int hrtimers_cpu_starting(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
-int hrtimers_dead_cpu(unsigned int cpu);
+int hrtimers_cpu_dying(unsigned int cpu);
#else
-#define hrtimers_dead_cpu NULL
+#define hrtimers_cpu_dying NULL
#endif
#endif
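
Taken together, the hrtimer_setup()/hrtimer_start() API yields a self-rearming timer like the sketch below. The 100ms period and example_* names are illustrative; hrtimer_forward_now() only advances the expiry, and returning HRTIMER_RESTART is what requeues the timer:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* push the expiry 100ms past "now" on this timer's clock base */
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void example_timer_arm(void)
{
	hrtimer_setup(&example_timer, example_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_start(&example_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}

static void example_timer_disarm(void)
{
	hrtimer_cancel(&example_timer);	/* waits for a running callback */
}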
diff --git a/include/linux/hrtimer_api.h b/include/linux/hrtimer_api.h
new file mode 100644
index 000000000000..8d9700894468
--- /dev/null
+++ b/include/linux/hrtimer_api.h
@@ -0,0 +1 @@
+#include <linux/hrtimer.h>
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
new file mode 100644
index 000000000000..aa49ffa130e5
--- /dev/null
+++ b/include/linux/hrtimer_defs.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HRTIMER_DEFS_H
+#define _LINUX_HRTIMER_DEFS_H
+
+#include <linux/ktime.h>
+#include <linux/timerqueue.h>
+#include <linux/seqlock.h>
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution value.
+ */
+# define HIGH_RES_NSEC 1
+# define KTIME_HIGH_RES (HIGH_RES_NSEC)
+# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
+# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+
+#else
+
+# define MONOTONIC_RES_NSEC LOW_RES_NSEC
+# define KTIME_MONOTONIC_RES KTIME_LOW_RES
+
+#endif
+
+#ifdef CONFIG_64BIT
+# define __hrtimer_clock_base_align ____cacheline_aligned
+#else
+# define __hrtimer_clock_base_align
+#endif
+
+/**
+ * struct hrtimer_clock_base - the timer base for a specific clock
+ * @cpu_base: per cpu clock base
+ * @index: clock type index for per_cpu support when moving a
+ * timer to a base on another cpu.
+ * @clockid: clock id for per_cpu support
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
+ * @active: red black tree root node for the active timers
+ * @offset: offset of this clock to the monotonic base
+ */
+struct hrtimer_clock_base {
+ struct hrtimer_cpu_base *cpu_base;
+ unsigned int index;
+ clockid_t clockid;
+ seqcount_raw_spinlock_t seq;
+ struct hrtimer *running;
+ struct timerqueue_head active;
+ ktime_t offset;
+} __hrtimer_clock_base_align;
+
+enum hrtimer_base_type {
+ HRTIMER_BASE_MONOTONIC,
+ HRTIMER_BASE_REALTIME,
+ HRTIMER_BASE_BOOTTIME,
+ HRTIMER_BASE_TAI,
+ HRTIMER_BASE_MONOTONIC_SOFT,
+ HRTIMER_BASE_REALTIME_SOFT,
+ HRTIMER_BASE_BOOTTIME_SOFT,
+ HRTIMER_BASE_TAI_SOFT,
+ HRTIMER_MAX_CLOCK_BASES,
+};
+
+/**
+ * struct hrtimer_cpu_base - the per cpu clock bases
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+ * @cpu: cpu number
+ * @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hang_detected: The last hrtimer interrupt detected a hang
+ * @softirq_activated: set if the softirq has been raised; updating the
+ * softirq related settings is not required then.
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimers are
+ * expired
+ * @online: CPU is online from an hrtimers point of view
+ * @timer_waiters: An hrtimer_cancel() invocation waits for the timer
+ * callback to finish.
+ * @expires_next: absolute time of the next event; required for remote
+ * hrtimer enqueue; it is the overall first expiry time (hard
+ * and soft hrtimers are taken into account)
+ * @next_timer: Pointer to the first expiring timer
+ * @softirq_expires_next: Time at which the soft queues also need to be
+ * checked for expiry
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
+ * @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ * Do not dereference the pointer because it is not reliable on
+ * cross cpu removals.
+ */
+struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set_seq;
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+ softirq_activated : 1,
+ online : 1;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int nr_events;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
+ unsigned int max_hang_time;
+#endif
+#ifdef CONFIG_PREEMPT_RT
+ spinlock_t softirq_expiry_lock;
+ atomic_t timer_waiters;
+#endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+ struct hrtimer *softirq_next_timer;
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ call_single_data_t csd;
+} ____cacheline_aligned;
+
+
+#endif
diff --git a/include/linux/hrtimer_types.h b/include/linux/hrtimer_types.h
new file mode 100644
index 000000000000..8fbbb6bdf7a1
--- /dev/null
+++ b/include/linux/hrtimer_types.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HRTIMER_TYPES_H
+#define _LINUX_HRTIMER_TYPES_H
+
+#include <linux/types.h>
+#include <linux/timerqueue_types.h>
+
+struct hrtimer_clock_base;
+
+/*
+ * Return values for the callback function
+ */
+enum hrtimer_restart {
+ HRTIMER_NORESTART, /* Timer is not restarted */
+ HRTIMER_RESTART, /* Timer must be restarted */
+};
+
+/**
+ * struct hrtimer - the basic hrtimer structure
+ * @node: timerqueue node, which also manages node.expires,
+ * the absolute expiry time in the hrtimers internal
+ * representation. The time is related to the clock on
+ * which the timer is based. Is setup by adding
+ * slack to the _softexpires value. For non range timers
+ * identical to _softexpires.
+ * @_softexpires: the absolute earliest expiry time of the hrtimer.
+ * The time which was given as expiry time when the timer
+ * was armed.
+ * @function: timer expiry callback function
+ * @base: pointer to the timer base (per cpu and per clock)
+ * @state: state information (See bit values above)
+ * @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
+ * @is_hard: Set if hrtimer will be expired in hard interrupt context
+ * even on RT.
+ *
+ * The hrtimer structure must be initialized by hrtimer_setup()
+ */
+struct hrtimer {
+ struct timerqueue_node node;
+ ktime_t _softexpires;
+ enum hrtimer_restart (*__private function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ u8 state;
+ u8 is_rel;
+ u8 is_soft;
+ u8 is_hard;
+};
+
+#endif /* _LINUX_HRTIMER_TYPES_H */
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 57402544b53f..6ca92bff02c6 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* HSI core header file.
*
* Copyright (C) 2010 Nokia Corporation. All rights reserved.
*
* Contact: Carlos Chinea <carlos.chinea@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef __LINUX_HSI_H__
diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h
index 1433651be0dc..972434daa000 100644
--- a/include/linux/hsi/ssi_protocol.h
+++ b/include/linux/hsi/ssi_protocol.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ssip_slave.h
*
@@ -6,20 +7,6 @@
* Copyright (C) 2010 Nokia Corporation. All rights reserved.
*
* Contact: Carlos Chinea <carlos.chinea@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef __LINUX_SSIP_SLAVE_H__
@@ -37,6 +24,7 @@ int ssip_slave_stop_tx(struct hsi_client *master);
void ssip_reset_event(struct hsi_client *master);
int ssip_slave_running(struct hsi_client *master);
+void ssi_waketest(struct hsi_client *cl, unsigned int enable);
#endif /* __LINUX_SSIP_SLAVE_H__ */
diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h
deleted file mode 100644
index ab3f6cb4dddc..000000000000
--- a/include/linux/htcpld.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef __LINUX_HTCPLD_H
-#define __LINUX_HTCPLD_H
-
-struct htcpld_chip_platform_data {
- unsigned int addr;
- unsigned int reset;
- unsigned int num_gpios;
- unsigned int gpio_out_base;
- unsigned int gpio_in_base;
- unsigned int irq_base;
- unsigned int num_irqs;
-};
-
-struct htcpld_core_platform_data {
- unsigned int int_reset_gpio_hi;
- unsigned int int_reset_gpio_lo;
- unsigned int i2c_adapter_id;
-
- struct htcpld_chip_platform_data *chip;
- unsigned int num_chip;
-};
-
-#endif /* __LINUX_HTCPLD_H */
-
diff --git a/include/linux/hte.h b/include/linux/hte.h
new file mode 100644
index 000000000000..8289055061ab
--- /dev/null
+++ b/include/linux/hte.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_HTE_H
+#define __LINUX_HTE_H
+
+#include <linux/errno.h>
+
+struct hte_chip;
+struct hte_device;
+struct of_phandle_args;
+
+/**
+ * enum hte_edge - HTE line edge flags.
+ *
+ * @HTE_EDGE_NO_SETUP: No edge setup. In this case the consumer will set up
+ * the edges, for example during a request_irq() call.
+ * @HTE_RISING_EDGE_TS: Rising edge.
+ * @HTE_FALLING_EDGE_TS: Falling edge.
+ *
+ */
+enum hte_edge {
+ HTE_EDGE_NO_SETUP = 1U << 0,
+ HTE_RISING_EDGE_TS = 1U << 1,
+ HTE_FALLING_EDGE_TS = 1U << 2,
+};
+
+/**
+ * enum hte_return - HTE subsystem return values used during callback.
+ *
+ * @HTE_CB_HANDLED: The consumer handled the data.
+ * @HTE_RUN_SECOND_CB: The consumer needs further processing, in that case
+ * the HTE subsystem calls the secondary callback provided by the consumer,
+ * where it is allowed to sleep.
+ */
+enum hte_return {
+ HTE_CB_HANDLED,
+ HTE_RUN_SECOND_CB,
+};
+
+/**
+ * struct hte_ts_data - HTE timestamp data.
+ *
+ * @tsc: Timestamp value.
+ * @seq: Sequence counter of the timestamps.
+ * @raw_level: Level of the line at the timestamp if the provider supports it,
+ * -1 otherwise.
+ */
+struct hte_ts_data {
+ u64 tsc;
+ u64 seq;
+ int raw_level;
+};
+
+/**
+ * struct hte_clk_info - Clock source info that HTE provider uses to timestamp.
+ *
+ * @hz: Supported clock rate in Hz, for example a 1 kHz clock = 1000.
+ * @type: Supported clock type.
+ */
+struct hte_clk_info {
+ u64 hz;
+ clockid_t type;
+};
+
+/**
+ * typedef hte_ts_cb_t - HTE timestamp data processing primary callback.
+ *
+ * The callback is used to push timestamp data to the client and it is
+ * not allowed to sleep.
+ *
+ * @ts: HW timestamp data.
+ * @data: Client supplied data.
+ */
+typedef enum hte_return (*hte_ts_cb_t)(struct hte_ts_data *ts, void *data);
+
+/**
+ * typedef hte_ts_sec_cb_t - HTE timestamp data processing secondary callback.
+ *
+ * This is used when the client needs further processing where it is
+ * allowed to sleep.
+ *
+ * @data: Client supplied data.
+ *
+ */
+typedef enum hte_return (*hte_ts_sec_cb_t)(void *data);
+
+/**
+ * struct hte_line_attr - Line attributes.
+ *
+ * @line_id: The logical ID understood by the consumers and providers.
+ * @line_data: Line data related to line_id.
+ * @edge_flags: Edge setup flags.
+ * @name: Descriptive name of the entity that is being monitored for the
+ * hardware timestamping. If NULL, the HTE core will construct the name.
+ *
+ */
+struct hte_line_attr {
+ u32 line_id;
+ void *line_data;
+ unsigned long edge_flags;
+ const char *name;
+};
+
+/**
+ * struct hte_ts_desc - HTE timestamp descriptor.
+ *
+ * This structure is a communication token between the consumers and the
+ * subsystem, and between the subsystem and the providers.
+ *
+ * @attr: The line attributes.
+ * @hte_data: Subsystem's private data, set by HTE subsystem.
+ */
+struct hte_ts_desc {
+ struct hte_line_attr attr;
+ void *hte_data;
+};
+
+/**
+ * struct hte_ops - HTE operations set by providers.
+ *
+ * @request: Hook for requesting a HTE timestamp. Returns 0 on success,
+ * non-zero on failure.
+ * @release: Hook for releasing a HTE timestamp. Returns 0 on success,
+ * non-zero on failure.
+ * @enable: Hook to enable the specified timestamp. Returns 0 on success,
+ * non-zero on failure.
+ * @disable: Hook to disable the specified timestamp. Returns 0 on success,
+ * non-zero on failure.
+ * @get_clk_src_info: Hook to get the clock information the provider uses
+ * to timestamp. Returns 0 on success and a negative error code on failure.
+ * On success the HTE subsystem fills in the provided struct hte_clk_info.
+ *
+ * The xlated_id parameter is used to communicate between the HTE subsystem
+ * and the providers, and is translated by the provider.
+ */
+struct hte_ops {
+ int (*request)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*release)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*enable)(struct hte_chip *chip, u32 xlated_id);
+ int (*disable)(struct hte_chip *chip, u32 xlated_id);
+ int (*get_clk_src_info)(struct hte_chip *chip,
+ struct hte_clk_info *ci);
+};
+
+/**
+ * struct hte_chip - Abstract HTE chip.
+ *
+ * @name: functional name of the HTE IP block.
+ * @dev: device providing the HTE.
+ * @ops: callbacks for this HTE.
+ * @nlines: number of lines/signals supported by this chip.
+ * @xlate_of: Callback which translates consumer supplied logical ids to
+ * physical ids; returns 0 on success and a negative error code on failure.
+ * On success it stores a value between 0 and @nlines in the xlated_id
+ * parameter.
+ * @xlate_plat: Same as above but for the consumers with no DT node.
+ * @match_from_linedata: Match HTE device using the line_data.
+ * @of_hte_n_cells: Number of cells used to form the HTE specifier.
+ * @gdev: HTE subsystem abstract device, internal to the HTE subsystem.
+ * @data: chip specific private data.
+ */
+struct hte_chip {
+ const char *name;
+ struct device *dev;
+ const struct hte_ops *ops;
+ u32 nlines;
+ int (*xlate_of)(struct hte_chip *gc,
+ const struct of_phandle_args *args,
+ struct hte_ts_desc *desc, u32 *xlated_id);
+ int (*xlate_plat)(struct hte_chip *gc, struct hte_ts_desc *desc,
+ u32 *xlated_id);
+ bool (*match_from_linedata)(const struct hte_chip *chip,
+ const struct hte_ts_desc *hdesc);
+ u8 of_hte_n_cells;
+
+ struct hte_device *gdev;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_HTE)
+/* HTE APIs for the providers */
+int devm_hte_register_chip(struct hte_chip *chip);
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+ struct hte_ts_data *data);
+
+/* HTE APIs for the consumers */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags, const char *name,
+ void *data);
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index);
+int hte_ts_put(struct hte_ts_desc *desc);
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data);
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+ hte_ts_cb_t cb, hte_ts_sec_cb_t tcb, void *data);
+int of_hte_req_count(struct device *dev);
+int hte_enable_ts(struct hte_ts_desc *desc);
+int hte_disable_ts(struct hte_ts_desc *desc);
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci);
+
+#else /* !CONFIG_HTE */
+static inline int devm_hte_register_chip(struct hte_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_push_ts_ns(const struct hte_chip *chip,
+ u32 xlated_id,
+ const struct hte_ts_data *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags,
+ const char *name, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_get(struct device *dev, struct hte_ts_desc *desc,
+ int index)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_put(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_hte_request_ts_ns(struct device *dev,
+ struct hte_ts_desc *desc,
+ hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb,
+ void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int of_hte_req_count(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_enable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_disable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_HTE */
+
+#endif
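
From the consumer side the API composes as in the sketch below, assuming a device-tree consumer with one timestamp entity at index 0; the my_* names are placeholders and the sleepable secondary callback is omitted:

#include <linux/hte.h>

static struct hte_ts_desc my_desc;

static enum hte_return my_ts_cb(struct hte_ts_data *ts, void *data)
{
	/* primary callback: atomic context, must not sleep */
	pr_debug("ts %llu seq %llu level %d\n",
		 ts->tsc, ts->seq, ts->raw_level);
	return HTE_CB_HANDLED;
}

static int my_hte_setup(struct device *dev)
{
	int ret;

	ret = hte_ts_get(dev, &my_desc, 0);
	if (ret)
		return ret;

	/* managed request: released automatically on driver detach */
	ret = devm_hte_request_ts_ns(dev, &my_desc, my_ts_cb, NULL, dev);
	if (ret)
		hte_ts_put(&my_desc);

	return ret;
}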
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
deleted file mode 100644
index d4a527e58434..000000000000
--- a/include/linux/htirq.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef LINUX_HTIRQ_H
-#define LINUX_HTIRQ_H
-
-struct pci_dev;
-struct irq_data;
-
-struct ht_irq_msg {
- u32 address_lo; /* low 32 bits of the ht irq message */
- u32 address_hi; /* high 32 bits of the it irq message */
-};
-
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
- struct ht_irq_msg *msg);
-
-struct ht_irq_cfg {
- struct pci_dev *dev;
- /* Update callback used to cope with buggy hardware */
- ht_irq_update_t *update;
- unsigned pos;
- unsigned idx;
- struct ht_irq_msg msg;
-};
-
-/* Helper functions.. */
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void mask_ht_irq(struct irq_data *data);
-void unmask_ht_irq(struct irq_data *data);
-
-/* The arch hook for getting things started */
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
- ht_irq_update_t *update);
-void arch_teardown_ht_irq(unsigned int irq);
-
-/* For drivers of buggy hardware */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
-
-#endif /* LINUX_HTIRQ_H */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ee696347f928..ae7f21aad0ac 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -1,55 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
-#include <linux/sched/coredump.h>
+#include <linux/mm_types.h>
#include <linux/fs.h> /* only for vma_is_dax() */
+#include <linux/kobject.h>
-extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
-extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
- struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
-extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
- struct vm_area_struct *vma);
+vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+bool huge_pmd_set_accessed(struct vm_fault *vmf);
+int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+ struct vm_area_struct *vma);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
+void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif
-extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
-extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr,
- pmd_t *pmd,
- unsigned int flags);
-extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr, unsigned long next);
-extern int zap_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr);
-extern int zap_huge_pud(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pud_t *pud, unsigned long addr);
-extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned char *vec);
-extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
-extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot,
- int prot_numa);
-int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, pfn_t pfn, bool write);
-int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, pfn_t pfn, bool write);
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
+bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, unsigned long next);
+int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr);
+int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
+ unsigned long addr);
+bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
+int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
+vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
+ bool write);
+vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
+ bool write);
+
enum transparent_hugepage_flag {
+ TRANSPARENT_HUGEPAGE_UNSUPPORTED,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
@@ -58,240 +56,640 @@ enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
-#ifdef CONFIG_DEBUG_VM
- TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
-#endif
};
struct kobject;
struct kobj_attribute;
-extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count,
- enum transparent_hugepage_flag flag);
-extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf,
- enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count,
+ enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf,
+ enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
+extern struct kobj_attribute thpsize_shmem_enabled_attr;
+
+/*
+ * Mask of all large folio orders supported for anonymous THP; all orders up to
+ * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
+ * (which is a limitation of the THP implementation).
+ */
+#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
+
+/*
+ * Mask of all large folio orders supported for file THP. Folios in a DAX
+ * file are never split and the MAX_PAGECACHE_ORDER limit does not apply
+ * to them. The same applies to PFNMAPs, where there is neither a page* nor
+ * a pagecache.
+ */
+#define THP_ORDERS_ALL_SPECIAL \
+ (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_FILE_DEFAULT \
+ ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
+
+/*
+ * Mask of all large folio orders supported for THP.
+ */
+#define THP_ORDERS_ALL \
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
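To make the mask encoding concrete, here is a minimal standalone sketch of the arithmetic, assuming PMD_ORDER == 9 (x86-64 with 4K pages; the value is architecture-dependent and illustrative only):

	#include <stdio.h>

	/* Assumed for illustration: PMD_ORDER == 9 (x86-64, 4K pages). */
	#define EX_PMD_ORDER	9
	#define EX_BIT(n)	(1UL << (n))

	int main(void)
	{
		/* All orders up to and including PMD_ORDER, minus order-0 and order-1. */
		unsigned long anon = (EX_BIT(EX_PMD_ORDER + 1) - 1) &
				     ~(EX_BIT(0) | EX_BIT(1));

		printf("anon mask: %#lx\n", anon);	/* 0x3fc: bits 2..9 set */
		return 0;
	}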
+
+enum tva_type {
+ TVA_SMAPS, /* Exposing "THPeligible:" in smaps. */
+ TVA_PAGEFAULT, /* Serving a page fault. */
+ TVA_KHUGEPAGED, /* Khugepaged collapse. */
+ TVA_FORCED_COLLAPSE, /* Forced collapse (e.g. MADV_COLLAPSE). */
+};
-#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
-#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+#define thp_vma_allowable_order(vma, vm_flags, type, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, type, BIT(order)))
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define split_folio(f) split_folio_to_list(f, NULL)
+
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
-#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
+#define HPAGE_PUD_SHIFT PUD_SHIFT
+#else
+#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
+#endif
+
+#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
+#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
+#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
-#define HPAGE_PUD_SHIFT PUD_SHIFT
-#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
+#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
+#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+
+enum mthp_stat_item {
+ MTHP_STAT_ANON_FAULT_ALLOC,
+ MTHP_STAT_ANON_FAULT_FALLBACK,
+ MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+ MTHP_STAT_ZSWPOUT,
+ MTHP_STAT_SWPIN,
+ MTHP_STAT_SWPIN_FALLBACK,
+ MTHP_STAT_SWPIN_FALLBACK_CHARGE,
+ MTHP_STAT_SWPOUT,
+ MTHP_STAT_SWPOUT_FALLBACK,
+ MTHP_STAT_SHMEM_ALLOC,
+ MTHP_STAT_SHMEM_FALLBACK,
+ MTHP_STAT_SHMEM_FALLBACK_CHARGE,
+ MTHP_STAT_SPLIT,
+ MTHP_STAT_SPLIT_FAILED,
+ MTHP_STAT_SPLIT_DEFERRED,
+ MTHP_STAT_NR_ANON,
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
+ __MTHP_STAT_COUNT
+};
-extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+struct mthp_stat {
+ unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+};
+
+DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
+
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+ if (order <= 0 || order > PMD_ORDER)
+ return;
+
+ this_cpu_add(mthp_stats.stats[order][item], delta);
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+ mod_mthp_stat(order, item, 1);
+}
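As a usage sketch (not part of this patch), a caller that allocated an order-4 anonymous folio would bump the per-size counters like this; out-of-range orders are silently ignored by mod_mthp_stat():

	/* After a successful order-4 anonymous THP allocation: */
	count_mthp_stat(4, MTHP_STAT_ANON_FAULT_ALLOC);

	/* On fallback when the order-4 attempt fails: */
	count_mthp_stat(4, MTHP_STAT_ANON_FAULT_FALLBACK);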
+
+#else
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long transparent_hugepage_flags;
+extern unsigned long huge_anon_orders_always;
+extern unsigned long huge_anon_orders_madvise;
+extern unsigned long huge_anon_orders_inherit;
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool hugepage_global_enabled(void)
{
- if (vma->vm_flags & VM_NOHUGEPAGE)
- return false;
+ return transparent_hugepage_flags &
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
+}
- if (is_vma_temporary_stack(vma))
- return false;
+static inline bool hugepage_global_always(void)
+{
+ return transparent_hugepage_flags &
+ (1<<TRANSPARENT_HUGEPAGE_FLAG);
+}
+
+static inline int highest_order(unsigned long orders)
+{
+ return fls_long(orders) - 1;
+}
- if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+static inline int next_order(unsigned long *orders, int prev)
+{
+ *orders &= ~BIT(prev);
+ return highest_order(*orders);
+}
+
+/*
+ * Do the below checks:
+ * - For file vma, check if the linear page offset of vma is
+ * order-aligned within the file. The hugepage is
+ * guaranteed to be order-aligned within the file, but we must
+ * check that the order-aligned addresses in the VMA map to
+ * order-aligned offsets within the file, else the hugepage will
+ * not be mappable.
+ * - For all vmas, check if the haddr is in an aligned hugepage
+ * area.
+ */
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
+{
+ unsigned long hpage_size = PAGE_SIZE << order;
+ unsigned long haddr;
+
+ /* Don't have to check pgoff for anonymous vma */
+ if (!vma_is_anonymous(vma)) {
+ if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+ hpage_size >> PAGE_SHIFT))
+ return false;
+ }
+
+ haddr = ALIGN_DOWN(addr, hpage_size);
+
+ if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
return false;
+ return true;
+}
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
- return true;
+/*
+ * Filter the bitfield of input orders to the ones suitable for use in the vma.
+ * See thp_vma_suitable_order().
+ * All orders that pass the checks are returned as a bitfield.
+ */
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
+{
+ int order;
- if (vma_is_dax(vma))
- return true;
+ /*
+ * Iterate over orders, highest to lowest, removing orders that don't
+ * meet alignment requirements from the set. Exit loop at first order
+ * that meets requirements, since all lower orders must also meet
+ * requirements.
+ */
- if (transparent_hugepage_flags &
- (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
- return !!(vma->vm_flags & VM_HUGEPAGE);
+ order = highest_order(orders);
- return false;
+ while (orders) {
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ return orders;
}
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ vm_flags_t vm_flags,
+ enum tva_type type,
+ unsigned long orders);
+
+/**
+ * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
+ * @vma: the vm area to check
+ * @vm_flags: use these vm_flags instead of vma->vm_flags
+ * @type: TVA type
+ * @orders: bitfield of all orders to consider
+ *
+ * Calculates the intersection of the requested hugepage orders and the allowed
+ * hugepage orders for the provided vma. Permitted orders are encoded as a set
+ * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
+ * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
+ *
+ * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
+ * orders are allowed.
+ */
+static inline
+unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ vm_flags_t vm_flags,
+ enum tva_type type,
+ unsigned long orders)
+{
+ /*
+ * Optimization to check if required orders are enabled early. Only
+ * forced collapse ignores sysfs configs.
+ */
+ if (type != TVA_FORCED_COLLAPSE && vma_is_anonymous(vma)) {
+ unsigned long mask = READ_ONCE(huge_anon_orders_always);
+
+ if (vm_flags & VM_HUGEPAGE)
+ mask |= READ_ONCE(huge_anon_orders_madvise);
+ if (hugepage_global_always() ||
+ ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
+ mask |= READ_ONCE(huge_anon_orders_inherit);
+
+ orders &= mask;
+ if (!orders)
+ return 0;
+ }
+
+ return __thp_vma_allowable_orders(vma, vm_flags, type, orders);
+}
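As an illustrative aside, a fault path might combine these helpers as below (a sketch; vmf and the allocation step are assumed context, not part of this patch): intersect with the allowed orders, filter for alignment, then walk the survivors from highest to lowest.

	unsigned long orders;
	int order;

	orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT,
					  THP_ORDERS_ALL_ANON);
	orders = thp_vma_suitable_orders(vma, vmf->address, orders);

	for (order = highest_order(orders); orders;
	     order = next_order(&orders, order)) {
		/* try to allocate and map a folio of this order; stop on success */
	}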
+
+struct thpsize {
+ struct kobject kobj;
+ struct list_head node;
+ int order;
+};
+
+#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
+
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
-#ifdef CONFIG_DEBUG_VM
-#define transparent_hugepage_debug_cow() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
-#else /* CONFIG_DEBUG_VM */
-#define transparent_hugepage_debug_cow() 0
-#endif /* CONFIG_DEBUG_VM */
-extern unsigned long thp_get_unmapped_area(struct file *filp,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags);
+/*
+ * Check whether THPs are explicitly disabled for this VMA, for example,
+ * through madvise or prctl.
+ */
+static inline bool vma_thp_disabled(struct vm_area_struct *vma,
+ vm_flags_t vm_flags, bool forced_collapse)
+{
+ /* Are THPs disabled for this VMA? */
+ if (vm_flags & VM_NOHUGEPAGE)
+ return true;
+ /* Are THPs disabled for all VMAs in the whole process? */
+ if (mm_flags_test(MMF_DISABLE_THP_COMPLETELY, vma->vm_mm))
+ return true;
+ /*
+ * Are THPs disabled only for VMAs where we didn't get explicit
+ * advice to use them?
+ */
+ if (vm_flags & VM_HUGEPAGE)
+ return false;
+ /*
+ * Forcing a collapse (e.g., MADV_COLLAPSE) is clear advice to
+ * use THPs.
+ */
+ if (forced_collapse)
+ return false;
+ return mm_flags_test(MMF_DISABLE_THP_EXCEPT_ADVISED, vma->vm_mm);
+}
+
+static inline bool thp_disabled_by_hw(void)
+{
+ /* If the hardware/firmware marked hugepage support disabled. */
+ return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
+}
+
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags);
+unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags);
+
+enum split_type {
+ SPLIT_TYPE_UNIFORM,
+ SPLIT_TYPE_NON_UNIFORM,
+};
-extern void prep_transhuge_page(struct page *page);
-extern void free_transhuge_page(struct page *page);
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
+int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order);
+int folio_split_unmapped(struct folio *folio, unsigned int new_order);
+int min_order_for_split(struct folio *folio);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+ enum split_type split_type, bool warns);
+int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
+ struct list_head *list);
+
+static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order)
+{
+ return __split_huge_page_to_list_to_order(page, list, new_order);
+}
+static inline int split_huge_page_to_order(struct page *page, unsigned int new_order)
+{
+ return split_huge_page_to_list_to_order(page, NULL, new_order);
+}
-bool can_split_huge_page(struct page *page, int *pextra_pins);
-int split_huge_page_to_list(struct page *page, struct list_head *list);
+/**
+ * try_folio_split_to_order() - try to split a @folio at @page to @new_order
+ * using a non-uniform split.
+ * @folio: folio to be split
+ * @page: split to @new_order at the given page
+ * @new_order: the target split order
+ *
+ * Try to split a @folio at @page to @new_order using a non-uniform split.
+ * If a non-uniform split is not supported, fall back to a uniform split.
+ * After-split folios are put back on the LRU list. Use min_order_for_split()
+ * to get the lower bound of @new_order.
+ *
+ * Return: 0 - split is successful, otherwise split failed.
+ */
+static inline int try_folio_split_to_order(struct folio *folio,
+ struct page *page, unsigned int new_order)
+{
+ if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
+ return split_huge_page_to_order(&folio->page, new_order);
+ return folio_split(folio, new_order, page, NULL);
+}
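An illustrative call sequence (a sketch; the surrounding truncation logic is assumed): query the minimum legal order for the folio, then attempt a non-uniform split toward it around the page of interest.

	int new_order = min_order_for_split(folio);

	/* Split around 'page'; falls back to a uniform split if unsupported. */
	if (new_order >= 0 &&
	    !try_folio_split_to_order(folio, page, new_order)) {
		/* success: the after-split folios are back on the LRU list */
	}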
static inline int split_huge_page(struct page *page)
{
- return split_huge_page_to_list(page, NULL);
+ return split_huge_page_to_list_to_order(page, NULL, 0);
}
-void deferred_split_huge_page(struct page *page);
+void deferred_split_folio(struct folio *folio, bool partially_mapped);
+#ifdef CONFIG_MEMCG
+void reparent_deferred_split_queue(struct mem_cgroup *memcg);
+#endif
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page);
+ unsigned long address, bool freeze);
+
+/**
+ * pmd_is_huge() - Is this PMD either a huge PMD entry or a software leaf entry?
+ * @pmd: The PMD to check.
+ *
+ * A huge PMD entry is a non-empty entry that is either present and marked
+ * huge, or a software leaf entry. This check may be performed without the
+ * appropriate locks held, in which case the condition should be rechecked
+ * after they are acquired.
+ *
+ * Returns: true if this PMD is huge, false otherwise.
+ */
+static inline bool pmd_is_huge(pmd_t pmd)
+{
+ if (pmd_present(pmd)) {
+ return pmd_trans_huge(pmd);
+ } else if (!pmd_none(pmd)) {
+ /*
+ * Non-present PMDs must be valid huge non-present entries. We
+ * cannot assert that here due to header dependency issues.
+ */
+ return true;
+ }
+
+ return false;
+}
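A minimal sketch of the check-then-recheck pattern this comment describes, using pmd_trans_huge_lock() from later in this header (pmdp_get_lockless() is the usual lockless read helper):

	if (pmd_is_huge(pmdp_get_lockless(pmd))) {
		spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

		if (ptl) {
			/* still huge, now stable: operate on the huge PMD */
			spin_unlock(ptl);
		}
	}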
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
- if (pmd_trans_huge(*____pmd) \
- || pmd_devmap(*____pmd)) \
+ if (pmd_is_huge(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
- false, NULL); \
+ false); \
} while (0)
-
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
- bool freeze, struct page *page);
+ bool freeze);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+#else
+static inline int
+change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags) { return 0; }
+#endif
+
#define split_huge_pud(__vma, __pud, __address) \
do { \
pud_t *____pud = (__pud); \
- if (pud_trans_huge(*____pud) \
- || pud_devmap(*____pud)) \
+ if (pud_trans_huge(*____pud)) \
__split_huge_pud(__vma, __pud, __address); \
} while (0)
-extern int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice);
-extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next);
-extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma);
-extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
- struct vm_area_struct *vma);
-/* mmap_sem must be held on entry */
+int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
+ int advice);
+int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, bool *lock_dropped);
+void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct vm_area_struct *next);
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
+spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
+
+/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
- VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
- if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+ if (pmd_is_huge(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
- else
- return NULL;
+
+ return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
- VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
- if (pud_trans_huge(*pud) || pud_devmap(*pud))
+ if (pud_trans_huge(*pud))
return __pud_trans_huge_lock(pud, vma);
else
return NULL;
}
-static inline int hpage_nr_pages(struct page *page)
+
+/**
+ * folio_test_pmd_mappable - Can we map this folio with a PMD?
+ * @folio: The folio to test
+ *
+ * Return: true - @folio can be mapped, false - @folio cannot be mapped.
+ */
+static inline bool folio_test_pmd_mappable(struct folio *folio)
{
- if (unlikely(PageTransHuge(page)))
- return HPAGE_PMD_NR;
- return 1;
+ return folio_order(folio) >= HPAGE_PMD_ORDER;
}
-struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags);
-struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, int flags);
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
-extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf);
-extern struct page *huge_zero_page;
+extern struct folio *huge_zero_folio;
+extern unsigned long huge_zero_pfn;
-static inline bool is_huge_zero_page(struct page *page)
+static inline bool is_huge_zero_folio(const struct folio *folio)
{
- return ACCESS_ONCE(huge_zero_page) == page;
+ VM_WARN_ON_ONCE(!folio);
+
+ return READ_ONCE(huge_zero_folio) == folio;
+}
+
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+ return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
}
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return is_huge_zero_page(pmd_page(pmd));
+ return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
}
-static inline bool is_huge_zero_pud(pud_t pud)
+struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
+void mm_put_huge_zero_folio(struct mm_struct *mm);
+
+static inline struct folio *get_persistent_huge_zero_folio(void)
{
- return false;
+ if (!IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
+ return NULL;
+
+ if (unlikely(!huge_zero_folio))
+ return NULL;
+
+ return huge_zero_folio;
}
-struct page *mm_get_huge_zero_page(struct mm_struct *mm);
-void mm_put_huge_zero_page(struct mm_struct *mm);
+static inline bool thp_migration_supported(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
+}
-#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
+void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmd, bool freeze);
+bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmdp, struct folio *folio);
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
+ struct vm_area_struct *vma, unsigned long haddr);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
-#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
-#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
-
-#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
-#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
-#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
-#define hpage_nr_pages(x) 1
+static inline bool folio_test_pmd_mappable(struct folio *folio)
+{
+ return false;
+}
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
return false;
}
-static inline void prep_transhuge_page(struct page *page) {}
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
+{
+ return 0;
+}
+
+static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ vm_flags_t vm_flags,
+ enum tva_type type,
+ unsigned long orders)
+{
+ return 0;
+}
#define transparent_hugepage_flags 0UL
#define thp_get_unmapped_area NULL
+static inline unsigned long
+thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
+{
+ return 0;
+}
+
static inline bool
-can_split_huge_page(struct page *page, int *pextra_pins)
+can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
- BUILD_BUG();
return false;
}
static inline int
-split_huge_page_to_list(struct page *page, struct list_head *list)
+split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order)
{
- return 0;
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
+}
+static inline int split_huge_page_to_order(struct page *page, unsigned int new_order)
+{
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
}
static inline int split_huge_page(struct page *page)
{
- return 0;
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
+}
+
+static inline int min_order_for_split(struct folio *folio)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
+}
+
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
+}
+
+static inline int try_folio_split_to_order(struct folio *folio,
+ struct page *page, unsigned int new_order)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
}
-static inline void deferred_split_huge_page(struct page *page) {}
+
+static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
+static inline void reparent_deferred_split_queue(struct mem_cgroup *memcg) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze) {}
+static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd,
+ bool freeze) {}
+
+static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp,
+ struct folio *folio)
+{
+ return false;
+}
#define split_huge_pud(__vma, __pmd, __address) \
do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice)
+ vm_flags_t *vm_flags, int advice)
{
- BUG();
- return 0;
+ return -EINVAL;
}
+
+static inline int madvise_collapse(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end, bool *lock_dropped)
+{
+ return -EINVAL;
+}
+
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
- long adjust_next)
+ struct vm_area_struct *next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
@@ -305,37 +703,106 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
return NULL;
}
-static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
+static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+{
+ return 0;
+}
+
+static inline vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
{
return 0;
}
-static inline bool is_huge_zero_page(struct page *page)
+static inline bool is_huge_zero_folio(const struct folio *folio)
{
return false;
}
-static inline bool is_huge_zero_pud(pud_t pud)
+static inline bool is_huge_zero_pfn(unsigned long pfn)
{
return false;
}
-static inline void mm_put_huge_zero_page(struct mm_struct *mm)
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return false;
+}
+
+static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
return;
}
-static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmd, int flags)
+static inline bool thp_migration_supported(void)
{
- return NULL;
+ return false;
+}
+
+static inline int highest_order(unsigned long orders)
+{
+ return 0;
}
-static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
- unsigned long addr, pud_t *pud, int flags)
+static inline int next_order(unsigned long *orders, int prev)
+{
+ return 0;
+}
+
+static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
+}
+
+static inline int change_huge_pud(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pud_t *pudp,
+ unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ return 0;
+}
+
+static inline struct folio *get_persistent_huge_zero_folio(void)
{
return NULL;
}
+
+static inline bool pmd_is_huge(pmd_t pmd)
+{
+ return false;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int split_folio_to_list_to_order(struct folio *folio,
+ struct list_head *list, int new_order)
+{
+ return split_huge_page_to_list_to_order(&folio->page, list, new_order);
+}
+
+static inline int split_folio_to_order(struct folio *folio, int new_order)
+{
+ return split_folio_to_list_to_order(folio, NULL, new_order);
+}
+
+/**
+ * largest_zero_folio - Get the largest zero size folio available
+ *
+ * This function should be used when mm_get_huge_zero_folio() cannot be
+ * used because the caller has no appropriate mm lifetime to tie the huge
+ * zero folio to.
+ *
+ * Callers must deduce the size of the returned folio with folio_size()
+ * instead of assuming a fixed folio size.
+ *
+ * Return: pointer to PMD sized zero folio if CONFIG_PERSISTENT_HUGE_ZERO_FOLIO
+ * is enabled or a single page sized zero folio
+ */
+static inline struct folio *largest_zero_folio(void)
+{
+ struct folio *folio = get_persistent_huge_zero_folio();
+
+ if (folio)
+ return folio;
+
+ return page_folio(ZERO_PAGE(0));
+}
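A usage sketch with a hypothetical caller: a consumer that has no mm to pin the huge zero folio against can still source zeroes in folio_size() chunks.

	struct folio *zero = largest_zero_folio();
	size_t chunk = folio_size(zero);	/* PMD-sized or PAGE_SIZE */

	/* e.g. copy or map from folio_address(zero) in 'chunk'-sized steps */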
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0ed8e41aaf11..019a1c5281e4 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1,59 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H
+#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
+#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+#include <linux/gfp.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/nodemask.h>
struct ctl_table;
struct user_struct;
struct mmu_gather;
+struct node;
-#ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
-typedef struct { unsigned long pd; } hugepd_t;
-#define is_hugepd(hugepd) (0)
-#define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr);
-#endif
-
+void free_huge_folio(struct folio *folio);
#ifdef CONFIG_HUGETLB_PAGE
-#include <linux/mempolicy.h>
+#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
+/*
+ * For a HugeTLB page, there is more metadata to save in the struct page. But
+ * the head struct page cannot meet our needs, so we have to abuse other tail
+ * struct pages to store the metadata.
+ */
+#define __NR_USED_SUBPAGE 3
+
struct hugepage_subpool {
spinlock_t lock;
long count;
long max_hpages; /* Maximum huge pages or -1 if no maximum. */
long used_hpages; /* Used count against maximum, includes */
- /* both alloced and reserved pages. */
+ /* both allocated and reserved pages. */
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
- /* sasitfy minimum size. */
+ /* satisfy minimum size. */
};
struct resv_map {
@@ -63,7 +55,59 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
+#ifdef CONFIG_CGROUP_HUGETLB
+ /*
+ * On private mappings, the counter to uncharge reservations is stored
+ * here. If these fields are 0, then either the mapping is shared, or
+ * cgroup accounting is disabled for this resv_map.
+ */
+ struct page_counter *reservation_counter;
+ unsigned long pages_per_hpage;
+ struct cgroup_subsys_state *css;
+#endif
+};
+
+/*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+ *
+ * The region data structures are embedded into a resv_map and protected
+ * by a resv_map's lock. The set of regions within the resv_map represent
+ * reservations for huge pages, or huge pages that have already been
+ * instantiated within the map. The from and to elements are huge page
+ * indices into the associated mapping. from indicates the starting index
+ * of the region. to represents the first index past the end of the region.
+ *
+ * For example, a file region structure with from == 0 and to == 4 represents
+ * four huge pages in a mapping. It is important to note that the to element
+ * represents the first element past the end of the region. This is used in
+ * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
+ *
+ * Interval notation of the form [from, to) will be used to indicate that
+ * the endpoint from is inclusive and to is exclusive.
+ */
+struct file_region {
+ struct list_head link;
+ long from;
+ long to;
+#ifdef CONFIG_CGROUP_HUGETLB
+ /*
+ * On shared mappings, each reserved region appears as a struct
+ * file_region in resv_map. These fields hold the info needed to
+ * uncharge each reservation.
+ */
+ struct page_counter *reservation_counter;
+ struct cgroup_subsys_state *css;
+#endif
};
+
+struct hugetlb_vma_lock {
+ struct kref refs;
+ struct rw_semaphore rw_sema;
+ struct vm_area_struct *vma;
+};
+
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -76,90 +120,172 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-
-#ifdef CONFIG_NUMA
-int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-#endif
-
-int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
- struct page **, struct vm_area_struct **,
- unsigned long *, unsigned long *, long, unsigned int,
- int *);
+void hugetlb_dup_vma_private(struct vm_area_struct *vma);
+void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
+int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr, unsigned long new_addr,
+ unsigned long len);
+int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
+ struct vm_area_struct *, struct vm_area_struct *);
void unmap_hugepage_range(struct vm_area_struct *,
- unsigned long, unsigned long, struct page *);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end,
+ struct folio *, zap_flags_t);
+void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct page *ref_page);
-void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page);
+ struct folio *, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
-int hugetlb_report_node_meminfo(int, char *);
-void hugetlb_show_meminfo(void);
+int hugetlb_report_node_meminfo(char *buf, int len, int nid);
+void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
-int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- struct page **pagep);
-int hugetlb_reserve_pages(struct inode *inode, long from, long to,
- struct vm_area_struct *vma,
- vm_flags_t vm_flags);
+#ifdef CONFIG_USERFAULTFD
+int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ uffd_flags_t flags,
+ struct folio **foliop);
+#endif /* CONFIG_USERFAULTFD */
+long hugetlb_reserve_pages(struct inode *inode, long from, long to,
+ struct vm_area_desc *desc, vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-bool isolate_huge_page(struct page *page, struct list_head *list);
-void putback_active_hugepage(struct page *page);
-void free_huge_page(struct page *page);
+bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list);
+int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared);
+void folio_putback_hugetlb(struct folio *folio);
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
- struct vm_area_struct *vma,
- struct address_space *mapping,
- pgoff_t idx, unsigned long address);
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
+
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pud_t *pud);
+bool hugetlbfs_pagecache_present(struct hstate *h,
+ struct vm_area_struct *vma,
+ unsigned long address);
+
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+extern int sysctl_hugetlb_shm_group __read_mostly;
+extern struct list_head huge_boot_pages[MAX_NUMNODES];
-extern int hugepages_treat_as_movable;
-extern int sysctl_hugetlb_shm_group;
-extern struct list_head huge_boot_pages;
+void hugetlb_bootmem_alloc(void);
+bool hugetlb_bootmem_allocated(void);
+extern nodemask_t hugetlb_bootmem_nodes;
+void hugetlb_bootmem_set_nodes(void);
/* arch callbacks */
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+#ifndef CONFIG_HIGHPTE
+/*
+ * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
+ * which may go down to the lowest PTE level in their huge_pte_offset() and
+ * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
+ */
+static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
+{
+ return pte_offset_kernel(pmd, address);
+}
+static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
+{
+ return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
+}
+#endif
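For illustration only: a hypothetical architecture whose huge pages can live at both the PMD and the PTE level might build its huge_pte_alloc() on these helpers roughly as follows (ex_huge_pte_alloc and the simplified error handling are assumptions, not kernel code):

	pte_t *ex_huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long addr, unsigned long sz)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		p4d_t *p4d = p4d_alloc(mm, pgd, addr);
		pud_t *pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
		pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;

		if (!pmd)
			return NULL;
		if (sz == PMD_SIZE)
			return (pte_t *)pmd;		/* PMD-level huge page */
		return pte_alloc_huge(mm, pmd, addr);	/* PTE-level huge page */
	}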
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz);
+/*
+ * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
+ * Returns the pte_t* if found, or NULL if the address is not mapped.
+ *
+ * IMPORTANT: we should normally not directly call this function, instead
+ * this is only a common interface to implement arch-specific
+ * walker. Please use hugetlb_walk() instead, because that will attempt to
+ * verify the locking for you.
+ *
+ * Since this function will walk all the pgtable pages (including not only
+ * high-level pgtable page, but also PUD entry that can be unshared
+ * concurrently for VM_SHARED), the caller of this function should be
+ * responsible of its thread safety. One can follow this rule:
+ *
+ * (1) For private mappings: pmd unsharing is not possible, so holding the
+ * mmap_lock for either read or write is sufficient. Most callers
+ * already hold the mmap_lock, so normally, no special action is
+ * required.
+ *
+ * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
+ * pgtable page can go away from under us! It can be done by a pmd
+ * unshare with a follow up munmap() on the other process), then we
+ * need either:
+ *
+ * (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
+ * won't happen upon the range (it also makes sure the pte_t we
+ * read is the right and stable one), or,
+ *
+ * (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
+ * sure even if unshare happened the racy unmap() will wait until
+ * i_mmap_rwsem is released.
+ *
+ * Option (2.1) is the safest, which guarantees pte stability from the pmd
+ * sharing point of view, until the vma lock is released. Option (2.2)
+ * doesn't protect against a concurrent pmd unshare, but it makes sure the
+ * pgtable page is safe to access.
+ */
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
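A minimal sketch of rule (2.1), using the hugetlb vma lock primitives declared later in this header (the pte handling itself is elided):

	pte_t *ptep;

	hugetlb_vma_lock_read(vma);
	ptep = huge_pte_offset(vma->vm_mm, addr,
			       huge_page_size(hstate_vma(vma)));
	if (ptep) {
		/* the pte is stable against pmd unsharing while the lock is held */
	}
	hugetlb_vma_unlock_read(vma);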
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write);
-struct page *follow_huge_pd(struct vm_area_struct *vma,
- unsigned long address, hugepd_t hpd,
- int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags);
-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int flags);
-struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
- pgd_t *pgd, int flags);
-
-int pmd_huge(pmd_t pmd);
-int pud_huge(pud_t pud);
-unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot);
-
-bool is_hugetlb_entry_migration(pte_t pte);
+unsigned long hugetlb_mask_last_page(struct hstate *h);
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end);
+
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_end(vma, details);
+}
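Sketch of the intended bracketing in a hypothetical zap path; note that __hugetlb_zap_begin() may widen [start, end) to cover a shared PMD range, so the widened bounds are what must be passed to the unmap:

	unsigned long start = vma->vm_start, end = vma->vm_end;

	hugetlb_zap_begin(vma, &start, &end);	/* may widen the range */
	unmap_hugepage_range(vma, start, end, NULL, 0);
	hugetlb_zap_end(vma, NULL);		/* paired teardown */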
+
+void hugetlb_vma_lock_read(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
+void hugetlb_vma_lock_write(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
+void hugetlb_vma_lock_release(struct kref *kref);
+long hugetlb_change_protection(struct vm_area_struct *vma,
+ unsigned long address, unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags);
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);
+void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
+int hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+
#else /* !CONFIG_HUGETLB_PAGE */
-static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
+{
+}
+
+static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -168,78 +294,185 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
-#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
-#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
-#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+ struct folio *folio)
+{
+ return NULL;
+}
+
+static inline int huge_pmd_unshare(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_begin(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+ struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+}
+
+static inline int copy_hugetlb_page_range(struct mm_struct *dst,
+ struct mm_struct *src,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
+{
+ BUG();
+ return 0;
+}
+
+static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr,
+ unsigned long new_addr,
+ unsigned long len)
+{
+ BUG();
+ return 0;
+}
+
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
-#define hugetlb_report_node_meminfo(n, buf) 0
-static inline void hugetlb_show_meminfo(void)
+
+static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
+ return 0;
}
-#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
-#define follow_huge_pmd(mm, addr, pmd, flags) NULL
-#define follow_huge_pud(mm, addr, pud, flags) NULL
-#define follow_huge_pgd(mm, addr, pgd, flags) NULL
-#define prepare_hugepage_range(file, addr, len) (-EINVAL)
-#define pmd_huge(x) 0
-#define pud_huge(x) 0
-#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
-#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
-#define huge_pte_offset(mm, address, sz) 0
-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+static inline void hugetlb_show_meminfo_node(int nid)
+{
+}
+
+static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
- return false;
}
-#define putback_active_hugepage(p) do {} while (0)
-static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot)
+static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+}
+
+static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+ return 1;
+}
+
+static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
{
return 0;
}
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+#ifdef CONFIG_USERFAULTFD
+static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ uffd_flags_t flags,
+ struct folio **foliop)
{
BUG();
+ return 0;
+}
+#endif /* CONFIG_USERFAULTFD */
+
+static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
+{
+ return NULL;
+}
+
+static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
+{
+ return false;
+}
+
+static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
+{
+ return 0;
+}
+
+static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared)
+{
+ return 0;
+}
+
+static inline void folio_putback_hugetlb(struct folio *folio)
+{
+}
+
+static inline void move_hugetlb_state(struct folio *old_folio,
+ struct folio *new_folio, int reason)
+{
+}
+
+static inline long hugetlb_change_protection(
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ return 0;
}
static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+ unsigned long end, struct folio *folio,
+ zap_flags_t zap_flags)
{
BUG();
}
-#endif /* !CONFIG_HUGETLB_PAGE */
-/*
- * hugepages at page global directory. If arch support
- * hugepages at pgd level, they need to define this.
- */
-#ifndef pgd_huge
-#define pgd_huge(x) 0
-#endif
-#ifndef p4d_huge
-#define p4d_huge(x) 0
-#endif
-
-#ifndef pgd_write
-static inline int pgd_write(pgd_t pgd)
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags)
{
BUG();
return 0;
}
-#endif
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
+static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+
+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {}
+
+static inline int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_HUGETLB_PAGE */
+
+#ifndef pgd_write
+static inline int pgd_write(pgd_t pgd)
{
BUG();
return 0;
@@ -278,48 +511,160 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
return sb->s_fs_info;
}
-extern const struct file_operations hugetlbfs_file_operations;
+struct hugetlbfs_inode_info {
+ struct inode vfs_inode;
+ unsigned int seals;
+};
+
+static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
+{
+ return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
+}
+
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
- struct user_struct **user, int creat_flags,
- int page_size_log);
+ int creat_flags, int page_size_log);
-static inline bool is_file_hugepages(struct file *file)
+static inline bool is_file_hugepages(const struct file *file)
{
- if (file->f_op == &hugetlbfs_file_operations)
- return true;
-
- return is_file_shm_hugepages(file);
+ return file->f_op->fop_flags & FOP_HUGE_PAGES;
}
-
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return HUGETLBFS_SB(i->i_sb)->hstate;
+}
#else /* !CONFIG_HUGETLBFS */
#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
- struct user_struct **user, int creat_flags,
- int page_size_log)
+ int creat_flags, int page_size_log)
{
return ERR_PTR(-ENOSYS);
}
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return NULL;
+}
#endif /* !CONFIG_HUGETLBFS */
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
-#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+/*
+ * hugetlb page specific state flags. These flags are located in page.private
+ * of the hugetlb head page. Functions created via the below macros should be
+ * used to manipulate these flags.
+ *
+ * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
+ * allocation time. Cleared when page is fully instantiated. Free
+ * routine checks flag to restore a reservation on error paths.
+ * Synchronization: Examined or modified by code that knows it has
+ * the only reference to page. i.e. After allocation but before use
+ * or when the page is being freed.
+ * HPG_migratable - Set after a newly allocated page is added to the page
+ * cache and/or page tables. Indicates the page is a candidate for
+ * migration.
+ * Synchronization: Initially set after new page allocation with no
+ * locking. When examined and modified during migration processing
+ * (isolate, migrate, putback) the hugetlb_lock is held.
+ * HPG_temporary - Set on a page that is temporarily allocated from the buddy
+ * allocator. Typically used for migration target pages when no pages
+ * are available in the pool. The hugetlb free page path will
+ * immediately free pages with this flag set to the buddy allocator.
+ * Synchronization: Can be set after huge page allocation from buddy when
+ * code knows it has the only reference. All other examinations and
+ * modifications require hugetlb_lock.
+ * HPG_freed - Set when page is on the free lists.
+ * Synchronization: hugetlb_lock held for examination and modification.
+ * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
+ * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
+ * that is not tracked by raw_hwp_page list.
+ */
+enum hugetlb_page_flags {
+ HPG_restore_reserve = 0,
+ HPG_migratable,
+ HPG_temporary,
+ HPG_freed,
+ HPG_vmemmap_optimized,
+ HPG_raw_hwp_unreliable,
+ HPG_cma,
+ __NR_HPAGEFLAGS,
+};
+
+/*
+ * Macros to create test, set and clear function definitions for
+ * hugetlb specific page flags.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define TESTHPAGEFLAG(uname, flname) \
+static __always_inline \
+bool folio_test_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ return test_bit(HPG_##flname, private); \
+ }
+
+#define SETHPAGEFLAG(uname, flname) \
+static __always_inline \
+void folio_set_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ set_bit(HPG_##flname, private); \
+ }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static __always_inline \
+void folio_clear_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ clear_bit(HPG_##flname, private); \
+ }
+#else
+#define TESTHPAGEFLAG(uname, flname) \
+static inline bool \
+folio_test_hugetlb_##flname(struct folio *folio) \
+ { return 0; }
+
+#define SETHPAGEFLAG(uname, flname) \
+static inline void \
+folio_set_hugetlb_##flname(struct folio *folio) \
+ { }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static inline void \
+folio_clear_hugetlb_##flname(struct folio *folio) \
+ { }
+#endif
+
+#define HPAGEFLAG(uname, flname) \
+ TESTHPAGEFLAG(uname, flname) \
+ SETHPAGEFLAG(uname, flname) \
+ CLEARHPAGEFLAG(uname, flname) \
+
+/*
+ * Create functions associated with hugetlb page flags
+ */
+HPAGEFLAG(RestoreReserve, restore_reserve)
+HPAGEFLAG(Migratable, migratable)
+HPAGEFLAG(Temporary, temporary)
+HPAGEFLAG(Freed, freed)
+HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
+HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
+HPAGEFLAG(Cma, cma)
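Each HPAGEFLAG() invocation above generates three accessors; the Migratable entry, for example, yields the following, used per the synchronization rules documented earlier:

	if (folio_test_hugetlb_migratable(folio)) {
		/* candidate for migration */
	}

	/* set after adding the folio to the page cache / page tables: */
	folio_set_hugetlb_migratable(folio);
	/* clear while isolated, under hugetlb_lock: */
	folio_clear_hugetlb_migratable(folio);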
#ifdef CONFIG_HUGETLB_PAGE
#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
+ struct mutex resize_lock;
+ struct lock_class_key resize_key;
int next_nid_to_alloc;
int next_nid_to_free;
unsigned int order;
+ unsigned int demote_order;
unsigned long mask;
unsigned long max_huge_pages;
unsigned long nr_huge_pages;
@@ -329,39 +674,51 @@ struct hstate {
unsigned long nr_overcommit_huge_pages;
struct list_head hugepage_activelist;
struct list_head hugepage_freelists[MAX_NUMNODES];
+ unsigned int max_huge_pages_node[MAX_NUMNODES];
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
-#ifdef CONFIG_CGROUP_HUGETLB
- /* cgroup control files */
- struct cftype cgroup_files[5];
-#endif
char name[HSTATE_NAME_LEN];
};
+struct cma;
+
struct huge_bootmem_page {
struct list_head list;
struct hstate *hstate;
-#ifdef CONFIG_HIGHMEM
- phys_addr_t phys;
-#endif
+ unsigned long flags;
+ struct cma *cma;
};
-struct page *alloc_huge_page(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_node(struct hstate *h, int nid);
-struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
- nodemask_t *nmask);
-int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+#define HUGE_BOOTMEM_HVO 0x0001
+#define HUGE_BOOTMEM_ZONES_VALID 0x0002
+#define HUGE_BOOTMEM_CMA 0x0004
+
+bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);
+
+int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
+int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
+void wait_for_freed_hugetlb_folios(void);
+struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ unsigned long addr, bool cow_from_owner);
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask,
+ bool allow_alloc_fallback);
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask);
+
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct folio *folio);
/* arch callback */
-int __init alloc_bootmem_huge_page(struct hstate *h);
+int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
+int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
+bool __init hugetlb_node_alloc_supported(void);
-void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
+bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
@@ -373,9 +730,20 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
-static inline struct hstate *hstate_inode(struct inode *i)
+static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
- return HUGETLBFS_SB(i->i_sb)->hstate;
+ return HUGETLBFS_SB(inode->i_sb)->spool;
+}
+
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
+{
+ return folio->_hugetlb_subpool;
+}
+
+static inline void hugetlb_set_folio_subpool(struct folio *folio,
+ struct hugepage_subpool *subpool)
+{
+ folio->_hugetlb_subpool = subpool;
}
static inline struct hstate *hstate_file(struct file *f)
@@ -388,7 +756,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
if (!page_size_log)
return &default_hstate;
- return size_to_hstate(1UL << page_size_log);
+ if (page_size_log < BITS_PER_LONG)
+ return size_to_hstate(1UL << page_size_log);
+
+ return NULL;
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
@@ -396,7 +767,7 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return hstate_file(vma->vm_file);
}
-static inline unsigned long huge_page_size(struct hstate *h)
+static inline unsigned long huge_page_size(const struct hstate *h)
{
return (unsigned long)PAGE_SIZE << h->order;
}
@@ -420,12 +791,17 @@ static inline unsigned huge_page_shift(struct hstate *h)
return h->order + PAGE_SHIFT;
}
+static inline bool order_is_gigantic(unsigned int order)
+{
+ return order > MAX_PAGE_ORDER;
+}
+
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) >= MAX_ORDER;
+ return order_is_gigantic(huge_page_order(h));
}
-static inline unsigned int pages_per_huge_page(struct hstate *h)
+static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
return 1 << h->order;
}
@@ -435,20 +811,51 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
return huge_page_size(h) / 512;
}
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
+{
+ return filemap_lock_folio(mapping, idx << huge_page_order(h));
+}
+
#include <asm/hugetlb.h>
+#ifndef is_hugepage_only_range
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+#define is_hugepage_only_range is_hugepage_only_range
+#endif
+
+#ifndef arch_clear_hugetlb_flags
+static inline void arch_clear_hugetlb_flags(struct folio *folio) { }
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
+#endif
+
#ifndef arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
+ vm_flags_t flags)
{
- return entry;
+ return pte_mkhuge(entry);
}
#endif
-static inline struct hstate *page_hstate(struct page *page)
+#ifndef arch_has_huge_bootmem_alloc
+/*
+ * Some architectures do their own bootmem allocation, so they can't use
+ * early CMA allocation.
+ */
+static inline bool arch_has_huge_bootmem_alloc(void)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- return size_to_hstate(PAGE_SIZE << compound_order(page));
+ return false;
+}
+#endif
+
+static inline struct hstate *folio_hstate(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ return size_to_hstate(folio_size(folio));
}
static inline unsigned hstate_index_to_shift(unsigned index)
@@ -461,40 +868,147 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-pgoff_t __basepage_index(struct page *page);
+int dissolve_free_hugetlb_folio(struct folio *folio);
+int dissolve_free_hugetlb_folios(unsigned long start_pfn,
+ unsigned long end_pfn);
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
+#ifdef CONFIG_MEMORY_FAILURE
+extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
+#else
+static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
- if (!PageCompound(page))
- return page->index;
-
- return __basepage_index(page);
}
+#endif
-extern int dissolve_free_huge_page(struct page *page);
-extern int dissolve_free_huge_pages(unsigned long start_pfn,
- unsigned long end_pfn);
-static inline bool hugepage_migration_supported(struct hstate *h)
-{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+#ifndef arch_hugetlb_migration_supported
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
if ((huge_page_shift(h) == PMD_SHIFT) ||
- (huge_page_shift(h) == PGDIR_SHIFT))
+ (huge_page_shift(h) == PUD_SHIFT) ||
+ (huge_page_shift(h) == PGDIR_SHIFT))
return true;
else
return false;
+}
+#endif
#else
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
return false;
+}
#endif
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+ return arch_hugetlb_migration_supported(h);
+}
+
+/*
+ * The movability check differs from the migration check: it determines
+ * whether a huge page should be placed in a movable zone.  A huge page
+ * should be considered movable only if its size is supported for
+ * migration in the first place; there is no reason for it to be movable
+ * if it is not migratable to begin with.  The page must also be small
+ * enough that migrating it out of a movable zone remains feasible --
+ * mere presence in a movable zone does not make migration feasible.
+ *
+ * So even though large huge page sizes such as the gigantic ones are
+ * migratable, they should not be movable, because it is not feasible
+ * to migrate them out of a movable zone.
+ */
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+ if (!hugepage_migration_supported(h))
+ return false;
+
+ if (hstate_is_gigantic(h))
+ return false;
+ return true;
+}
+
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+ gfp_t gfp = __GFP_COMP | __GFP_NOWARN;
+
+ gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
+
+ return gfp;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+ gfp_t modified_mask = htlb_alloc_mask(h);
+
+ /* Some callers might want to enforce node */
+ modified_mask |= (gfp_mask & __GFP_THISNODE);
+
+ modified_mask |= (gfp_mask & __GFP_NOWARN);
+
+ return modified_mask;
+}
+
+static inline bool htlb_allow_alloc_fallback(int reason)
+{
+ bool allowed_fallback = false;
+
+ /*
+	 * Note: the memory offline, memory failure and migration syscalls are
+	 * allowed to fall back to other nodes for lack of a better choice,
+	 * even though that might break the per-node hugetlb pool.  All other
+	 * cases set __GFP_THISNODE to avoid breaking the per-node pool.
+ */
+ switch (reason) {
+ case MR_MEMORY_HOTPLUG:
+ case MR_MEMORY_FAILURE:
+ case MR_SYSCALL:
+ case MR_MEMPOLICY_MBIND:
+ allowed_fallback = true;
+ break;
+ default:
+ break;
+ }
+
+ return allowed_fallback;
}
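
Taken together, a migration call site derives its gfp mask and fallback
policy from these helpers. A hedged sketch (mirroring, not quoting, a real
caller; the signatures used are the ones declared earlier in this header):

	/* Illustrative only -- not part of this patch. */
	static struct folio *example_migration_target(struct hstate *h, int nid,
						      nodemask_t *nmask,
						      gfp_t gfp_mask, int reason)
	{
		gfp_t gfp = htlb_modify_alloc_mask(h, gfp_mask);

		return alloc_hugetlb_folio_nodemask(h, nid, nmask, gfp,
						    htlb_allow_alloc_fallback(reason));
	}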
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
- if (huge_page_size(h) == PMD_SIZE)
+ const unsigned long size = huge_page_size(h);
+
+ VM_WARN_ON(size == PAGE_SIZE);
+
+ /*
+ * hugetlb must use the exact same PT locks as core-mm page table
+ * walkers would. When modifying a PTE table, hugetlb must take the
+ * PTE PT lock, when modifying a PMD table, hugetlb must take the PMD
+ * PT lock etc.
+ *
+ * The expectation is that any hugetlb folio smaller than a PMD is
+ * always mapped into a single PTE table and that any hugetlb folio
+ * smaller than a PUD (but at least as big as a PMD) is always mapped
+ * into a single PMD table.
+ *
+ * If that does not hold for an architecture, then that architecture
+ * must disable split PT locks such that all *_lockptr() functions
+ * will give us the same result: the per-MM PT lock.
+ *
+ * Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where
+ * PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr()
+ * and core-mm would use pmd_lockptr(). However, in such configurations
+ * split PMD locks are disabled -- they don't make sense on a single
+ * PGDIR page table -- and the end result is the same.
+ */
+ if (size >= PUD_SIZE)
+ return pud_lockptr(mm, (pud_t *) pte);
+ else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
return pmd_lockptr(mm, (pmd_t *) pte);
- VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
- return &mm->page_table_lock;
+ /* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
+ return ptep_lockptr(mm, pte);
}
#ifndef hugepages_supported
@@ -508,6 +1022,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->hugetlb_usage, 0);
+}
+
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
atomic_long_add(l, &mm->hugetlb_usage);
@@ -518,31 +1037,161 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
atomic_long_sub(l, &mm->hugetlb_usage);
}
-#ifndef set_huge_swap_pte_at
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
+#ifndef huge_ptep_modify_prot_start
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
+}
+#endif
+
+#ifndef huge_ptep_modify_prot_commit
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
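
The start/commit pair brackets an in-place permission change: start clears
the entry, the caller computes the new PTE, and commit installs it. A sketch
of the expected calling sequence (huge_pte_modify() is assumed here as the
generic transform helper; error handling omitted):

	/* Illustrative only -- not part of this patch. */
	static void example_change_prot(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pgprot_t newprot)
	{
		pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
		pte_t new_pte = huge_pte_modify(old_pte, newprot);

		huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
	}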
+
+#ifdef CONFIG_NUMA
+void hugetlb_register_node(struct node *node);
+void hugetlb_unregister_node(struct node *node);
+#endif
+
+/*
+ * Check if a given raw @page in a hugepage is HWPOISON.
+ */
+bool is_raw_hwpoison_page_in_hugepage(struct page *page);
+
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+ return PAGE_MASK & ~huge_page_mask(hstate_file(file));
+}
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
-#define alloc_huge_page(v, a, r) NULL
-#define alloc_huge_page_node(h, nid) NULL
-#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_noerr(v, a, r) NULL
-#define alloc_bootmem_huge_page(h) NULL
-#define hstate_file(f) NULL
-#define hstate_sizelog(s) NULL
-#define hstate_vma(v) NULL
-#define hstate_inode(i) NULL
-#define page_hstate(page) NULL
-#define huge_page_size(h) PAGE_SIZE
-#define huge_page_mask(h) PAGE_MASK
-#define vma_kernel_pagesize(v) PAGE_SIZE
-#define vma_mmu_pagesize(v) PAGE_SIZE
-#define huge_page_order(h) 0
-#define huge_page_shift(h) PAGE_SHIFT
+
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+ return 0;
+}
+
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
+{
+ return NULL;
+}
+
+static inline int isolate_or_dissolve_huge_folio(struct folio *folio,
+ struct list_head *list)
+{
+ return -ENOMEM;
+}
+
+static inline int replace_free_hugepage_folios(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ return 0;
+}
+
+static inline void wait_for_freed_hugetlb_folios(void)
+{
+}
+
+static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ unsigned long addr,
+ bool cow_from_owner)
+{
+ return NULL;
+}
+
+static inline struct folio *
+alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
+{
+ return NULL;
+}
+
+static inline struct folio *
+alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask,
+ bool allow_alloc_fallback)
+{
+ return NULL;
+}
+
+static inline int __alloc_bootmem_huge_page(struct hstate *h)
+{
+ return 0;
+}
+
+static inline struct hstate *hstate_file(struct file *f)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct hstate *folio_hstate(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct hstate *size_to_hstate(unsigned long size)
+{
+ return NULL;
+}
+
+static inline unsigned long huge_page_size(struct hstate *h)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned long huge_page_mask(struct hstate *h)
+{
+ return PAGE_MASK;
+}
+
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned int huge_page_order(struct hstate *h)
+{
+ return 0;
+}
+
+static inline unsigned int huge_page_shift(struct hstate *h)
+{
+ return PAGE_SHIFT;
+}
+
static inline bool hstate_is_gigantic(struct hstate *h)
{
return false;
@@ -563,23 +1212,38 @@ static inline int hstate_index(struct hstate *h)
return 0;
}
-static inline pgoff_t basepage_index(struct page *page)
+static inline int dissolve_free_hugetlb_folio(struct folio *folio)
{
- return page->index;
+ return 0;
}
-static inline int dissolve_free_huge_page(struct page *page)
+static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn,
+ unsigned long end_pfn)
{
return 0;
}
-static inline int dissolve_free_huge_pages(unsigned long start_pfn,
- unsigned long end_pfn)
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+ return false;
+}
+
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+ return false;
+}
+
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
return 0;
}
-static inline bool hugepage_migration_supported(struct hstate *h)
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline bool htlb_allow_alloc_fallback(int reason)
{
return false;
}
@@ -590,6 +1254,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+}
+
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}
@@ -598,10 +1266,43 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_MMU
+ return ptep_get(ptep);
+#else
+ return *ptep;
+#endif
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+}
+
+static inline void hugetlb_register_node(struct node *node)
{
}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+}
+
+static inline bool hugetlbfs_pagecache_present(
+ struct hstate *h, struct vm_area_struct *vma, unsigned long address)
+{
+ return false;
+}
+
+static inline void hugetlb_bootmem_alloc(void)
+{
+}
+
+static inline bool hugetlb_bootmem_allocated(void)
+{
+ return false;
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
@@ -614,4 +1315,66 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
return ptl;
}
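
huge_pte_lock() pairs huge_pte_lockptr() with taking the returned lock, so
the usual caller pattern is simply (sketch only, not part of this patch):

	/* Illustrative only -- not part of this patch. */
	static void example_with_lock(struct hstate *h, struct mm_struct *mm,
				      pte_t *ptep)
	{
		spinlock_t *ptl = huge_pte_lock(h, mm, ptep);

		/* ... examine or modify the entry under the page table lock ... */
		spin_unlock(ptl);
	}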
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+extern void __init hugetlb_cma_reserve(int order);
+#else
+static inline __init void hugetlb_cma_reserve(int order)
+{
+}
+#endif
+
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return page_count(virt_to_page(pte)) > 1;
+}
+#else
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return false;
+}
+#endif
+
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
+
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
+static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
+}
+
+bool __vma_private_lock(struct vm_area_struct *vma);
+
+/*
+ * Safe version of huge_pte_offset() to check the locks. See comments
+ * above huge_pte_offset().
+ */
+static inline pte_t *
+hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
+{
+#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ /*
+	 * If pmd sharing is possible, locking is needed to safely walk the
+	 * hugetlb page tables.  More information can be found in the comment
+	 * above huge_pte_offset() in the same file.
+ *
+ * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
+ */
+ if (__vma_shareable_lock(vma))
+ WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
+ !lockdep_is_held(
+ &vma->vm_file->f_mapping->i_mmap_rwsem));
+#endif
+ return huge_pte_offset(vma->vm_mm, addr, sz);
+}
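
A compliant hugetlb_walk() caller therefore holds one of the two locks. A
hedged sketch (the hugetlb_vma_lock_read()/hugetlb_vma_unlock_read() helpers
are assumed from the non-inline part of hugetlb.h):

	/* Illustrative only -- not part of this patch. */
	static void example_walk(struct vm_area_struct *vma, struct hstate *h,
				 unsigned long addr)
	{
		pte_t *ptep;

		hugetlb_vma_lock_read(vma);	/* assumed helper */
		ptep = hugetlb_walk(vma, addr, huge_page_size(h));
		if (ptep) {
			/* ... use the entry while the vma lock is held ... */
		}
		hugetlb_vma_unlock_read(vma);	/* assumed helper */
	}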
+
#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 063962f6dfc6..e5d64b8b59c2 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -18,32 +18,86 @@
#include <linux/mmdebug.h>
struct hugetlb_cgroup;
-/*
- * Minimum page order trackable by hugetlb cgroup.
- * At least 3 pages are necessary for all the tracking information.
- */
-#define HUGETLB_CGROUP_MIN_ORDER 2
+struct resv_map;
+struct file_region;
#ifdef CONFIG_CGROUP_HUGETLB
+enum hugetlb_memory_event {
+ HUGETLB_MAX,
+ HUGETLB_NR_MEMORY_EVENTS,
+};
+
+struct hugetlb_cgroup_per_node {
+ /* hugetlb usage in pages over all hstates. */
+ unsigned long usage[HUGE_MAX_HSTATE];
+};
+
+struct hugetlb_cgroup {
+ struct cgroup_subsys_state css;
+
+ /*
+ * the counter to account for hugepages from hugetlb.
+ */
+ struct page_counter hugepage[HUGE_MAX_HSTATE];
+
+ /*
+ * the counter to account for hugepage reservations from hugetlb.
+ */
+ struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];
+
+ atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+ atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+
+ /* Handle for "hugetlb.events" */
+ struct cgroup_file events_file[HUGE_MAX_HSTATE];
+
+ /* Handle for "hugetlb.events.local" */
+ struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+ struct hugetlb_cgroup_per_node *nodeinfo[];
+};
+
+static inline struct hugetlb_cgroup *
+__hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ if (rsvd)
+ return folio->_hugetlb_cgroup_rsvd;
+ else
+ return folio->_hugetlb_cgroup;
+}
- if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return NULL;
- return (struct hugetlb_cgroup *)page[2].private;
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
+{
+ return __hugetlb_cgroup_from_folio(folio, false);
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
+ return __hugetlb_cgroup_from_folio(folio, true);
+}
- if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return -1;
- page[2].private = (unsigned long)h_cg;
- return 0;
+static inline void __set_hugetlb_cgroup(struct folio *folio,
+ struct hugetlb_cgroup *h_cg, bool rsvd)
+{
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ if (rsvd)
+ folio->_hugetlb_cgroup_rsvd = h_cg;
+ else
+ folio->_hugetlb_cgroup = h_cg;
+}
+
+static inline void set_hugetlb_cgroup(struct folio *folio,
+ struct hugetlb_cgroup *h_cg)
+{
+ __set_hugetlb_cgroup(folio, h_cg, false);
+}
+
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
+ struct hugetlb_cgroup *h_cg)
+{
+ __set_hugetlb_cgroup(folio, h_cg, true);
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -51,29 +105,84 @@ static inline bool hugetlb_cgroup_disabled(void)
return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+ css_put(&h_cg->css);
+}
+
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_get(resv_map->css);
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_put(resv_map->css);
+}
+
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
+extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
- struct page *page);
+ struct folio *folio);
+extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+ struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+ struct folio *folio);
+
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
+ unsigned long start,
+ unsigned long end);
+
+extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ struct file_region *rg,
+ unsigned long nr_pages,
+ bool region_del);
+
extern void hugetlb_cgroup_file_init(void) __init;
-extern void hugetlb_cgroup_migrate(struct page *oldhpage,
- struct page *newhpage);
+extern void hugetlb_cgroup_migrate(struct folio *old_folio,
+ struct folio *new_folio);
#else
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ struct file_region *rg,
+ unsigned long nr_pages,
+ bool region_del)
+{
+}
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
return NULL;
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline void set_hugetlb_cgroup(struct folio *folio,
+ struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
+ struct hugetlb_cgroup *h_cg)
{
- return 0;
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -81,28 +190,71 @@ static inline bool hugetlb_cgroup_disabled(void)
return true;
}
-static inline int
-hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup **ptr)
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
+static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
{
return 0;
}
-static inline void
-hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg,
- struct page *page)
+static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return 0;
+}
+
+static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct folio *folio)
{
}
static inline void
-hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
+hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct folio *folio)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+ struct folio *folio)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
+ unsigned long nr_pages,
+ struct folio *folio)
+{
+}
+static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
{
}
static inline void
-hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg)
+hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
+ unsigned long start,
+ unsigned long end)
{
}
@@ -110,8 +262,8 @@ static inline void hugetlb_cgroup_file_init(void)
{
}
-static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
- struct page *newhpage)
+static inline void hugetlb_cgroup_migrate(struct folio *old_folio,
+ struct folio *new_folio)
{
}
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index a4e7ca0f3585..a27aa0162918 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -1,22 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_INLINE_H
#define _LINUX_HUGETLB_INLINE_H
-#ifdef CONFIG_HUGETLB_PAGE
-
#include <linux/mm.h>
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+#ifdef CONFIG_HUGETLB_PAGE
+
+static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags)
{
- return !!(vma->vm_flags & VM_HUGETLB);
+ return !!(vm_flags & VM_HUGETLB);
}
#else
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags)
{
return false;
}
#endif
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+{
+ return is_vm_hugetlb_flags(vma->vm_flags);
+}
+
#endif
diff --git a/include/linux/hung_task.h b/include/linux/hung_task.h
new file mode 100644
index 000000000000..c4403eeb7144
--- /dev/null
+++ b/include/linux/hung_task.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Detect Hung Task: detecting tasks stuck in D state
+ *
+ * Copyright (C) 2025 Tongcheng Travel (www.ly.com)
+ * Author: Lance Yang <mingzhe.yang@ly.com>
+ */
+#ifndef __LINUX_HUNG_TASK_H
+#define __LINUX_HUNG_TASK_H
+
+#include <linux/bug.h>
+#include <linux/sched.h>
+#include <linux/compiler.h>
+
+/*
+ * @blocker: Combines lock address and blocking type.
+ *
+ * Lock pointers are at least 4-byte aligned (32-bit) or 8-byte aligned
+ * (64-bit), which leaves the two least significant bits (LSBs) of the
+ * pointer always zero.  We can therefore use these bits to encode the
+ * specific blocking type.
+ *
+ * Note that on architectures where this is not guaranteed, or for any
+ * unaligned lock, this tracking mechanism is silently skipped for that
+ * lock.
+ *
+ * Type encoding:
+ * 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX)
+ * 01 - Blocked on semaphore (BLOCKER_TYPE_SEM)
+ * 10 - Blocked on rw-semaphore as READER (BLOCKER_TYPE_RWSEM_READER)
+ * 11 - Blocked on rw-semaphore as WRITER (BLOCKER_TYPE_RWSEM_WRITER)
+ */
+#define BLOCKER_TYPE_MUTEX 0x00UL
+#define BLOCKER_TYPE_SEM 0x01UL
+#define BLOCKER_TYPE_RWSEM_READER 0x02UL
+#define BLOCKER_TYPE_RWSEM_WRITER 0x03UL
+
+#define BLOCKER_TYPE_MASK 0x03UL
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+static inline void hung_task_set_blocker(void *lock, unsigned long type)
+{
+ unsigned long lock_ptr = (unsigned long)lock;
+
+ WARN_ON_ONCE(!lock_ptr);
+ WARN_ON_ONCE(READ_ONCE(current->blocker));
+
+ /*
+	 * If the lock pointer overlaps BLOCKER_TYPE_MASK (i.e. it is not
+	 * sufficiently aligned), return without writing anything.
+ */
+ if (lock_ptr & BLOCKER_TYPE_MASK)
+ return;
+
+ WRITE_ONCE(current->blocker, lock_ptr | type);
+}
+
+static inline void hung_task_clear_blocker(void)
+{
+ WRITE_ONCE(current->blocker, 0UL);
+}
+
+/*
+ * hung_task_get_blocker_type - Extracts blocker type from encoded blocker
+ * address.
+ *
+ * @blocker: Blocker pointer with encoded type (via LSB bits)
+ *
+ * Returns: BLOCKER_TYPE_MUTEX, BLOCKER_TYPE_SEM, etc.
+ */
+static inline unsigned long hung_task_get_blocker_type(unsigned long blocker)
+{
+ WARN_ON_ONCE(!blocker);
+
+ return blocker & BLOCKER_TYPE_MASK;
+}
+
+static inline void *hung_task_blocker_to_lock(unsigned long blocker)
+{
+ WARN_ON_ONCE(!blocker);
+
+ return (void *)(blocker & ~BLOCKER_TYPE_MASK);
+}
+#else
+static inline void hung_task_set_blocker(void *lock, unsigned long type)
+{
+}
+static inline void hung_task_clear_blocker(void)
+{
+}
+static inline unsigned long hung_task_get_blocker_type(unsigned long blocker)
+{
+ return 0UL;
+}
+static inline void *hung_task_blocker_to_lock(unsigned long blocker)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __LINUX_HUNG_TASK_H */
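
As a usage sketch (hypothetical call sites, not part of this patch): a
sleeping lock's slow path brackets the blocking section with set/clear, and
the detector later decodes a stuck task's blocker word:

	/* Illustrative only -- not part of this patch. */
	static void example_down(struct semaphore *sem)
	{
		hung_task_set_blocker(sem, BLOCKER_TYPE_SEM);
		/* ... task sleeps in D state waiting for the semaphore ... */
		hung_task_clear_blocker();
	}

	static void example_report(struct task_struct *task)
	{
		unsigned long blocker = READ_ONCE(task->blocker);

		if (blocker) {
			unsigned long type = hung_task_get_blocker_type(blocker);
			void *lock = hung_task_blocker_to_lock(blocker);

			pr_info("blocked on %px (type %lu)\n", lock, type);
		}
	}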
diff --git a/include/linux/hw_bitfield.h b/include/linux/hw_bitfield.h
new file mode 100644
index 000000000000..df202e167ce4
--- /dev/null
+++ b/include/linux/hw_bitfield.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025, Collabora Ltd.
+ */
+
+#ifndef _LINUX_HW_BITFIELD_H
+#define _LINUX_HW_BITFIELD_H
+
+#include <linux/bitfield.h>
+#include <linux/build_bug.h>
+#include <linux/limits.h>
+
+/**
+ * FIELD_PREP_WM16() - prepare a bitfield element with a mask in the upper half
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_WM16() masks and shifts up the value, as well as bitwise ORs the
+ * result with the mask shifted up by 16.
+ *
+ * This is useful for a common design of hardware registers where the upper
+ * 16-bit half of a 32-bit register is used as a write-enable mask. In such a
+ * register, a bit in the lower half is only updated if the corresponding bit
+ * in the upper half is high.
+ */
+#define FIELD_PREP_WM16(_mask, _val) \
+ ({ \
+ typeof(_val) __val = _val; \
+ typeof(_mask) __mask = _mask; \
+ __BF_FIELD_CHECK(__mask, ((u16)0U), __val, \
+ "HWORD_UPDATE: "); \
+ (((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) | \
+ ((__mask) << 16); \
+ })
+
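For example, updating a 4-bit field in such a write-masked register might
look like this (the register offset and field mask are hypothetical, and
base is assumed to be the mapped register block):

	/* Illustrative only -- not part of this patch. */
	#define EXAMPLE_REG		0x0040		/* hypothetical offset */
	#define EXAMPLE_MODE_MASK	GENMASK(3, 0)	/* hypothetical field */

	writel(FIELD_PREP_WM16(EXAMPLE_MODE_MASK, 0x5), base + EXAMPLE_REG);
	/* Writes 0x000f0005: bits 19..16 enable the update, bits 3..0 carry it. */
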
+/**
+ * FIELD_PREP_WM16_CONST() - prepare a constant bitfield element with a mask in
+ * the upper half
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_WM16_CONST() masks and shifts up the value, as well as bitwise ORs
+ * the result with the mask shifted up by 16.
+ *
+ * This is useful for a common design of hardware registers where the upper
+ * 16-bit half of a 32-bit register is used as a write-enable mask. In such a
+ * register, a bit in the lower half is only updated if the corresponding bit
+ * in the upper half is high.
+ *
+ * Unlike FIELD_PREP_WM16(), this is a constant expression and can therefore
+ * be used in initializers. Error checking is less thorough in this
+ * version.
+ */
+#define FIELD_PREP_WM16_CONST(_mask, _val) \
+ ( \
+ FIELD_PREP_CONST(_mask, _val) | \
+ (BUILD_BUG_ON_ZERO(const_true((u64)(_mask) > U16_MAX)) + \
+ ((_mask) << 16)) \
+ )
+
+
+#endif /* _LINUX_HW_BITFIELD_H */
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 0464c85e63fd..db199d653dd1 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HW_BREAKPOINT_H
#define _LINUX_HW_BREAKPOINT_H
@@ -6,6 +7,16 @@
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+enum bp_type_idx {
+ TYPE_INST = 0,
+#if defined(CONFIG_HAVE_MIXED_BREAKPOINTS_REGS)
+ TYPE_DATA = 0,
+#else
+ TYPE_DATA = 1,
+#endif
+ TYPE_MAX
+};
+
extern int __init init_hw_breakpoint(void);
static inline void hw_breakpoint_init(struct perf_event_attr *attr)
@@ -52,6 +63,9 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
/* FIXME: only change from the attr, and don't unregister */
extern int
modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
+extern int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+ bool check);
/*
* Kernel breakpoints are not associated with any particular thread.
@@ -68,9 +82,9 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
void *context);
extern int register_perf_hw_breakpoint(struct perf_event *bp);
-extern int __register_perf_hw_breakpoint(struct perf_event *bp);
extern void unregister_hw_breakpoint(struct perf_event *bp);
extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
+extern bool hw_breakpoint_is_used(void);
extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
@@ -96,6 +110,10 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
struct perf_event_attr *attr) { return -ENOSYS; }
+static inline int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+ bool check) { return -ENOSYS; }
+
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
@@ -107,11 +125,11 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
void *context) { return NULL; }
static inline int
register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
-static inline int
-__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
static inline void
unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
+static inline bool hw_breakpoint_is_used(void) { return false; }
+
static inline int
reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
static inline void release_bp_slot(struct perf_event *bp) { }
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index bee0827766a3..b424555753b1 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -1,7 +1,7 @@
/*
Hardware Random Number Generator
- Please read Documentation/hw_random.txt for details on use.
+ Please read Documentation/admin-guide/hw_random.rst for details on use.
----------------------------------------------------------
This software may be used and distributed according to the terms
@@ -13,9 +13,8 @@
#define LINUX_HWRANDOM_H_
#include <linux/completion.h>
-#include <linux/types.h>
-#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/types.h>
/**
* struct hwrng - Hardware Random Number Generator driver
@@ -33,7 +32,8 @@
* and max is a multiple of 4 and >= 32 bytes.
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
- * (per mill).
+ * (in bits of entropy per 1024 bits of input;
+ * valid values: 1 to 1024, or 0 for maximum).
*/
struct hwrng {
const char *name;
@@ -49,6 +49,7 @@ struct hwrng {
struct list_head list;
struct kref ref;
struct completion cleanup_done;
+ struct completion dying;
};
struct device;
@@ -59,7 +60,8 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
-/** Feed random bits into the pool. */
-extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
+
+extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
+extern long hwrng_yield(struct hwrng *rng);
#endif /* LINUX_HWRANDOM_H_ */
diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
index 1c7b89ae6bdc..d896713359cd 100644
--- a/include/linux/hwmon-sysfs.h
+++ b/include/linux/hwmon-sysfs.h
@@ -1,26 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* hwmon-sysfs.h - hardware monitoring chip driver sysfs defines
*
* Copyright (C) 2005 Yani Ioannou <yani.ioannou@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_HWMON_SYSFS_H
#define _LINUX_HWMON_SYSFS_H
#include <linux/device.h>
+#include <linux/kstrtox.h>
struct sensor_device_attribute{
struct device_attribute dev_attr;
@@ -33,10 +21,28 @@ struct sensor_device_attribute{
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
.index = _index }
+#define SENSOR_ATTR_RO(_name, _func, _index) \
+ SENSOR_ATTR(_name, 0444, _func##_show, NULL, _index)
+
+#define SENSOR_ATTR_RW(_name, _func, _index) \
+ SENSOR_ATTR(_name, 0644, _func##_show, _func##_store, _index)
+
+#define SENSOR_ATTR_WO(_name, _func, _index) \
+ SENSOR_ATTR(_name, 0200, NULL, _func##_store, _index)
+
#define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \
struct sensor_device_attribute sensor_dev_attr_##_name \
= SENSOR_ATTR(_name, _mode, _show, _store, _index)
+#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index) \
+ SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)
+
+#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index) \
+ SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index)
+
+#define SENSOR_DEVICE_ATTR_WO(_name, _func, _index) \
+ SENSOR_DEVICE_ATTR(_name, 0200, NULL, _func##_store, _index)
+
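These wrappers rely on a <func>_show/<func>_store naming convention. A
hedged sketch of a read-only attribute built with them (example_read_temp()
is hypothetical; to_sensor_dev_attr() is assumed as this header's
container_of() helper for struct sensor_device_attribute):

	/* Illustrative only -- not part of this patch. */
	static ssize_t temp_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		int index = to_sensor_dev_attr(attr)->index;

		return sysfs_emit(buf, "%d\n", example_read_temp(index));
	}
	static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
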
struct sensor_device_attribute_2 {
struct device_attribute dev_attr;
u8 index;
@@ -50,8 +56,29 @@ struct sensor_device_attribute_2 {
.index = _index, \
.nr = _nr }
+#define SENSOR_ATTR_2_RO(_name, _func, _nr, _index) \
+ SENSOR_ATTR_2(_name, 0444, _func##_show, NULL, _nr, _index)
+
+#define SENSOR_ATTR_2_RW(_name, _func, _nr, _index) \
+ SENSOR_ATTR_2(_name, 0644, _func##_show, _func##_store, _nr, _index)
+
+#define SENSOR_ATTR_2_WO(_name, _func, _nr, _index) \
+ SENSOR_ATTR_2(_name, 0200, NULL, _func##_store, _nr, _index)
+
#define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \
struct sensor_device_attribute_2 sensor_dev_attr_##_name \
= SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index)
+#define SENSOR_DEVICE_ATTR_2_RO(_name, _func, _nr, _index) \
+ SENSOR_DEVICE_ATTR_2(_name, 0444, _func##_show, NULL, \
+ _nr, _index)
+
+#define SENSOR_DEVICE_ATTR_2_RW(_name, _func, _nr, _index) \
+ SENSOR_DEVICE_ATTR_2(_name, 0644, _func##_show, _func##_store, \
+ _nr, _index)
+
+#define SENSOR_DEVICE_ATTR_2_WO(_name, _func, _nr, _index) \
+ SENSOR_DEVICE_ATTR_2(_name, 0200, NULL, _func##_store, \
+ _nr, _index)
+
#endif /* _LINUX_HWMON_SYSFS_H */
diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h
index da0a680e2f6d..9409e1d207ef 100644
--- a/include/linux/hwmon-vid.h
+++ b/include/linux/hwmon-vid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
hwmon-vid.h - VID/VRM/VRD voltage conversions
@@ -5,19 +6,6 @@
Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
With assistance from Trent Piepho <xyzzy@speakeasy.org>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_HWMON_VID_H
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index ceb751987c40..301a83afbd66 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
hwmon.h - part of lm_sensors, Linux kernel modules for hardware monitoring
@@ -6,9 +7,6 @@
Copyright (C) 2005 Mark M. Hoffman <mhoffman@lightlink.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
*/
#ifndef _HWMON_H_
@@ -26,9 +24,12 @@ enum hwmon_sensor_types {
hwmon_curr,
hwmon_power,
hwmon_energy,
+ hwmon_energy64,
hwmon_humidity,
hwmon_fan,
hwmon_pwm,
+ hwmon_intrusion,
+ hwmon_max,
};
enum hwmon_chip_attributes {
@@ -39,6 +40,13 @@ enum hwmon_chip_attributes {
hwmon_chip_register_tz,
hwmon_chip_update_interval,
hwmon_chip_alarms,
+ hwmon_chip_samples,
+ hwmon_chip_curr_samples,
+ hwmon_chip_in_samples,
+ hwmon_chip_power_samples,
+ hwmon_chip_temp_samples,
+ hwmon_chip_beep_enable,
+ hwmon_chip_pec,
};
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
@@ -48,9 +56,17 @@ enum hwmon_chip_attributes {
#define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz)
#define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval)
#define HWMON_C_ALARMS BIT(hwmon_chip_alarms)
+#define HWMON_C_SAMPLES BIT(hwmon_chip_samples)
+#define HWMON_C_CURR_SAMPLES BIT(hwmon_chip_curr_samples)
+#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples)
+#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
+#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
+#define HWMON_C_BEEP_ENABLE BIT(hwmon_chip_beep_enable)
+#define HWMON_C_PEC BIT(hwmon_chip_pec)
enum hwmon_temp_attributes {
- hwmon_temp_input = 0,
+ hwmon_temp_enable,
+ hwmon_temp_input,
hwmon_temp_type,
hwmon_temp_lcrit,
hwmon_temp_lcrit_hyst,
@@ -74,8 +90,12 @@ enum hwmon_temp_attributes {
hwmon_temp_lowest,
hwmon_temp_highest,
hwmon_temp_reset_history,
+ hwmon_temp_rated_min,
+ hwmon_temp_rated_max,
+ hwmon_temp_beep,
};
+#define HWMON_T_ENABLE BIT(hwmon_temp_enable)
#define HWMON_T_INPUT BIT(hwmon_temp_input)
#define HWMON_T_TYPE BIT(hwmon_temp_type)
#define HWMON_T_LCRIT BIT(hwmon_temp_lcrit)
@@ -92,6 +112,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
+#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm)
#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm)
#define HWMON_T_FAULT BIT(hwmon_temp_fault)
#define HWMON_T_OFFSET BIT(hwmon_temp_offset)
@@ -99,8 +120,12 @@ enum hwmon_temp_attributes {
#define HWMON_T_LOWEST BIT(hwmon_temp_lowest)
#define HWMON_T_HIGHEST BIT(hwmon_temp_highest)
#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
+#define HWMON_T_RATED_MIN BIT(hwmon_temp_rated_min)
+#define HWMON_T_RATED_MAX BIT(hwmon_temp_rated_max)
+#define HWMON_T_BEEP BIT(hwmon_temp_beep)
enum hwmon_in_attributes {
+ hwmon_in_enable,
hwmon_in_input,
hwmon_in_min,
hwmon_in_max,
@@ -116,8 +141,13 @@ enum hwmon_in_attributes {
hwmon_in_max_alarm,
hwmon_in_lcrit_alarm,
hwmon_in_crit_alarm,
+ hwmon_in_rated_min,
+ hwmon_in_rated_max,
+ hwmon_in_beep,
+ hwmon_in_fault,
};
+#define HWMON_I_ENABLE BIT(hwmon_in_enable)
#define HWMON_I_INPUT BIT(hwmon_in_input)
#define HWMON_I_MIN BIT(hwmon_in_min)
#define HWMON_I_MAX BIT(hwmon_in_max)
@@ -133,8 +163,13 @@ enum hwmon_in_attributes {
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
+#define HWMON_I_RATED_MIN BIT(hwmon_in_rated_min)
+#define HWMON_I_RATED_MAX BIT(hwmon_in_rated_max)
+#define HWMON_I_BEEP BIT(hwmon_in_beep)
+#define HWMON_I_FAULT BIT(hwmon_in_fault)
enum hwmon_curr_attributes {
+ hwmon_curr_enable,
hwmon_curr_input,
hwmon_curr_min,
hwmon_curr_max,
@@ -150,8 +185,12 @@ enum hwmon_curr_attributes {
hwmon_curr_max_alarm,
hwmon_curr_lcrit_alarm,
hwmon_curr_crit_alarm,
+ hwmon_curr_rated_min,
+ hwmon_curr_rated_max,
+ hwmon_curr_beep,
};
+#define HWMON_C_ENABLE BIT(hwmon_curr_enable)
#define HWMON_C_INPUT BIT(hwmon_curr_input)
#define HWMON_C_MIN BIT(hwmon_curr_min)
#define HWMON_C_MAX BIT(hwmon_curr_max)
@@ -167,8 +206,12 @@ enum hwmon_curr_attributes {
#define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm)
#define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm)
#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
+#define HWMON_C_RATED_MIN BIT(hwmon_curr_rated_min)
+#define HWMON_C_RATED_MAX BIT(hwmon_curr_rated_max)
+#define HWMON_C_BEEP BIT(hwmon_curr_beep)
enum hwmon_power_attributes {
+ hwmon_power_enable,
hwmon_power_average,
hwmon_power_average_interval,
hwmon_power_average_interval_max,
@@ -186,15 +229,22 @@ enum hwmon_power_attributes {
hwmon_power_cap_hyst,
hwmon_power_cap_max,
hwmon_power_cap_min,
+ hwmon_power_min,
hwmon_power_max,
hwmon_power_crit,
+ hwmon_power_lcrit,
hwmon_power_label,
hwmon_power_alarm,
hwmon_power_cap_alarm,
+ hwmon_power_min_alarm,
hwmon_power_max_alarm,
+ hwmon_power_lcrit_alarm,
hwmon_power_crit_alarm,
+ hwmon_power_rated_min,
+ hwmon_power_rated_max,
};
+#define HWMON_P_ENABLE BIT(hwmon_power_enable)
#define HWMON_P_AVERAGE BIT(hwmon_power_average)
#define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval)
#define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max)
@@ -212,23 +262,32 @@ enum hwmon_power_attributes {
#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst)
#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max)
#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min)
+#define HWMON_P_MIN BIT(hwmon_power_min)
#define HWMON_P_MAX BIT(hwmon_power_max)
+#define HWMON_P_LCRIT BIT(hwmon_power_lcrit)
#define HWMON_P_CRIT BIT(hwmon_power_crit)
#define HWMON_P_LABEL BIT(hwmon_power_label)
#define HWMON_P_ALARM BIT(hwmon_power_alarm)
#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
+#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm)
#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
+#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
+#define HWMON_P_RATED_MIN BIT(hwmon_power_rated_min)
+#define HWMON_P_RATED_MAX BIT(hwmon_power_rated_max)
enum hwmon_energy_attributes {
+ hwmon_energy_enable,
hwmon_energy_input,
hwmon_energy_label,
};
+#define HWMON_E_ENABLE BIT(hwmon_energy_enable)
#define HWMON_E_INPUT BIT(hwmon_energy_input)
#define HWMON_E_LABEL BIT(hwmon_energy_label)
enum hwmon_humidity_attributes {
+ hwmon_humidity_enable,
hwmon_humidity_input,
hwmon_humidity_label,
hwmon_humidity_min,
@@ -237,8 +296,13 @@ enum hwmon_humidity_attributes {
hwmon_humidity_max_hyst,
hwmon_humidity_alarm,
hwmon_humidity_fault,
+ hwmon_humidity_rated_min,
+ hwmon_humidity_rated_max,
+ hwmon_humidity_min_alarm,
+ hwmon_humidity_max_alarm,
};
+#define HWMON_H_ENABLE BIT(hwmon_humidity_enable)
#define HWMON_H_INPUT BIT(hwmon_humidity_input)
#define HWMON_H_LABEL BIT(hwmon_humidity_label)
#define HWMON_H_MIN BIT(hwmon_humidity_min)
@@ -247,8 +311,13 @@ enum hwmon_humidity_attributes {
#define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst)
#define HWMON_H_ALARM BIT(hwmon_humidity_alarm)
#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
+#define HWMON_H_RATED_MIN BIT(hwmon_humidity_rated_min)
+#define HWMON_H_RATED_MAX BIT(hwmon_humidity_rated_max)
+#define HWMON_H_MIN_ALARM BIT(hwmon_humidity_min_alarm)
+#define HWMON_H_MAX_ALARM BIT(hwmon_humidity_max_alarm)
enum hwmon_fan_attributes {
+ hwmon_fan_enable,
hwmon_fan_input,
hwmon_fan_label,
hwmon_fan_min,
@@ -260,8 +329,10 @@ enum hwmon_fan_attributes {
hwmon_fan_min_alarm,
hwmon_fan_max_alarm,
hwmon_fan_fault,
+ hwmon_fan_beep,
};
+#define HWMON_F_ENABLE BIT(hwmon_fan_enable)
#define HWMON_F_INPUT BIT(hwmon_fan_input)
#define HWMON_F_LABEL BIT(hwmon_fan_label)
#define HWMON_F_MIN BIT(hwmon_fan_min)
@@ -273,22 +344,34 @@ enum hwmon_fan_attributes {
#define HWMON_F_MIN_ALARM BIT(hwmon_fan_min_alarm)
#define HWMON_F_MAX_ALARM BIT(hwmon_fan_max_alarm)
#define HWMON_F_FAULT BIT(hwmon_fan_fault)
+#define HWMON_F_BEEP BIT(hwmon_fan_beep)
enum hwmon_pwm_attributes {
hwmon_pwm_input,
hwmon_pwm_enable,
hwmon_pwm_mode,
hwmon_pwm_freq,
+ hwmon_pwm_auto_channels_temp,
};
#define HWMON_PWM_INPUT BIT(hwmon_pwm_input)
#define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable)
#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
+#define HWMON_PWM_AUTO_CHANNELS_TEMP BIT(hwmon_pwm_auto_channels_temp)
+
+enum hwmon_intrusion_attributes {
+ hwmon_intrusion_alarm,
+ hwmon_intrusion_beep,
+};
+#define HWMON_INTRUSION_ALARM BIT(hwmon_intrusion_alarm)
+#define HWMON_INTRUSION_BEEP BIT(hwmon_intrusion_beep)
/**
* struct hwmon_ops - hwmon device operations
- * @is_visible: Callback to return attribute visibility. Mandatory.
+ * @visible: Static visibility. If non-zero, 'is_visible' is ignored.
+ * @is_visible: Callback to return attribute visibility. Mandatory unless
+ * 'visible' is non-zero.
* Parameters are:
* @const void *drvdata:
* Pointer to driver-private data structure passed
@@ -332,6 +415,7 @@ enum hwmon_pwm_attributes {
* The function returns 0 on success or a negative error number.
*/
struct hwmon_ops {
+ umode_t visible;
umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel);
int (*read)(struct device *dev, enum hwmon_sensor_types type,
@@ -343,7 +427,7 @@ struct hwmon_ops {
};
/**
- * Channel information
+ * struct hwmon_channel_info - Channel information
* @type: Channel type.
* @config: Pointer to NULL-terminated list of channel parameters.
* Use for per-channel attributes.
@@ -353,19 +437,31 @@ struct hwmon_channel_info {
const u32 *config;
};
+#define HWMON_CHANNEL_INFO(stype, ...) \
+ (&(const struct hwmon_channel_info) { \
+ .type = hwmon_##stype, \
+ .config = (const u32 []) { \
+ __VA_ARGS__, 0 \
+ } \
+ })
+
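HWMON_CHANNEL_INFO() lets drivers declare channel descriptions as compound
literals. A minimal sketch of an info list for a single temperature channel
(names hypothetical, not part of this patch):

	/* Illustrative only -- not part of this patch. */
	static const struct hwmon_channel_info * const example_info[] = {
		HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX),
		NULL
	};
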
/**
- * Chip configuration
+ * struct hwmon_chip_info - Chip configuration
* @ops: Pointer to hwmon operations.
* @info: Null-terminated list of channel information.
*/
struct hwmon_chip_info {
const struct hwmon_ops *ops;
- const struct hwmon_channel_info **info;
+ const struct hwmon_channel_info * const *info;
};
/* hwmon_device_register() is deprecated */
struct device *hwmon_device_register(struct device *dev);
+/*
+ * hwmon_device_register_with_groups() and
+ * devm_hwmon_device_register_with_groups() are deprecated.
+ */
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
@@ -380,12 +476,46 @@ hwmon_device_register_with_info(struct device *dev,
const struct hwmon_chip_info *info,
const struct attribute_group **extra_groups);
struct device *
+hwmon_device_register_for_thermal(struct device *dev, const char *name,
+ void *drvdata);
+struct device *
devm_hwmon_device_register_with_info(struct device *dev,
const char *name, void *drvdata,
const struct hwmon_chip_info *info,
const struct attribute_group **extra_groups);
void hwmon_device_unregister(struct device *dev);
-void devm_hwmon_device_unregister(struct device *dev);
+
+int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel);
+
+char *hwmon_sanitize_name(const char *name);
+char *devm_hwmon_sanitize_name(struct device *dev, const char *name);
+
+void hwmon_lock(struct device *dev);
+void hwmon_unlock(struct device *dev);
+
+/**
+ * hwmon_is_bad_char - Is the char invalid in a hwmon name
+ * @ch: the char to be considered
+ *
+ * hwmon_is_bad_char() can be used to determine if the given character
+ * may not be used in a hwmon name.
+ *
+ * Returns true if the char is invalid, false otherwise.
+ */
+static inline bool hwmon_is_bad_char(const char ch)
+{
+ switch (ch) {
+ case '-':
+ case '*':
+ case ' ':
+ case '\t':
+ case '\n':
+ return true;
+ default:
+ return false;
+ }
+}
#endif
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 859d673d98c8..f35b42e8c5de 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Hardware spinlock public header
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Ohad Ben-Cohen <ohad@wizery.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_HWSPINLOCK_H
@@ -22,8 +14,10 @@
#include <linux/sched.h>
/* hwspinlock mode argument */
-#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
-#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
+#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
+#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
+#define HWLOCK_RAW 0x03
+#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */
struct device;
struct device_node;
@@ -59,20 +53,29 @@ struct hwspinlock_pdata {
int base_id;
};
-#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
+#ifdef CONFIG_HWSPINLOCK
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
-struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
-int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
+int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
+int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
+int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
+struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+ unsigned int id);
+int devm_hwspin_lock_unregister(struct device *dev,
+ struct hwspinlock_device *bank);
+int devm_hwspin_lock_register(struct device *dev,
+ struct hwspinlock_device *bank,
+ const struct hwspinlock_ops *ops,
+ int base_id, int num_locks);
#else /* !CONFIG_HWSPINLOCK */
@@ -89,11 +92,6 @@ void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
* Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
* users. Others, which care, can still check this with IS_ERR.
*/
-static inline struct hwspinlock *hwspin_lock_request(void)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
return ERR_PTR(-ENODEV);
@@ -122,16 +120,35 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
+static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
+{
+ return 0;
+}
+
static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
return 0;
}
-static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
+static inline
+int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
+{
+ return 0;
+}
+
+static inline
+int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
return 0;
}
+static inline
+struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+ unsigned int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
#endif /* !CONFIG_HWSPINLOCK */
/**
@@ -176,6 +193,42 @@ static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
}
/**
+ * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * Caution: the caller must serialize calls taking the hardware lock with a
+ * mutex or spinlock to avoid deadlock; in exchange, raw mode allows
+ * time-consuming or sleepable operations while the hardware lock is held.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
+}
+
+/**
+ * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * This function shall be called only from an atomic context.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
*
@@ -243,6 +296,51 @@ int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
}
/**
+ * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy-loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * Caution: the caller must serialize calls taking the hardware lock with a
+ * mutex or spinlock to avoid deadlock; in exchange, raw mode allows
+ * time-consuming or sleepable operations while the hardware lock is held.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
+}
+
+/**
+ * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy-loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * This function shall be called only from an atomic context and the timeout
+ * value shall not exceed a few msecs.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
+}
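+
To make the "Caution" above concrete, a sketch (hypothetical foo_* names) of the expected pattern: a regular Linux lock serializes local users of raw mode, while the hwspinlock itself arbitrates against the remote processor.

static DEFINE_MUTEX(foo_lock);  /* serializes Linux-side users of raw mode */

static int foo_update_shared(struct hwspinlock *hwlock, void __iomem *reg, u32 val)
{
        int ret;

        mutex_lock(&foo_lock);  /* local protection required for HWLOCK_RAW */
        ret = hwspin_lock_timeout_raw(hwlock, 10);      /* busy-waits up to 10 msecs */
        if (!ret) {
                writel(val, reg);
                usleep_range(100, 200); /* raw mode permits sleeping here */
                hwspin_unlock_raw(hwlock);
        }
        mutex_unlock(&foo_lock);
        return ret;
}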
+
+/**
* hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
@@ -302,6 +400,36 @@ static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
}
/**
+ * hwspin_unlock_raw() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
+}
+
+/**
+ * hwspin_unlock_in_atomic() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_unlock() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
*
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 07650d0232cc..dfc516c1c719 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1,33 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Copyright (c) 2011, Microsoft Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
* K. Y. Srinivasan <kys@microsoft.com>
- *
*/
#ifndef _HYPERV_H
#define _HYPERV_H
#include <uapi/linux/hyperv.h>
-#include <uapi/asm/hyperv.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
@@ -36,12 +23,56 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
+#include <linux/reciprocal_div.h>
+#include <hyperv/hvhdk.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
#pragma pack(push, 1)
+/*
+ * Types of GPADL, which decide how the GPADL header is created.
+ *
+ * There is little difference between BUFFER and RING when PAGE_SIZE is the
+ * same as HV_HYP_PAGE_SIZE.
+ *
+ * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the ring buffer headers are
+ * PAGE_SIZE each, but only the first HV_HYP_PAGE of a header is put into the
+ * gpadl. The number of HV_HYP_PAGEs and the index of each HV_HYP_PAGE
+ * therefore differ between the GPADL types, for example if PAGE_SIZE is 64K:
+ *
+ * BUFFER:
+ *
+ * gva: |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
+ * index: 0 1 2 15 16 17 18 .. 31 32 ...
+ * | | ... | | | ... | ...
+ * v V V V V V
+ * gpadl: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
+ * index: 0 1 2 ... 15 16 17 18 .. 31 32 ...
+ *
+ * RING:
+ *
+ * | header | data | header | data |
+ * gva: |-- 64k --|-- 64k --| ... |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
+ * index: 0 1 16 17 18 31 ... n n+1 n+16 ... 2n
+ * | / / / | / /
+ * | / / / | / /
+ * | / / ... / ... | / ... /
+ * | / / / | / /
+ * | / / / | / /
+ * V V V V V V v
+ * gpadl: | 4k | 4k | ... | ... | 4k | 4k | ... |
+ * index: 0 1 2 ... 16 ... n-15 n-14 n-13 ... 2n-30
+ */
+enum hv_gpadl_type {
+ HV_GPADL_BUFFER,
+ HV_GPADL_RING
+};
+
/* Single-page buffer */
struct hv_page_buffer {
u32 len;
@@ -89,18 +120,33 @@ struct hv_ring_buffer {
u32 interrupt_mask;
/*
- * Win8 uses some of the reserved bits to implement
- * interrupt driven flow management. On the send side
- * we can request that the receiver interrupt the sender
- * when the ring transitions from being full to being able
- * to handle a message of size "pending_send_sz".
+ * WS2012/Win8 and later versions of Hyper-V implement interrupt
+ * driven flow management. The feature bit feat_pending_send_sz
+ * is set by the host on the host->guest ring buffer, and by the
+ * guest on the guest->host ring buffer.
+ *
+ * The meaning of the feature bit is a bit complex in that it has
+ * semantics that apply to both ring buffers. If the guest sets
+ * the feature bit in the guest->host ring buffer, the guest is
+ * telling the host that:
+ * 1) It will set the pending_send_sz field in the guest->host ring
+ * buffer when it is waiting for space to become available, and
+ * 2) It will read the pending_send_sz field in the host->guest
+ * ring buffer and interrupt the host when it frees enough space
+ *
+ * Similarly, if the host sets the feature bit in the host->guest
+ * ring buffer, the host is telling the guest that:
+ * 1) It will set the pending_send_sz field in the host->guest ring
+ * buffer when it is waiting for space to become available, and
+ * 2) It will read the pending_send_sz field in the guest->host
+ * ring buffer and interrupt the guest when it frees enough space
*
- * Add necessary state for this enhancement.
+ * If either the guest or host does not set the feature bit that it
+ * owns, that guest or host must do polling if it encounters a full
+ * ring buffer, and not signal the other end with an interrupt.
*/
u32 pending_send_sz;
-
u32 reserved1[12];
-
union {
struct {
u32 feat_pending_send_sz:1;
@@ -109,46 +155,58 @@ struct hv_ring_buffer {
} feature_bits;
/* Pad it to PAGE_SIZE so that data starts on page boundary */
- u8 reserved2[4028];
+ u8 reserved2[PAGE_SIZE - 68];
/*
* Ring data starts here + RingDataStartOffset
* !!! DO NOT place any fields below this !!!
*/
- u8 buffer[0];
+ u8 buffer[];
} __packed;
+
+/*
+ * If the requested ring buffer size is at least 8 times the size of the
+ * header, steal space from the ring buffer for the header. Otherwise, add
+ * space for the header so that it doesn't take too much of the ring buffer
+ * space.
+ *
+ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
+ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
+ * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
+ * large allocation that will be almost half wasted. As a contrasting example,
+ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
+ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
+ * In this latter case, we must add 64 Kbytes for the header and not worry
+ * about what's wasted.
+ */
+#define VMBUS_HEADER_ADJ(payload_sz) \
+ ((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \
+ 0 : sizeof(struct hv_ring_buffer))
+
+/* Calculate the proper size of a ringbuffer, it must be page-aligned */
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
+ (payload_sz))
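+
Worked through on x86, where PAGE_SIZE is 4K and struct hv_ring_buffer is padded to exactly one page, so 8 * sizeof(struct hv_ring_buffer) is 32K:

VMBUS_RING_SIZE(128 * 1024);    /* 128K >= 32K: header steals ring space -> 128K */
VMBUS_RING_SIZE(16 * 1024);     /*  16K <  32K: header added on top -> PAGE_ALIGN(4K + 16K) = 20K */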
+
struct hv_ring_buffer_info {
struct hv_ring_buffer *ring_buffer;
u32 ring_size; /* Include the shared header */
+ struct reciprocal_value ring_size_div10_reciprocal;
spinlock_t ring_lock;
u32 ring_datasize; /* < ring_size */
u32 priv_read_index;
-};
-
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
- u32 *read, u32 *write)
-{
- u32 read_loc, write_loc, dsize;
+ /*
+ * The ring buffer mutex lock. This lock prevents the ring buffer from
+ * being freed while the ring buffer is being accessed.
+ */
+ struct mutex ring_buffer_mutex;
- /* Capture the read/write indices before they changed */
- read_loc = rbi->ring_buffer->read_index;
- write_loc = rbi->ring_buffer->write_index;
- dsize = rbi->ring_datasize;
+ /* Buffer that holds a copy of an incoming host packet */
+ void *pkt_buffer;
+ u32 pkt_buffer_size;
+};
- *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
- read_loc - write_loc;
- *read = dsize - *write;
-}
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
@@ -177,26 +235,48 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
return write;
}
+static inline u32 hv_get_avail_to_write_percent(
+ const struct hv_ring_buffer_info *rbi)
+{
+ u32 avail_write = hv_get_bytes_to_write(rbi);
+
+ return reciprocal_divide(
+ (avail_write << 3) + (avail_write << 1),
+ rbi->ring_size_div10_reciprocal);
+}
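+
The shifts are a multiply-free avail_write * 10 ((x << 3) + (x << 1) == 8x + 2x), so the expression evaluates avail_write * 10 / (ring_size / 10) == 100 * avail_write / ring_size, with the reciprocal precomputed at ring setup instead of dividing on the hot path. For instance:

/* ring_size = 64K, avail_write = 16K:
 * (16384 * 10) / (65536 / 10) = 163840 / 6553 = 25   (~25% still writable)
 */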
+
/*
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
*
* 0 . 13 (Windows Server 2008)
- * 1 . 1 (Windows 7)
- * 2 . 4 (Windows 8)
- * 3 . 0 (Windows 8 R2)
+ * 1 . 1 (Windows 7, WS2008 R2)
+ * 2 . 4 (Windows 8, WS2012)
+ * 3 . 0 (Windows 8.1, WS2012 R2)
* 4 . 0 (Windows 10)
+ * 4 . 1 (Windows 10 RS3)
+ * 5 . 0 (Newer Windows 10)
+ * 5 . 1 (Windows 10 RS4)
+ * 5 . 2 (Windows Server 2019, RS5)
+ * 5 . 3 (Windows Server 2022)
+ *
+ * The WS2008 and WIN7 versions are listed here for
+ * completeness but are no longer supported in the
+ * Linux kernel.
*/
-#define VERSION_WS2008 ((0 << 16) | (13))
-#define VERSION_WIN7 ((1 << 16) | (1))
-#define VERSION_WIN8 ((2 << 16) | (4))
-#define VERSION_WIN8_1 ((3 << 16) | (0))
-#define VERSION_WIN10 ((4 << 16) | (0))
-
-#define VERSION_INVAL -1
-
-#define VERSION_CURRENT VERSION_WIN10
+#define VMBUS_MAKE_VERSION(MAJ, MIN) ((((u32)MAJ) << 16) | (MIN))
+#define VERSION_WS2008 VMBUS_MAKE_VERSION(0, 13)
+#define VERSION_WIN7 VMBUS_MAKE_VERSION(1, 1)
+#define VERSION_WIN8 VMBUS_MAKE_VERSION(2, 4)
+#define VERSION_WIN8_1 VMBUS_MAKE_VERSION(3, 0)
+#define VERSION_WIN10 VMBUS_MAKE_VERSION(4, 0)
+#define VERSION_WIN10_V4_1 VMBUS_MAKE_VERSION(4, 1)
+#define VERSION_WIN10_V5 VMBUS_MAKE_VERSION(5, 0)
+#define VERSION_WIN10_V5_1 VMBUS_MAKE_VERSION(5, 1)
+#define VERSION_WIN10_V5_2 VMBUS_MAKE_VERSION(5, 2)
+#define VERSION_WIN10_V5_3 VMBUS_MAKE_VERSION(5, 3)
+#define VERSION_WIN10_V6_0 VMBUS_MAKE_VERSION(6, 0)
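+
Since the encoding puts the major number in the high 16 bits and the minor in the low 16, a negotiated version (cf. vmbus_proto_version further down) decodes as:

u16 major = vmbus_proto_version >> 16;          /* VERSION_WIN10_V5_2: 5 */
u16 minor = vmbus_proto_version & 0xffff;       /* VERSION_WIN10_V5_2: 2 */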
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -216,8 +296,8 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
* struct contains the fundamental information about an offer.
*/
struct vmbus_channel_offer {
- uuid_le if_type;
- uuid_le if_instance;
+ guid_t if_type;
+ guid_t if_instance;
/*
* These two fields are not currently used.
@@ -236,7 +316,7 @@ struct vmbus_channel_offer {
/*
* Pipes:
- * The following sructure is an integrated pipe protocol, which
+ * The following structure is an integrated pipe protocol, which
* is implemented on top of standard user-defined data. Pipe
* clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
* use.
@@ -247,21 +327,32 @@ struct vmbus_channel_offer {
} pipe;
} u;
/*
- * The sub_channel_index is defined in win8.
+ * The sub_channel_index is defined in Win8: a value of zero means a
+ * primary channel and a non-zero value means a sub-channel.
+ *
+ * Before Win8, the field is reserved, meaning it's always zero.
*/
u16 sub_channel_index;
u16 reserved3;
} __packed;
/* Server Flags */
-#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
-#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
-#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
-#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
-#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
-#define VMBUS_CHANNEL_PARENT_OFFER 0x200
-#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
-#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
+#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 0x0001
+/*
+ * This flag indicates that the channel is offered by the paravisor, and must
+ * use encrypted memory for the channel ring buffer.
+ */
+#define VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER 0x0002
+/*
+ * This flag indicates that the channel is offered by the paravisor, and must
+ * use encrypted memory for GPA direct packets and additional GPADLs.
+ */
+#define VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY 0x0004
+#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x0010
+#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x0100
+#define VMBUS_CHANNEL_PARENT_OFFER 0x0200
+#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x0400
+#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
struct vmpacket_descriptor {
u16 type;
@@ -287,20 +378,7 @@ struct vmtransfer_page_packet_header {
u8 sender_owns_set;
u8 reserved;
u32 range_cnt;
- struct vmtransfer_page_range ranges[1];
-} __packed;
-
-struct vmgpadl_packet_header {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u32 reserved;
-} __packed;
-
-struct vmadd_remove_transfer_page_set {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u16 xfer_pageset_id;
- u16 reserved;
+ struct vmtransfer_page_range ranges[];
} __packed;
/*
@@ -310,34 +388,10 @@ struct vmadd_remove_transfer_page_set {
struct gpa_range {
u32 byte_count;
u32 byte_offset;
- u64 pfn_array[0];
+ u64 pfn_array[];
};
/*
- * This is the format for an Establish Gpadl packet, which contains a handle by
- * which this GPADL will be known and a set of GPA ranges associated with it.
- * This can be converted to a MDL by the guest OS. If there are multiple GPA
- * ranges, then the resulting MDL will be "chained," representing multiple VA
- * ranges.
- */
-struct vmestablish_gpadl {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u32 range_cnt;
- struct gpa_range range[1];
-} __packed;
-
-/*
- * This is the format for a Teardown Gpadl packet, which indicates that the
- * GPADL handle in the Establish Gpadl packet will never be referenced again.
- */
-struct vmteardown_gpadl {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u32 reserved; /* for alignment to a 8-byte boundary */
-} __packed;
-
-/*
* This is the format for a GPA-Direct packet, which contains a set of GPA
* ranges, in addition to commands and/or data.
*/
@@ -348,25 +402,6 @@ struct vmdata_gpa_direct {
struct gpa_range range[1];
} __packed;
-/* This is the format for a Additional Data Packet. */
-struct vmadditional_data {
- struct vmpacket_descriptor d;
- u64 total_bytes;
- u32 offset;
- u32 byte_cnt;
- unsigned char data[1];
-} __packed;
-
-union vmpacket_largest_possible_header {
- struct vmpacket_descriptor simple_hdr;
- struct vmtransfer_page_packet_header xfer_page_hdr;
- struct vmgpadl_packet_header gpadl_hdr;
- struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
- struct vmestablish_gpadl establish_gpadl_hdr;
- struct vmteardown_gpadl teardown_gpadl_hdr;
- struct vmdata_gpa_direct data_gpa_direct_hdr;
-};
-
#define VMPACKET_DATA_START_ADDRESS(__packet) \
(void *)(((unsigned char *)__packet) + \
((struct vmpacket_descriptor)__packet)->offset8 * 8)
@@ -422,9 +457,15 @@ enum vmbus_channel_message_type {
CHANNELMSG_19 = 19,
CHANNELMSG_20 = 20,
CHANNELMSG_TL_CONNECT_REQUEST = 21,
+ CHANNELMSG_MODIFYCHANNEL = 22,
+ CHANNELMSG_TL_CONNECT_RESULT = 23,
+ CHANNELMSG_MODIFYCHANNEL_RESPONSE = 24,
CHANNELMSG_COUNT
};
+/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
+#define INVALID_RELID U32_MAX
+
struct vmbus_channel_message_header {
enum vmbus_channel_message_type msgtype;
u32 padding;
@@ -475,12 +516,6 @@ struct vmbus_channel_rescind_offer {
u32 child_relid;
} __packed;
-static inline u32
-hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
-{
- return rbi->ring_buffer->pending_send_sz;
-}
-
/*
* Request Offer -- no parameters, SynIC message contains the partition ID
* Set Snoop -- no parameters, SynIC message contains the partition ID
@@ -532,6 +567,13 @@ struct vmbus_channel_open_result {
u32 status;
} __packed;
+/* Modify Channel Result parameters */
+struct vmbus_channel_modifychannel_response {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 status;
+} __packed;
+
/* Close channel parameters; */
struct vmbus_channel_close_channel {
struct vmbus_channel_message_header header;
@@ -555,7 +597,7 @@ struct vmbus_channel_gpadl_header {
u32 gpadl;
u16 range_buflen;
u16 rangecount;
- struct gpa_range range[0];
+ struct gpa_range range[];
} __packed;
/* This is the followup packet that contains more PFNs. */
@@ -563,7 +605,7 @@ struct vmbus_channel_gpadl_body {
struct vmbus_channel_message_header header;
u32 msgnumber;
u32 gpadl;
- u64 pfn[0];
+ u64 pfn[];
} __packed;
struct vmbus_channel_gpadl_created {
@@ -589,11 +631,25 @@ struct vmbus_channel_relid_released {
u32 child_relid;
} __packed;
+/*
+ * Used by the paravisor only, means that the encrypted ring buffers and
+ * the encrypted external memory are supported
+ */
+#define VMBUS_FEATURE_FLAG_CONFIDENTIAL_CHANNELS 0x10
+
struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
u32 target_vcpu; /* The VCPU the host should respond to */
- u64 interrupt_page;
+ union {
+ u64 interrupt_page;
+ struct {
+ u8 msg_sint;
+ u8 msg_vtl;
+ u8 reserved[2];
+ u32 feature_flags; /* VMBus version 6.0 */
+ };
+ };
u64 monitor_page1;
u64 monitor_page2;
} __packed;
@@ -601,13 +657,33 @@ struct vmbus_channel_initiate_contact {
/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
struct vmbus_channel_message_header header;
- uuid_le guest_endpoint_id;
- uuid_le host_service_id;
+ guid_t guest_endpoint_id;
+ guid_t host_service_id;
+} __packed;
+
+/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
+struct vmbus_channel_modifychannel {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 target_vp;
} __packed;
struct vmbus_channel_version_response {
struct vmbus_channel_message_header header;
u8 version_supported;
+
+ u8 connection_state;
+ u16 padding;
+
+ /*
+ * On new hosts that support VMBus protocol 5.0, we must use
+ * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
+ * and for subsequent messages, we must use the Message Connection ID
+ * field in the host-returned Version Response Message.
+ *
+ * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
+ */
+ u32 msg_conn_id;
} __packed;
enum vmbus_channel_state {
@@ -637,6 +713,7 @@ struct vmbus_channel_msginfo {
struct vmbus_channel_gpadl_torndown gpadl_torndown;
struct vmbus_channel_gpadl_created gpadl_created;
struct vmbus_channel_version_response version_response;
+ struct vmbus_channel_modifychannel_response modify_response;
} response;
u32 msgsize;
@@ -644,38 +721,7 @@ struct vmbus_channel_msginfo {
* The channel message that goes out on the "wire".
* It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
*/
- unsigned char msg[0];
-};
-
-struct vmbus_close_msg {
- struct vmbus_channel_msginfo info;
- struct vmbus_channel_close_channel msg;
-};
-
-/* Define connection identifier type. */
-union hv_connection_id {
- u32 asu32;
- struct {
- u32 id:24;
- u32 reserved:8;
- } u;
-};
-
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
- union hv_connection_id connectionid;
- u16 flag_number;
- u16 rsvdz;
-};
-
-struct hv_input_signal_event_buffer {
- u64 align8;
- struct hv_input_signal_event event;
-};
-
-enum hv_numa_policy {
- HV_BALANCED = 0,
- HV_LOCALIZED,
+ unsigned char msg[];
};
enum vmbus_device_type {
@@ -698,10 +744,44 @@ enum vmbus_device_type {
HV_UNKNOWN,
};
+/*
+ * Provides request ids for VMBus. Encapsulates guest memory
+ * addresses and stores the next available slot in req_arr
+ * to generate new ids in constant time.
+ */
+struct vmbus_requestor {
+ u64 *req_arr;
+ unsigned long *req_bitmap; /* is a given slot available? */
+ u32 size;
+ u64 next_request_id;
+ spinlock_t req_lock; /* provides atomicity */
+};
+
+#define VMBUS_NO_RQSTOR U64_MAX
+#define VMBUS_RQST_ERROR (U64_MAX - 1)
+#define VMBUS_RQST_ADDR_ANY U64_MAX
+/* NetVSC-specific */
+#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
+/* StorVSC-specific */
+#define VMBUS_RQST_INIT (U64_MAX - 2)
+#define VMBUS_RQST_RESET (U64_MAX - 3)
+
struct vmbus_device {
+ /* preferred ring buffer size in KB, 0 means no preferred size for this device */
+ size_t pref_ring_size;
u16 dev_type;
- uuid_le guid;
+ guid_t guid;
bool perf_device;
+ bool allowed_in_isolated;
+};
+
+#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
+
+struct vmbus_gpadl {
+ u32 gpadl_handle;
+ u32 size;
+ void *buffer;
+ bool decrypted;
};
struct vmbus_channel {
@@ -720,22 +800,51 @@ struct vmbus_channel {
u8 monitor_bit;
bool rescind; /* got rescind msg */
+ bool rescind_ref; /* got rescind msg, got channel reference */
+ struct completion rescind_event;
- u32 ringbuffer_gpadlhandle;
+ struct vmbus_gpadl ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
- void *ringbuffer_pages;
+ struct page *ringbuffer_page;
u32 ringbuffer_pagecount;
+ u32 ringbuffer_send_offset;
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
- struct vmbus_close_msg close_msg;
+ struct vmbus_channel_close_channel close_msg;
+
+ /* Statistics */
+ u64 interrupts; /* Host to Guest interrupts */
+ u64 sig_events; /* Guest to Host events */
+
+ /*
+ * Guest to host interrupts caused by the outbound ring buffer changing
+ * from empty to not empty.
+ */
+ u64 intr_out_empty;
+
+ /*
+ * Indicates that a full outbound ring buffer was encountered. The flag
+ * is set to true when a full outbound ring buffer is encountered and
+ * set to false when a write to the outbound ring buffer is completed.
+ */
+ bool out_full_flag;
/* Channel callback's invoked in softirq context */
struct tasklet_struct callback_event;
void (*onchannel_callback)(void *context);
void *channel_callback_context;
+ void (*change_target_cpu_callback)(struct vmbus_channel *channel,
+ u32 old, u32 new);
+
+ /*
+ * Synchronize channel scheduling and channel removal; see the inline
+ * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
+ */
+ spinlock_t sched_lock;
+
/*
* A channel can be marked for one of three modes of reading:
* BATCHED - callback called from taslket and should read
@@ -754,34 +863,27 @@ struct vmbus_channel {
} callback_mode;
bool is_dedicated_interrupt;
- struct hv_input_signal_event_buffer sig_buf;
- struct hv_input_signal_event *sig_event;
+ u64 sig_event;
/*
- * Starting with win8, this field will be used to specify
- * the target virtual processor on which to deliver the interrupt for
- * the host to guest communication.
- * Prior to win8, incoming channel interrupts would only
- * be delivered on cpu 0. Setting this value to 0 would
- * preserve the earlier behavior.
+ * Starting with win8, this field will be used to specify the
+ * target CPU on which to deliver the interrupt for the host
+ * to guest communication.
+ *
+ * Prior to win8, incoming channel interrupts would only be
+ * delivered on CPU 0. Setting this value to 0 would preserve
+ * the earlier behavior.
*/
- u32 target_vp;
- /* The corresponding CPUID in the guest */
u32 target_cpu;
/*
- * State to manage the CPU affiliation of channels.
- */
- struct cpumask alloced_cpus_in_node;
- int numa_node;
- /*
* Support for sub-channels. For high performance devices,
* it will be useful to have multiple sub-channels to support
* a scalable communication infrastructure with the host.
- * The support for sub-channels is implemented as an extention
+ * The support for sub-channels is implemented as an extension
* to the current infrastructure.
* The initial offer is considered the primary channel and this
* offer message will indicate if the host supports sub-channels.
- * The guest is free to ask for sub-channels to be offerred and can
+ * The guest is free to ask for sub-channels to be offered and can
* open these sub-channels as a normal "primary" channel. However,
* all sub-channels will have the same type and instance guids as the
* primary channel. Requests sent on a given channel will result in a
@@ -802,25 +904,10 @@ struct vmbus_channel {
void (*chn_rescind_callback)(struct vmbus_channel *channel);
/*
- * The spinlock to protect the structure. It is being used to protect
- * test-and-set access to various attributes of the structure as well
- * as all sc_list operations.
- */
- spinlock_t lock;
- /*
* All Sub-channels of a primary channel are linked here.
*/
struct list_head sc_list;
/*
- * Current number of sub-channels.
- */
- int num_sc;
- /*
- * Number of a sub-channel (position within sc_list) which is supposed
- * to be used as the next outgoing channel.
- */
- int next_oc;
- /*
* The primary channel this sub-channel belongs to.
* This will be NULL for the primary channel.
*/
@@ -829,11 +916,6 @@ struct vmbus_channel {
* Support per-channel state for use by vmbus drivers.
*/
void *per_channel_state;
- /*
- * To support per-cpu lookup mapping of relid to channel,
- * link up channels based on their CPU affinity.
- */
- struct list_head percpu_list;
/*
* Defer freeing channel until after all cpu's have
@@ -842,6 +924,11 @@ struct vmbus_channel {
struct rcu_head rcu;
/*
+ * For sysfs per-channel properties.
+ */
+ struct kobject kobj;
+
+ /*
* For performance critical channels (storage, networking
* etc,), Hyper-V has a mechanism to enhance the throughput
* at the expense of latency:
@@ -852,7 +939,7 @@ struct vmbus_channel {
* mechanism improves throughput by:
*
* A) Making the host more efficient - each time it wakes up,
- * potentially it will process morev number of packets. The
+ * potentially it will process more packets. The
* monitor latency allows a batch to build up.
* B) By deferring the hypercall to signal, we will also minimize
* the interrupts.
@@ -860,39 +947,130 @@ struct vmbus_channel {
* Clearly, these optimizations improve throughput at the expense of
* latency. Furthermore, since the channel is shared for both
* control and data messages, control messages currently suffer
- * unnecessary latency adversley impacting performance and boot
+ * unnecessary latency adversely impacting performance and boot
* time. To fix this issue, permit tagging the channel as being
* in "low latency" mode. In this mode, we will bypass the monitor
* mechanism.
*/
bool low_latency;
+ bool probe_done;
+
/*
- * NUMA distribution policy:
- * We support teo policies:
- * 1) Balanced: Here all performance critical channels are
- * distributed evenly amongst all the NUMA nodes.
- * This policy will be the default policy.
- * 2) Localized: All channels of a given instance of a
- * performance critical service will be assigned CPUs
- * within a selected NUMA node.
+ * Cache the device ID here for easy access; this is useful, in
+ * particular, in situations where the channel's device_obj has
+ * not been allocated/initialized yet.
*/
- enum hv_numa_policy affinity_policy;
+ u16 device_id;
- bool probe_done;
+ /*
+ * We must offload the handling of the primary/sub channels
+ * from the single-threaded vmbus_connection.work_queue to
+ * two different workqueues, otherwise we can block
+ * vmbus_connection.work_queue and hang: see vmbus_process_offer().
+ */
+ struct work_struct add_channel_work;
+
+ /*
+ * Guest to host interrupts caused by the inbound ring buffer changing
+ * from full to not full while a packet is waiting.
+ */
+ u64 intr_in_full;
+
+ /*
+ * The total number of write operations that encountered a full
+ * outbound ring buffer.
+ */
+ u64 out_full_total;
+
+ /*
+ * The number of write operations that were the first to encounter a
+ * full outbound ring buffer.
+ */
+ u64 out_full_first;
+
+ /* enabling/disabling fuzz testing on the channel (default is false) */
+ bool fuzz_testing_state;
+ /*
+ * The interrupt delay defers the guest's emptying of the ring buffer
+ * for a given time. The delay is in microseconds, between 1 and a
+ * maximum of 1000; the default is 0 (no delay). The message delay
+ * likewise defers guest reading on a per-message basis, in
+ * microseconds between 1 and 1000, with the default being 0
+ * (no delay).
+ */
+ u32 fuzz_testing_interrupt_delay;
+ u32 fuzz_testing_message_delay;
+
+ /* callback to generate a request ID from a request address */
+ u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
+ /* callback to retrieve a request address from a request ID */
+ u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);
+
+ /* request/transaction ids for VMBus */
+ struct vmbus_requestor requestor;
+ u32 rqstor_size;
+
+ /* The max size of a packet on this channel */
+ u32 max_pkt_size;
+
+ /* function to mmap ring buffer memory to the channel's sysfs ring attribute */
+ int (*mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma);
+
+ /* boolean to control visibility of sysfs for ring buffer */
+ bool ring_sysfs_visible;
+ /* The ring buffer is encrypted */
+ bool co_ring_buffer;
+ /* The external memory is encrypted */
+ bool co_external_memory;
};
+#define lock_requestor(channel, flags) \
+do { \
+ struct vmbus_requestor *rqstor = &(channel)->requestor; \
+ \
+ spin_lock_irqsave(&rqstor->req_lock, flags); \
+} while (0)
+
+static __always_inline void unlock_requestor(struct vmbus_channel *channel,
+ unsigned long flags)
+{
+ struct vmbus_requestor *rqstor = &channel->requestor;
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+}
+
+u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
+u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
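+
A sketch of how a VSC driver might thread these through a send/completion pair (hypothetical fragment; the requestor must have been sized via rqstor_size before vmbus_open()):

/* Send side: trade the guest address of the request for a transaction id. */
u64 rqst_id = vmbus_next_request_id(channel, (unsigned long)request);

if (rqst_id == VMBUS_RQST_ERROR)        /* id space exhausted */
        return -EAGAIN;
ret = vmbus_sendpacket(channel, buf, len, rqst_id, VM_PKT_DATA_INBAND,
                       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

/* Completion side: recover the address from the returned transaction id. */
request = (void *)(unsigned long)vmbus_request_addr(channel, desc->trans_id);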
+
+static inline bool is_co_ring_buffer(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER);
+}
+
+static inline bool is_co_external_memory(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY);
+}
+
+static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+}
+
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
- return !!(c->offermsg.offer.chn_flags &
- VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+ return is_hvsock_offer(&c->offermsg);
}
-static inline void set_channel_affinity_state(struct vmbus_channel *c,
- enum hv_numa_policy policy)
+static inline bool is_sub_channel(const struct vmbus_channel *c)
{
- c->affinity_policy = policy;
+ return c->offermsg.offer.sub_channel_index != 0;
}
static inline void set_channel_read_mode(struct vmbus_channel *c,
@@ -914,20 +1092,25 @@ static inline void *get_per_channel_state(struct vmbus_channel *c)
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
u32 size)
{
- c->outbound.ring_buffer->pending_send_sz = size;
-}
+ unsigned long flags;
-static inline void set_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = true;
-}
+ if (size) {
+ spin_lock_irqsave(&c->outbound.ring_lock, flags);
+ ++c->out_full_total;
-static inline void clear_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = false;
+ if (!c->out_full_flag) {
+ ++c->out_full_first;
+ c->out_full_flag = true;
+ }
+ spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
+ } else {
+ c->out_full_flag = false;
+ }
+
+ c->outbound.ring_buffer->pending_send_sz = size;
}
-void vmbus_onmessage(void *context);
+void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
int vmbus_request_offers(void);
@@ -941,27 +1124,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *));
-/*
- * Retrieve the (sub) channel on which to send an outgoing request.
- * When a primary channel has multiple sub-channels, we choose a
- * channel whose VCPU binding is closest to the VCPU on which
- * this call is being made.
- */
-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
-
-/*
- * Check if sub-channels have already been offerred. This API will be useful
- * when the driver is unloaded after establishing sub-channels. In this case,
- * when the driver is re-loaded, the driver would have to check if the
- * subchannels have already been established before attempting to request
- * the creation of sub-channels.
- * This function returns TRUE to indicate that subchannels have already been
- * created.
- * This function should be invoked after setting the callback function for
- * sub-channel creation.
- */
-bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
-
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
u16 type;
@@ -998,6 +1160,14 @@ struct vmbus_packet_mpb_array {
struct hv_mpb_array range;
} __packed;
+int vmbus_alloc_ring(struct vmbus_channel *channel,
+ u32 send_size, u32 recv_size);
+void vmbus_free_ring(struct vmbus_channel *channel);
+
+int vmbus_connect_ring(struct vmbus_channel *channel,
+ void (*onchannel_callback)(void *context),
+ void *context);
+int vmbus_disconnect_ring(struct vmbus_channel *channel);
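+
These four split the work of vmbus_open() below so that, for example, a ring buffer can be kept across a vmbus_disconnect_ring()/vmbus_connect_ring() cycle; a hypothetical open path:

ret = vmbus_alloc_ring(channel, VMBUS_RING_SIZE(16 * 1024),
                       VMBUS_RING_SIZE(16 * 1024));
if (ret)
        return ret;

ret = vmbus_connect_ring(channel, foo_onchannel_callback, foo_ctx);
if (ret)
        vmbus_free_ring(channel);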
extern int vmbus_open(struct vmbus_channel *channel,
u32 send_ringbuffersize,
@@ -1009,41 +1179,20 @@ extern int vmbus_open(struct vmbus_channel *channel,
extern void vmbus_close(struct vmbus_channel *channel);
-extern int vmbus_sendpacket(struct vmbus_channel *channel,
+extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
void *buffer,
u32 bufferLen,
u64 requestid,
+ u64 *trans_id,
enum vmbus_packet_type type,
u32 flags);
-
-extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
+extern int vmbus_sendpacket(struct vmbus_channel *channel,
void *buffer,
u32 bufferLen,
u64 requestid,
enum vmbus_packet_type type,
u32 flags);
-extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount,
- void *buffer,
- u32 bufferlen,
- u64 requestid);
-
-extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount,
- void *buffer,
- u32 bufferlen,
- u64 requestid,
- u32 flags);
-
-extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
- struct hv_multipage_buffer *mpb,
- void *buffer,
- u32 bufferlen,
- u64 requestid);
-
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *mpb,
u32 desc_size,
@@ -1054,10 +1203,12 @@ extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
void *kbuffer,
u32 size,
- u32 *gpadl_handle);
+ struct vmbus_gpadl *gpadl);
extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
- u32 gpadl_handle);
+ struct vmbus_gpadl *gpadl);
+
+void vmbus_reset_channel_cb(struct vmbus_channel *channel);
extern int vmbus_recvpacket(struct vmbus_channel *channel,
void *buffer,
@@ -1071,9 +1222,6 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
u32 *buffer_actual_len,
u64 *requestid);
-
-extern void vmbus_ontimer(unsigned long data);
-
/* Base driver object */
struct hv_driver {
const char *name;
@@ -1093,7 +1241,7 @@ struct hv_driver {
bool hvsock;
/* the device type supported by this driver */
- uuid_le dev_type;
+ guid_t dev_type;
const struct hv_vmbus_device_id *id_table;
struct device_driver driver;
@@ -1105,36 +1253,44 @@ struct hv_driver {
} dynids;
int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
- int (*remove)(struct hv_device *);
+ void (*remove)(struct hv_device *dev);
void (*shutdown)(struct hv_device *);
+ int (*suspend)(struct hv_device *);
+ int (*resume)(struct hv_device *);
+
};
/* Base device object */
struct hv_device {
/* the device type id of this device */
- uuid_le dev_type;
+ guid_t dev_type;
/* the device instance id of this device */
- uuid_le dev_instance;
+ guid_t dev_instance;
u16 vendor_id;
u16 device_id;
struct device device;
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
struct vmbus_channel *channel;
-};
+ struct kset *channels_kset;
+ struct device_dma_parameters dma_parms;
+ u64 dma_mask;
+ /* place holder to keep track of the dir for hv device in debugfs */
+ struct dentry *debug_dir;
-static inline struct hv_device *device_to_hv_device(struct device *d)
-{
- return container_of(d, struct hv_device, device);
-}
+};
-static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
-{
- return container_of(d, struct hv_driver, driver);
-}
+
+#define device_to_hv_device(d) container_of_const(d, struct hv_device, device)
+#define drv_to_hv_drv(d) container_of_const(d, struct hv_driver, driver)
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
@@ -1146,6 +1302,8 @@ static inline void *hv_get_drvdata(struct hv_device *dev)
return dev_get_drvdata(&dev->device);
}
+struct device *hv_get_vmbus_root_device(void);
+
struct hv_ring_buffer_debug_info {
u32 current_interrupt_mask;
u32 current_read_index;
@@ -1154,8 +1312,11 @@ struct hv_ring_buffer_debug_info {
u32 bytes_avail_towrite;
};
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
- struct hv_ring_buffer_debug_info *debug_info);
+
+int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info);
+
+bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
/* Vmbus interface */
#define vmbus_driver_register(driver) \
@@ -1172,8 +1333,6 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t size, resource_size_t align,
bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
-int vmbus_cpu_number_to_vp_number(int cpu_number);
-u64 hv_do_hypercall(u64 control, void *input, void *output);
/*
* GUID definitions of various offer types - services offered to the guest.
@@ -1184,102 +1343,102 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
* {f8615163-df3e-46c5-913f-f2d2f965ed0e}
*/
#define HV_NIC_GUID \
- .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
- 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
+ .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
+ 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
/*
* IDE GUID
* {32412632-86cb-44a2-9b5c-50d1417354f5}
*/
#define HV_IDE_GUID \
- .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
- 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
+ .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
+ 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
/*
* SCSI GUID
* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
*/
#define HV_SCSI_GUID \
- .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
- 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
+ .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
+ 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
/*
* Shutdown GUID
* {0e0b6031-5213-4934-818b-38d90ced39db}
*/
#define HV_SHUTDOWN_GUID \
- .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
- 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
+ .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
+ 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
/*
* Time Synch GUID
* {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
*/
#define HV_TS_GUID \
- .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
- 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
+ .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
+ 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
/*
* Heartbeat GUID
* {57164f39-9115-4e78-ab55-382f3bd5422d}
*/
#define HV_HEART_BEAT_GUID \
- .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
- 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
+ .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
+ 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
/*
* KVP GUID
* {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
*/
#define HV_KVP_GUID \
- .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
- 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
+ .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
+ 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
/*
* Dynamic memory GUID
* {525074dc-8985-46e2-8057-a307dc18a502}
*/
#define HV_DM_GUID \
- .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
- 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
+ .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
+ 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
/*
* Mouse GUID
* {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
*/
#define HV_MOUSE_GUID \
- .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
- 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
+ .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
+ 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
/*
* Keyboard GUID
* {f912ad6d-2b17-48ea-bd65-f927a61c7684}
*/
#define HV_KBD_GUID \
- .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
- 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
+ .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
+ 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
/*
* VSS (Backup/Restore) GUID
*/
#define HV_VSS_GUID \
- .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
- 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
+ .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
+ 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
* Synthetic Video GUID
* {DA0A7802-E377-4aac-8E77-0558EB1073F8}
*/
#define HV_SYNTHVID_GUID \
- .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
- 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
+ .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
+ 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
/*
* Synthetic FC GUID
* {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
*/
#define HV_SYNTHFC_GUID \
- .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
- 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
+ .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
+ 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
/*
* Guest File Copy Service
@@ -1287,16 +1446,16 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
*/
#define HV_FCOPY_GUID \
- .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
- 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
+ .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
+ 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
/*
* NetworkDirect. This is the guest RDMA service.
* {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
*/
#define HV_ND_GUID \
- .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
- 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
+ .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
+ 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
/*
* PCI Express Pass Through
@@ -1304,29 +1463,35 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
*/
#define HV_PCIE_GUID \
- .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
- 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
+ .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
+ 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
- * Linux doesn't support the 3 devices: the first two are for
- * Automatic Virtual Machine Activation, and the third is for
- * Remote Desktop Virtualization.
+ * Linux doesn't support these 4 devices: the first two are for
+ * Automatic Virtual Machine Activation, the third is for
+ * Remote Desktop Virtualization, and the fourth is Initial
+ * Machine Configuration (IMC) used only by Windows guests.
* {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
* {3375baf4-9e15-4b30-b765-67acb10d607b}
* {276aacf4-ac15-426c-98dd-7521ad3f01fe}
+ * {c376c1c3-d276-48d2-90a9-c04748072c60}
*/
#define HV_AVMA1_GUID \
- .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
- 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
+ .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
+ 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
#define HV_AVMA2_GUID \
- .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
- 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
+ .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
+ 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
#define HV_RDV_GUID \
- .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
- 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+ .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
+ 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+
+#define HV_IMC_GUID \
+ .guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
+ 0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)
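+
These .guid initializers are meant to be dropped into a driver's match table; mirroring what the netvsc driver does with HV_NIC_GUID:

static const struct hv_vmbus_device_id id_table[] = {
        /* Synthetic networking: f8615163-df3e-46c5-913f-f2d2f965ed0e */
        { HV_NIC_GUID, },
        { },
};
MODULE_DEVICE_TABLE(vmbus, id_table);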
/*
* Common header for Hyper-V ICs
@@ -1338,6 +1503,7 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
#define ICMSGTYPE_SHUTDOWN 3
#define ICMSGTYPE_TIMESYNC 4
#define ICMSGTYPE_VSS 5
+#define ICMSGTYPE_FCOPY 7
#define ICMSGHDRFLAG_TRANSACTION 1
#define ICMSGHDRFLAG_REQUEST 2
@@ -1355,7 +1521,10 @@ struct hv_util_service {
void *channel;
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
+ int (*util_init_transport)(void);
void (*util_deinit)(void);
+ int (*util_pre_suspend)(void);
+ int (*util_pre_resume)(void);
};
struct vmbuspipe_hdr {
@@ -1379,11 +1548,17 @@ struct icmsg_hdr {
u8 reserved[2];
} __packed;
+#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
+#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
+#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
+ (ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
+ (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
+
struct icmsg_negotiate {
u16 icframe_vercnt;
u16 icmsg_vercnt;
u32 reserved;
- struct ic_version icversion_data[1]; /* any size array */
+ struct ic_version icversion_data[]; /* any size array */
} __packed;
struct shutdown_msg_data {
@@ -1428,18 +1603,23 @@ struct ictimesync_ref_data {
struct hyperv_service_callback {
u8 msg_type;
char *log_msg;
- uuid_le data;
+ guid_t data;
struct vmbus_channel *channel;
void (*callback)(void *context);
};
+struct hv_dma_range {
+ dma_addr_t dma;
+ u32 mapping_size;
+};
+
#define MAX_SRV_VER 0x7ffffff
-extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
const int *fw_version, int fw_vercnt,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void hv_process_channel_removal(struct vmbus_channel *channel);
void vmbus_setevent(struct vmbus_channel *channel);
/*
@@ -1448,9 +1628,11 @@ void vmbus_setevent(struct vmbus_channel *channel);
extern __u32 vmbus_proto_version;
-int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
- const uuid_le *shv_host_servie_id);
+int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
+ const guid_t *shv_host_servie_id);
+int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
+int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu);
/* Get the start of the ring buffer. */
static inline void *
@@ -1505,6 +1687,11 @@ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
return (desc->len8 << 3) - (desc->offset8 << 3);
}
+/* Get packet length associated with descriptor */
+static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
+{
+ return desc->len8 << 3;
+}
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);
@@ -1515,10 +1702,6 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
void hv_pkt_iter_close(struct vmbus_channel *channel);
-/*
- * Get next packet descriptor from iterator
- * If at end of list, return NULL and update host.
- */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
const struct vmpacket_descriptor *pkt)
@@ -1536,4 +1719,52 @@ hv_pkt_iter_next(struct vmbus_channel *channel,
for (pkt = hv_pkt_iter_first(channel); pkt; \
pkt = hv_pkt_iter_next(channel, pkt))
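+
The loop above is the body of the foreach_vmbus_pkt() helper; a channel callback typically drains the ring with it (foo_handle_pkt is hypothetical):

struct vmpacket_descriptor *pkt;

foreach_vmbus_pkt(pkt, channel) {
        void *data = (char *)pkt + (pkt->offset8 << 3); /* payload start */
        u32 len = hv_pkt_datalen(pkt);

        foo_handle_pkt(pkt->type, data, len);
}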
+/*
+ * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
+ * sends requests to read and write blocks. Each block must be 128 bytes or
+ * smaller. Optionally, the VF driver can register a callback function which
+ * will be invoked when the host says that one or more of the first 64 block
+ * IDs is "invalid" which means that the VF driver should reread them.
+ */
+#define HV_CONFIG_BLOCK_SIZE_MAX 128
+
+int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
+ unsigned int block_id, unsigned int *bytes_returned);
+int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
+ unsigned int block_id);
+int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask));
+
+struct hyperv_pci_block_ops {
+ int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
+ unsigned int block_id, unsigned int *bytes_returned);
+ int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
+ unsigned int block_id);
+ int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask));
+};
+
+extern struct hyperv_pci_block_ops hvpci_block_ops;
+
+static inline unsigned long virt_to_hvpfn(void *addr)
+{
+ phys_addr_t paddr;
+
+ if (is_vmalloc_addr(addr))
+ paddr = page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
+ else
+ paddr = __pa(addr);
+
+ return paddr >> HV_HYP_PAGE_SHIFT;
+}
+
+#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
+#define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
+#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
+#define HVPFN_DOWN(x) ((x) >> HV_HYP_PAGE_SHIFT)
+#define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
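+
On x86 (PAGE_SIZE == HV_HYP_PAGE_SIZE == 4K) these collapse to the usual PFN macros; on an arm64 kernel with 64K pages they work out to, for example:

/* PAGE_SIZE = 64K, HV_HYP_PAGE_SIZE = 4K:
 * NR_HV_HYP_PAGES_IN_PAGE = 16
 * page_to_hvpfn(page)     = page_to_pfn(page) * 16
 * HVPFN_UP(9000)          = (9000 + 4095) >> 12 = 3 Hyper-V pages
 */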
+
#endif /* _HYPERV_H */
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index 3fa5ef2b3759..be5417303ecf 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_HYPEVISOR_H
#define __LINUX_HYPEVISOR_H
@@ -6,12 +7,40 @@
* Juergen Gross <jgross@suse.com>
*/
-#ifdef CONFIG_HYPERVISOR_GUEST
-#include <asm/hypervisor.h>
-#else
+#ifdef CONFIG_X86
+
+#include <asm/jailhouse_para.h>
+#include <asm/x86_init.h>
+
static inline void hypervisor_pin_vcpu(int cpu)
{
+ x86_platform.hyper.pin_vcpu(cpu);
+}
+
+#else /* !CONFIG_X86 */
+
+#include <linux/of.h>
+
+static inline void hypervisor_pin_vcpu(int cpu)
+{
+}
+
+static inline bool jailhouse_paravirt(void)
+{
+ return of_find_compatible_node(NULL, NULL, "jailhouse,cell");
+}
+
+#endif /* !CONFIG_X86 */
+
+static inline bool hypervisor_isolated_pci_functions(void)
+{
+ if (IS_ENABLED(CONFIG_S390))
+ return true;
+
+ if (IS_ENABLED(CONFIG_LOONGARCH))
+ return true;
+
+ return jailhouse_paravirt();
}
-#endif
#endif /* __LINUX_HYPEVISOR_H */
diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h
index 63904ba6887e..7fd5575a368f 100644
--- a/include/linux/i2c-algo-bit.h
+++ b/include/linux/i2c-algo-bit.h
@@ -1,30 +1,17 @@
-/* ------------------------------------------------------------------------- */
-/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */
-/* ------------------------------------------------------------------------- */
-/* Copyright (C) 1995-99 Simon G. Vogl
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA. */
-/* ------------------------------------------------------------------------- */
-
-/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
- Frodo Looijaard <frodol@dds.nl> */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * i2c-algo-bit.h: i2c driver algorithms for bit-shift adapters
+ *
+ * Copyright (C) 1995-99 Simon G. Vogl
+ * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
+ * Frodo Looijaard <frodol@dds.nl>
+ */
#ifndef _LINUX_I2C_ALGO_BIT_H
#define _LINUX_I2C_ALGO_BIT_H
+#include <linux/i2c.h>
+
/* --- Defines for bit-adapters --------------------------------------- */
/*
* This struct contains the hw-dependent functions of bit-style adapters to
@@ -46,6 +33,7 @@ struct i2c_algo_bit_data {
minimum 5 us for standard-mode I2C and SMBus,
maximum 50 us for SMBus */
int timeout; /* in jiffies */
+ bool can_do_atomic; /* callbacks don't sleep, we can be atomic */
};
int i2c_bit_add_bus(struct i2c_adapter *);
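
A minimal sketch of a bit-banged bus built on this structure, assuming its long-standing setsda/setscl/getsda callback members, which the hunk above elides; the pin-toggling callbacks are hypothetical stubs:

#include <linux/i2c-algo-bit.h>
#include <linux/jiffies.h>
#include <linux/module.h>

static void example_setsda(void *data, int state) { /* drive SDA pin */ }
static void example_setscl(void *data, int state) { /* drive SCL pin */ }
static int example_getsda(void *data) { return 1; /* sample SDA pin */ }

static struct i2c_algo_bit_data example_bit_data = {
        .setsda        = example_setsda,
        .setscl        = example_setscl,
        .getsda        = example_getsda,
        .udelay        = 5,             /* 5 us half-period, ~100 kHz */
        .timeout       = HZ / 10,       /* 100 ms, in jiffies */
        .can_do_atomic = true,          /* the callbacks above never sleep */
};

static struct i2c_adapter example_adap = {
        .owner     = THIS_MODULE,
        .name      = "example bit-bang bus",
        .algo_data = &example_bit_data,
};

/* From probe: example_adap.dev.parent = dev; i2c_bit_add_bus(&example_adap); */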
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index a3c3ecd59f08..e305bf32e40a 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_I2C_ALGO_PCA_H
#define _LINUX_I2C_ALGO_PCA_H
@@ -52,17 +53,32 @@
#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
+/**
+ * struct pca_i2c_bus_settings - The configured PCA i2c bus settings
+ * @mode: Configured i2c bus mode
+ * @tlow: Configured SCL LOW period
+ * @thi: Configured SCL HIGH period
+ * @clock_freq: The configured clock frequency
+ */
+struct pca_i2c_bus_settings {
+ int mode;
+ int tlow;
+ int thi;
+ int clock_freq;
+};
+
struct i2c_algo_pca_data {
void *data; /* private low level data */
void (*write_byte) (void *data, int reg, int val);
int (*read_byte) (void *data, int reg);
- int (*wait_for_completion) (void *data);
+ int (*wait_for_completion_cb) (void *data);
void (*reset_chip) (void *data);
/* For PCA9564, use one of the predefined frequencies:
* 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000
* For PCA9665, use the frequency you want here. */
unsigned int i2c_clock;
unsigned int chip;
+ struct pca_i2c_bus_settings bus_settings;
};
int i2c_pca_add_bus(struct i2c_adapter *);
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 538e8f41a319..696e7de83c79 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -1,23 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ------------------------------------------------------------------------- */
/* adap-pcf.h i2c driver algorithms for PCF8584 adapters */
/* ------------------------------------------------------------------------- */
/* Copyright (C) 1995-97 Simon G. Vogl
1998-99 Hans Berglund
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA. */
+ */
/* ------------------------------------------------------------------------- */
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
diff --git a/include/linux/i2c-atr.h b/include/linux/i2c-atr.h
new file mode 100644
index 000000000000..2bb54dc87c8e
--- /dev/null
+++ b/include/linux/i2c-atr.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * I2C Address Translator
+ *
+ * Copyright (c) 2019,2022 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2022,2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Based on i2c-mux.h
+ */
+
+#ifndef _LINUX_I2C_ATR_H
+#define _LINUX_I2C_ATR_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+struct device;
+struct fwnode_handle;
+struct i2c_atr;
+
+/**
+ * enum i2c_atr_flags - Flags for an I2C ATR driver
+ *
+ * @I2C_ATR_F_STATIC: ATR does not support dynamic mapping, use static mapping.
+ * Mappings will only be added or removed as a result of
+ * devices being added or removed from a child bus.
+ * The ATR pool will have to be big enough to accommodate all
+ * devices expected to be added to the child buses.
+ * @I2C_ATR_F_PASSTHROUGH: Allow unmapped incoming addresses to pass through
+ */
+enum i2c_atr_flags {
+ I2C_ATR_F_STATIC = BIT(0),
+ I2C_ATR_F_PASSTHROUGH = BIT(1),
+};
+
+/**
+ * struct i2c_atr_ops - Callbacks from ATR to the device driver.
+ * @attach_addr: Notify the driver of a new device connected on a child
+ * bus, with the alias assigned to it. The driver must
+ * configure the hardware to use the alias.
+ * @detach_addr: Notify the driver of a device getting disconnected. The
+ * driver must configure the hardware to stop using the
+ * alias.
+ *
+ * @attach_addr returns 0 on success or a negative error code on failure.
+ */
+struct i2c_atr_ops {
+ int (*attach_addr)(struct i2c_atr *atr, u32 chan_id,
+ u16 addr, u16 alias);
+ void (*detach_addr)(struct i2c_atr *atr, u32 chan_id,
+ u16 addr);
+};
+
+/**
+ * struct i2c_atr_adap_desc - An ATR downstream bus descriptor
+ * @chan_id: Index of the new adapter (0 .. max_adapters-1). This value is
+ * passed to the callbacks in `struct i2c_atr_ops`.
+ * @parent: The device used as the parent of the new i2c adapter, or NULL
+ * to use the i2c-atr device as the parent.
+ * @bus_handle: The fwnode handle that points to the adapter's i2c
+ * peripherals, or NULL.
+ * @num_aliases: The number of aliases in this adapter's private alias pool. Set
+ * to zero if this adapter uses the ATR's global alias pool.
+ * @aliases: An optional array of private aliases used by the adapter
+ * instead of the ATR's global pool of aliases. Must contain
+ * exactly num_aliases entries if num_aliases > 0, is ignored
+ * otherwise.
+ */
+struct i2c_atr_adap_desc {
+ u32 chan_id;
+ struct device *parent;
+ struct fwnode_handle *bus_handle;
+ size_t num_aliases;
+ u16 *aliases;
+};
+
+/**
+ * i2c_atr_new() - Allocate and initialize an I2C ATR helper.
+ * @parent: The parent (upstream) adapter
+ * @dev: The device acting as an ATR
+ * @ops: Driver-specific callbacks
+ * @max_adapters: Maximum number of child adapters
+ * @flags: Flags for ATR
+ *
+ * The new ATR helper is connected to the parent adapter but has no child
+ * adapters. Call i2c_atr_add_adapter() to add some.
+ *
+ * Call i2c_atr_delete() to remove.
+ *
+ * Return: pointer to the new ATR helper object, or an ERR_PTR() on failure
+ */
+struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
+ const struct i2c_atr_ops *ops, int max_adapters,
+ u32 flags);
+
+/**
+ * i2c_atr_delete - Delete an I2C ATR helper.
+ * @atr: I2C ATR helper to be deleted.
+ *
+ * Precondition: all the adapters added with i2c_atr_add_adapter() must be
+ * removed by calling i2c_atr_del_adapter().
+ */
+void i2c_atr_delete(struct i2c_atr *atr);
+
+/**
+ * i2c_atr_add_adapter - Create a child ("downstream") I2C bus.
+ * @atr: The I2C ATR
+ * @desc: An ATR adapter descriptor
+ *
+ * After calling this function a new i2c bus will appear. Adding and removing
+ * devices on the downstream bus will result in calls to the
+ * &i2c_atr_ops->attach_addr and &i2c_atr_ops->detach_addr callbacks for the
+ * driver to assign an alias to the device.
+ *
+ * The adapter's fwnode is set to @desc->bus_handle, or if that is NULL the
+ * function looks for a child node whose 'reg' property matches the chan_id
+ * under the i2c-atr device's 'i2c-atr' node.
+ *
+ * Call i2c_atr_del_adapter() to remove the adapter.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int i2c_atr_add_adapter(struct i2c_atr *atr, struct i2c_atr_adap_desc *desc);
+
+/**
+ * i2c_atr_del_adapter - Remove a child ("downstream") I2C bus added by
+ * i2c_atr_add_adapter(). If no I2C bus has been added
+ * this function is a no-op.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the adapter to be removed (0 .. max_adapters-1)
+ */
+void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id);
+
+/**
+ * i2c_atr_set_driver_data - Set private driver data to the i2c-atr instance.
+ * @atr: The I2C ATR
+ * @data: Pointer to the data to store
+ */
+void i2c_atr_set_driver_data(struct i2c_atr *atr, void *data);
+
+/**
+ * i2c_atr_get_driver_data - Get the stored driver data.
+ * @atr: The I2C ATR
+ *
+ * Return: Pointer to the stored data
+ */
+void *i2c_atr_get_driver_data(struct i2c_atr *atr);
+
+#endif /* _LINUX_I2C_ATR_H */
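
To make the callback contract concrete, a hedged sketch of an ATR driver's ops; the hardware mapping helpers and state structure are hypothetical:

#include <linux/i2c-atr.h>

struct example_hw;                              /* hypothetical device state */
int example_hw_map(struct example_hw *hw, u32 chan, u16 addr, u16 alias);
void example_hw_unmap(struct example_hw *hw, u32 chan, u16 addr);

/* Program the hardware translation table when the ATR core assigns an
 * alias to a remote peripheral, and tear it down again on detach. */
static int example_attach_addr(struct i2c_atr *atr, u32 chan_id,
                               u16 addr, u16 alias)
{
        return example_hw_map(i2c_atr_get_driver_data(atr),
                              chan_id, addr, alias);
}

static void example_detach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr)
{
        example_hw_unmap(i2c_atr_get_driver_data(atr), chan_id, addr);
}

static const struct i2c_atr_ops example_atr_ops = {
        .attach_addr = example_attach_addr,
        .detach_addr = example_detach_addr,
};

Registration would then follow the pattern described in the kerneldoc above: i2c_atr_new(parent, dev, &example_atr_ops, 2, 0), i2c_atr_set_driver_data(), and one i2c_atr_add_adapter() call per downstream channel.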
diff --git a/include/linux/i2c-dev.h b/include/linux/i2c-dev.h
index 79727144c5cd..4c86fce30a51 100644
--- a/include/linux/i2c-dev.h
+++ b/include/linux/i2c-dev.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
i2c-dev.h - i2c-bus driver, char device interface
Copyright (C) 1995-97 Simon G. Vogl
Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA.
*/
#ifndef _LINUX_I2C_DEV_H
#define _LINUX_I2C_DEV_H
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h
deleted file mode 100644
index a65c86429e84..000000000000
--- a/include/linux/i2c-mux-pinctrl.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * i2c-mux-pinctrl platform data
- *
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _LINUX_I2C_MUX_PINCTRL_H
-#define _LINUX_I2C_MUX_PINCTRL_H
-
-/**
- * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl
- * @parent_bus_num: Parent I2C bus number
- * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic.
- * @bus_count: Number of child busses. Also the number of elements in
- * @pinctrl_states
- * @pinctrl_states: The names of the pinctrl state to select for each child bus
- * @pinctrl_state_idle: The pinctrl state to select when no child bus is being
- * accessed. If NULL, the most recently used pinctrl state will be left
- * selected.
- */
-struct i2c_mux_pinctrl_platform_data {
- int parent_bus_num;
- int base_bus_num;
- int bus_count;
- const char **pinctrl_states;
- const char *pinctrl_state_idle;
-};
-
-#endif
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index bd74d5706f3b..1784ac7afb11 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* i2c-mux.h - functions for the i2c-bus mux support
@@ -5,21 +6,6 @@
* Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
* Michael Lawnick <michael.lawnick.ext@nsn.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#ifndef _LINUX_I2C_MUX_H
@@ -43,7 +29,7 @@ struct i2c_mux_core {
int num_adapters;
int max_adapters;
- struct i2c_adapter *adapter[0];
+ struct i2c_adapter *adapter[];
};
struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
@@ -70,8 +56,7 @@ struct i2c_adapter *i2c_root_adapter(struct device *dev);
* callback functions to perform hardware-specific mux control.
*/
int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
- u32 force_nr, u32 chan_id,
- unsigned int class);
+ u32 force_nr, u32 chan_id);
void i2c_mux_del_adapters(struct i2c_mux_core *muxc);
diff --git a/include/linux/i2c-of-prober.h b/include/linux/i2c-of-prober.h
new file mode 100644
index 000000000000..bb6d47f50ee5
--- /dev/null
+++ b/include/linux/i2c-of-prober.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions for the Linux I2C OF component prober
+ *
+ * Copyright (C) 2024 Google LLC
+ */
+
+#ifndef _LINUX_I2C_OF_PROBER_H
+#define _LINUX_I2C_OF_PROBER_H
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+struct device;
+struct device_node;
+
+/**
+ * struct i2c_of_probe_ops - I2C OF component prober callbacks
+ *
+ * A set of callbacks to be used by i2c_of_probe_component().
+ *
+ * All callbacks are optional. Callbacks are called only once per run, and are
+ * used in the order they are defined in this structure.
+ *
+ * All callbacks that have return values shall return %0 on success,
+ * or a negative error number on failure.
+ *
+ * The @dev parameter passed to the callbacks is the same as @dev passed to
+ * i2c_of_probe_component(). It should only be used for dev_printk() calls
+ * and nothing else, especially not managed device resource (devres) APIs.
+ */
+struct i2c_of_probe_ops {
+ /**
+ * @enable: Retrieve and enable resources so that the components respond to probes.
+ *
+ * It is OK for this callback to return -EPROBE_DEFER since the intended use
+ * includes retrieving resources and enabling them. If this fails, resources
+ * should be reverted to their initial state and released before returning.
+ */
+ int (*enable)(struct device *dev, struct device_node *bus_node, void *data);
+
+ /**
+ * @cleanup_early: Release exclusive resources prior to calling probe() on a
+ * detected component.
+ *
+ * Only called if a matching component is actually found. If none are found,
+ * resources that would have been released in this callback should be
+ * released in @cleanup instead.
+ */
+ void (*cleanup_early)(struct device *dev, void *data);
+
+ /**
+ * @cleanup: Opposite of @enable to balance refcounts and free resources after probing.
+ *
+ * Should check if resources were already freed by @cleanup_early.
+ */
+ void (*cleanup)(struct device *dev, void *data);
+};
+
+/**
+ * struct i2c_of_probe_cfg - I2C OF component prober configuration
+ * @ops: Callbacks for the prober to use.
+ * @type: A string to match the device node name prefix to probe for.
+ */
+struct i2c_of_probe_cfg {
+ const struct i2c_of_probe_ops *ops;
+ const char *type;
+};
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+
+int i2c_of_probe_component(struct device *dev, const struct i2c_of_probe_cfg *cfg, void *ctx);
+
+/**
+ * DOC: I2C OF component prober simple helpers
+ *
+ * Components such as trackpads are commonly connected to a device's baseboard
+ * with a 6-pin ribbon cable. That gives at most one voltage supply and one
+ * GPIO (commonly an "enable" or "reset" line) besides the I2C bus, interrupt
+ * pin, and common ground. Touchscreens, while integrated into the display
+ * panel's connection, typically have the same set of connections.
+ *
+ * A simple set of helpers is provided here for use with the I2C OF component
+ * prober. This implementation targets such components, allowing for at most
+ * one regulator supply.
+ *
+ * The following helpers are provided:
+ * * i2c_of_probe_simple_enable()
+ * * i2c_of_probe_simple_cleanup_early()
+ * * i2c_of_probe_simple_cleanup()
+ */
+
+/**
+ * struct i2c_of_probe_simple_opts - Options for simple I2C component prober callbacks
+ * @res_node_compatible: Compatible string of device node to retrieve resources from.
+ * @supply_name: Name of regulator supply.
+ * @gpio_name: Name of GPIO. NULL if no GPIO line is used. Empty string ("") if GPIO
+ * line is unnamed.
+ * @post_power_on_delay_ms: Delay after regulators are powered on. Passed to msleep().
+ * @post_gpio_config_delay_ms: Delay after GPIO is configured. Passed to msleep().
+ * @gpio_assert_to_enable: %true if GPIO should be asserted, i.e. set to logical high,
+ * to enable the component.
+ *
+ * This describes power sequences common for the class of components supported by the
+ * simple component prober:
+ * * @gpio_name is configured to the non-active setting according to @gpio_assert_to_enable.
+ * * @supply_name regulator supply is enabled.
+ * * Wait for @post_power_on_delay_ms to pass.
+ * * @gpio_name is configured to the active setting according to @gpio_assert_to_enable.
+ * * Wait for @post_gpio_config_delay_ms to pass.
+ */
+struct i2c_of_probe_simple_opts {
+ const char *res_node_compatible;
+ const char *supply_name;
+ const char *gpio_name;
+ unsigned int post_power_on_delay_ms;
+ unsigned int post_gpio_config_delay_ms;
+ bool gpio_assert_to_enable;
+};
+
+struct gpio_desc;
+struct regulator;
+
+struct i2c_of_probe_simple_ctx {
+ /* public: provided by user before helpers are used. */
+ const struct i2c_of_probe_simple_opts *opts;
+ /* private: internal fields for helpers. */
+ struct regulator *supply;
+ struct gpio_desc *gpiod;
+};
+
+int i2c_of_probe_simple_enable(struct device *dev, struct device_node *bus_node, void *data);
+void i2c_of_probe_simple_cleanup_early(struct device *dev, void *data);
+void i2c_of_probe_simple_cleanup(struct device *dev, void *data);
+
+extern struct i2c_of_probe_ops i2c_of_probe_simple_ops;
+
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+#endif /* _LINUX_I2C_OF_PROBER_H */
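
A hedged sketch of platform code driving the simple helpers; the compatible string, node-name prefix, supply and GPIO names are hypothetical:

#include <linux/i2c-of-prober.h>

static const struct i2c_of_probe_simple_opts example_opts = {
        .res_node_compatible       = "vendor,trackpad",  /* hypothetical */
        .supply_name               = "vcc",
        .gpio_name                 = "enable",
        .post_power_on_delay_ms    = 10,
        .post_gpio_config_delay_ms = 10,
        .gpio_assert_to_enable     = true,
};

/* Probe the "trackpad" child nodes of an I2C bus, powering the rail and
 * enable line up just long enough to detect which component responds. */
static int example_probe_trackpads(struct device *dev)
{
        struct i2c_of_probe_simple_ctx ctx = { .opts = &example_opts };
        const struct i2c_of_probe_cfg cfg = {
                .ops  = &i2c_of_probe_simple_ops,
                .type = "trackpad",
        };

        return i2c_of_probe_component(dev, &cfg, &ctx);
}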
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
deleted file mode 100644
index 5388326fbbff..000000000000
--- a/include/linux/i2c-pnx.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Header file for I2C support on PNX010x/4008.
- *
- * Author: Dennis Kovalev <dkovalev@ru.mvista.com>
- *
- * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#ifndef __I2C_PNX_H__
-#define __I2C_PNX_H__
-
-struct platform_device;
-struct clk;
-
-struct i2c_pnx_mif {
- int ret; /* Return value */
- int mode; /* Interface mode */
- struct completion complete; /* I/O completion */
- struct timer_list timer; /* Timeout */
- u8 * buf; /* Data buffer */
- int len; /* Length of data buffer */
- int order; /* RX Bytes to order via TX */
-};
-
-struct i2c_pnx_algo_data {
- void __iomem *ioaddr;
- struct i2c_pnx_mif mif;
- int last;
- struct clk *clk;
- struct i2c_adapter adapter;
- int irq;
- u32 timeout;
-};
-
-#endif /* __I2C_PNX_H__ */
diff --git a/include/linux/i2c-pxa.h b/include/linux/i2c-pxa.h
deleted file mode 100644
index 41dcdfe7f625..000000000000
--- a/include/linux/i2c-pxa.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _LINUX_I2C_ALGO_PXA_H
-#define _LINUX_I2C_ALGO_PXA_H
-
-typedef enum i2c_slave_event_e {
- I2C_SLAVE_EVENT_START_READ,
- I2C_SLAVE_EVENT_START_WRITE,
- I2C_SLAVE_EVENT_STOP
-} i2c_slave_event_t;
-
-struct i2c_slave_client {
- void *data;
- void (*event)(void *ptr, i2c_slave_event_t event);
- int (*read) (void *ptr);
- void (*write)(void *ptr, unsigned int val);
-};
-
-#endif /* _LINUX_I2C_ALGO_PXA_H */
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index a1385023a29b..dc1bd2ab4c13 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* i2c-smbus.h - SMBus extensions to the I2C protocol
*
- * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
+ * Copyright (C) 2010-2019 Jean Delvare <jdelvare@suse.de>
*/
#ifndef _LINUX_I2C_SMBUS_H
@@ -29,25 +15,40 @@
/**
* i2c_smbus_alert_setup - platform data for the smbus_alert i2c client
- * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0)
- * triggered
* @irq: IRQ number, if the smbus_alert driver should take care of interrupt
* handling
*
* If irq is not specified, the smbus_alert driver doesn't take care of
* interrupt handling. In that case it is up to the I2C bus driver to either
* handle the interrupts or to poll for alerts.
- *
- * If irq is specified then it it crucial that alert_edge_triggered is
- * properly set.
*/
struct i2c_smbus_alert_setup {
- unsigned int alert_edge_triggered:1;
int irq;
};
-struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
- struct i2c_smbus_alert_setup *setup);
+struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_I2C_SLAVE)
+struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter);
+void i2c_free_slave_host_notify_device(struct i2c_client *client);
+#else
+static inline struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline void i2c_free_slave_host_notify_device(struct i2c_client *client)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI)
+void i2c_register_spd_write_disable(struct i2c_adapter *adap);
+void i2c_register_spd_write_enable(struct i2c_adapter *adap);
+#else
+static inline void i2c_register_spd_write_disable(struct i2c_adapter *adap) { }
+static inline void i2c_register_spd_write_enable(struct i2c_adapter *adap) { }
+#endif
+
#endif /* _LINUX_I2C_SMBUS_H */
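
For illustration, a minimal sketch of wiring up the renamed alert helper from a bus driver; the IRQ plumbing is assumed to come from the caller:

#include <linux/err.h>
#include <linux/i2c-smbus.h>

/* Have the smbus_alert driver service the SMBALERT# line; with irq = 0
 * the bus driver must poll or handle the interrupt itself instead. */
static int example_setup_alert(struct i2c_adapter *adap, int irq)
{
        struct i2c_smbus_alert_setup setup = { .irq = irq };
        struct i2c_client *ara;

        ara = i2c_new_smbus_alert_device(adap, &setup);

        return PTR_ERR_OR_ZERO(ara);
}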
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index d501d3956f13..20fd41b51d5c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -1,103 +1,162 @@
-/* ------------------------------------------------------------------------- */
-/* */
-/* i2c.h - definitions for the i2c-bus interface */
-/* */
-/* ------------------------------------------------------------------------- */
-/* Copyright (C) 1995-2000 Simon G. Vogl
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA. */
-/* ------------------------------------------------------------------------- */
-
-/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
- Frodo Looijaard <frodol@dds.nl> */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * i2c.h - definitions for the Linux i2c bus interface
+ * Copyright (C) 1995-2000 Simon G. Vogl
+ * Copyright (C) 2013-2019 Wolfram Sang <wsa@kernel.org>
+ *
+ * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
+ * Frodo Looijaard <frodol@dds.nl>
+ */
#ifndef _LINUX_I2C_H
#define _LINUX_I2C_H
+#include <linux/acpi.h> /* for acpi_handle */
+#include <linux/bits.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
#include <linux/rtmutex.h>
#include <linux/irqdomain.h> /* for Host Notify IRQ */
#include <linux/of.h> /* for struct device_node */
#include <linux/swab.h> /* for swab16 */
#include <uapi/linux/i2c.h>
-extern struct bus_type i2c_bus_type;
-extern struct device_type i2c_adapter_type;
-extern struct device_type i2c_client_type;
+extern const struct bus_type i2c_bus_type;
+extern const struct device_type i2c_adapter_type;
+extern const struct device_type i2c_client_type;
/* --- General options ------------------------------------------------ */
struct i2c_msg;
-struct i2c_algorithm;
struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
+struct i2c_device_identity;
union i2c_smbus_data;
struct i2c_board_info;
enum i2c_slave_event;
-typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
+typedef int (*i2c_slave_cb_t)(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+
+/* I2C Frequency Modes */
+#define I2C_MAX_STANDARD_MODE_FREQ 100000
+#define I2C_MAX_FAST_MODE_FREQ 400000
+#define I2C_MAX_FAST_MODE_PLUS_FREQ 1000000
+#define I2C_MAX_TURBO_MODE_FREQ 1400000
+#define I2C_MAX_HIGH_SPEED_MODE_FREQ 3400000
+#define I2C_MAX_ULTRA_FAST_MODE_FREQ 5000000
struct module;
struct property_entry;
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
+/* Return the Frequency mode string based on the bus frequency */
+const char *i2c_freq_mode_string(u32 bus_freq_hz);
+
/*
* The master routines are the ones normally used to transmit data to devices
* on a bus (or read from them). Apart from two basic transfer functions to
* transmit one message at a time, a more complex version can be used to
* transmit an arbitrary number of messages without interruption.
- * @count must be be less than 64k since msg.len is u16.
+ * @count must be less than 64k since msg.len is u16.
*/
-extern int i2c_master_send(const struct i2c_client *client, const char *buf,
- int count);
-extern int i2c_master_recv(const struct i2c_client *client, char *buf,
- int count);
+int i2c_transfer_buffer_flags(const struct i2c_client *client,
+ char *buf, int count, u16 flags);
+
+/**
+ * i2c_master_recv - issue a single I2C message in master receive mode
+ * @client: Handle to slave device
+ * @buf: Where to store data read from slave
+ * @count: How many bytes to read, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes read.
+ */
+static inline int i2c_master_recv(const struct i2c_client *client,
+ char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, buf, count, I2C_M_RD);
+};
+
+/**
+ * i2c_master_recv_dmasafe - issue a single I2C message in master receive mode
+ * using a DMA safe buffer
+ * @client: Handle to slave device
+ * @buf: Where to store data read from slave, must be safe to use with DMA
+ * @count: How many bytes to read, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes read.
+ */
+static inline int i2c_master_recv_dmasafe(const struct i2c_client *client,
+ char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, buf, count,
+ I2C_M_RD | I2C_M_DMA_SAFE);
+};
+
+/**
+ * i2c_master_send - issue a single I2C message in master transmit mode
+ * @client: Handle to slave device
+ * @buf: Data that will be written to the slave
+ * @count: How many bytes to write, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes written.
+ */
+static inline int i2c_master_send(const struct i2c_client *client,
+ const char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, (char *)buf, count, 0);
+};
+
+/**
+ * i2c_master_send_dmasafe - issue a single I2C message in master transmit mode
+ * using a DMA safe buffer
+ * @client: Handle to slave device
+ * @buf: Data that will be written to the slave, must be safe to use with DMA
+ * @count: How many bytes to write, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes written.
+ */
+static inline int i2c_master_send_dmasafe(const struct i2c_client *client,
+ const char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, (char *)buf, count,
+ I2C_M_DMA_SAFE);
+};
/* Transfer num messages.
*/
-extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num);
+int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
/* Unlocked flavor */
-extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num);
+int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
/* This is the very generalized SMBus access routine. You probably do not
want to use this, though; one of the functions below may be much easier,
and probably just as fast.
Note that we use i2c_adapter here, because you do not need a specific
smbus adapter to call this function. */
-extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
- unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data *data);
+s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
+
+/* Unlocked flavor */
+s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
-extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
-extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
-extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
- u8 command);
-extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
- u8 command, u8 value);
-extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
- u8 command);
-extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
- u8 command, u16 value);
+u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count);
+s32 i2c_smbus_read_byte(const struct i2c_client *client);
+s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
+s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command);
+s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
+ u8 command, u8 value);
+s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command);
+s32 i2c_smbus_write_word_data(const struct i2c_client *client,
+ u8 command, u16 value);
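
A minimal sketch of the calling convention of these helpers; the register addresses and expected ID value are hypothetical:

#include <linux/i2c.h>

#define EXAMPLE_REG_ID          0x00    /* hypothetical register layout */
#define EXAMPLE_REG_THRESHOLD   0x01

static int example_verify_and_configure(struct i2c_client *client)
{
        s32 id = i2c_smbus_read_byte_data(client, EXAMPLE_REG_ID);

        if (id < 0)
                return id;              /* negative errno from the core */
        if (id != 0x5a)
                return -ENODEV;

        /* 16-bit little-endian write; the _swapped variants below handle
         * big-endian register files. */
        return i2c_smbus_write_word_data(client, EXAMPLE_REG_THRESHOLD, 0x0400);
}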
static inline s32
i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command)
@@ -115,32 +174,68 @@ i2c_smbus_write_word_swapped(const struct i2c_client *client,
}
/* Returns the number of read bytes */
-extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
- u8 command, u8 *values);
-extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
- u8 command, u8 length, const u8 *values);
+s32 i2c_smbus_read_block_data(const struct i2c_client *client,
+ u8 command, u8 *values);
+s32 i2c_smbus_write_block_data(const struct i2c_client *client,
+ u8 command, u8 length, const u8 *values);
/* Returns the number of read bytes */
-extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
- u8 command, u8 length, u8 *values);
-extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
- u8 command, u8 length,
- const u8 *values);
-extern s32
-i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
- u8 command, u8 length, u8 *values);
+s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values);
+s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
+ u8 command, u8 length, const u8 *values);
+s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+ u8 command, u8 length,
+ u8 *values);
+int i2c_get_device_id(const struct i2c_client *client,
+ struct i2c_device_identity *id);
+const struct i2c_device_id *i2c_client_get_device_id(const struct i2c_client *client);
#endif /* I2C */
+/**
+ * struct i2c_device_identity - i2c client device identification
+ * @manufacturer_id: 0 - 4095, database maintained by NXP
+ * @part_id: 0 - 511, according to manufacturer
+ * @die_revision: 0 - 7, according to manufacturer
+ */
+struct i2c_device_identity {
+ u16 manufacturer_id;
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS 0
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_1 1
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_2 2
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_3 3
+#define I2C_DEVICE_ID_RAMTRON_INTERNATIONAL 4
+#define I2C_DEVICE_ID_ANALOG_DEVICES 5
+#define I2C_DEVICE_ID_STMICROELECTRONICS 6
+#define I2C_DEVICE_ID_ON_SEMICONDUCTOR 7
+#define I2C_DEVICE_ID_SPRINTEK_CORPORATION 8
+#define I2C_DEVICE_ID_ESPROS_PHOTONICS_AG 9
+#define I2C_DEVICE_ID_FUJITSU_SEMICONDUCTOR 10
+#define I2C_DEVICE_ID_FLIR 11
+#define I2C_DEVICE_ID_O2MICRO 12
+#define I2C_DEVICE_ID_ATMEL 13
+#define I2C_DEVICE_ID_NONE 0xffff
+ u16 part_id;
+ u8 die_revision;
+};
+
enum i2c_alert_protocol {
I2C_PROTOCOL_SMBUS_ALERT,
I2C_PROTOCOL_SMBUS_HOST_NOTIFY,
};
/**
+ * enum i2c_driver_flags - Flags for an I2C device driver
+ *
+ * @I2C_DRV_ACPI_WAIVE_D0_PROBE: Don't put the device in D0 state for probe
+ */
+enum i2c_driver_flags {
+ I2C_DRV_ACPI_WAIVE_D0_PROBE = BIT(0),
+};
+
+/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
- * @attach_adapter: Callback for bus addition (deprecated)
- * @probe: Callback for device binding - soon to be deprecated
- * @probe_new: New callback for device binding
+ * @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
@@ -150,7 +245,7 @@ enum i2c_alert_protocol {
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
- * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping
+ * @flags: A bitmask of flags defined in &enum i2c_driver_flags
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
@@ -175,22 +270,13 @@ enum i2c_alert_protocol {
struct i2c_driver {
unsigned int class;
- /* Notifies the driver that a new bus has appeared. You should avoid
- * using this, it will be removed in a near future.
- */
- int (*attach_adapter)(struct i2c_adapter *) __deprecated;
-
/* Standard driver model interfaces */
- int (*probe)(struct i2c_client *, const struct i2c_device_id *);
- int (*remove)(struct i2c_client *);
+ int (*probe)(struct i2c_client *client);
+ void (*remove)(struct i2c_client *client);
- /* New driver model interface to aid the seamless removal of the
- * current probe()'s, more commonly unused than used second parameter.
- */
- int (*probe_new)(struct i2c_client *);
/* driver model interfaces that don't relate to enumeration */
- void (*shutdown)(struct i2c_client *);
+ void (*shutdown)(struct i2c_client *client);
/* Alert callback, for example for the SMBus alert protocol.
* The format and meaning of the data value depends on the protocol.
@@ -199,7 +285,7 @@ struct i2c_driver {
* For the SMBus Host Notify protocol, the data corresponds to the
* 16-bit payload data reported by the slave device acting as master.
*/
- void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol,
+ void (*alert)(struct i2c_client *client, enum i2c_alert_protocol protocol,
unsigned int data);
/* a ioctl like command that can be used to perform specific functions
@@ -211,28 +297,32 @@ struct i2c_driver {
const struct i2c_device_id *id_table;
/* Device detection callback for automatic device creation */
- int (*detect)(struct i2c_client *, struct i2c_board_info *);
+ int (*detect)(struct i2c_client *client, struct i2c_board_info *info);
const unsigned short *address_list;
struct list_head clients;
- bool disable_i2c_core_irq_mapping;
+ u32 flags;
};
-#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
+#define to_i2c_driver(d) container_of_const(d, struct i2c_driver, driver)
/**
* struct i2c_client - represent an I2C slave device
- * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address;
- * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking
+ * @flags: see I2C_CLIENT_* for possible flags
* @addr: Address used on the I2C bus connected to the parent adapter.
* @name: Indicates the type of the device, usually a chip name that's
* generic enough to hide second-sourcing and compatible revisions.
* @adapter: manages the bus segment hosting this I2C device
* @dev: Driver model device node for the slave.
+ * @init_irq: IRQ that was set at initialization
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
* userspace_devices list
* @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter
* calls it to pass on slave events to the slave driver.
+ * @devres_group_id: id of the devres group that will be created for resources
+ * acquired when probing this device.
+ * @debugfs: pointer to the debugfs subdirectory which the I2C core created
+ * for this client.
*
* An i2c_client identifies a single device (i.e. chip) connected to an
* i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -240,44 +330,56 @@ struct i2c_driver {
*/
struct i2c_client {
unsigned short flags; /* div., see below */
+#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
+#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
+ /* Must equal I2C_M_TEN below */
+#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
+#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
+#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
+#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
+ /* Must match I2C_M_STOP|IGNORE_NAK */
+
unsigned short addr; /* chip address - NOTE: 7bit */
/* addresses are stored in the */
/* _LOWER_ 7 bits */
char name[I2C_NAME_SIZE];
struct i2c_adapter *adapter; /* the adapter we sit on */
struct device dev; /* the device structure */
+ int init_irq; /* irq set at initialization */
int irq; /* irq issued by device */
struct list_head detected;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
i2c_slave_cb_t slave_cb; /* callback for slave mode */
#endif
+ void *devres_group_id; /* ID of probe devres group */
+ struct dentry *debugfs; /* per-client debugfs dir */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
-extern struct i2c_client *i2c_verify_client(struct device *dev);
-extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
-extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
- const struct i2c_client *client);
+struct i2c_adapter *i2c_verify_adapter(struct device *dev);
+const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+ const struct i2c_client *client);
+
+const void *i2c_get_match_data(const struct i2c_client *client);
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
- struct device * const dev = container_of(kobj, struct device, kobj);
+ struct device * const dev = kobj_to_dev(kobj);
return to_i2c_client(dev);
}
-static inline void *i2c_get_clientdata(const struct i2c_client *dev)
+static inline void *i2c_get_clientdata(const struct i2c_client *client)
{
- return dev_get_drvdata(&dev->dev);
+ return dev_get_drvdata(&client->dev);
}
-static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
+static inline void i2c_set_clientdata(struct i2c_client *client, void *data)
{
- dev_set_drvdata(&dev->dev, data);
+ dev_set_drvdata(&client->dev, data);
}
/* I2C slave support */
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
I2C_SLAVE_READ_REQUESTED,
I2C_SLAVE_WRITE_REQUESTED,
@@ -286,15 +388,12 @@ enum i2c_slave_event {
I2C_SLAVE_STOP,
};
-extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
-extern int i2c_slave_unregister(struct i2c_client *client);
-extern bool i2c_detect_slave_mode(struct device *dev);
-
-static inline int i2c_slave_event(struct i2c_client *client,
- enum i2c_slave_event event, u8 *val)
-{
- return client->slave_cb(client, event, val);
-}
+int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
+int i2c_slave_unregister(struct i2c_client *client);
+int i2c_slave_event(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+bool i2c_detect_slave_mode(struct device *dev);
#else
static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
#endif
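
A hedged sketch of a target ("slave") backend registered through these calls; I2C_SLAVE_WRITE_RECEIVED and I2C_SLAVE_READ_PROCESSED are among the enum values elided from this hunk:

#include <linux/i2c.h>

/* Hypothetical backend exposing a single byte to a remote controller. */
static int example_target_cb(struct i2c_client *client,
                             enum i2c_slave_event event, u8 *val)
{
        static u8 reg;

        switch (event) {
        case I2C_SLAVE_WRITE_RECEIVED:
                reg = *val;             /* remote controller wrote a byte */
                break;
        case I2C_SLAVE_READ_REQUESTED:
        case I2C_SLAVE_READ_PROCESSED:
                *val = reg;             /* remote controller reads it back */
                break;
        default:
                break;
        }
        return 0;
}

/* From probe: i2c_slave_register(client, example_target_cb); */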
@@ -304,11 +403,10 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* @type: chip type, to initialize i2c_client.name
* @flags: to initialize i2c_client.flags
* @addr: stored in i2c_client.addr
+ * @dev_name: Overrides the default <busnr>-<addr> dev_name if set
* @platform_data: stored in i2c_client.dev.platform_data
- * @archdata: copied into i2c_client.dev.archdata
- * @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
- * @properties: additional device properties for the device
+ * @swnode: software node for the device
* @resources: resources associated with the device
* @num_resources: number of resources in the @resources array
* @irq: stored in i2c_client.irq
@@ -322,17 +420,16 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* that are present. This information is used to grow the driver model tree.
* For mainboards this is done statically using i2c_register_board_info();
* bus numbers identify adapters that aren't yet available. For add-on boards,
- * i2c_new_device() does this dynamically with the adapter already known.
+ * i2c_new_client_device() does this dynamically with the adapter already known.
*/
struct i2c_board_info {
char type[I2C_NAME_SIZE];
unsigned short flags;
unsigned short addr;
+ const char *dev_name;
void *platform_data;
- struct dev_archdata *archdata;
- struct device_node *of_node;
struct fwnode_handle *fwnode;
- const struct property_entry *properties;
+ const struct software_node *swnode;
const struct resource *resources;
unsigned int num_resources;
int irq;
@@ -352,13 +449,14 @@ struct i2c_board_info {
.type = dev_type, .addr = (dev_addr)
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-/* Add-on boards should register/unregister their devices; e.g. a board
+#if IS_ENABLED(CONFIG_I2C)
+/*
+ * Add-on boards should register/unregister their devices; e.g. a board
* with integrated I2C, a config eeprom, sensors, and a codec that's
* used in conjunction with the primary hardware.
*/
-extern struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
+struct i2c_client *
+i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
/* If you don't know the exact address of an I2C device, use this variant
* instead, which can probe for device presence in a list of possible
@@ -366,27 +464,34 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
* it must return 1 on successful probe, 0 otherwise. If it is not provided,
* a default probing method is used.
*/
-extern struct i2c_client *
-i2c_new_probed_device(struct i2c_adapter *adap,
- struct i2c_board_info *info,
- unsigned short const *addr_list,
- int (*probe)(struct i2c_adapter *, unsigned short addr));
+struct i2c_client *
+i2c_new_scanned_device(struct i2c_adapter *adap,
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr));
/* Common custom probe functions */
-extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr);
+int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
-/* For devices that use several addresses, use i2c_new_dummy() to make
- * client handles for the extra addresses.
- */
-extern struct i2c_client *
-i2c_new_dummy(struct i2c_adapter *adap, u16 address);
+struct i2c_client *
+i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address);
-extern struct i2c_client *
-i2c_new_secondary_device(struct i2c_client *client,
- const char *name,
- u16 default_addr);
+struct i2c_client *
+devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address);
-extern void i2c_unregister_device(struct i2c_client *);
+struct i2c_client *
+i2c_new_ancillary_device(struct i2c_client *client,
+ const char *name,
+ u16 default_addr);
+
+void i2c_unregister_device(struct i2c_client *client);
+
+struct i2c_client *i2c_verify_client(struct device *dev);
+#else
+static inline struct i2c_client *i2c_verify_client(struct device *dev)
+{
+ return NULL;
+}
#endif /* I2C */
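
For illustration, a sketch of the dynamic registration path for add-on boards; the EEPROM type and address are hypothetical:

#include <linux/i2c.h>

/* Instantiate an EEPROM on an already-known adapter. */
static struct i2c_client *example_add_eeprom(struct i2c_adapter *adap)
{
        struct i2c_board_info info = {
                I2C_BOARD_INFO("24c02", 0x50),
        };

        /* Returns a valid client or an ERR_PTR(), never NULL. */
        return i2c_new_client_device(adap, &info);
}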
/* Mainboard arch_initcall() code should register all its I2C devices.
@@ -394,7 +499,7 @@ extern void i2c_unregister_device(struct i2c_client *);
* Modules for add-on boards must use other calls.
*/
#ifdef CONFIG_I2C_BOARDINFO
-extern int
+int
i2c_register_board_info(int busnum, struct i2c_board_info const *info,
unsigned n);
#else
@@ -407,46 +512,73 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
#endif /* I2C_BOARDINFO */
/**
- * struct i2c_algorithm - represent I2C transfer method
- * @master_xfer: Issue a set of i2c transactions to the given I2C adapter
- * defined by the msgs array, with num messages available to transfer via
- * the adapter specified by adap.
- * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
+ * struct i2c_algorithm - represent I2C transfer methods
+ * @xfer: Transfer a given number of messages defined by the msgs array via
+ * the specified adapter.
+ * @xfer_atomic: Same as @xfer, but using only atomic context so that e.g.
+ * PMICs can be accessed very late before shutdown. Optional.
+ * @smbus_xfer: Issue SMBus transactions to the given I2C adapter. If this
* is not present, then the bus layer will try and convert the SMBus calls
* into I2C transfers instead.
+ * @smbus_xfer_atomic: Same as @smbus_xfer, but using only atomic context so
+ * that e.g. PMICs can be accessed very late before shutdown. Optional.
* @functionality: Return the flags that this algorithm/adapter pair supports
- * from the I2C_FUNC_* flags.
- * @reg_slave: Register given client to I2C slave mode of this adapter
- * @unreg_slave: Unregister given client from I2C slave mode of this adapter
+ * from the ``I2C_FUNC_*`` flags.
+ * @reg_target: Register given client to local target mode of this adapter
+ * @unreg_target: Unregister given client from local target mode of this adapter
+ *
+ * @master_xfer: deprecated, use @xfer
+ * @master_xfer_atomic: deprecated, use @xfer_atomic
+ * @reg_slave: deprecated, use @reg_target
+ * @unreg_slave: deprecated, use @unreg_target
*
- * The following structs are for those who like to implement new bus drivers:
* i2c_algorithm is the interface to a class of hardware solutions which can
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
*
- * The return codes from the @master_xfer field should indicate the type of
- * error code that occurred during the transfer, as documented in the kernel
- * Documentation file Documentation/i2c/fault-codes.
+ * The return codes from the ``xfer{_atomic}`` fields should indicate the
+ * type of error code that occurred during the transfer, as documented in the
+ * Kernel Documentation file Documentation/i2c/fault-codes.rst. Otherwise, the
+ * number of messages executed should be returned.
*/
struct i2c_algorithm {
- /* If an adapter algorithm can't do I2C-level access, set master_xfer
- to NULL. If an adapter algorithm can do SMBus access, set
- smbus_xfer. If set to NULL, the SMBus protocol is simulated
- using common I2C messages */
- /* master_xfer should return the number of messages successfully
- processed, or a negative value on error */
- int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num);
- int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data *data);
+ /*
+ * If an adapter algorithm can't do I2C-level access, set xfer
+ * to NULL. If an adapter algorithm can do SMBus access, set
+ * smbus_xfer. If set to NULL, the SMBus protocol is simulated
+ * using common I2C messages.
+ */
+ union {
+ int (*xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
+ int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
+ };
+ union {
+ int (*xfer_atomic)(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num);
+ int (*master_xfer_atomic)(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num);
+ };
+ int (*smbus_xfer)(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
+ int (*smbus_xfer_atomic)(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
/* To determine what the adapter supports */
- u32 (*functionality) (struct i2c_adapter *);
+ u32 (*functionality)(struct i2c_adapter *adap);
#if IS_ENABLED(CONFIG_I2C_SLAVE)
- int (*reg_slave)(struct i2c_client *client);
- int (*unreg_slave)(struct i2c_client *client);
+ union {
+ int (*reg_target)(struct i2c_client *client);
+ int (*reg_slave)(struct i2c_client *client);
+ };
+ union {
+ int (*unreg_target)(struct i2c_client *client);
+ int (*unreg_slave)(struct i2c_client *client);
+ };
#endif
};
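
A minimal sketch of an algorithm using the renamed callbacks; example_hw_xfer() is a hypothetical hardware access helper:

#include <linux/i2c.h>

int example_hw_xfer(struct i2c_adapter *adap, struct i2c_msg *msg); /* hypothetical */

static int example_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
                        int num)
{
        int i;

        for (i = 0; i < num; i++) {
                int ret = example_hw_xfer(adap, &msgs[i]);

                if (ret < 0)
                        return ret;     /* fault code, see fault-codes.rst */
        }
        return num;                     /* messages successfully processed */
}

static u32 example_func(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm example_algo = {
        .xfer          = example_xfer,
        .functionality = example_func,
};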
@@ -459,9 +591,9 @@ struct i2c_algorithm {
* The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus.
*/
struct i2c_lock_operations {
- void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
- int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
- void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
+ void (*lock_bus)(struct i2c_adapter *adapter, unsigned int flags);
+ int (*trylock_bus)(struct i2c_adapter *adapter, unsigned int flags);
+ void (*unlock_bus)(struct i2c_adapter *adapter, unsigned int flags);
};
/**
@@ -471,6 +603,11 @@ struct i2c_lock_operations {
* @scl_fall_ns: time SCL signal takes to fall in ns; t(f) in the I2C specification
* @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns
* @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification
+ * @sda_hold_ns: time IP core additionally needs to hold SDA in ns
+ * @digital_filter_width_ns: width in ns of spikes on i2c lines that the IP core
+ * digital filter can filter out
+ * @analog_filter_cutoff_freq_hz: threshold frequency for the low pass IP core
+ * analog filter
*/
struct i2c_timings {
u32 bus_freq_hz;
@@ -478,45 +615,65 @@ struct i2c_timings {
u32 scl_fall_ns;
u32 scl_int_delay_ns;
u32 sda_fall_ns;
+ u32 sda_hold_ns;
+ u32 digital_filter_width_ns;
+ u32 analog_filter_cutoff_freq_hz;
};
/**
* struct i2c_bus_recovery_info - I2C bus recovery information
* @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or
- * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery().
+ * i2c_generic_scl_recovery().
* @get_scl: This gets current value of SCL line. Mandatory for generic SCL
- * recovery. Used internally for generic GPIO recovery.
- * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used
- * internally for generic GPIO recovery.
- * @get_sda: This gets current value of SDA line. Optional for generic SCL
- * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO
- * recovery.
+ * recovery. Populated internally for generic GPIO recovery.
+ * @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery.
+ * Populated internally for generic GPIO recovery.
+ * @get_sda: This gets current value of SDA line. This or set_sda() is mandatory
+ * for generic SCL recovery. Populated internally, if sda_gpio is a valid
+ * GPIO, for generic GPIO recovery.
+ * @set_sda: This sets/clears the SDA line. This or get_sda() is mandatory for
+ * generic SCL recovery. Populated internally, if sda_gpio is a valid GPIO,
+ * for generic GPIO recovery.
+ * @get_bus_free: Returns the bus free state as seen from the IP core in case it
+ * has a more complex internal logic than just reading SDA. Optional.
* @prepare_recovery: This will be called before starting recovery. Platform may
* configure padmux here for SDA/SCL line or something else they want.
* @unprepare_recovery: This will be called after completing recovery. Platform
* may configure padmux here for SDA/SCL line or something else they want.
- * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery.
- * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery.
+ * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery.
+ * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery.
+ * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins.
+ * Optional.
+ * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned
+ * to the I2C bus. Optional. Populated internally for GPIO recovery, if
+ * state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid.
+ * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as
+ * GPIOs. Optional. Populated internally for GPIO recovery, if this state
+ * is called "gpio" or "recovery" and pinctrl is valid.
*/
struct i2c_bus_recovery_info {
- int (*recover_bus)(struct i2c_adapter *);
+ int (*recover_bus)(struct i2c_adapter *adap);
- int (*get_scl)(struct i2c_adapter *);
- void (*set_scl)(struct i2c_adapter *, int val);
- int (*get_sda)(struct i2c_adapter *);
+ int (*get_scl)(struct i2c_adapter *adap);
+ void (*set_scl)(struct i2c_adapter *adap, int val);
+ int (*get_sda)(struct i2c_adapter *adap);
+ void (*set_sda)(struct i2c_adapter *adap, int val);
+ int (*get_bus_free)(struct i2c_adapter *adap);
- void (*prepare_recovery)(struct i2c_adapter *);
- void (*unprepare_recovery)(struct i2c_adapter *);
+ void (*prepare_recovery)(struct i2c_adapter *adap);
+ void (*unprepare_recovery)(struct i2c_adapter *adap);
/* gpio recovery */
- int scl_gpio;
- int sda_gpio;
+ struct gpio_desc *scl_gpiod;
+ struct gpio_desc *sda_gpiod;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_gpio;
};
int i2c_recover_bus(struct i2c_adapter *adap);
/* Generic recovery routines */
-int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
int i2c_generic_scl_recovery(struct i2c_adapter *adap);
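
A hedged sketch of hooking up generic SCL recovery with GPIO descriptors, assuming the adapter's bus_recovery_info pointer, which this hunk does not show; the gpiod lookups are left to the caller:

#include <linux/gpio/consumer.h>
#include <linux/i2c.h>

static struct i2c_bus_recovery_info example_rinfo = {
        .recover_bus = i2c_generic_scl_recovery,
};

static void example_setup_recovery(struct i2c_adapter *adap,
                                   struct gpio_desc *scl,
                                   struct gpio_desc *sda)
{
        example_rinfo.scl_gpiod = scl;
        example_rinfo.sda_gpiod = sda;  /* get/set callbacks are then
                                         * populated by the core */
        adap->bus_recovery_info = &example_rinfo;
}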
/**
@@ -562,6 +719,12 @@ struct i2c_adapter_quirks {
I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
/* clock stretching is not supported */
#define I2C_AQ_NO_CLK_STRETCH BIT(4)
+/* message cannot have length of 0 */
+#define I2C_AQ_NO_ZERO_LEN_READ BIT(5)
+#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6)
+#define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
+/* adapter cannot do repeated START */
+#define I2C_AQ_NO_REP_START BIT(7)
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
@@ -581,6 +744,9 @@ struct i2c_adapter {
int timeout; /* in jiffies */
int retries;
struct device dev; /* the adapter device */
+ unsigned long locked_flags; /* owned by the I2C core */
+#define I2C_ALF_IS_SUSPENDED 0
+#define I2C_ALF_SUSPEND_REPORTED 1
int nr;
char name[48];
@@ -593,17 +759,23 @@ struct i2c_adapter {
const struct i2c_adapter_quirks *quirks;
struct irq_domain *host_notify_domain;
+ struct regulator *bus_regulator;
+
+ struct dentry *debugfs;
+
+	/* 7-bit address space */
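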
+ DECLARE_BITMAP(addrs_in_instantiation, 1 << 7);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
-static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
+static inline void *i2c_get_adapdata(const struct i2c_adapter *adap)
{
- return dev_get_drvdata(&dev->dev);
+ return dev_get_drvdata(&adap->dev);
}
-static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
+static inline void i2c_set_adapdata(struct i2c_adapter *adap, void *data)
{
- dev_set_drvdata(&dev->dev, data);
+ dev_set_drvdata(&adap->dev, data);
}
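These accessors are typically paired: the driver stores its state at probe time and fetches it back in the transfer callback. A sketch, under the assumption of a driver-private struct foo_dev embedding the adapter:

static int foo_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct foo_dev *foo = i2c_get_adapdata(adap);

	/* ... program foo's registers to run msgs[0..num-1] ... */
	return num;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *foo = devm_kzalloc(&pdev->dev, sizeof(*foo),
					   GFP_KERNEL);

	if (!foo)
		return -ENOMEM;
	i2c_set_adapdata(&foo->adap, foo);
	/* ... fill in the rest of foo->adap ... */
	return i2c_add_adapter(&foo->adap);
}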
static inline struct i2c_adapter *
@@ -619,7 +791,7 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
return NULL;
}
-int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
+int i2c_for_each_dev(void *data, int (*fn)(struct device *dev, void *data));
/* Adapter locking functions, exported for shared pin cases */
#define I2C_LOCK_ROOT_ADAPTER BIT(0)
@@ -663,32 +835,40 @@ i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
adapter->lock_ops->unlock_bus(adapter, flags);
}
-static inline void
-i2c_lock_adapter(struct i2c_adapter *adapter)
+/**
+ * i2c_mark_adapter_suspended - Report suspended state of the adapter to the core
+ * @adap: Adapter to mark as suspended
+ *
+ * When using this helper to mark an adapter as suspended, the core will reject
+ * further transfers to this adapter. The usage of this helper is optional but
+ * recommended for devices having distinct handlers for system suspend and
+ * runtime suspend. More complex devices are free to implement custom solutions
+ * to reject transfers when suspended.
+ */
+static inline void i2c_mark_adapter_suspended(struct i2c_adapter *adap)
{
- i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+ i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
+ set_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags);
+ i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
}
-static inline void
-i2c_unlock_adapter(struct i2c_adapter *adapter)
+/**
+ * i2c_mark_adapter_resumed - Report resumed state of the adapter to the core
+ * @adap: Adapter to mark as resumed
+ *
+ * When using this helper to mark an adapter as resumed, the core will allow
+ * further transfers to this adapter. See also further notes to
+ * i2c_mark_adapter_suspended().
+ */
+static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
{
- i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+ i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
+ clear_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags);
+ i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
}
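Put together, a driver's system sleep callbacks usually bracket their hardware handling with these markers so the core fails transfers instead of touching a powered-down controller. A sketch, again assuming hypothetical foo_dev/clk fields:

static int foo_i2c_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	i2c_mark_adapter_suspended(&foo->adap);
	clk_disable_unprepare(foo->clk);

	return 0;
}

static int foo_i2c_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(foo->clk);
	if (ret)
		return ret;
	i2c_mark_adapter_resumed(&foo->adap);

	return 0;
}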
-/*flags for the client struct: */
-#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
-#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
- /* Must equal I2C_M_TEN below */
-#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
-#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
-#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
-#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
- /* Must match I2C_M_STOP|IGNORE_NAK */
-
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
-#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
-#define I2C_CLASS_SPD (1<<7) /* Memory modules */
/* Warn users that the adapter doesn't support classes anymore */
#define I2C_CLASS_DEPRECATED (1<<8)
@@ -704,29 +884,32 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
/* administration...
*/
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-extern int i2c_add_adapter(struct i2c_adapter *);
-extern void i2c_del_adapter(struct i2c_adapter *);
-extern int i2c_add_numbered_adapter(struct i2c_adapter *);
+#if IS_ENABLED(CONFIG_I2C)
+int i2c_add_adapter(struct i2c_adapter *adap);
+int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter);
+void i2c_del_adapter(struct i2c_adapter *adap);
+int i2c_add_numbered_adapter(struct i2c_adapter *adap);
-extern int i2c_register_driver(struct module *, struct i2c_driver *);
-extern void i2c_del_driver(struct i2c_driver *);
+int i2c_register_driver(struct module *owner, struct i2c_driver *driver);
+void i2c_del_driver(struct i2c_driver *driver);
/* use a define to avoid include chaining to get THIS_MODULE */
#define i2c_add_driver(driver) \
i2c_register_driver(THIS_MODULE, driver)
-extern struct i2c_client *i2c_use_client(struct i2c_client *client);
-extern void i2c_release_client(struct i2c_client *client);
+static inline bool i2c_client_has_driver(struct i2c_client *client)
+{
+ return !IS_ERR_OR_NULL(client) && client->dev.driver;
+}
/* call the i2c_client->command() of all attached clients with
* the given arguments */
-extern void i2c_clients_command(struct i2c_adapter *adap,
- unsigned int cmd, void *arg);
+void i2c_clients_command(struct i2c_adapter *adap,
+ unsigned int cmd, void *arg);
-extern struct i2c_adapter *i2c_get_adapter(int nr);
-extern void i2c_put_adapter(struct i2c_adapter *adap);
-extern unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
+struct i2c_adapter *i2c_get_adapter(int nr);
+void i2c_put_adapter(struct i2c_adapter *adap);
+unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults);
@@ -764,9 +947,27 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
{
- return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
+ return (msg->addr << 1) | (msg->flags & I2C_M_RD);
+}
+
+/*
+ * 10-bit address
+ * addr_1: 5'b11110 | addr[9:8] | (R/nW)
+ * addr_2: addr[7:0]
+ */
+static inline u8 i2c_10bit_addr_hi_from_msg(const struct i2c_msg *msg)
+{
+ return 0xf0 | ((msg->addr & GENMASK(9, 8)) >> 7) | (msg->flags & I2C_M_RD);
}
+static inline u8 i2c_10bit_addr_lo_from_msg(const struct i2c_msg *msg)
+{
+ return msg->addr & GENMASK(7, 0);
+}
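As a worked example of the encoding above: for a 10-bit read (I2C_M_RD set) at address 0x2a3, i2c_10bit_addr_hi_from_msg() returns 0xf0 | ((0x2a3 & 0x300) >> 7) | 1 = 0xf5 and i2c_10bit_addr_lo_from_msg() returns 0xa3. A controller's transfer path might emit the two bytes like this, where foo_write_byte() is a placeholder for a register write:

static void foo_send_10bit_addr(struct foo_dev *foo, const struct i2c_msg *msg)
{
	/* first byte: 11110 | addr[9:8] | R/nW; second byte: addr[7:0] */
	foo_write_byte(foo, i2c_10bit_addr_hi_from_msg(msg));
	foo_write_byte(foo, i2c_10bit_addr_lo_from_msg(msg));
}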
+
+u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
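These two helpers pair up around a DMA transfer: the first returns either the message's own buffer (when it is already DMA-safe) or a bounce buffer, and the second copies read data back and releases the bounce buffer if one was used. A sketch, where the 8-byte threshold and the foo_* transfer helpers are illustrative assumptions:

static int foo_xfer_msg(struct foo_dev *foo, struct i2c_msg *msg)
{
	u8 *dma_buf;
	int ret;

	dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
	if (!dma_buf)
		return foo_pio_xfer(foo, msg);	/* short message: use PIO */

	ret = foo_dma_xfer(foo, dma_buf, msg->len);
	/* 'xferred' tells the helper whether to copy read data back */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, ret == 0);

	return ret;
}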
+
int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
/**
* module_i2c_driver() - Helper macro for registering a modular I2C driver
@@ -791,21 +992,58 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#define builtin_i2c_driver(__i2c_driver) \
builtin_driver(__i2c_driver, i2c_add_driver)
-#endif /* I2C */
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
+#else /* I2C */
+
+static inline struct i2c_client *
+i2c_find_device_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+#endif /* !I2C */
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
-extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ return i2c_find_device_by_fwnode(of_fwnode_handle(node));
+}
/* must call put_device() when done with returned i2c_adapter device */
-extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_find_adapter_by_fwnode(of_fwnode_handle(node));
+}
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
+}
-extern const struct of_device_id
-*i2c_of_match_device(const struct of_device_id *matches,
- struct i2c_client *client);
+int of_i2c_get_board_info(struct device *dev, struct device_node *node,
+ struct i2c_board_info *info);
#else
@@ -824,29 +1062,63 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
return NULL;
}
-static inline const struct of_device_id
-*i2c_of_match_device(const struct of_device_id *matches,
- struct i2c_client *client)
+static inline int of_i2c_get_board_info(struct device *dev,
+ struct device_node *node,
+ struct i2c_board_info *info)
{
- return NULL;
+ return -ENOTSUPP;
}
#endif /* CONFIG_OF */
-#if IS_ENABLED(CONFIG_ACPI)
+struct acpi_resource;
+struct acpi_resource_i2c_serialbus;
+
+#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
+bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c);
+int i2c_acpi_client_count(struct acpi_device *adev);
u32 i2c_acpi_find_bus_speed(struct device *dev);
-struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
- struct i2c_board_info *info);
+struct i2c_client *i2c_acpi_new_device_by_fwnode(struct fwnode_handle *fwnode,
+ int index,
+ struct i2c_board_info *info);
+struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle);
+bool i2c_acpi_waive_d0_probe(struct device *dev);
#else
+static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c)
+{
+ return false;
+}
+static inline int i2c_acpi_client_count(struct acpi_device *adev)
+{
+ return 0;
+}
static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
{
return 0;
}
-static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
- int index, struct i2c_board_info *info)
+static inline struct i2c_client *i2c_acpi_new_device_by_fwnode(
+ struct fwnode_handle *fwnode, int index,
+ struct i2c_board_info *info)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
{
return NULL;
}
+static inline bool i2c_acpi_waive_d0_probe(struct device *dev)
+{
+ return false;
+}
#endif /* CONFIG_ACPI */
+static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
+ int index,
+ struct i2c_board_info *info)
+{
+ return i2c_acpi_new_device_by_fwnode(dev_fwnode(dev), index, info);
+}
+
#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i2c/bfin_twi.h b/include/linux/i2c/bfin_twi.h
deleted file mode 100644
index 135a4e0876ae..000000000000
--- a/include/linux/i2c/bfin_twi.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * i2c-bfin-twi.h - interface to ADI TWI controller
- *
- * Copyright 2005-2014 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef __I2C_BFIN_TWI_H__
-#define __I2C_BFIN_TWI_H__
-
-#include <linux/types.h>
-#include <linux/i2c.h>
-
-/*
- * ADI twi registers layout
- */
-struct bfin_twi_regs {
- u16 clkdiv;
- u16 dummy1;
- u16 control;
- u16 dummy2;
- u16 slave_ctl;
- u16 dummy3;
- u16 slave_stat;
- u16 dummy4;
- u16 slave_addr;
- u16 dummy5;
- u16 master_ctl;
- u16 dummy6;
- u16 master_stat;
- u16 dummy7;
- u16 master_addr;
- u16 dummy8;
- u16 int_stat;
- u16 dummy9;
- u16 int_mask;
- u16 dummy10;
- u16 fifo_ctl;
- u16 dummy11;
- u16 fifo_stat;
- u16 dummy12;
- u32 __pad[20];
- u16 xmt_data8;
- u16 dummy13;
- u16 xmt_data16;
- u16 dummy14;
- u16 rcv_data8;
- u16 dummy15;
- u16 rcv_data16;
- u16 dummy16;
-};
-
-struct bfin_twi_iface {
- int irq;
- spinlock_t lock;
- char read_write;
- u8 command;
- u8 *transPtr;
- int readNum;
- int writeNum;
- int cur_mode;
- int manual_stop;
- int result;
- struct i2c_adapter adap;
- struct completion complete;
- struct i2c_msg *pmsg;
- int msg_num;
- int cur_msg;
- u16 saved_clkdiv;
- u16 saved_control;
- struct bfin_twi_regs __iomem *regs_base;
-};
-
-/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
-#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
-#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
-
-/* TWI_PRESCALE Masks */
-#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
-#define TWI_ENA 0x0080 /* TWI Enable */
-#define SCCB 0x0200 /* SCCB Compatibility Enable */
-
-/* TWI_SLAVE_CTL Masks */
-#define SEN 0x0001 /* Slave Enable */
-#define SADD_LEN 0x0002 /* Slave Address Length */
-#define STDVAL 0x0004 /* Slave Transmit Data Valid */
-#define NAK 0x0008 /* NAK Generated At Conclusion Of Transfer */
-#define GEN 0x0010 /* General Call Address Matching Enabled */
-
-/* TWI_SLAVE_STAT Masks */
-#define SDIR 0x0001 /* Slave Transfer Direction (RX/TX*) */
-#define GCALL 0x0002 /* General Call Indicator */
-
-/* TWI_MASTER_CTL Masks */
-#define MEN 0x0001 /* Master Mode Enable */
-#define MADD_LEN 0x0002 /* Master Address Length */
-#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
-#define FAST 0x0008 /* Use Fast Mode Timing Specs */
-#define STOP 0x0010 /* Issue Stop Condition */
-#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
-#define DCNT 0x3FC0 /* Data Bytes To Transfer */
-#define SDAOVR 0x4000 /* Serial Data Override */
-#define SCLOVR 0x8000 /* Serial Clock Override */
-
-/* TWI_MASTER_STAT Masks */
-#define MPROG 0x0001 /* Master Transfer In Progress */
-#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
-#define ANAK 0x0004 /* Address Not Acknowledged */
-#define DNAK 0x0008 /* Data Not Acknowledged */
-#define BUFRDERR 0x0010 /* Buffer Read Error */
-#define BUFWRERR 0x0020 /* Buffer Write Error */
-#define SDASEN 0x0040 /* Serial Data Sense */
-#define SCLSEN 0x0080 /* Serial Clock Sense */
-#define BUSBUSY 0x0100 /* Bus Busy Indicator */
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
-#define SINIT 0x0001 /* Slave Transfer Initiated */
-#define SCOMP 0x0002 /* Slave Transfer Complete */
-#define SERR 0x0004 /* Slave Transfer Error */
-#define SOVF 0x0008 /* Slave Overflow */
-#define MCOMP 0x0010 /* Master Transfer Complete */
-#define MERR 0x0020 /* Master Transfer Error */
-#define XMTSERV 0x0040 /* Transmit FIFO Service */
-#define RCVSERV 0x0080 /* Receive FIFO Service */
-
-/* TWI_FIFO_CTRL Masks */
-#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
-#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
-#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
-#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
-
-/* TWI_FIFO_STAT Masks */
-#define XMTSTAT 0x0003 /* Transmit FIFO Status */
-#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
-#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
-#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
-
-#define RCVSTAT 0x000C /* Receive FIFO Status */
-#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
-#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
-#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
-
-#endif
diff --git a/include/linux/i2c/dm355evm_msp.h b/include/linux/i2c/dm355evm_msp.h
deleted file mode 100644
index 372470350fab..000000000000
--- a/include/linux/i2c/dm355evm_msp.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board
- */
-#ifndef __LINUX_I2C_DM355EVM_MSP
-#define __LINUX_I2C_DM355EVM_MSP
-
-/*
- * Written against Spectrum's writeup for the A4 firmware revision,
- * and tweaked to match source and rev D2 schematics by removing CPLD
- * and NOR flash hooks (which were last appropriate in rev B boards).
- *
- * Note that the firmware supports a flavor of write posting ... to be
- * sure a write completes, issue another read or write.
- */
-
-/* utilities to access "registers" emulated by msp430 firmware */
-extern int dm355evm_msp_write(u8 value, u8 reg);
-extern int dm355evm_msp_read(u8 reg);
-
-
-/* command/control registers */
-#define DM355EVM_MSP_COMMAND 0x00
-# define MSP_COMMAND_NULL 0
-# define MSP_COMMAND_RESET_COLD 1
-# define MSP_COMMAND_RESET_WARM 2
-# define MSP_COMMAND_RESET_WARM_I 3
-# define MSP_COMMAND_POWEROFF 4
-# define MSP_COMMAND_IR_REINIT 5
-#define DM355EVM_MSP_STATUS 0x01
-# define MSP_STATUS_BAD_OFFSET BIT(0)
-# define MSP_STATUS_BAD_COMMAND BIT(1)
-# define MSP_STATUS_POWER_ERROR BIT(2)
-# define MSP_STATUS_RXBUF_OVERRUN BIT(3)
-#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */
-# define MSP_RESET_DC5 BIT(0)
-# define MSP_RESET_TVP5154 BIT(2)
-# define MSP_RESET_IMAGER BIT(3)
-# define MSP_RESET_ETHERNET BIT(4)
-# define MSP_RESET_SYS BIT(5)
-# define MSP_RESET_AIC33 BIT(7)
-
-/* GPIO registers ... bit patterns mostly match the source MSP ports */
-#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */
-#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */
-# define MSP_SWITCH1_SW6_1 BIT(0)
-# define MSP_SWITCH1_SW6_2 BIT(1)
-# define MSP_SWITCH1_SW6_3 BIT(2)
-# define MSP_SWITCH1_SW6_4 BIT(3)
-# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */
-# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */
-#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */
-# define MSP_SWITCH2_SW10 BIT(3)
-# define MSP_SWITCH2_SW11 BIT(4)
-# define MSP_SWITCH2_SW12 BIT(5)
-# define MSP_SWITCH2_SW13 BIT(6)
-# define MSP_SWITCH2_SW14 BIT(7)
-#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */
-# define MSP_SDMMC_0_WP BIT(1)
-# define MSP_SDMMC_0_CD BIT(2) /* active low */
-# define MSP_SDMMC_1_WP BIT(3)
-# define MSP_SDMMC_1_CD BIT(4) /* active low */
-#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */
-#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */
-# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */
-
-/* power supply registers are currently omitted */
-
-/* RTC registers */
-#define DM355EVM_MSP_RTC_0 0x12 /* LSB */
-#define DM355EVM_MSP_RTC_1 0x13
-#define DM355EVM_MSP_RTC_2 0x14
-#define DM355EVM_MSP_RTC_3 0x15 /* MSB */
-
-/* input event queue registers; code == ((HIGH << 8) | LOW) */
-#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */
-#define DM355EVM_MSP_INPUT_HIGH 0x17
-#define DM355EVM_MSP_INPUT_LOW 0x18
-
-#endif /* __LINUX_I2C_DM355EVM_MSP */
diff --git a/include/linux/i2c/mlxcpld.h b/include/linux/i2c/mlxcpld.h
deleted file mode 100644
index b08dcb183fca..000000000000
--- a/include/linux/i2c/mlxcpld.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * mlxcpld.h - Mellanox I2C multiplexer support in CPLD
- *
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _LINUX_I2C_MLXCPLD_H
-#define _LINUX_I2C_MLXCPLD_H
-
-/* Platform data for the CPLD I2C multiplexers */
-
-/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
- * @adap_ids - adapter array
- * @num_adaps - number of adapters
- * @sel_reg_addr - mux select register offset in CPLD space
- */
-struct mlxcpld_mux_plat_data {
- int *adap_ids;
- int num_adaps;
- int sel_reg_addr;
-};
-
-#endif /* _LINUX_I2C_MLXCPLD_H */
diff --git a/include/linux/i2c/pca954x.h b/include/linux/i2c/pca954x.h
deleted file mode 100644
index 1712677d5904..000000000000
--- a/include/linux/i2c/pca954x.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *
- * pca954x.h - I2C multiplexer/switch support
- *
- * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
- * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
- * Michael Lawnick <michael.lawnick.ext@nsn.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-#ifndef _LINUX_I2C_PCA954X_H
-#define _LINUX_I2C_PCA954X_H
-
-/* Platform data for the PCA954x I2C multiplexers */
-
-/* Per channel initialisation data:
- * @adap_id: bus number for the adapter. 0 = don't care
- * @deselect_on_exit: set this entry to 1, if your H/W needs deselection
- * of this channel after transaction.
- *
- */
-struct pca954x_platform_mode {
- int adap_id;
- unsigned int deselect_on_exit:1;
- unsigned int class;
-};
-
-/* Per mux/switch data, used with i2c_register_board_info */
-struct pca954x_platform_data {
- struct pca954x_platform_mode *modes;
- int num_modes;
-};
-
-#endif /* _LINUX_I2C_PCA954X_H */
diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/i2c/pxa-i2c.h
deleted file mode 100644
index 53aab243cbd8..000000000000
--- a/include/linux/i2c/pxa-i2c.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * i2c_pxa.h
- *
- * Copyright (C) 2002 Intrinsyc Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#ifndef _I2C_PXA_H_
-#define _I2C_PXA_H_
-
-#if 0
-#define DEF_TIMEOUT 3
-#else
-/* need a longer timeout if we're dealing with the fact we may well be
- * looking at a multi-master environment
-*/
-#define DEF_TIMEOUT 32
-#endif
-
-#define BUS_ERROR (-EREMOTEIO)
-#define XFER_NAKED (-ECONNREFUSED)
-#define I2C_RETRY (-2000) /* an error has occurred retry transmit */
-
-/* ICR initialize bit values
-*
-* 15. FM 0 (100 Khz operation)
-* 14. UR 0 (No unit reset)
-* 13. SADIE 0 (Disables the unit from interrupting on slave addresses
-* matching its slave address)
-* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration
-* in master mode)
-* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode)
-* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent)
-* 9. IRFIE 1 (Enable interrupts from full buffer received)
-* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty)
-* 7. GCD 1 (Disables i2c unit response to general call messages as a slave)
-* 6. IUE 0 (Disable unit until we change settings)
-* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL)
-* 4. MA 0 (Only send stop with the ICR stop bit)
-* 3. TB 0 (We are not transmitting a byte initially)
-* 2. ACKNAK 0 (Send an ACK after the unit receives a byte)
-* 1. STOP 0 (Do not send a STOP)
-* 0. START 0 (Do not send a START)
-*
-*/
-#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE)
-
-/* I2C status register init values
- *
- * 10. BED 1 (Clear bus error detected)
- * 9. SAD 1 (Clear slave address detected)
- * 7. IRF 1 (Clear IDBR Receive Full)
- * 6. ITE 1 (Clear IDBR Transmit Empty)
- * 5. ALD 1 (Clear Arbitration Loss Detected)
- * 4. SSD 1 (Clear Slave Stop Detected)
- */
-#define I2C_ISR_INIT 0x7FF /* status register init */
-
-struct i2c_slave_client;
-
-struct i2c_pxa_platform_data {
- unsigned int slave_addr;
- struct i2c_slave_client *slave;
- unsigned int class;
- unsigned int use_pio :1;
- unsigned int fast_mode :1;
- unsigned int high_mode:1;
- unsigned char master_code;
- unsigned long rate;
-};
-
-extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info);
-
-#ifdef CONFIG_PXA27x
-extern void pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info);
-#endif
-
-#ifdef CONFIG_PXA3xx
-extern void pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info);
-#endif
-
-#endif
diff --git a/include/linux/i2c/tc35876x.h b/include/linux/i2c/tc35876x.h
deleted file mode 100644
index cd6a51c71e7e..000000000000
--- a/include/linux/i2c/tc35876x.h
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#ifndef _TC35876X_H
-#define _TC35876X_H
-
-struct tc35876x_platform_data {
- int gpio_bridge_reset;
- int gpio_panel_bl_en;
- int gpio_panel_vadd;
-};
-
-#endif /* _TC35876X_H */
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
new file mode 100644
index 000000000000..ad59a4ae60d1
--- /dev/null
+++ b/include/linux/i3c/ccc.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_CCC_H
+#define I3C_CCC_H
+
+#include <linux/bitops.h>
+#include <linux/i3c/device.h>
+
+/* I3C CCC (Common Command Codes) related definitions */
+#define I3C_CCC_DIRECT BIT(7)
+
+#define I3C_CCC_ID(id, broadcast) \
+ ((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT))
+
+/* Commands valid in both broadcast and unicast modes */
+#define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast)
+#define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast)
+#define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast)
+#define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast)
+#define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast)
+#define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast)
+#define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98)
+#define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0))
+
+/* Broadcast-only commands */
+#define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true)
+#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true)
+#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true)
+#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true)
+
+/* Unicast-only commands */
+#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false)
+#define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false)
+#define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false)
+#define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false)
+#define I3C_CCC_GETPID I3C_CCC_ID(0xd, false)
+#define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false)
+#define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false)
+#define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false)
+#define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false)
+#define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false)
+#define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false)
+#define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false)
+#define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false)
+
+#define I3C_CCC_EVENT_SIR BIT(0)
+#define I3C_CCC_EVENT_MR BIT(1)
+#define I3C_CCC_EVENT_HJ BIT(3)
+
+/**
+ * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC
+ *
+ * @events: bitmask of I3C_CCC_EVENT_xxx events.
+ *
+ * Depending on the CCC command, the specific events coming from all devices
+ * (broadcast version) or a specific device (unicast version) will be
+ * enabled (ENEC) or disabled (DISEC).
+ */
+struct i3c_ccc_events {
+ u8 events;
+};
+
+/**
+ * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC
+ *
+ * @len: maximum write length in bytes
+ *
+ * The maximum write length is only applicable to SDR private messages or
+ * extended Write CCCs (like SETXTIME).
+ */
+struct i3c_ccc_mwl {
+ __be16 len;
+};
+
+/**
+ * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC
+ *
+ * @len: maximum read length in bytes
+ * @ibi_len: maximum IBI payload length
+ *
+ * The maximum read length is only applicable to SDR private messages or
+ * extended Read CCCs (like GETXTIME).
+ * The IBI length is only valid if the I3C slave is IBI capable
+ * (%I3C_BCR_IBI_REQ_CAP is set).
+ */
+struct i3c_ccc_mrl {
+ __be16 read_len;
+ u8 ibi_len;
+} __packed;
+
+/**
+ * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS
+ *
+ * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is
+ * describing an I2C slave.
+ * @dcr: DCR value (not applicable to entries describing I2C devices)
+ * @lvr: LVR value (not applicable to entries describing I3C devices)
+ * @bcr: BCR value or 0 if this entry is describing an I2C slave
+ * @static_addr: static address or 0 if the device does not have a static
+ * address
+ *
+ * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc
+ * descriptors (one entry per I3C/I2C dev controlled by the master).
+ */
+struct i3c_ccc_dev_desc {
+ u8 dyn_addr;
+ union {
+ u8 dcr;
+ u8 lvr;
+ };
+ u8 bcr;
+ u8 static_addr;
+};
+
+/**
+ * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC
+ *
+ * @count: number of dev descriptors
+ * @master: descriptor describing the current master
+ * @slaves: array of descriptors describing slaves controlled by the
+ * current master
+ *
+ * Information passed to the broadcast DEFSLVS to propagate device
+ * information to all masters currently acting as slaves on the bus.
+ * This is only meaningful if you have more than one master.
+ */
+struct i3c_ccc_defslvs {
+ u8 count;
+ struct i3c_ccc_dev_desc master;
+ struct i3c_ccc_dev_desc slaves[];
+} __packed;
+
+/**
+ * enum i3c_ccc_test_mode - enum listing all available test modes
+ *
+ * @I3C_CCC_EXIT_TEST_MODE: exit test mode
+ * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode
+ */
+enum i3c_ccc_test_mode {
+ I3C_CCC_EXIT_TEST_MODE,
+ I3C_CCC_VENDOR_TEST_MODE,
+};
+
+/**
+ * struct i3c_ccc_enttm - payload passed to ENTTM CCC
+ *
+ * @mode: one of the &enum i3c_ccc_test_mode modes
+ *
+ * Information passed to the ENTTM CCC to instruct an I3C device to enter a
+ * specific test mode.
+ */
+struct i3c_ccc_enttm {
+ u8 mode;
+};
+
+/**
+ * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs
+ *
+ * @addr: dynamic address to assign to an I3C device
+ *
+ * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the
+ * dynamic address of an I3C device.
+ */
+struct i3c_ccc_setda {
+ u8 addr;
+};
+
+/**
+ * struct i3c_ccc_getpid - payload passed to GETPID CCC
+ *
+ * @pid: 48 bits PID in big endian
+ */
+struct i3c_ccc_getpid {
+ u8 pid[6];
+};
+
+/**
+ * struct i3c_ccc_getbcr - payload passed to GETBCR CCC
+ *
+ * @bcr: BCR (Bus Characteristic Register) value
+ */
+struct i3c_ccc_getbcr {
+ u8 bcr;
+};
+
+/**
+ * struct i3c_ccc_getdcr - payload passed to GETDCR CCC
+ *
+ * @dcr: DCR (Device Characteristic Register) value
+ */
+struct i3c_ccc_getdcr {
+ u8 dcr;
+};
+
+#define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0))
+#define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5)
+#define I3C_CCC_STATUS_ACTIVITY_MODE(status) \
+ (((status) & GENMASK(7, 6)) >> 6)
+
+/**
+ * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC
+ *
+ * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more
+ * information).
+ */
+struct i3c_ccc_getstatus {
+ __be16 status;
+};
+
+/**
+ * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC
+ *
+ * @newmaster: address of the master taking bus ownership
+ */
+struct i3c_ccc_getaccmst {
+ u8 newmaster;
+};
+
+/**
+ * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor
+ *
+ * @addr: dynamic address of the bridged device
+ * @id: ID of the slave device behind the bridge
+ */
+struct i3c_ccc_bridged_slave_desc {
+ u8 addr;
+ __be16 id;
+} __packed;
+
+/**
+ * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC
+ *
+ * @count: number of bridged slaves
+ * @bslaves: bridged slave descriptors
+ */
+struct i3c_ccc_setbrgtgt {
+ u8 count;
+ struct i3c_ccc_bridged_slave_desc bslaves[];
+} __packed;
+
+/**
+ * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers
+ */
+enum i3c_sdr_max_data_rate {
+ I3C_SDR0_FSCL_MAX,
+ I3C_SDR1_FSCL_8MHZ,
+ I3C_SDR2_FSCL_6MHZ,
+ I3C_SDR3_FSCL_4MHZ,
+ I3C_SDR4_FSCL_2MHZ,
+};
+
+/**
+ * enum i3c_tsco - clock to data turn-around
+ */
+enum i3c_tsco {
+ I3C_TSCO_8NS,
+ I3C_TSCO_9NS,
+ I3C_TSCO_10NS,
+ I3C_TSCO_11NS,
+ I3C_TSCO_12NS,
+};
+
+#define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0)
+#define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK)
+
+/**
+ * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC
+ *
+ * @maxwr: write limitations
+ * @maxrd: read limitations
+ * @maxrdturn: maximum read turn-around expressed in micro-seconds and
+ * little-endian formatted
+ */
+struct i3c_ccc_getmxds {
+ u8 maxwr;
+ u8 maxrd;
+ u8 maxrdturn[3];
+} __packed;
+
+#define I3C_CCC_HDR_MODE(mode) BIT(mode)
+
+/**
+ * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC
+ *
+ * @modes: bitmap of supported HDR modes
+ */
+struct i3c_ccc_gethdrcap {
+ u8 modes;
+} __packed;
+
+/**
+ * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands
+ */
+enum i3c_ccc_setxtime_subcmd {
+ I3C_CCC_SETXTIME_ST = 0x7f,
+ I3C_CCC_SETXTIME_DT = 0xbf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb,
+ I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd,
+ I3C_CCC_SETXTIME_TPH = 0x3f,
+ I3C_CCC_SETXTIME_TU = 0x9f,
+ I3C_CCC_SETXTIME_ODR = 0x8f,
+};
+
+/**
+ * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC
+ *
+ * @subcmd: one of the sub-commands defined in &enum i3c_ccc_setxtime_subcmd
+ * @data: sub-command payload. Amount of data is determined by
+ * &i3c_ccc_setxtime->subcmd
+ */
+struct i3c_ccc_setxtime {
+ u8 subcmd;
+ u8 data[];
+} __packed;
+
+#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0)
+#define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1)
+#define I3C_CCC_GETXTIME_OVERFLOW BIT(7)
+
+/**
+ * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC
+ *
+ * @supported_modes: bitmap describing supported XTIME modes
+ * @state: current status (enabled mode and overflow status)
+ * @frequency: slave's internal oscillator frequency in 500KHz steps
+ * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps
+ */
+struct i3c_ccc_getxtime {
+ u8 supported_modes;
+ u8 state;
+ u8 frequency;
+ u8 inaccuracy;
+} __packed;
+
+/**
+ * struct i3c_ccc_cmd_payload - CCC payload
+ *
+ * @len: payload length
+ * @data: payload data. This buffer must be DMA-able
+ */
+struct i3c_ccc_cmd_payload {
+ u16 len;
+ void *data;
+};
+
+/**
+ * struct i3c_ccc_cmd_dest - CCC command destination
+ *
+ * @addr: can be an I3C device address or the broadcast address if this is a
+ * broadcast CCC
+ * @payload: payload to be sent to this device or broadcasted
+ */
+struct i3c_ccc_cmd_dest {
+ u8 addr;
+ struct i3c_ccc_cmd_payload payload;
+};
+
+/**
+ * struct i3c_ccc_cmd - CCC command
+ *
+ * @rnw: true if the CCC should retrieve data from the device. Only valid for
+ * unicast commands
+ * @id: CCC command id
+ * @ndests: number of destinations. Should always be one for broadcast commands
+ * @dests: array of destinations and associated payload for this CCC. Most of
+ * the time, only one destination is provided
+ * @err: I3C error code
+ */
+struct i3c_ccc_cmd {
+ u8 rnw;
+ u8 id;
+ unsigned int ndests;
+ struct i3c_ccc_cmd_dest *dests;
+ enum i3c_error_code err;
+};
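To make the destination/payload plumbing concrete, here is a sketch of a master driver assembling a directed ENEC that enables SIR (slave interrupt request) events on one target. How the command is then pushed to the hardware is controller-specific and not shown; the foo_ prefix marks the hypothetical parts:

static void foo_build_directed_enec(struct i3c_ccc_cmd *cmd,
				    struct i3c_ccc_cmd_dest *dest,
				    struct i3c_ccc_events *events, u8 addr)
{
	events->events = I3C_CCC_EVENT_SIR;

	dest->addr = addr;
	dest->payload.len = sizeof(*events);
	dest->payload.data = events;	/* must be DMA-able */

	cmd->rnw = false;
	cmd->id = I3C_CCC_ENEC(false);	/* directed (unicast) variant */
	cmd->ndests = 1;
	cmd->dests = dest;
	cmd->err = I3C_ERROR_UNKNOWN;
}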
+
+#endif /* I3C_CCC_H */
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
new file mode 100644
index 000000000000..9fcb6410a584
--- /dev/null
+++ b/include/linux/i3c/device.h
@@ -0,0 +1,363 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_DEV_H
+#define I3C_DEV_H
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kconfig.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+/**
+ * enum i3c_error_code - I3C error codes
+ *
+ * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
+ * related
+ * @I3C_ERROR_M0: M0 error
+ * @I3C_ERROR_M1: M1 error
+ * @I3C_ERROR_M2: M2 error
+ *
+ * These are the standard error codes as defined by the I3C specification.
+ * When -EIO is returned by the i3c_device_do_priv_xfers() or
+ * i3c_device_send_hdr_cmds() one can check the error code in
+ * &struct_i3c_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
+ * what went wrong.
+ *
+ */
+enum i3c_error_code {
+ I3C_ERROR_UNKNOWN = 0,
+ I3C_ERROR_M0 = 1,
+ I3C_ERROR_M1,
+ I3C_ERROR_M2,
+};
+
+/**
+ * enum i3c_xfer_mode - I3C xfer mode ids
+ * @I3C_HDR_DDR: DDR mode
+ * @I3C_HDR_TSP: TSP mode
+ * @I3C_HDR_TSL: TSL mode
+ * @I3C_SDR: SDR mode (NOT HDR mode)
+ */
+enum i3c_xfer_mode {
+	/* The below 3 values (I3C_HDR*) must match the GETCAP1 byte bit positions */
+ I3C_HDR_DDR = 0,
+ I3C_HDR_TSP = 1,
+ I3C_HDR_TSL = 2,
+ /* Use for default SDR transfer mode */
+ I3C_SDR = 31,
+};
+
+/**
+ * struct i3c_xfer - I3C data transfer
+ * @rnw: encodes the transfer direction. true for a read, false for a write
+ * @cmd: Read/Write command in HDR mode, read: 0x80 - 0xff, write: 0x00 - 0x7f
+ * @len: transfer length in bytes of the transfer
+ * @actual_len: actual number of bytes transferred by the controller
+ * @data: input/output buffer
+ * @data.in: input buffer. Must point to a DMA-able buffer
+ * @data.out: output buffer. Must point to a DMA-able buffer
+ * @err: I3C error code
+ */
+struct i3c_xfer {
+ union {
+ u8 rnw;
+ u8 cmd;
+ };
+ u16 len;
+ u16 actual_len;
+ union {
+ void *in;
+ const void *out;
+ } data;
+ enum i3c_error_code err;
+};
+
+/* keep back compatible */
+#define i3c_priv_xfer i3c_xfer
+
+/**
+ * enum i3c_dcr - I3C DCR values
+ * @I3C_DCR_GENERIC_DEVICE: generic I3C device
+ */
+enum i3c_dcr {
+ I3C_DCR_GENERIC_DEVICE = 0,
+};
+
+#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33)
+#define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32)))
+#define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0))
+#define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16)
+#define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12)
+#define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0))
+
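A short sketch decoding a Provisioned ID with these accessors; note that the part/instance/extra fields are only meaningful when the lower 32 bits are not flagged as a random value:

static void foo_log_pid(struct device *dev, u64 pid)
{
	dev_info(dev, "manufacturer ID: %#llx\n", I3C_PID_MANUF_ID(pid));

	if (I3C_PID_RND_LOWER_32BITS(pid))
		dev_info(dev, "random value: %#llx\n", I3C_PID_RND_VAL(pid));
	else
		dev_info(dev, "part %#llx instance %#llx\n",
			 I3C_PID_PART_ID(pid), I3C_PID_INSTANCE_ID(pid));
}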
+#define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6))
+#define I3C_BCR_I3C_SLAVE (0 << 6)
+#define I3C_BCR_I3C_MASTER (1 << 6)
+#define I3C_BCR_HDR_CAP BIT(5)
+#define I3C_BCR_BRIDGE BIT(4)
+#define I3C_BCR_OFFLINE_CAP BIT(3)
+#define I3C_BCR_IBI_PAYLOAD BIT(2)
+#define I3C_BCR_IBI_REQ_CAP BIT(1)
+#define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0)
+
+/**
+ * struct i3c_device_info - I3C device information
+ * @pid: Provisioned ID
+ * @bcr: Bus Characteristic Register
+ * @dcr: Device Characteristic Register
+ * @static_addr: static/I2C address
+ * @dyn_addr: dynamic address
+ * @hdr_cap: supported HDR modes
+ * @max_read_ds: max read speed information
+ * @max_write_ds: max write speed information
+ * @max_ibi_len: max IBI payload length
+ * @max_read_turnaround: max read turn-around time in micro-seconds
+ * @max_read_len: max private SDR read length in bytes
+ * @max_write_len: max private SDR write length in bytes
+ *
+ * These are all basic information that should be advertised by an I3C device.
+ * Some of them are optional depending on the device type and device
+ * capabilities.
+ * For each I3C slave attached to a master with
+ * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command
+ * to retrieve these data.
+ */
+struct i3c_device_info {
+ u64 pid;
+ u8 bcr;
+ u8 dcr;
+ u8 static_addr;
+ u8 dyn_addr;
+ u8 hdr_cap;
+ u8 max_read_ds;
+ u8 max_write_ds;
+ u8 max_ibi_len;
+ u32 max_read_turnaround;
+ u16 max_read_len;
+ u16 max_write_len;
+};
+
+/*
+ * I3C device internals are kept hidden from I3C device users. It's just
+ * simpler to refactor things when everything goes through getter/setters, and
+ * I3C device drivers should not have to worry about internal representation
+ * anyway.
+ */
+struct i3c_device;
+
+/* These macros should be used to fill i3c_device_id entries. */
+#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART)
+
+#define I3C_DEVICE(_manufid, _partid, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .data = _drvdata, \
+ }
+
+#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART | \
+ I3C_MATCH_EXTRA_INFO, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .extra_info = _info, \
+ .data = _drvdata, \
+ }
+
+#define I3C_CLASS(_dcr, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_DCR, \
+ .dcr = _dcr, \
+ }
+
+/**
+ * struct i3c_driver - I3C device driver
+ * @driver: inherit from device_driver
+ * @probe: I3C device probe method
+ * @remove: I3C device remove method
+ * @id_table: I3C device match table. Will be used by the framework to decide
+ * which device to bind to this driver
+ */
+struct i3c_driver {
+ struct device_driver driver;
+ int (*probe)(struct i3c_device *dev);
+ void (*remove)(struct i3c_device *dev);
+ const struct i3c_device_id *id_table;
+};
+
+#define drv_to_i3cdrv(__drv) container_of_const(__drv, struct i3c_driver, driver)
+
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
+
+/**
+ * dev_to_i3cdev() - Returns the I3C device containing @dev
+ * @__dev: device object
+ *
+ * Return: a pointer to an I3C device object.
+ */
+#define dev_to_i3cdev(__dev) container_of_const(__dev, struct i3c_device, dev)
+
+const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+ const struct i3c_device_id *id_table);
+
+static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev,
+ void *data)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ dev_set_drvdata(dev, data);
+}
+
+static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ return dev_get_drvdata(dev);
+}
+
+int i3c_driver_register_with_owner(struct i3c_driver *drv,
+ struct module *owner);
+void i3c_driver_unregister(struct i3c_driver *drv);
+
+#define i3c_driver_register(__drv) \
+ i3c_driver_register_with_owner(__drv, THIS_MODULE)
+
+/**
+ * module_i3c_driver() - Register a module providing an I3C driver
+ * @__drv: the I3C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * driver.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_driver(__drv) \
+ module_driver(__drv, i3c_driver_register, i3c_driver_unregister)
+
+/**
+ * i3c_i2c_driver_register() - Register an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to register
+ * @i2cdrv: the I2C driver to register
+ *
+ * This function registers both @i2cdrv and @i3cdrv, and fails if one of these
+ * registrations fails. This is mainly useful for devices that support both I2C
+ * and I3C modes.
+ * Note that when CONFIG_I3C is not enabled, this function only registers the
+ * I2C driver.
+ *
+ * Return: 0 if both registrations succeed, a negative error code otherwise.
+ */
+static __always_inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ int ret;
+
+ ret = i2c_add_driver(i2cdrv);
+ if (ret || !IS_ENABLED(CONFIG_I3C))
+ return ret;
+
+ ret = i3c_driver_register(i3cdrv);
+ if (ret)
+ i2c_del_driver(i2cdrv);
+
+ return ret;
+}
+
+/**
+ * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to unregister
+ * @i2cdrv: the I2C driver to unregister
+ *
+ * This function unregisters both @i3cdrv and @i2cdrv.
+ * Note that when CONFIG_I3C is not enabled, this function only unregisters the
+ * @i2cdrv.
+ */
+static __always_inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ if (IS_ENABLED(CONFIG_I3C))
+ i3c_driver_unregister(i3cdrv);
+
+ i2c_del_driver(i2cdrv);
+}
+
+/**
+ * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
+ * driver
+ * @__i3cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * and an I2C driver.
+ * This macro can be used even if CONFIG_I3C is disabled; in this case, only
+ * the I2C driver will be registered.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \
+ module_driver(__i3cdrv, \
+ i3c_i2c_driver_register, \
+ i3c_i2c_driver_unregister, \
+ __i2cdrv)
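Typical usage, sketched with made-up IDs and with the probe implementations not shown; a real driver would usually also provide an I2C id table:

static const struct i3c_device_id foo_i3c_ids[] = {
	I3C_DEVICE(0x123, 0x456, NULL),	/* hypothetical manufacturer/part */
	{ /* sentinel */ },
};

static struct i3c_driver foo_i3c_driver = {
	.driver = { .name = "foo" },
	.probe = foo_i3c_probe,
	.id_table = foo_i3c_ids,
};

static struct i2c_driver foo_i2c_driver = {
	.driver = { .name = "foo" },
	.probe = foo_i2c_probe,
};

/* note: the I2C driver is passed by address, per module_driver()'s varargs */
module_i3c_i2c_driver(foo_i3c_driver, &foo_i2c_driver);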
+
+int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
+
+static inline int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_xfer *xfers,
+ int nxfers)
+{
+ return i3c_device_do_xfers(dev, xfers, nxfers, I3C_SDR);
+}
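A device-driver-side sketch of a write-then-read register access built on these transfers. Buffers must be DMA-able, hence the heap-allocated scratch space; the one-byte register protocol is invented for the example:

static int foo_read_reg(struct i3c_device *dev, u8 reg, u8 *val)
{
	struct i3c_xfer xfers[2] = { };
	u8 *buf;
	int ret;

	buf = kmalloc(2, GFP_KERNEL);	/* DMA-able scratch space */
	if (!buf)
		return -ENOMEM;
	buf[0] = reg;

	xfers[0].rnw = false;		/* write the register index */
	xfers[0].len = 1;
	xfers[0].data.out = buf;

	xfers[1].rnw = true;		/* then read one byte back */
	xfers[1].len = 1;
	xfers[1].data.in = buf + 1;

	ret = i3c_device_do_priv_xfers(dev, xfers, 2);
	if (!ret)
		*val = buf[1];

	kfree(buf);
	return ret;
}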
+
+int i3c_device_do_setdasa(struct i3c_device *dev);
+
+void i3c_device_get_info(const struct i3c_device *dev, struct i3c_device_info *info);
+
+struct i3c_ibi_payload {
+ unsigned int len;
+ const void *data;
+};
+
+/**
+ * struct i3c_ibi_setup - IBI setup object
+ * @max_payload_len: maximum length of the payload associated to an IBI. If one
+ * IBI appears to have a payload that is bigger than this
+ * number, the IBI will be rejected.
+ * @num_slots: number of pre-allocated IBI slots. This should be chosen so that
+ * the system never runs out of IBI slots, otherwise you'll lose
+ * IBIs.
+ * @handler: IBI handler, called every time an IBI is received. This handler
+ * is called in a workqueue context. It is allowed to sleep and send new
+ * messages on the bus, though it's recommended to keep the
+ * processing done there as fast as possible to avoid delaying
+ * processing of other IBIs queued on the same workqueue.
+ *
+ * Temporary structure used to pass information to i3c_device_request_ibi().
+ * This object can be allocated on the stack since i3c_device_request_ibi()
+ * copies every bit of information and does not use it after
+ * i3c_device_request_ibi() has returned.
+ */
+struct i3c_ibi_setup {
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *setup);
+void i3c_device_free_ibi(struct i3c_device *dev);
+int i3c_device_enable_ibi(struct i3c_device *dev);
+int i3c_device_disable_ibi(struct i3c_device *dev);
+u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
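Sketch of the request/enable sequence from a device driver's probe path, with a trivial handler; the payload length and slot count here are illustrative only:

static void foo_ibi_handler(struct i3c_device *dev,
			    const struct i3c_ibi_payload *payload)
{
	dev_dbg(i3cdev_to_dev(dev), "IBI, %u byte(s) of payload\n",
		payload->len);
}

static int foo_setup_ibi(struct i3c_device *dev)
{
	struct i3c_ibi_setup setup = {
		.max_payload_len = 2,
		.num_slots = 10,
		.handler = foo_ibi_handler,
	};
	int ret;

	/* setup can live on the stack: the core copies everything it needs */
	ret = i3c_device_request_ibi(dev, &setup);
	if (ret)
		return ret;

	ret = i3c_device_enable_ibi(dev);
	if (ret)
		i3c_device_free_ibi(dev);

	return ret;
}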
+
+#endif /* I3C_DEV_H */
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
new file mode 100644
index 000000000000..2fd850f4678b
--- /dev/null
+++ b/include/linux/i3c/master.h
@@ -0,0 +1,737 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_MASTER_H
+#define I3C_MASTER_H
+
+#include <asm/bitsperlong.h>
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/i3c/ccc.h>
+#include <linux/i3c/device.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define I3C_HOT_JOIN_ADDR 0x2
+#define I3C_BROADCAST_ADDR 0x7e
+#define I3C_MAX_ADDR GENMASK(6, 0)
+
+struct i2c_client;
+
+/* notifier actions. notifier call data is the struct i3c_bus */
+enum {
+ I3C_NOTIFY_BUS_ADD,
+ I3C_NOTIFY_BUS_REMOVE,
+};
+
+struct i3c_master_controller;
+struct i3c_bus;
+struct i3c_device;
+extern const struct bus_type i3c_bus_type;
+
+/**
+ * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
+ * @node: node element used to insert the slot into the I2C or I3C device
+ * list
+ * @master: I3C master that instantiated this device. Will be used to do
+ * I2C/I3C transfers
+ * @master_priv: master private data assigned to the device. Can be used to
+ * add master specific information
+ *
+ * This structure is describing common I3C/I2C dev information.
+ */
+struct i3c_i2c_dev_desc {
+ struct list_head node;
+ struct i3c_master_controller *master;
+ void *master_priv;
+};
+
+#define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5)
+#define I3C_LVR_I2C_INDEX(x) ((x) << 5)
+#define I3C_LVR_I2C_FM_MODE BIT(4)
+
+#define I2C_MAX_ADDR GENMASK(6, 0)
+
+/**
+ * struct i2c_dev_boardinfo - I2C device board information
+ * @node: used to insert the boardinfo object in the I2C boardinfo list
+ * @base: regular I2C board information
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * This structure is used to attach board-level information to an I2C device.
+ * Each I2C device connected on the I3C bus should have one.
+ */
+struct i2c_dev_boardinfo {
+ struct list_head node;
+ struct i2c_board_info base;
+ u8 lvr;
+};
+
+/**
+ * struct i2c_dev_desc - I2C device descriptor
+ * @common: common part of the I2C device descriptor
+ * @dev: I2C device object registered to the I2C framework
+ * @addr: I2C device address
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * Each I2C device connected on the bus will have an i2c_dev_desc.
+ * This object is created by the core and later attached to the controller
+ * using &struct_i3c_master_controller->ops->attach_i2c_dev().
+ *
+ * &struct_i2c_dev_desc is the internal representation of an I2C device
+ * connected on an I3C bus. This object is also passed to all
+ * &struct_i3c_master_controller_ops hooks.
+ */
+struct i2c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i2c_client *dev;
+ u16 addr;
+ u8 lvr;
+};
+
+/**
+ * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot
+ * @work: work associated to this slot. The IBI handler will be called from
+ * there
+ * @dev: the I3C device that has generated this IBI
+ * @len: length of the payload associated to this IBI
+ * @data: payload buffer
+ *
+ * An IBI slot is an object pre-allocated by the controller and used when an
+ * IBI comes in.
+ * Every time an IBI comes in, the I3C master driver should find a free IBI
+ * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using
+ * i3c_master_queue_ibi().
+ *
+ * How IBI slots are allocated is left to the I3C master driver, though, for
+ * simple kmalloc-based allocation, the generic IBI slot pool can be used.
+ */
+struct i3c_ibi_slot {
+ struct work_struct work;
+ struct i3c_dev_desc *dev;
+ unsigned int len;
+ void *data;
+};
+
+/**
+ * struct i3c_device_ibi_info - IBI information attached to a specific device
+ * @all_ibis_handled: used to be informed when no more IBIs are waiting to be
+ * processed. Used by i3c_device_disable_ibi() to wait for
+ * all IBIs to be dequeued
+ * @pending_ibis: count the number of pending IBIs. Each pending IBI has its
+ * work element queued to the controller workqueue
+ * @max_payload_len: maximum payload length for an IBI coming from this device.
+ * This value is specified when calling
+ * i3c_device_request_ibi() and should not change at run
+ * time. All IBIs exceeding this limit should be
+ * rejected by the master
+ * @num_slots: number of IBI slots reserved for this device
+ * @enabled: reflect the IBI status
+ * @wq: workqueue used to execute IBI handlers.
+ * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
+ * handler will be called from the controller workqueue, and as such
+ * is allowed to sleep (though it is recommended to process the IBI
+ * as fast as possible to not stall processing of other IBIs queued
+ * on the same workqueue).
+ * New I3C messages can be sent from the IBI handler
+ *
+ * The &struct_i3c_device_ibi_info object is allocated when
+ * i3c_device_request_ibi() is called and attached to a specific device. This
+ * object is here to manage IBIs coming from a specific I3C device.
+ *
+ * Note that this structure is the generic view of the IBI management
+ * infrastructure. I3C master drivers may have their own internal
+ * representation which they can associate to the device using
+ * controller-private data.
+ */
+struct i3c_device_ibi_info {
+ struct completion all_ibis_handled;
+ atomic_t pending_ibis;
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ unsigned int enabled;
+ struct workqueue_struct *wq;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+/**
+ * struct i3c_dev_boardinfo - I3C device board information
+ * @node: used to insert the boardinfo object in the I3C boardinfo list
+ * @init_dyn_addr: initial dynamic address requested by the FW. We provide no
+ * guarantee that the device will end up using this address,
+ * but try our best to assign this specific address to the
+ * device
+ * @static_addr: static address the I3C device listens on before it's been
+ * assigned a dynamic address by the master. Will be used during
+ * bus initialization to assign it a specific dynamic address
+ * before starting DAA (Dynamic Address Assignment)
+ * @pid: I3C Provisioned ID exposed by the device. This is a unique identifier
+ * that may be used to attach boardinfo to i3c_dev_desc when the device
+ * does not have a static address
+ * @of_node: optional DT node in case the device has been described in the DT
+ *
+ * This structure is used to attach board-level information to an I3C device.
+ * Not all I3C devices connected on the bus will have a boardinfo. It's only
+ * needed if you want to attach extra resources to a device or assign it a
+ * specific dynamic address.
+ */
+struct i3c_dev_boardinfo {
+ struct list_head node;
+ u8 init_dyn_addr;
+ u8 static_addr;
+ u64 pid;
+ struct device_node *of_node;
+};
+
+/**
+ * struct i3c_dev_desc - I3C device descriptor
+ * @common: common part of the I3C device descriptor
+ * @info: I3C device information. Will be automatically filled when you create
+ * your device with i3c_master_add_i3c_dev_locked()
+ * @ibi_lock: lock used to protect the &struct_i3c_device->ibi
+ * @ibi: IBI info attached to a device. Should be NULL until
+ * i3c_device_request_ibi() is called
+ * @dev: pointer to the I3C device object exposed to I3C device drivers. This
+ * should never be accessed from I3C master controller drivers. Only core
+ * code should manipulate it when updating the dev <-> desc link or
+ * when propagating IBI events to the driver
+ * @boardinfo: pointer to the boardinfo attached to this I3C device
+ *
+ * Internal representation of an I3C device. This object is only used by the
+ * core and passed to I3C master controller drivers when they're requested to
+ * do some operations on the device.
+ * The core maintains the link between the internal I3C dev descriptor and the
+ * object exposed to the I3C device drivers (&struct_i3c_device).
+ */
+struct i3c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i3c_device_info info;
+ struct mutex ibi_lock;
+ struct i3c_device_ibi_info *ibi;
+ struct i3c_device *dev;
+ const struct i3c_dev_boardinfo *boardinfo;
+};
+
+/**
+ * struct i3c_device - I3C device object
+ * @dev: device object to register the I3C dev to the device model
+ * @desc: pointer to an i3c device descriptor object. This link is updated
+ * every time the I3C device is rediscovered with a different dynamic
+ * address assigned
+ * @bus: I3C bus this device is attached to
+ *
+ * I3C device object exposed to I3C device drivers. The core takes care of
+ * linking this object to the relevant &struct_i3c_dev_desc one.
+ * All I3C devs on the I3C bus are represented, including I3C masters. For each
+ * of them, we have an instance of &struct i3c_device.
+ */
+struct i3c_device {
+ struct device dev;
+ struct i3c_dev_desc *desc;
+ struct i3c_bus *bus;
+};
+
+/*
+ * The I3C specification says the maximum number of devices connected on the
+ * bus is 11, but this number depends on external parameters like trace length,
+ * capacitive load per Device, and the types of Devices present on the Bus.
+ * The I3C master can also have limitations, so this number is just here as a
+ * reference and should be adjusted on a per-controller/per-board basis.
+ */
+#define I3C_BUS_MAX_DEVS 11
+
+/* Taken from the I3C Spec V1.1.1, chapter 6.2. "Timing specification" */
+#define I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE 1000000
+#define I3C_BUS_I2C_FM_SCL_MAX_RATE 400000
+#define I3C_BUS_I3C_SCL_MAX_RATE 12900000
+#define I3C_BUS_I3C_SCL_TYP_RATE 12500000
+#define I3C_BUS_TAVAL_MIN_NS 1000
+#define I3C_BUS_TBUF_MIXED_FM_MIN_NS 1300
+#define I3C_BUS_THIGH_MIXED_MAX_NS 41
+#define I3C_BUS_TIDLE_MIN_NS 200000
+#define I3C_BUS_TLOW_OD_MIN_NS 200
+
+/**
+ * enum i3c_bus_mode - I3C bus mode
+ * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation
+ * expected
+ * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on
+ * the bus. The only impact in this mode is that the
+ * high SCL pulse has to stay below 50ns to trick I2C
+ * devices when transmitting I3C frames
+ * @I3C_BUS_MODE_MIXED_LIMITED: I2C devices without 50ns spike filter are
+ * present on the bus. However, they can tolerate
+ * SCL rates up to the maximum I3C SDR clock
+ * frequency.
+ * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present
+ * on the bus
+ */
+enum i3c_bus_mode {
+ I3C_BUS_MODE_PURE,
+ I3C_BUS_MODE_MIXED_FAST,
+ I3C_BUS_MODE_MIXED_LIMITED,
+ I3C_BUS_MODE_MIXED_SLOW,
+};
+
+/**
+ * enum i3c_open_drain_speed - I3C open-drain speed
+ * @I3C_OPEN_DRAIN_SLOW_SPEED: Slow open-drain speed for sending the first
+ * broadcast address. The first broadcast address at this speed
+ * will be visible to all devices on the I3C bus. I3C devices
+ * working in I2C mode will turn off their spike filter when
+ * switching into I3C mode.
+ * @I3C_OPEN_DRAIN_NORMAL_SPEED: Normal open-drain speed in I3C bus mode.
+ */
+enum i3c_open_drain_speed {
+ I3C_OPEN_DRAIN_SLOW_SPEED,
+ I3C_OPEN_DRAIN_NORMAL_SPEED,
+};
+
+/**
+ * enum i3c_addr_slot_status - I3C address slot status
+ * @I3C_ADDR_SLOT_FREE: address is free
+ * @I3C_ADDR_SLOT_RSVD: address is reserved
+ * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
+ * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
+ * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
+ * @I3C_ADDR_SLOT_EXT_STATUS_MASK: address slot mask with extended information
+ * @I3C_ADDR_SLOT_EXT_DESIRED: the bitmask represents addresses that are preferred by some devices,
+ * such as the "assigned-address" property in a device tree source.
+ *
+ * On an I3C bus, addresses are assigned dynamically, and we need to know which
+ * addresses are free to use and which ones are already assigned.
+ *
+ * Addresses marked as reserved are those reserved by the I3C protocol
+ * (broadcast address, ...).
+ */
+enum i3c_addr_slot_status {
+ I3C_ADDR_SLOT_FREE,
+ I3C_ADDR_SLOT_RSVD,
+ I3C_ADDR_SLOT_I2C_DEV,
+ I3C_ADDR_SLOT_I3C_DEV,
+ I3C_ADDR_SLOT_STATUS_MASK = 3,
+ I3C_ADDR_SLOT_EXT_STATUS_MASK = 7,
+ I3C_ADDR_SLOT_EXT_DESIRED = BIT(2),
+};
+
+#define I3C_ADDR_SLOT_STATUS_BITS 4
+
+/**
+ * struct i3c_bus - I3C bus object
+ * @cur_master: I3C master currently driving the bus. Since I3C is multi-master
+ * this can change over time. Will be used to let a master
+ * know whether it needs to request bus ownership before sending
+ * a frame or not
+ * @id: bus ID. Assigned by the framework when the bus is registered
+ * @addrslots: a bitmap with 4 bits per slot to encode the address status and
+ * ease the DAA (Dynamic Address Assignment) procedure (see
+ * &enum i3c_addr_slot_status)
+ * @mode: bus mode (see &enum i3c_bus_mode)
+ * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv
+ * transfers
+ * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers
+ * @scl_rate: SCL signal rate for I3C and I2C mode
+ * @devs.i3c: contains a list of I3C device descriptors representing I3C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs.i2c: contains a list of I2C device descriptors representing I2C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs: 2 lists containing all I3C/I2C devices connected to the bus
+ * @lock: read/write lock on the bus. This is needed to protect against
+ * operations that have an impact on the whole bus and the devices
+ * connected to it. For example, when asking slaves to drop their
+ * dynamic address (RSTDAA CCC), we need to make sure no one is trying
+ * to send I3C frames to these devices.
+ * Note that this lock does not protect against concurrency between
+ * devices: several drivers can send different I3C/I2C frames through
+ * the same master in parallel. It is the responsibility of the
+ * master to guarantee that frames are actually sent sequentially and
+ * not interleaved
+ *
+ * The I3C bus is represented with its own object and not implicitly described
+ * by the I3C master to cope with the multi-master functionality, where one bus
+ * can be shared amongst several masters, each of them requesting bus ownership
+ * when they need to.
+ */
+struct i3c_bus {
+ struct i3c_dev_desc *cur_master;
+ int id;
+ unsigned long addrslots[((I2C_MAX_ADDR + 1) * I3C_ADDR_SLOT_STATUS_BITS) / BITS_PER_LONG];
+ enum i3c_bus_mode mode;
+ struct {
+ unsigned long i3c;
+ unsigned long i2c;
+ } scl_rate;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } devs;
+ struct rw_semaphore lock;
+};
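
To illustrate the @addrslots encoding, a decode helper along these lines
could extract the 4-bit status of one address (the in-tree helpers live in
drivers/i3c/master.c and may differ in detail):

static enum i3c_addr_slot_status
foo_addr_slot_status(struct i3c_bus *bus, u16 addr)
{
	unsigned long status;
	int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS;

	if (addr > I2C_MAX_ADDR)
		return I3C_ADDR_SLOT_RSVD;

	/* 4 divides BITS_PER_LONG, so a slot never straddles two words */
	status = bus->addrslots[bitpos / BITS_PER_LONG];
	status >>= bitpos % BITS_PER_LONG;

	return status & I3C_ADDR_SLOT_EXT_STATUS_MASK;
}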
+
+/**
+ * struct i3c_master_controller_ops - I3C master methods
+ * @bus_init: hook responsible for the I3C bus initialization. You should at
+ * least call i3c_master_set_info() from there and set the bus mode.
+ * You can also put controller specific initialization in there.
+ * This method is mandatory.
+ * @bus_cleanup: cleanup everything done in
+ * &i3c_master_controller_ops->bus_init().
+ * This method is optional.
+ * @attach_i3c_dev: called every time an I3C device is attached to the bus. It
+ * can be after a DAA or when a device is statically declared
+ * by the FW, in which case it will only have a static address
+ * and the dynamic address will be 0.
+ * When this function is called, device information has not
+ * been retrieved yet.
+ * This is a good place to attach master controller specific
+ * data to I3C devices.
+ * This method is optional.
+ * @reattach_i3c_dev: called every time an I3C device has its address
+ * changed. It can be because the device has been powered
+ * down and has lost its address, or it can happen when a
+ * device had a static address and has been assigned a
+ * dynamic address with SETDASA.
+ * This method is optional.
+ * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @do_daa: do a DAA (Dynamic Address Assignment) procedure. This procedure
+ * should send an ENTDAA CCC command and then add all devices
+ * discovered during the DAA using i3c_master_add_i3c_dev_locked().
+ * All devices added with i3c_master_add_i3c_dev_locked() will then be
+ * attached or re-attached to the controller.
+ * This method is mandatory.
+ * @supports_ccc_cmd: should return true if the CCC command is supported, false
+ * otherwise.
+ * This method is optional, if not provided the core assumes
+ * all CCC commands are supported.
+ * @send_ccc_cmd: send a CCC command
+ * This method is mandatory.
+ * @priv_xfers: do one or several private I3C SDR transfers
+ * This method is mandatory when i3c_xfers is not implemented. It
+ * is deprecated.
+ * @i3c_xfers: do one or several I3C SDR or HDR transfers
+ * This method is mandatory when priv_xfers is not implemented,
+ * and new drivers should implement it instead of priv_xfers.
+ * @attach_i2c_dev: called every time an I2C device is attached to the bus.
+ * This is a good place to attach master controller specific
+ * data to I2C devices.
+ * This method is optional.
+ * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @i2c_xfers: do one or several I2C transfers. Note that, unlike i3c
+ * transfers, the core does not guarantee that buffers attached to
+ * the transfers are DMA-safe. If drivers want to have DMA-safe
+ * buffers, they should use the i2c_get_dma_safe_msg_buf()
+ * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C
+ * framework.
+ * This method is mandatory.
+ * @request_ibi: attach an IBI handler to an I3C device. This implies defining
+ * an IBI handler and the constraints of the IBI (maximum payload
+ * length and number of pre-allocated slots).
+ * Some controllers support less IBI-capable devices than regular
+ * devices, so this method might return -%EBUSY if there's no
+ * more space for an extra IBI registration
+ * This method is optional.
+ * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI
+ * should have been disabled with ->disable_ibi() prior to that
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called
+ * prior to ->enable_ibi(). The controller should first enable
+ * the IBI on the controller end (for example, unmask the hardware
+ * IRQ) and then send the ENEC CCC command (with the IBI flag set)
+ * to the I3C device.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI
+ * flag set and then deactivate the hardware IRQ on the
+ * controller end.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been
+ * processed by its handler. The IBI slot should be put back
+ * in the IBI slot pool so that the controller can re-use it
+ * for a future IBI
+ * This method is mandatory only if ->request_ibi is not
+ * NULL.
+ * @enable_hotjoin: enable hot join event detection.
+ * @disable_hotjoin: disable hot join event detection.
+ * @set_speed: adjust I3C open drain mode timing.
+ */
+struct i3c_master_controller_ops {
+ int (*bus_init)(struct i3c_master_controller *master);
+ void (*bus_cleanup)(struct i3c_master_controller *master);
+ int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
+ void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*do_daa)(struct i3c_master_controller *master);
+ bool (*supports_ccc_cmd)(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd);
+ int (*send_ccc_cmd)(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd);
+ /* Deprecated, please use i3c_xfers() */
+ int (*priv_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+ int (*i3c_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
+ int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
+ void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
+ int (*i2c_xfers)(struct i2c_dev_desc *dev,
+ struct i2c_msg *xfers, int nxfers);
+ int (*request_ibi)(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_dev_desc *dev);
+ int (*enable_ibi)(struct i3c_dev_desc *dev);
+ int (*disable_ibi)(struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+ int (*enable_hotjoin)(struct i3c_master_controller *master);
+ int (*disable_hotjoin)(struct i3c_master_controller *master);
+ int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed);
+};
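
As a sketch of what a controller driver wires up, a minimal ops table for a
hypothetical "foo" controller could fill in only the mandatory hooks:

static int foo_bus_init(struct i3c_master_controller *master);
static void foo_bus_cleanup(struct i3c_master_controller *master);
static int foo_do_daa(struct i3c_master_controller *master);
static int foo_send_ccc_cmd(struct i3c_master_controller *master,
			    struct i3c_ccc_cmd *cmd);
static int foo_priv_xfers(struct i3c_dev_desc *dev,
			  struct i3c_priv_xfer *xfers, int nxfers);
static int foo_i2c_xfers(struct i2c_dev_desc *dev,
			 struct i2c_msg *xfers, int nxfers);

static const struct i3c_master_controller_ops foo_ops = {
	.bus_init	= foo_bus_init,
	.bus_cleanup	= foo_bus_cleanup,
	.do_daa		= foo_do_daa,
	.send_ccc_cmd	= foo_send_ccc_cmd,
	.priv_xfers	= foo_priv_xfers,
	.i2c_xfers	= foo_i2c_xfers,
};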
+
+/**
+ * struct i3c_master_controller - I3C master controller object
+ * @dev: device to be registered to the device-model
+ * @this: an I3C device object representing this master. This device will be
+ * added to the list of I3C devs available on the bus
+ * @i2c: I2C adapter used for backward compatibility. This adapter is
+ * registered to the I2C subsystem to be as transparent as possible to
+ * existing I2C drivers
+ * @ops: master operations. See &struct i3c_master_controller_ops
+ * @secondary: true if the master is a secondary master
+ * @init_done: true when the bus initialization is done
+ * @hotjoin: true if the master supports hotjoin
+ * @boardinfo.i3c: list of I3C boardinfo objects
+ * @boardinfo.i2c: list of I2C boardinfo objects
+ * @boardinfo: board-level information attached to devices connected on the bus
+ * @bus: I3C bus exposed by this master
+ * @wq: workqueue which can be used by master
+ * drivers if they need to postpone operations that need to take place
+ * in a thread context. A typical example is Hot Join processing, which
+ * requires taking the bus lock in maintenance mode and thus can only
+ * be done from a sleepable context
+ *
+ * A &struct i3c_master_controller has to be registered to the I3C subsystem
+ * through i3c_master_register(). None of the &struct i3c_master_controller fields
+ * should be set manually, just pass appropriate values to
+ * i3c_master_register().
+ */
+struct i3c_master_controller {
+ struct device dev;
+ struct i3c_dev_desc *this;
+ struct i2c_adapter i2c;
+ const struct i3c_master_controller_ops *ops;
+ unsigned int secondary : 1;
+ unsigned int init_done : 1;
+ unsigned int hotjoin: 1;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } boardinfo;
+ struct i3c_bus bus;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I2C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I2C devs present on the bus.
+ */
+#define i3c_bus_for_each_i2cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i2c, common.node)
+
+/**
+ * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I3C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I3C devs present on the bus.
+ */
+#define i3c_bus_for_each_i3cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
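
Usage sketch for the iterators above, counting the I3C devices attached to a
bus (callers are expected to hold the bus lock):

static int foo_count_i3c_devs(struct i3c_bus *bus)
{
	struct i3c_dev_desc *desc;
	int count = 0;

	i3c_bus_for_each_i3cdev(bus, desc)
		count++;

	return count;
}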
+
+/**
+ * struct i3c_dma - DMA transfer and mapping descriptor
+ * @dev: device object of a device doing DMA
+ * @buf: destination/source buffer for DMA
+ * @len: length of transfer
+ * @map_len: length of DMA mapping
+ * @addr: mapped DMA address for a Host Controller Driver
+ * @dir: DMA direction
+ * @bounce_buf: an allocated bounce buffer if transfer needs it or NULL
+ */
+struct i3c_dma {
+ struct device *dev;
+ void *buf;
+ size_t len;
+ size_t map_len;
+ dma_addr_t addr;
+ enum dma_data_direction dir;
+ void *bounce_buf;
+};
+
+int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
+ const struct i2c_msg *xfers,
+ int nxfers);
+
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_entdaa_locked(struct i3c_master_controller *master);
+int i3c_master_defslvs_locked(struct i3c_master_controller *master);
+
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr);
+
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr);
+int i3c_master_do_daa(struct i3c_master_controller *master);
+struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *ptr,
+ size_t len, bool force_bounce,
+ enum dma_data_direction dir);
+void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer);
+DEFINE_FREE(i3c_master_dma_unmap_single, void *,
+ if (_T) i3c_master_dma_unmap_single(_T))
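
A hedged usage sketch for the DMA helpers, assuming i3c_master_dma_map_single()
returns NULL on failure and that <linux/cleanup.h> is available; the
scope-based __free() annotation triggers the DEFINE_FREE() cleanup above when
the pointer goes out of scope. foo_hw_xfer_and_wait() is a hypothetical
controller routine:

int foo_hw_xfer_and_wait(struct device *dev, struct i3c_dma *xfer);

static int foo_dma_write(struct device *dev, void *buf, size_t len)
{
	struct i3c_dma *xfer __free(i3c_master_dma_unmap_single) =
		i3c_master_dma_map_single(dev, buf, len, false,
					  DMA_TO_DEVICE);

	if (!xfer)
		return -ENOMEM;

	/*
	 * Program xfer->addr/xfer->map_len into the controller and wait for
	 * completion; the mapping is released automatically when xfer goes
	 * out of scope.
	 */
	return foo_hw_xfer_and_wait(dev, xfer);
}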
+
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info);
+
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary);
+void i3c_master_unregister(struct i3c_master_controller *master);
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master);
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master);
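
Registration typically happens at the end of a controller probe routine; a
hypothetical platform driver embedding the controller object and reusing the
foo_ops table sketched earlier might do:

struct foo_i3c {
	struct i3c_master_controller base;
	/* controller-specific state (registers, clocks, IRQ, ...) */
};

static int foo_i3c_probe(struct platform_device *pdev)
{
	struct foo_i3c *foo;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* ... map registers, request the IRQ, enable clocks ... */

	return i3c_master_register(&foo->base, &pdev->dev, &foo_ops, false);
}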
+
+/**
+ * i3c_dev_get_master_data() - get master private data attached to an I3C
+ * device descriptor
+ * @dev: the I3C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i3c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i3c_dev_set_master_data() - attach master private data to an I3C device
+ * descriptor
+ * @dev: the I3C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i3c_dev_get_master_data().
+ */
+static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
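
Typical pairing of these accessors in a controller's attach/detach hooks,
where foo_i3c_dev is an illustrative per-device state type:

struct foo_i3c_dev {
	u8 hw_slot;	/* illustrative controller-side bookkeeping */
};

static int foo_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct foo_i3c_dev *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	i3c_dev_set_master_data(dev, priv);
	return 0;
}

static void foo_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct foo_i3c_dev *priv = i3c_dev_get_master_data(dev);

	i3c_dev_set_master_data(dev, NULL);
	kfree(priv);
}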
+
+/**
+ * i2c_dev_get_master_data() - get master private data attached to an I2C
+ * device descriptor
+ * @dev: the I2C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i2c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i2c_dev_set_master_data() - attach master private data to an I2C device
+ * descriptor
+ * @dev: the I2C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i2c_dev_get_master_data().
+ */
+static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i3c_dev_get_master() - get master used to communicate with a device
+ * @dev: I3C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i3c_dev_get_master(struct i3c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i2c_dev_get_master() - get master used to communicate with a device
+ * @dev: I2C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i2c_dev_get_master(struct i2c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i3c_master_get_bus() - get the bus attached to a master
+ * @master: master object
+ *
+ * Return: the I3C bus @master is connected to
+ */
+static inline struct i3c_bus *
+i3c_master_get_bus(struct i3c_master_controller *master)
+{
+ return &master->bus;
+}
+
+struct i3c_generic_ibi_pool;
+
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool);
+
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool);
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *slot);
+
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
+
+struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
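
Sketch of a controller IRQ path built on the generic IBI pool: grab a free
slot, copy the payload in, then hand the slot to the core, which invokes the
handler and later calls ->recycle_ibi_slot(). This assumes @len never exceeds
the max_payload_len negotiated at request time:

static void foo_handle_ibi(struct i3c_dev_desc *dev, const u8 *data,
			   unsigned int len)
{
	struct i3c_ibi_slot *slot;

	slot = i3c_master_get_free_ibi_slot(dev);
	if (!slot)
		return;	/* pool exhausted: this IBI is dropped */

	memcpy(slot->data, data, len);
	slot->len = len;
	i3c_master_queue_ibi(dev, slot);
}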
+
+void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data),
+ void *data);
+int i3c_register_notifier(struct notifier_block *nb);
+int i3c_unregister_notifier(struct notifier_block *nb);
+
+#endif /* I3C_MASTER_H */
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h
deleted file mode 100644
index 1587b7dec505..000000000000
--- a/include/linux/i7300_idle.h
+++ /dev/null
@@ -1,83 +0,0 @@
-
-#ifndef I7300_IDLE_H
-#define I7300_IDLE_H
-
-#include <linux/pci.h>
-
-/*
- * I/O AT controls (PCI bus 0 device 8 function 0)
- * DIMM controls (PCI bus 0 device 16 function 1)
- */
-#define IOAT_BUS 0
-#define IOAT_DEVFN PCI_DEVFN(8, 0)
-#define MEMCTL_BUS 0
-#define MEMCTL_DEVFN PCI_DEVFN(16, 1)
-
-struct fbd_ioat {
- unsigned int vendor;
- unsigned int ioat_dev;
- unsigned int enabled;
-};
-
-/*
- * The i5000 chip-set has the same hooks as the i7300
- * but it is not enabled by default and must be manually
- * manually enabled with "forceload=1" because it is
- * only lightly validated.
- */
-
-static const struct fbd_ioat fbd_ioat_list[] = {
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0},
- {0, 0}
-};
-
-/* table of devices that work with this driver */
-static const struct pci_device_id pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
- { } /* Terminating entry */
-};
-
-/* Check for known platforms with I/O-AT */
-static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
- struct pci_dev **ioat_dev,
- int enable_all)
-{
- int i;
- struct pci_dev *memdev, *dmadev;
-
- memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN);
- if (!memdev)
- return -ENODEV;
-
- for (i = 0; pci_tbl[i].vendor != 0; i++) {
- if (memdev->vendor == pci_tbl[i].vendor &&
- memdev->device == pci_tbl[i].device) {
- break;
- }
- }
- if (pci_tbl[i].vendor == 0)
- return -ENODEV;
-
- dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN);
- if (!dmadev)
- return -ENODEV;
-
- for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
- if (dmadev->vendor == fbd_ioat_list[i].vendor &&
- dmadev->device == fbd_ioat_list[i].ioat_dev) {
- if (!(fbd_ioat_list[i].enabled || enable_all))
- continue;
- if (fbd_dev)
- *fbd_dev = memdev;
- if (ioat_dev)
- *ioat_dev = dmadev;
-
- return 0;
- }
- }
- return -ENODEV;
-}
-
-#endif
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index d98780ca9604..00037c13abc8 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_I8042_H
#define _LINUX_I8042_H
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
+#include <linux/errno.h>
#include <linux/types.h>
/*
@@ -57,15 +54,29 @@
struct serio;
+/**
+ * typedef i8042_filter_t - i8042 filter callback
+ * @data: Data received by the i8042 controller
+ * @str: Status register of the i8042 controller
+ * @serio: Serio of the i8042 controller
+ * @context: Context pointer associated with this callback
+ *
+ * This represents an i8042 filter callback which can be used with i8042_install_filter()
+ * and i8042_remove_filter() to filter the i8042 input for platform-specific key codes.
+ *
+ * Context: Interrupt context.
+ * Returns: true if the data should be filtered out, false otherwise.
+ */
+typedef bool (*i8042_filter_t)(unsigned char data, unsigned char str, struct serio *serio,
+ void *context);
+
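A platform driver might use the new context-aware prototype like this (the
scancode matched below is purely illustrative):

/* swallow a vendor-specific hotkey scancode; context is unused here */
static bool foo_i8042_filter(unsigned char data, unsigned char str,
			     struct serio *serio, void *context)
{
	return data == 0x65;
}

/* ... in the driver's probe path: */
/* err = i8042_install_filter(foo_i8042_filter, NULL); */
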
#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE)
void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio));
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio));
+int i8042_install_filter(i8042_filter_t filter, void *context);
+int i8042_remove_filter(i8042_filter_t filter);
#else
@@ -82,14 +93,12 @@ static inline int i8042_command(unsigned char *param, int command)
return -ENODEV;
}
-static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+static inline int i8042_install_filter(i8042_filter_t filter, void *context)
{
return -ENODEV;
}
-static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+static inline int i8042_remove_filter(i8042_filter_t filter)
{
return -ENODEV;
}
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..56c280eb2d4f 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -23,6 +23,7 @@
extern raw_spinlock_t i8253_lock;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
+extern void clockevent_i8253_disable(void);
extern void setup_pit_timer(void);
diff --git a/include/linux/i8254.h b/include/linux/i8254.h
new file mode 100644
index 000000000000..a675c309232b
--- /dev/null
+++ b/include/linux/i8254.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) William Breathitt Gray */
+#ifndef _I8254_H_
+#define _I8254_H_
+
+struct device;
+struct regmap;
+
+/**
+ * struct i8254_regmap_config - Configuration for the register map of an i8254
+ * @parent: parent device
+ * @map: regmap for the i8254
+ */
+struct i8254_regmap_config {
+ struct device *parent;
+ struct regmap *map;
+};
+
+int devm_i8254_regmap_register(struct device *dev, const struct i8254_regmap_config *config);
+
+#endif /* _I8254_H_ */
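
Hypothetical usage from an MMIO platform driver; the regmap configuration
below is an assumption, since the actual register layout requirements are
defined by the i8254 driver:

static const struct regmap_config foo_i8254_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.reg_stride = 1,
};

static int foo_i8254_probe(struct platform_device *pdev)
{
	struct i8254_regmap_config config = { .parent = &pdev->dev };
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	config.map = devm_regmap_init_mmio(&pdev->dev, base,
					   &foo_i8254_regmap_config);
	if (IS_ERR(config.map))
		return PTR_ERR(config.map);

	return devm_i8254_regmap_register(&pdev->dev, &config);
}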
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index efc18490627a..043ec5d9c882 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -8,20 +9,67 @@
* Version: @(#)icmp.h 1.0.3 04/28/93
*
* Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_ICMP_H
#define _LINUX_ICMP_H
#include <linux/skbuff.h>
#include <uapi/linux/icmp.h>
+#include <uapi/linux/errqueue.h>
static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
return (struct icmphdr *)skb_transport_header(skb);
}
+
+static inline bool icmp_is_err(int type)
+{
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ case ICMP_SOURCE_QUENCH:
+ case ICMP_REDIRECT:
+ case ICMP_TIME_EXCEEDED:
+ case ICMP_PARAMETERPROB:
+ return true;
+ }
+
+ return false;
+}
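
Usage sketch: classifying a received ICMP packet by its header type, assuming
the transport header has already been set on the skb:

static bool foo_icmp_is_error_pkt(const struct sk_buff *skb)
{
	return icmp_is_err(icmp_hdr(skb)->type);
}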
+
+void ip_icmp_error_rfc4884(const struct sk_buff *skb,
+ struct sock_ee_data_rfc4884 *out,
+ int thlen, int off);
+
+/* RFC 4884 */
+#define ICMP_EXT_ORIG_DGRAM_MIN_LEN 128
+#define ICMP_EXT_VERSION_2 2
+
+/* ICMP Extension Object Classes */
+#define ICMP_EXT_OBJ_CLASS_IIO 2 /* RFC 5837 */
+
+/* Interface Information Object - RFC 5837 */
+enum {
+ ICMP_EXT_CTYPE_IIO_ROLE_IIF,
+};
+
+#define ICMP_EXT_CTYPE_IIO_ROLE(ROLE) ((ROLE) << 6)
+#define ICMP_EXT_CTYPE_IIO_MTU BIT(0)
+#define ICMP_EXT_CTYPE_IIO_NAME BIT(1)
+#define ICMP_EXT_CTYPE_IIO_IPADDR BIT(2)
+#define ICMP_EXT_CTYPE_IIO_IFINDEX BIT(3)
+
+struct icmp_ext_iio_name_subobj {
+ u8 len;
+ char name[IFNAMSIZ];
+};
+
+enum {
+ /* RFC 5837 - Incoming IP Interface Role */
+ ICMP_ERR_EXT_IIO_IIF,
+ /* Add new constants above. Used by "icmp_errors_extension_mask"
+ * sysctl.
+ */
+ ICMP_ERR_EXT_COUNT,
+};
+
#endif /* _LINUX_ICMP_H */
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 57086e9fc64c..e3b3b0fa2a8f 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ICMPV6_H
#define _LINUX_ICMPV6_H
#include <linux/skbuff.h>
+#include <linux/ipv6.h>
#include <uapi/linux/icmpv6.h>
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
@@ -12,21 +14,64 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#include <linux/netdevice.h>
#if IS_ENABLED(CONFIG_IPV6)
-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr);
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+#if IS_BUILTIN(CONFIG_IPV6)
+static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm)
+{
+ icmp6_send(skb, type, code, info, NULL, parm);
+}
+static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+#else
+extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+#endif
+
+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+{
+ __icmpv6_send(skb, type, code, info, IP6CB(skb));
+}
+
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
+#else
+static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+ struct inet6_skb_parm parm = { 0 };
+ __icmpv6_send(skb_in, type, code, info, &parm);
+}
+#endif
+
#else
static inline void icmpv6_send(struct sk_buff *skb,
u8 type, u8 code, __u32 info)
{
+}
+static inline void icmpv6_ndo_send(struct sk_buff *skb,
+ u8 type, u8 code, __u32 info)
+{
}
#endif
@@ -34,15 +79,34 @@ extern int icmpv6_init(void);
extern int icmpv6_err_convert(u8 type, u8 code,
int *err);
extern void icmpv6_cleanup(void);
-extern void icmpv6_param_prob(struct sk_buff *skb,
- u8 code, int pos);
+extern void icmpv6_param_prob_reason(struct sk_buff *skb,
+ u8 code, int pos,
+ enum skb_drop_reason reason);
struct flowi6;
struct in6_addr;
-extern void icmpv6_flow_init(struct sock *sk,
- struct flowi6 *fl6,
- u8 type,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- int oif);
+
+void icmpv6_flow_init(const struct sock *sk, struct flowi6 *fl6, u8 type,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr, int oif);
+
+static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
+{
+ icmpv6_param_prob_reason(skb, code, pos,
+ SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
+static inline bool icmpv6_is_err(int type)
+{
+ switch (type) {
+ case ICMPV6_DEST_UNREACH:
+ case ICMPV6_PKT_TOOBIG:
+ case ICMPV6_TIME_EXCEED:
+ case ICMPV6_PARAMPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif
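
A usage sketch for icmpv6_ndo_send(), which is meant for ndo_start_xmit()
contexts where the skb's IP6CB may no longer be valid; the tunnel driver and
its MTU policy here are hypothetical:

static netdev_tx_t foo_tunnel_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	if (skb->len > dev->mtu) {
		/* report PMTU back to the sender without touching IP6CB */
		icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, dev->mtu);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* ... encapsulate and transmit ... */
	return NETDEV_TX_OK;
}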
diff --git a/include/linux/ide.h b/include/linux/ide.h
deleted file mode 100644
index dc152e4b7f73..000000000000
--- a/include/linux/ide.h
+++ /dev/null
@@ -1,1617 +0,0 @@
-#ifndef _IDE_H
-#define _IDE_H
-/*
- * linux/include/linux/ide.h
- *
- * Copyright (C) 1994-2002 Linus Torvalds & authors
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/ata.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/bio.h>
-#include <linux/pci.h>
-#include <linux/completion.h>
-#include <linux/pm.h>
-#include <linux/mutex.h>
-/* for request_sense */
-#include <linux/cdrom.h>
-#include <scsi/scsi_cmnd.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-
-#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
-# define SUPPORT_VLB_SYNC 0
-#else
-# define SUPPORT_VLB_SYNC 1
-#endif
-
-/*
- * Probably not wise to fiddle with these
- */
-#define IDE_DEFAULT_MAX_FAILURES 1
-#define ERROR_MAX 8 /* Max read/write errors per sector */
-#define ERROR_RESET 3 /* Reset controller every 4th retry */
-#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
-
-struct device;
-
-/* values for ide_request.type */
-enum ata_priv_type {
- ATA_PRIV_MISC,
- ATA_PRIV_TASKFILE,
- ATA_PRIV_PC,
- ATA_PRIV_SENSE, /* sense request */
- ATA_PRIV_PM_SUSPEND, /* suspend request */
- ATA_PRIV_PM_RESUME, /* resume request */
-};
-
-struct ide_request {
- struct scsi_request sreq;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
- u8 type;
-};
-
-static inline struct ide_request *ide_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline bool ata_misc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
-}
-
-static inline bool ata_taskfile_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
-}
-
-static inline bool ata_pc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
-}
-
-static inline bool ata_sense_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
-}
-
-static inline bool ata_pm_request(struct request *rq)
-{
- return blk_rq_is_private(rq) &&
- (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
- ide_req(rq)->type == ATA_PRIV_PM_RESUME);
-}
-
-/* Error codes returned in result to the higher part of the driver. */
-enum {
- IDE_DRV_ERROR_GENERAL = 101,
- IDE_DRV_ERROR_FILEMARK = 102,
- IDE_DRV_ERROR_EOD = 103,
-};
-
-/*
- * Definitions for accessing IDE controller registers
- */
-#define IDE_NR_PORTS (10)
-
-struct ide_io_ports {
- unsigned long data_addr;
-
- union {
- unsigned long error_addr; /* read: error */
- unsigned long feature_addr; /* write: feature */
- };
-
- unsigned long nsect_addr;
- unsigned long lbal_addr;
- unsigned long lbam_addr;
- unsigned long lbah_addr;
-
- unsigned long device_addr;
-
- union {
- unsigned long status_addr; /*  read: status  */
- unsigned long command_addr; /* write: command */
- };
-
- unsigned long ctl_addr;
-
- unsigned long irq_addr;
-};
-
-#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
-
-#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
-#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
-#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
-#define DRIVE_READY (ATA_DRDY | ATA_DSC)
-
-#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
-
-#define SATA_NR_PORTS (3) /* 16 possible ?? */
-
-#define SATA_STATUS_OFFSET (0)
-#define SATA_ERROR_OFFSET (1)
-#define SATA_CONTROL_OFFSET (2)
-
-/*
- * Our Physical Region Descriptor (PRD) table should be large enough
- * to handle the biggest I/O request we are likely to see. Since requests
- * can have no more than 256 sectors, and since the typical blocksize is
- * two or more sectors, we could get by with a limit of 128 entries here for
- * the usual worst case. Most requests seem to include some contiguous blocks,
- * further reducing the number of table entries required.
- *
- * The driver reverts to PIO mode for individual requests that exceed
- * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
- * 100% of all crazy scenarios here is not necessary.
- *
- * As it turns out though, we must allocate a full 4KB page for this,
- * so the two PRD tables (ide0 & ide1) will each get half of that,
- * allowing each to have about 256 entries (8 bytes each) from this.
- */
-#define PRD_BYTES 8
-#define PRD_ENTRIES 256
-
-/*
- * Some more useful definitions
- */
-#define PARTN_BITS 6 /* number of minor dev bits for partitions */
-#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
-#define SECTOR_SIZE 512
-
-/*
- * Timeouts for various operations:
- */
-enum {
- /* spec allows up to 20ms, but CF cards and SSD drives need more */
- WAIT_DRQ = 1 * HZ, /* 1s */
- /* some laptops are very slow */
- WAIT_READY = 5 * HZ, /* 5s */
- /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
- WAIT_PIDENTIFY = 10 * HZ, /* 10s */
- /* worst case when spinning up */
- WAIT_WORSTCASE = 30 * HZ, /* 30s */
- /* maximum wait for an IRQ to happen */
- WAIT_CMD = 10 * HZ, /* 10s */
- /* Some drives require a longer IRQ timeout. */
- WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
- /*
- * Some drives (for example, Seagate STT3401A Travan) require a very
- * long timeout, because they don't return an interrupt or clear their
- * BSY bit until after the command completes (even retension commands).
- */
- WAIT_TAPE_CMD = 900 * HZ, /* 900s */
- /* minimum sleep time */
- WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
-};
-
-/*
- * Op codes for special requests to be handled by ide_special_rq().
- * Values should be in the range of 0x20 to 0x3f.
- */
-#define REQ_DRIVE_RESET 0x20
-#define REQ_DEVSET_EXEC 0x21
-#define REQ_PARK_HEADS 0x22
-#define REQ_UNPARK_HEADS 0x23
-
-/*
- * hwif_chipset_t is used to keep track of the specific hardware
- * chipset used by each IDE interface, if known.
- */
-enum { ide_unknown, ide_generic, ide_pci,
- ide_cmd640, ide_dtc2278, ide_ali14xx,
- ide_qd65xx, ide_umc8672, ide_ht6560b,
- ide_4drives, ide_pmac, ide_acorn,
- ide_au1xxx, ide_palm3710
-};
-
-typedef u8 hwif_chipset_t;
-
-/*
- * Structure to hold all information about the location of this port
- */
-struct ide_hw {
- union {
- struct ide_io_ports io_ports;
- unsigned long io_ports_array[IDE_NR_PORTS];
- };
-
- int irq; /* our irq number */
- struct device *dev, *parent;
- unsigned long config;
-};
-
-static inline void ide_std_init_ports(struct ide_hw *hw,
- unsigned long io_addr,
- unsigned long ctl_addr)
-{
- unsigned int i;
-
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = io_addr++;
-
- hw->io_ports.ctl_addr = ctl_addr;
-}
-
-#define MAX_HWIFS 10
-
-/*
- * Now for the data we need to maintain per-drive: ide_drive_t
- */
-
-#define ide_scsi 0x21
-#define ide_disk 0x20
-#define ide_optical 0x7
-#define ide_cdrom 0x5
-#define ide_tape 0x1
-#define ide_floppy 0x0
-
-/*
- * Special Driver Flags
- */
-enum {
- IDE_SFLAG_SET_GEOMETRY = (1 << 0),
- IDE_SFLAG_RECALIBRATE = (1 << 1),
- IDE_SFLAG_SET_MULTMODE = (1 << 2),
-};
-
-/*
- * Status returned from various ide_ functions
- */
-typedef enum {
- ide_stopped, /* no drive operation was started */
- ide_started, /* a drive operation was started, handler was set */
-} ide_startstop_t;
-
-enum {
- IDE_VALID_ERROR = (1 << 1),
- IDE_VALID_FEATURE = IDE_VALID_ERROR,
- IDE_VALID_NSECT = (1 << 2),
- IDE_VALID_LBAL = (1 << 3),
- IDE_VALID_LBAM = (1 << 4),
- IDE_VALID_LBAH = (1 << 5),
- IDE_VALID_DEVICE = (1 << 6),
- IDE_VALID_LBA = IDE_VALID_LBAL |
- IDE_VALID_LBAM |
- IDE_VALID_LBAH,
- IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_IN_TF = IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
- IDE_VALID_IN_HOB = IDE_VALID_ERROR |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
-};
-
-enum {
- IDE_TFLAG_LBA48 = (1 << 0),
- IDE_TFLAG_WRITE = (1 << 1),
- IDE_TFLAG_CUSTOM_HANDLER = (1 << 2),
- IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3),
- /* force 16-bit I/O operations */
- IDE_TFLAG_IO_16BIT = (1 << 4),
- /* struct ide_cmd was allocated using kmalloc() */
- IDE_TFLAG_DYN = (1 << 5),
- IDE_TFLAG_FS = (1 << 6),
- IDE_TFLAG_MULTI_PIO = (1 << 7),
- IDE_TFLAG_SET_XFER = (1 << 8),
-};
-
-enum {
- IDE_FTFLAG_FLAGGED = (1 << 0),
- IDE_FTFLAG_SET_IN_FLAGS = (1 << 1),
- IDE_FTFLAG_OUT_DATA = (1 << 2),
- IDE_FTFLAG_IN_DATA = (1 << 3),
-};
-
-struct ide_taskfile {
- u8 data; /* 0: data byte (for TASKFILE ioctl) */
- union { /* 1: */
- u8 error; /* read: error */
- u8 feature; /* write: feature */
- };
- u8 nsect; /* 2: number of sectors */
- u8 lbal; /* 3: LBA low */
- u8 lbam; /* 4: LBA mid */
- u8 lbah; /* 5: LBA high */
- u8 device; /* 6: device select */
- union { /* 7: */
- u8 status; /* read: status */
- u8 command; /* write: command */
- };
-};
-
-struct ide_cmd {
- struct ide_taskfile tf;
- struct ide_taskfile hob;
- struct {
- struct {
- u8 tf;
- u8 hob;
- } out, in;
- } valid;
-
- u16 tf_flags;
- u8 ftf_flags; /* for TASKFILE ioctl */
- int protocol;
-
- int sg_nents; /* number of sg entries */
- int orig_sg_nents;
- int sg_dma_direction; /* DMA transfer direction */
-
- unsigned int nbytes;
- unsigned int nleft;
- unsigned int last_xfer_len;
-
- struct scatterlist *cursg;
- unsigned int cursg_ofs;
-
- struct request *rq; /* copy of request */
-};
-
-/* ATAPI packet command flags */
-enum {
- /* set when an error is considered normal - no retry (ide-tape) */
- PC_FLAG_ABORT = (1 << 0),
- PC_FLAG_SUPPRESS_ERROR = (1 << 1),
- PC_FLAG_WAIT_FOR_DSC = (1 << 2),
- PC_FLAG_DMA_OK = (1 << 3),
- PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
- PC_FLAG_DMA_ERROR = (1 << 5),
- PC_FLAG_WRITING = (1 << 6),
-};
-
-#define ATAPI_WAIT_PC (60 * HZ)
-
-struct ide_atapi_pc {
- /* actual packet bytes */
- u8 c[12];
- /* incremented on each retry */
- int retries;
- int error;
-
- /* bytes to transfer */
- int req_xfer;
-
- /* the corresponding request */
- struct request *rq;
-
- unsigned long flags;
-
- /*
- * those are more or less driver-specific and some of them are subject
- * to change/removal later.
- */
- unsigned long timeout;
-};
-
-struct ide_devset;
-struct ide_driver;
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-struct ide_acpi_drive_link;
-struct ide_acpi_hwif_link;
-#endif
-
-struct ide_drive_s;
-
-struct ide_disk_ops {
- int (*check)(struct ide_drive_s *, const char *);
- int (*get_capacity)(struct ide_drive_s *);
- void (*unlock_native_capacity)(struct ide_drive_s *);
- void (*setup)(struct ide_drive_s *);
- void (*flush)(struct ide_drive_s *);
- int (*init_media)(struct ide_drive_s *, struct gendisk *);
- int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
- int);
- ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
- sector_t);
- int (*ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
-};
-
-/* ATAPI device flags */
-enum {
- IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
-
- /* ide-cd */
- /* Drive cannot eject the disc. */
- IDE_AFLAG_NO_EJECT = (1 << 1),
- /* Drive is a pre ATAPI 1.2 drive. */
- IDE_AFLAG_PRE_ATAPI12 = (1 << 2),
- /* TOC addresses are in BCD. */
- IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
- /* TOC track numbers are in BCD. */
- IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
- /* Saved TOC information is current. */
- IDE_AFLAG_TOC_VALID = (1 << 6),
- /* We think that the drive door is locked. */
- IDE_AFLAG_DOOR_LOCKED = (1 << 7),
- /* SET_CD_SPEED command is unsupported. */
- IDE_AFLAG_NO_SPEED_SELECT = (1 << 8),
- IDE_AFLAG_VERTOS_300_SSD = (1 << 9),
- IDE_AFLAG_VERTOS_600_ESD = (1 << 10),
- IDE_AFLAG_SANYO_3CD = (1 << 11),
- IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12),
- IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13),
- IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14),
-
- /* ide-floppy */
- /* Avoid commands not supported in Clik drive */
- IDE_AFLAG_CLIK_DRIVE = (1 << 15),
- /* Requires BH algorithm for packets */
- IDE_AFLAG_ZIP_DRIVE = (1 << 16),
- /* Supports format progress report */
- IDE_AFLAG_SRFP = (1 << 17),
-
- /* ide-tape */
- IDE_AFLAG_IGNORE_DSC = (1 << 18),
- /* 0 When the tape position is unknown */
- IDE_AFLAG_ADDRESS_VALID = (1 << 19),
- /* Device already opened */
- IDE_AFLAG_BUSY = (1 << 20),
- /* Attempt to auto-detect the current user block size */
- IDE_AFLAG_DETECT_BS = (1 << 21),
- /* Currently on a filemark */
- IDE_AFLAG_FILEMARK = (1 << 22),
- /* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDE_AFLAG_MEDIUM_PRESENT = (1 << 23),
-
- IDE_AFLAG_NO_AUTOCLOSE = (1 << 24),
-};
-
-/* device flags */
-enum {
- /* restore settings after device reset */
- IDE_DFLAG_KEEP_SETTINGS = (1 << 0),
- /* device is using DMA for read/write */
- IDE_DFLAG_USING_DMA = (1 << 1),
- /* okay to unmask other IRQs */
- IDE_DFLAG_UNMASK = (1 << 2),
- /* don't attempt flushes */
- IDE_DFLAG_NOFLUSH = (1 << 3),
- /* DSC overlap */
- IDE_DFLAG_DSC_OVERLAP = (1 << 4),
- /* give potential excess bandwidth */
- IDE_DFLAG_NICE1 = (1 << 5),
- /* device is physically present */
- IDE_DFLAG_PRESENT = (1 << 6),
- /* disable Host Protected Area */
- IDE_DFLAG_NOHPA = (1 << 7),
- /* id read from device (synthetic if not set) */
- IDE_DFLAG_ID_READ = (1 << 8),
- IDE_DFLAG_NOPROBE = (1 << 9),
- /* need to do check_media_change() */
- IDE_DFLAG_REMOVABLE = (1 << 10),
- /* needed for removable devices */
- IDE_DFLAG_ATTACH = (1 << 11),
- IDE_DFLAG_FORCED_GEOM = (1 << 12),
- /* disallow setting unmask bit */
- IDE_DFLAG_NO_UNMASK = (1 << 13),
- /* disallow enabling 32-bit I/O */
- IDE_DFLAG_NO_IO_32BIT = (1 << 14),
- /* for removable only: door lock/unlock works */
- IDE_DFLAG_DOORLOCKING = (1 << 15),
- /* disallow DMA */
- IDE_DFLAG_NODMA = (1 << 16),
- /* powermanagement told us not to do anything, so sleep nicely */
- IDE_DFLAG_BLOCKED = (1 << 17),
- /* sleeping & sleep field valid */
- IDE_DFLAG_SLEEPING = (1 << 18),
- IDE_DFLAG_POST_RESET = (1 << 19),
- IDE_DFLAG_UDMA33_WARNED = (1 << 20),
- IDE_DFLAG_LBA48 = (1 << 21),
- /* status of write cache */
- IDE_DFLAG_WCACHE = (1 << 22),
- /* used for ignoring ATA_DF */
- IDE_DFLAG_NOWERR = (1 << 23),
- /* retrying in PIO */
- IDE_DFLAG_DMA_PIO_RETRY = (1 << 24),
- IDE_DFLAG_LBA = (1 << 25),
- /* don't unload heads */
- IDE_DFLAG_NO_UNLOAD = (1 << 26),
- /* heads unloaded, please don't reset port */
- IDE_DFLAG_PARKED = (1 << 27),
- IDE_DFLAG_MEDIA_CHANGED = (1 << 28),
- /* write protect */
- IDE_DFLAG_WP = (1 << 29),
- IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
- IDE_DFLAG_NIEN_QUIRK = (1 << 31),
-};
-
-struct ide_drive_s {
- char name[4]; /* drive name, such as "hda" */
- char driver_req[10]; /* requests specific driver */
-
- struct request_queue *queue; /* request queue */
-
- struct request *rq; /* current request */
- void *driver_data; /* extra driver data */
- u16 *id; /* identification info */
-#ifdef CONFIG_IDE_PROC_FS
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
- const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
-#endif
- struct hwif_s *hwif; /* actually (ide_hwif_t *) */
-
- const struct ide_disk_ops *disk_ops;
-
- unsigned long dev_flags;
-
- unsigned long sleep; /* sleep until this time */
- unsigned long timeout; /* max time to wait for irq */
-
- u8 special_flags; /* special action flags */
-
- u8 select; /* basic drive/head select reg value */
- u8 retry_pio; /* retrying dma capable host in pio */
- u8 waiting_for_dma; /* dma currently in progress */
- u8 dma; /* atapi dma flag */
-
- u8 init_speed; /* transfer rate set at boot */
- u8 current_speed; /* current transfer rate set */
- u8 desired_speed; /* desired transfer rate set */
- u8 pio_mode; /* for ->set_pio_mode _only_ */
- u8 dma_mode; /* for ->set_dma_mode _only_ */
- u8 dn; /* now wide spread use */
- u8 acoustic; /* acoustic management */
- u8 media; /* disk, cdrom, tape, floppy, ... */
- u8 ready_stat; /* min status value for drive ready */
- u8 mult_count; /* current multiple sector setting */
- u8 mult_req; /* requested multiple sector setting */
- u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
- u8 bad_wstat; /* used for ignoring ATA_DF */
- u8 head; /* "real" number of heads */
- u8 sect; /* "real" sectors per track */
- u8 bios_head; /* BIOS/fdisk/LILO number of heads */
- u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
-
- /* delay this long before sending packet command */
- u8 pc_delay;
-
- unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
- unsigned int cyl; /* "real" number of cyls */
- void *drive_data; /* used by set_pio_mode/dev_select() */
- unsigned int failures; /* current failure count */
- unsigned int max_failures; /* maximum allowed failure count */
- u64 probed_capacity;/* initial/native media capacity */
- u64 capacity64; /* total number of sectors */
-
- int lun; /* logical unit */
- int crc_count; /* crc counter to reduce drive speed */
-
- unsigned long debug_mask; /* debugging levels switch */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_drive_link *acpidata;
-#endif
- struct list_head list;
- struct device gendev;
- struct completion gendev_rel_comp; /* to deal with device release() */
-
- /* current packet command */
- struct ide_atapi_pc *pc;
-
- /* last failed packet command */
- struct ide_atapi_pc *failed_pc;
-
- /* callback for packet commands */
- int (*pc_callback)(struct ide_drive_s *, int);
-
- ide_startstop_t (*irq_handler)(struct ide_drive_s *);
-
- unsigned long atapi_flags;
-
- struct ide_atapi_pc request_sense_pc;
-
- /* current sense rq and buffer */
- bool sense_rq_armed;
- struct request *sense_rq;
- struct request_sense sense_data;
-};
-
-typedef struct ide_drive_s ide_drive_t;
-
-#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
-
-#define to_ide_drv(obj, cont_type) \
- container_of(obj, struct cont_type, dev)
-
-#define ide_drv_g(disk, cont_type) \
- container_of((disk)->private_data, struct cont_type, driver)
-
-struct ide_port_info;
-
-struct ide_tp_ops {
- void (*exec_command)(struct hwif_s *, u8);
- u8 (*read_status)(struct hwif_s *);
- u8 (*read_altstatus)(struct hwif_s *);
- void (*write_devctl)(struct hwif_s *, u8);
-
- void (*dev_select)(ide_drive_t *);
- void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
- void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
-
- void (*input_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
- void (*output_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
-};
-
-extern const struct ide_tp_ops default_tp_ops;
-
-/**
- * struct ide_port_ops - IDE port operations
- *
- * @init_dev: host specific initialization of a device
- * @set_pio_mode: routine to program host for PIO mode
- * @set_dma_mode: routine to program host for DMA mode
- * @reset_poll: chipset polling based on hba specifics
- * @pre_reset: chipset specific changes to default for device-hba resets
- * @resetproc: routine to reset controller after a disk reset
- * @maskproc: special host masking for drive selection
- * @quirkproc: check host's drive quirk list
- * @clear_irq: clear IRQ
- *
- * @mdma_filter: filter MDMA modes
- * @udma_filter: filter UDMA modes
- *
- * @cable_detect: detect cable type
- */
-struct ide_port_ops {
- void (*init_dev)(ide_drive_t *);
- void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
- void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
- blk_status_t (*reset_poll)(ide_drive_t *);
- void (*pre_reset)(ide_drive_t *);
- void (*resetproc)(ide_drive_t *);
- void (*maskproc)(ide_drive_t *, int);
- void (*quirkproc)(ide_drive_t *);
- void (*clear_irq)(ide_drive_t *);
- int (*test_irq)(struct hwif_s *);
-
- u8 (*mdma_filter)(ide_drive_t *);
- u8 (*udma_filter)(ide_drive_t *);
-
- u8 (*cable_detect)(struct hwif_s *);
-};
-
-struct ide_dma_ops {
- void (*dma_host_set)(struct ide_drive_s *, int);
- int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
- void (*dma_start)(struct ide_drive_s *);
- int (*dma_end)(struct ide_drive_s *);
- int (*dma_test_irq)(struct ide_drive_s *);
- void (*dma_lost_irq)(struct ide_drive_s *);
- /* below ones are optional */
- int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
- int (*dma_timer_expiry)(struct ide_drive_s *);
- void (*dma_clear)(struct ide_drive_s *);
- /*
- * The following method is optional and only required to be
- * implemented for the SFF-8038i compatible controllers.
- */
- u8 (*dma_sff_read_status)(struct hwif_s *);
-};
-
-enum {
- IDE_PFLAG_PROBING = (1 << 0),
-};
-
-struct ide_host;
-
-typedef struct hwif_s {
- struct hwif_s *mate; /* other hwif from same PCI chip */
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
-
- struct ide_host *host;
-
- char name[6]; /* name of interface, eg. "ide0" */
-
- struct ide_io_ports io_ports;
-
- unsigned long sata_scr[SATA_NR_PORTS];
-
- ide_drive_t *devices[MAX_DRIVES + 1];
-
- unsigned long port_flags;
-
- u8 major; /* our major number */
- u8 index; /* 0 for ide0; 1 for ide1; ... */
- u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
-
- u32 host_flags;
-
- u8 pio_mask;
-
- u8 ultra_mask;
- u8 mwdma_mask;
- u8 swdma_mask;
-
- u8 cbl; /* cable type */
-
- hwif_chipset_t chipset; /* sub-module for tuning.. */
-
- struct device *dev;
-
- void (*rw_disk)(ide_drive_t *, struct request *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- /* dma physical region descriptor table (cpu view) */
- unsigned int *dmatable_cpu;
- /* dma physical region descriptor table (dma view) */
- dma_addr_t dmatable_dma;
-
- /* maximum number of PRD table entries */
- int prd_max_nents;
- /* PRD entry size in bytes */
- int prd_ent_size;
-
- /* Scatter-gather list used to build the above */
- struct scatterlist *sg_table;
- int sg_max_nents; /* Maximum number of entries in it */
-
- struct ide_cmd cmd; /* current command */
-
- int rqsize; /* max sectors per request */
- int irq; /* our irq number */
-
- unsigned long dma_base; /* base addr for dma ports */
-
- unsigned long config_data; /* for use by chipset-specific code */
- unsigned long select_data; /* for use by chipset-specific code */
-
- unsigned long extra_base; /* extra addr for dma ports */
- unsigned extra_ports; /* number of extra dma ports */
-
- unsigned present : 1; /* this interface exists */
- unsigned busy : 1; /* serializes devices on a port */
-
- struct device gendev;
- struct device *portdev;
-
- struct completion gendev_rel_comp; /* To deal with device release() */
-
- void *hwif_data; /* extra hwif data */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_hwif_link *acpidata;
-#endif
-
- /* IRQ handler, if active */
- ide_startstop_t (*handler)(ide_drive_t *);
-
- /* BOOL: polling active & poll_timeout field valid */
- unsigned int polling : 1;
-
- /* current drive */
- ide_drive_t *cur_dev;
-
- /* current request */
- struct request *rq;
-
- /* failsafe timer */
- struct timer_list timer;
- /* timeout value during long polls */
- unsigned long poll_timeout;
- /* queried upon timeouts */
- int (*expiry)(ide_drive_t *);
-
- int req_gen;
- int req_gen_timer;
-
- spinlock_t lock;
-} ____cacheline_internodealigned_in_smp ide_hwif_t;
-
-#define MAX_HOST_PORTS 4
-
-struct ide_host {
- ide_hwif_t *ports[MAX_HOST_PORTS + 1];
- unsigned int n_ports;
- struct device *dev[2];
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- irq_handler_t irq_handler;
-
- unsigned long host_flags;
-
- int irq_flags;
-
- void *host_priv;
- ide_hwif_t *cur_port; /* for hosts requiring serialization */
-
- /* used for hosts requiring serialization */
- volatile unsigned long host_busy;
-};
-
-#define IDE_HOST_BUSY 0
-
-/*
- * internal ide interrupt handler type
- */
-typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
-typedef int (ide_expiry_t)(ide_drive_t *);
-
-/* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
-
-extern struct mutex ide_setting_mtx;
-
-/*
- * configurable drive settings
- */
-
-#define DS_SYNC (1 << 0)
-
-struct ide_devset {
- int (*get)(ide_drive_t *);
- int (*set)(ide_drive_t *, int);
- unsigned int flags;
-};
-
-#define __DEVSET(_flags, _get, _set) { \
- .flags = _flags, \
- .get = _get, \
- .set = _set, \
-}
-
-#define ide_devset_get(name, field) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return drive->field; \
-}
-
-#define ide_devset_set(name, field) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- drive->field = arg; \
- return 0; \
-}
-
-#define ide_devset_get_flag(name, flag) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return !!(drive->dev_flags & flag); \
-}
-
-#define ide_devset_set_flag(name, flag) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- if (arg) \
- drive->dev_flags |= flag; \
- else \
- drive->dev_flags &= ~flag; \
- return 0; \
-}
-
-#define __IDE_DEVSET(_name, _flags, _get, _set) \
-const struct ide_devset ide_devset_##_name = \
- __DEVSET(_flags, _get, _set)
-
-#define IDE_DEVSET(_name, _flags, _get, _set) \
-static __IDE_DEVSET(_name, _flags, _get, _set)
-
-#define ide_devset_rw(_name, _func) \
-IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_devset_w(_name, _func) \
-IDE_DEVSET(_name, 0, NULL, set_##_func)
-
-#define ide_ext_devset_rw(_name, _func) \
-__IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_ext_devset_rw_sync(_name, _func) \
-__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
-
-#define ide_decl_devset(_name) \
-extern const struct ide_devset ide_devset_##_name
-
-ide_decl_devset(io_32bit);
-ide_decl_devset(keepsettings);
-ide_decl_devset(pio_mode);
-ide_decl_devset(unmaskirq);
-ide_decl_devset(using_dma);
-
-#ifdef CONFIG_IDE_PROC_FS
-/*
- * /proc/ide interface
- */
-
-#define ide_devset_rw_field(_name, _field) \
-ide_devset_get(_name, _field); \
-ide_devset_set(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-#define ide_devset_rw_flag(_name, _field) \
-ide_devset_get_flag(_name, _field); \
-ide_devset_set_flag(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-struct ide_proc_devset {
- const char *name;
- const struct ide_devset *setting;
- int min, max;
- int (*mulf)(ide_drive_t *);
- int (*divf)(ide_drive_t *);
-};
-
-#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
- .name = __stringify(_name), \
- .setting = &ide_devset_##_name, \
- .min = _min, \
- .max = _max, \
- .mulf = _mulf, \
- .divf = _divf, \
-}
-
-#define IDE_PROC_DEVSET(_name, _min, _max) \
-__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
-
-typedef struct {
- const char *name;
- umode_t mode;
- const struct file_operations *proc_fops;
-} ide_proc_entry_t;
-
-void proc_ide_create(void);
-void proc_ide_destroy(void);
-void ide_proc_register_port(ide_hwif_t *);
-void ide_proc_port_register_devices(ide_hwif_t *);
-void ide_proc_unregister_device(ide_drive_t *);
-void ide_proc_unregister_port(ide_hwif_t *);
-void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
-void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
-
-extern const struct file_operations ide_capacity_proc_fops;
-extern const struct file_operations ide_geometry_proc_fops;
-#else
-static inline void proc_ide_create(void) { ; }
-static inline void proc_ide_destroy(void) { ; }
-static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
-static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_register_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-static inline void ide_proc_unregister_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-#endif
-
-enum {
- /* enter/exit functions */
- IDE_DBG_FUNC = (1 << 0),
- /* sense key/asc handling */
- IDE_DBG_SENSE = (1 << 1),
- /* packet commands handling */
- IDE_DBG_PC = (1 << 2),
- /* request handling */
- IDE_DBG_RQ = (1 << 3),
- /* driver probing/setup */
- IDE_DBG_PROBE = (1 << 4),
-};
-
-/* DRV_NAME has to be defined in the driver before using the macro below */
-#define __ide_debug_log(lvl, fmt, args...) \
-{ \
- if (unlikely(drive->debug_mask & lvl)) \
- printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
- __func__, ## args); \
-}
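-
-/*
- * Hedged usage sketch: a driver that did '#define DRV_NAME "ide-foo"'
- * (name illustrative) and has 'drive' in scope can emit:
- *
- *	__ide_debug_log(IDE_DBG_RQ, "rq %p queued", rq);
- */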
-
-/*
- * Power Management state machine (rq->pm->pm_step).
- *
- * For each step, the core calls ide_start_power_step() first.
- * This can return:
- * - ide_stopped :	In this case, the core calls us back again unless
- *			the step has been set to IDE_PM_COMPLETED.
- * - ide_started : In this case, the channel is left busy until an
- * async event (interrupt) occurs.
- * Typically, ide_start_power_step() will issue a taskfile request with
- * do_rw_taskfile().
- *
- * Upon reception of the interrupt, the core will call ide_complete_power_step()
- * with the error code if any. This routine should update the step value
- * and return. It should not start a new request. The core will call
- * ide_start_power_step() for the new step value, unless the step has been
- * set to IDE_PM_COMPLETED.
- */
-enum {
- IDE_PM_START_SUSPEND,
- IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
- IDE_PM_STANDBY,
-
- IDE_PM_START_RESUME,
- IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
- IDE_PM_IDLE,
- IDE_PM_RESTORE_DMA,
-
- IDE_PM_COMPLETED,
-};
-
-int generic_ide_suspend(struct device *, pm_message_t);
-int generic_ide_resume(struct device *);
-
-void ide_complete_power_step(ide_drive_t *, struct request *);
-ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
-void ide_complete_pm_rq(ide_drive_t *, struct request *);
-void ide_check_pm_state(ide_drive_t *, struct request *);
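-
-/*
- * Minimal sketch of the stepping loop implied by the comment above
- * (illustrative only; 'pm' stands for the request's ide_pm_state):
- *
- *	while (pm->pm_step != IDE_PM_COMPLETED) {
- *		if (ide_start_power_step(drive, rq) == ide_started)
- *			break;	(an IRQ will follow; the ISR then calls
- *				 ide_complete_power_step() and re-enters)
- *	}
- */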
-
-/*
- * Subdrivers support.
- *
- * The gen_driver.owner field should be set to the module owner of this driver.
- * The gen_driver.name field should be set to the name of this driver.
- */
-struct ide_driver {
- const char *version;
- ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
- struct device_driver gen_driver;
- int (*probe)(ide_drive_t *);
- void (*remove)(ide_drive_t *);
- void (*resume)(ide_drive_t *);
- void (*shutdown)(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
- ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
- const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
-#endif
-};
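-
-/*
- * Hedged sketch of a minimal subdriver declaration (all names are
- * illustrative, not taken from an in-tree driver):
- *
- *	static struct ide_driver foo_driver = {
- *		.version	= "1.0",
- *		.do_request	= foo_do_request,
- *		.probe		= foo_probe,
- *		.remove		= foo_remove,
- *		.gen_driver = {
- *			.owner	= THIS_MODULE,
- *			.name	= "ide-foo",
- *		},
- *	};
- */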
-
-#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
-
-int ide_device_get(ide_drive_t *);
-void ide_device_put(ide_drive_t *);
-
-struct ide_ioctl_devset {
- unsigned int get_ioctl;
- unsigned int set_ioctl;
- const struct ide_devset *setting;
-};
-
-int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
- unsigned long, const struct ide_ioctl_devset *);
-
-int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
-
-extern int ide_vlb_clk;
-extern int ide_pci_clk;
-
-int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
-void ide_kill_rq(ide_drive_t *, struct request *);
-
-void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-
-void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
- unsigned int);
-
-void ide_pad_transfer(ide_drive_t *, int, int);
-
-ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
-
-void ide_fix_driveid(u16 *);
-
-extern void ide_fixstring(u8 *, const int, const int);
-
-int ide_busy_sleep(ide_drive_t *, unsigned long, int);
-
-int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
-int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
-
-ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
-ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_do_reset (ide_drive_t *);
-
-extern int ide_devset_execute(ide_drive_t *drive,
- const struct ide_devset *setting, int arg);
-
-void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
-
-void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
-void ide_tf_dump(const char *, struct ide_cmd *);
-
-void ide_exec_command(ide_hwif_t *, u8);
-u8 ide_read_status(ide_hwif_t *);
-u8 ide_read_altstatus(ide_hwif_t *);
-void ide_write_devctl(ide_hwif_t *, u8);
-
-void ide_dev_select(ide_drive_t *);
-void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
-void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
-
-void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-
-void SELECT_MASK(ide_drive_t *, int);
-
-u8 ide_read_error(ide_drive_t *);
-void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
-
-int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
-
-int ide_check_atapi_device(ide_drive_t *, const char *);
-
-void ide_init_pc(struct ide_atapi_pc *);
-
-/* Disk head parking */
-extern wait_queue_head_t ide_park_wq;
-ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len);
-
-/*
- * Special requests for ide-tape block device strategy routine.
- *
- * In order to service a character device command, we add special requests to
- * the tail of our block device request queue and wait for their completion.
- */
-enum {
- REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
- REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
- REQ_IDETAPE_READ = (1 << 2),
- REQ_IDETAPE_WRITE = (1 << 3),
-};
-
-int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
- void *, unsigned int);
-
-int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
-int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
-int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
-void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *drive);
-
-void ide_prep_sense(ide_drive_t *drive, struct request *rq);
-int ide_queue_sense_rq(ide_drive_t *drive, void *special);
-
-int ide_cd_expiry(ide_drive_t *);
-
-int ide_cd_get_xferlen(struct request *);
-
-ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
-
-ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
-
-void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
-
-void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
-
-int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
-int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
-
-int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
-
-int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
-
-extern int ide_driveid_update(ide_drive_t *);
-extern int ide_config_drive_speed(ide_drive_t *, u8);
-extern u8 eighty_ninty_three (ide_drive_t *);
-extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
-
-extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
-
-extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
-
-extern void ide_timer_expiry(unsigned long);
-extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(struct request_queue *);
-extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
-
-void ide_init_disk(struct gendisk *, ide_drive_t *);
-
-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
-#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
-#else
-#define ide_pci_register_driver(d) pci_register_driver(d)
-#endif
-
-static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
-{
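-	/* In the prog-if byte of a PCI IDE controller, bit 0 and bit 2 mean
-	 * the primary resp. secondary channel is in native PCI mode; if not
-	 * both are native, treat the controller as compatibility mode. */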
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
- return 1;
- return 0;
-}
-
-void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
- struct ide_hw *, struct ide_hw **);
-void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-int ide_pci_set_master(struct pci_dev *, const char *);
-unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
-int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
-int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
-#else
-static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- return -EINVAL;
-}
-#endif
-
-struct ide_pci_enablebit {
- u8 reg; /* byte pci reg holding the enable-bit */
- u8 mask; /* mask to isolate the enable-bit */
- u8 val; /* value of masked reg when "enabled" */
-};
-
-enum {
-	/* Uses ISA control ports, not PCI ones. */
- IDE_HFLAG_ISA_PORTS = (1 << 0),
- /* single port device */
- IDE_HFLAG_SINGLE = (1 << 1),
- /* don't use legacy PIO blacklist */
- IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2),
- /* set for the second port of QD65xx */
- IDE_HFLAG_QD_2ND_PORT = (1 << 3),
- /* use PIO8/9 for prefetch off/on */
- IDE_HFLAG_ABUSE_PREFETCH = (1 << 4),
- /* use PIO6/7 for fast-devsel off/on */
- IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5),
- /* use 100-102 and 200-202 PIO values to set DMA modes */
- IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6),
- /*
-	 * keep the DMA setting when programming the PIO mode; may be used
-	 * only for hosts which have separate PIO and DMA timings (i.e. PMAC)
- */
- IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7),
- /* program host for the transfer mode after programming device */
- IDE_HFLAG_POST_SET_MODE = (1 << 8),
- /* don't program host/device for the transfer mode ("smart" hosts) */
- IDE_HFLAG_NO_SET_MODE = (1 << 9),
- /* trust BIOS for programming chipset/device for DMA */
- IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10),
- /* host is CS5510/CS5520 */
- IDE_HFLAG_CS5520 = (1 << 11),
- /* ATAPI DMA is unsupported */
- IDE_HFLAG_NO_ATAPI_DMA = (1 << 12),
- /* set if host is a "non-bootable" controller */
- IDE_HFLAG_NON_BOOTABLE = (1 << 13),
- /* host doesn't support DMA */
- IDE_HFLAG_NO_DMA = (1 << 14),
- /* check if host is PCI IDE device before allowing DMA */
- IDE_HFLAG_NO_AUTODMA = (1 << 15),
- /* host uses MMIO */
- IDE_HFLAG_MMIO = (1 << 16),
- /* no LBA48 */
- IDE_HFLAG_NO_LBA48 = (1 << 17),
- /* no LBA48 DMA */
- IDE_HFLAG_NO_LBA48_DMA = (1 << 18),
- /* data FIFO is cleared by an error */
- IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
- /* serialize ports */
- IDE_HFLAG_SERIALIZE = (1 << 20),
- /* host is DTC2278 */
- IDE_HFLAG_DTC2278 = (1 << 21),
- /* 4 devices on a single set of I/O ports */
- IDE_HFLAG_4DRIVES = (1 << 22),
- /* host is TRM290 */
- IDE_HFLAG_TRM290 = (1 << 23),
- /* use 32-bit I/O ops */
- IDE_HFLAG_IO_32BIT = (1 << 24),
- /* unmask IRQs */
- IDE_HFLAG_UNMASK_IRQS = (1 << 25),
- IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26),
- /* serialize ports if DMA is possible (for sl82c105) */
- IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
- /* force host out of "simplex" mode */
- IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28),
- /* DSC overlap is unsupported */
- IDE_HFLAG_NO_DSC = (1 << 29),
- /* never use 32-bit I/O ops */
- IDE_HFLAG_NO_IO_32BIT = (1 << 30),
- /* never unmask IRQs */
- IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31),
-};
-
-#ifdef CONFIG_BLK_DEV_OFFBOARD
-# define IDE_HFLAG_OFF_BOARD 0
-#else
-# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
-#endif
-
-struct ide_port_info {
- char *name;
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- void (*init_iops)(ide_hwif_t *);
- void (*init_hwif)(ide_hwif_t *);
- int (*init_dma)(ide_hwif_t *,
- const struct ide_port_info *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- struct ide_pci_enablebit enablebits[2];
-
- hwif_chipset_t chipset;
-
- u16 max_sectors; /* if < than the default one */
-
- u32 host_flags;
-
- int irq_flags;
-
- u8 pio_mask;
- u8 swdma_mask;
- u8 mwdma_mask;
- u8 udma_mask;
-};
-
-/*
- * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
- * requests.
- */
-struct ide_pm_state {
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
-	void *data;		/* for driver use */
-};
-
-
-int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
-int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
- const struct ide_port_info *, void *);
-void ide_pci_remove(struct pci_dev *);
-
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *, pm_message_t);
-int ide_pci_resume(struct pci_dev *);
-#else
-#define ide_pci_suspend NULL
-#define ide_pci_resume NULL
-#endif
-
-void ide_map_sg(ide_drive_t *, struct ide_cmd *);
-void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
-
-#define BAD_DMA_DRIVE 0
-#define GOOD_DMA_DRIVE 1
-
-struct drive_list_entry {
- const char *id_model;
- const char *id_firmware;
-};
-
-int ide_in_drive_list(u16 *, const struct drive_list_entry *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA
-int ide_dma_good_drive(ide_drive_t *);
-int __ide_dma_bad_drive(ide_drive_t *);
-
-u8 ide_find_dma_mode(ide_drive_t *, u8);
-
-static inline u8 ide_max_dma_mode(ide_drive_t *drive)
-{
- return ide_find_dma_mode(drive, XFER_UDMA_6);
-}
-
-void ide_dma_off_quietly(ide_drive_t *);
-void ide_dma_off(ide_drive_t *);
-void ide_dma_on(ide_drive_t *);
-int ide_set_dma(ide_drive_t *);
-void ide_check_dma_crc(ide_drive_t *);
-ide_startstop_t ide_dma_intr(ide_drive_t *);
-
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
-
-int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
-void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-int config_drive_for_dma(ide_drive_t *);
-int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
-void ide_dma_host_set(ide_drive_t *, int);
-int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
-extern void ide_dma_start(ide_drive_t *);
-int ide_dma_end(ide_drive_t *);
-int ide_dma_test_irq(ide_drive_t *);
-int ide_dma_sff_timer_expiry(ide_drive_t *);
-u8 ide_dma_sff_read_status(ide_hwif_t *);
-extern const struct ide_dma_ops sff_dma_ops;
-#else
-static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
-void ide_dma_lost_irq(ide_drive_t *);
-ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
-
-#else
-static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
-static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
-static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
-static inline void ide_dma_off(ide_drive_t *drive) { ; }
-static inline void ide_dma_on(ide_drive_t *drive) { ; }
-static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
-static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
-static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
-static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
-static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-static inline int ide_dma_prepare(ide_drive_t *drive,
- struct ide_cmd *cmd) { return 1; }
-static inline void ide_dma_unmap_sg(ide_drive_t *drive,
- struct ide_cmd *cmd) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_acpi_init(void);
-bool ide_port_acpi(ide_hwif_t *hwif);
-extern int ide_acpi_exec_tfs(ide_drive_t *drive);
-extern void ide_acpi_get_timing(ide_hwif_t *hwif);
-extern void ide_acpi_push_timing(ide_hwif_t *hwif);
-void ide_acpi_init_port(ide_hwif_t *);
-void ide_acpi_port_init_devices(ide_hwif_t *);
-extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
-#else
-static inline int ide_acpi_init(void) { return 0; }
-static inline bool ide_port_acpi(ide_hwif_t *hwif) { return false; }
-static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
-static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
-#endif
-
-void ide_register_region(struct gendisk *);
-void ide_unregister_region(struct gendisk *);
-
-void ide_check_nien_quirk_list(ide_drive_t *);
-void ide_undecoded_slave(ide_drive_t *);
-
-void ide_port_apply_params(ide_hwif_t *);
-int ide_sysfs_register_port(ide_hwif_t *);
-
-struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
- unsigned int);
-void ide_host_free(struct ide_host *);
-int ide_host_register(struct ide_host *, const struct ide_port_info *,
- struct ide_hw **);
-int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
- struct ide_host **);
-void ide_host_remove(struct ide_host *);
-int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
-void ide_port_unregister_devices(ide_hwif_t *);
-void ide_port_scan(ide_hwif_t *);
-
-static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
-{
- return hwif->hwif_data;
-}
-
-static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
-{
- hwif->hwif_data = data;
-}
-
-extern void ide_toggle_bounce(ide_drive_t *drive, int on);
-
-u64 ide_get_lba_addr(struct ide_cmd *, int);
-u8 ide_dump_status(ide_drive_t *, const char *, u8);
-
-struct ide_timing {
- u8 mode;
- u8 setup; /* t1 */
- u16 act8b; /* t2 for 8-bit io */
- u16 rec8b; /* t2i for 8-bit io */
- u16 cyc8b; /* t0 for 8-bit io */
- u16 active; /* t2 or tD */
- u16 recover; /* t2i or tK */
- u16 cycle; /* t0 */
- u16 udma; /* t2CYCTYP/2 */
-};
-
-enum {
- IDE_TIMING_SETUP = (1 << 0),
- IDE_TIMING_ACT8B = (1 << 1),
- IDE_TIMING_REC8B = (1 << 2),
- IDE_TIMING_CYC8B = (1 << 3),
- IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
- IDE_TIMING_CYC8B,
- IDE_TIMING_ACTIVE = (1 << 4),
- IDE_TIMING_RECOVER = (1 << 5),
- IDE_TIMING_CYCLE = (1 << 6),
- IDE_TIMING_UDMA = (1 << 7),
- IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
- IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
- IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
-};
-
-struct ide_timing *ide_timing_find_mode(u8);
-u16 ide_pio_cycle_time(ide_drive_t *, u8);
-void ide_timing_merge(struct ide_timing *, struct ide_timing *,
- struct ide_timing *, unsigned int);
-int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
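-
-/*
- * Hedged usage sketch (the mode and the clock-period argument are
- * illustrative; apply_timings() is a hypothetical helper):
- *
- *	struct ide_timing t;
- *
- *	if (ide_timing_compute(drive, XFER_PIO_4, &t, 30, 0) == 0)
- *		apply_timings(hwif, &t);
- */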
-
-#ifdef CONFIG_IDE_XFER_MODE
-int ide_scan_pio_blacklist(char *);
-const char *ide_xfer_verbose(u8);
-int ide_pio_need_iordy(ide_drive_t *, const u8);
-int ide_set_pio_mode(ide_drive_t *, u8);
-int ide_set_dma_mode(ide_drive_t *, u8);
-void ide_set_pio(ide_drive_t *, u8);
-int ide_set_xfer_rate(ide_drive_t *, u8);
-#else
-static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
-static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
-#endif
-
-static inline void ide_set_max_pio(ide_drive_t *drive)
-{
- ide_set_pio(drive, 255);
-}
-
-char *ide_media_string(ide_drive_t *);
-
-extern const struct attribute_group *ide_dev_groups[];
-extern struct bus_type ide_bus_type;
-extern struct class *ide_port_class;
-
-static inline void ide_dump_identify(u8 *id)
-{
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
-}
-
-static inline int hwif_to_node(ide_hwif_t *hwif)
-{
- return hwif->dev ? dev_to_node(hwif->dev) : -1;
-}
-
-static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
-{
- ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
-
- return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
-}
-
-static inline void *ide_get_drivedata(ide_drive_t *drive)
-{
- return drive->drive_data;
-}
-
-static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
-{
- drive->drive_data = data;
-}
-
-#define ide_port_for_each_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
-
-#define ide_port_for_each_present_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
- if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
-
-#define ide_host_for_each_port(i, port, host) \
- for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
-
-
-#endif /* _IDE_H */
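
For readers puzzling over the iteration helpers above, a hedged usage sketch
(assuming an already-probed ide_hwif_t *hwif; the printk body is illustrative):

	int i;
	ide_drive_t *drive;

	ide_port_for_each_present_dev(i, drive, hwif)
		printk(KERN_INFO "%s: device present\n", drive->name);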
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
new file mode 100644
index 000000000000..a85d5dd40f72
--- /dev/null
+++ b/include/linux/idle_inject.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ */
+#ifndef __IDLE_INJECT_H__
+#define __IDLE_INJECT_H__
+
+/* private idle injection device structure */
+struct idle_inject_device;
+
+struct idle_inject_device *idle_inject_register(struct cpumask *cpumask);
+
+struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
+ bool (*update)(void));
+
+void idle_inject_unregister(struct idle_inject_device *ii_dev);
+
+int idle_inject_start(struct idle_inject_device *ii_dev);
+
+void idle_inject_stop(struct idle_inject_device *ii_dev);
+
+void idle_inject_set_duration(struct idle_inject_device *ii_dev,
+ unsigned int run_duration_us,
+ unsigned int idle_duration_us);
+
+void idle_inject_get_duration(struct idle_inject_device *ii_dev,
+ unsigned int *run_duration_us,
+ unsigned int *idle_duration_us);
+
+void idle_inject_set_latency(struct idle_inject_device *ii_dev,
+ unsigned int latency_us);
+
+#endif /* __IDLE_INJECT_H__ */
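
A minimal usage sketch of the API above (the cpumask variable and the
duration values are illustrative; error handling is elided):

	static struct cpumask my_cpus;
	struct idle_inject_device *ii_dev;

	cpumask_copy(&my_cpus, cpu_online_mask);
	ii_dev = idle_inject_register(&my_cpus);
	if (!ii_dev)
		return -ENOMEM;
	idle_inject_set_duration(ii_dev, 5000, 1000);	/* run/idle, in us */
	idle_inject_start(ii_dev);
	/* ... injection runs ... */
	idle_inject_stop(ii_dev);
	idle_inject_unregister(ii_dev);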
diff --git a/include/linux/idr.h b/include/linux/idr.h
index bf70b3ef0a07..789e23e67444 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -1,9 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/idr.h
*
* 2002-10-18 written by Jim Houston jim.houston@ccur.com
* Copyright (C) 2002 by Concurrent Computer Corporation
- * Distributed under the GNU GPL license version 2.
*
* Small id to pointer translation service avoiding fixed sized
* tables.
@@ -15,9 +15,11 @@
#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
+#include <linux/cleanup.h>
struct idr {
struct radix_tree_root idr_rt;
+ unsigned int idr_base;
unsigned int idr_next;
};
@@ -28,13 +30,31 @@ struct idr {
#define IDR_FREE 0
/* Set the IDR flag and the IDR_FREE tag */
-#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
+#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
+ (1 << (ROOT_TAG_SHIFT + IDR_FREE)))
-#define IDR_INIT \
-{ \
- .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
+#define IDR_INIT_BASE(name, base) { \
+ .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
+ .idr_base = (base), \
+ .idr_next = 0, \
}
-#define DEFINE_IDR(name) struct idr name = IDR_INIT
+
+/**
+ * IDR_INIT() - Initialise an IDR.
+ * @name: Name of IDR.
+ *
+ * A freshly-initialised IDR contains no IDs.
+ */
+#define IDR_INIT(name) IDR_INIT_BASE(name, 0)
+
+/**
+ * DEFINE_IDR() - Define a statically-allocated IDR.
+ * @name: Name of IDR.
+ *
+ * An IDR defined using this macro is ready for use with no additional
+ * initialisation required. It contains no IDs.
+ */
+#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
/**
* idr_get_cursor - Return the current position of the cyclic allocator
@@ -79,26 +99,81 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
* period).
*/
+#define idr_lock(idr) xa_lock(&(idr)->idr_rt)
+#define idr_unlock(idr) xa_unlock(&(idr)->idr_rt)
+#define idr_lock_bh(idr) xa_lock_bh(&(idr)->idr_rt)
+#define idr_unlock_bh(idr) xa_unlock_bh(&(idr)->idr_rt)
+#define idr_lock_irq(idr) xa_lock_irq(&(idr)->idr_rt)
+#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt)
+#define idr_lock_irqsave(idr, flags) \
+ xa_lock_irqsave(&(idr)->idr_rt, flags)
+#define idr_unlock_irqrestore(idr, flags) \
+ xa_unlock_irqrestore(&(idr)->idr_rt, flags)
+
void idr_preload(gfp_t gfp_mask);
-int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
-int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
+
+int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
+int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
+ unsigned long max, gfp_t);
+int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
+void *idr_remove(struct idr *, unsigned long id);
+void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
-void *idr_replace(struct idr *, void *, int id);
+void *idr_get_next_ul(struct idr *, unsigned long *nextid);
+void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
-static inline void *idr_remove(struct idr *idr, int id)
+struct __class_idr {
+ struct idr *idr;
+ int id;
+};
+
+#define idr_null ((struct __class_idr){ NULL, -1 })
+#define take_idr_id(id) __get_and_null(id, idr_null)
+
+DEFINE_CLASS(idr_alloc, struct __class_idr,
+ if (_T.id >= 0) idr_remove(_T.idr, _T.id),
+ ((struct __class_idr){
+ .idr = idr,
+ .id = idr_alloc(idr, ptr, start, end, gfp),
+ }),
+ struct idr *idr, void *ptr, int start, int end, gfp_t gfp);
+
+/**
+ * idr_init_base() - Initialise an IDR.
+ * @idr: IDR handle.
+ * @base: The base value for the IDR.
+ *
+ * This variation of idr_init() creates an IDR which will allocate IDs
+ * starting at %base.
+ */
+static inline void idr_init_base(struct idr *idr, int base)
{
- return radix_tree_delete_item(&idr->idr_rt, id, NULL);
+ INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
+ idr->idr_base = base;
+ idr->idr_next = 0;
}
+/**
+ * idr_init() - Initialise an IDR.
+ * @idr: IDR handle.
+ *
+ * Initialise a dynamically allocated IDR. To initialise a
+ * statically allocated IDR, use DEFINE_IDR().
+ */
static inline void idr_init(struct idr *idr)
{
- INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
- idr->idr_next = 0;
+ idr_init_base(idr, 0);
}
+/**
+ * idr_is_empty() - Are there any IDs allocated?
+ * @idr: IDR handle.
+ *
+ * Return: %true if no IDs are currently allocated from this IDR.
+ */
static inline bool idr_is_empty(const struct idr *idr)
{
return radix_tree_empty(&idr->idr_rt) &&
@@ -113,56 +188,69 @@ static inline bool idr_is_empty(const struct idr *idr)
*/
static inline void idr_preload_end(void)
{
- preempt_enable();
+ local_unlock(&radix_tree_preloads.lock);
}
/**
- * idr_find - return pointer for given id
- * @idr: idr handle
- * @id: lookup key
+ * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
+ * @idr: IDR handle.
+ * @entry: The type * to use as cursor
+ * @id: Entry ID.
*
- * Return the pointer given the id it has been registered with. A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
+ * @entry and @id do not need to be initialized before the loop, and
+ * after normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
-static inline void *idr_find(const struct idr *idr, int id)
-{
- return radix_tree_lookup(&idr->idr_rt, id);
-}
+#define idr_for_each_entry(idr, entry, id) \
+ for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
/**
- * idr_for_each_entry - iterate over an idr's elements of a given type
- * @idr: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
+ * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
+ * @idr: IDR handle.
+ * @entry: The type * to use as cursor.
+ * @tmp: A temporary placeholder for ID.
+ * @id: Entry ID.
*
* @entry and @id do not need to be initialized before the loop, and
- * after normal terminatinon @entry is left with the value NULL. This
+ * after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
-#define idr_for_each_entry(idr, entry, id) \
- for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
+#define idr_for_each_entry_ul(idr, entry, tmp, id) \
+ for (tmp = 0, id = 0; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ tmp = id, ++id)
/**
- * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
- * @idr: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
+ * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
+ * @idr: IDR handle.
+ * @entry: The type * to use as a cursor.
+ * @id: Entry ID.
*
- * Continue to iterate over list of given type, continuing after
- * the current position.
+ * Continue to iterate over entries, continuing after the current position.
*/
#define idr_for_each_entry_continue(idr, entry, id) \
for ((entry) = idr_get_next((idr), &(id)); \
entry; \
++id, (entry) = idr_get_next((idr), &(id)))
+/**
+ * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
+ * @idr: IDR handle.
+ * @entry: The type * to use as a cursor.
+ * @tmp: A temporary placeholder for ID.
+ * @id: Entry ID.
+ *
+ * Continue to iterate over entries, continuing after the current position.
+ * After normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
+ */
+#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
+ for (tmp = id; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ tmp = id, ++id)
+
/*
- * IDA - IDR based id allocator, use when translation from id to
- * pointer isn't necessary.
+ * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
*/
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
@@ -172,45 +260,92 @@ struct ida_bitmap {
unsigned long bitmap[IDA_BITMAP_LONGS];
};
-DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-
struct ida {
- struct radix_tree_root ida_rt;
+ struct xarray xa;
};
-#define IDA_INIT { \
- .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \
+#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
+
+#define IDA_INIT(name) { \
+ .xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
}
-#define DEFINE_IDA(name) struct ida name = IDA_INIT
+#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
-int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
-int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
-void ida_remove(struct ida *ida, int id);
+int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
+void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
+int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max);
-int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
- gfp_t gfp_mask);
-void ida_simple_remove(struct ida *ida, unsigned int id);
+/**
+ * ida_alloc() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between 0 and %INT_MAX, inclusive.
+ *
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+static inline int ida_alloc(struct ida *ida, gfp_t gfp)
+{
+ return ida_alloc_range(ida, 0, ~0, gfp);
+}
-static inline void ida_init(struct ida *ida)
+/**
+ * ida_alloc_min() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between @min and %INT_MAX, inclusive.
+ *
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
- INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
+ return ida_alloc_range(ida, min, ~0, gfp);
}
/**
- * ida_get_new - allocate new ID
- * @ida: idr handle
- * @p_id: pointer to the allocated handle
+ * ida_alloc_max() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between 0 and @max, inclusive.
*
- * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
*/
-static inline int ida_get_new(struct ida *ida, int *p_id)
+static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
+{
+ return ida_alloc_range(ida, 0, max, gfp);
+}
+
+static inline void ida_init(struct ida *ida)
{
- return ida_get_new_above(ida, 0, p_id);
+ xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
static inline bool ida_is_empty(const struct ida *ida)
{
- return radix_tree_empty(&ida->ida_rt);
+ return xa_empty(&ida->xa);
+}
+
+static inline bool ida_exists(struct ida *ida, unsigned int id)
+{
+ return ida_find_first_range(ida, id, id) == id;
+}
+
+static inline int ida_find_first(struct ida *ida)
+{
+ return ida_find_first_range(ida, 0, ~0);
}
#endif /* __IDR_H__ */
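
Taken together, the interfaces above admit sketches like the following
(hedged: the instance names are made up, and passing end == 0 to idr_alloc()
is assumed to mean "no upper bound"):

	static DEFINE_IDR(my_idr);
	static DEFINE_IDA(my_ida);

	/* IDR: map a kernel-chosen small integer to a pointer. */
	int id = idr_alloc(&my_idr, ptr, 0, 0, GFP_KERNEL);
	void *p = idr_find(&my_idr, id);
	idr_remove(&my_idr, id);

	/* IDA: allocate bare IDs when no id-to-pointer mapping is needed. */
	int seq = ida_alloc(&my_ida, GFP_KERNEL);
	if (seq >= 0)
		ida_free(&my_ida, seq);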
diff --git a/include/linux/ieee80211-eht.h b/include/linux/ieee80211-eht.h
new file mode 100644
index 000000000000..f9782e46c5e5
--- /dev/null
+++ b/include/linux/ieee80211-eht.h
@@ -0,0 +1,1182 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 EHT definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_EHT_H
+#define LINUX_IEEE80211_EHT_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+/* need HE definitions for the inlines here */
+#include <linux/ieee80211-he.h>
+
+#define IEEE80211_TTLM_MAX_CNT 2
+#define IEEE80211_TTLM_CONTROL_DIRECTION 0x03
+#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP 0x04
+#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT 0x08
+#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT 0x10
+#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE 0x20
+
+#define IEEE80211_TTLM_DIRECTION_DOWN 0
+#define IEEE80211_TTLM_DIRECTION_UP 1
+#define IEEE80211_TTLM_DIRECTION_BOTH 2
+
+/**
+ * struct ieee80211_ttlm_elem - TID-To-Link Mapping element
+ *
+ * Defined in section 9.4.2.314 in P802.11be_D4
+ *
+ * @control: the first part of control field
+ * @optional: the second part of control field
+ */
+struct ieee80211_ttlm_elem {
+ u8 control;
+ u8 optional[];
+} __packed;
+
+#define IEEE80211_EHT_MCS_NSS_RX 0x0f
+#define IEEE80211_EHT_MCS_NSS_TX 0xf0
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20MHz only station max
+ * supported NSS per MCS.
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 7.
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 8 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
+ */
+struct ieee80211_eht_mcs_nss_supp_20mhz_only {
+ union {
+ struct {
+ u8 rx_tx_mcs7_max_nss;
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[4];
+ };
+};
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except
+ * 20MHz only stations).
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
+ */
+struct ieee80211_eht_mcs_nss_supp_bw {
+ union {
+ struct {
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[3];
+ };
+};
+
+/**
+ * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data
+ *
+ * This structure is the "EHT Capabilities element" fixed fields as
+ * described in P802.11be_D2.0 section 9.4.2.313.
+ *
+ * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP*
+ * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP*
+ */
+struct ieee80211_eht_cap_elem_fixed {
+ u8 mac_cap_info[2];
+ u8 phy_cap_info[9];
+} __packed;
+
+/**
+ * struct ieee80211_eht_cap_elem - EHT capabilities element
+ * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed
+ * @optional: optional parts
+ */
+struct ieee80211_eht_cap_elem {
+ struct ieee80211_eht_cap_elem_fixed fixed;
+
+ /*
+ * Followed by:
+ * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets.
+ * EHT PPE Thresholds field: variable length.
+ */
+ u8 optional[];
+} __packed;
+
+#define IEEE80211_EHT_OPER_INFO_PRESENT 0x01
+#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x02
+#define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30
+#define IEEE80211_EHT_OPER_MCS15_DISABLE 0x40
+
+/**
+ * struct ieee80211_eht_operation - eht operation element
+ *
+ * This structure is the "EHT Operation Element" fields as
+ * described in P802.11be_D2.0 section 9.4.2.311
+ *
+ * @params: EHT operation element parameters. See &IEEE80211_EHT_OPER_*
+ * @basic_mcs_nss: indicates the EHT-MCSs for each number of spatial streams in
+ * EHT PPDUs that are supported by all EHT STAs in the BSS in transmit and
+ * receive.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation {
+ u8 params;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only basic_mcs_nss;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_eht_operation_info - eht operation information
+ *
+ * @control: EHT operation information control.
+ * @ccfs0: defines a channel center frequency for a 20, 40, 80, 160, or 320 MHz
+ * EHT BSS.
+ * @ccfs1: defines a channel center frequency for a 160 or 320 MHz EHT BSS.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation_info {
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 optional[];
+} __packed;
+
+/* EHT MAC capabilities as defined in P802.11be_D2.0 section 9.4.2.313.2 */
+#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01
+#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08
+#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10
+#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK 0xc0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895 0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 1
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 2
+
+#define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01
+#define IEEE80211_EHT_MAC_CAP1_EHT_TRS 0x02
+#define IEEE80211_EHT_MAC_CAP1_TXOP_RET 0x04
+#define IEEE80211_EHT_MAC_CAP1_TWO_BQRS 0x08
+#define IEEE80211_EHT_MAC_CAP1_EHT_LINK_ADAPT_MASK 0x30
+#define IEEE80211_EHT_MAC_CAP1_UNSOL_EPCS_PRIO_ACCESS 0x40
+
+/* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */
+#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02
+#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08
+#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x10
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x20
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x40
+
+/* EHT beamformee number of spatial streams <= 80MHz is split */
+#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x80
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03
+
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x1c
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0xe0
+
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x07
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x38
+
+/* EHT number of sounding dimensions for 320MHz is split */
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x01
+#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x02
+#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x04
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x08
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x10
+#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x20
+#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x40
+#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x80
+
+#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x01
+#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x08
+#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0xf0
+
+#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x01
+#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x08
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x30
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 1
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 2
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 3
+
+/* Maximum number of supported EHT LTF is split */
+#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x40
+#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
+
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ 0x30
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
+#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
+
+#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW 0x01
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x02
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x10
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x20
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT 0x80
+
+#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01
+#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02
+
+/*
+ * EHT operation channel width as defined in P802.11be_D2.0 section 9.4.2.311
+ */
+#define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ 1
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ 2
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ 3
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ 4
+
+/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */
+static inline u8
+ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_eht_cap_elem_fixed *eht_cap,
+ bool from_ap)
+{
+ u8 count = 0;
+
+ /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ return 3;
+
+ /* on 2.4 GHz, these three bits are reserved, so should be 0 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+ count += 3;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 3;
+
+ if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ count += 3;
+
+ if (count)
+ return count;
+
+ return from_ap ? 3 : 4;
+}
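+
+/* Worked example (illustrative): a 5 GHz STA advertising both the
+ * 40/80 MHz and the 160 MHz HE channel-width bits yields 3 + 3 = 6
+ * octets here; with no width bits set, the 20 MHz-only format applies
+ * (3 octets when reported by an AP, otherwise 4). */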
+
+/* 802.11be EHT PPE Thresholds */
+#define IEEE80211_EHT_PPE_THRES_NSS_POS 0
+#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0xf
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x1f0
+#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 3
+#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 9
+
+/*
+ * Calculate 802.11be EHT capabilities IE EHT field size
+ */
+static inline u8
+ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u32 n;
+
+ if (!(phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT))
+ return 0;
+
+ n = hweight16(ppe_thres_hdr &
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK);
+
+ /*
+ * Each pair is 6 bits, and we need to add the 9 "header" bits to the
+ * total size.
+ */
+ n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 +
+ IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+ return DIV_ROUND_UP(n, 8);
+}
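+
+/* Worked example (illustrative): with two RU-index bits set and an NSS
+ * field of 1, n = 2 * (1 + 1) = 4 PPET pairs, so the field spans
+ * 4 * 3 * 2 + 9 = 33 bits and DIV_ROUND_UP(33, 8) = 5 octets. */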
+
+static inline bool
+ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len,
+ bool from_ap)
+{
+ const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data;
+ u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed);
+
+ if (len < needed || !he_capa)
+ return false;
+
+ needed += ieee80211_eht_mcs_nss_size((const void *)he_capa,
+ (const void *)data,
+ from_ap);
+ if (len < needed)
+ return false;
+
+ if (elem->phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u16 ppe_thres_hdr;
+
+ if (len < needed + sizeof(ppe_thres_hdr))
+ return false;
+
+ ppe_thres_hdr = get_unaligned_le16(data + needed);
+ needed += ieee80211_eht_ppe_size(ppe_thres_hdr,
+ elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
+
+static inline bool
+ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_eht_operation *elem = (const void *)data;
+ u8 needed = sizeof(*elem);
+
+ if (len < needed)
+ return false;
+
+ if (elem->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
+ needed += 3;
+
+ if (elem->params &
+ IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)
+ needed += 2;
+ }
+
+ return len >= needed;
+}
+
+/* must validate ieee80211_eht_oper_size_ok() first */
+static inline u16
+ieee80211_eht_oper_dis_subchan_bitmap(const struct ieee80211_eht_operation *eht_oper)
+{
+ const struct ieee80211_eht_operation_info *info =
+ (const void *)eht_oper->optional;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT))
+ return 0;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
+ return 0;
+
+ return get_unaligned_le16(info->optional);
+}
+
+#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1)
+
+struct ieee80211_bandwidth_indication {
+ u8 params;
+ struct ieee80211_eht_operation_info info;
+} __packed;
+
+static inline bool
+ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_bandwidth_indication *bwi = (const void *)data;
+
+ if (len < sizeof(*bwi))
+ return false;
+
+ if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT &&
+ len < sizeof(*bwi) + 2)
+ return false;
+
+ return true;
+}
+
+/* Protected EHT action codes */
+enum ieee80211_protected_eht_actioncode {
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_REQ = 3,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP = 4,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN = 5,
+ WLAN_PROTECTED_EHT_ACTION_EML_OP_MODE_NOTIF = 6,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECOMMEND = 7,
+ WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_REQ = 8,
+ WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_RESP = 9,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_NOTIF = 10,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_REQ = 11,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP = 12,
+};
+
+/* multi-link device */
+#define IEEE80211_MLD_MAX_NUM_LINKS 15
+
+#define IEEE80211_ML_CONTROL_TYPE 0x0007
+#define IEEE80211_ML_CONTROL_TYPE_BASIC 0
+#define IEEE80211_ML_CONTROL_TYPE_PREQ 1
+#define IEEE80211_ML_CONTROL_TYPE_RECONF 2
+#define IEEE80211_ML_CONTROL_TYPE_TDLS 3
+#define IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS 4
+#define IEEE80211_ML_CONTROL_PRESENCE_MASK 0xfff0
+
+struct ieee80211_multi_link_elem {
+ __le16 control;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_BASIC_PRES_LINK_ID 0x0010
+#define IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT 0x0020
+#define IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY 0x0040
+#define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080
+#define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100
+#define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200
+#define IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP 0x0400
+
+#define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff
+#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00
+#define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS 0xf000
+
+/*
+ * Described in P802.11be_D3.0
+ * dot11MSDTimerDuration should default to 5484 (i.e. 171.375)
+ * dot11MSDOFDMEDthreshold defaults to -72 (i.e. 0)
+ * dot11MSDTXOPMAX defaults to 1
+ */
+#define IEEE80211_MED_SYNC_DELAY_DEFAULT 0x10ac
+
+#define IEEE80211_EML_CAP_EMLSR_SUPP 0x0001
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x000e
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x0070
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_16US 1
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_32US 2
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 3
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_128US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 5
+#define IEEE80211_EML_CAP_EMLMR_SUPPORT 0x0080
+#define IEEE80211_EML_CAP_EMLMR_DELAY 0x0700
+#define IEEE80211_EML_CAP_EMLMR_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLMR_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLMR_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLMR_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLMR_DELAY_256US 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT 0x7800
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_0 0
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128US 1
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_256US 2
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_512US 3
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_1TU 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_2TU 5
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_4TU 6
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_8TU 7
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_16TU 8
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_32TU 9
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_64TU 10
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 11
+
+#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f
+#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_NO_SUPP 0
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME 1
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_RESERVED 2
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF 3
+#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
+#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
+#define IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT 0x2000
+#define IEEE80211_MLD_CAP_OP_ALIGNED_TWT_SUPPORT 0x4000
+
+struct ieee80211_mle_basic_common_info {
+ u8 len;
+ u8 mld_mac_addr[ETH_ALEN];
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_PREQ_PRES_MLD_ID 0x0010
+
+struct ieee80211_mle_preq_common_info {
+ u8 len;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010
+#define IEEE80211_MLC_RECONF_PRES_EML_CAPA 0x0020
+#define IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP 0x0040
+#define IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP 0x0080
+
+/* no fixed fields in RECONF */
+
+struct ieee80211_mle_tdls_common_info {
+ u8 len;
+ u8 ap_mld_mac_addr[ETH_ALEN];
+} __packed;
+
+#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010
+
+/* no fixed fields in PRIO_ACCESS */
+
+/**
+ * ieee80211_mle_common_size - check multi-link element common size
+ * @data: multi-link element, must already be checked for size using
+ * ieee80211_mle_size_ok()
+ * Return: the size of the multi-link element's "common" subfield
+ */
+static inline u8 ieee80211_mle_common_size(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ /*
+		 * The length is the first octet pointed to by mle->variable,
+		 * so there is no need to add anything.
+ */
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return sizeof(*mle) + mle->variable[0];
+}
+
+/**
+ * ieee80211_mle_get_link_id - returns the link ID
+ * @data: the basic multi link element
+ * Return: the link ID, or -1 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline int ieee80211_mle_get_link_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_LINK_ID))
+ return -1;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
+ * @data: pointer to the basic multi link element
+ * Return: the BSS Parameter Change Count field value, or -1 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline int
+ieee80211_mle_get_bss_param_ch_cnt(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))
+ return -1;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay
+ * @data: pointer to the multi-link element
+ * Return: the medium synchronization delay field value from the multi-link
+ * element, or the default value (%IEEE80211_MED_SYNC_DELAY_DEFAULT)
+ * if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
+ return IEEE80211_MED_SYNC_DELAY_DEFAULT;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_eml_cap - returns the EML capability
+ * @data: pointer to the multi-link element
+ * Return: the EML capability field value from the multi-link element,
+ * or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_EML_CAPA))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations.
+ * @data: pointer to the multi-link element
+ * Return: the MLD capabilities and operations field value from the multi-link
+ * element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/* Defined in Figure 9-1074t in P802.11be_D7.0 */
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_PARAM_UPDATE 0x0001
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_RECO_MAX_LINKS_MASK 0x001e
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_NSTR_UPDATE 0x0020
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_EMLSR_ENA_ON_ONE_LINK 0x0040
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_BTM_MLD_RECO_MULTI_AP 0x0080
+
+/**
+ * ieee80211_mle_get_ext_mld_capa_op - returns the extended MLD capabilities
+ * and operations.
+ * @data: pointer to the multi-link element
+ * Return: the extended MLD capabilities and operations field value from
+ * the multi-link element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_ext_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_mld_id - returns the MLD ID
+ * @data: pointer to the multi-link element
+ * Return: The MLD ID in the given multi-link element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u8 ieee80211_mle_get_mld_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_ID))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_size_ok - validate multi-link element size
+ * @data: pointer to the element data
+ * @len: length of the containing element
+ * Return: whether or not the multi-link element size is OK
+ */
+static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u8 fixed = sizeof(*mle);
+ u8 common = 0;
+ bool check_common_len = false;
+ u16 control;
+
+ if (!data || len < fixed)
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+ check_common_len = true;
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP)
+ common += 2;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ common += sizeof(struct ieee80211_mle_preq_common_info);
+ if (control & IEEE80211_MLC_PREQ_PRES_MLD_ID)
+ common += 1;
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ if (control & IEEE80211_MLC_RECONF_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP)
+ common += 2;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ common += sizeof(struct ieee80211_mle_tdls_common_info);
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ common = ETH_ALEN + 1;
+ break;
+ default:
+ /* we don't know this type */
+ return true;
+ }
+
+ if (len < fixed + common)
+ return false;
+
+ if (!check_common_len)
+ return true;
+
+ /* if present, common length is the first octet there */
+ return mle->variable[0] >= common;
+}
+
+/**
+ * ieee80211_mle_type_ok - validate multi-link element type and size
+ * @data: pointer to the element data
+ * @type: expected type of the element
+ * @len: length of the containing element
+ * Return: whether or not the multi-link element type matches and size is OK
+ */
+static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control;
+
+ if (!ieee80211_mle_size_ok(data, len))
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) == type)
+ return true;
+
+ return false;
+}
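For illustration, the two check helpers above are meant to gate the accessors: a caller validates the element once and may then use the getters without further bounds checking. A minimal sketch, not part of the patch, assuming data/len are an element body and its length from an element iterator and that pr_info() is available:

	if (ieee80211_mle_type_ok(data, IEEE80211_ML_CONTROL_TYPE_BASIC, len))
		pr_info("link ID %d, EML capa 0x%04x\n",
			ieee80211_mle_get_link_id(data),
			ieee80211_mle_get_eml_cap(data));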
+
+enum ieee80211_mle_subelems {
+ IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0,
+ IEEE80211_MLE_SUBELEM_FRAGMENT = 254,
+};
+
+#define IEEE80211_MLE_STA_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT 0x0040
+#define IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT 0x0080
+#define IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT 0x0100
+#define IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT 0x0200
+#define IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE 0x0400
+#define IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT 0x0800
+
+struct ieee80211_mle_per_sta_profile {
+ __le16 control;
+ u8 sta_info_len;
+ u8 variable[];
+} __packed;
+
+/**
+ * ieee80211_mle_basic_sta_prof_size_ok - validate basic multi-link element sta
+ * profile size
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ * Return: %true if the STA profile is large enough, %false otherwise
+ */
+static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
+ size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ info_len += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ info_len += 2;
+ else
+ info_len += 1;
+ }
+ if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
+ info_len += 1;
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len - 1 <= len;
+}
+
+/**
+ * ieee80211_mle_basic_sta_prof_bss_param_ch_cnt - get per-STA profile BSS
+ * parameter change count
+ * @prof: the per-STA profile, having been checked with
+ * ieee80211_mle_basic_sta_prof_size_ok() for the correct length
+ *
+ * Return: The BSS parameter change count value if present, 0 otherwise.
+ */
+static inline u8
+ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta_profile *prof)
+{
+ u16 control = le16_to_cpu(prof->control);
+ const u8 *pos = prof->variable;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT))
+ return 0;
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ pos += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ pos += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ pos += 2;
+ else
+ pos += 1;
+ }
+
+ return *pos;
+}
+
+#define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT 0x0040
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE 0x0780
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_AP_REM 0
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_OP_PARAM_UPDATE 1
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_ADD_LINK 2
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_DEL_LINK 3
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_NSTR_STATUS 4
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800
+
+/**
+ * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link
+ * element sta profile size.
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ * Return: %true if the STA profile is large enough, %false otherwise
+ */
+static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data,
+ size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += ETH_ALEN;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT)
+ info_len += 2;
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len - 1 <= len;
+}
+
+#define IEEE80211_MLE_STA_EPCS_CONTROL_LINK_ID 0x000f
+#define IEEE80211_EPCS_ENA_RESP_BODY_LEN 3
+
+static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_ttlm_elem *t2l = (const void *)data;
+ u8 control, fixed = sizeof(*t2l), elem_len = 0;
+
+ if (len < fixed)
+ return false;
+
+ control = t2l->control;
+
+ if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT)
+ elem_len += 2;
+ if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)
+ elem_len += 3;
+
+ if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) {
+ u8 bm_size;
+
+ elem_len += 1;
+ if (len < fixed + elem_len)
+ return false;
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ bm_size = 1;
+ else
+ bm_size = 2;
+
+ elem_len += hweight8(t2l->optional[0]) * bm_size;
+ }
+
+ return len >= fixed + elem_len;
+}
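As a worked length check, assume (illustratively) a control byte with the switch-time field present, a non-default link mapping with 16-bit bitmaps, and four TIDs mapped in the presence indicator; the element body must then be at least:

	/*
	 * sizeof(struct ieee80211_ttlm_elem)   fixed header
	 *   + 2       switch time
	 *   + 1       link mapping presence indicator
	 *   + 4 * 2   one 16-bit bitmap per mapped TID
	 */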
+
+/**
+ * ieee80211_emlsr_pad_delay_in_us - Fetch the EMLSR Padding delay
+ * in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Padding delay (in microseconds) encoded in the
+ * EML Capabilities field
+ */
+static inline u32 ieee80211_emlsr_pad_delay_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417i—Encoding of the EMLSR
+ * Padding Delay subfield.
+ */
+ u32 pad_delay = u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_EMLSR_PADDING_DELAY);
+
+ if (!pad_delay ||
+ pad_delay > IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US)
+ return 0;
+
+ return 32 * (1 << (pad_delay - 1));
+}
+
+/**
+ * ieee80211_emlsr_trans_delay_in_us - Fetch the EMLSR Transition
+ * delay in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Transition delay (in microseconds) encoded in the
+ * EML Capabilities field
+ */
+static inline u32 ieee80211_emlsr_trans_delay_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417j—Encoding of the EMLSR
+ * Transition Delay subfield.
+ */
+ u32 trans_delay =
+ u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY);
+
+ /* invalid values also just use 0 */
+ if (!trans_delay ||
+ trans_delay > IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US)
+ return 0;
+
+ return 16 * (1 << (trans_delay - 1));
+}
+
+/**
+ * ieee80211_eml_trans_timeout_in_us - Fetch the EMLSR Transition
+ * timeout value in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Transition timeout (in microseconds) encoded in
+ * the EML Capabilities field
+ */
+static inline u32 ieee80211_eml_trans_timeout_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417m—Encoding of the
+ * Transition Timeout subfield.
+ */
+ u8 timeout = u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
+
+ /* invalid values also just use 0 */
+ if (!timeout || timeout > IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU)
+ return 0;
+
+ return 128 * (1 << (timeout - 1));
+}
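A few sample conversions derived from the three helpers above (illustrative values only):

	/*
	 * padding delay code 3      -> 32 * 2^(3 - 1)  = 128 us
	 * transition delay code 5   -> 16 * 2^(5 - 1)  = 256 us
	 * transition timeout code 4 -> 128 * 2^(4 - 1) = 1024 us (1 TU)
	 */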
+
+#define for_each_mle_subelement(_elem, _data, _len) \
+ if (ieee80211_mle_size_ok(_data, _len)) \
+ for_each_element(_elem, \
+ _data + ieee80211_mle_common_size(_data),\
+ _len - ieee80211_mle_common_size(_data))
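A minimal iteration sketch, assuming a size-validated element body in data/len and the kernel's struct element (illustrative, not part of the patch):

	const struct element *sub;

	for_each_mle_subelement(sub, data, len) {
		if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
			continue;
		/* sub->data / sub->datalen hold one per-STA profile */
	}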
+
+#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee80211-he.h b/include/linux/ieee80211-he.h
new file mode 100644
index 000000000000..a08c446fbb04
--- /dev/null
+++ b/include/linux/ieee80211-he.h
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 HE definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_HE_H
+#define LINUX_IEEE80211_HE_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_TWT_CONTROL_NDP BIT(0)
+#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1)
+#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
+#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
+#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
+
+#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0)
+#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1)
+#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
+#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
+#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
+#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7)
+#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10)
+#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
+
+enum ieee80211_twt_setup_cmd {
+ TWT_SETUP_CMD_REQUEST,
+ TWT_SETUP_CMD_SUGGEST,
+ TWT_SETUP_CMD_DEMAND,
+ TWT_SETUP_CMD_GROUPING,
+ TWT_SETUP_CMD_ACCEPT,
+ TWT_SETUP_CMD_ALTERNATE,
+ TWT_SETUP_CMD_DICTATE,
+ TWT_SETUP_CMD_REJECT,
+};
+
+struct ieee80211_twt_params {
+ __le16 req_type;
+ __le64 twt;
+ u8 min_twt_dur;
+ __le16 mantissa;
+ u8 channel;
+} __packed;
+
+struct ieee80211_twt_setup {
+ u8 dialog_token;
+ u8 element_id;
+ u8 length;
+ u8 control;
+ u8 params[];
+} __packed;
+
+/**
+ * struct ieee80211_he_cap_elem - HE capabilities element
+ * @mac_cap_info: HE MAC Capabilities Information
+ * @phy_cap_info: HE PHY Capabilities Information
+ *
+ * This structure represents the fixed fields of the payload of the
+ * "HE capabilities element" as described in IEEE Std 802.11ax-2021
+ * sections 9.4.2.248.2 and 9.4.2.248.3.
+ */
+struct ieee80211_he_cap_elem {
+ u8 mac_cap_info[6];
+ u8 phy_cap_info[11];
+} __packed;
+
+#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
+
+/**
+ * enum ieee80211_he_mcs_support - HE MCS support definitions
+ * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
+ * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the rx_mcs_*
+ * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_he_mcs_support {
+ IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
+ IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
+ IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
+};
+
+/**
+ * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
+ *
+ * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
+ * described in P802.11ax_D2.0 section 9.4.2.237.4
+ *
+ * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ */
+struct ieee80211_he_mcs_nss_supp {
+ __le16 rx_mcs_80;
+ __le16 tx_mcs_80;
+ __le16 rx_mcs_160;
+ __le16 tx_mcs_160;
+ __le16 rx_mcs_80p80;
+ __le16 tx_mcs_80p80;
+} __packed;
+
+/**
+ * struct ieee80211_he_operation - HE Operation element
+ * @he_oper_params: HE Operation Parameters + BSS Color Information
+ * @he_mcs_nss_set: Basic HE-MCS And NSS Set
+ * @optional: Optional fields VHT Operation Information, Max Co-Hosted
+ * BSSID Indicator, and 6 GHz Operation Information
+ *
+ * This structure represents the payload of the "HE Operation
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.249.
+ */
+struct ieee80211_he_operation {
+ __le32 he_oper_params;
+ __le16 he_mcs_nss_set;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_he_spr - Spatial Reuse Parameter Set element
+ * @he_sr_control: SR Control
+ * @optional: Optional fields Non-SRG OBSS PD Max Offset, SRG OBSS PD
+ * Min Offset, SRG OBSS PD Max Offset, SRG BSS Color
+ * Bitmap, and SRG Partial BSSID Bitmap
+ *
+ * This structure represents the payload of the "Spatial Reuse
+ * Parameter Set element" as described in IEEE Std 802.11ax-2021
+ * section 9.4.2.252.
+ */
+struct ieee80211_he_spr {
+ u8 he_sr_control;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ * @aifsn: ACI/AIFSN
+ * @ecw_min_max: ECWmin/ECWmax
+ * @mu_edca_timer: MU EDCA Timer
+ *
+ * This structure represents the "MU AC Parameter Record" as described
+ * in IEEE Std 802.11ax-2021 section 9.4.2.251, Figure 9-788p.
+ */
+struct ieee80211_he_mu_edca_param_ac_rec {
+ u8 aifsn;
+ u8 ecw_min_max;
+ u8 mu_edca_timer;
+} __packed;
+
+/**
+ * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ * @mu_qos_info: QoS Info
+ * @ac_be: MU AC_BE Parameter Record
+ * @ac_bk: MU AC_BK Parameter Record
+ * @ac_vi: MU AC_VI Parameter Record
+ * @ac_vo: MU AC_VO Parameter Record
+ *
+ * This structure represents the payload of the "MU EDCA Parameter Set
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.251.
+ */
+struct ieee80211_mu_edca_param_set {
+ u8 mu_qos_info;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_be;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
+} __packed;
+
+/* 802.11ax HE MAC capabilities */
+#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
+#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
+#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
+
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
+
+/* Link adaptation is split between byte HE_MAC_CAP1 and
+ * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE is
+ * set, in which case the following values apply:
+ * 0 = No feedback.
+ * 1 = Reserved.
+ * 2 = Unsolicited feedback.
+ * 3 = Both solicited and unsolicited feedback.
+ */
+#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
+
+#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
+#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
+#define IEEE80211_HE_MAC_CAP2_TRS 0x04
+#define IEEE80211_HE_MAC_CAP2_BSR 0x08
+#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
+#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
+#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
+#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
+
+#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
+#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
+
+/* The maximum length of an A-MPDU is defined by the combination of the Maximum
+ * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
+ * same field in the HE capabilities.
+ */
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
+#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
+#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
+#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
+
+#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
+#define IEEE80211_HE_MAC_CAP4_QTP 0x02
+#define IEEE80211_HE_MAC_CAP4_BQR 0x04
+#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
+#define IEEE80211_HE_MAC_CAP4_OPS 0x20
+#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40
+/* Multi TID agg TX is split between byte #4 and #5
+ * The value is a combination of B39,B40,B41
+ */
+#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
+
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
+#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04
+#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
+#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
+#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20
+#define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40
+#define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80
+
+#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
+#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
+#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
+
+/* 802.11ax HE PHY capabilities */
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL 0x1e
+
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
+
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
+#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
+#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
+#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
+/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
+
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
+#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
+#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
+#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
+
+/* Note that the meaning of UL MU below is different between an AP and a non-AP
+ * sta, where in the AP case it indicates support for Rx and in the non-AP sta
+ * case it indicates support for Tx.
+ */
+#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
+#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
+
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
+#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40
+#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
+
+#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
+#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
+
+/* Minimal allowed value of Max STS under 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
+
+/* Minimal allowed value of Max STS above 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
+
+#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
+#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
+
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
+#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
+
+#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02
+#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
+#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
+#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
+
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
+#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
+#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
+#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
+#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US 0x0
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US 0x1
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US 0x2
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED 0x3
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS 6
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01
+
+/* 802.11ax HE TX/RX MCS NSS Support */
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
+
+/* TX/RX HE MCS Support field Highest MCS subfield encoding */
+enum ieee80211_he_highest_mcs_supported_subfield_enc {
+ HIGHEST_MCS_SUPPORTED_MCS7 = 0,
+ HIGHEST_MCS_SUPPORTED_MCS8,
+ HIGHEST_MCS_SUPPORTED_MCS9,
+ HIGHEST_MCS_SUPPORTED_MCS10,
+ HIGHEST_MCS_SUPPORTED_MCS11,
+};
+
+/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
+static inline u8
+ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
+{
+ u8 count = 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ count += 4;
+
+ return count;
+}
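For example, a 5 GHz device advertising 160 MHz but not 80+80 MHz support carries an 8-octet field (a hypothetical he_cap pointer is assumed):

	/* 4 octets (<= 80 MHz maps) + 4 octets (160 MHz maps) */
	u8 mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);	/* 8 */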
+
+/* 802.11ax HE PPE Thresholds */
+#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
+#define IEEE80211_PPE_THRES_NSS_POS (0)
+#define IEEE80211_PPE_THRES_NSS_MASK (7)
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
+ (BIT(5) | BIT(6))
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
+#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
+#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE (7)
+
+/*
+ * Calculate 802.11ax HE capabilities IE PPE field size
+ * Input: header byte of ppe_thres (first byte), and the PHY capability
+ * info bytes of the HE capabilities IE
+ */
+static inline u8
+ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u8 n;
+
+ if ((phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+ return 0;
+
+ n = hweight8(ppe_thres_hdr &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
+ IEEE80211_PPE_THRES_NSS_POS));
+
+ /*
+ * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+ * total size.
+ */
+ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+ n = DIV_ROUND_UP(n, 8);
+
+ return n;
+}
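A worked size calculation under assumed inputs: NSS subfield 1 (i.e. two spatial streams) and two RU index bits set in the header byte:

	/*
	 * n    = 2 RU indices * (1 + 1) NSS        = 4 PPET pairs
	 * bits = 4 pairs * 3 bits * 2 + 7 (header) = 31
	 * size = DIV_ROUND_UP(31, 8)               = 4 octets
	 */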
+
+static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data;
+ u8 needed = sizeof(*he_cap_ie_elem);
+
+ if (len < needed)
+ return false;
+
+ needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+ if (len < needed)
+ return false;
+
+ if (he_cap_ie_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ if (len < needed + 1)
+ return false;
+ needed += ieee80211_he_ppe_size(data[needed],
+ he_cap_ie_elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
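A short usage sketch, assuming elem is a struct element obtained from element iteration (illustrative only):

	if (ieee80211_he_capa_size_ok(elem->data, elem->datalen)) {
		const struct ieee80211_he_cap_elem *he_cap =
			(const void *)elem->data;
		/* he_cap and the trailing MCS/PPE fields are in bounds */
	}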
+
+/* HE Operation defines */
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
+#define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000
+#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
+#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
+
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD 4
+#define IEEE80211_6GHZ_CTRL_REG_AP_ROLE_NOT_RELEVANT 7
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 8
+
+/**
+ * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
+ * @primary: primary channel
+ * @control: control flags
+ * @ccfs0: channel center frequency segment 0
+ * @ccfs1: channel center frequency segment 1
+ * @minrate: minimum rate (in 1 Mbps units)
+ */
+struct ieee80211_he_6ghz_oper {
+ u8 primary;
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
+#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x78
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 minrate;
+} __packed;
+
+/**
+ * enum ieee80211_reg_conn_bits - represents Regulatory connectivity field bits.
+ *
+ * This enumeration defines bit flags used to represent regulatory connectivity
+ * field bits.
+ *
+ * @IEEE80211_REG_CONN_LPI_VALID: Indicates whether the LPI bit is valid.
+ * @IEEE80211_REG_CONN_LPI_VALUE: Represents the value of the LPI bit.
+ * @IEEE80211_REG_CONN_SP_VALID: Indicates whether the SP bit is valid.
+ * @IEEE80211_REG_CONN_SP_VALUE: Represents the value of the SP bit.
+ */
+enum ieee80211_reg_conn_bits {
+ IEEE80211_REG_CONN_LPI_VALID = BIT(0),
+ IEEE80211_REG_CONN_LPI_VALUE = BIT(1),
+ IEEE80211_REG_CONN_SP_VALID = BIT(2),
+ IEEE80211_REG_CONN_SP_VALUE = BIT(3),
+};
+
+/* transmit power interpretation type of transmit power envelope element */
+enum ieee80211_tx_power_intrpt_type {
+ IEEE80211_TPE_LOCAL_EIRP,
+ IEEE80211_TPE_LOCAL_EIRP_PSD,
+ IEEE80211_TPE_REG_CLIENT_EIRP,
+ IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+};
+
+/* category type of transmit power envelope element */
+enum ieee80211_tx_power_category_6ghz {
+ IEEE80211_TPE_CAT_6GHZ_DEFAULT = 0,
+ IEEE80211_TPE_CAT_6GHZ_SUBORDINATE = 1,
+};
+
+/*
+ * For IEEE80211_TPE_LOCAL_EIRP / IEEE80211_TPE_REG_CLIENT_EIRP,
+ * setting to 63.5 dBm means no constraint.
+ */
+#define IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT 127
+
+/*
+ * For IEEE80211_TPE_LOCAL_EIRP_PSD / IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+ * setting to 127 indicates no PSD limit for the 20 MHz channel.
+ */
+#define IEEE80211_TPE_PSD_NO_LIMIT 127
+
+/**
+ * struct ieee80211_tx_pwr_env - Transmit Power Envelope
+ * @info: Transmit Power Information field
+ * @variable: Maximum Transmit Power field
+ *
+ * This structure represents the payload of the "Transmit Power
+ * Envelope element" as described in IEEE Std 802.11ax-2021 section
+ * 9.4.2.161
+ */
+struct ieee80211_tx_pwr_env {
+ u8 info;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
+#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
+#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
+
+#define IEEE80211_TX_PWR_ENV_EXT_COUNT 0xF
+
+static inline bool ieee80211_valid_tpe_element(const u8 *data, u8 len)
+{
+ const struct ieee80211_tx_pwr_env *env = (const void *)data;
+ u8 count, interpret, category;
+ u8 needed = sizeof(*env);
+ u8 N; /* also called N in the spec */
+
+ if (len < needed)
+ return false;
+
+ count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+
+ switch (category) {
+ case IEEE80211_TPE_CAT_6GHZ_DEFAULT:
+ case IEEE80211_TPE_CAT_6GHZ_SUBORDINATE:
+ break;
+ default:
+ return false;
+ }
+
+ switch (interpret) {
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ if (count > 3)
+ return false;
+
+ /* count == 0 encodes 1 value for 20 MHz, etc. */
+ needed += count + 1;
+
+ if (len < needed)
+ return false;
+
+ /* there can be extension fields not accounted for in 'count' */
+
+ return true;
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ if (count > 4)
+ return false;
+
+ N = count ? 1 << (count - 1) : 1;
+ needed += N;
+
+ if (len < needed)
+ return false;
+
+ if (len > needed) {
+ u8 K = u8_get_bits(env->variable[N],
+ IEEE80211_TX_PWR_ENV_EXT_COUNT);
+
+ needed += 1 + K;
+ if (len < needed)
+ return false;
+ }
+
+ return true;
+ }
+
+ return false;
+}
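Illustrative Maximum Transmit Power field sizes implied by the count rules above:

	/*
	 * EIRP, count = 2      -> count + 1     = 3 octets
	 * EIRP PSD, count = 3  -> N = 2^(3 - 1) = 4 octets,
	 *   plus 1 + K octets when the PSD extension field is present
	 */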
+
+/*
+ * ieee80211_he_oper_size - calculate 802.11ax HE Operation IE size
+ * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_oper_ie has at least
+ *	sizeof(struct ieee80211_he_operation) bytes; the caller must have
+ * validated this.
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_oper_size(const u8 *he_oper_ie)
+{
+ const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie;
+ u8 oper_len = sizeof(struct ieee80211_he_operation);
+ u32 he_oper_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_oper_ie)
+ return 0;
+
+ /* Calc required length */
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ oper_len += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
+ oper_len++;
+ if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)
+ oper_len += sizeof(struct ieee80211_he_6ghz_oper);
+
+ /* Add the first byte (extension ID) to the total length */
+ oper_len++;
+
+ return oper_len;
+}
+
+/**
+ * ieee80211_he_6ghz_oper - obtain 6 GHz operation field
+ * @he_oper: HE operation element (must be pre-validated for size,
+ *	but may be %NULL)
+ *
+ * Return: a pointer to the 6 GHz operation field, or %NULL
+ */
+static inline const struct ieee80211_he_6ghz_oper *
+ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
+{
+ const u8 *ret;
+ u32 he_oper_params;
+
+ if (!he_oper)
+ return NULL;
+
+ ret = (const void *)&he_oper->optional;
+
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+
+ if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
+ return NULL;
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ ret += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
+ ret++;
+
+ return (const void *)ret;
+}
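A usage sketch, assuming he_oper has already been size-validated against ieee80211_he_oper_size() (illustrative only):

	const struct ieee80211_he_6ghz_oper *o6 =
		ieee80211_he_6ghz_oper(he_oper);

	if (o6) {
		u8 width = u8_get_bits(o6->control,
				       IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH);
		/* width is one of the _20MHZ.._160MHZ encodings above */
	}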
+
+/* HE Spatial Reuse defines */
+#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0)
+#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1)
+#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2)
+#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3)
+#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4)
+
+/*
+ * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
+ * @he_spr_ie: byte data of the HE Spatial Reuse IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_spr_ie has at least
+ *	sizeof(struct ieee80211_he_spr) bytes; the caller must have
+ *	validated this.
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_spr_size(const u8 *he_spr_ie)
+{
+ const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie;
+ u8 spr_len = sizeof(struct ieee80211_he_spr);
+ u8 he_spr_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_spr_ie)
+ return 0;
+
+ /* Calc required length */
+ he_spr_params = he_spr->he_sr_control;
+ if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ spr_len++;
+ if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
+ spr_len += 18;
+
+ /* Add the first byte (extension ID) to the total length */
+ spr_len++;
+
+ return spr_len;
+}
+
+struct ieee80211_he_6ghz_capa {
+ /* uses IEEE80211_HE_6GHZ_CAP_* below */
+ __le16 capa;
+} __packed;
+
+/* HE 6 GHz band capabilities */
+/* uses enum ieee80211_min_mpdu_spacing values */
+#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007
+/* uses enum ieee80211_vht_max_ampdu_length_exp values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038
+/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0
+/* WLAN_HT_CAP_SM_PS_* values */
+#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600
+#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800
+#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000
+#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000
+
+#endif /* LINUX_IEEE80211_HE_H */
diff --git a/include/linux/ieee80211-ht.h b/include/linux/ieee80211-ht.h
new file mode 100644
index 000000000000..21bbf470540f
--- /dev/null
+++ b/include/linux/ieee80211-ht.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 HT definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_HT_H
+#define LINUX_IEEE80211_HT_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/* Maximal size of an A-MSDU that can be transported in a HT BA session */
+#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095
+
+/* Maximal size of an A-MSDU */
+#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
+#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
+
+#define IEEE80211_HT_CTL_LEN 4
+
+enum ieee80211_ht_chanwidth_values {
+ IEEE80211_HT_CHANWIDTH_20MHZ = 0,
+ IEEE80211_HT_CHANWIDTH_ANY = 1,
+};
+
+/**
+ * struct ieee80211_bar - Block Ack Request frame format
+ * @frame_control: Frame Control
+ * @duration: Duration
+ * @ra: RA
+ * @ta: TA
+ * @control: BAR Control
+ * @start_seq_num: Starting Sequence Number (see Figure 9-37)
+ *
+ * This structure represents the "BlockAckReq frame format"
+ * as described in IEEE Std 802.11-2020 section 9.3.1.7.
+ */
+struct ieee80211_bar {
+ __le16 frame_control;
+ __le16 duration;
+ __u8 ra[ETH_ALEN];
+ __u8 ta[ETH_ALEN];
+ __le16 control;
+ __le16 start_seq_num;
+} __packed;
+
+/* 802.11 BAR control masks */
+#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002
+#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000
+#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12
+
+#define IEEE80211_HT_MCS_MASK_LEN 10
+
+/**
+ * struct ieee80211_mcs_info - Supported MCS Set field
+ * @rx_mask: RX mask
+ * @rx_highest: highest supported RX rate. If non-zero, this is the
+ *	highest supported RX data rate in units of 1 Mbps. If zero,
+ *	this field should not be used to determine the highest
+ *	supported RX data rate.
+ * @tx_params: TX parameters
+ * @reserved: Reserved bits
+ *
+ * This structure represents the "Supported MCS Set field" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.55.4.
+ */
+struct ieee80211_mcs_info {
+ u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
+ __le16 rx_highest;
+ u8 tx_params;
+ u8 reserved[3];
+} __packed;
+
+/* 802.11n HT capability MCS set */
+#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
+#define IEEE80211_HT_MCS_TX_DEFINED 0x01
+#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
+/* value 0 == 1 stream etc */
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
+#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
+
+#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ? 1 : (1 + ((mcs) >> 3)))
+
+/*
+ * 802.11n D5.0 20.3.5 / 20.6 says:
+ * - indices 0 to 7 and 32 are single spatial stream
+ * - 8 to 31 are multiple spatial streams using equal modulation
+ * [8..15 for two streams, 16..23 for three and 24..31 for four]
+ * - remainder are multiple spatial streams using unequal modulation
+ */
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
+ (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
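For instance, the chain-count macro above yields (illustrative values):

	/*
	 * IEEE80211_HT_MCS_CHAINS(7)  == 1   single stream
	 * IEEE80211_HT_MCS_CHAINS(12) == 2
	 * IEEE80211_HT_MCS_CHAINS(21) == 3
	 * IEEE80211_HT_MCS_CHAINS(32) == 1   MCS 32 duplicate format
	 */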
+
+/**
+ * struct ieee80211_ht_cap - HT capabilities element
+ * @cap_info: HT Capability Information
+ * @ampdu_params_info: A-MPDU Parameters
+ * @mcs: Supported MCS Set
+ * @extended_ht_cap_info: HT Extended Capabilities
+ * @tx_BF_cap_info: Transmit Beamforming Capabilities
+ * @antenna_selection_info: ASEL Capability
+ *
+ * This structure represents the payload of the "HT Capabilities
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.55.
+ */
+struct ieee80211_ht_cap {
+ __le16 cap_info;
+ u8 ampdu_params_info;
+
+ /* 16 bytes MCS information */
+ struct ieee80211_mcs_info mcs;
+
+ __le16 extended_ht_cap_info;
+ __le32 tx_BF_cap_info;
+ u8 antenna_selection_info;
+} __packed;
+
+/* 802.11n HT capabilities masks (for cap_info) */
+#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
+#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
+#define IEEE80211_HT_CAP_SM_PS 0x000C
+#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
+#define IEEE80211_HT_CAP_GRN_FLD 0x0010
+#define IEEE80211_HT_CAP_SGI_20 0x0020
+#define IEEE80211_HT_CAP_SGI_40 0x0040
+#define IEEE80211_HT_CAP_TX_STBC 0x0080
+#define IEEE80211_HT_CAP_RX_STBC 0x0300
+#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8
+#define IEEE80211_HT_CAP_DELAY_BA 0x0400
+#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
+#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
+#define IEEE80211_HT_CAP_RESERVED 0x2000
+#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
+#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
+
+/* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */
+#define IEEE80211_HT_EXT_CAP_PCO 0x0001
+#define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006
+#define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1
+#define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300
+#define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8
+#define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400
+#define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800
+
+/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
+#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
+#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
+#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
+
+/*
+ * Maximum length of an A-MPDU that the STA can receive in high-throughput (HT).
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_max_ampdu_length_exp {
+ IEEE80211_HT_MAX_AMPDU_8K = 0,
+ IEEE80211_HT_MAX_AMPDU_16K = 1,
+ IEEE80211_HT_MAX_AMPDU_32K = 2,
+ IEEE80211_HT_MAX_AMPDU_64K = 3
+};
+
+#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
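Applying the formula above (illustrative values):

	/*
	 * IEEE80211_HT_MAX_AMPDU_8K  -> 2^(13 + 0) - 1 = 8191 octets
	 * IEEE80211_HT_MAX_AMPDU_64K -> 2^(13 + 3) - 1 = 65535 octets
	 */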
+
+/* Minimum MPDU start spacing */
+enum ieee80211_min_mpdu_spacing {
+ IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */
+ IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */
+ IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */
+ IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */
+ IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */
+ IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */
+ IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */
+ IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */
+};
+
+/**
+ * struct ieee80211_ht_operation - HT operation IE
+ * @primary_chan: Primary Channel
+ * @ht_param: HT Operation Information parameters
+ * @operation_mode: HT Operation Information operation mode
+ * @stbc_param: HT Operation Information STBC params
+ * @basic_set: Basic HT-MCS Set
+ *
+ * This structure represents the payload of the "HT Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.56.
+ */
+struct ieee80211_ht_operation {
+ u8 primary_chan;
+ u8 ht_param;
+ __le16 operation_mode;
+ __le16 stbc_param;
+ u8 basic_set[16];
+} __packed;
+
+/* for ht_param */
+#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
+#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
+#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
+#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
+#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
+#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
+
+/* for operation_mode */
+#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
+#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
+#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
+#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
+#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5
+#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0
+
+/* for stbc_param */
+#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
+#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
+#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
+#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
+#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
+#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
+

+/* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001
+#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
+#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
+#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
+#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
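A sketch of decoding the negotiated buffer size from the ADDBA parameter field (not part of the patch; the helper name is hypothetical, and the shift of 6 is an assumption read off the 0xFFC0 mask, since no *_SHIFT macro is defined here):

	static inline u16 example_addba_buf_size(__le16 ba_params)
	{
		/* the buffer size occupies bits 6..15 of the parameter field */
		return (le16_to_cpu(ba_params) &
			IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
	}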
+
+/*
+ * A-MPDU buffer sizes
+ * According to HT, the size varies from 8 to 64 frames.
+ * HE adds the ability to have up to 256 frames.
+ * EHT adds the ability to have up to 1K frames.
+ */
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
+#define IEEE80211_MAX_AMPDU_BUF_HE 0x100
+#define IEEE80211_MAX_AMPDU_BUF_EHT 0x400
+
+/* Spatial Multiplexing Power Save Modes (for capability) */
+#define WLAN_HT_CAP_SM_PS_STATIC 0
+#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
+#define WLAN_HT_CAP_SM_PS_INVALID 2
+#define WLAN_HT_CAP_SM_PS_DISABLED 3
+
+/* for SM power control field lower two bits */
+#define WLAN_HT_SMPS_CONTROL_DISABLED 0
+#define WLAN_HT_SMPS_CONTROL_STATIC 1
+#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
+
+/* HT action codes */
+enum ieee80211_ht_actioncode {
+ WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
+ WLAN_HT_ACTION_SMPS = 1,
+ WLAN_HT_ACTION_PSMP = 2,
+ WLAN_HT_ACTION_PCO_PHASE = 3,
+ WLAN_HT_ACTION_CSI = 4,
+ WLAN_HT_ACTION_NONCOMPRESSED_BF = 5,
+ WLAN_HT_ACTION_COMPRESSED_BF = 6,
+ WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
+};
+
+/* BACK action code */
+enum ieee80211_back_actioncode {
+ WLAN_ACTION_ADDBA_REQ = 0,
+ WLAN_ACTION_ADDBA_RESP = 1,
+ WLAN_ACTION_DELBA = 2,
+};
+
+/* BACK (block-ack) parties */
+enum ieee80211_back_parties {
+ WLAN_BACK_RECIPIENT = 0,
+ WLAN_BACK_INITIATOR = 1,
+};
+
+#endif /* LINUX_IEEE80211_HT_H */
diff --git a/include/linux/ieee80211-mesh.h b/include/linux/ieee80211-mesh.h
new file mode 100644
index 000000000000..4b829bcb38b6
--- /dev/null
+++ b/include/linux/ieee80211-mesh.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 mesh definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_MESH_H
+#define LINUX_IEEE80211_MESH_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_MAX_MESH_ID_LEN 32
+
+struct ieee80211s_hdr {
+ u8 flags;
+ u8 ttl;
+ __le32 seqnum;
+ u8 eaddr1[ETH_ALEN];
+ u8 eaddr2[ETH_ALEN];
+} __packed __aligned(2);
+
+/* Mesh flags */
+#define MESH_FLAGS_AE_A4 0x1
+#define MESH_FLAGS_AE_A5_A6 0x2
+#define MESH_FLAGS_AE 0x3
+#define MESH_FLAGS_PS_DEEP 0x4
+
+/**
+ * enum ieee80211_preq_flags - mesh PREQ element flags
+ *
+ * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield
+ */
+enum ieee80211_preq_flags {
+ IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2,
+};
+
+/**
+ * enum ieee80211_preq_target_flags - mesh PREQ element per target flags
+ *
+ * @IEEE80211_PREQ_TO_FLAG: target only subfield
+ * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield
+ */
+enum ieee80211_preq_target_flags {
+ IEEE80211_PREQ_TO_FLAG = 1<<0,
+ IEEE80211_PREQ_USN_FLAG = 1<<2,
+};
+
+/**
+ * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
+ * @mesh_ttl: Time To Live
+ * @mesh_flags: Flags
+ * @mesh_reason: Reason Code
+ * @mesh_pre_value: Precedence Value
+ *
+ * This structure represents the payload of the "Mesh Channel Switch
+ * Parameters element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.102.
+ */
+struct ieee80211_mesh_chansw_params_ie {
+ u8 mesh_ttl;
+ u8 mesh_flags;
+ __le16 mesh_reason;
+ __le16 mesh_pre_value;
+} __packed;
+
+/**
+ * struct ieee80211_meshconf_ie - Mesh Configuration element
+ * @meshconf_psel: Active Path Selection Protocol Identifier
+ * @meshconf_pmetric: Active Path Selection Metric Identifier
+ * @meshconf_congest: Congestion Control Mode Identifier
+ * @meshconf_synch: Synchronization Method Identifier
+ * @meshconf_auth: Authentication Protocol Identifier
+ * @meshconf_form: Mesh Formation Info
+ * @meshconf_cap: Mesh Capability (see &enum mesh_config_capab_flags)
+ *
+ * This structure represents the payload of the "Mesh Configuration
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.97.
+ */
+struct ieee80211_meshconf_ie {
+ u8 meshconf_psel;
+ u8 meshconf_pmetric;
+ u8 meshconf_congest;
+ u8 meshconf_synch;
+ u8 meshconf_auth;
+ u8 meshconf_form;
+ u8 meshconf_cap;
+} __packed;
+
+/**
+ * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags
+ *
+ * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
+ * additional mesh peerings with other mesh STAs
+ * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
+ * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure
+ * is ongoing
+ * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
+ * neighbors in deep sleep mode
+ *
+ * Enumerates the "Mesh Capability" as described in IEEE Std
+ * 802.11-2020 section 9.4.2.97.7.
+ */
+enum mesh_config_capab_flags {
+ IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01,
+ IEEE80211_MESHCONF_CAPAB_FORWARDING = 0x08,
+ IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING = 0x20,
+ IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
+};
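A usage sketch (hypothetical helper, not part of the patch) testing one capability bit of a received Mesh Configuration element:

	static inline bool
	example_mesh_sta_forwards(const struct ieee80211_meshconf_ie *conf)
	{
		/* true if the peer mesh STA forwards MSDUs */
		return conf->meshconf_cap & IEEE80211_MESHCONF_CAPAB_FORWARDING;
	}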
+
+#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
+
+/*
+ * mesh channel switch parameters element's flag indicator
+ */
+#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
+#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
+#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
+
+/**
+ * struct ieee80211_rann_ie - RANN (root announcement) element
+ * @rann_flags: Flags
+ * @rann_hopcount: Hop Count
+ * @rann_ttl: Element TTL
+ * @rann_addr: Root Mesh STA Address
+ * @rann_seq: HWMP Sequence Number
+ * @rann_interval: Interval
+ * @rann_metric: Metric
+ *
+ * This structure represents the payload of the "RANN element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.111.
+ */
+struct ieee80211_rann_ie {
+ u8 rann_flags;
+ u8 rann_hopcount;
+ u8 rann_ttl;
+ u8 rann_addr[ETH_ALEN];
+ __le32 rann_seq;
+ __le32 rann_interval;
+ __le32 rann_metric;
+} __packed;
+
+enum ieee80211_rann_flags {
+ RANN_FLAG_IS_GATE = 1 << 0,
+};
+
+/* Mesh action codes */
+enum ieee80211_mesh_actioncode {
+ WLAN_MESH_ACTION_LINK_METRIC_REPORT,
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION,
+ WLAN_MESH_ACTION_GATE_ANNOUNCEMENT,
+ WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION,
+ WLAN_MESH_ACTION_MCCA_SETUP_REQUEST,
+ WLAN_MESH_ACTION_MCCA_SETUP_REPLY,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT,
+ WLAN_MESH_ACTION_MCCA_TEARDOWN,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
+};
+
+/**
+ * enum ieee80211_mesh_sync_method - mesh synchronization method identifier
+ *
+ * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
+ * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
+ * that will be specified in a vendor specific information element
+ */
+enum ieee80211_mesh_sync_method {
+ IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
+ IEEE80211_SYNC_METHOD_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier
+ *
+ * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
+ * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
+ * be specified in a vendor specific information element
+ */
+enum ieee80211_mesh_path_protocol {
+ IEEE80211_PATH_PROTOCOL_HWMP = 1,
+ IEEE80211_PATH_PROTOCOL_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_mesh_path_metric - mesh path selection metric identifier
+ *
+ * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
+ * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
+ * specified in a vendor specific information element
+ */
+enum ieee80211_mesh_path_metric {
+ IEEE80211_PATH_METRIC_AIRTIME = 1,
+ IEEE80211_PATH_METRIC_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_root_mode_identifier - root mesh STA mode identifier
+ *
+ * These attribute are used by dot11MeshHWMPRootMode to set root mesh STA mode
+ *
+ * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default)
+ * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA if the value is
+ * greater than this
+ * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA that
+ * supports the proactive PREQ with the proactive PREP subfield set to 0
+ * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA that
+ * supports the proactive PREQ with the proactive PREP subfield set to 1
+ * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA that supports
+ * the proactive RANN
+ */
+enum ieee80211_root_mode_identifier {
+ IEEE80211_ROOTMODE_NO_ROOT = 0,
+ IEEE80211_ROOTMODE_ROOT = 1,
+ IEEE80211_PROACTIVE_PREQ_NO_PREP = 2,
+ IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3,
+ IEEE80211_PROACTIVE_RANN = 4,
+};
+
+#endif /* LINUX_IEEE80211_MESH_H */
diff --git a/include/linux/ieee80211-nan.h b/include/linux/ieee80211-nan.h
new file mode 100644
index 000000000000..d07959bf8a90
--- /dev/null
+++ b/include/linux/ieee80211-nan.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * WFA NAN definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_NAN_H
+#define LINUX_IEEE80211_NAN_H
+
+/* NAN operation mode, as defined in Wi-Fi Aware (TM) specification Table 81 */
+#define NAN_OP_MODE_PHY_MODE_VHT 0x01
+#define NAN_OP_MODE_PHY_MODE_HE 0x10
+#define NAN_OP_MODE_PHY_MODE_MASK 0x11
+#define NAN_OP_MODE_80P80MHZ 0x02
+#define NAN_OP_MODE_160MHZ 0x04
+#define NAN_OP_MODE_PNDL_SUPPORTED 0x08
+
+/* NAN Device capabilities, as defined in Wi-Fi Aware (TM) specification
+ * Table 79
+ */
+#define NAN_DEV_CAPA_DFS_OWNER 0x01
+#define NAN_DEV_CAPA_EXT_KEY_ID_SUPPORTED 0x02
+#define NAN_DEV_CAPA_SIM_NDP_RX_SUPPORTED 0x04
+#define NAN_DEV_CAPA_NDPE_SUPPORTED 0x08
+#define NAN_DEV_CAPA_S3_SUPPORTED 0x10
+
+#endif /* LINUX_IEEE80211_NAN_H */
diff --git a/include/linux/ieee80211-p2p.h b/include/linux/ieee80211-p2p.h
new file mode 100644
index 000000000000..180891c11f08
--- /dev/null
+++ b/include/linux/ieee80211-p2p.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * WFA P2P definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_P2P_H
+#define LINUX_IEEE80211_P2P_H
+
+#include <linux/types.h>
+/*
+ * Peer-to-Peer IE attribute related definitions.
+ */
+/*
+ * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
+ */
+enum ieee80211_p2p_attr_id {
+ IEEE80211_P2P_ATTR_STATUS = 0,
+ IEEE80211_P2P_ATTR_MINOR_REASON,
+ IEEE80211_P2P_ATTR_CAPABILITY,
+ IEEE80211_P2P_ATTR_DEVICE_ID,
+ IEEE80211_P2P_ATTR_GO_INTENT,
+ IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT,
+ IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+ IEEE80211_P2P_ATTR_GROUP_BSSID,
+ IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING,
+ IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR,
+ IEEE80211_P2P_ATTR_MANAGABILITY,
+ IEEE80211_P2P_ATTR_CHANNEL_LIST,
+ IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+ IEEE80211_P2P_ATTR_DEVICE_INFO,
+ IEEE80211_P2P_ATTR_GROUP_INFO,
+ IEEE80211_P2P_ATTR_GROUP_ID,
+ IEEE80211_P2P_ATTR_INTERFACE,
+ IEEE80211_P2P_ATTR_OPER_CHANNEL,
+ IEEE80211_P2P_ATTR_INVITE_FLAGS,
+ /* 19 - 220: Reserved */
+ IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221,
+
+ IEEE80211_P2P_ATTR_MAX
+};
+
+/* Notice of Absence attribute - described in P2P spec 4.1.14 */
+/* Typical max value used here */
+#define IEEE80211_P2P_NOA_DESC_MAX 4
+
+struct ieee80211_p2p_noa_desc {
+ u8 count;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct ieee80211_p2p_noa_attr {
+ u8 index;
+ u8 oppps_ctwindow;
+ struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX];
+} __packed;
+
+#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7)
+#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F
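A sketch (hypothetical helpers, not part of the patch) splitting the combined OppPS/CTWindow octet into its two fields:

	static inline bool
	example_p2p_oppps_enabled(const struct ieee80211_p2p_noa_attr *noa)
	{
		/* bit 7: opportunistic power save enabled */
		return noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT;
	}

	static inline u8
	example_p2p_ctwindow(const struct ieee80211_p2p_noa_attr *noa)
	{
		/* bits 0..6: the CTWindow value */
		return noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK;
	}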
+
+#endif /* LINUX_IEEE80211_P2P_H */
diff --git a/include/linux/ieee80211-s1g.h b/include/linux/ieee80211-s1g.h
new file mode 100644
index 000000000000..5b9ed2dcc00e
--- /dev/null
+++ b/include/linux/ieee80211-s1g.h
@@ -0,0 +1,575 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 S1G definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_S1G_H
+#define LINUX_IEEE80211_S1G_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/* bits unique to S1G beacon frame control */
+#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
+#define IEEE80211_S1G_BCN_CSSID 0x200
+#define IEEE80211_S1G_BCN_ANO 0x400
+
+/* see 802.11ah-2016 9.9 NDP CMAC frames */
+#define IEEE80211_S1G_1MHZ_NDP_BITS 25
+#define IEEE80211_S1G_1MHZ_NDP_BYTES 4
+#define IEEE80211_S1G_2MHZ_NDP_BITS 37
+#define IEEE80211_S1G_2MHZ_NDP_BYTES 5
+
+/**
+ * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT &&
+ * IEEE80211_STYPE_S1G_BEACON
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an S1G beacon
+ */
+static inline bool ieee80211_is_s1g_beacon(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE |
+ IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON);
+}
+
+/**
+ * ieee80211_s1g_has_next_tbtt - check if IEEE80211_S1G_BCN_NEXT_TBTT
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ * next TBTT field
+ */
+static inline bool ieee80211_s1g_has_next_tbtt(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT));
+}
+
+/**
+ * ieee80211_s1g_has_ano - check if IEEE80211_S1G_BCN_ANO
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ * ANO field
+ */
+static inline bool ieee80211_s1g_has_ano(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_ANO));
+}
+
+/**
+ * ieee80211_s1g_has_cssid - check if IEEE80211_S1G_BCN_CSSID
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ * compressed SSID field
+ */
+static inline bool ieee80211_s1g_has_cssid(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_CSSID));
+}
+
+/**
+ * enum ieee80211_s1g_chanwidth - S1G channel widths
+ * These are defined in IEEE Std 802.11ah-2016 Table 10-20
+ * as BSS Channel Width
+ *
+ * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel
+ */
+enum ieee80211_s1g_chanwidth {
+ IEEE80211_S1G_CHANWIDTH_1MHZ = 0,
+ IEEE80211_S1G_CHANWIDTH_2MHZ = 1,
+ IEEE80211_S1G_CHANWIDTH_4MHZ = 3,
+ IEEE80211_S1G_CHANWIDTH_8MHZ = 7,
+ IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
+};
+
+/**
+ * enum ieee80211_s1g_pri_chanwidth - S1G primary channel widths
+ * described in IEEE80211-2024 Table 10-39.
+ *
+ * @IEEE80211_S1G_PRI_CHANWIDTH_2MHZ: 2MHz primary channel
+ * @IEEE80211_S1G_PRI_CHANWIDTH_1MHZ: 1MHz primary channel
+ */
+enum ieee80211_s1g_pri_chanwidth {
+ IEEE80211_S1G_PRI_CHANWIDTH_2MHZ = 0,
+ IEEE80211_S1G_PRI_CHANWIDTH_1MHZ = 1,
+};
+
+/**
+ * struct ieee80211_s1g_bcn_compat_ie - S1G Beacon Compatibility element
+ * @compat_info: Compatibility Information
+ * @beacon_int: Beacon Interval
+ * @tsf_completion: TSF Completion
+ *
+ * This structure represents the payload of the "S1G Beacon
+ * Compatibility element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.196.
+ */
+struct ieee80211_s1g_bcn_compat_ie {
+ __le16 compat_info;
+ __le16 beacon_int;
+ __le32 tsf_completion;
+} __packed;
+
+/**
+ * struct ieee80211_s1g_oper_ie - S1G Operation element
+ * @ch_width: S1G Operation Information Channel Width
+ * @oper_class: S1G Operation Information Operating Class
+ * @primary_ch: S1G Operation Information Primary Channel Number
+ * @oper_ch: S1G Operation Information Channel Center Frequency
+ * @basic_mcs_nss: Basic S1G-MCS and NSS Set
+ *
+ * This structure represents the payload of the "S1G Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.212.
+ */
+struct ieee80211_s1g_oper_ie {
+ u8 ch_width;
+ u8 oper_class;
+ u8 primary_ch;
+ u8 oper_ch;
+ __le16 basic_mcs_nss;
+} __packed;
+
+/**
+ * struct ieee80211_aid_response_ie - AID Response element
+ * @aid: AID/Group AID
+ * @switch_count: AID Switch Count
+ * @response_int: AID Response Interval
+ *
+ * This structure represents the payload of the "AID Response element"
+ * as described in IEEE Std 802.11-2020 section 9.4.2.194.
+ */
+struct ieee80211_aid_response_ie {
+ __le16 aid;
+ u8 switch_count;
+ __le16 response_int;
+} __packed;
+
+struct ieee80211_s1g_cap {
+ u8 capab_info[10];
+ u8 supp_mcs_nss[5];
+} __packed;
+
+/**
+ * ieee80211_s1g_optional_len - determine length of optional S1G beacon fields
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: total length in bytes of the optional fixed-length fields
+ *
+ * S1G beacons may contain up to three optional fixed-length fields that
+ * precede the variable-length elements. Whether these fields are present
+ * is indicated by flags in the frame control field.
+ *
+ * From IEEE 802.11-2024 section 9.3.4.3:
+ * - Next TBTT field may be 0 or 3 bytes
+ * - Short SSID field may be 0 or 4 bytes
+ * - Access Network Options (ANO) field may be 0 or 1 byte
+ */
+static inline size_t
+ieee80211_s1g_optional_len(__le16 fc)
+{
+ size_t len = 0;
+
+ if (ieee80211_s1g_has_next_tbtt(fc))
+ len += 3;
+
+ if (ieee80211_s1g_has_cssid(fc))
+ len += 4;
+
+ if (ieee80211_s1g_has_ano(fc))
+ len += 1;
+
+ return len;
+}
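A usage sketch (hypothetical helper, not part of the patch; assumes the caller already has a pointer just past the mandatory fixed fields of the S1G beacon):

	static inline const u8 *
	example_s1g_first_elem(__le16 fc, const u8 *after_fixed)
	{
		/* skip the 0-8 bytes of optional fixed-length fields */
		return after_fixed + ieee80211_s1g_optional_len(fc);
	}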
+
+/* S1G Capabilities Information field */
+#define IEEE80211_S1G_CAPABILITY_LEN 15
+
+#define S1G_CAP0_S1G_LONG BIT(0)
+#define S1G_CAP0_SGI_1MHZ BIT(1)
+#define S1G_CAP0_SGI_2MHZ BIT(2)
+#define S1G_CAP0_SGI_4MHZ BIT(3)
+#define S1G_CAP0_SGI_8MHZ BIT(4)
+#define S1G_CAP0_SGI_16MHZ BIT(5)
+#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
+
+#define S1G_SUPP_CH_WIDTH_2 0
+#define S1G_SUPP_CH_WIDTH_4 1
+#define S1G_SUPP_CH_WIDTH_8 2
+#define S1G_SUPP_CH_WIDTH_16 3
+#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
+ cap[0])) << 1)
+
+#define S1G_CAP1_RX_LDPC BIT(0)
+#define S1G_CAP1_TX_STBC BIT(1)
+#define S1G_CAP1_RX_STBC BIT(2)
+#define S1G_CAP1_SU_BFER BIT(3)
+#define S1G_CAP1_SU_BFEE BIT(4)
+#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
+
+#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
+#define S1G_CAP2_MU_BFER BIT(3)
+#define S1G_CAP2_MU_BFEE BIT(4)
+#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
+#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
+
+#define S1G_CAP3_RD_RESPONDER BIT(0)
+#define S1G_CAP3_HT_DELAYED_BA BIT(1)
+#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
+#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
+#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
+
+#define S1G_CAP4_UPLINK_SYNC BIT(0)
+#define S1G_CAP4_DYNAMIC_AID BIT(1)
+#define S1G_CAP4_BAT BIT(2)
+#define S1G_CAP4_TIME_ADE BIT(3)
+#define S1G_CAP4_NON_TIM BIT(4)
+#define S1G_CAP4_GROUP_AID BIT(5)
+#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
+
+#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
+#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
+#define S1G_CAP5_AMSDU BIT(2)
+#define S1G_CAP5_AMPDU BIT(3)
+#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
+#define S1G_CAP5_FLOW_CONTROL BIT(5)
+#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
+
+#define S1G_CAP6_OBSS_MITIGATION BIT(0)
+#define S1G_CAP6_FRAGMENT_BA BIT(1)
+#define S1G_CAP6_NDP_PS_POLL BIT(2)
+#define S1G_CAP6_RAW_OPERATION BIT(3)
+#define S1G_CAP6_PAGE_SLICING BIT(4)
+#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
+#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
+
+#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
+#define S1G_CAP7_DUP_1MHZ BIT(1)
+#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
+#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
+#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
+#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
+#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
+#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
+
+#define S1G_CAP8_TWT_GROUPING BIT(0)
+#define S1G_CAP8_BDT BIT(1)
+#define S1G_CAP8_COLOR GENMASK(4, 2)
+#define S1G_CAP8_TWT_REQUEST BIT(5)
+#define S1G_CAP8_TWT_RESPOND BIT(6)
+#define S1G_CAP8_PV1_FRAME BIT(7)
+
+#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+
+#define S1G_OPER_CH_WIDTH_PRIMARY BIT(0)
+#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
+#define S1G_OPER_CH_PRIMARY_LOCATION BIT(5)
+
+#define S1G_2M_PRIMARY_LOCATION_LOWER 0
+#define S1G_2M_PRIMARY_LOCATION_UPPER 1
+
+#define LISTEN_INT_USF GENMASK(15, 14)
+#define LISTEN_INT_UI GENMASK(13, 0)
+
+#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF)
+#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI)
+
+/* S1G encoding types */
+#define IEEE80211_S1G_TIM_ENC_MODE_BLOCK 0
+#define IEEE80211_S1G_TIM_ENC_MODE_SINGLE 1
+#define IEEE80211_S1G_TIM_ENC_MODE_OLB 2
+
+enum ieee80211_s1g_actioncode {
+ WLAN_S1G_AID_SWITCH_REQUEST,
+ WLAN_S1G_AID_SWITCH_RESPONSE,
+ WLAN_S1G_SYNC_CONTROL,
+ WLAN_S1G_STA_INFO_ANNOUNCE,
+ WLAN_S1G_EDCA_PARAM_SET,
+ WLAN_S1G_EL_OPERATION,
+ WLAN_S1G_TWT_SETUP,
+ WLAN_S1G_TWT_TEARDOWN,
+ WLAN_S1G_SECT_GROUP_ID_LIST,
+ WLAN_S1G_SECT_ID_FEEDBACK,
+ WLAN_S1G_TWT_INFORMATION = 11,
+};
+
+/**
+ * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
+ * @fc: frame control bytes in little-endian byteorder
+ * @variable: pointer to the beacon frame elements
+ * @variable_len: length of the frame elements
+ * Return: whether or not the frame is an S1G short beacon. As per
+ * IEEE Std 802.11-2024 section 11.1.3.10.1, the S1G beacon compatibility element shall
+ * always be present as the first element in beacon frames generated at a
+ * TBTT (Target Beacon Transmission Time), so any frame not containing
+ * this element must have been generated at a TSBTT (Target Short Beacon
+ * Transmission Time) that is not a TBTT. Additionally, short beacons are
+ * prohibited from containing the S1G beacon compatibility element as per
+ * IEEE Std 802.11-2024 section 9.3.4.3, Table 9-76, so if we have an S1G beacon with
+ * either no elements or the first element is not the beacon compatibility
+ * element, we have a short beacon.
+ */
+static inline bool ieee80211_is_s1g_short_beacon(__le16 fc, const u8 *variable,
+ size_t variable_len)
+{
+ if (!ieee80211_is_s1g_beacon(fc))
+ return false;
+
+ /*
+ * If the frame does not contain at least one element (which is
+ * perfectly valid in a short beacon), it must be a short beacon;
+ * the S1G beacon check was already done above.
+ */
+ if (variable_len < 2)
+ return true;
+
+ return variable[0] != WLAN_EID_S1G_BCN_COMPAT;
+}
+
+struct s1g_tim_aid {
+ u16 aid;
+ u8 target_blk; /* Target block index */
+ u8 target_subblk; /* Target subblock index */
+ u8 target_subblk_bit; /* Target subblock bit */
+};
+
+struct s1g_tim_enc_block {
+ u8 enc_mode;
+ bool inverse;
+ const u8 *ptr;
+ u8 len;
+
+ /*
+ * For an OLB encoded block that spans multiple blocks, this
+ * is the offset into the span described by that encoded block.
+ */
+ u8 olb_blk_offset;
+};
+
+/*
+ * Helper routines to quickly extract the length of an encoded block. Validation
+ * is also performed to ensure the length extracted lies within the TIM.
+ */
+
+static inline int ieee80211_s1g_len_bitmap(const u8 *ptr, const u8 *end)
+{
+ u8 blkmap;
+ u8 n_subblks;
+
+ if (ptr >= end)
+ return -EINVAL;
+
+ blkmap = *ptr;
+ n_subblks = hweight8(blkmap);
+
+ if (ptr + 1 + n_subblks > end)
+ return -EINVAL;
+
+ return 1 + n_subblks;
+}
+
+static inline int ieee80211_s1g_len_single(const u8 *ptr, const u8 *end)
+{
+ return (ptr + 1 > end) ? -EINVAL : 1;
+}
+
+static inline int ieee80211_s1g_len_olb(const u8 *ptr, const u8 *end)
+{
+ if (ptr >= end)
+ return -EINVAL;
+
+ return (ptr + 1 + *ptr > end) ? -EINVAL : 1 + *ptr;
+}
+
+/*
+ * Enumerate all encoded blocks until we find the encoded block that describes
+ * our target AID. OLB is a special case, as a single encoded block can
+ * describe multiple blocks.
+ */
+static inline int ieee80211_s1g_find_target_block(struct s1g_tim_enc_block *enc,
+ const struct s1g_tim_aid *aid,
+ const u8 *ptr, const u8 *end)
+{
+ /* need at least block-control octet */
+ while (ptr + 1 <= end) {
+ u8 ctrl = *ptr++;
+ u8 mode = ctrl & 0x03;
+ bool contains, inverse = ctrl & BIT(2);
+ u8 span, blk_off = ctrl >> 3;
+ int len;
+
+ switch (mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ len = ieee80211_s1g_len_bitmap(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ len = ieee80211_s1g_len_single(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ len = ieee80211_s1g_len_olb(ptr, end);
+ /*
+ * An OLB encoded block can describe more than one
+ * block, i.e. a single encoded block can span
+ * multiple blocks.
+ */
+ if (len > 0) {
+ /* Minus one for the length octet */
+ span = DIV_ROUND_UP(len - 1, 8);
+ /*
+ * Check if our target block lies within the
+ * block span described by this encoded block.
+ */
+ contains = (aid->target_blk >= blk_off) &&
+ (aid->target_blk < blk_off + span);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (len < 0)
+ return len;
+
+ if (contains) {
+ enc->enc_mode = mode;
+ enc->inverse = inverse;
+ enc->ptr = ptr;
+ enc->len = (u8)len;
+ enc->olb_blk_offset = blk_off;
+ return 0;
+ }
+
+ ptr += len;
+ }
+
+ return -ENOENT;
+}
+
+static inline bool ieee80211_s1g_parse_bitmap(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blkmap = *ptr++;
+
+ /*
+ * If our block bitmap does not contain a set bit that corresponds
+ * to our AID, it could mean one of two things depending on whether
+ * the encoding mode is inverted.
+ *
+ * 1. If inverted, it means the entire subblock is present and hence
+ * our AID has been set.
+ * 2. If not inverted, it means our subblock is not present and hence
+ * it is all zero meaning our AID is not set.
+ */
+ if (!(blkmap & BIT(aid->target_subblk)))
+ return enc->inverse;
+
+ /*
+ * Increment ptr by the number of set subblocks that appear before our
+ * target subblock. If our target subblock is 0, do nothing as ptr
+ * already points to our target subblock.
+ */
+ if (aid->target_subblk)
+ ptr += hweight8(blkmap & GENMASK(aid->target_subblk - 1, 0));
+
+ return !!(*ptr & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_single(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ /*
+ * Single AID mode describes, as the name suggests, a single AID
+ * within the block described by the encoded block. The octet
+ * contains the 6 LSBs of the AID described in the block. The other
+ * 2 bits are reserved. When inverted, every AID described by the
+ * current block has buffered traffic except for the AID described
+ * in the single AID octet.
+ */
+ return ((*enc->ptr & 0x3f) == (aid->aid & 0x3f)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_olb(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blk_len = *ptr++;
+ /*
+ * Given an OLB encoded block that describes multiple blocks,
+ * calculate the offset into the span. Then calculate the
+ * subblock location normally.
+ */
+ u16 span_offset = aid->target_blk - enc->olb_blk_offset;
+ u16 subblk_idx = span_offset * 8 + aid->target_subblk;
+
+ if (subblk_idx >= blk_len)
+ return enc->inverse;
+
+ return !!(ptr[subblk_idx] & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+/*
+ * An S1G PVB has 3 non-optional encoding types, each of which can be inverted.
+ * An S1G PVB is constructed with zero or more encoded block subfields. Each
+ * encoded block represents a single "block" of AIDs (64), and each encoded
+ * block can contain one of the 3 encoding types alongside a single bit for
+ * whether the bits should be inverted.
+ *
+ * As the standard makes no guarantee about the ordering of encoded blocks,
+ * we must parse every encoded block in the worst case scenario given an
+ * AID that lies within the last block.
+ */
+static inline bool ieee80211_s1g_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
+{
+ int err;
+ struct s1g_tim_aid target_aid;
+ struct s1g_tim_enc_block enc_blk;
+
+ if (tim_len < 3)
+ return false;
+
+ target_aid.aid = aid;
+ target_aid.target_blk = (aid >> 6) & 0x1f;
+ target_aid.target_subblk = (aid >> 3) & 0x7;
+ target_aid.target_subblk_bit = aid & 0x7;
+
+ /*
+ * Find our AIDs target encoded block and fill &enc_blk with the
+ * encoded blocks information. If no entry is found or an error
+ * occurs return false.
+ */
+ err = ieee80211_s1g_find_target_block(&enc_blk, &target_aid,
+ tim->virtual_map,
+ (const u8 *)tim + tim_len + 2);
+ if (err)
+ return false;
+
+ switch (enc_blk.enc_mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ return ieee80211_s1g_parse_bitmap(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ return ieee80211_s1g_parse_single(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ return ieee80211_s1g_parse_olb(&enc_blk, &target_aid);
+ default:
+ return false;
+ }
+}
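A worked usage sketch (hypothetical helper, not part of the patch): for AID 493 (0x1ed) the decomposition above gives block (493 >> 6) & 0x1f = 7, subblock (493 >> 3) & 0x7 = 5 and bit 493 & 0x7 = 5, and the call resolves that bit against the encoded PVB:

	static inline bool
	example_s1g_aid_buffered(const struct ieee80211_tim_ie *tim, u8 tim_len)
	{
		/* true if the TIM indicates buffered traffic for AID 493 */
		return ieee80211_s1g_check_tim(tim, tim_len, 493);
	}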
+
+#endif /* LINUX_IEEE80211_S1G_H */
diff --git a/include/linux/ieee80211-vht.h b/include/linux/ieee80211-vht.h
new file mode 100644
index 000000000000..898dfb561fef
--- /dev/null
+++ b/include/linux/ieee80211-vht.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 VHT definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_VHT_H
+#define LINUX_IEEE80211_VHT_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
+#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
+#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
+
+/**
+ * enum ieee80211_vht_opmode_bits - VHT operating mode field bits
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
+ * (the NSS value is the value of this field + 1)
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU
+ * using a beamforming steering matrix
+ */
+enum ieee80211_vht_opmode_bits {
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
+ IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
+};
+
+/*
+ * Maximum length of AMPDU that the STA can receive in VHT.
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_vht_max_ampdu_length_exp {
+ IEEE80211_VHT_MAX_AMPDU_8K = 0,
+ IEEE80211_VHT_MAX_AMPDU_16K = 1,
+ IEEE80211_VHT_MAX_AMPDU_32K = 2,
+ IEEE80211_VHT_MAX_AMPDU_64K = 3,
+ IEEE80211_VHT_MAX_AMPDU_128K = 4,
+ IEEE80211_VHT_MAX_AMPDU_256K = 5,
+ IEEE80211_VHT_MAX_AMPDU_512K = 6,
+ IEEE80211_VHT_MAX_AMPDU_1024K = 7
+};
+
+/**
+ * struct ieee80211_vht_mcs_info - VHT MCS information
+ * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
+ * @rx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can receive. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest RX data rate supported.
+ * The top 3 bits of this field indicate the Maximum NSTS,total
+ * (a beamformee capability).
+ * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
+ * @tx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can transmit. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest TX data rate supported.
+ * The top 2 bits of this field are reserved, and the
+ * 3rd bit from the top indicates VHT Extended NSS BW
+ * Capability.
+ */
+struct ieee80211_vht_mcs_info {
+ __le16 rx_mcs_map;
+ __le16 rx_highest;
+ __le16 tx_mcs_map;
+ __le16 tx_highest;
+} __packed;
+
+/* for rx_highest */
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
+
+/* for tx_highest */
+#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
+
+/**
+ * enum ieee80211_vht_mcs_support - VHT MCS support definitions
+ * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported
+ * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the @rx_mcs_map
+ * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_vht_mcs_support {
+ IEEE80211_VHT_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_VHT_MCS_SUPPORT_0_8 = 1,
+ IEEE80211_VHT_MCS_SUPPORT_0_9 = 2,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED = 3,
+};
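A sketch (hypothetical helper, not part of the patch) pulling the 2-bit subfield for a given stream count out of an MCS map word:

	static inline enum ieee80211_vht_mcs_support
	example_vht_mcs_for_nss(__le16 mcs_map, int nss)
	{
		/* nss is 1..8; each stream count gets 2 bits in the map */
		return (le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 0x3;
	}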
+
+/**
+ * struct ieee80211_vht_cap - VHT capabilities
+ *
+ * This structure is the "VHT capabilities element" as
+ * described in 802.11ac D3.0 8.4.2.160
+ * @vht_cap_info: VHT capability info
+ * @supp_mcs: VHT MCS supported rates
+ */
+struct ieee80211_vht_cap {
+ __le32 vht_cap_info;
+ struct ieee80211_vht_mcs_info supp_mcs;
+} __packed;
+
+/**
+ * enum ieee80211_vht_chanwidth - VHT channel width
+ * @IEEE80211_VHT_CHANWIDTH_USE_HT: use the HT operation IE to
+ * determine the channel width (20 or 40 MHz)
+ * @IEEE80211_VHT_CHANWIDTH_80MHZ: 80 MHz bandwidth
+ * @IEEE80211_VHT_CHANWIDTH_160MHZ: 160 MHz bandwidth
+ * @IEEE80211_VHT_CHANWIDTH_80P80MHZ: 80+80 MHz bandwidth
+ */
+enum ieee80211_vht_chanwidth {
+ IEEE80211_VHT_CHANWIDTH_USE_HT = 0,
+ IEEE80211_VHT_CHANWIDTH_80MHZ = 1,
+ IEEE80211_VHT_CHANWIDTH_160MHZ = 2,
+ IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3,
+};
+
+/**
+ * struct ieee80211_vht_operation - VHT operation IE
+ *
+ * This structure is the "VHT operation element" as
+ * described in 802.11ac D3.0 8.4.2.161
+ * @chan_width: Operating channel width
+ * @center_freq_seg0_idx: center freq segment 0 index
+ * @center_freq_seg1_idx: center freq segment 1 index
+ * @basic_mcs_set: VHT Basic MCS rate set
+ */
+struct ieee80211_vht_operation {
+ u8 chan_width;
+ u8 center_freq_seg0_idx;
+ u8 center_freq_seg1_idx;
+ __le16 basic_mcs_set;
+} __packed;
+
+/* 802.11ac VHT Capabilities */
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
+#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
+#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
+#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
+#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
+#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
+#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
+#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
+#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
+#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
+#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
+#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
+#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
+#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK \
+ (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT)
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \
+ (7 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT)
+#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
+#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
+#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
+#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \
+ (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT)
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
+#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
+#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
+
+/**
+ * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
+ * @cap: VHT capabilities of the peer
+ * @bw: bandwidth to use
+ * @mcs: MCS index to use
+ * @ext_nss_bw_capable: indicates whether or not the local transmitter
+ * (rate scaling algorithm) can deal with the new logic
+ * (dot11VHTExtendedNSSBWCapable)
+ * @max_vht_nss: current maximum NSS as advertised by the STA in
+ * operating mode notification, can be 0 in which case the
+ * capability data will be used to derive this (from MCS support)
+ * Return: The maximum NSS that can be used for the given bandwidth/MCS
+ * combination
+ *
+ * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
+ * vary for a given BW/MCS. This function parses the data.
+ *
+ * Note: This function is exported by cfg80211.
+ */
+int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
+ enum ieee80211_vht_chanwidth bw,
+ int mcs, bool ext_nss_bw_capable,
+ unsigned int max_vht_nss);
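A call sketch (hypothetical wrapper, not part of the patch); passing 0 for @max_vht_nss lets the capability data determine the result:

	static inline int
	example_peer_max_nss(struct ieee80211_vht_cap *peer_vht_cap)
	{
		/* maximum NSS for an 80 MHz, MCS 9 transmission */
		return ieee80211_get_vht_max_nss(peer_vht_cap,
						 IEEE80211_VHT_CHANWIDTH_80MHZ,
						 9, true, 0);
	}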
+
+/* VHT action codes */
+enum ieee80211_vht_actioncode {
+ WLAN_VHT_ACTION_COMPRESSED_BF = 0,
+ WLAN_VHT_ACTION_GROUPID_MGMT = 1,
+ WLAN_VHT_ACTION_OPMODE_NOTIF = 2,
+};
+
+#endif /* LINUX_IEEE80211_VHT_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 55a604ad459f..96439de55f07 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* IEEE 802.11 defines
*
@@ -8,10 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (c) 2018 - 2025 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -20,8 +18,9 @@
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/*
* DS bit usage
@@ -44,6 +43,7 @@
#define IEEE80211_FCTL_VERS 0x0003
#define IEEE80211_FCTL_FTYPE 0x000c
#define IEEE80211_FCTL_STYPE 0x00f0
+#define IEEE80211_FCTL_TYPE (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)
#define IEEE80211_FCTL_TODS 0x0100
#define IEEE80211_FCTL_FROMDS 0x0200
#define IEEE80211_FCTL_MOREFRAGS 0x0400
@@ -77,6 +77,7 @@
#define IEEE80211_STYPE_ACTION 0x00D0
/* control */
+#define IEEE80211_STYPE_TRIGGER 0x0020
#define IEEE80211_STYPE_CTL_EXT 0x0060
#define IEEE80211_STYPE_BACK_REQ 0x0080
#define IEEE80211_STYPE_BACK 0x0090
@@ -107,6 +108,42 @@
/* extension, added by 802.11ad */
#define IEEE80211_STYPE_DMG_BEACON 0x0000
+#define IEEE80211_STYPE_S1G_BEACON 0x0010
+
+#define IEEE80211_NDP_FTYPE_CTS 0
+#define IEEE80211_NDP_FTYPE_CF_END 0
+#define IEEE80211_NDP_FTYPE_PS_POLL 1
+#define IEEE80211_NDP_FTYPE_ACK 2
+#define IEEE80211_NDP_FTYPE_PS_POLL_ACK 3
+#define IEEE80211_NDP_FTYPE_BA 4
+#define IEEE80211_NDP_FTYPE_BF_REPORT_POLL 5
+#define IEEE80211_NDP_FTYPE_PAGING 6
+#define IEEE80211_NDP_FTYPE_PREQ 7
+
+#define SM64(f, v) ((((u64)v) << f##_S) & f)
+
+/* NDP CMAC frame fields */
+#define IEEE80211_NDP_FTYPE 0x0000000000000007
+#define IEEE80211_NDP_FTYPE_S 0x0000000000000000
+
+/* 1M Probe Request 11ah 9.9.3.1.1 */
+#define IEEE80211_NDP_1M_PREQ_ANO 0x0000000000000008
+#define IEEE80211_NDP_1M_PREQ_ANO_S 3
+#define IEEE80211_NDP_1M_PREQ_CSSID 0x00000000000FFFF0
+#define IEEE80211_NDP_1M_PREQ_CSSID_S 4
+#define IEEE80211_NDP_1M_PREQ_RTYPE 0x0000000000100000
+#define IEEE80211_NDP_1M_PREQ_RTYPE_S 20
+#define IEEE80211_NDP_1M_PREQ_RSV 0x0000000001E00000
+#define IEEE80211_NDP_1M_PREQ_RSV_S 21
+/* 2M Probe Request 11ah 9.9.3.1.2 */
+#define IEEE80211_NDP_2M_PREQ_ANO 0x0000000000000008
+#define IEEE80211_NDP_2M_PREQ_ANO_S 3
+#define IEEE80211_NDP_2M_PREQ_CSSID 0x0000000FFFFFFFF0
+#define IEEE80211_NDP_2M_PREQ_CSSID_S 4
+#define IEEE80211_NDP_2M_PREQ_RTYPE 0x0000001000000000
+#define IEEE80211_NDP_2M_PREQ_RTYPE_S 36
+
+#define IEEE80211_ANO_NETTYPE_WILD 15
/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
#define IEEE80211_CTL_EXT_POLL 0x2000
@@ -123,11 +160,31 @@
#define IEEE80211_MAX_SN IEEE80211_SN_MASK
#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
+
+/* PV1 Layout IEEE 802.11-2020 9.8.3.1 */
+#define IEEE80211_PV1_FCTL_VERS 0x0003
+#define IEEE80211_PV1_FCTL_FTYPE 0x001c
+#define IEEE80211_PV1_FCTL_STYPE 0x00e0
+#define IEEE80211_PV1_FCTL_FROMDS 0x0100
+#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200
+#define IEEE80211_PV1_FCTL_PM 0x0400
+#define IEEE80211_PV1_FCTL_MOREDATA 0x0800
+#define IEEE80211_PV1_FCTL_PROTECTED 0x1000
+#define IEEE80211_PV1_FCTL_END_SP 0x2000
+#define IEEE80211_PV1_FCTL_RELAYED 0x4000
+#define IEEE80211_PV1_FCTL_ACK_POLICY 0x8000
+#define IEEE80211_PV1_FCTL_CTL_EXT 0x0f00
+
static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
{
return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
}
+static inline bool ieee80211_sn_less_eq(u16 sn1, u16 sn2)
+{
+ return ((sn2 - sn1) & IEEE80211_SN_MASK) <= (IEEE80211_SN_MODULO >> 1);
+}
+
static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2)
{
return (sn1 + sn2) & IEEE80211_SN_MASK;
@@ -150,8 +207,10 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_MAX_FRAG_THRESHOLD 2352
#define IEEE80211_MAX_RTS_THRESHOLD 2353
#define IEEE80211_MAX_AID 2007
+#define IEEE80211_MAX_AID_S1G 8191
#define IEEE80211_MAX_TIM_LEN 251
#define IEEE80211_MAX_MESH_PEERINGS 63
+
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
6.2.1.1.2.
@@ -165,21 +224,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
#define IEEE80211_MAX_FRAME_LEN 2352
-/* Maximal size of an A-MSDU that can be transported in a HT BA session */
-#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095
-
-/* Maximal size of an A-MSDU */
-#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
-#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
-
-#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
-#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
-#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
-
#define IEEE80211_MAX_SSID_LEN 32
-#define IEEE80211_MAX_MESH_ID_LEN 32
-
#define IEEE80211_FIRST_TSPEC_TSID 8
#define IEEE80211_NUM_TIDS 16
@@ -230,14 +276,32 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03
#define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5
-#define IEEE80211_HT_CTL_LEN 4
+/* trigger type within common_info of trigger frame */
+#define IEEE80211_TRIGGER_TYPE_MASK 0xf
+#define IEEE80211_TRIGGER_TYPE_BASIC 0x0
+#define IEEE80211_TRIGGER_TYPE_BFRP 0x1
+#define IEEE80211_TRIGGER_TYPE_MU_BAR 0x2
+#define IEEE80211_TRIGGER_TYPE_MU_RTS 0x3
+#define IEEE80211_TRIGGER_TYPE_BSRP 0x4
+#define IEEE80211_TRIGGER_TYPE_GCR_MU_BAR 0x5
+#define IEEE80211_TRIGGER_TYPE_BQRP 0x6
+#define IEEE80211_TRIGGER_TYPE_NFRP 0x7
+
+/* UL-bandwidth within common_info of trigger frame */
+#define IEEE80211_TRIGGER_ULBW_MASK 0xc0000
+#define IEEE80211_TRIGGER_ULBW_20MHZ 0x0
+#define IEEE80211_TRIGGER_ULBW_40MHZ 0x1
+#define IEEE80211_TRIGGER_ULBW_80MHZ 0x2
+#define IEEE80211_TRIGGER_ULBW_160_80P80MHZ 0x3
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
+ struct_group(addrs,
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ );
__le16 seq_ctrl;
u8 addr4[ETH_ALEN];
} __packed __aligned(2);
@@ -261,9 +325,30 @@ struct ieee80211_qos_hdr {
__le16 qos_ctrl;
} __packed __aligned(2);
+struct ieee80211_qos_hdr_4addr {
+ __le16 frame_control;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed __aligned(2);
+
+struct ieee80211_trigger {
+ __le16 frame_control;
+ __le16 duration;
+ u8 ra[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ __le64 common_info;
+ u8 variable[];
+} __packed __aligned(2);
+
/**
* ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has to-DS set
*/
static inline bool ieee80211_has_tods(__le16 fc)
{
@@ -273,6 +358,7 @@ static inline bool ieee80211_has_tods(__le16 fc)
/**
* ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has from-DS set
*/
static inline bool ieee80211_has_fromds(__le16 fc)
{
@@ -282,6 +368,7 @@ static inline bool ieee80211_has_fromds(__le16 fc)
/**
* ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not it's a 4-address frame (from-DS and to-DS set)
*/
static inline bool ieee80211_has_a4(__le16 fc)
{
@@ -292,6 +379,7 @@ static inline bool ieee80211_has_a4(__le16 fc)
/**
* ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has more fragments (more frags bit set)
*/
static inline bool ieee80211_has_morefrags(__le16 fc)
{
@@ -301,6 +389,7 @@ static inline bool ieee80211_has_morefrags(__le16 fc)
/**
* ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the retry flag is set
*/
static inline bool ieee80211_has_retry(__le16 fc)
{
@@ -310,6 +399,7 @@ static inline bool ieee80211_has_retry(__le16 fc)
/**
* ieee80211_has_pm - check if IEEE80211_FCTL_PM is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the power management flag is set
*/
static inline bool ieee80211_has_pm(__le16 fc)
{
@@ -319,6 +409,7 @@ static inline bool ieee80211_has_pm(__le16 fc)
/**
* ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the more data flag is set
*/
static inline bool ieee80211_has_moredata(__le16 fc)
{
@@ -328,6 +419,7 @@ static inline bool ieee80211_has_moredata(__le16 fc)
/**
* ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the protected flag is set
*/
static inline bool ieee80211_has_protected(__le16 fc)
{
@@ -337,6 +429,7 @@ static inline bool ieee80211_has_protected(__le16 fc)
/**
* ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the order flag is set
*/
static inline bool ieee80211_has_order(__le16 fc)
{
@@ -346,6 +439,7 @@ static inline bool ieee80211_has_order(__le16 fc)
/**
* ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is management
*/
static inline bool ieee80211_is_mgmt(__le16 fc)
{
@@ -356,6 +450,7 @@ static inline bool ieee80211_is_mgmt(__le16 fc)
/**
* ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is control
*/
static inline bool ieee80211_is_ctl(__le16 fc)
{
@@ -366,6 +461,7 @@ static inline bool ieee80211_is_ctl(__le16 fc)
/**
* ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a data frame
*/
static inline bool ieee80211_is_data(__le16 fc)
{
@@ -374,8 +470,21 @@ static inline bool ieee80211_is_data(__le16 fc)
}
/**
+ * ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is extended
+ */
+static inline bool ieee80211_is_ext(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT);
+}
+
+/**
* ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS data frame
*/
static inline bool ieee80211_is_data_qos(__le16 fc)
{
@@ -390,6 +499,8 @@ static inline bool ieee80211_is_data_qos(__le16 fc)
/**
* ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS data frame that has data
+ * (i.e. is not null data)
*/
static inline bool ieee80211_is_data_present(__le16 fc)
{
@@ -404,6 +515,7 @@ static inline bool ieee80211_is_data_present(__le16 fc)
/**
* ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an association request
*/
static inline bool ieee80211_is_assoc_req(__le16 fc)
{
@@ -414,6 +526,7 @@ static inline bool ieee80211_is_assoc_req(__le16 fc)
/**
* ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an association response
*/
static inline bool ieee80211_is_assoc_resp(__le16 fc)
{
@@ -424,6 +537,7 @@ static inline bool ieee80211_is_assoc_resp(__le16 fc)
/**
* ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a reassociation request
*/
static inline bool ieee80211_is_reassoc_req(__le16 fc)
{
@@ -434,6 +548,7 @@ static inline bool ieee80211_is_reassoc_req(__le16 fc)
/**
* ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a reassociation response
*/
static inline bool ieee80211_is_reassoc_resp(__le16 fc)
{
@@ -444,6 +559,7 @@ static inline bool ieee80211_is_reassoc_resp(__le16 fc)
/**
* ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a probe request
*/
static inline bool ieee80211_is_probe_req(__le16 fc)
{
@@ -454,6 +570,7 @@ static inline bool ieee80211_is_probe_req(__le16 fc)
/**
* ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a probe response
*/
static inline bool ieee80211_is_probe_resp(__le16 fc)
{
@@ -464,6 +581,7 @@ static inline bool ieee80211_is_probe_resp(__le16 fc)
/**
* ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a (regular, not S1G) beacon
*/
static inline bool ieee80211_is_beacon(__le16 fc)
{
@@ -474,6 +592,7 @@ static inline bool ieee80211_is_beacon(__le16 fc)
/**
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an ATIM frame
*/
static inline bool ieee80211_is_atim(__le16 fc)
{
@@ -484,6 +603,7 @@ static inline bool ieee80211_is_atim(__le16 fc)
/**
* ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a disassociation frame
*/
static inline bool ieee80211_is_disassoc(__le16 fc)
{
@@ -494,6 +614,7 @@ static inline bool ieee80211_is_disassoc(__le16 fc)
/**
* ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an authentication frame
*/
static inline bool ieee80211_is_auth(__le16 fc)
{
@@ -504,6 +625,7 @@ static inline bool ieee80211_is_auth(__le16 fc)
/**
* ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a deauthentication frame
*/
static inline bool ieee80211_is_deauth(__le16 fc)
{
@@ -514,6 +636,7 @@ static inline bool ieee80211_is_deauth(__le16 fc)
/**
* ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an action frame
*/
static inline bool ieee80211_is_action(__le16 fc)
{
@@ -524,6 +647,7 @@ static inline bool ieee80211_is_action(__le16 fc)
/**
* ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a block-ACK request frame
*/
static inline bool ieee80211_is_back_req(__le16 fc)
{
@@ -534,6 +658,7 @@ static inline bool ieee80211_is_back_req(__le16 fc)
/**
* ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a block-ACK frame
*/
static inline bool ieee80211_is_back(__le16 fc)
{
@@ -544,6 +669,7 @@ static inline bool ieee80211_is_back(__le16 fc)
/**
* ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a PS-poll frame
*/
static inline bool ieee80211_is_pspoll(__le16 fc)
{
@@ -554,6 +680,7 @@ static inline bool ieee80211_is_pspoll(__le16 fc)
/**
* ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an RTS frame
*/
static inline bool ieee80211_is_rts(__le16 fc)
{
@@ -564,6 +691,7 @@ static inline bool ieee80211_is_rts(__le16 fc)
/**
* ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CTS frame
*/
static inline bool ieee80211_is_cts(__le16 fc)
{
@@ -574,6 +702,7 @@ static inline bool ieee80211_is_cts(__le16 fc)
/**
* ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an ACK frame
*/
static inline bool ieee80211_is_ack(__le16 fc)
{
@@ -584,6 +713,7 @@ static inline bool ieee80211_is_ack(__le16 fc)
/**
* ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CF-end frame
*/
static inline bool ieee80211_is_cfend(__le16 fc)
{
@@ -594,6 +724,7 @@ static inline bool ieee80211_is_cfend(__le16 fc)
/**
* ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CF-end-ack frame
*/
static inline bool ieee80211_is_cfendack(__le16 fc)
{
@@ -604,6 +735,7 @@ static inline bool ieee80211_is_cfendack(__le16 fc)
/**
* ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a nullfunc frame
*/
static inline bool ieee80211_is_nullfunc(__le16 fc)
{
@@ -614,6 +746,7 @@ static inline bool ieee80211_is_nullfunc(__le16 fc)
/**
* ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS nullfunc frame
*/
static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
{
@@ -622,22 +755,31 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
}
/**
- * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * ieee80211_is_trigger - check if frame is trigger frame
* @fc: frame control field in little-endian byteorder
+ * Return: whether or not the frame is a trigger frame
+ */
+static inline bool ieee80211_is_trigger(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_TRIGGER);
+}
+
+/**
+ * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a nullfunc or QoS nullfunc frame
*/
-static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
+static inline bool ieee80211_is_any_nullfunc(__le16 fc)
{
- /* IEEE 802.11-2012, definition of "bufferable management frame";
- * note that this ignores the IBSS special case. */
- return ieee80211_is_mgmt(fc) &&
- (ieee80211_is_action(fc) ||
- ieee80211_is_disassoc(fc) ||
- ieee80211_is_deauth(fc));
+ return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc));
}
/**
* ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
* @seq_ctrl: frame sequence control bytes in little-endian byteorder
+ * Return: whether or not the frame is the first fragment (also true if
+ * it's not fragmented at all)
*/
static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
{
@@ -647,6 +789,7 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
/**
* ieee80211_is_frag - check if a frame is a fragment
* @hdr: 802.11 header of the frame
+ * Return: whether or not the frame is a fragment
*/
static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
{
@@ -654,44 +797,20 @@ static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
}
-struct ieee80211s_hdr {
- u8 flags;
- u8 ttl;
- __le32 seqnum;
- u8 eaddr1[ETH_ALEN];
- u8 eaddr2[ETH_ALEN];
-} __packed __aligned(2);
-
-/* Mesh flags */
-#define MESH_FLAGS_AE_A4 0x1
-#define MESH_FLAGS_AE_A5_A6 0x2
-#define MESH_FLAGS_AE 0x3
-#define MESH_FLAGS_PS_DEEP 0x4
-
-/**
- * enum ieee80211_preq_flags - mesh PREQ element flags
- *
- * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield
- */
-enum ieee80211_preq_flags {
- IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2,
-};
-
-/**
- * enum ieee80211_preq_target_flags - mesh PREQ element per target flags
- *
- * @IEEE80211_PREQ_TO_FLAG: target only subfield
- * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield
- */
-enum ieee80211_preq_target_flags {
- IEEE80211_PREQ_TO_FLAG = 1<<0,
- IEEE80211_PREQ_USN_FLAG = 1<<2,
-};
+static inline u16 ieee80211_get_sn(struct ieee80211_hdr *hdr)
+{
+ return le16_get_bits(hdr->seq_ctrl, IEEE80211_SCTL_SEQ);
+}
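
ieee80211_get_sn() uses le16_get_bits() to pull the 12-bit sequence number out of the little-endian Sequence Control field. A companion accessor for the 4-bit fragment number would look like the sketch below; ieee80211_get_frag() is an illustrative name, not something this patch adds, and it assumes the IEEE80211_SCTL_FRAG mask defined earlier in this header.

/* Illustrative sketch only: extract the 4-bit fragment number from
 * the same Sequence Control field, mirroring ieee80211_get_sn().
 */
static inline u8 ieee80211_get_frag(struct ieee80211_hdr *hdr)
{
	return le16_get_bits(hdr->seq_ctrl, IEEE80211_SCTL_FRAG);
}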
/**
- * struct ieee80211_quiet_ie
+ * struct ieee80211_quiet_ie - Quiet element
+ * @count: Quiet Count
+ * @period: Quiet Period
+ * @duration: Quiet Duration
+ * @offset: Quiet Offset
*
- * This structure refers to "Quiet information element"
+ * This structure represents the payload of the "Quiet element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.22.
*/
struct ieee80211_quiet_ie {
u8 count;
@@ -701,21 +820,32 @@ struct ieee80211_quiet_ie {
} __packed;
/**
- * struct ieee80211_msrment_ie
+ * struct ieee80211_msrment_ie - Measurement element
+ * @token: Measurement Token
+ * @mode: Measurement Report Mode
+ * @type: Measurement Type
+ * @request: Measurement Request or Measurement Report
*
- * This structure refers to "Measurement Request/Report information element"
+ * This structure represents the payload of both the "Measurement
+ * Request element" and the "Measurement Report element" as described
+ * in IEEE Std 802.11-2020 sections 9.4.2.20 and 9.4.2.21.
*/
struct ieee80211_msrment_ie {
u8 token;
u8 mode;
u8 type;
- u8 request[0];
+ u8 request[];
} __packed;
/**
- * struct ieee80211_channel_sw_ie
+ * struct ieee80211_channel_sw_ie - Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
*
- * This structure refers to "Channel Switch Announcement information element"
+ * This structure represents the payload of the "Channel Switch
+ * Announcement element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.18.
*/
struct ieee80211_channel_sw_ie {
u8 mode;
@@ -724,9 +854,14 @@ struct ieee80211_channel_sw_ie {
} __packed;
/**
- * struct ieee80211_ext_chansw_ie
+ * struct ieee80211_ext_chansw_ie - Extended Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_operating_class: New Operating Class
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
*
- * This structure represents the "Extended Channel Switch Announcement element"
+ * This structure represents the "Extended Channel Switch Announcement
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.52.
*/
struct ieee80211_ext_chansw_ie {
u8 mode;
@@ -746,19 +881,14 @@ struct ieee80211_sec_chan_offs_ie {
} __packed;
/**
- * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
- *
- * This structure represents the "Mesh Channel Switch Paramters element"
- */
-struct ieee80211_mesh_chansw_params_ie {
- u8 mesh_ttl;
- u8 mesh_flags;
- __le16 mesh_reason;
- __le16 mesh_pre_value;
-} __packed;
-
-/**
* struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
+ * @new_channel_width: New Channel Width
+ * @new_center_freq_seg0: New Channel Center Frequency Segment 0
+ * @new_center_freq_seg1: New Channel Center Frequency Segment 1
+ *
+ * This structure represents the payload of the "Wide Bandwidth
+ * Channel Switch element" as described in IEEE Std 802.11-2020
+ * section 9.4.2.160.
*/
struct ieee80211_wide_bw_chansw_ie {
u8 new_channel_width;
@@ -766,119 +896,86 @@ struct ieee80211_wide_bw_chansw_ie {
} __packed;
/**
- * struct ieee80211_tim
+ * struct ieee80211_tim_ie - Traffic Indication Map information element
+ * @dtim_count: DTIM Count
+ * @dtim_period: DTIM Period
+ * @bitmap_ctrl: Bitmap Control
+ * @required_octet: "Syntactic sugar" to force the struct size to the
+ * minimum valid size when carried in a non-S1G PPDU
+ * @virtual_map: Partial Virtual Bitmap
*
- * This structure refers to "Traffic Indication Map information element"
+ * This structure represents the payload of the "TIM element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.5. Note that this
+ * definition is only applicable when the element is carried in a
+ * non-S1G PPDU. When the TIM is carried in an S1G PPDU, the Bitmap
+ * Control and Partial Virtual Bitmap may not be present.
*/
struct ieee80211_tim_ie {
u8 dtim_count;
u8 dtim_period;
u8 bitmap_ctrl;
- /* variable size: 1 - 251 bytes */
- u8 virtual_map[1];
-} __packed;
-
-/**
- * struct ieee80211_meshconf_ie
- *
- * This structure refers to "Mesh Configuration information element"
- */
-struct ieee80211_meshconf_ie {
- u8 meshconf_psel;
- u8 meshconf_pmetric;
- u8 meshconf_congest;
- u8 meshconf_synch;
- u8 meshconf_auth;
- u8 meshconf_form;
- u8 meshconf_cap;
+ union {
+ u8 required_octet;
+ DECLARE_FLEX_ARRAY(u8, virtual_map);
+ };
} __packed;
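
The union of @required_octet with the DECLARE_FLEX_ARRAY()'d @virtual_map pins sizeof(struct ieee80211_tim_ie) at the minimum valid non-S1G TIM payload of 4 octets while keeping flexible-array access to the Partial Virtual Bitmap. A minimal sketch of a length check built on that property; tim_len_valid() is a hypothetical helper, not part of this patch:

/* Sketch: the union keeps the struct at the 4-octet minimum payload;
 * the Partial Virtual Bitmap may occupy 1-251 octets in total.
 */
static inline bool tim_len_valid(u8 elem_len)
{
	BUILD_BUG_ON(sizeof(struct ieee80211_tim_ie) != 4);

	return elem_len >= sizeof(struct ieee80211_tim_ie) &&
	       elem_len <= sizeof(struct ieee80211_tim_ie) + 250;
}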
-/**
- * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags
- *
- * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
- * additional mesh peerings with other mesh STAs
- * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
- * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure
- * is ongoing
- * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
- * neighbors in deep sleep mode
- */
-enum mesh_config_capab_flags {
- IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01,
- IEEE80211_MESHCONF_CAPAB_FORWARDING = 0x08,
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING = 0x20,
- IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
-};
+#define WLAN_SA_QUERY_TR_ID_LEN 2
+#define WLAN_MEMBERSHIP_LEN 8
+#define WLAN_USER_POSITION_LEN 16
/**
- * mesh channel switch parameters element's flag indicator
+ * struct ieee80211_tpc_report_ie - TPC Report element
+ * @tx_power: Transmit Power
+ * @link_margin: Link Margin
*
+ * This structure represents the payload of the "TPC Report element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.16.
*/
-#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
-#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
-#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
-
-/**
- * struct ieee80211_rann_ie
- *
- * This structure refers to "Root Announcement information element"
- */
-struct ieee80211_rann_ie {
- u8 rann_flags;
- u8 rann_hopcount;
- u8 rann_ttl;
- u8 rann_addr[ETH_ALEN];
- __le32 rann_seq;
- __le32 rann_interval;
- __le32 rann_metric;
+struct ieee80211_tpc_report_ie {
+ u8 tx_power;
+ u8 link_margin;
} __packed;
-enum ieee80211_rann_flags {
- RANN_FLAG_IS_GATE = 1 << 0,
-};
-
-enum ieee80211_ht_chanwidth_values {
- IEEE80211_HT_CHANWIDTH_20MHZ = 0,
- IEEE80211_HT_CHANWIDTH_ANY = 1,
-};
+#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1)
+#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1
+#define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_MASK GENMASK(7, 5)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT 10
-/**
- * enum ieee80211_opmode_bits - VHT operating mode field bits
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
- * (the NSS value is the value of this field + 1)
- * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
- * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU
- * using a beamforming steering matrix
- */
-enum ieee80211_vht_opmode_bits {
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
- IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70,
- IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
- IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
-};
+struct ieee80211_addba_ext_ie {
+ u8 data;
+} __packed;
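
IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT is 10 because the three extension bits sit above the 10-bit Buffer Size subfield of the ADDBA Parameter Set, extending the largest expressible buffer size. A hedged sketch of how a receiver might combine the two fields; the helper name is illustrative, and it assumes the IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK definition found elsewhere in this header:

/* Illustrative: merge the 10-bit base Buffer Size with the 3 high
 * bits carried in the ADDBA Extension element's data octet.
 */
static u16 addba_full_buf_size(__le16 capab, u8 ext_data)
{
	u16 buf_size = le16_get_bits(capab, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);

	buf_size |= u8_get_bits(ext_data, IEEE80211_ADDBA_EXT_BUF_SIZE_MASK) <<
		    IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT;
	return buf_size;
}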
-#define WLAN_SA_QUERY_TR_ID_LEN 2
-#define WLAN_MEMBERSHIP_LEN 8
-#define WLAN_USER_POSITION_LEN 16
+struct ieee80211_ext {
+ __le16 frame_control;
+ __le16 duration;
+ union {
+ struct {
+ u8 sa[ETH_ALEN];
+ __le32 timestamp;
+ u8 change_seq;
+ u8 variable[];
+ } __packed s1g_beacon;
+ } u;
+} __packed __aligned(2);
/**
- * struct ieee80211_tpc_report_ie
+ * struct ieee80211_bss_load_elem - BSS Load element
*
- * This structure refers to "TPC Report element"
- */
-struct ieee80211_tpc_report_ie {
- u8 tx_power;
- u8 link_margin;
+ * Defined in section 9.4.2.26 of IEEE 802.11-REVme D4.1
+ *
+ * @sta_count: total number of STAs currently associated with the AP.
+ * @channel_util: Percentage of time that the access point sensed the channel
+ * was busy. This value is in the range [0, 255], where 255 means
+ * 100% busy.
+ * @avail_admission_capa: remaining amount of medium time available for
+ * admission control.
+ */
+struct ieee80211_bss_load_elem {
+ __le16 sta_count;
+ u8 channel_util;
+ __le16 avail_admission_capa;
} __packed;
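
Since @channel_util is linearly scaled with 255 meaning 100% busy, converting it to an integer percentage is a one-liner; the helper below is an illustrative sketch, not part of this patch:

/* Sketch: 0-255 scaled Channel Utilization to integer percent. */
static inline unsigned int bss_load_util_percent(const struct ieee80211_bss_load_elem *e)
{
	return (e->channel_util * 100) / 255;
}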
struct ieee80211_mgmt {
@@ -894,7 +991,7 @@ struct ieee80211_mgmt {
__le16 auth_transaction;
__le16 status_code;
/* possibly followed by Challenge text */
- u8 variable[0];
+ u8 variable[];
} __packed auth;
struct {
__le16 reason_code;
@@ -903,21 +1000,26 @@ struct ieee80211_mgmt {
__le16 capab_info;
__le16 listen_interval;
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_req;
struct {
__le16 capab_info;
__le16 status_code;
__le16 aid;
/* followed by Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_resp, reassoc_resp;
struct {
__le16 capab_info;
+ __le16 status_code;
+ u8 variable[];
+ } __packed s1g_assoc_resp, s1g_reassoc_resp;
+ struct {
+ __le16 capab_info;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed reassoc_req;
struct {
__le16 reason_code;
@@ -928,11 +1030,11 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
+ u8 variable[];
} __packed beacon;
struct {
/* only variable items: SSID, Supported rates */
- u8 variable[0];
+ DECLARE_FLEX_ARRAY(u8, variable);
} __packed probe_req;
struct {
__le64 timestamp;
@@ -940,7 +1042,7 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params */
- u8 variable[0];
+ u8 variable[];
} __packed probe_resp;
struct {
u8 category;
@@ -949,16 +1051,16 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
u8 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed wme_action;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch;
struct{
u8 action_code;
struct ieee80211_ext_chansw_ie data;
- u8 variable[0];
+ u8 variable[];
} __packed ext_chan_switch;
struct{
u8 action_code;
@@ -973,6 +1075,8 @@ struct ieee80211_mgmt {
__le16 capab;
__le16 timeout;
__le16 start_seq_num;
+ /* followed by BA Extension */
+ u8 variable[];
} __packed addba_req;
struct{
u8 action_code;
@@ -980,6 +1084,8 @@ struct ieee80211_mgmt {
__le16 status;
__le16 capab;
__le16 timeout;
+ /* followed by BA Extension */
+ u8 variable[];
} __packed addba_resp;
struct{
u8 action_code;
@@ -988,11 +1094,11 @@ struct ieee80211_mgmt {
} __packed delba;
struct {
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed self_prot;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed mesh_action;
struct {
u8 action;
@@ -1010,7 +1116,7 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
__le16 capability;
- u8 variable[0];
+ u8 variable[];
} __packed tdls_discover_resp;
struct {
u8 action_code;
@@ -1036,22 +1142,72 @@ struct ieee80211_mgmt {
u8 toa[6];
__le16 tod_error;
__le16 toa_error;
- u8 variable[0];
+ u8 variable[];
} __packed ftm;
+ struct {
+ u8 action_code;
+ u8 variable[];
+ } __packed s1g;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 follow_up;
+ u32 tod;
+ u32 toa;
+ u8 max_tod_error;
+ u8 max_toa_error;
+ } __packed wnm_timing_msr;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 variable[];
+ } __packed ttlm_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ __le16 status_code;
+ u8 variable[];
+ } __packed ttlm_res;
+ struct {
+ u8 action_code;
+ } __packed ttlm_tear_down;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 variable[];
+ } __packed ml_reconf_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 count;
+ u8 variable[];
+ } __packed ml_reconf_resp;
+ struct {
+ u8 action_code;
+ u8 variable[];
+ } __packed epcs;
} u;
} __packed action;
+ DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
} u;
} __packed __aligned(2);
/* Supported rates membership selectors */
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
+#define BSS_MEMBERSHIP_SELECTOR_GLK 125
+#define BSS_MEMBERSHIP_SELECTOR_EPD 124
+#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
+#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121
+
+#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_EHT_PHY
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
-/* Management MIC information element (IEEE 802.11w) */
+/* Management MIC information element (IEEE 802.11w) for CMAC */
struct ieee80211_mmie {
u8 element_id;
u8 length;
@@ -1069,6 +1225,15 @@ struct ieee80211_mmie_16 {
u8 mic[16];
} __packed;
+/* Management MIC information element (IEEE 802.11w) for all variants */
+struct ieee80211_mmie_var {
+ u8 element_id;
+ u8 length;
+ __le16 key_id;
+ u8 sequence_number[6];
+ u8 mic[]; /* 8 or 16 bytes */
+} __packed;
+
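The variable-MIC MMIE covers both the 8-byte BIP-CMAC-128 case and the 16-byte CMAC-256/GMAC cases; picking the right length per cipher might look like the sketch below, which assumes the WLAN_CIPHER_SUITE_* selectors defined alongside the suite macros in this header and the IEEE80211_*_MIC_LEN values added further down in this patch:

/* Sketch: MIC length of struct ieee80211_mmie_var by BIP cipher. */
static int mmie_mic_len(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_AES_CMAC:
		return IEEE80211_CMAC_128_MIC_LEN;	/* 8 */
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		return IEEE80211_CMAC_256_MIC_LEN;	/* 16 */
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		return IEEE80211_GMAC_MIC_LEN;		/* 16 */
	default:
		return -EINVAL;
	}
}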
struct ieee80211_vendor_ie {
u8 element_id;
u8 len;
@@ -1145,437 +1310,39 @@ struct ieee80211_tdls_data {
struct {
u8 dialog_token;
__le16 capability;
- u8 variable[0];
+ u8 variable[];
} __packed setup_req;
struct {
__le16 status_code;
u8 dialog_token;
__le16 capability;
- u8 variable[0];
+ u8 variable[];
} __packed setup_resp;
struct {
__le16 status_code;
u8 dialog_token;
- u8 variable[0];
+ u8 variable[];
} __packed setup_cfm;
struct {
__le16 reason_code;
- u8 variable[0];
+ u8 variable[];
} __packed teardown;
struct {
u8 dialog_token;
- u8 variable[0];
+ u8 variable[];
} __packed discover_req;
struct {
u8 target_channel;
u8 oper_class;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch_req;
struct {
__le16 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch_resp;
} u;
} __packed;
-/*
- * Peer-to-Peer IE attribute related definitions.
- */
-/**
- * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
- */
-enum ieee80211_p2p_attr_id {
- IEEE80211_P2P_ATTR_STATUS = 0,
- IEEE80211_P2P_ATTR_MINOR_REASON,
- IEEE80211_P2P_ATTR_CAPABILITY,
- IEEE80211_P2P_ATTR_DEVICE_ID,
- IEEE80211_P2P_ATTR_GO_INTENT,
- IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT,
- IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
- IEEE80211_P2P_ATTR_GROUP_BSSID,
- IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING,
- IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR,
- IEEE80211_P2P_ATTR_MANAGABILITY,
- IEEE80211_P2P_ATTR_CHANNEL_LIST,
- IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
- IEEE80211_P2P_ATTR_DEVICE_INFO,
- IEEE80211_P2P_ATTR_GROUP_INFO,
- IEEE80211_P2P_ATTR_GROUP_ID,
- IEEE80211_P2P_ATTR_INTERFACE,
- IEEE80211_P2P_ATTR_OPER_CHANNEL,
- IEEE80211_P2P_ATTR_INVITE_FLAGS,
- /* 19 - 220: Reserved */
- IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221,
-
- IEEE80211_P2P_ATTR_MAX
-};
-
-/* Notice of Absence attribute - described in P2P spec 4.1.14 */
-/* Typical max value used here */
-#define IEEE80211_P2P_NOA_DESC_MAX 4
-
-struct ieee80211_p2p_noa_desc {
- u8 count;
- __le32 duration;
- __le32 interval;
- __le32 start_time;
-} __packed;
-
-struct ieee80211_p2p_noa_attr {
- u8 index;
- u8 oppps_ctwindow;
- struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX];
-} __packed;
-
-#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7)
-#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F
-
-/**
- * struct ieee80211_bar - HT Block Ack Request
- *
- * This structure refers to "HT BlockAckReq" as
- * described in 802.11n draft section 7.2.1.7.1
- */
-struct ieee80211_bar {
- __le16 frame_control;
- __le16 duration;
- __u8 ra[ETH_ALEN];
- __u8 ta[ETH_ALEN];
- __le16 control;
- __le16 start_seq_num;
-} __packed;
-
-/* 802.11 BAR control masks */
-#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
-#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002
-#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
-#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000
-#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12
-
-#define IEEE80211_HT_MCS_MASK_LEN 10
-
-/**
- * struct ieee80211_mcs_info - MCS information
- * @rx_mask: RX mask
- * @rx_highest: highest supported RX rate. If set represents
- * the highest supported RX data rate in units of 1 Mbps.
- * If this field is 0 this value should not be used to
- * consider the highest RX data rate supported.
- * @tx_params: TX parameters
- */
-struct ieee80211_mcs_info {
- u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
- __le16 rx_highest;
- u8 tx_params;
- u8 reserved[3];
-} __packed;
-
-/* 802.11n HT capability MSC set */
-#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
-#define IEEE80211_HT_MCS_TX_DEFINED 0x01
-#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
-/* value 0 == 1 stream etc */
-#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
-#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
-#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
-#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
-
-/*
- * 802.11n D5.0 20.3.5 / 20.6 says:
- * - indices 0 to 7 and 32 are single spatial stream
- * - 8 to 31 are multiple spatial streams using equal modulation
- * [8..15 for two streams, 16..23 for three and 24..31 for four]
- * - remainder are multiple spatial streams using unequal modulation
- */
-#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
-#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
- (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
-
-/**
- * struct ieee80211_ht_cap - HT capabilities
- *
- * This structure is the "HT capabilities element" as
- * described in 802.11n D5.0 7.3.2.57
- */
-struct ieee80211_ht_cap {
- __le16 cap_info;
- u8 ampdu_params_info;
-
- /* 16 bytes MCS information */
- struct ieee80211_mcs_info mcs;
-
- __le16 extended_ht_cap_info;
- __le32 tx_BF_cap_info;
- u8 antenna_selection_info;
-} __packed;
-
-/* 802.11n HT capabilities masks (for cap_info) */
-#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
-#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
-#define IEEE80211_HT_CAP_SM_PS 0x000C
-#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
-#define IEEE80211_HT_CAP_GRN_FLD 0x0010
-#define IEEE80211_HT_CAP_SGI_20 0x0020
-#define IEEE80211_HT_CAP_SGI_40 0x0040
-#define IEEE80211_HT_CAP_TX_STBC 0x0080
-#define IEEE80211_HT_CAP_RX_STBC 0x0300
-#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8
-#define IEEE80211_HT_CAP_DELAY_BA 0x0400
-#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
-#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
-#define IEEE80211_HT_CAP_RESERVED 0x2000
-#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
-#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
-
-/* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */
-#define IEEE80211_HT_EXT_CAP_PCO 0x0001
-#define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006
-#define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1
-#define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300
-#define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8
-#define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400
-#define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800
-
-/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
-#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
-#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
-#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
-
-/*
- * Maximum length of AMPDU that the STA can receive in high-throughput (HT).
- * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
- */
-enum ieee80211_max_ampdu_length_exp {
- IEEE80211_HT_MAX_AMPDU_8K = 0,
- IEEE80211_HT_MAX_AMPDU_16K = 1,
- IEEE80211_HT_MAX_AMPDU_32K = 2,
- IEEE80211_HT_MAX_AMPDU_64K = 3
-};
-
-/*
- * Maximum length of AMPDU that the STA can receive in VHT.
- * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
- */
-enum ieee80211_vht_max_ampdu_length_exp {
- IEEE80211_VHT_MAX_AMPDU_8K = 0,
- IEEE80211_VHT_MAX_AMPDU_16K = 1,
- IEEE80211_VHT_MAX_AMPDU_32K = 2,
- IEEE80211_VHT_MAX_AMPDU_64K = 3,
- IEEE80211_VHT_MAX_AMPDU_128K = 4,
- IEEE80211_VHT_MAX_AMPDU_256K = 5,
- IEEE80211_VHT_MAX_AMPDU_512K = 6,
- IEEE80211_VHT_MAX_AMPDU_1024K = 7
-};
-
-#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
-
-/* Minimum MPDU start spacing */
-enum ieee80211_min_mpdu_spacing {
- IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */
- IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */
- IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */
- IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */
- IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */
- IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */
- IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */
- IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */
-};
-
-/**
- * struct ieee80211_ht_operation - HT operation IE
- *
- * This structure is the "HT operation element" as
- * described in 802.11n-2009 7.3.2.57
- */
-struct ieee80211_ht_operation {
- u8 primary_chan;
- u8 ht_param;
- __le16 operation_mode;
- __le16 stbc_param;
- u8 basic_set[16];
-} __packed;
-
-/* for ht_param */
-#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
-#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
-#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
-#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
-#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
-#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
-
-/* for operation_mode */
-#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
-#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
-#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
-#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
-#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
-#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
-#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
-#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5
-#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0
-
-/* for stbc_param */
-#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
-#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
-#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
-#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
-#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
-#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
-
-
-/* block-ack parameters */
-#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001
-#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
-#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
-#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
-#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
-
-/*
- * A-PMDU buffer sizes
- * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
- */
-#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
-
-
-/* Spatial Multiplexing Power Save Modes (for capability) */
-#define WLAN_HT_CAP_SM_PS_STATIC 0
-#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
-#define WLAN_HT_CAP_SM_PS_INVALID 2
-#define WLAN_HT_CAP_SM_PS_DISABLED 3
-
-/* for SM power control field lower two bits */
-#define WLAN_HT_SMPS_CONTROL_DISABLED 0
-#define WLAN_HT_SMPS_CONTROL_STATIC 1
-#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
-
-/**
- * struct ieee80211_vht_mcs_info - VHT MCS information
- * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
- * @rx_highest: Indicates highest long GI VHT PPDU data rate
- * STA can receive. Rate expressed in units of 1 Mbps.
- * If this field is 0 this value should not be used to
- * consider the highest RX data rate supported.
- * The top 3 bits of this field are reserved.
- * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
- * @tx_highest: Indicates highest long GI VHT PPDU data rate
- * STA can transmit. Rate expressed in units of 1 Mbps.
- * If this field is 0 this value should not be used to
- * consider the highest TX data rate supported.
- * The top 3 bits of this field are reserved.
- */
-struct ieee80211_vht_mcs_info {
- __le16 rx_mcs_map;
- __le16 rx_highest;
- __le16 tx_mcs_map;
- __le16 tx_highest;
-} __packed;
-
-/**
- * enum ieee80211_vht_mcs_support - VHT MCS support definitions
- * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
- * number of streams
- * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported
- * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported
- * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported
- *
- * These definitions are used in each 2-bit subfield of the @rx_mcs_map
- * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are
- * both split into 8 subfields by number of streams. These values indicate
- * which MCSes are supported for the number of streams the value appears
- * for.
- */
-enum ieee80211_vht_mcs_support {
- IEEE80211_VHT_MCS_SUPPORT_0_7 = 0,
- IEEE80211_VHT_MCS_SUPPORT_0_8 = 1,
- IEEE80211_VHT_MCS_SUPPORT_0_9 = 2,
- IEEE80211_VHT_MCS_NOT_SUPPORTED = 3,
-};
-
-/**
- * struct ieee80211_vht_cap - VHT capabilities
- *
- * This structure is the "VHT capabilities element" as
- * described in 802.11ac D3.0 8.4.2.160
- * @vht_cap_info: VHT capability info
- * @supp_mcs: VHT MCS supported rates
- */
-struct ieee80211_vht_cap {
- __le32 vht_cap_info;
- struct ieee80211_vht_mcs_info supp_mcs;
-} __packed;
-
-/**
- * enum ieee80211_vht_chanwidth - VHT channel width
- * @IEEE80211_VHT_CHANWIDTH_USE_HT: use the HT operation IE to
- * determine the channel width (20 or 40 MHz)
- * @IEEE80211_VHT_CHANWIDTH_80MHZ: 80 MHz bandwidth
- * @IEEE80211_VHT_CHANWIDTH_160MHZ: 160 MHz bandwidth
- * @IEEE80211_VHT_CHANWIDTH_80P80MHZ: 80+80 MHz bandwidth
- */
-enum ieee80211_vht_chanwidth {
- IEEE80211_VHT_CHANWIDTH_USE_HT = 0,
- IEEE80211_VHT_CHANWIDTH_80MHZ = 1,
- IEEE80211_VHT_CHANWIDTH_160MHZ = 2,
- IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3,
-};
-
-/**
- * struct ieee80211_vht_operation - VHT operation IE
- *
- * This structure is the "VHT operation element" as
- * described in 802.11ac D3.0 8.4.2.161
- * @chan_width: Operating channel width
- * @center_freq_seg0_idx: center freq segment 0 index
- * @center_freq_seg1_idx: center freq segment 1 index
- * @basic_mcs_set: VHT Basic MCS rate set
- */
-struct ieee80211_vht_operation {
- u8 chan_width;
- u8 center_freq_seg0_idx;
- u8 center_freq_seg1_idx;
- __le16 basic_mcs_set;
-} __packed;
-
-
-/* 802.11ac VHT Capabilities */
-#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
-#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
-#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
-#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
-#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
-#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
-#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
-#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
-#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
-#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
-#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
-#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
-#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
-#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
-#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
-#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
-#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK \
- (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT)
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \
- (7 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT)
-#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
-#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
-#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
-#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
-#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23
-#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \
- (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT)
-#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
-#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
-#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
-#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
-
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -1639,6 +1406,8 @@ struct ieee80211_vht_operation {
#define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0
#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1
#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_LCI 8
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC 11
/* 802.11g ERP information element */
#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
@@ -1724,8 +1493,12 @@ enum ieee80211_statuscode {
WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
/* 802.11ai */
- WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
- WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
+ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 112,
+ WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 113,
+ WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126,
+ WLAN_STATUS_SAE_PK = 127,
+ WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING = 133,
+ WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED = 134,
};
@@ -1963,19 +1736,29 @@ enum ieee80211_eid {
WLAN_EID_VHT_OPERATION = 192,
WLAN_EID_EXTENDED_BSS_LOAD = 193,
WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
- WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_TX_POWER_ENVELOPE = 195,
WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
WLAN_EID_AID = 197,
WLAN_EID_QUIET_CHANNEL = 198,
WLAN_EID_OPMODE_NOTIF = 199,
+ WLAN_EID_REDUCED_NEIGHBOR_REPORT = 201,
+
+ WLAN_EID_AID_REQUEST = 210,
+ WLAN_EID_AID_RESPONSE = 211,
+ WLAN_EID_S1G_BCN_COMPAT = 213,
+ WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214,
+ WLAN_EID_S1G_TWT = 216,
+ WLAN_EID_S1G_CAPABILITIES = 217,
WLAN_EID_VENDOR_SPECIFIC = 221,
WLAN_EID_QOS_PARAMETER = 222,
+ WLAN_EID_S1G_OPERATION = 232,
WLAN_EID_CAG_NUMBER = 237,
WLAN_EID_AP_CSN = 239,
WLAN_EID_FILS_INDICATION = 240,
WLAN_EID_DILS = 241,
WLAN_EID_FRAGMENT = 242,
+ WLAN_EID_RSNX = 244,
WLAN_EID_EXTENSION = 255
};
@@ -1991,6 +1774,33 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
WLAN_EID_EXT_FILS_NONCE = 13,
+ WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+ WLAN_EID_EXT_DH_PARAMETER = 32,
+ WLAN_EID_EXT_HE_CAPABILITY = 35,
+ WLAN_EID_EXT_HE_OPERATION = 36,
+ WLAN_EID_EXT_UORA = 37,
+ WLAN_EID_EXT_HE_MU_EDCA = 38,
+ WLAN_EID_EXT_HE_SPR = 39,
+ WLAN_EID_EXT_NDP_FEEDBACK_REPORT_PARAMSET = 41,
+ WLAN_EID_EXT_BSS_COLOR_CHG_ANN = 42,
+ WLAN_EID_EXT_QUIET_TIME_PERIOD_SETUP = 43,
+ WLAN_EID_EXT_ESS_REPORT = 45,
+ WLAN_EID_EXT_OPS = 46,
+ WLAN_EID_EXT_HE_BSS_LOAD = 47,
+ WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52,
+ WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55,
+ WLAN_EID_EXT_NON_INHERITANCE = 56,
+ WLAN_EID_EXT_KNOWN_BSSID = 57,
+ WLAN_EID_EXT_SHORT_SSID_LIST = 58,
+ WLAN_EID_EXT_HE_6GHZ_CAPA = 59,
+ WLAN_EID_EXT_UL_MU_POWER_CAPA = 60,
+ WLAN_EID_EXT_EHT_OPERATION = 106,
+ WLAN_EID_EXT_EHT_MULTI_LINK = 107,
+ WLAN_EID_EXT_EHT_CAPABILITY = 108,
+ WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109,
+ WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
+ WLAN_EID_EXT_KNOWN_STA_IDENTIFCATION = 136,
+ WLAN_EID_EXT_NON_AP_STA_REG_CON = 137,
};
/* Action category code */
@@ -2001,6 +1811,7 @@ enum ieee80211_category {
WLAN_CATEGORY_BACK = 3,
WLAN_CATEGORY_PUBLIC = 4,
WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ WLAN_CATEGORY_FAST_BBS_TRANSITION = 6,
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -2015,6 +1826,8 @@ enum ieee80211_category {
WLAN_CATEGORY_FST = 18,
WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VHT = 21,
+ WLAN_CATEGORY_S1G = 22,
+ WLAN_CATEGORY_PROTECTED_EHT = 37,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -2028,25 +1841,6 @@ enum ieee80211_spectrum_mgmt_actioncode {
WLAN_ACTION_SPCT_CHL_SWITCH = 4,
};
-/* HT action codes */
-enum ieee80211_ht_actioncode {
- WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
- WLAN_HT_ACTION_SMPS = 1,
- WLAN_HT_ACTION_PSMP = 2,
- WLAN_HT_ACTION_PCO_PHASE = 3,
- WLAN_HT_ACTION_CSI = 4,
- WLAN_HT_ACTION_NONCOMPRESSED_BF = 5,
- WLAN_HT_ACTION_COMPRESSED_BF = 6,
- WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
-};
-
-/* VHT action codes */
-enum ieee80211_vht_actioncode {
- WLAN_VHT_ACTION_COMPRESSED_BF = 0,
- WLAN_VHT_ACTION_GROUPID_MGMT = 1,
- WLAN_VHT_ACTION_OPMODE_NOTIF = 2,
-};
-
/* Self Protected Action codes */
enum ieee80211_self_protected_actioncode {
WLAN_SP_RESERVED = 0,
@@ -2057,19 +1851,10 @@ enum ieee80211_self_protected_actioncode {
WLAN_SP_MGK_ACK = 5,
};
-/* Mesh action codes */
-enum ieee80211_mesh_actioncode {
- WLAN_MESH_ACTION_LINK_METRIC_REPORT,
- WLAN_MESH_ACTION_HWMP_PATH_SELECTION,
- WLAN_MESH_ACTION_GATE_ANNOUNCEMENT,
- WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION,
- WLAN_MESH_ACTION_MCCA_SETUP_REQUEST,
- WLAN_MESH_ACTION_MCCA_SETUP_REPLY,
- WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST,
- WLAN_MESH_ACTION_MCCA_ADVERTISEMENT,
- WLAN_MESH_ACTION_MCCA_TEARDOWN,
- WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST,
- WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
+/* Unprotected WNM action codes */
+enum ieee80211_unprotected_wnm_actioncode {
+ WLAN_UNPROTECTED_WNM_ACTION_TIM = 0,
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1,
};
/* Security key length */
@@ -2088,6 +1873,16 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
+/* Radio measurement action codes as defined in IEEE 802.11-2024 - Table 9-470 */
+enum ieee80211_radio_measurement_actioncode {
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REQUEST = 0,
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REPORT = 1,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST = 2,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT = 3,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_REQUEST = 4,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_RESPONSE = 5,
+};
+
#define IEEE80211_WEP_IV_LEN 4
#define IEEE80211_WEP_ICV_LEN 4
#define IEEE80211_CCMP_HDR_LEN 8
@@ -2103,6 +1898,9 @@ enum ieee80211_key_len {
#define IEEE80211_GCMP_HDR_LEN 8
#define IEEE80211_GCMP_MIC_LEN 16
#define IEEE80211_GCMP_PN_LEN 6
+#define IEEE80211_CMAC_128_MIC_LEN 8
+#define IEEE80211_CMAC_256_MIC_LEN 16
+#define IEEE80211_GMAC_MIC_LEN 16
#define FILS_NONCE_LEN 16
#define FILS_MAX_KEK_LEN 64
@@ -2111,7 +1909,8 @@ enum ieee80211_key_len {
#define FILS_ERP_MAX_REALM_LEN 253
#define FILS_ERP_MAX_RRK_LEN 64
-#define PMK_MAX_LEN 48
+#define PMK_MAX_LEN 64
+#define SAE_PASSWORD_MAX_LEN 128
/* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */
enum ieee80211_pub_actioncode {
@@ -2148,7 +1947,7 @@ enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_NETWORK_CHANNEL_CONTROL = 30,
WLAN_PUB_ACTION_WHITE_SPACE_MAP_ANN = 31,
WLAN_PUB_ACTION_FTM_REQUEST = 32,
- WLAN_PUB_ACTION_FTM = 33,
+ WLAN_PUB_ACTION_FTM_RESPONSE = 33,
WLAN_PUB_ACTION_FILS_DISCOVERY = 34,
};
@@ -2172,7 +1971,17 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
-/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */
+/* Multiple BSSID capability is set in the 6th bit of 3rd byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6)
+
+/* Timing Measurement protocol for time sync is set in the 7th bit of 3rd byte
+ * of the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(7)
+
+/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
@@ -2203,70 +2012,28 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7)
-/* TDLS specific payload type in the LLC/SNAP header */
-#define WLAN_TDLS_SNAP_RFTYPE 0x2
-
-/* BSS Coex IE information field bits */
-#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
+/* Defines support for TWT Requester and TWT Responder */
+#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5)
+#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6)
-/**
- * enum ieee80211_mesh_sync_method - mesh synchronization method identifier
- *
- * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
- * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
- * that will be specified in a vendor specific information element
+/*
+ * When set, indicates that the AP is able to tolerate 26-tone RU UL
+ * OFDMA transmissions using HE TB PPDU from an OBSS (i.e. it does not
+ * falsely classify those 26-tone RU UL OFDMA transmissions as radar pulses).
*/
-enum ieee80211_mesh_sync_method {
- IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
- IEEE80211_SYNC_METHOD_VENDOR = 255,
-};
+#define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(7)
-/**
- * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier
- *
- * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
- * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
- * be specified in a vendor specific information element
- */
-enum ieee80211_mesh_path_protocol {
- IEEE80211_PATH_PROTOCOL_HWMP = 1,
- IEEE80211_PATH_PROTOCOL_VENDOR = 255,
-};
+/* Defines support for enhanced multi-bssid advertisement */
+#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(3)
-/**
- * enum ieee80211_mesh_path_metric - mesh path selection metric identifier
- *
- * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
- * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
- * specified in a vendor specific information element
- */
-enum ieee80211_mesh_path_metric {
- IEEE80211_PATH_METRIC_AIRTIME = 1,
- IEEE80211_PATH_METRIC_VENDOR = 255,
-};
+/* Enable Beacon Protection */
+#define WLAN_EXT_CAPA11_BCN_PROTECT BIT(4)
-/**
- * enum ieee80211_root_mode_identifier - root mesh STA mode identifier
- *
- * These attribute are used by dot11MeshHWMPRootMode to set root mesh STA mode
- *
- * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default)
- * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA if greater than
- * this value
- * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA supports
- * the proactive PREQ with proactive PREP subfield set to 0
- * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA
- * supports the proactive PREQ with proactive PREP subfield set to 1
- * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA supports
- * the proactive RANN
- */
-enum ieee80211_root_mode_identifier {
- IEEE80211_ROOTMODE_NO_ROOT = 0,
- IEEE80211_ROOTMODE_ROOT = 1,
- IEEE80211_PROACTIVE_PREQ_NO_PREP = 2,
- IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3,
- IEEE80211_PROACTIVE_RANN = 4,
-};
+/* TDLS specific payload type in the LLC/SNAP header */
+#define WLAN_TDLS_SNAP_RFTYPE 0x2
+
+/* BSS Coex IE information field bits */
+#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
/*
* IEEE 802.11-2007 7.3.2.9 Country information element
@@ -2360,7 +2127,7 @@ enum ieee80211_idle_options {
};
/**
- * struct ieee80211_bss_max_idle_period_ie
+ * struct ieee80211_bss_max_idle_period_ie - BSS max idle period element struct
*
* This structure refers to "BSS Max idle period element"
*
@@ -2375,25 +2142,41 @@ struct ieee80211_bss_max_idle_period_ie {
u8 idle_options;
} __packed;
-/* BACK action code */
-enum ieee80211_back_actioncode {
- WLAN_ACTION_ADDBA_REQ = 0,
- WLAN_ACTION_ADDBA_RESP = 1,
- WLAN_ACTION_DELBA = 2,
-};
-
-/* BACK (block-ack) parties */
-enum ieee80211_back_parties {
- WLAN_BACK_RECIPIENT = 0,
- WLAN_BACK_INITIATOR = 1,
-};
-
/* SA Query action */
enum ieee80211_sa_query_action {
WLAN_ACTION_SA_QUERY_REQUEST = 0,
WLAN_ACTION_SA_QUERY_RESPONSE = 1,
};
+/**
+ * struct ieee80211_bssid_index - multiple BSSID index element structure
+ *
+ * This structure refers to "Multiple BSSID-index element"
+ *
+ * @bssid_index: BSSID index
+ * @dtim_period: optional, overrides transmitted BSS dtim period
+ * @dtim_count: optional, overrides transmitted BSS dtim count
+ */
+struct ieee80211_bssid_index {
+ u8 bssid_index;
+ u8 dtim_period;
+ u8 dtim_count;
+};
+
+/**
+ * struct ieee80211_multiple_bssid_configuration - multiple BSSID configuration
+ * element structure
+ *
+ * This structure refers to "Multiple BSSID Configuration element"
+ *
+ * @bssid_count: total number of active BSSIDs in the set
+ * @profile_periodicity: the least number of beacon frames that need to be
+ * received in order to discover all the nontransmitted BSSIDs in the set.
+ */
+struct ieee80211_multiple_bssid_configuration {
+ u8 bssid_count;
+ u8 profile_periodicity;
+};
#define SUITE(oui, id) (((oui) << 8) | (id))
@@ -2424,12 +2207,19 @@ enum ieee80211_sa_query_action {
#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_AP_PEER_KEY SUITE(0x000FAC, 10)
#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11)
#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12)
+#define WLAN_AKM_SUITE_FT_8021X_SHA384 SUITE(0x000FAC, 13)
#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14)
#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15)
#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16)
#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17)
+#define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18)
+#define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19)
+#define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20)
+
+#define WLAN_AKM_SUITE_WFA_DPP SUITE(WLAN_OUI_WFA, 2)
#define WLAN_MAX_KEY_LEN 32
@@ -2441,10 +2231,12 @@ enum ieee80211_sa_query_action {
#define WLAN_OUI_WFA 0x506f9a
#define WLAN_OUI_TYPE_WFA_P2P 9
+#define WLAN_OUI_TYPE_WFA_DPP 0x1A
#define WLAN_OUI_MICROSOFT 0x0050f2
#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
#define WLAN_OUI_TYPE_MICROSOFT_WMM 2
#define WLAN_OUI_TYPE_MICROSOFT_WPS 4
+#define WLAN_OUI_TYPE_MICROSOFT_TPC 8
/*
* WMM/802.11e Tspec Element
@@ -2486,23 +2278,42 @@ struct ieee80211_tspec_ie {
/**
* ieee80211_get_qos_ctl - get pointer to qos control bytes
* @hdr: the frame
+ * Return: a pointer to the QoS control field in the frame header
*
* The qos ctrl bytes come after the frame_control, duration, seq_num
- * and 3 or 4 addresses of length ETH_ALEN.
- * 3 addr: 2 + 2 + 2 + 3*6 = 24
- * 4 addr: 2 + 2 + 2 + 4*6 = 30
+ * and 3 or 4 addresses of length ETH_ALEN. Checks frame_control to choose
+ * between struct ieee80211_qos_hdr_4addr and struct ieee80211_qos_hdr.
*/
static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
{
- if (ieee80211_has_a4(hdr->frame_control))
- return (u8 *)hdr + 30;
+ union {
+ struct ieee80211_qos_hdr addr3;
+ struct ieee80211_qos_hdr_4addr addr4;
+ } *qos;
+
+ qos = (void *)hdr;
+ if (ieee80211_has_a4(qos->addr3.frame_control))
+ return (u8 *)&qos->addr4.qos_ctrl;
else
- return (u8 *)hdr + 24;
+ return (u8 *)&qos->addr3.qos_ctrl;
+}
+
+/**
+ * ieee80211_get_tid - get qos TID
+ * @hdr: the frame
+ * Return: the TID from the QoS control field
+ */
+static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
+{
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ return qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
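
ieee80211_get_tid() dereferences the QoS Control field unconditionally, so it is only valid on QoS data frames. A minimal usage sketch, assuming the ieee80211_is_data_qos() check defined earlier in this header; the wrapper name is illustrative:

/* Usage sketch: read the TID only after confirming this is a QoS
 * data frame; map everything else to TID 0 here.
 */
static u8 frame_tid_or_zero(struct ieee80211_hdr *hdr)
{
	if (!ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	return ieee80211_get_tid(hdr);
}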
/**
* ieee80211_get_SA - get pointer to SA
* @hdr: the frame
+ * Return: a pointer to the source address (SA)
*
* Given an 802.11 frame, this function returns the offset
* to the source address (SA). It does not verify that the
@@ -2522,6 +2333,7 @@ static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr)
/**
* ieee80211_get_DA - get pointer to DA
* @hdr: the frame
+ * Return: a pointer to the destination address (DA)
*
* Given an 802.11 frame, this function returns the offset
* to the destination address (DA). It does not verify that
@@ -2538,8 +2350,48 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
}
/**
+ * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * @skb: the skb to check, starting with the 802.11 header
+ * Return: whether or not the MMPDU is bufferable
+ */
+static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ __le16 fc = mgmt->frame_control;
+
+ /*
+ * IEEE 802.11 REVme D2.0 definition of bufferable MMPDU;
+ * note that this ignores the IBSS special case.
+ */
+ if (!ieee80211_is_mgmt(fc))
+ return false;
+
+ if (ieee80211_is_disassoc(fc) || ieee80211_is_deauth(fc))
+ return true;
+
+ if (!ieee80211_is_action(fc))
+ return false;
+
+ if (skb->len < offsetofend(typeof(*mgmt), u.action.u.ftm.action_code))
+ return true;
+
+ /* action frame - additionally check for non-bufferable FTM */
+
+ if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
+ mgmt->u.action.category != WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
+ return true;
+
+ if (mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_REQUEST ||
+ mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_RESPONSE)
+ return false;
+
+ return true;
+}
+
+/**
* _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
* @hdr: the frame (buffer must include at least the first octet of payload)
+ * Return: whether or not the frame is a robust management frame
*/
static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
{
@@ -2566,6 +2418,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
*category != WLAN_CATEGORY_SELF_PROTECTED &&
*category != WLAN_CATEGORY_UNPROT_DMG &&
*category != WLAN_CATEGORY_VHT &&
+ *category != WLAN_CATEGORY_S1G &&
*category != WLAN_CATEGORY_VENDOR_SPECIFIC;
}
@@ -2575,6 +2428,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame
* @skb: the skb containing the frame, length will be checked
+ * Return: whether or not the frame is a robust management frame
*/
static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
{
@@ -2587,6 +2441,7 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
* ieee80211_is_public_action - check if frame is a public action frame
* @hdr: the frame
* @len: length of the frame
+ * Return: whether or not the frame is a public action frame
*/
static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
size_t len)
@@ -2601,9 +2456,40 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
}
/**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+ u8 action;
+
+ if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+ skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+ return false;
+
+ action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+ return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+ action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+ action != WLAN_PUB_ACTION_MSMT_PILOT &&
+ action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+ action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+ action != WLAN_PUB_ACTION_FTM_REQUEST &&
+ action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY &&
+ action != WLAN_PUB_ACTION_VENDOR_SPECIFIC;
+}
+
+/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
- * privacy action frame
+ * privacy action frame
* @hdr: the frame
+ * Return: whether or not the frame is a group addressed privacy action frame
*/
static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
{
@@ -2619,8 +2505,9 @@ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_group_privacy_action - check if frame is a group addressed
- * privacy action frame
+ * privacy action frame
* @skb: the skb containing the frame, length will be checked
+ * Return: whether or not the frame is a group addressed privacy action frame
*/
static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
{
@@ -2632,20 +2519,15 @@ static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
/**
* ieee80211_tu_to_usec - convert time units (TU) to microseconds
* @tu: the TUs
+ * Return: the time value converted to microseconds
*/
static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
{
return 1024 * tu;
}
-/**
- * ieee80211_check_tim - check if AID bit is set in TIM
- * @tim: the TIM IE
- * @tim_len: length of the TIM IE
- * @aid: the AID to look for
- */
-static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
- u8 tim_len, u16 aid)
+static inline bool __ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
{
u8 mask;
u8 index, indexn1, indexn2;
@@ -2669,14 +2551,15 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
}
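
With the rename, __ieee80211_check_tim() remains the raw AID-bit lookup in the Partial Virtual Bitmap. A hedged usage sketch for a station testing its own AID against a beacon body, built on the element iteration helpers added later in this patch; the function name and buffer handling are illustrative:

/* Sketch: find the TIM element in a beacon body and test our AID. */
static bool beacon_tim_set_for(const u8 *body, size_t body_len, u16 aid)
{
	const struct element *elem;

	for_each_element_id(elem, WLAN_EID_TIM, body, body_len) {
		const struct ieee80211_tim_ie *tim = (const void *)elem->data;

		if (elem->datalen < sizeof(*tim))
			return false;

		return __ieee80211_check_tim(tim, elem->datalen, aid);
	}

	return false;
}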
/**
- * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
+ * ieee80211_get_tdls_action - get TDLS action code
* @skb: the skb containing the frame, length will not be checked
- * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
+ * Return: the TDLS action code, or -1 if it's not an encapsulated TDLS action
+ * frame
*
* This function assumes the frame is a data frame, and that the network header
* is in the correct place.
*/
-static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
+static inline int ieee80211_get_tdls_action(struct sk_buff *skb)
{
if (!skb_is_nonlinear(skb) &&
skb->len > (skb_network_offset(skb) + 2)) {
@@ -2696,9 +2579,22 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
+/* convert frequencies */
+#define MHZ_TO_KHZ(freq) ((freq) * 1000)
+#define KHZ_TO_MHZ(freq) ((freq) / 1000)
+#define PR_KHZ(f) KHZ_TO_MHZ(f), f % 1000
+#define KHZ_F "%d.%03d"
+
+/* convert powers */
+#define DBI_TO_MBI(gain) ((gain) * 100)
+#define MBI_TO_DBI(gain) ((gain) / 100)
+#define DBM_TO_MBM(gain) ((gain) * 100)
+#define MBM_TO_DBM(gain) ((gain) / 100)
+
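KHZ_F and PR_KHZ() exist so kHz-precision frequencies print cleanly through ordinary format strings, while the power macros convert between dB units and the 0.01 dB ("milli") units used on the nl80211 ABI. A short usage sketch; the function and values are illustrative:

/* Usage sketch: print a kHz-precision frequency and a power limit
 * converted from dBm to mBm.
 */
static void show_chan(int freq_khz, int limit_dbm)
{
	pr_debug("operating on " KHZ_F " MHz, limit %d mBm\n",
		 PR_KHZ(freq_khz), DBM_TO_MBM(limit_dbm));
}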
/**
* ieee80211_action_contains_tpc - checks if the frame contains TPC element
* @skb: the skb containing the frame, length will be checked
+ * Return: %true if the frame contains a TPC element, %false otherwise
*
 * This function checks whether the frame is either a TPC Report action
 * frame or a Link Measurement Report action frame, as defined in
 * IEEE Std 802.11-2012 8.5.2.5
@@ -2743,4 +2639,205 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+/**
+ * ieee80211_is_timing_measurement - check if frame is timing measurement response
+ * @skb: the SKB to check
+ * Return: whether or not the frame is a valid timing measurement response
+ */
+static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return false;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+
+ if (mgmt->u.action.category == WLAN_CATEGORY_WNM_UNPROTECTED &&
+ mgmt->u.action.u.wnm_timing_msr.action_code ==
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.wnm_timing_msr))
+ return true;
+
+ return false;
+}
+
+/**
+ * ieee80211_is_ftm - check if frame is FTM response
+ * @skb: the SKB to check
+ * Return: whether or not the frame is a valid FTM response action frame
+ */
+static inline bool ieee80211_is_ftm(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (!ieee80211_is_public_action((void *)mgmt, skb->len))
+ return false;
+
+ if (mgmt->u.action.u.ftm.action_code ==
+ WLAN_PUB_ACTION_FTM_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.ftm))
+ return true;
+
+ return false;
+}
+
+struct element {
+ u8 id;
+ u8 datalen;
+ u8 data[];
+} __packed;
+
+/* element iteration helpers */
+#define for_each_element(_elem, _data, _datalen) \
+ for (_elem = (const struct element *)(_data); \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) && \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) + _elem->datalen; \
+ _elem = (const struct element *)(_elem->data + _elem->datalen))
+
+#define for_each_element_id(element, _id, data, datalen) \
+ for_each_element(element, data, datalen) \
+ if (element->id == (_id))
+
+#define for_each_element_extid(element, extid, _data, _datalen) \
+ for_each_element(element, _data, _datalen) \
+ if (element->id == WLAN_EID_EXTENSION && \
+ element->datalen > 0 && \
+ element->data[0] == (extid))
+
+#define for_each_subelement(sub, element) \
+ for_each_element(sub, (element)->data, (element)->datalen)
+
+#define for_each_subelement_id(sub, id, element) \
+ for_each_element_id(sub, id, (element)->data, (element)->datalen)
+
+#define for_each_subelement_extid(sub, extid, element) \
+ for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
+
+/**
+ * for_each_element_completed - determine if element parsing consumed all data
+ * @element: element pointer after for_each_element() or friends
+ * @data: same data pointer as passed to for_each_element() or friends
+ * @datalen: same data length as passed to for_each_element() or friends
+ * Return: %true if all elements were iterated, %false otherwise; see notes
+ *
+ * This function returns %true if all the data was parsed or considered
+ * while walking the elements. Only use this if your for_each_element()
+ * loop cannot be broken out of; otherwise it always returns %false.
+ *
+ * If some data was malformed, this returns %false since the last parsed
+ * element will not fill the whole remaining data.
+ */
+static inline bool for_each_element_completed(const struct element *element,
+ const void *data, size_t datalen)
+{
+ return (const u8 *)element == (const u8 *)data + datalen;
+}
+
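A hedged sketch of how these iteration helpers combine (the function name is
hypothetical, not part of the patch):

	static bool elements_parse_ok(const u8 *data, size_t len)
	{
		const struct element *elem;

		for_each_element(elem, data, len) {
			/* inspect elem->id / elem->datalen / elem->data */
		}

		/* meaningful only because the loop above never breaks out */
		return for_each_element_completed(elem, data, len);
	}
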
+/*
+ * RSNX Capabilities:
+ * bits 0-3: Field length (n-1)
+ */
+#define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4)
+#define WLAN_RSNX_CAPA_SAE_H2E BIT(5)
+
+/*
+ * reduced neighbor report, based on Draft P802.11ax_D6.1,
+ * section 9.4.2.170 and accepted contributions.
+ */
+#define IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03
+#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04
+#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08
+#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0
+#define IEEE80211_TBTT_INFO_TYPE_TBTT 0
+#define IEEE80211_TBTT_INFO_TYPE_MLD 1
+
+#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01
+#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02
+#define IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID 0x04
+#define IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID 0x08
+#define IEEE80211_RNR_TBTT_PARAMS_COLOC_ESS 0x10
+#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20
+#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40
+
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_NO_LIMIT 127
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED -128
+
+struct ieee80211_neighbor_ap_info {
+ u8 tbtt_info_hdr;
+ u8 tbtt_info_len;
+ u8 op_class;
+ u8 channel;
+} __packed;
+
+enum ieee80211_range_params_max_total_ltf {
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_4 = 0,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_8,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_16,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED,
+};
+
+/*
+ * reduced neighbor report, based on Draft P802.11be_D3.0,
+ * section 9.4.2.170.2.
+ */
+struct ieee80211_rnr_mld_params {
+ u8 mld_id;
+ __le16 params;
+} __packed;
+
+#define IEEE80211_RNR_MLD_PARAMS_LINK_ID 0x000F
+#define IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT 0x0FF0
+#define IEEE80211_RNR_MLD_PARAMS_UPDATES_INCLUDED 0x1000
+#define IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK 0x2000
+
+/* Format of the TBTT information element if it has 7, 8 or 9 bytes */
+struct ieee80211_tbtt_info_7_8_9 {
+ u8 tbtt_offset;
+ u8 bssid[ETH_ALEN];
+
+ /* The following element is optional, structure may not grow */
+ u8 bss_params;
+ s8 psd_20;
+} __packed;
+
+/* Format of the TBTT information element if it has >= 11 bytes */
+struct ieee80211_tbtt_info_ge_11 {
+ u8 tbtt_offset;
+ u8 bssid[ETH_ALEN];
+ __le32 short_ssid;
+
+ /* The following elements are optional, structure may grow */
+ u8 bss_params;
+ s8 psd_20;
+ struct ieee80211_rnr_mld_params mld_params;
+} __packed;
+
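A hedged parsing sketch (bounds checks elided; assumes info points at a
validated struct ieee80211_neighbor_ap_info followed by one TBTT entry):

	const u8 *entry = (const u8 *)(info + 1);

	if (info->tbtt_info_len >= 11) {
		const struct ieee80211_tbtt_info_ge_11 *t = (const void *)entry;
		/* short_ssid is present; check the advertised length before
		 * reading bss_params, psd_20 or mld_params */
	} else if (info->tbtt_info_len >= 7) {
		const struct ieee80211_tbtt_info_7_8_9 *t = (const void *)entry;
		/* only tbtt_offset and bssid are guaranteed */
	}
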
+#include "ieee80211-ht.h"
+#include "ieee80211-vht.h"
+#include "ieee80211-he.h"
+#include "ieee80211-eht.h"
+#include "ieee80211-mesh.h"
+#include "ieee80211-s1g.h"
+#include "ieee80211-p2p.h"
+#include "ieee80211-nan.h"
+
+/**
+ * ieee80211_check_tim - check if AID bit is set in TIM
+ * @tim: the TIM IE
+ * @tim_len: length of the TIM IE
+ * @aid: the AID to look for
+ * @s1g: whether the TIM is from an S1G PPDU
+ * Return: whether or not traffic is indicated in the TIM for the given AID
+ */
+static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid, bool s1g)
+{
+ return s1g ? ieee80211_s1g_check_tim(tim, tim_len, aid) :
+ __ieee80211_check_tim(tim, tim_len, aid);
+}
+
#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index ddb890174a0e..140f61ec0f5f 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* IEEE802.15.4-2003 specification
*
* Copyright (C) 2007, 2008 Siemens AG
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by:
* Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
@@ -52,6 +44,13 @@
#define IEEE802154_SHORT_ADDR_LEN 2
#define IEEE802154_PAN_ID_LEN 2
+/* Duration in superframe order */
+#define IEEE802154_MAX_SCAN_DURATION 14
+#define IEEE802154_ACTIVE_SCAN_DURATION 15
+/* Superframe duration in slots */
+#define IEEE802154_SUPERFRAME_PERIOD 16
+/* Various periods expressed in symbols */
+#define IEEE802154_SLOT_PERIOD 60
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
@@ -142,18 +141,46 @@ enum {
* a successful transmission.
*/
IEEE802154_SUCCESS = 0x0,
-
+ /* The requested operation failed. */
+ IEEE802154_MAC_ERROR = 0x1,
+ /* The requested operation has been cancelled. */
+ IEEE802154_CANCELLED = 0x2,
+ /*
+ * Device is ready to poll the coordinator for data in a
+ * non-beacon-enabled PAN.
+ */
+ IEEE802154_READY_FOR_POLL = 0x3,
+ /* Wrong frame counter. */
+ IEEE802154_COUNTER_ERROR = 0xdb,
+ /*
+ * The frame does not conform to the incoming key usage policy checking
+ * procedure.
+ */
+ IEEE802154_IMPROPER_KEY_TYPE = 0xdc,
+ /*
+ * The frame does not conform to the incoming security level usage
+ * policy checking procedure.
+ */
+ IEEE802154_IMPROPER_SECURITY_LEVEL = 0xdd,
+ /* Secured frame received with an empty Frame Version field. */
+ IEEE802154_UNSUPPORTED_LEGACY = 0xde,
+ /*
+ * A secured frame is received or must be sent, but security is not
+ * enabled in the device, or the Auxiliary Security Header has a
+ * security level of zero.
+ */
+ IEEE802154_UNSUPPORTED_SECURITY = 0xdf,
/* The beacon was lost following a synchronization request. */
- IEEE802154_BEACON_LOSS = 0xe0,
+ IEEE802154_BEACON_LOST = 0xe0,
/*
* A transmission could not take place due to activity on the
* channel, i.e., the CSMA-CA mechanism has failed.
*/
- IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
+ IEEE802154_CHANNEL_ACCESS_FAILURE = 0xe1,
/* The GTS request has been denied by the PAN coordinator. */
- IEEE802154_DENINED = 0xe2,
+ IEEE802154_DENIED = 0xe2,
/* The attempt to disable the transceiver has failed. */
- IEEE802154_DISABLE_TRX_FAIL = 0xe3,
+ IEEE802154_DISABLE_TRX_FAILURE = 0xe3,
/*
* The received frame induces a failed security check according to
* the security suite.
@@ -193,9 +220,9 @@ enum {
* A PAN identifier conflict has been detected and communicated to the
* PAN coordinator.
*/
- IEEE802154_PANID_CONFLICT = 0xee,
+ IEEE802154_PAN_ID_CONFLICT = 0xee,
/* A coordinator realignment command has been received. */
- IEEE802154_REALIGMENT = 0xef,
+ IEEE802154_REALIGNMENT = 0xef,
/* The transaction has expired and its information discarded. */
IEEE802154_TRANSACTION_EXPIRED = 0xf0,
/* There is no capacity to store the transaction. */
@@ -211,12 +238,73 @@ enum {
* A SET/GET request was issued with the identifier of a PIB attribute
* that is not supported.
*/
- IEEE802154_UNSUPPORTED_ATTR = 0xf4,
+ IEEE802154_UNSUPPORTED_ATTRIBUTE = 0xf4,
+ /* Missing source or destination address or address mode. */
+ IEEE802154_INVALID_ADDRESS = 0xf5,
+ /*
+ * MLME asked to turn the receiver on, but the on time duration is too
+ * big compared to the macBeaconOrder.
+ */
+ IEEE802154_ON_TIME_TOO_LONG = 0xf6,
+ /*
+ * MLME asked to turn the receiver on, but the request was delayed for
+ * too long before getting processed.
+ */
+ IEEE802154_PAST_TIME = 0xf7,
+ /*
+ * The StartTime parameter is nonzero, and the MLME is not currently
+ * tracking the beacon of the coordinator through which it is
+ * associated.
+ */
+ IEEE802154_TRACKING_OFF = 0xf8,
+ /*
+ * The index inside the hierarchical values in PIBAttribute is out of
+ * range.
+ */
+ IEEE802154_INVALID_INDEX = 0xf9,
+ /*
+ * The limit on the number of PAN descriptors discovered during a
+ * scan has been reached.
+ */
+ IEEE802154_LIMIT_REACHED = 0xfa,
+ /*
+ * The PIBAttribute parameter specifies an attribute that is a read-only
+ * attribute.
+ */
+ IEEE802154_READ_ONLY = 0xfb,
/*
* A request to perform a scan operation failed because the MLME was
* in the process of performing a previously initiated scan operation.
*/
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
+ /* The outgoing superframe overlaps the incoming superframe. */
+ IEEE802154_SUPERFRAME_OVERLAP = 0xfd,
+ /* Any other error situation. */
+ IEEE802154_SYSTEM_ERROR = 0xff,
+};
+
+/**
+ * enum ieee802154_filtering_level - Filtering levels applicable to a PHY
+ *
+ * @IEEE802154_FILTERING_NONE: No filtering at all, what is received is
+ * forwarded to the softMAC
+ * @IEEE802154_FILTERING_1_FCS: First filtering level, frames with an invalid
+ * FCS should be dropped
+ * @IEEE802154_FILTERING_2_PROMISCUOUS: Second filtering level, promiscuous
+ * mode as described in the spec, identical in terms of filtering to the
+ * level one on PHY side, but at the MAC level the frame should be
+ * forwarded to the upper layer directly
+ * @IEEE802154_FILTERING_3_SCAN: Third filtering level, scan related, where
+ * only beacons must be processed, all remaining traffic gets dropped
+ * @IEEE802154_FILTERING_4_FRAME_FIELDS: Fourth filtering level actually
+ * enforcing the validity of the content of the frame with various checks
+ */
+enum ieee802154_filtering_level {
+ IEEE802154_FILTERING_NONE,
+ IEEE802154_FILTERING_1_FCS,
+ IEEE802154_FILTERING_2_PROMISCUOUS,
+ IEEE802154_FILTERING_3_SCAN,
+ IEEE802154_FILTERING_4_FRAME_FIELDS,
};
/* frame control handling */
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 3355efc89781..10a1e81434cb 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -14,11 +15,6 @@
* Florian La Roche,
* Jonathan Layes <layes@loran.com>
* Arnaldo Carvalho de Melo <acme@conectiva.com.br> ARPHRD_HWX25
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_ARP_H
#define _LINUX_IF_ARP_H
@@ -31,7 +27,7 @@ static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
return (struct arphdr *)skb_network_header(skb);
}
-static inline int arp_hdr_len(struct net_device *dev)
+static inline unsigned int arp_hdr_len(const struct net_device *dev)
{
switch (dev->type) {
#if IS_ENABLED(CONFIG_FIREWIRE_NET)
@@ -52,8 +48,15 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_TUNNEL6:
case ARPHRD_SIT:
case ARPHRD_IPGRE:
+ case ARPHRD_IP6GRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
+ case ARPHRD_RAWIP:
+ case ARPHRD_PIMREG:
+ /* PPP adds its l2 header automatically in ppp_start_xmit().
+ * This makes it look like an l3 device to __bpf_redirect() and
+ * tcf_mirred_init().
+ */
+ case ARPHRD_PPP:
return false;
default:
return true;
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 3cd18ac0697f..c5fe3b2a53e8 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_BRIDGE_H
#define _LINUX_IF_BRIDGE_H
@@ -23,7 +19,14 @@ struct br_ip {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ip6;
#endif
- } u;
+ } src;
+ union {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+ unsigned char mac_addr[ETH_ALEN];
+ } dst;
__be16 proto;
__u16 vid;
};
@@ -49,20 +52,31 @@ struct br_ip_list {
#define BR_MULTICAST_TO_UNICAST BIT(12)
#define BR_VLAN_TUNNEL BIT(13)
#define BR_BCAST_FLOOD BIT(14)
+#define BR_NEIGH_SUPPRESS BIT(15)
+#define BR_ISOLATED BIT(16)
+#define BR_MRP_AWARE BIT(17)
+#define BR_MRP_LOST_CONT BIT(18)
+#define BR_MRP_LOST_IN_CONT BIT(19)
+#define BR_TX_FWD_OFFLOAD BIT(20)
+#define BR_PORT_LOCKED BIT(21)
+#define BR_PORT_MAB BIT(22)
+#define BR_NEIGH_VLAN_SUPPRESS BIT(23)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
-extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-
-typedef int br_should_route_hook_t(struct sk_buff *skb);
-extern br_should_route_hook_t __rcu *br_should_route_hook;
+struct net_bridge;
+void brioctl_set(int (*hook)(struct net *net, unsigned int cmd,
+ void __user *uarg));
+int br_ioctl_call(struct net *net, unsigned int cmd, void __user *uarg);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+bool br_multicast_has_router_adjacent(struct net_device *dev, int proto);
bool br_multicast_enabled(const struct net_device *dev);
+bool br_multicast_router(const struct net_device *dev);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
@@ -79,19 +93,121 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
{
return false;
}
+
+static inline bool br_multicast_has_router_adjacent(struct net_device *dev,
+ int proto)
+{
+ return true;
+}
+
static inline bool br_multicast_enabled(const struct net_device *dev)
{
return false;
}
+static inline bool br_multicast_router(const struct net_device *dev)
+{
+ return false;
+}
#endif
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
bool br_vlan_enabled(const struct net_device *dev);
+int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid);
+int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid);
+int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto);
+int br_vlan_get_info(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo);
+int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo);
+bool br_mst_enabled(const struct net_device *dev);
+int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids);
+int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
return false;
}
+
+static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
+{
+ return -EINVAL;
+}
+
+static inline int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
+{
+ return -EINVAL;
+}
+
+static inline int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
+{
+ return -EINVAL;
+}
+
+static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
+{
+ return -EINVAL;
+}
+
+static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
+{
+ return -EINVAL;
+}
+
+static inline bool br_mst_enabled(const struct net_device *dev)
+{
+ return false;
+}
+
+static inline int br_mst_get_info(const struct net_device *dev, u16 msti,
+ unsigned long *vids)
+{
+ return -EINVAL;
+}
+static inline int br_mst_get_state(const struct net_device *dev, u16 msti,
+ u8 *state)
+{
+ return -EINVAL;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_BRIDGE)
+struct net_device *br_fdb_find_port(const struct net_device *br_dev,
+ const unsigned char *addr,
+ __u16 vid);
+void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
+bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
+u8 br_port_get_stp_state(const struct net_device *dev);
+clock_t br_get_ageing_time(const struct net_device *br_dev);
+#else
+static inline struct net_device *
+br_fdb_find_port(const struct net_device *br_dev,
+ const unsigned char *addr,
+ __u16 vid)
+{
+ return NULL;
+}
+
+static inline void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
+{
+}
+
+static inline bool
+br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+{
+ return false;
+}
+
+static inline u8 br_port_get_stp_state(const struct net_device *dev)
+{
+ return BR_STATE_DISABLED;
+}
+
+static inline clock_t br_get_ageing_time(const struct net_device *br_dev)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h
index d984694c384d..07f9b660b741 100644
--- a/include/linux/if_eql.h
+++ b/include/linux/if_eql.h
@@ -21,11 +21,13 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
+#include <net/net_trackers.h>
#include <uapi/linux/if_eql.h>
typedef struct slave {
struct list_head list;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
long priority;
long priority_bps;
long priority_Bps;
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 548fd535fd02..61b7335aa037 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -11,11 +12,6 @@
* Donald Becker, <becker@super.org>
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_ETHER_H
#define _LINUX_IF_ETHER_H
@@ -23,11 +19,22 @@
#include <linux/skbuff.h>
#include <uapi/linux/if_ether.h>
+/* XX:XX:XX:XX:XX:XX */
+#define MAC_ADDR_STR_LEN (3 * ETH_ALEN - 1)
+
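Illustration only: MAC_ADDR_STR_LEN excludes the terminating NUL, so a
formatting buffer needs one extra byte, e.g. with the kernel's %pM specifier:

	char buf[MAC_ADDR_STR_LEN + 1];

	snprintf(buf, sizeof(buf), "%pM", dev->dev_addr);
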
static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb->data;
+}
+
static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_inner_mac_header(skb);
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h
index f5550b3eeeab..c796f452d646 100644
--- a/include/linux/if_fddi.h
+++ b/include/linux/if_fddi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -15,11 +16,6 @@
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
* Peter De Schrijver, <stud11@cc4.kuleuven.ac.be>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_FDDI_H
#define _LINUX_IF_FDDI_H
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
deleted file mode 100644
index 46df7e565d6f..000000000000
--- a/include/linux/if_frad.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are
- * created for each DLCI associated with a FRAD. The FRAD driver
- * is not truly a network device, but the lower level device
- * handler. This allows other FRAD manufacturers to use the DLCI
- * code, including its RFC1490 encapsulation alongside the current
- * implementation for the Sangoma cards.
- *
- * Version: @(#)if_ifrad.h 0.15 31 Mar 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan changed structure defs (packed)
- * re-arranged flags
- * added DLCI_RET vars
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _FRAD_H_
-#define _FRAD_H_
-
-#include <uapi/linux/if_frad.h>
-
-
-#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE)
-
-/* these are the fields of an RFC 1490 header */
-struct frhdr
-{
- unsigned char control;
-
- /* for IP packets, this can be the NLPID */
- unsigned char pad;
-
- unsigned char NLPID;
- unsigned char OUI[3];
- __be16 PID;
-
-#define IP_NLPID pad
-} __packed;
-
-/* see RFC 1490 for the definition of the following */
-#define FRAD_I_UI 0x03
-
-#define FRAD_P_PADDING 0x00
-#define FRAD_P_Q933 0x08
-#define FRAD_P_SNAP 0x80
-#define FRAD_P_CLNP 0x81
-#define FRAD_P_IP 0xCC
-
-struct dlci_local
-{
- struct net_device *master;
- struct net_device *slave;
- struct dlci_conf config;
- int configured;
- struct list_head list;
-
- /* callback function */
- void (*receive)(struct sk_buff *skb, struct net_device *);
-};
-
-struct frad_local
-{
- /* devices which this FRAD is slaved to */
- struct net_device *master[CONFIG_DLCI_MAX];
- short dlci[CONFIG_DLCI_MAX];
-
- struct frad_conf config;
- int configured; /* has this device been configured */
- int initialized; /* mem_start, port, irq set ? */
-
- /* callback functions */
- int (*activate)(struct net_device *, struct net_device *);
- int (*deactivate)(struct net_device *, struct net_device *);
- int (*assoc)(struct net_device *, struct net_device *);
- int (*deassoc)(struct net_device *, struct net_device *);
- int (*dlci_conf)(struct net_device *, struct net_device *, int get);
-
- /* fields that are used by the Sangoma SDLA cards */
- struct timer_list timer;
- int type; /* adapter type */
- int state; /* state of the S502/8 control latch */
- int buffer; /* current buffer for S508 firmware */
-};
-
-#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */
-
-extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *));
-
-#endif
diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h
new file mode 100644
index 000000000000..f4cf2dd36d19
--- /dev/null
+++ b/include/linux/if_hsr.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_HSR_H_
+#define _LINUX_IF_HSR_H_
+
+#include <linux/types.h>
+
+struct net_device;
+
+/* used to differentiate various protocols */
+enum hsr_version {
+ HSR_V0 = 0,
+ HSR_V1,
+ PRP_V1,
+};
+
+enum hsr_port_type {
+ HSR_PT_NONE = 0, /* Must be 0, used by framereg */
+ HSR_PT_SLAVE_A,
+ HSR_PT_SLAVE_B,
+ HSR_PT_INTERLINK,
+ HSR_PT_MASTER,
+ HSR_PT_PORTS, /* This must be the last item in the enum */
+};
+
+/* HSR Tag.
+ * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
+ * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
+ * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
+ * encapsulated protocol } instead.
+ *
+ * Field names as defined in the IEC:2010 standard for HSR.
+ */
+struct hsr_tag {
+ __be16 path_and_LSDU_size;
+ __be16 sequence_nr;
+ __be16 encap_proto;
+} __packed;
+
+#define HSR_HLEN 6
+
+#if IS_ENABLED(CONFIG_HSR)
+extern bool is_hsr_master(struct net_device *dev);
+extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
+struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ enum hsr_port_type pt);
+int hsr_get_port_type(struct net_device *hsr_dev, struct net_device *dev,
+ enum hsr_port_type *type);
+#else
+static inline bool is_hsr_master(struct net_device *dev)
+{
+ return false;
+}
+static inline int hsr_get_version(struct net_device *dev,
+ enum hsr_version *ver)
+{
+ return -EINVAL;
+}
+
+static inline struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ enum hsr_port_type pt)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int hsr_get_port_type(struct net_device *hsr_dev,
+ struct net_device *dev,
+ enum hsr_port_type *type)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_HSR */
+
+#endif /*_LINUX_IF_HSR_H_*/
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 0b17c585b5cd..622658dfbf0a 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IF_LINK_H
#define _LINUX_IF_LINK_H
@@ -12,6 +13,8 @@ struct ifla_vf_stats {
__u64 tx_bytes;
__u64 broadcast;
__u64 multicast;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
};
struct ifla_vf_info {
diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h
deleted file mode 100644
index 81e434c50790..000000000000
--- a/include/linux/if_ltalk.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __LINUX_LTALK_H
-#define __LINUX_LTALK_H
-
-#include <uapi/linux/if_ltalk.h>
-
-extern struct net_device *alloc_ltalkdev(int sizeof_priv);
-#endif
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index c9ec1343d187..0f7281e3e448 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IF_MACVLAN_H
#define _LINUX_IF_MACVLAN_H
@@ -10,13 +11,6 @@
#include <linux/u64_stats_sync.h>
struct macvlan_port;
-struct macvtap_queue;
-
-/*
- * Maximum times a macvtap device can be opened. This can be used to
- * configure the number of receive queue, e.g. for multiqueue virtio.
- */
-#define MAX_TAP_QUEUES 256
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
@@ -27,7 +21,8 @@ struct macvlan_dev {
struct hlist_node hlist;
struct macvlan_port *port;
struct net_device *lowerdev;
- void *fwd_priv;
+ netdevice_tracker dev_tracker;
+ void *accel_priv;
struct vlan_pcpu_stats __percpu *pcpu_stats;
DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
@@ -35,19 +30,11 @@ struct macvlan_dev {
netdev_features_t set_features;
enum macvlan_mode mode;
u16 flags;
- /* This array tracks active taps. */
- struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
- /* This list tracks all taps (both enabled and disabled) */
- struct list_head queue_list;
- int numvtaps;
- int numqueues;
- netdev_features_t tap_features;
- int minor;
- int nest_level;
+ unsigned int macaddr_count;
+ u32 bc_queue_len_req;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
- unsigned int macaddr_count;
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
@@ -57,13 +44,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
if (likely(success)) {
struct vlan_pcpu_stats *pcpu_stats;
- pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+ pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += len;
+ u64_stats_inc(&pcpu_stats->rx_packets);
+ u64_stats_add(&pcpu_stats->rx_bytes, len);
if (multicast)
- pcpu_stats->rx_multicast++;
+ u64_stats_inc(&pcpu_stats->rx_multicast);
u64_stats_update_end(&pcpu_stats->syncp);
+ put_cpu_ptr(vlan->pcpu_stats);
} else {
this_cpu_inc(vlan->pcpu_stats->rx_errors);
}
@@ -71,12 +59,11 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
extern void macvlan_common_setup(struct net_device *dev);
-extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[]);
+struct rtnl_newlink_params;
-extern void macvlan_count_rx(const struct macvlan_dev *vlan,
- unsigned int len, bool success,
- bool multicast);
+extern int macvlan_common_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
+ struct netlink_ext_ack *extack);
extern void macvlan_dellink(struct net_device *dev, struct list_head *head);
@@ -99,4 +86,27 @@ macvlan_dev_real_dev(const struct net_device *dev)
}
#endif
+static inline void *macvlan_accel_priv(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->accel_priv;
+}
+
+static inline bool macvlan_supports_dest_filter(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode == MACVLAN_MODE_PRIVATE ||
+ macvlan->mode == MACVLAN_MODE_VEPA ||
+ macvlan->mode == MACVLAN_MODE_BRIDGE;
+}
+
+static inline int macvlan_release_l2fw_offload(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ macvlan->accel_priv = NULL;
+ return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
#endif /* _LINUX_IF_MACVLAN_H */
diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h
index bbcdb0a767d8..2d8486168ec5 100644
--- a/include/linux/if_phonet.h
+++ b/include/linux/if_phonet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* File: if_phonet.h
*
@@ -10,5 +11,5 @@
#include <uapi/linux/if_phonet.h>
-extern struct header_ops phonet_header_ops;
+extern const struct header_ops phonet_header_ops;
#endif
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 0fb71e532b2c..c87efd333faa 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -1,15 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/***************************************************************************
* Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661)
*
* This file supplies definitions required by the PPP over L2TP driver
* (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c
- *
- * License:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#ifndef __LINUX_IF_PPPOL2TP_H
#define __LINUX_IF_PPPOL2TP_H
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index ba7a9b0c7c57..db45d6f1c4f4 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -1,16 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/***************************************************************************
* Linux PPP over X - Generic PPP transport layer sockets
* Linux PPP over Ethernet (PPPoE) Socket Implementation (RFC 2516)
*
* This file supplies definitions required by the PPP over Ethernet driver
* (pppox.c). All version information wrt this file is located in pppox.c
- *
- * License:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#ifndef __LINUX_IF_PPPOX_H
#define __LINUX_IF_PPPOX_H
@@ -49,7 +43,7 @@ struct pppox_sock {
/* struct sock must be the first member of pppox_sock */
struct sock sk;
struct ppp_channel chan;
- struct pppox_sock *next; /* for hash table */
+ struct pppox_sock __rcu *next; /* for hash table */
union {
struct pppoe_opt pppoe;
struct pptp_opt pptp;
@@ -84,6 +78,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
extern void unregister_pppox_proto(int proto_num);
extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32 _IOW(0xB1, 0, compat_size_t)
/* PPPoX socket states */
enum {
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
new file mode 100644
index 000000000000..c44bf6e80ecb
--- /dev/null
+++ b/include/linux/if_rmnet.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LINUX_IF_RMNET_H_
+#define _LINUX_IF_RMNET_H_
+
+#include <linux/types.h>
+
+struct rmnet_map_header {
+ u8 flags; /* MAP_CMD_FLAG, MAP_PAD_LEN_MASK */
+ u8 mux_id;
+ __be16 pkt_len; /* Length of packet, including pad */
+} __aligned(1);
+
+/* rmnet_map_header flags field:
+ * PAD_LEN: number of pad bytes following packet data
+ * CMD: 1 = packet contains a MAP command; 0 = packet contains data
+ * NEXT_HEADER: 1 = packet contains V5 CSUM header; 0 = no V5 CSUM header
+ */
+#define MAP_PAD_LEN_MASK GENMASK(5, 0)
+#define MAP_NEXT_HEADER_FLAG BIT(6)
+#define MAP_CMD_FLAG BIT(7)
+
+struct rmnet_map_dl_csum_trailer {
+ u8 reserved1;
+ u8 flags; /* MAP_CSUM_DL_VALID_FLAG */
+ __be16 csum_start_offset;
+ __be16 csum_length;
+ __sum16 csum_value;
+} __aligned(1);
+
+/* rmnet_map_dl_csum_trailer flags field:
+ * VALID: 1 = checksum and length valid; 0 = ignore them
+ */
+#define MAP_CSUM_DL_VALID_FLAG BIT(0)
+
+struct rmnet_map_ul_csum_header {
+ __be16 csum_start_offset;
+ __be16 csum_info; /* MAP_CSUM_UL_* */
+} __aligned(1);
+
+/* csum_info field:
+ * OFFSET: where (offset in bytes) to insert computed checksum
+ * UDP: 1 = UDP checksum (zero checksum means no checksum)
+ * ENABLED: 1 = checksum computation requested
+ */
+#define MAP_CSUM_UL_OFFSET_MASK GENMASK(13, 0)
+#define MAP_CSUM_UL_UDP_FLAG BIT(14)
+#define MAP_CSUM_UL_ENABLED_FLAG BIT(15)
+
+/* MAP CSUM headers */
+struct rmnet_map_v5_csum_header {
+ u8 header_info;
+ u8 csum_info;
+ __be16 reserved;
+} __aligned(1);
+
+/* v5 header_info field
+ * NEXT_HEADER: represents whether there is any next header
+ * HEADER_TYPE: represents the type of this header
+ *
+ * csum_info field
+ * CSUM_VALID_OR_REQ:
+ * 1 = for UL, checksum computation is requested.
+ * 1 = for DL, the checksum was validated and found to be valid
+ */
+
+#define MAPV5_HDRINFO_NXT_HDR_FLAG BIT(0)
+#define MAPV5_HDRINFO_HDR_TYPE_FMASK GENMASK(7, 1)
+#define MAPV5_CSUMINFO_VALID_FLAG BIT(7)
+
+#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2
+#endif /* !(_LINUX_IF_RMNET_H_) */
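A hedged decode sketch for the MAPv1 header above (assumes <linux/bitfield.h>
for FIELD_GET; not part of the patch):

	const struct rmnet_map_header *maph = (const void *)skb->data;
	u8 pad = FIELD_GET(MAP_PAD_LEN_MASK, maph->flags);
	u16 payload_len = ntohs(maph->pkt_len) - pad;

	if (maph->flags & MAP_CMD_FLAG) {
		/* MAP command packet rather than data */
	}
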
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 4837157da0dc..553552fa635c 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -1,27 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IF_TAP_H_
#define _LINUX_IF_TAP_H_
+#include <net/sock.h>
+#include <linux/skb_array.h>
+
+struct file;
+struct socket;
+
#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
-struct skb_array *tap_get_skb_array(struct file *file);
+struct ptr_ring *tap_get_ptr_ring(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
-struct file;
-struct socket;
static inline struct socket *tap_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tap_get_skb_array(struct file *f)
+static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_TAP */
-#include <net/sock.h>
-#include <linux/skb_array.h>
-
+/*
+ * Maximum times a tap device can be opened. This can be used to
+ * configure the number of receive queues, e.g. for multiqueue virtio.
+ */
#define MAX_TAP_QUEUES 256
struct tap_queue;
@@ -57,7 +63,6 @@ struct tap_dev {
struct tap_queue {
struct sock sk;
struct socket sock;
- struct socket_wq wq;
int vnet_hdr_sz;
struct tap_dev __rcu *tap;
struct file *file;
@@ -65,7 +70,7 @@ struct tap_queue {
u16 queue_index;
bool enabled;
struct list_head next;
- struct skb_array skb_array;
+ struct ptr_ring ring;
};
rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
@@ -73,8 +78,8 @@ void tap_del_queues(struct tap_dev *tap);
int tap_get_minor(dev_t major, struct tap_dev *tap);
void tap_free_minor(dev_t major, struct tap_dev *tap);
int tap_queue_resize(struct tap_dev *tap);
-int tap_create_cdev(struct cdev *tap_cdev,
- dev_t *tap_major, const char *device_name);
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+ const char *device_name, struct module *module);
void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
#endif /*_LINUX_IF_TAP_H_*/
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 30294603526f..ce97d891cf72 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -1,11 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/if_team.h - Network team device driver header
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_
@@ -16,11 +12,11 @@
#include <uapi/linux/if_team.h>
struct team_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
@@ -71,9 +67,14 @@ struct team_port {
u16 queue_id;
struct list_head qom_list; /* node in queue override mapping list */
struct rcu_head rcu;
- long mode_priv[0];
+ long mode_priv[];
};
+static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
static inline bool team_port_enabled(struct team_port *port)
{
return port->index != -1;
@@ -84,14 +85,24 @@ static inline bool team_port_txable(struct team_port *port)
return port->linkup && team_port_enabled(port);
}
+static inline bool team_port_dev_txable(const struct net_device *port_dev)
+{
+ struct team_port *port;
+ bool txable;
+
+ rcu_read_lock();
+ port = team_port_get_rcu(port_dev);
+ txable = port ? team_port_txable(port) : false;
+ rcu_read_unlock();
+
+ return txable;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
struct sk_buff *skb)
{
- struct netpoll *np = port->np;
-
- if (np)
- netpoll_send_skb(np, skb);
+ netpoll_send_skb(port->np, skb);
}
#else
static inline void team_netpoll_send_skb(struct team_port *port,
@@ -151,8 +162,8 @@ struct team_option {
bool per_port;
unsigned int array_size; /* != 0 means the option is array */
enum team_option_type type;
- int (*init)(struct team *team, struct team_option_inst_info *info);
- int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
+ void (*init)(struct team *team, struct team_option_inst_info *info);
+ void (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
@@ -178,7 +189,7 @@ struct team {
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
- struct mutex lock; /* used for overall locking, e.g. port lists write */
+ const struct header_ops *header_ops_cache;
/*
* List of enabled ports and their count
@@ -197,6 +208,7 @@ struct team {
bool queue_override_enabled;
struct list_head *qom_lists; /* array of queue override mapping lists */
bool port_mtu_change_allowed;
+ bool notifier_ctx;
struct {
unsigned int count;
unsigned int interval; /* in ms */
@@ -247,7 +259,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
- int en_port_count = ACCESS_ONCE(team->en_port_count);
+ int en_port_count = READ_ONCE(team->en_port_count);
if (unlikely(!en_port_count))
return 0;
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index bf9bdf42d577..80166eb62f41 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -1,37 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Universal TUN/TAP device driver.
* Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __IF_TUN_H
#define __IF_TUN_H
#include <uapi/linux/if_tun.h>
+#include <uapi/linux/virtio_net.h>
+
+#define TUN_XDP_FLAG 0x1UL
+
+#define TUN_MSG_UBUF 1
+#define TUN_MSG_PTR 2
+struct tun_msg_ctl {
+ unsigned short type;
+ unsigned short num;
+ void *ptr;
+};
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
-struct skb_array *tun_get_skb_array(struct file *file);
+struct ptr_ring *tun_get_tx_ring(struct file *file);
+
+static inline bool tun_is_xdp_frame(void *ptr)
+{
+ return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
+{
+ return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
+}
+
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
+
+void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
struct file;
struct socket;
+
static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tun_get_skb_array(struct file *f)
+
+static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+
+static inline bool tun_is_xdp_frame(void *ptr)
+{
+ return false;
+}
+
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
+{
+ return NULL;
+}
+
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
+{
+ return NULL;
+}
+
+static inline void tun_ptr_free(void *ptr)
+{
+}
#endif /* CONFIG_TUN */
#endif /* __IF_TUN_H */
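The TUN_XDP_FLAG helpers above tag the low bit of each ring entry so a single
ptr_ring can carry both sk_buffs and xdp_frames; a consumer would demultiplex
roughly like this (hedged sketch, not from the patch):

	void *ptr = ptr_ring_consume(ring);

	if (ptr) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *frame = tun_ptr_to_xdp(ptr);
			/* process the XDP frame */
		} else {
			struct sk_buff *skb = ptr;
			/* process the skb */
		}
	}
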
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 712710bc0580..26606523eca4 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IF_TUNNEL_H_
#define _IF_TUNNEL_H_
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 5e6a2d4dc366..f7f34eb15e06 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* VLAN An implementation of 802.1Q VLAN tagging.
*
* Authors: Ben Greear <greearb@candelatech.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_
@@ -30,6 +25,8 @@
#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
+#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */
+
/*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
@@ -49,8 +46,10 @@ struct vlan_hdr {
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
+ struct_group(addrs,
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ );
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
@@ -63,25 +62,52 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + vlan_eth_hdr()
+ */
+static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct vlan_ethhdr *)skb->data;
+}
+
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
-#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
-#define VLAN_TAG_PRESENT VLAN_CFI_MASK
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_N_VID 4096
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline bool is_vlan_dev(const struct net_device *dev)
+#define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all)
+#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
+#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK))
+#define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
+
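Usage sketch (illustrative only): the accessors split the hardware-offloaded
TCI on receive, e.g.

	if (skb_vlan_tag_present(skb)) {
		u16 vid = skb_vlan_tag_get_id(skb);	/* 12-bit VLAN ID */
		u8 pcp = skb_vlan_tag_get_prio(skb);	/* 3-bit priority */
	}
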
+static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
- return dev->priv_flags & IFF_802_1Q_VLAN;
+ ASSERT_RTNL();
+ return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
}
-#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
-#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
-#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
+static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
+}
+
+static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
+}
+
+static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
+}
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -95,20 +121,23 @@ static inline bool is_vlan_dev(const struct net_device *dev)
* @tx_dropped: number of tx drops
*/
struct vlan_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_errors;
u32 tx_dropped;
};
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
+extern int vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid,
+ void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
@@ -138,9 +167,11 @@ struct netpoll;
* @vlan_id: VLAN identifier
* @flags: device flags
* @real_dev: underlying netdevice
+ * @dev_tracker: refcount tracker for @real_dev reference
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
* @vlan_pcpu_stats: ptr to percpu rx stats
+ * @netpoll: netpoll instance "propagated" down to @real_dev
*/
struct vlan_dev_priv {
unsigned int nr_ingress_mappings;
@@ -153,6 +184,8 @@ struct vlan_dev_priv {
u16 flags;
struct net_device *real_dev;
+ netdevice_tracker dev_tracker;
+
unsigned char real_dev_addr[ETH_ALEN];
struct proc_dir_entry *dent;
@@ -160,9 +193,13 @@ struct vlan_dev_priv {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
- unsigned int nest_level;
};
+static inline bool is_vlan_dev(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
return netdev_priv(dev);
@@ -199,12 +236,12 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
extern bool vlan_uses_dev(const struct net_device *dev);
-static inline int vlan_get_encap_level(struct net_device *dev)
+#else
+static inline bool is_vlan_dev(const struct net_device *dev)
{
- BUG_ON(!is_vlan_dev(dev));
- return vlan_dev_priv(dev)->nest_level;
+ return false;
}
-#else
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
@@ -212,21 +249,29 @@ __vlan_find_dev_deep_rcu(struct net_device *real_dev,
return NULL;
}
+static inline int
+vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid, void *arg),
+ void *arg)
+{
+ return 0;
+}
+
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return NULL;
}
static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
@@ -265,18 +310,13 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
{
return false;
}
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
- BUG();
- return 0;
-}
#endif
/**
* eth_type_vlan - check for valid vlan ether type.
* @ethertype: ether type to check
*
- * Returns true if the ether type is a vlan ether type.
+ * Returns: true if the ether type is a vlan ether type.
*/
static inline bool eth_type_vlan(__be16 ethertype)
{
@@ -300,32 +340,49 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
}
/**
- * __vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_inner_tag - inner VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
*
- * Inserts the VLAN tag into @skb as part of the payload
- * Returns error if skb_cow_head failes.
- *
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Returns: error if skb_cow_head fails.
*/
-static inline int __vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci,
+ unsigned int mac_len)
{
+ const u8 meta_len = mac_len > ETH_TLEN ? skb_metadata_len(skb) : 0;
struct vlan_ethhdr *veth;
- if (skb_cow_head(skb, VLAN_HLEN) < 0)
+ if (skb_cow_head(skb, meta_len + VLAN_HLEN) < 0)
return -ENOMEM;
- veth = skb_push(skb, VLAN_HLEN);
+ skb_push(skb, VLAN_HLEN);
+
+ /* Move the mac header sans proto to the beginning of the new header. */
+ if (likely(mac_len > ETH_TLEN))
+ skb_postpush_data_move(skb, VLAN_HLEN, mac_len - ETH_TLEN);
+ if (skb_mac_header_was_set(skb))
+ skb->mac_header -= VLAN_HLEN;
- /* Move the mac addresses to the beginning of the new header. */
- memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
- skb->mac_header -= VLAN_HLEN;
+ veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
/* first, the ethernet type */
- veth->h_vlan_proto = vlan_proto;
+ if (likely(mac_len >= ETH_TLEN)) {
+ /* h_vlan_encapsulated_proto should already be populated, and
+ * skb->data has space for h_vlan_proto
+ */
+ veth->h_vlan_proto = vlan_proto;
+ } else {
+ /* h_vlan_encapsulated_proto should not be populated, and
+ * skb->data has no space for h_vlan_proto
+ */
+ veth->h_vlan_encapsulated_proto = skb->protocol;
+ }
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
@@ -334,25 +391,47 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
}
/**
- * vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Does not change skb->protocol so this function can be used during receive.
+ *
+ * Returns: error if skb_cow_head fails.
+ */
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
+ * vlan_insert_inner_tag - inner VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
+ *
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto,
+ u16 vlan_tci,
+ unsigned int mac_len)
{
int err;
- err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+ err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
if (err) {
dev_kfree_skb_any(skb);
return NULL;
@@ -361,16 +440,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
}
/**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. This might change skb->head.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
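Given the skb_unshare()-style error convention documented above, a caller
sketch (hypothetical) looks like:

	skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);
	if (!skb)
		return NET_XMIT_DROP;	/* original skb was already freed */
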
+/**
* vlan_insert_tag_set_proto - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
__be16 vlan_proto,
@@ -382,6 +485,29 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
return skb;
}
+/**
+ * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
+ * @skb: skbuff to clear
+ *
+ * Clears the VLAN information from @skb
+ */
+static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
+{
+ skb->vlan_all = 0;
+}
+
+/**
+ * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
+ * @dst: skbuff to copy to
+ * @src: skbuff to copy from
+ *
+ * Copies VLAN information from @src to @dst (for branchless code)
+ */
+static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
+{
+ dst->vlan_all = src->vlan_all;
+}
+
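+ The two helpers above let hot paths manage the hardware-accelerated VLAN
+ state through a single word-sized field. A minimal sketch of how a
+ forwarding path might use them; the function and the strip policy are
+ invented for illustration:
+
+	static void fwd_copy_vlan_state(struct sk_buff *dst,
+					const struct sk_buff *src, bool strip)
+	{
+		if (strip)
+			__vlan_hwaccel_clear_tag(dst);	/* egress untagged */
+		else
+			__vlan_hwaccel_copy_tag(dst, src); /* one store of proto + TCI */
+	}
+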
/*
* __vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
@@ -396,7 +522,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
if (likely(skb))
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
return skb;
}
@@ -412,7 +538,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
skb->vlan_proto = vlan_proto;
- skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+ skb->vlan_tci = vlan_tci;
}
/**
@@ -420,14 +546,14 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if the skb is not of VLAN type
+ * Returns: error if the skb is not of VLAN type
*/
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
- struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
+ struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
if (!eth_type_vlan(veth->h_vlan_proto))
- return -EINVAL;
+ return -ENODATA;
*vlan_tci = ntohs(veth->h_vlan_TCI);
return 0;
@@ -438,7 +564,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if @skb->vlan_tci is not set correctly
+ * Returns: error if @skb->vlan_tci is not set correctly
*/
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
@@ -448,18 +574,16 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
return 0;
} else {
*vlan_tci = 0;
- return -EINVAL;
+ return -ENODATA;
}
}
-#define HAVE_VLAN_GET_TAG
-
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if the skb is not VLAN tagged
+ * Returns: error if the skb is not VLAN tagged
*/
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
@@ -471,18 +595,21 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
}
/**
- * vlan_get_protocol - get protocol EtherType.
+ * __vlan_get_protocol_offset() - get protocol EtherType.
* @skb: skbuff to query
* @type: first vlan protocol
+ * @mac_offset: MAC offset
* @depth: buffer to store length of eth and vlan tags in bytes
*
- * Returns the EtherType of the packet, regardless of whether it is
+ * Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
- int *depth)
+static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
+ __be16 type,
+ int mac_offset,
+ int *depth)
{
- unsigned int vlan_depth = skb->mac_len;
+ unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
/* if type is 802.1Q/AD then the header should already be
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
@@ -497,13 +624,13 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
vlan_depth = ETH_HLEN;
}
do {
- struct vlan_hdr *vh;
+ struct vlan_hdr vhdr, *vh;
- if (unlikely(!pskb_may_pull(skb,
- vlan_depth + VLAN_HLEN)))
+ vh = skb_header_pointer(skb, mac_offset + vlan_depth,
+ sizeof(vhdr), &vhdr);
+ if (unlikely(!vh || !--parse_depth))
return 0;
- vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
} while (eth_type_vlan(type));
@@ -515,18 +642,55 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
return type;
}
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ int *depth)
+{
+ return __vlan_get_protocol_offset(skb, type, 0, depth);
+}
+
/**
* vlan_get_protocol - get protocol EtherType.
* @skb: skbuff to query
*
- * Returns the EtherType of the packet, regardless of whether it is
+ * Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
return __vlan_get_protocol(skb, skb->protocol, NULL);
}
+/* This version of __vlan_get_protocol() also pulls mac header in skb->head */
+static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
+ __be16 type, int *depth)
+{
+ int maclen;
+
+ type = __vlan_get_protocol(skb, type, &maclen);
+
+ if (type) {
+ if (!pskb_may_pull(skb, maclen))
+ type = 0;
+ else if (depth)
+ *depth = maclen;
+ }
+ return type;
+}
+
+/* A getter for the SKB protocol field which will handle VLAN tags consistently
+ * whether VLAN acceleration is enabled or not.
+ */
+static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
+{
+ if (!skip_vlan)
+ /* VLAN acceleration strips the VLAN header from the skb and
+ * moves it to skb->vlan_proto
+ */
+ return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
+
+ return vlan_get_protocol(skb);
+}
+
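+ A hedged usage sketch of skb_protocol(); the classifier below is invented,
+ but it shows why callers pass skip_vlan=true when they want the
+ encapsulated EtherType regardless of where the tag lives:
+
+	static bool pkt_is_ipv4(const struct sk_buff *skb)
+	{
+		/* skip_vlan=true walks past accelerated and in-payload tags */
+		return skb_protocol(skb, true) == htons(ETH_P_IP);
+	}
+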
static inline void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
@@ -562,10 +726,29 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
}
/**
+ * vlan_remove_tag - remove outer VLAN tag from payload
+ * @skb: skbuff to remove tag from
+ * @vlan_tci: buffer to store value
+ *
+ * Expects the skb to contain a VLAN tag in the payload, and to have skb->data
+ * pointing at the MAC header.
+ */
+static inline void vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
+{
+ struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+
+ *vlan_tci = ntohs(vhdr->h_vlan_TCI);
+
+ vlan_set_encap_proto(skb, vhdr);
+ __skb_pull(skb, VLAN_HLEN);
+ skb_postpull_data_move(skb, VLAN_HLEN, 2 * ETH_ALEN);
+}
+
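+ A sketch of the calling convention vlan_remove_tag() expects: skb->data at
+ the MAC header and a tag known to be in the payload. The receive helper
+ below is illustrative, not part of this patch:
+
+	static void rx_pop_outer_vlan(struct sk_buff *skb)
+	{
+		__be16 proto = skb->protocol;
+		u16 tci;
+
+		if (!eth_type_vlan(proto))
+			return;
+
+		vlan_remove_tag(skb, &tci);
+		/* tag removed from payload; move it to the hwaccel fields */
+		__vlan_hwaccel_put_tag(skb, proto, tci);
+	}
+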
+/**
* skb_vlan_tagged - check if skb is vlan tagged.
* @skb: skbuff to query
*
- * Returns true if the skb is tagged, regardless of whether it is hardware
+ * Returns: true if the skb is tagged, regardless of whether it is hardware
* accelerated or not.
*/
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
@@ -581,10 +764,10 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
* @skb: skbuff to query
*
- * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * Returns: true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
@@ -594,7 +777,10 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
if (likely(!eth_type_vlan(protocol)))
return false;
- veh = (struct vlan_ethhdr *)skb->data;
+ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+ return false;
+
+ veh = skb_vlan_eth_hdr(skb);
protocol = veh->h_vlan_encapsulated_proto;
}
@@ -609,9 +795,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
* @skb: skbuff to query
* @features: features to be checked
*
- * Returns features without unsafe ones if the skb has multiple tags.
+ * Returns: features without unsafe ones if the skb has multiple tags.
*/
-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {
@@ -633,9 +819,11 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
* @h1: Pointer to vlan header
* @h2: Pointer to vlan header
*
- * Compare two vlan headers, returns 0 if equal.
+ * Compare two vlan headers.
*
* Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
+ *
+ * Return: 0 if equal, arbitrary non-zero value if not equal.
*/
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
const struct vlan_hdr *h2)
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 97caf1821de8..073b30a9b850 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux NET3: Internet Group Management Protocol [IGMP]
*
@@ -5,12 +6,6 @@
* Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Extended to talk the BSD extended IGMP protocol of mrouted 3.6
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IGMP_H
#define _LINUX_IGMP_H
@@ -18,7 +13,9 @@
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/in.h>
+#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/sockptr.h>
#include <uapi/linux/igmp.h>
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
@@ -42,12 +39,9 @@ struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- __be32 sl_addr[0];
+ __be32 sl_addr[] __counted_by(sl_max);
};
-#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \
- (count) * sizeof(__be32))
-
#define IP_SFBLOCK 10 /* allocate this many at once */
/* ip_mc_socklist is real list now. Speed is not argument;
@@ -64,8 +58,8 @@ struct ip_mc_socklist {
struct ip_sf_list {
struct ip_sf_list *sf_next;
- __be32 sf_inaddr;
unsigned long sf_count[2]; /* include/exclude counts */
+ __be32 sf_inaddr;
unsigned char sf_gsresp; /* include in g & s response? */
unsigned char sf_oldin; /* change state */
unsigned char sf_crcount; /* retrans. left to send */
@@ -93,6 +87,8 @@ struct ip_mc_list {
char loaded;
unsigned char gsquery; /* check source marks? */
unsigned char crcount;
+ unsigned long mca_cstamp;
+ unsigned long mca_tstamp;
struct rcu_head rcu;
};
@@ -106,27 +102,44 @@ struct ip_mc_list {
#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
+static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (skb_transport_offset(skb) + ip_transport_len(skb) < len)
+ return 0;
+
+ return pskb_may_pull(skb, len);
+}
+
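+ ip_mc_may_pull() refuses pulls that would reach past the IP payload before
+ attempting the pskb_may_pull(). A hedged sketch of the pattern it enables;
+ the caller is invented:
+
+	static int peek_igmp_type(struct sk_buff *skb, u8 *type)
+	{
+		unsigned int len = skb_transport_offset(skb) +
+				   sizeof(struct igmphdr);
+
+		if (!ip_mc_may_pull(skb, len))
+			return -EINVAL;	/* truncated or non-linearizable */
+
+		*type = igmp_hdr(skb)->type;
+		return 0;
+	}
+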
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
extern int igmp_rcv(struct sk_buff *);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
+extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+ unsigned int mode);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
extern void ip_mc_drop_socket(struct sock *sk);
extern int ip_mc_source(int add, int omode, struct sock *sk,
struct ip_mreq_source *mreqs, int ifindex);
extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex);
extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
- struct ip_msfilter __user *optval, int __user *optlen);
+ sockptr_t optval, sockptr_t optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
- struct group_filter __user *optval, int __user *optlen);
-extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif);
+ sockptr_t optval, size_t offset);
+extern int ip_mc_sf_allow(const struct sock *sk, __be32 local, __be32 rmt,
+ int dif, int sdif);
extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
extern void ip_mc_up(struct in_device *);
extern void ip_mc_down(struct in_device *);
extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
-extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
+extern void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp);
+static inline void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
+{
+ return __ip_mc_dec_group(in_dev, addr, GFP_KERNEL);
+}
+extern void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
+ gfp_t gfp);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
+int ip_mc_check_igmp(struct sk_buff *skb);
#endif
diff --git a/include/linux/ihex.h b/include/linux/ihex.h
index 31d8629e75a1..b824877e6d1b 100644
--- a/include/linux/ihex.h
+++ b/include/linux/ihex.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Compact binary representation of ihex records. Some devices need their
* firmware loaded in strange orders rather than a single big blob, but
@@ -17,15 +18,27 @@
struct ihex_binrec {
__be32 addr;
__be16 len;
- uint8_t data[0];
+ uint8_t data[];
} __attribute__((packed));
+static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p)
+{
+ return be16_to_cpu(p->len) + sizeof(*p);
+}
+
/* Find the next record, taking into account the 4-byte alignment */
static inline const struct ihex_binrec *
+__ihex_next_binrec(const struct ihex_binrec *rec)
+{
+ const void *p = rec;
+
+ return p + ALIGN(ihex_binrec_size(rec), 4);
+}
+
+static inline const struct ihex_binrec *
ihex_next_binrec(const struct ihex_binrec *rec)
{
- int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2;
- rec = (void *)&rec->data[next];
+ rec = __ihex_next_binrec(rec);
return be16_to_cpu(rec->len) ? rec : NULL;
}
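 Worked example of the record arithmetic (values illustrative): for a record
 with len == 5, ihex_binrec_size() is 5 + sizeof(struct ihex_binrec) =
 5 + 6 = 11 bytes, and __ihex_next_binrec() advances by ALIGN(11, 4) = 12
 bytes, so records stay 4-byte aligned. The removed open-coded form,
 ((len + 5) & ~3) - 2, yields the same 12-byte step once its offset from
 rec->data (which sits 6 bytes into the record) is accounted for:
 6 + 8 - 2 = 12.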
@@ -33,18 +46,15 @@ ihex_next_binrec(const struct ihex_binrec *rec)
/* Check that ihex_next_binrec() won't take us off the end of the image... */
static inline int ihex_validate_fw(const struct firmware *fw)
{
- const struct ihex_binrec *rec;
- size_t ofs = 0;
+ const struct ihex_binrec *end, *rec;
- while (ofs <= fw->size - sizeof(*rec)) {
- rec = (void *)&fw->data[ofs];
+ rec = (const void *)fw->data;
+ end = (const void *)&fw->data[fw->size - sizeof(*end)];
+ for (; rec <= end; rec = __ihex_next_binrec(rec)) {
/* Zero length marks end of records */
- if (!be16_to_cpu(rec->len))
+ if (rec == end && !be16_to_cpu(rec->len))
return 0;
-
- /* Point to next record... */
- ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3;
}
return -EINVAL;
}
diff --git a/include/linux/iio/accel/kxcjk_1013.h b/include/linux/iio/accel/kxcjk_1013.h
index fd1d540ea62d..ea0ecb774371 100644
--- a/include/linux/iio/accel/kxcjk_1013.h
+++ b/include/linux/iio/accel/kxcjk_1013.h
@@ -1,22 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* KXCJK-1013 3-axis accelerometer Interface
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __IIO_KXCJK_1013_H__
#define __IIO_KXCJK_1013_H__
+#include <linux/iio/iio.h>
+
struct kxcjk_1013_platform_data {
bool active_high_intr;
+ struct iio_mount_matrix orientation;
};
#endif
diff --git a/include/linux/iio/adc-helpers.h b/include/linux/iio/adc-helpers.h
new file mode 100644
index 000000000000..56b092a2a4c4
--- /dev/null
+++ b/include/linux/iio/adc-helpers.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * The industrial I/O ADC firmware property parsing helpers
+ *
+ * Copyright (c) 2025 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#ifndef _INDUSTRIAL_IO_ADC_HELPERS_H_
+#define _INDUSTRIAL_IO_ADC_HELPERS_H_
+
+#include <linux/property.h>
+
+struct device;
+struct iio_chan_spec;
+
+static inline int iio_adc_device_num_channels(struct device *dev)
+{
+ return device_get_named_child_node_count(dev, "channel");
+}
+
+int devm_iio_adc_device_alloc_chaninfo_se(struct device *dev,
+ const struct iio_chan_spec *template,
+ int max_chan_id,
+ struct iio_chan_spec **cs);
+
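+ A hedged sketch of the intended pairing of these helpers; the chip, the
+ template, and the max channel id are invented, and this assumes
+ devm_iio_adc_device_alloc_chaninfo_se() returns the number of channels
+ found on success:
+
+	static int foo_adc_init_channels(struct device *dev,
+					 struct iio_dev *indio_dev)
+	{
+		static const struct iio_chan_spec template = {
+			.type = IIO_VOLTAGE,
+			.indexed = 1,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		};
+		struct iio_chan_spec *chans;
+		int num;
+
+		num = devm_iio_adc_device_alloc_chaninfo_se(dev, &template,
+							    15, &chans);
+		if (num < 0)
+			return num;
+
+		indio_dev->channels = chans;
+		indio_dev->num_channels = num;
+
+		return 0;
+	}
+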
+#endif /* _INDUSTRIAL_IO_ADC_HELPERS_H_ */
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 5ba430cc9a87..6e70a412e218 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -1,14 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Support code for Analog Devices Sigma-Delta ADCs
*
* Copyright 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2.
*/
#ifndef __AD_SIGMA_DELTA_H__
#define __AD_SIGMA_DELTA_H__
+#include <linux/iio/iio.h>
+
enum ad_sigma_delta_mode {
AD_SD_MODE_CONTINUOUS = 0,
AD_SD_MODE_SINGLE = 1,
@@ -27,26 +28,57 @@ struct ad_sd_calib_data {
};
struct ad_sigma_delta;
+struct device;
+struct gpio_desc;
struct iio_dev;
+struct spi_offload;
+struct spi_offload_trigger;
/**
* struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
* @set_channel: Will be called to select the current channel, may be NULL.
+ * @append_status: Will be called to enable status append at the end of the sample, may be NULL.
* @set_mode: Will be called to select the current mode, may be NULL.
+ * @disable_all: Will be called to disable all channels, may be NULL.
+ * @disable_one: Will be called to disable a single channel after
+ * ad_sigma_delta_single_conversion(), may be NULL.
+ * Usage of this callback expects iio_chan_spec.address to contain
+ * the value required for the driver to identify the channel.
* @postprocess_sample: Is called for each sampled data word, can be used to
 *	modify or drop the sample data; it may be NULL.
* @has_registers: true if the device has writable and readable registers, false
* if there is just one read-only sample data shift register.
+ * @has_named_irqs: Set to true if there is more than one IRQ line.
+ * @supports_spi_offload: Set to true if the driver supports SPI offload. Often
+ * special considerations are needed for scan_type and other channel
+ * info, so individual drivers have to set this to let the core
+ * code know that it can use SPI offload if it is available.
* @addr_shift: Shift of the register address in the communications register.
* @read_mask: Mask for the communications register having the read bit set.
+ * @status_ch_mask: Mask for the channel number stored in status register.
+ * @data_reg: Address of the data register, if 0 the default address of 0x3 will
+ * be used.
+ * @irq_flags: flags for the interrupt used by the triggered buffer
+ * @num_slots: Number of sequencer slots
+ * @num_resetclks: Number of SPI clk cycles with MOSI=1 to reset the chip.
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+ int (*append_status)(struct ad_sigma_delta *, bool append);
int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+ int (*disable_all)(struct ad_sigma_delta *);
+ int (*disable_one)(struct ad_sigma_delta *, unsigned int chan);
int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
bool has_registers;
+ bool has_named_irqs;
+ bool supports_spi_offload;
unsigned int addr_shift;
unsigned int read_mask;
+ unsigned int status_ch_mask;
+ unsigned int data_reg;
+ unsigned long irq_flags;
+ unsigned int num_slots;
+ unsigned int num_resetclks;
};
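 A hedged sketch of a driver filling in this structure; the chip ("foo_adc")
 and its register layout are invented, only the callback wiring mirrors the
 documentation above:

	static int foo_adc_set_channel(struct ad_sigma_delta *sd,
				       unsigned int ch)
	{
		/* hypothetical: route the input mux via a config register */
		return ad_sd_write_reg(sd, 0x01, 2, ch & 0xf);
	}

	static const struct ad_sigma_delta_info foo_adc_info = {
		.set_channel	= foo_adc_set_channel,
		.has_registers	= true,
		.addr_shift	= 3,
		.read_mask	= BIT(6),
		.irq_flags	= IRQF_TRIGGER_FALLING,
		.num_resetclks	= 40,
	};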
/**
@@ -63,21 +95,46 @@ struct ad_sigma_delta {
/* private: */
struct completion completion;
+ spinlock_t irq_lock; /* protects .irq_dis and irq en/disable state */
bool irq_dis;
bool bus_locked;
+ bool keep_cs_asserted;
- uint8_t comm;
+ u8 comm;
const struct ad_sigma_delta_info *info;
+ unsigned int active_slots;
+ unsigned int current_slot;
+ unsigned int num_slots;
+ struct gpio_desc *rdy_gpiod;
+ int irq_line;
+ bool status_appended;
+ /* map slots to channels in order to know what to expect from devices */
+ unsigned int *slots;
+ struct spi_message sample_msg;
+ struct spi_transfer sample_xfer[2];
+ u8 *samples_buf;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * 'tx_buf' is up to 32 bits.
+ * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
+ * rounded to 16 bytes to take into account padding.
*/
- uint8_t data[4] ____cacheline_aligned;
+ u8 tx_buf[4] __aligned(IIO_DMA_MINALIGN);
+ u8 rx_buf[16] __aligned(8);
+ u8 sample_addr;
};
+static inline bool ad_sigma_delta_has_spi_offload(struct ad_sigma_delta *sd)
+{
+ return sd->offload != NULL;
+}
+
static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
unsigned int channel)
{
@@ -87,6 +144,38 @@ static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
return 0;
}
+static inline int ad_sigma_delta_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ int ret;
+
+ if (sd->info->append_status) {
+ ret = sd->info->append_status(sd, append);
+ if (ret < 0)
+ return ret;
+
+ sd->status_appended = append;
+ }
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
+{
+ if (sd->info->disable_all)
+ return sd->info->disable_all(sd);
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_disable_one(struct ad_sigma_delta *sd,
+ unsigned int chan)
+{
+ if (sd->info->disable_one)
+ return sd->info->disable_one(sd, chan);
+
+ return 0;
+}
+
static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
unsigned int mode)
{
@@ -105,70 +194,25 @@ static inline int ad_sigma_delta_postprocess_sample(struct ad_sigma_delta *sd,
return 0;
}
-void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, uint8_t comm);
+void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, u8 comm);
int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int val);
int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int *val);
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta);
+
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
+int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+ unsigned int mode, unsigned int channel);
int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
const struct ad_sd_calib_data *cd, unsigned int n);
int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
struct spi_device *spi, const struct ad_sigma_delta_info *info);
-int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev);
-void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
+int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
-#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, _extend_name, _type) \
- { \
- .type = (_type), \
- .differential = (_channel2 == -1 ? 0 : 1), \
- .indexed = 1, \
- .channel = (_channel1), \
- .channel2 = (_channel2), \
- .address = (_address), \
- .extend_name = (_extend_name), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_OFFSET), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .scan_index = (_si), \
- .scan_type = { \
- .sign = 'u', \
- .realbits = (_bits), \
- .storagebits = (_storagebits), \
- .shift = (_shift), \
- .endianness = IIO_BE, \
- }, \
- }
-
-#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE)
-
-#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
- _storagebits, _shift, "shorted", IIO_VOLTAGE)
-
-#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE)
-
-#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_TEMP)
-
-#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
- _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, "supply", IIO_VOLTAGE)
-
#endif
diff --git a/include/linux/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h
new file mode 100644
index 000000000000..3bf4c49726a7
--- /dev/null
+++ b/include/linux/iio/adc/qcom-vadc-common.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code shared between the different Qualcomm PMIC voltage ADCs
+ */
+
+#ifndef QCOM_VADC_COMMON_H
+#define QCOM_VADC_COMMON_H
+
+#include <linux/math.h>
+#include <linux/types.h>
+
+#define VADC_CONV_TIME_MIN_US 2000
+#define VADC_CONV_TIME_MAX_US 2100
+
+/* Min ADC code represents 0V */
+#define VADC_MIN_ADC_CODE 0x6000
+/* Max ADC code represents full-scale range of 1.8V */
+#define VADC_MAX_ADC_CODE 0xa800
+
+#define VADC_ABSOLUTE_RANGE_UV 625000
+#define VADC_RATIOMETRIC_RANGE 1800
+
+#define VADC_DEF_PRESCALING 0 /* 1:1 */
+#define VADC_DEF_DECIMATION 0 /* 512 */
+#define VADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
+#define VADC_DEF_AVG_SAMPLES 0 /* 1 sample */
+#define VADC_DEF_CALIB_TYPE VADC_CALIB_ABSOLUTE
+
+#define VADC_DECIMATION_MIN 512
+#define VADC_DECIMATION_MAX 4096
+#define ADC5_DEF_VBAT_PRESCALING 1 /* 1:3 */
+#define ADC5_DECIMATION_SHORT 250
+#define ADC5_DECIMATION_MEDIUM 420
+#define ADC5_DECIMATION_LONG 840
+/* Default decimation - 1024 for rev2, 840 for pmic5 */
+#define ADC5_DECIMATION_DEFAULT 2
+#define ADC5_DECIMATION_SAMPLES_MAX 3
+
+#define VADC_HW_SETTLE_DELAY_MAX 10000
+#define VADC_HW_SETTLE_SAMPLES_MAX 16
+#define VADC_AVG_SAMPLES_MAX 512
+#define ADC5_AVG_SAMPLES_MAX 16
+
+#define PMIC5_CHG_TEMP_SCALE_FACTOR 377500
+#define PMIC5_SMB_TEMP_CONSTANT 419400
+#define PMIC5_SMB_TEMP_SCALE_FACTOR 356
+
+#define PMI_CHG_SCALE_1 -138890
+#define PMI_CHG_SCALE_2 391750000000LL
+
+#define VADC5_MAX_CODE 0x7fff
+#define ADC5_FULL_SCALE_CODE 0x70e4
+#define ADC5_USR_DATA_CHECK 0x8000
+
+#define R_PU_100K 100000
+#define RATIO_MAX_ADC7 BIT(14)
+
+/*
+ * VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels.
+ * VADC_CALIB_RATIOMETRIC: uses the reference voltage (1.8V) and GND for
+ * calibration.
+ */
+enum vadc_calibration {
+ VADC_CALIB_ABSOLUTE = 0,
+ VADC_CALIB_RATIOMETRIC
+};
+
+/**
+ * struct vadc_linear_graph - Represent ADC characteristics.
+ * @dy: numerator slope to calculate the gain.
+ * @dx: denominator slope to calculate the gain.
+ * @gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are
+ * computed to calibrate the device.
+ */
+struct vadc_linear_graph {
+ s32 dy;
+ s32 dx;
+ s32 gnd;
+};
+
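+ A hedged reading of how the calibration graph is applied; the helper below
+ is illustrative and ignores prescaling and the driver's exact rounding:
+
+	/* uV = (code - gnd) * dx / dy, with dx the reference span in uV */
+	static s64 example_code_to_uv(const struct vadc_linear_graph *g,
+				      u16 code)
+	{
+		s64 v = ((s64)code - g->gnd) * g->dx;
+
+		return div64_s64(v, g->dy);	/* needs <linux/math64.h> */
+	}
+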
+/**
+ * enum vadc_scale_fn_type - Scaling function to convert ADC code to
+ * physical scaled units for the channel.
+ * @SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * @SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
+ * Uses a mapping table with 100K pullup.
+ * @SCALE_PMIC_THERM: Returns result in millidegrees Centigrade.
+ * @SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
+ * @SCALE_PMI_CHG_TEMP: Conversion for PMI charger temperature.
+ * @SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to
+ * voltage (uV) with hardware applied offset/slope values to adc code.
+ * @SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using
+ * lookup table. The hardware applies offset/slope to adc code.
+ * @SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using
+ * 100k pullup. The hardware applies offset/slope to adc code.
+ * @SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
+ * lookup table for PMIC7. The hardware applies offset/slope to adc code.
+ * @SCALE_HW_CALIB_PMIC_THERM: Returns result in millidegrees Centigrade.
+ *	The hardware applies offset/slope to adc code.
+ * @SCALE_HW_CALIB_PMIC_THERM_PM7: Returns result in millidegrees Centigrade.
+ *	The hardware applies offset/slope to adc code. This is for PMIC7.
+ * @SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
+ * charger temperature.
+ * @SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
+ * SMB1390 temperature.
+ */
+enum vadc_scale_fn_type {
+ SCALE_DEFAULT = 0,
+ SCALE_THERM_100K_PULLUP,
+ SCALE_PMIC_THERM,
+ SCALE_XOTHERM,
+ SCALE_PMI_CHG_TEMP,
+ SCALE_HW_CALIB_DEFAULT,
+ SCALE_HW_CALIB_THERM_100K_PULLUP,
+ SCALE_HW_CALIB_XOTHERM,
+ SCALE_HW_CALIB_THERM_100K_PU_PM7,
+ SCALE_HW_CALIB_PMIC_THERM,
+ SCALE_HW_CALIB_PMIC_THERM_PM7,
+ SCALE_HW_CALIB_PM5_CHG_TEMP,
+ SCALE_HW_CALIB_PM5_SMB_TEMP,
+ /* private: */
+ SCALE_HW_CALIB_INVALID,
+};
+
+struct adc5_data {
+ const u32 full_scale_code_volt;
+ const u32 full_scale_code_cur;
+ const struct adc5_channels *adc_chans;
+ const struct iio_info *info;
+ unsigned int *decimation;
+ unsigned int *hw_settle_1;
+ unsigned int *hw_settle_2;
+};
+
+int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
+ const struct vadc_linear_graph *calib_graph,
+ const struct u32_fract *prescale,
+ bool absolute,
+ u16 adc_code, int *result_mdec);
+
+struct qcom_adc5_scale_type {
+ int (*scale_fn)(const struct u32_fract *prescale,
+ const struct adc5_data *data, u16 adc_code, int *result);
+};
+
+int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
+ unsigned int prescale_ratio,
+ const struct adc5_data *data,
+ u16 adc_code, int *result_mdec);
+
+u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio,
+ u32 full_scale_code_volt, int temp);
+
+u16 qcom_adc_tm5_gen2_temp_res_scale(int temp);
+
+int qcom_adc5_prescaling_from_dt(u32 num, u32 den);
+
+int qcom_adc5_hw_settle_time_from_dt(u32 value, const unsigned int *hw_settle);
+
+int qcom_adc5_avg_samples_from_dt(u32 value);
+
+int qcom_adc5_decimation_from_dt(u32 value, const unsigned int *decimation);
+
+int qcom_vadc_decimation_from_dt(u32 value);
+
+#endif /* QCOM_VADC_COMMON_H */
diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h
new file mode 100644
index 000000000000..0da298b41737
--- /dev/null
+++ b/include/linux/iio/adc/stm32-dfsdm-adc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file describes the STM32 DFSDM IIO driver API for the audio part
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef STM32_DFSDM_ADC_H
+#define STM32_DFSDM_ADC_H
+
+#include <linux/iio/iio.h>
+
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private);
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev);
+
+#endif
diff --git a/include/linux/iio/afe/rescale.h b/include/linux/iio/afe/rescale.h
new file mode 100644
index 000000000000..6eecb435488f
--- /dev/null
+++ b/include/linux/iio/afe/rescale.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 Axentia Technologies AB
+ */
+
+#ifndef __IIO_RESCALE_H__
+#define __IIO_RESCALE_H__
+
+#include <linux/types.h>
+#include <linux/iio/iio.h>
+
+struct device;
+struct rescale;
+
+struct rescale_cfg {
+ enum iio_chan_type type;
+ int (*props)(struct device *dev, struct rescale *rescale);
+};
+
+struct rescale {
+ const struct rescale_cfg *cfg;
+ struct iio_channel *source;
+ struct iio_chan_spec chan;
+ struct iio_chan_spec_ext_info *ext_info;
+ bool chan_processed;
+ s32 numerator;
+ s32 denominator;
+ s32 offset;
+};
+
+int rescale_process_scale(struct rescale *rescale, int scale_type,
+ int *val, int *val2);
+int rescale_process_offset(struct rescale *rescale, int scale_type,
+ int scale, int scale2, int schan_off,
+ int *val, int *val2);
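+
+ Illustrative arithmetic for the numerator/denominator pair: undoing a
+ 1:100 voltage divider in front of the ADC takes numerator = 100 and
+ denominator = 1, so a 12 mV reading at the ADC pin is reported as 1.2 V at
+ the AFE input; front ends that also shift the signal are covered by
+ @offset via rescale_process_offset().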
+#endif /* __IIO_RESCALE_H__ */
diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h
new file mode 100644
index 000000000000..7f815f3fed6a
--- /dev/null
+++ b/include/linux/iio/backend.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _IIO_BACKEND_H_
+#define _IIO_BACKEND_H_
+
+#include <linux/types.h>
+#include <linux/iio/iio.h>
+
+struct iio_chan_spec;
+struct fwnode_handle;
+struct iio_backend;
+struct device;
+struct iio_dev;
+
+enum iio_backend_data_type {
+ IIO_BACKEND_TWOS_COMPLEMENT,
+ IIO_BACKEND_OFFSET_BINARY,
+ IIO_BACKEND_DATA_UNSIGNED,
+ IIO_BACKEND_DATA_TYPE_MAX
+};
+
+enum iio_backend_data_source {
+ IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE,
+ IIO_BACKEND_EXTERNAL,
+ IIO_BACKEND_INTERNAL_RAMP_16BIT,
+ IIO_BACKEND_DATA_SOURCE_MAX
+};
+
+#define iio_backend_debugfs_ptr(ptr) PTR_IF(IS_ENABLED(CONFIG_DEBUG_FS), ptr)
+
+/**
+ * IIO_BACKEND_EX_INFO - Helper for an IIO extended channel attribute
+ * @_name: Attribute name
+ * @_shared: Whether the attribute is shared between all channels
+ * @_what: Data private to the driver
+ */
+#define IIO_BACKEND_EX_INFO(_name, _shared, _what) { \
+ .name = (_name), \
+ .shared = (_shared), \
+ .read = iio_backend_ext_info_get, \
+ .write = iio_backend_ext_info_set, \
+ .private = (_what), \
+}
+
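+ Typical use is a sentinel-terminated ext_info array hung off a channel; a
+ minimal sketch, with the attribute name and private cookie invented:
+
+	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
+		IIO_BACKEND_EX_INFO("test_mode", IIO_SEPARATE, 0),
+		{ }
+	};
+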
+/**
+ * struct iio_backend_data_fmt - Backend data format
+ * @type: Data type.
+ * @sign_extend: Bool to tell if the data is sign extended.
+ * @enable: Enable/Disable the data format module. If disabled,
+ *		no formatting will happen.
+ */
+struct iio_backend_data_fmt {
+ enum iio_backend_data_type type;
+ bool sign_extend;
+ bool enable;
+};
+
+/* vendor specific from 32 */
+enum iio_backend_test_pattern {
+ IIO_BACKEND_NO_TEST_PATTERN,
+ /* modified prbs9 */
+ IIO_BACKEND_ADI_PRBS_9A = 32,
+ /* modified prbs23 */
+ IIO_BACKEND_ADI_PRBS_23A,
+ IIO_BACKEND_TEST_PATTERN_MAX
+};
+
+enum iio_backend_sample_trigger {
+ IIO_BACKEND_SAMPLE_TRIGGER_EDGE_FALLING,
+ IIO_BACKEND_SAMPLE_TRIGGER_EDGE_RISING,
+ IIO_BACKEND_SAMPLE_TRIGGER_MAX
+};
+
+enum iio_backend_interface_type {
+ IIO_BACKEND_INTERFACE_SERIAL_LVDS,
+ IIO_BACKEND_INTERFACE_SERIAL_CMOS,
+ IIO_BACKEND_INTERFACE_MAX
+};
+
+enum iio_backend_filter_type {
+ IIO_BACKEND_FILTER_TYPE_DISABLED,
+ IIO_BACKEND_FILTER_TYPE_SINC1,
+ IIO_BACKEND_FILTER_TYPE_SINC5,
+ IIO_BACKEND_FILTER_TYPE_SINC5_PLUS_COMP,
+ IIO_BACKEND_FILTER_TYPE_MAX
+};
+
+/**
+ * struct iio_backend_ops - operations structure for an iio_backend
+ * @enable: Enable backend.
+ * @disable: Disable backend.
+ * @chan_enable: Enable one channel.
+ * @chan_disable: Disable one channel.
+ * @data_format_set: Configure the data format for a specific channel.
+ * @data_source_set: Configure the data source for a specific channel.
+ * @data_source_get: Data source getter for a specific channel.
+ * @set_sample_rate: Configure the sampling rate for a specific channel.
+ * @test_pattern_set: Configure a test pattern.
+ * @chan_status: Get the channel status.
+ * @iodelay_set: Set digital I/O delay.
+ * @data_sample_trigger: Control when to sample data.
+ * @request_buffer: Request an IIO buffer.
+ * @free_buffer: Free an IIO buffer.
+ * @extend_chan_spec: Extend an IIO channel.
+ * @ext_info_set: Extended info setter.
+ * @ext_info_get: Extended info getter.
+ * @interface_type_get: Interface type.
+ * @data_size_set: Data size.
+ * @oversampling_ratio_set: Set Oversampling ratio.
+ * @read_raw: Read a channel attribute from a backend device
+ * @debugfs_print_chan_status: Print channel status into a buffer.
+ * @debugfs_reg_access: Read or write register value of backend.
+ * @filter_type_set: Set filter type.
+ * @interface_data_align: Perform the data alignment process.
+ * @num_lanes_set: Set the number of lanes enabled.
+ * @ddr_enable: Enable interface DDR (Double Data Rate) mode.
+ * @ddr_disable: Disable interface DDR (Double Data Rate) mode.
+ * @data_stream_enable: Enable data stream.
+ * @data_stream_disable: Disable data stream.
+ * @data_transfer_addr: Set data address.
+ **/
+struct iio_backend_ops {
+ int (*enable)(struct iio_backend *back);
+ void (*disable)(struct iio_backend *back);
+ int (*chan_enable)(struct iio_backend *back, unsigned int chan);
+ int (*chan_disable)(struct iio_backend *back, unsigned int chan);
+ int (*data_format_set)(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data);
+ int (*data_source_set)(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source data);
+ int (*data_source_get)(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source *data);
+ int (*set_sample_rate)(struct iio_backend *back, unsigned int chan,
+ u64 sample_rate_hz);
+ int (*test_pattern_set)(struct iio_backend *back,
+ unsigned int chan,
+ enum iio_backend_test_pattern pattern);
+ int (*chan_status)(struct iio_backend *back, unsigned int chan,
+ bool *error);
+ int (*iodelay_set)(struct iio_backend *back, unsigned int chan,
+ unsigned int taps);
+ int (*data_sample_trigger)(struct iio_backend *back,
+ enum iio_backend_sample_trigger trigger);
+ struct iio_buffer *(*request_buffer)(struct iio_backend *back,
+ struct iio_dev *indio_dev);
+ void (*free_buffer)(struct iio_backend *back,
+ struct iio_buffer *buffer);
+ int (*extend_chan_spec)(struct iio_backend *back,
+ struct iio_chan_spec *chan);
+ int (*ext_info_set)(struct iio_backend *back, uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len);
+ int (*ext_info_get)(struct iio_backend *back, uintptr_t private,
+ const struct iio_chan_spec *chan, char *buf);
+ int (*interface_type_get)(struct iio_backend *back,
+ enum iio_backend_interface_type *type);
+ int (*data_size_set)(struct iio_backend *back, unsigned int size);
+ int (*oversampling_ratio_set)(struct iio_backend *back,
+ unsigned int chan, unsigned int ratio);
+ int (*read_raw)(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+ int (*debugfs_print_chan_status)(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+ int (*debugfs_reg_access)(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+ int (*filter_type_set)(struct iio_backend *back,
+ enum iio_backend_filter_type type);
+ int (*interface_data_align)(struct iio_backend *back, u32 timeout_us);
+ int (*num_lanes_set)(struct iio_backend *back, unsigned int num_lanes);
+ int (*ddr_enable)(struct iio_backend *back);
+ int (*ddr_disable)(struct iio_backend *back);
+ int (*data_stream_enable)(struct iio_backend *back);
+ int (*data_stream_disable)(struct iio_backend *back);
+ int (*data_transfer_addr)(struct iio_backend *back, u32 address);
+};
+
+/**
+ * struct iio_backend_info - info structure for an iio_backend
+ * @name: Backend name.
+ * @ops: Backend operations.
+ */
+struct iio_backend_info {
+ const char *name;
+ const struct iio_backend_ops *ops;
+};
+
+int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan);
+int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan);
+int devm_iio_backend_enable(struct device *dev, struct iio_backend *back);
+int iio_backend_enable(struct iio_backend *back);
+void iio_backend_disable(struct iio_backend *back);
+int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data);
+int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source data);
+int iio_backend_data_source_get(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source *data);
+int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan,
+ u64 sample_rate_hz);
+int iio_backend_test_pattern_set(struct iio_backend *back,
+ unsigned int chan,
+ enum iio_backend_test_pattern pattern);
+int iio_backend_chan_status(struct iio_backend *back, unsigned int chan,
+ bool *error);
+int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane,
+ unsigned int taps);
+int iio_backend_data_sample_trigger(struct iio_backend *back,
+ enum iio_backend_sample_trigger trigger);
+int devm_iio_backend_request_buffer(struct device *dev,
+ struct iio_backend *back,
+ struct iio_dev *indio_dev);
+int iio_backend_filter_type_set(struct iio_backend *back,
+ enum iio_backend_filter_type type);
+int iio_backend_interface_data_align(struct iio_backend *back, u32 timeout_us);
+int iio_backend_num_lanes_set(struct iio_backend *back, unsigned int num_lanes);
+int iio_backend_ddr_enable(struct iio_backend *back);
+int iio_backend_ddr_disable(struct iio_backend *back);
+int iio_backend_data_stream_enable(struct iio_backend *back);
+int iio_backend_data_stream_disable(struct iio_backend *back);
+int iio_backend_data_transfer_addr(struct iio_backend *back, u32 address);
+ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len);
+ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, char *buf);
+int iio_backend_interface_type_get(struct iio_backend *back,
+ enum iio_backend_interface_type *type);
+int iio_backend_data_size_set(struct iio_backend *back, unsigned int size);
+int iio_backend_oversampling_ratio_set(struct iio_backend *back,
+ unsigned int chan,
+ unsigned int ratio);
+int iio_backend_read_raw(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+int iio_backend_extend_chan_spec(struct iio_backend *back,
+ struct iio_chan_spec *chan);
+void *iio_backend_get_priv(const struct iio_backend *conv);
+struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name);
+struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
+ const char *name,
+ struct fwnode_handle *fwnode);
+struct iio_backend *
+__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
+ struct fwnode_handle *fwnode);
+
+int devm_iio_backend_register(struct device *dev,
+ const struct iio_backend_info *info, void *priv);
+
+static inline int iio_backend_read_scale(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+
+static inline int iio_backend_read_offset(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2,
+ IIO_CHAN_INFO_OFFSET);
+}
+
+ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+void iio_backend_debugfs_add(struct iio_backend *back,
+ struct iio_dev *indio_dev);
+#endif
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 767467d886de..4f33e6a39797 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -1,38 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2013-2015 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2.
*/
#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__
+#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
-
-struct iio_buffer_block {
- u32 size;
- u32 bytes_used;
-};
+struct dma_buf_attachment;
+struct dma_fence;
+struct sg_table;
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
- * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
* @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
* @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
* @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
* @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
*/
enum iio_block_state {
- IIO_BLOCK_STATE_DEQUEUED,
IIO_BLOCK_STATE_QUEUED,
IIO_BLOCK_STATE_ACTIVE,
IIO_BLOCK_STATE_DONE,
@@ -49,6 +45,10 @@ enum iio_block_state {
* @queue: Parent DMA buffer queue
* @kref: kref used to manage the lifetime of block
* @state: Current state of the block
+ * @cyclic: True if this is a cyclic buffer
+ * @fileio: True if this buffer is used for fileio mode
+ * @sg_table: DMA table for the transfer when transferring a DMABUF
+ * @fence: DMA fence to be signaled when a DMABUF transfer is complete
*/
struct iio_dma_buffer_block {
/* May only be accessed by the owner of the block */
@@ -71,6 +71,12 @@ struct iio_dma_buffer_block {
* queue->list_lock if the block is not owned by the core.
*/
enum iio_block_state state;
+
+ bool cyclic;
+ bool fileio;
+
+ struct sg_table *sg_table;
+ struct dma_fence *fence;
};
/**
@@ -79,12 +85,17 @@ struct iio_dma_buffer_block {
* @active_block: Block being used in read()
* @pos: Read offset in the active block
* @block_size: Size of each block
+ * @next_dequeue: index of next block that will be dequeued
+ * @enabled: Whether the buffer is operating in fileio mode
*/
struct iio_dma_buffer_queue_fileio {
struct iio_dma_buffer_block *blocks[2];
struct iio_dma_buffer_block *active_block;
size_t pos;
size_t block_size;
+
+ unsigned int next_dequeue;
+ bool enabled;
};
/**
@@ -99,8 +110,8 @@ struct iio_dma_buffer_queue_fileio {
* list and typically also a list of active blocks in the part that handles
* the DMA controller
* @incoming: List of buffers on the incoming queue
- * @outgoing: List of buffers on the outgoing queue
* @active: Whether the buffer is currently active
+ * @num_dmabufs: Total number of DMABUFs attached to this queue
* @fileio: FileIO state
*/
struct iio_dma_buffer_queue {
@@ -111,9 +122,9 @@ struct iio_dma_buffer_queue {
struct mutex lock;
spinlock_t list_lock;
struct list_head incoming;
- struct list_head outgoing;
bool active;
+ atomic_t num_dmabufs;
struct iio_dma_buffer_queue_fileio fileio;
};
@@ -139,9 +150,11 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer,
struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
char __user *user_buffer);
-size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
+int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
+ const char __user *user_buffer);
+size_t iio_dma_buffer_usage(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
@@ -149,4 +162,18 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+ struct dma_buf_attachment *attach);
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block);
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence,
+ struct sg_table *sgt,
+ size_t size, bool cyclic);
+void iio_dma_buffer_lock_queue(struct iio_buffer *buffer);
+void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer);
+struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer);
+
#endif
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index 5dcddf427bb0..37f27545f69f 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -1,18 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2014-2015 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __IIO_DMAENGINE_H__
#define __IIO_DMAENGINE_H__
-struct iio_buffer;
+#include <linux/iio/buffer.h>
+
+struct iio_dev;
struct device;
+struct dma_chan;
+
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer);
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel,
+ enum iio_buffer_direction dir);
+
+#define iio_dmaengine_buffer_setup(dev, indio_dev, channel) \
+ iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \
+ IIO_BUFFER_DIRECTION_IN)
+
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel,
+ enum iio_buffer_direction dir);
+int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
+ struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir);
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel);
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel) \
+ devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \
+ IIO_BUFFER_DIRECTION_IN)
#endif
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 48767c776119..d37f82678f71 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core - generic buffer interfaces.
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_BUFFER_GENERIC_H_
@@ -14,29 +11,29 @@
struct iio_buffer;
-void iio_buffer_set_attrs(struct iio_buffer *buffer,
- const struct attribute **attrs);
+enum iio_buffer_direction {
+ IIO_BUFFER_DIRECTION_IN,
+ IIO_BUFFER_DIRECTION_OUT,
+};
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
+int iio_pop_from_buffer(struct iio_buffer *buffer, void *data);
+
/**
* iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
* @indio_dev: iio_dev structure for device.
* @data: sample data
* @timestamp: timestamp for the sample data
*
- * Pushes data to the IIO device's buffers. If timestamps are enabled for the
- * device the function will store the supplied timestamp as the last element in
- * the sample data buffer before pushing it to the device buffers. The sample
- * data buffer needs to be large enough to hold the additional timestamp
- * (usually the buffer should be indio->scan_bytes bytes large).
+ * DEPRECATED: Use iio_push_to_buffers_with_ts() instead.
*
* Returns 0 on success, a negative error code otherwise.
*/
static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
void *data, int64_t timestamp)
{
- if (indio_dev->scan_timestamp) {
+ if (ACCESS_PRIVATE(indio_dev, scan_timestamp)) {
size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
((int64_t *)data)[ts_offset] = timestamp;
}
@@ -44,10 +41,42 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
return iio_push_to_buffers(indio_dev, data);
}
+/**
+ * iio_push_to_buffers_with_ts() - push data and timestamp to buffers
+ * @indio_dev: iio_dev structure for device.
+ * @data: Pointer to sample data buffer.
+ * @data_total_len: The size of @data in bytes.
+ * @timestamp: Timestamp for the sample data.
+ *
+ * Pushes data to the IIO device's buffers. If timestamps are enabled for the
+ * device the function will store the supplied timestamp as the last element in
+ * the sample data buffer before pushing it to the device buffers. The sample
+ * data buffer needs to be large enough to hold the additional timestamp
+ * (usually the buffer should be at least indio->scan_bytes bytes large).
+ *
+ * Context: Any context.
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static inline int iio_push_to_buffers_with_ts(struct iio_dev *indio_dev,
+ void *data, size_t data_total_len,
+ s64 timestamp)
+{
+ if (unlikely(data_total_len < indio_dev->scan_bytes)) {
+ dev_err(&indio_dev->dev, "Undersized storage pushed to buffer\n");
+ return -ENOSPC;
+ }
+
+ return iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
+}
+
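+ A hedged sketch of the intended call site, a trigger bottom half pushing a
+ fixed-size scan; the driver state and foo_read_scan() are invented:
+
+	static irqreturn_t foo_trigger_handler(int irq, void *p)
+	{
+		struct iio_poll_func *pf = p;
+		struct iio_dev *indio_dev = pf->indio_dev;
+		struct foo_state *st = iio_priv(indio_dev);
+
+		foo_read_scan(st);	/* hypothetical hardware read */
+		iio_push_to_buffers_with_ts(indio_dev, &st->scan,
+					    sizeof(st->scan),
+					    iio_get_time_ns(indio_dev));
+
+		iio_trigger_notify_done(indio_dev->trig);
+		return IRQ_HANDLED;
+	}
+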
+int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
+ const void *data, size_t data_sz,
+ int64_t timestamp);
+
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);
-void iio_device_attach_buffer(struct iio_dev *indio_dev,
- struct iio_buffer *buffer);
+int iio_device_attach_buffer(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer);
#endif /* _IIO_BUFFER_GENERIC_H_ */
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index 8daba198fafa..c0b0e0992a85 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IIO_BUFFER_GENERIC_IMPL_H_
#define _IIO_BUFFER_GENERIC_IMPL_H_
#include <linux/sysfs.h>
@@ -5,8 +6,15 @@
#ifdef CONFIG_IIO_BUFFER
+#include <uapi/linux/iio/buffer.h>
+#include <linux/iio/buffer.h>
+
+struct dma_buf_attachment;
+struct dma_fence;
struct iio_dev;
+struct iio_dma_buffer_block;
struct iio_buffer;
+struct sg_table;
/**
* INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
@@ -16,10 +24,15 @@ struct iio_buffer;
/**
* struct iio_buffer_access_funcs - access functions for buffers.
- * @store_to: actually store stuff to the buffer
- * @read_first_n: try to get a specified number of bytes (must exist)
+ * @store_to: actually store stuff to the buffer - must be safe to
+ * call from any context (e.g. must not sleep).
+ * @read: try to get a specified number of bytes (must exist)
* @data_available: indicates how much data is available for reading from
* the buffer.
+ * @remove_from:	remove scan from buffer. Drivers should call this to
+ * remove a scan from a buffer.
+ * @write: try to write a number of bytes
+ * @space_available: returns the amount of bytes available in a buffer
* @request_update: if a parameter change has been marked, update underlying
* storage.
* @set_bytes_per_datum:set number of bytes per datum
@@ -31,6 +44,17 @@ struct iio_buffer;
 *			device stops sampling. Calls are balanced with @enable.
* @release: called when the last reference to the buffer is dropped,
* should free all resources allocated by the buffer.
+ * @attach_dmabuf: called from userspace via ioctl to attach one external
+ * DMABUF.
+ * @detach_dmabuf: called from userspace via ioctl to detach one previously
+ * attached DMABUF.
+ * @enqueue_dmabuf: called from userspace via ioctl to queue this DMABUF
+ * object to this buffer. Requires a valid DMABUF fd, that
+ *			was previously attached to this buffer.
+ * @get_dma_dev:	called to get the DMA device associated with this buffer.
+ * @lock_queue: called when the core needs to lock the buffer queue;
+ * it is used when enqueueing DMABUF objects.
+ * @unlock_queue: used to unlock a previously locked buffer queue
* @modes: Supported operating modes by this buffer type
* @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
*
@@ -44,21 +68,34 @@ struct iio_buffer;
**/
struct iio_buffer_access_funcs {
int (*store_to)(struct iio_buffer *buffer, const void *data);
- int (*read_first_n)(struct iio_buffer *buffer,
- size_t n,
- char __user *buf);
+ int (*read)(struct iio_buffer *buffer, size_t n, char __user *buf);
size_t (*data_available)(struct iio_buffer *buffer);
+ int (*remove_from)(struct iio_buffer *buffer, void *data);
+ int (*write)(struct iio_buffer *buffer, size_t n, const char __user *buf);
+ size_t (*space_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
- int (*set_length)(struct iio_buffer *buffer, int length);
+ int (*set_length)(struct iio_buffer *buffer, unsigned int length);
int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
void (*release)(struct iio_buffer *buffer);
+ struct iio_dma_buffer_block * (*attach_dmabuf)(struct iio_buffer *buffer,
+ struct dma_buf_attachment *attach);
+ void (*detach_dmabuf)(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block);
+ int (*enqueue_dmabuf)(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence, struct sg_table *sgt,
+ size_t size, bool cyclic);
+ struct device * (*get_dma_dev)(struct iio_buffer *buffer);
+ void (*lock_queue)(struct iio_buffer *buffer);
+ void (*unlock_queue)(struct iio_buffer *buffer);
+
unsigned int modes;
unsigned int flags;
};
@@ -71,10 +108,16 @@ struct iio_buffer_access_funcs {
*/
struct iio_buffer {
/** @length: Number of datums in buffer. */
- int length;
+ unsigned int length;
+
+ /** @flags: File ops flags including busy flag. */
+ unsigned long flags;
/** @bytes_per_datum: Size of individual datum including timestamp. */
- int bytes_per_datum;
+ size_t bytes_per_datum;
+
+ /** @direction: Direction of the data stream (in/out). */
+ enum iio_buffer_direction direction;
/**
* @access: Buffer access functions associated with the
@@ -95,41 +138,38 @@ struct iio_buffer {
unsigned int watermark;
/* private: */
- /*
- * @scan_el_attrs: Control of scan elements if that scan mode
- * control method is used.
- */
- struct attribute_group *scan_el_attrs;
-
/* @scan_timestamp: Does the scan mode include a timestamp. */
bool scan_timestamp;
- /* @scan_el_dev_attr_list: List of scan element related attributes. */
- struct list_head scan_el_dev_attr_list;
-
- /* @buffer_group: Attributes of the buffer group. */
- struct attribute_group buffer_group;
+ /* @buffer_attr_list: List of buffer attributes. */
+ struct list_head buffer_attr_list;
/*
- * @scan_el_group: Attribute group for those attributes not
- * created from the iio_chan_info array.
+ * @buffer_group: Attributes of the new buffer group.
+ * Includes scan elements attributes.
*/
- struct attribute_group scan_el_group;
-
- /* @stufftoread: Flag to indicate new data. */
- bool stufftoread;
+ struct attribute_group buffer_group;
/* @attrs: Standard attributes of the buffer. */
- const struct attribute **attrs;
+ const struct iio_dev_attr **attrs;
/* @demux_bounce: Buffer for doing gather from incoming scan. */
void *demux_bounce;
+ /* @attached_entry: Entry in the device's list of buffers attached by the driver. */
+ struct list_head attached_entry;
+
/* @buffer_list: Entry in the device's list of current buffers. */
struct list_head buffer_list;
/* @ref: Reference count of the buffer. */
struct kref ref;
+
+ /* @dmabufs: List of DMABUF attachments */
+ struct list_head dmabufs; /* P: dmabufs_mutex */
+
+ /* @dmabufs_mutex: Protects dmabufs */
+ struct mutex dmabufs_mutex;
};
/**
@@ -153,6 +193,8 @@ void iio_buffer_init(struct iio_buffer *buffer);
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
void iio_buffer_put(struct iio_buffer *buffer);
+void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret);
+
#else /* CONFIG_IIO_BUFFER */
static inline void iio_buffer_get(struct iio_buffer *buffer) {}
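
For illustration, a minimal sketch of a buffer implementation against the
renamed ->read callback (formerly ->read_first_n). The my_* names are
hypothetical and only a stub of the mandatory hook is shown:

static int my_buf_read(struct iio_buffer *buffer, size_t n, char __user *buf)
{
	/* copy up to n bytes of queued scan data, return the count copied */
	return 0;	/* nothing available in this stub */
}

static const struct iio_buffer_access_funcs my_buf_access = {
	.read = my_buf_read,		/* must exist */
	.modes = INDIO_BUFFER_SOFTWARE,
};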
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
new file mode 100644
index 000000000000..bb966abcde53
--- /dev/null
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ChromeOS EC sensor hub
+ *
+ * Copyright (C) 2016 Google, Inc
+ */
+
+#ifndef __CROS_EC_SENSORS_CORE_H
+#define __CROS_EC_SENSORS_CORE_H
+
+#include <linux/iio/iio.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+
+enum {
+ CROS_EC_SENSOR_X,
+ CROS_EC_SENSOR_Y,
+ CROS_EC_SENSOR_Z,
+ CROS_EC_SENSOR_MAX_AXIS,
+};
+
+/* EC returns sensor values using signed 16 bit registers */
+#define CROS_EC_SENSOR_BITS 16
+
+/*
+ * Up to 4 16-bit channels (8 bytes) plus the 8-byte timestamp fit in
+ * sizeof(s64) * 2. Good enough for current sensors, which use at most
+ * 3 16-bit vectors.
+ */
+#define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2)
+
+typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p);
+
+/**
+ * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
+ * @ec: cros EC device structure
+ * @cmd_lock: lock used to prevent simultaneous access to the
+ * commands.
+ * @msg: cros EC command structure
+ * @param: motion sensor parameters structure
+ * @resp: motion sensor response structure
+ * @type: type of motion sensor
+ * @range_updated: True if the range of the sensor has been
+ * updated.
+ * @curr_range: If updated, the current range value.
+ * It will be reapplied at every resume.
+ * @calib: calibration parameters. Note that triggered
+ * capture data always provides the calibrated
+ * data.
+ * @samples: static array to hold data from a single capture.
+ * For each channel we need 2 bytes, except for
+ * the timestamp. The timestamp is always last and
+ * is always 8-byte aligned.
+ * @read_ec_sensors_data: function used for accessing sensors values
+ * @fifo_max_event_count: Size of the EC sensor FIFO
+ * @frequencies: Table of known available frequencies:
+ * 0, Min and Max in mHz
+ */
+struct cros_ec_sensors_core_state {
+ struct cros_ec_device *ec;
+ struct mutex cmd_lock;
+
+ struct cros_ec_command *msg;
+ struct ec_params_motion_sense param;
+ struct ec_response_motion_sense *resp;
+
+ enum motionsensor_type type;
+
+ bool range_updated;
+ int curr_range;
+
+ struct calib_data {
+ s16 offset;
+ u16 scale;
+ } calib[CROS_EC_SENSOR_MAX_AXIS];
+ s8 sign[CROS_EC_SENSOR_MAX_AXIS];
+ u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8);
+
+ int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data);
+
+ u32 fifo_max_event_count;
+ int frequencies[6];
+};
+
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+struct platform_device;
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+ struct iio_dev *indio_dev, bool physical_device,
+ cros_ec_sensors_capture_t trigger_capture);
+
+int cros_ec_sensors_core_register(struct device *dev,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t push_data);
+
+irqreturn_t cros_ec_sensors_capture(int irq, void *p);
+int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
+ s16 *data,
+ s64 timestamp);
+
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
+ u16 opt_length);
+
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+
+int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type,
+ int *length,
+ long mask);
+
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+
+extern const struct dev_pm_ops cros_ec_sensors_pm_ops;
+
+/* List of extended channel specifications for all sensors. */
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_limited_info[];
+
+#endif /* __CROS_EC_SENSORS_CORE_H */
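
For illustration, a hypothetical platform-driver probe wiring up the helpers
declared above (the my_* name and the trimmed error handling are assumptions):

static int my_cros_sensor_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&pdev->dev,
					  sizeof(struct cros_ec_sensors_core_state));
	if (!indio_dev)
		return -ENOMEM;

	/* a physical device using the default capture handler */
	ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
					cros_ec_sensors_capture);
	if (ret)
		return ret;

	return cros_ec_sensors_core_register(&pdev->dev, indio_dev,
					     cros_ec_sensors_push_data);
}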
diff --git a/include/linux/iio/common/inv_sensors_timestamp.h b/include/linux/iio/common/inv_sensors_timestamp.h
new file mode 100644
index 000000000000..8d506f1e9df2
--- /dev/null
+++ b/include/linux/iio/common/inv_sensors_timestamp.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Invensense, Inc.
+ */
+
+#ifndef INV_SENSORS_TIMESTAMP_H_
+#define INV_SENSORS_TIMESTAMP_H_
+
+/**
+ * struct inv_sensors_timestamp_chip - chip internal properties
+ * @clock_period: internal clock period in ns
+ * @jitter: acceptable jitter in per-mille
+ * @init_period: chip initial period at reset in ns
+ */
+struct inv_sensors_timestamp_chip {
+ uint32_t clock_period;
+ uint32_t jitter;
+ uint32_t init_period;
+};
+
+/**
+ * struct inv_sensors_timestamp_interval - timestamps interval
+ * @lo: interval lower bound
+ * @up: interval upper bound
+ */
+struct inv_sensors_timestamp_interval {
+ int64_t lo;
+ int64_t up;
+};
+
+/**
+ * struct inv_sensors_timestamp_acc - accumulator for computing an estimation
+ * @val: current estimation of the value, the mean of all values
+ * @idx: current index of the next free place in values table
+ * @values: table of all measured values, used for computing the mean
+ */
+struct inv_sensors_timestamp_acc {
+ uint32_t val;
+ size_t idx;
+ uint32_t values[32];
+};
+
+/**
+ * struct inv_sensors_timestamp - timestamp management states
+ * @chip: chip internal characteristics
+ * @min_period: minimal acceptable clock period
+ * @max_period: maximal acceptable clock period
+ * @it: interrupts interval timestamps
+ * @timestamp: store last timestamp for computing next data timestamp
+ * @mult: current internal period multiplier
+ * @new_mult: new set internal period multiplier (not yet effective)
+ * @period: measured current period of the sensor
+ * @chip_period: accumulator for computing internal chip period
+ */
+struct inv_sensors_timestamp {
+ struct inv_sensors_timestamp_chip chip;
+ uint32_t min_period;
+ uint32_t max_period;
+ struct inv_sensors_timestamp_interval it;
+ int64_t timestamp;
+ uint32_t mult;
+ uint32_t new_mult;
+ uint32_t period;
+ struct inv_sensors_timestamp_acc chip_period;
+};
+
+void inv_sensors_timestamp_init(struct inv_sensors_timestamp *ts,
+ const struct inv_sensors_timestamp_chip *chip);
+
+int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ uint32_t period, bool fifo);
+
+void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts,
+ size_t sample_nb, int64_t timestamp);
+
+static inline int64_t inv_sensors_timestamp_pop(struct inv_sensors_timestamp *ts)
+{
+ ts->timestamp += ts->period;
+ return ts->timestamp;
+}
+
+void inv_sensors_timestamp_apply_odr(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ unsigned int fifo_no);
+
+static inline void inv_sensors_timestamp_reset(struct inv_sensors_timestamp *ts)
+{
+ const struct inv_sensors_timestamp_interval interval_init = {0LL, 0LL};
+
+ ts->it = interval_init;
+ ts->timestamp = 0;
+}
+
+#endif
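
For illustration, a sketch of the intended call sequence when draining a
sensor FIFO; my_fill_timestamps() is hypothetical and assumes ts was set up
with inv_sensors_timestamp_init():

static void my_fill_timestamps(struct inv_sensors_timestamp *ts,
			       int64_t *out, size_t nb, int64_t irq_timestamp)
{
	size_t i;

	/* account for nb new samples observed at irq_timestamp */
	inv_sensors_timestamp_interrupt(ts, nb, irq_timestamp);

	/* then pop one interpolated timestamp per FIFO sample, oldest first */
	for (i = 0; i < nb; i++)
		out[i] = inv_sensors_timestamp_pop(ts);
}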
diff --git a/include/linux/iio/common/ssp_sensors.h b/include/linux/iio/common/ssp_sensors.h
index f4d1b0edb432..06c9b4b563b3 100644
--- a/include/linux/iio/common/ssp_sensors.h
+++ b/include/linux/iio/common/ssp_sensors.h
@@ -1,16 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _SSP_SENSORS_H_
#define _SSP_SENSORS_H_
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 7b0fa8b5c120..f9ae5cdd884f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* STMicroelectronics sensors library driver
*
* Copyright 2012-2013 STMicroelectronics Inc.
*
* Denis Ciocca <denis.ciocca@st.com>
- *
- * Licensed under the GPL-2.
*/
#ifndef ST_SENSORS_H
@@ -14,14 +13,23 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/irqreturn.h>
+#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
#include <linux/platform_data/st_sensors_pdata.h>
-#define ST_SENSORS_TX_MAX_LENGTH 2
-#define ST_SENSORS_RX_MAX_LENGTH 6
+#define LSM9DS0_IMU_DEV_NAME "lsm9ds0"
+#define LSM303D_IMU_DEV_NAME "lsm303d"
+
+/*
+ * Buffer size max case: 2 bytes per channel, 3 channels in total +
+ * 8-byte timestamp channel (s64)
+ */
+#define ST_SENSORS_MAX_BUFFER_SIZE (ALIGN(2 * 3, sizeof(s64)) + \
+ sizeof(s64))
#define ST_SENSORS_ODR_LIST_MAX 10
#define ST_SENSORS_FULLSCALE_AVL_MAX 10
@@ -40,10 +48,10 @@
#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27
#define ST_SENSORS_MAX_NAME 17
-#define ST_SENSORS_MAX_4WAI 7
+#define ST_SENSORS_MAX_4WAI 8
-#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
- ch2, s, endian, rbits, sbits, addr) \
+#define ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, ext) \
{ \
.type = device_type, \
.modified = mod, \
@@ -59,8 +67,14 @@
.storagebits = sbits, \
.endianness = endian, \
}, \
+ .ext_info = ext, \
}
+#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr) \
+ ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, NULL)
+
#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \
IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \
st_sensors_sysfs_sampling_frequency_avail)
@@ -131,28 +145,39 @@ struct st_sensor_das {
};
/**
+ * struct st_sensor_int_drdy - ST sensor device drdy line parameters
+ * @addr: address of INT drdy register.
+ * @mask: mask to enable drdy line.
+ * @addr_od: address to enable/disable Open Drain on the INT line.
+ * @mask_od: mask to enable/disable Open Drain on the INT line.
+ */
+struct st_sensor_int_drdy {
+ u8 addr;
+ u8 mask;
+ u8 addr_od;
+ u8 mask_od;
+};
+
+/**
* struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
- * @addr: address of the register.
- * @mask_int1: mask to enable/disable IRQ on INT1 pin.
- * @mask_int2: mask to enable/disable IRQ on INT2 pin.
+ * struct int1 - data-ready configuration register for INT1 pin.
+ * struct int2 - data-ready configuration register for INT2 pin.
* @addr_ihl: address to enable/disable active low on the INT lines.
* @mask_ihl: mask to enable/disable active low on the INT lines.
- * @addr_od: address to enable/disable Open Drain on the INT lines.
- * @mask_od: mask to enable/disable Open Drain on the INT lines.
- * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt
+ * struct stat_drdy - status register of DRDY (data ready) interrupt.
* struct ig1 - represents the Interrupt Generator 1 of sensors.
* @en_addr: address of the enable ig1 register.
* @en_mask: mask to write the on/off value for enable.
*/
struct st_sensor_data_ready_irq {
- u8 addr;
- u8 mask_int1;
- u8 mask_int2;
+ struct st_sensor_int_drdy int1;
+ struct st_sensor_int_drdy int2;
u8 addr_ihl;
u8 mask_ihl;
- u8 addr_od;
- u8 mask_od;
- u8 addr_stat_drdy;
+ struct {
+ u8 addr;
+ u8 mask;
+ } stat_drdy;
struct {
u8 en_addr;
u8 en_mask;
@@ -160,36 +185,6 @@ struct st_sensor_data_ready_irq {
};
/**
- * struct st_sensor_transfer_buffer - ST sensor device I/O buffer
- * @buf_lock: Mutex to protect rx and tx buffers.
- * @tx_buf: Buffer used by SPI transfer function to send data to the sensors.
- * This buffer is used to avoid DMA not-aligned issue.
- * @rx_buf: Buffer used by SPI transfer to receive data from sensors.
- * This buffer is used to avoid DMA not-aligned issue.
- */
-struct st_sensor_transfer_buffer {
- struct mutex buf_lock;
- u8 rx_buf[ST_SENSORS_RX_MAX_LENGTH];
- u8 tx_buf[ST_SENSORS_TX_MAX_LENGTH] ____cacheline_aligned;
-};
-
-/**
- * struct st_sensor_transfer_function - ST sensor device I/O function
- * @read_byte: Function used to read one byte.
- * @write_byte: Function used to write one byte.
- * @read_multiple_byte: Function used to read multiple byte.
- */
-struct st_sensor_transfer_function {
- int (*read_byte) (struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, u8 *res_byte);
- int (*write_byte) (struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, u8 data);
- int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, int len, u8 *data,
- bool multiread_bit);
-};
-
-/**
* struct st_sensor_settings - ST specific sensor settings
* @wai: Contents of WhoAmI register.
* @wai_addr: The address of WhoAmI register.
@@ -226,53 +221,46 @@ struct st_sensor_settings {
/**
* struct st_sensor_data - ST sensor device status
- * @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
+ * @mount_matrix: The mounting matrix of the sensor.
* @sensor_settings: Pointer to the specific sensor settings in use.
* @current_fullscale: Maximum range of measure by the sensor.
- * @vdd: Pointer to sensor's Vdd power supply
- * @vdd_io: Pointer to sensor's Vdd-IO power supply
+ * @regmap: Pointer to specific sensor regmap configuration.
* @enabled: Status of the sensor (false->off, true->on).
- * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread.
- * @buffer_data: Data used by buffer part.
* @odr: Output data rate of the sensor [Hz].
* @num_data_channels: Number of data channels used in buffer.
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
* @int_pin_open_drain: Set the interrupt/DRDY to open drain.
- * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
- * @tf: Transfer function structure used by I/O operations.
- * @tb: Transfer buffers and mutex used by I/O operations.
+ * @irq: the IRQ number.
* @edge_irq: the IRQ triggers on edges and need special handling.
* @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
* @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
+ * @buffer_data: Data used by buffer part.
+ * @odr_lock: Local lock for preventing concurrent ODR accesses/changes
*/
struct st_sensor_data {
- struct device *dev;
struct iio_trigger *trig;
+ struct iio_mount_matrix mount_matrix;
struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
- struct regulator *vdd;
- struct regulator *vdd_io;
+ struct regmap *regmap;
bool enabled;
- bool multiread_bit;
-
- char *buffer_data;
unsigned int odr;
unsigned int num_data_channels;
u8 drdy_int_pin;
bool int_pin_open_drain;
-
- unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev);
-
- const struct st_sensor_transfer_function *tf;
- struct st_sensor_transfer_buffer tb;
+ int irq;
bool edge_irq;
bool hw_irq_trigger;
s64 hw_timestamp;
+
+ struct mutex odr_lock;
+
+ char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN);
};
#ifdef CONFIG_IIO_BUFFER
@@ -283,7 +271,6 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p);
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops);
-void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
int st_sensors_validate_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
#else
@@ -292,10 +279,6 @@ static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
{
return 0;
}
-static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
-{
- return;
-}
#define st_sensors_validate_device NULL
#endif
@@ -308,8 +291,6 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
int st_sensors_power_enable(struct iio_dev *indio_dev);
-void st_sensors_power_disable(struct iio_dev *indio_dev);
-
int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
@@ -323,8 +304,11 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale);
int st_sensors_read_info_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *ch, int *val);
-int st_sensors_check_device_support(struct iio_dev *indio_dev,
- int num_sensors_list, const struct st_sensor_settings *sensor_settings);
+int st_sensors_get_settings_index(const char *name,
+ const struct st_sensor_settings *list,
+ const int list_length);
+
+int st_sensors_verify_id(struct iio_dev *indio_dev);
ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -332,16 +316,22 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf);
-#ifdef CONFIG_OF
-void st_sensors_of_name_probe(struct device *dev,
- const struct of_device_id *match,
- char *name, int len);
-#else
-static inline void st_sensors_of_name_probe(struct device *dev,
- const struct of_device_id *match,
- char *name, int len)
-{
-}
-#endif
+void st_sensors_dev_name_probe(struct device *dev, char *name, int len);
+
+/* Accelerometer */
+const struct st_sensor_settings *st_accel_get_settings(const char *name);
+int st_accel_common_probe(struct iio_dev *indio_dev);
+
+/* Gyroscope */
+const struct st_sensor_settings *st_gyro_get_settings(const char *name);
+int st_gyro_common_probe(struct iio_dev *indio_dev);
+
+/* Magnetometer */
+const struct st_sensor_settings *st_magn_get_settings(const char *name);
+int st_magn_common_probe(struct iio_dev *indio_dev);
+
+/* Pressure */
+const struct st_sensor_settings *st_press_get_settings(const char *name);
+int st_press_common_probe(struct iio_dev *indio_dev);
#endif /* ST_SENSORS_H */
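
For illustration, a sketch of how a bus driver uses the per-type settings
lookup that replaces st_sensors_check_device_support(); my_st_accel_probe()
is hypothetical, and st_sensors_i2c_configure() is declared in
st_sensors_i2c.h below:

static int my_st_accel_probe(struct i2c_client *client)
{
	const struct st_sensor_settings *settings;
	struct st_sensor_data *sdata;
	struct iio_dev *indio_dev;
	int ret;

	st_sensors_dev_name_probe(&client->dev, client->name,
				  sizeof(client->name));
	settings = st_accel_get_settings(client->name);
	if (!settings)
		return -ENODEV;

	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*sdata));
	if (!indio_dev)
		return -ENOMEM;

	sdata = iio_priv(indio_dev);
	sdata->sensor_settings = (struct st_sensor_settings *)settings;

	ret = st_sensors_i2c_configure(indio_dev, client);
	if (ret)
		return ret;

	return st_accel_common_probe(indio_dev);
}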
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
index 0a2c25e06d1f..5f15cf01036c 100644
--- a/include/linux/iio/common/st_sensors_i2c.h
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* STMicroelectronics sensors i2c library driver
*
* Copyright 2012-2013 STMicroelectronics Inc.
*
* Denis Ciocca <denis.ciocca@st.com>
- *
- * Licensed under the GPL-2.
*/
#ifndef ST_SENSORS_I2C_H
@@ -13,18 +12,8 @@
#include <linux/i2c.h>
#include <linux/iio/common/st_sensors.h>
-#include <linux/of.h>
-
-void st_sensors_i2c_configure(struct iio_dev *indio_dev,
- struct i2c_client *client, struct st_sensor_data *sdata);
-#ifdef CONFIG_ACPI
-int st_sensors_match_acpi_device(struct device *dev);
-#else
-static inline int st_sensors_match_acpi_device(struct device *dev)
-{
- return -ENODEV;
-}
-#endif
+int st_sensors_i2c_configure(struct iio_dev *indio_dev,
+ struct i2c_client *client);
#endif /* ST_SENSORS_I2C_H */
diff --git a/include/linux/iio/common/st_sensors_spi.h b/include/linux/iio/common/st_sensors_spi.h
index d964a3563dc6..90b25f087f06 100644
--- a/include/linux/iio/common/st_sensors_spi.h
+++ b/include/linux/iio/common/st_sensors_spi.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* STMicroelectronics sensors spi library driver
*
* Copyright 2012-2013 STMicroelectronics Inc.
*
* Denis Ciocca <denis.ciocca@st.com>
- *
- * Licensed under the GPL-2.
*/
#ifndef ST_SENSORS_SPI_H
@@ -14,7 +13,7 @@
#include <linux/spi/spi.h>
#include <linux/iio/common/st_sensors.h>
-void st_sensors_spi_configure(struct iio_dev *indio_dev,
- struct spi_device *spi, struct st_sensor_data *sdata);
+int st_sensors_spi_configure(struct iio_dev *indio_dev,
+ struct spi_device *spi);
#endif /* ST_SENSORS_SPI_H */
diff --git a/include/linux/iio/configfs.h b/include/linux/iio/configfs.h
index 93befd67c15c..84cab3f47e80 100644
--- a/include/linux/iio/configfs.h
+++ b/include/linux/iio/configfs.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O configfs support
*
* Copyright (c) 2015 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __IIO_CONFIGFS
#define __IIO_CONFIGFS
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 5e347a9805fd..5039558267e4 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O in kernel consumer interface
*
* Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_INKERN_CONSUMER_H_
#define _IIO_INKERN_CONSUMER_H_
@@ -16,6 +13,7 @@
struct iio_dev;
struct iio_chan_spec;
struct device;
+struct fwnode_handle;
/**
* struct iio_channel - everything needed for a consumer to use a channel
@@ -67,15 +65,6 @@ void iio_channel_release(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *consumer_channel);
/**
- * devm_iio_channel_release() - Resource managed version of
- * iio_channel_release().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: The channel to be released.
- */
-void devm_iio_channel_release(struct device *dev, struct iio_channel *chan);
-
-/**
* iio_channel_get_all() - get all channels associated with a client
* @dev: Pointer to consumer device.
*
@@ -110,19 +99,40 @@ void iio_channel_release_all(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get_all(struct device *dev);
/**
- * devm_iio_channel_release_all() - Resource managed version of
- * iio_channel_release_all().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: Array channel to be released.
+ * fwnode_iio_channel_get_by_name() - get description of all that is needed to access channel.
+ * @fwnode: Pointer to consumer Firmware node
+ * @name: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
+ */
+struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
+ const char *name);
+
+/**
+ * devm_fwnode_iio_channel_get_by_name() - Resource managed version of
+ * fwnode_iio_channel_get_by_name().
+ * @dev: Pointer to consumer device.
+ * @fwnode: Pointer to consumer Firmware node
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
+ *
+ * Returns an error pointer encoding a negative errno if it is not able to get
+ * the iio channel, otherwise returns a valid pointer to the iio channel.
+ *
+ * The allocated iio channel is automatically released when the device is
+ * unbound.
*/
-void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan);
+struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *consumer_channel);
struct iio_cb_buffer;
/**
* iio_channel_get_all_cb() - register callback for triggered capture
* @dev: Pointer to client device.
- * @cb: Callback function.
+ * @cb: Callback function. Must be safe to call from any context
+ * (e.g. must not sleep).
* @private: Private data passed to callback.
*
* NB right now we have no ability to mux data from multiple devices.
@@ -134,6 +144,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
void *private),
void *private);
/**
+ * iio_channel_cb_set_buffer_watermark() - set the buffer watermark.
+ * @cb_buffer: The callback buffer from whom we want the channel
+ * information.
+ * @watermark: buffer watermark in bytes.
+ *
+ * This function allows configuring the buffer watermark.
+ */
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer,
+ size_t watermark);
+
+/**
* iio_channel_release_all_cb() - release and unregister the callback.
* @cb_buffer: The callback buffer that was allocated.
*/
@@ -181,8 +202,9 @@ struct iio_dev
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_channel_raw(struct iio_channel *chan,
int *val);
@@ -192,8 +214,9 @@ int iio_read_channel_raw(struct iio_channel *chan,
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*
* In contrast to the normal iio_read_channel_raw, this function
* returns the average of multiple reads.
@@ -216,12 +239,54 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_read_channel_processed_scale() - read and scale a processed value
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ * @scale: Scale factor to apply during the conversion
+ *
+ * Returns an error code or 0.
+ *
+ * This function will read a processed value from a channel. This will work
+ * like @iio_read_channel_processed() but also scale with an additional
+ * scale factor while attempting to minimize any precision loss.
+ */
+int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
+ unsigned int scale);
+
+/**
+ * iio_write_channel_attribute() - Write values to the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ * @val2: Value being written. val2 use depends on attribute type.
+ * @attribute: info attribute to be written.
+ *
+ * Returns an error code or 0.
+ */
+int iio_write_channel_attribute(struct iio_channel *chan, int val,
+ int val2, enum iio_chan_info_enum attribute);
+
+/**
+ * iio_read_channel_attribute() - Read values from the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ * @val2: Value read back. val2 use depends on attribute type.
+ * @attribute: info attribute to be read.
+ *
+ * Returns an error code on failure. Else returns a description of what is in val
+ * and val2, such as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
+ * + val2/1e6
+ */
+int iio_read_channel_attribute(struct iio_channel *chan, int *val,
+ int *val2, enum iio_chan_info_enum attribute);
+
+/**
* iio_write_channel_raw() - write to a given channel
* @chan: The channel being queried.
* @val: Value being written.
*
- * Note raw writes to iio channels are in dac counts and hence
- * scale will need to be applied if standard units required.
+ * Note that for raw writes to iio channels, if the value provided is
+ * in standard units, the effect of the scale and offset must be removed
+ * as (value / scale) - offset.
*/
int iio_write_channel_raw(struct iio_channel *chan, int val);
@@ -231,12 +296,25 @@ int iio_write_channel_raw(struct iio_channel *chan, int val);
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
/**
+ * iio_read_min_channel_raw() - read minimum available raw value from a given
+ * channel, i.e. the minimum possible value.
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
+ */
+int iio_read_min_channel_raw(struct iio_channel *chan, int *val);
+
+/**
* iio_read_avail_channel_raw() - read available raw values from a given channel
* @chan: The channel being queried.
* @vals: Available values read back.
@@ -247,13 +325,28 @@ int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
* For ranges, three vals are always returned; min, step and max.
* For lists, all the possible values are enumerated.
*
- * Note raw available values from iio channels are in adc counts and
- * hence scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw available values from iio
+ * channels need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_avail_channel_raw(struct iio_channel *chan,
const int **vals, int *length);
/**
+ * iio_read_avail_channel_attribute() - read available channel attribute values
+ * @chan: The channel being queried.
+ * @vals: Available values read back.
+ * @type: Type of values read back.
+ * @length: Number of entries in vals.
+ * @attribute: info attribute to be read back.
+ *
+ * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST.
+ */
+int iio_read_avail_channel_attribute(struct iio_channel *chan,
+ const int **vals, int *type, int *length,
+ enum iio_chan_info_enum attribute);
+
+/**
* iio_get_channel_type() - get the type of a channel
* @channel: The channel being queried.
* @type: The type of the channel.
@@ -290,6 +383,24 @@ int iio_read_channel_scale(struct iio_channel *chan, int *val,
int *val2);
/**
+ * iio_multiply_value() - Multiply an IIO value
+ * @result: Destination pointer for the multiplication result
+ * @multiplier: Multiplier.
+ * @type: One of the IIO_VAL_* constants. This decides how the @val and
+ * @val2 parameters are interpreted.
+ * @val: Value being multiplied.
+ * @val2: Value being multiplied. @val2 use depends on type.
+ *
+ * Multiply an IIO value with a s64 multiplier storing the result as
+ * IIO_VAL_INT. This is typically used for scaling.
+ *
+ * Returns:
+ * IIO_VAL_INT on success or a negative error-number on failure.
+ */
+int iio_multiply_value(int *result, s64 multiplier,
+ unsigned int type, int val, int val2);
+
+/**
* iio_convert_raw_to_processed() - Converts a raw value to a processed value
* @chan: The channel being queried
* @raw: The raw IIO to convert
@@ -326,7 +437,7 @@ unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan);
* @chan: The channel being queried.
* @attr: The ext_info attribute to read.
* @buf: Where to store the attribute value. Assumed to hold
- * at least PAGE_SIZE bytes.
+ * at least PAGE_SIZE bytes and to be aligned at PAGE_SIZE.
*
* Returns the number of bytes written to buf (perhaps w/o zero termination;
* it need not even be a string), or an error code.
@@ -349,4 +460,14 @@ ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
const char *buf, size_t len);
+/**
+ * iio_read_channel_label() - read label for a given channel
+ * @chan: The channel being queried.
+ * @buf: Where to store the attribute value. Assumed to hold
+ * at least PAGE_SIZE bytes and to be aligned at PAGE_SIZE.
+ *
+ * Returns the number of bytes written to buf, or an error code.
+ */
+ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf);
+
#endif
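
For illustration, a consumer-side sketch of the documented
(raw + offset) * scale convention; "battery_voltage" is a hypothetical
mapping name:

static int my_read_processed(struct device *dev, int *val)
{
	struct iio_channel *chan;

	chan = devm_iio_channel_get(dev, "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* the core applies (raw + offset) * scale where possible */
	return iio_read_channel_processed(chan, val);
}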
diff --git a/include/linux/iio/dac/ad5421.h b/include/linux/iio/dac/ad5421.h
index 8fd8f057a890..d8ee9a7f8a6a 100644
--- a/include/linux/iio/dac/ad5421.h
+++ b/include/linux/iio/dac/ad5421.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IIO_DAC_AD5421_H__
#define __IIO_DAC_AD5421_H__
diff --git a/include/linux/iio/dac/ad5504.h b/include/linux/iio/dac/ad5504.h
index 43895376a9ca..9f23c90486ee 100644
--- a/include/linux/iio/dac/ad5504.h
+++ b/include/linux/iio/dac/ad5504.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD5504 SPI DAC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef SPI_AD5504_H_
diff --git a/include/linux/iio/dac/ad5791.h b/include/linux/iio/dac/ad5791.h
index 45ee281c6660..02966553f71e 100644
--- a/include/linux/iio/dac/ad5791.h
+++ b/include/linux/iio/dac/ad5791.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD5791 SPI DAC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef SPI_AD5791_H_
diff --git a/include/linux/iio/dac/max517.h b/include/linux/iio/dac/max517.h
index 7668716cd73c..4923645a18fd 100644
--- a/include/linux/iio/dac/max517.h
+++ b/include/linux/iio/dac/max517.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* MAX517 DAC driver
*
* Copyright 2011 Roland Stigge <stigge@antcom.de>
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef IIO_DAC_MAX517_H_
#define IIO_DAC_MAX517_H_
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
index 628b2cf54c50..1f7e53c506b6 100644
--- a/include/linux/iio/dac/mcp4725.h
+++ b/include/linux/iio/dac/mcp4725.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* MCP4725 DAC driver
*
* Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef IIO_DAC_MCP4725_H_
@@ -16,7 +15,7 @@
* @vref_buffered: Controls buffering of the external reference voltage.
*
* Vref related settings are available only on MCP4756. See
- * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information.
+ * Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml for more information.
*/
struct mcp4725_platform_data {
bool use_vref;
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
index 7dfb10ee2669..7f8b55551ed0 100644
--- a/include/linux/iio/driver.h
+++ b/include/linux/iio/driver.h
@@ -1,16 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O in kernel access map interface.
*
* Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_INKERN_H_
#define _IIO_INKERN_H_
+struct device;
+struct iio_dev;
struct iio_map;
/**
@@ -19,7 +18,7 @@ struct iio_map;
* @map: array of mappings specifying association of channel with client
*/
int iio_map_array_register(struct iio_dev *indio_dev,
- struct iio_map *map);
+ const struct iio_map *map);
/**
* iio_map_array_unregister() - tell the core to remove consumer mappings for
@@ -28,4 +27,18 @@ int iio_map_array_register(struct iio_dev *indio_dev,
*/
int iio_map_array_unregister(struct iio_dev *indio_dev);
+/**
+ * devm_iio_map_array_register - device-managed version of iio_map_array_register
+ * @dev: Device object to which to bind the unwinding of this registration
+ * @indio_dev: Pointer to the iio_dev structure
+ * @maps: Pointer to an IIO map object which is to be registered to this IIO device
+ *
+ * This function will call iio_map_array_register() to register an IIO map object
+ * and will also hook a callback to the iio_map_array_unregister() function to
+ * handle de-registration of the IIO map object when the device's refcount goes to
+ * zero.
+ */
+int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
+ const struct iio_map *maps);
+
#endif
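
For illustration, a provider-side sketch of the new device-managed map
registration; IIO_MAP() and struct iio_map come from <linux/iio/machine.h>
and the names are hypothetical:

static const struct iio_map my_maps[] = {
	IIO_MAP("adc0", "my-consumer", "battery_voltage"),
	{ /* sentinel */ }
};

static int my_register_maps(struct device *dev, struct iio_dev *indio_dev)
{
	/* unregistration is handled automatically via devres */
	return devm_iio_map_array_register(dev, indio_dev, my_maps);
}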
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 8ad87d1c5340..72062a0c7c87 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O - event passing to userspace
*
* Copyright (c) 2008-2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_EVENTS_H_
#define _IIO_EVENTS_H_
@@ -13,7 +10,7 @@
#include <uapi/linux/iio/events.h>
/**
- * IIO_EVENT_CODE() - create event identifier
+ * _IIO_EVENT_CODE() - create event identifier
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
* @diff: Whether the event is for a differential channel or not.
* @modifier: Modifier for the channel. Should be one of enum iio_modifier.
@@ -22,10 +19,13 @@
* @chan: Channel number for non-differential channels.
* @chan1: First channel number for differential channels.
* @chan2: Second channel number for differential channels.
+ *
+ * Drivers should use the specialized macros below instead of using this one
+ * directly.
*/
-#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
- type, chan, chan1, chan2) \
+#define _IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
+ type, chan, chan1, chan2) \
(((u64)type << 56) | ((u64)diff << 55) | \
((u64)direction << 48) | ((u64)modifier << 40) | \
((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \
@@ -33,7 +33,8 @@
/**
- * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
+ * IIO_MOD_EVENT_CODE() - create event identifier for modified (non
+ * differential) channels
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
* @number: Channel number.
* @modifier: Modifier for the channel. Should be one of enum iio_modifier.
@@ -43,10 +44,11 @@
#define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \
type, direction) \
- IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
+ _IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
/**
- * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
+ * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified (non
+ * differential) channels
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
* @number: Channel number.
* @type: Type of the event. Should be one of enum iio_event_type.
@@ -54,6 +56,18 @@
*/
#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \
- IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+ _IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+
+/**
+ * IIO_DIFF_EVENT_CODE() - create event identifier for differential channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @chan1: First channel number for differential channels.
+ * @chan2: Second channel number for differential channels.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+#define IIO_DIFF_EVENT_CODE(chan_type, chan1, chan2, type, direction) \
+ _IIO_EVENT_CODE(chan_type, 1, 0, direction, type, 0, chan1, chan2)
#endif
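
For illustration, how a driver builds and pushes an event code with the
specialized macros; my_report_thresh() is hypothetical, while
iio_push_event() and iio_get_time_ns() are the existing core helpers:

static void my_report_thresh(struct iio_dev *indio_dev)
{
	/* non-differential voltage channel 2, rising threshold */
	u64 code = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2,
					IIO_EV_TYPE_THRESH,
					IIO_EV_DIR_RISING);

	/* a differential event would use IIO_DIFF_EVENT_CODE() (bit 55 set) */
	iio_push_event(indio_dev, code, iio_get_time_ns(indio_dev));
}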
diff --git a/include/linux/iio/frequency/ad9523.h b/include/linux/iio/frequency/ad9523.h
index 12ce3ee427fd..ff22a0ac15f5 100644
--- a/include/linux/iio/frequency/ad9523.h
+++ b/include/linux/iio/frequency/ad9523.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD9523 SPI Low Jitter Clock Generator
*
* Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef IIO_FREQUENCY_AD9523_H_
@@ -129,8 +128,8 @@ enum cpole1_capacitor {
* @pll2_ndiv_b_cnt: PLL2 Feedback N-divider, B Counter, range 0..63.
* @pll2_freq_doubler_en: PLL2 frequency doubler enable.
* @pll2_r2_div: PLL2 R2 divider, range 0..31.
- * @pll2_vco_diff_m1: VCO1 divider, range 3..5.
- * @pll2_vco_diff_m2: VCO2 divider, range 3..5.
+ * @pll2_vco_div_m1: VCO1 divider, range 3..5.
+ * @pll2_vco_div_m2: VCO2 divider, range 3..5.
* @rpole2: PLL2 loop filter Rpole resistor value.
* @rzero: PLL2 loop filter Rzero resistor value.
* @cpole1: PLL2 loop filter Cpole capacitor value.
@@ -176,8 +175,8 @@ struct ad9523_platform_data {
unsigned char pll2_ndiv_b_cnt;
bool pll2_freq_doubler_en;
unsigned char pll2_r2_div;
- unsigned char pll2_vco_diff_m1; /* 3..5 */
- unsigned char pll2_vco_diff_m2; /* 3..5 */
+ unsigned char pll2_vco_div_m1; /* 3..5 */
+ unsigned char pll2_vco_div_m2; /* 3..5 */
/* Loop Filter PLL2 */
enum rpole2_resistor rpole2;
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index ffd8c8f90928..ce2086f97e3f 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ADF4350/ADF4351 SPI PLL driver
*
* Copyright 2012-2013 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef IIO_PLL_ADF4350_H_
@@ -52,7 +51,7 @@
/* REG3 Bit Definitions */
#define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3)
-#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16)
+#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 15)
#define ADF4350_REG3_12BIT_CSR_EN (1 << 18)
#define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21)
#define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22)
@@ -104,9 +103,6 @@
* @r2_user_settings: User defined settings for ADF4350/1 REGISTER_2.
* @r3_user_settings: User defined settings for ADF4350/1 REGISTER_3.
* @r4_user_settings: User defined settings for ADF4350/1 REGISTER_4.
- * @gpio_lock_detect: Optional, if set with a valid GPIO number,
- * pll lock state is tested upon read.
- * If not used - set to -1.
*/
struct adf4350_platform_data {
@@ -122,7 +118,6 @@ struct adf4350_platform_data {
unsigned r2_user_settings;
unsigned r3_user_settings;
unsigned r4_user_settings;
- int gpio_lock_detect;
};
#endif /* IIO_PLL_ADF4350_H_ */
diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h
index 2a820850f284..74b6d1cadc86 100644
--- a/include/linux/iio/gyro/itg3200.h
+++ b/include/linux/iio/gyro/itg3200.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* itg3200.h -- support InvenSense ITG3200
* Digital 3-Axis Gyroscope driver
@@ -5,10 +6,6 @@
* Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
* Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
* Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef I2C_ITG3200_H_
@@ -104,6 +101,9 @@
struct itg3200 {
struct i2c_client *i2c;
struct iio_trigger *trig;
+ struct iio_mount_matrix orientation;
+ /* lock to protect against multiple access to the device */
+ struct mutex lock;
};
enum ITG3200_SCAN_INDEX {
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
new file mode 100644
index 000000000000..e8255c2e33bc
--- /dev/null
+++ b/include/linux/iio/hw-consumer.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Industrial I/O in kernel hardware consumer interface
+ *
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef LINUX_IIO_HW_CONSUMER_H
+#define LINUX_IIO_HW_CONSUMER_H
+
+struct iio_hw_consumer;
+
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
+
+#endif
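
For illustration, the expected lifecycle of a hardware consumer;
my_hw_path_read() is hypothetical and devm_iio_hw_consumer_alloc() is
assumed to return an ERR_PTR on failure:

static int my_hw_path_read(struct device *dev)
{
	struct iio_hw_consumer *hwc;
	int ret;

	hwc = devm_iio_hw_consumer_alloc(dev);
	if (IS_ERR(hwc))
		return PTR_ERR(hwc);

	ret = iio_hw_consumer_enable(hwc);
	if (ret)
		return ret;

	/* ... trigger and fetch a conversion over the hardware path ... */

	iio_hw_consumer_disable(hwc);
	return 0;
}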
diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h
new file mode 100644
index 000000000000..66f830ab9b49
--- /dev/null
+++ b/include/linux/iio/iio-gts-helper.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* gain-time-scale conversion helpers for IIO light sensors
+ *
+ * Copyright (c) 2023 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#ifndef __IIO_GTS_HELPER__
+#define __IIO_GTS_HELPER__
+
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * struct iio_gain_sel_pair - gain - selector values
+ *
+ * In many cases devices like light sensors allow setting signal amplification
+ * (gain) using a register interface. This structure describes amplification
+ * and corresponding selector (register value)
+ *
+ * @gain: Gain (multiplication) value. Gain must be positive, negative
+ * values are reserved for error handling.
+ * @sel: Selector (usually register value) used to indicate this gain.
+ * NOTE: Only selectors >= 0 supported.
+ */
+struct iio_gain_sel_pair {
+ int gain;
+ int sel;
+};
+
+/**
+ * struct iio_itime_sel_mul - integration time description
+ *
+ * In many cases devices like light sensors allow setting the duration of
+ * collecting data. Typically this duration has also an impact to the magnitude
+ * of measured values (gain). This structure describes the relation of
+ * integration time and amplification as well as corresponding selector
+ * (register value).
+ *
+ * An example could be a sensor allowing 50, 100, 200 and 400 ms times. The
+ * respective multiplication values could be 50 ms => 1, 100 ms => 2,
+ * 200 ms => 4 and 400 ms => 8, assuming the impact of integration time is
+ * linear in a way that collecting data for 50 ms causes value X and doubling
+ * the collection time causes value 2X, etc.
+ *
+ * @time_us: Integration time in microseconds. Time values must be positive,
+ * negative values are reserved for error handling.
+ * @sel: Selector (usually register value) used to indicate this time
+ * NOTE: Only selectors >= 0 supported.
+ * @mul: Multiplication to the values caused by this time.
+ * NOTE: Only multipliers > 0 supported.
+ */
+struct iio_itime_sel_mul {
+ int time_us;
+ int sel;
+ int mul;
+};
+
+struct iio_gts {
+ u64 max_scale;
+ const struct iio_gain_sel_pair *hwgain_table;
+ int num_hwgain;
+ const struct iio_itime_sel_mul *itime_table;
+ int num_itime;
+ int **per_time_avail_scale_tables;
+ int *avail_all_scales_table;
+ int num_avail_all_scales;
+ int *avail_time_tables;
+ int num_avail_time_tables;
+};
+
+#define GAIN_SCALE_GAIN(_gain, _sel) \
+{ \
+ .gain = (_gain), \
+ .sel = (_sel), \
+}
+
+#define GAIN_SCALE_ITIME_US(_itime, _sel, _mul) \
+{ \
+ .time_us = (_itime), \
+ .sel = (_sel), \
+ .mul = (_mul), \
+}
+
+static inline const struct iio_itime_sel_mul *
+iio_gts_find_itime_by_time(struct iio_gts *gts, int time)
+{
+ int i;
+
+ if (!gts->num_itime)
+ return NULL;
+
+ for (i = 0; i < gts->num_itime; i++)
+ if (gts->itime_table[i].time_us == time)
+ return &gts->itime_table[i];
+
+ return NULL;
+}
+
+static inline const struct iio_itime_sel_mul *
+iio_gts_find_itime_by_sel(struct iio_gts *gts, int sel)
+{
+ int i;
+
+ for (i = 0; i < gts->num_itime; i++)
+ if (gts->itime_table[i].sel == sel)
+ return &gts->itime_table[i];
+
+ return NULL;
+}
+
+int devm_iio_init_iio_gts(struct device *dev, int max_scale_int, int max_scale_nano,
+ const struct iio_gain_sel_pair *gain_tbl, int num_gain,
+ const struct iio_itime_sel_mul *tim_tbl, int num_times,
+ struct iio_gts *gts);
+/**
+ * iio_gts_find_int_time_by_sel - find integration time matching a selector
+ * @gts: Gain time scale descriptor
+ * @sel: selector for which matching integration time is searched for
+ *
+ * Return: integration time matching given selector or -EINVAL if
+ * integration time was not found.
+ */
+static inline int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel)
+{
+ const struct iio_itime_sel_mul *itime;
+
+ itime = iio_gts_find_itime_by_sel(gts, sel);
+ if (!itime)
+ return -EINVAL;
+
+ return itime->time_us;
+}
+
+/**
+ * iio_gts_find_sel_by_int_time - find selector matching integration time
+ * @gts: Gain time scale descriptor
+ * @time: Integration time for which matching selector is searched for
+ *
+ * Return: a selector matching given integration time or -EINVAL if
+ * selector was not found.
+ */
+static inline int iio_gts_find_sel_by_int_time(struct iio_gts *gts, int time)
+{
+ const struct iio_itime_sel_mul *itime;
+
+ itime = iio_gts_find_itime_by_time(gts, time);
+ if (!itime)
+ return -EINVAL;
+
+ return itime->sel;
+}
+
+/**
+ * iio_gts_valid_time - check if given integration time is valid
+ * @gts: Gain time scale descriptor
+ * @time_us: Integration time to check
+ *
+ * Return: True if given time is supported by device. False if not.
+ */
+static inline bool iio_gts_valid_time(struct iio_gts *gts, int time_us)
+{
+ return iio_gts_find_itime_by_time(gts, time_us) != NULL;
+}
+
+int iio_gts_find_sel_by_gain(struct iio_gts *gts, int gain);
+
+/**
+ * iio_gts_valid_gain - check if given HW-gain is valid
+ * @gts: Gain time scale descriptor
+ * @gain: HW-gain to check
+ *
+ * Return: True if given gain is supported by device. False if not.
+ */
+static inline bool iio_gts_valid_gain(struct iio_gts *gts, int gain)
+{
+ return iio_gts_find_sel_by_gain(gts, gain) >= 0;
+}
+
+int iio_find_closest_gain_low(struct iio_gts *gts, int gain, bool *in_range);
+int iio_gts_find_gain_by_sel(struct iio_gts *gts, int sel);
+int iio_gts_get_min_gain(struct iio_gts *gts);
+int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel);
+int iio_gts_find_sel_by_int_time(struct iio_gts *gts, int time);
+
+int iio_gts_total_gain_to_scale(struct iio_gts *gts, int total_gain,
+ int *scale_int, int *scale_nano);
+int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel,
+ int scale_int, int scale_nano,
+ int *gain_sel);
+int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int,
+ int scale_nano, int *gain_sel,
+ int *time_sel);
+int iio_gts_get_scale(struct iio_gts *gts, int gain, int time, int *scale_int,
+ int *scale_nano);
+int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts,
+ int old_gain, int old_time_sel,
+ int new_time_sel, int *new_gain);
+int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain);
+int iio_gts_find_new_gain_by_gain_time_min(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain, bool *in_range);
+int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
+ int *length);
+int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type,
+ int *length);
+int iio_gts_avail_scales_for_time(struct iio_gts *gts, int time,
+ const int **vals, int *type, int *length);
+int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time);
+
+#endif
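
For illustration, a sketch of the table setup a driver would do before using
the helpers above; all gain, time and selector values here are hypothetical:

static const struct iio_gain_sel_pair my_gains[] = {
	GAIN_SCALE_GAIN(1, 0x0),
	GAIN_SCALE_GAIN(4, 0x1),
	GAIN_SCALE_GAIN(16, 0x2),
};

static const struct iio_itime_sel_mul my_itimes[] = {
	GAIN_SCALE_ITIME_US(400000, 0x0, 8),
	GAIN_SCALE_ITIME_US(100000, 0x1, 2),
	GAIN_SCALE_ITIME_US(50000, 0x2, 1),
};

static int my_init_gts(struct device *dev, struct iio_gts *gts)
{
	/* hypothetical max scale (gain 1, shortest time): 0.064 */
	return devm_iio_init_iio_gts(dev, 0, 64000000,
				     my_gains, ARRAY_SIZE(my_gains),
				     my_itimes, ARRAY_SIZE(my_itimes), gts);
}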
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
new file mode 100644
index 000000000000..4247497f3f8b
--- /dev/null
+++ b/include/linux/iio/iio-opaque.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _INDUSTRIAL_IO_OPAQUE_H_
+#define _INDUSTRIAL_IO_OPAQUE_H_
+
+/**
+ * struct iio_dev_opaque - industrial I/O device opaque information
+ * @indio_dev: public industrial I/O device information
+ * @id: used to identify device internally
+ * @currentmode: operating mode currently in use, may be eventually
+ * checked by device drivers but should be considered
+ * read-only as this is a core internal bit
+ * @driver_module: used to make it harder to undercut users
+ * @mlock: lock used to prevent simultaneous device state changes
+ * @mlock_key: lockdep class for iio_dev lock
+ * @info_exist_lock: lock to prevent use during removal
+ * @trig_readonly: mark the current trigger immutable
+ * @event_interface: event chrdevs associated with interrupt lines
+ * @attached_buffers: array of buffers statically attached by the driver
+ * @attached_buffers_cnt: number of buffers in the array of statically attached buffers
+ * @buffer_ioctl_handler: ioctl() handler for this IIO device's buffer interface
+ * @buffer_list: list of all buffers currently attached
+ * @channel_attr_list: keep track of automatically created channel
+ * attributes
+ * @chan_attr_group: group for all attrs in base directory
+ * @ioctl_handlers: ioctl handlers registered with the core handler
+ * @groups: attribute groups
+ * @groupcounter: index of next attribute group
+ * @legacy_scan_el_group: attribute group for legacy scan elements attribute group
+ * @legacy_buffer_group: attribute group for legacy buffer attributes group
+ * @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned()
+ * @bounce_buffer_size: size of currently allocated bounce buffer
+ * @scan_index_timestamp: cache of the index to the timestamp
+ * @clock_id: timestamping clock posix identifier
+ * @chrdev: associated character device
+ * @flags: file ops related flags including busy flag.
+ * @debugfs_dentry: device specific debugfs dentry
+ * @cached_reg_addr: cached register address for debugfs reads
+ * @read_buf: read buffer to be used for the initial reg read
+ * @read_buf_len: data length in @read_buf
+ */
+struct iio_dev_opaque {
+ struct iio_dev indio_dev;
+ int currentmode;
+ int id;
+ struct module *driver_module;
+ struct mutex mlock;
+ struct lock_class_key mlock_key;
+ struct mutex info_exist_lock;
+ bool trig_readonly;
+ struct iio_event_interface *event_interface;
+ struct iio_buffer **attached_buffers;
+ unsigned int attached_buffers_cnt;
+ struct iio_ioctl_handler *buffer_ioctl_handler;
+ struct list_head buffer_list;
+ struct list_head channel_attr_list;
+ struct attribute_group chan_attr_group;
+ struct list_head ioctl_handlers;
+ const struct attribute_group **groups;
+ int groupcounter;
+ struct attribute_group legacy_scan_el_group;
+ struct attribute_group legacy_buffer_group;
+ void *bounce_buffer;
+ size_t bounce_buffer_size;
+
+ unsigned int scan_index_timestamp;
+ clockid_t clock_id;
+ struct cdev chrdev;
+ unsigned long flags;
+
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dentry;
+ unsigned int cached_reg_addr;
+ char read_buf[20];
+ unsigned int read_buf_len;
+#endif
+};
+
+#define to_iio_dev_opaque(_indio_dev) \
+ container_of((_indio_dev), struct iio_dev_opaque, indio_dev)
+
+#endif
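
For orientation: after this split, core code recovers the private state from the public struct with the container_of() round trip above. A minimal sketch of a core-internal helper (the helper itself is illustrative, not part of this patch):

static int iio_device_internal_id(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	/* @id now lives in the opaque struct, out of drivers' reach. */
	return iio_dev_opaque->id;
}
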
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index c380daa40c0e..872ebdf0dd77 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -1,52 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_
+#include <linux/align.h>
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/compiler_types.h>
+#include <linux/minmax.h>
+#include <linux/slab.h>
#include <linux/iio/types.h>
-#include <linux/of.h>
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
* Currently assumes nano seconds.
*/
-enum iio_chan_info_enum {
- IIO_CHAN_INFO_RAW = 0,
- IIO_CHAN_INFO_PROCESSED,
- IIO_CHAN_INFO_SCALE,
- IIO_CHAN_INFO_OFFSET,
- IIO_CHAN_INFO_CALIBSCALE,
- IIO_CHAN_INFO_CALIBBIAS,
- IIO_CHAN_INFO_PEAK,
- IIO_CHAN_INFO_PEAK_SCALE,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
- IIO_CHAN_INFO_AVERAGE_RAW,
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_SAMP_FREQ,
- IIO_CHAN_INFO_FREQUENCY,
- IIO_CHAN_INFO_PHASE,
- IIO_CHAN_INFO_HARDWAREGAIN,
- IIO_CHAN_INFO_HYSTERESIS,
- IIO_CHAN_INFO_INT_TIME,
- IIO_CHAN_INFO_ENABLE,
- IIO_CHAN_INFO_CALIBHEIGHT,
- IIO_CHAN_INFO_CALIBWEIGHT,
- IIO_CHAN_INFO_DEBOUNCE_COUNT,
- IIO_CHAN_INFO_DEBOUNCE_TIME,
- IIO_CHAN_INFO_CALIBEMISSIVITY,
- IIO_CHAN_INFO_OVERSAMPLING_RATIO,
-};
+struct fwnode_reference_args;
enum iio_shared_by {
IIO_SEPARATE,
@@ -134,15 +108,16 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
/**
* IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
* @_name: Attribute name ("_available" will be appended to the name)
+ * @_shared: Whether the attribute is shared between all channels
* @_e: Pointer to an iio_enum struct
*
* Creates a read only attribute which lists all the available enum items in a
* space separated list. This should usually be used together with IIO_ENUM()
*/
-#define IIO_ENUM_AVAILABLE(_name, _e) \
+#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
.name = (_name "_available"), \
- .shared = IIO_SHARED_BY_TYPE, \
+ .shared = _shared, \
.read = iio_enum_available_read, \
.private = (uintptr_t)(_e), \
}
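
Call sites gain one argument: the sharing level is now spelled out instead of being hard-coded to IIO_SHARED_BY_TYPE. A hedged sketch of the updated pattern (the enum and its items are invented for illustration):

static const char * const my_mode_items[] = { "normal", "turbo" };

static const struct iio_enum my_mode_enum = {
	.items = my_mode_items,
	.num_items = ARRAY_SIZE(my_mode_items),
	/* .get/.set callbacks omitted for brevity */
};

static const struct iio_chan_spec_ext_info my_ext_info[] = {
	IIO_ENUM("mode", IIO_SHARED_BY_TYPE, &my_mode_enum),
	/* was: IIO_ENUM_AVAILABLE("mode", &my_mode_enum) */
	IIO_ENUM_AVAILABLE("mode", IIO_SHARED_BY_TYPE, &my_mode_enum),
	{ }
};
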
@@ -158,8 +133,7 @@ struct iio_mount_matrix {
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
const struct iio_chan_spec *chan, char *buf);
-int of_iio_read_mount_matrix(const struct device *dev, const char *propname,
- struct iio_mount_matrix *matrix);
+int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
typedef const struct iio_mount_matrix *
(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
@@ -202,6 +176,27 @@ struct iio_event_spec {
};
/**
+ * struct iio_scan_type - specification for channel data format in buffer
+ * @sign: 's' or 'u' to specify signed or unsigned
+ * @realbits: Number of valid bits of data
+ * @storagebits: Realbits + padding
+ * @shift: Shift right by this before masking out realbits.
+ * @repeat: Number of times real/storage bits repeats. When the
+ * repeat element is more than 1, then the type element in
+ * sysfs will show a repeat value. Otherwise, the number
+ * of repetitions is omitted.
+ * @endianness: little or big endian
+ */
+struct iio_scan_type {
+ char sign;
+ u8 realbits;
+ u8 storagebits;
+ u8 shift;
+ u8 repeat;
+ enum iio_endian endianness;
+};
+
+/**
* struct iio_chan_spec - specification of a single channel
* @type: What type of measurement is the channel making.
* @channel: What number do we wish to assign the channel.
@@ -211,18 +206,13 @@ struct iio_event_spec {
* @address: Driver specific identifier.
* @scan_index: Monotonic index to give ordering in scans when read
* from a buffer.
- * @scan_type: sign: 's' or 'u' to specify signed or unsigned
- * realbits: Number of valid bits of data
- * storagebits: Realbits + padding
- * shift: Shift right by this before masking out
- * realbits.
- * repeat: Number of times real/storage bits
- * repeats. When the repeat element is
- * more than 1, then the type element in
- * sysfs will show a repeat value.
- * Otherwise, the number of repetitions is
- * omitted.
- * endianness: little or big endian
+ * @scan_type: struct describing the scan type - mutually exclusive
+ * with ext_scan_type.
+ * @ext_scan_type: Used in rare cases where there is more than one scan
+ * format for a channel. When this is used, the flag
+ * has_ext_scan_type must be set and the driver must
+ * implement get_current_scan_type in struct iio_info.
+ * @num_ext_scan_type: Number of elements in ext_scan_type.
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
* @info_mask_separate_available: What availability information is to be
@@ -250,6 +240,9 @@ struct iio_event_spec {
* @extend_name: Allows labeling of channel attributes with an
* informative name. Note this has no effect codes etc,
* unlike modifiers.
+ * This field is deprecated in favour of providing
+ * iio_info->read_label() to override the label, which
+ * unlike @extend_name does not affect sysfs filenames.
* @datasheet_name: A name used in in-kernel mapping of channels. It should
* correspond to the first name that the channel is referred
* to by in the datasheet (e.g. IND), or the nearest
@@ -263,6 +256,7 @@ struct iio_event_spec {
* attributes but not for event codes.
* @output: Channel is output.
* @differential: Channel is differential.
+ * @has_ext_scan_type: True if ext_scan_type is used instead of scan_type.
*/
struct iio_chan_spec {
enum iio_chan_type type;
@@ -270,31 +264,31 @@ struct iio_chan_spec {
int channel2;
unsigned long address;
int scan_index;
- struct {
- char sign;
- u8 realbits;
- u8 storagebits;
- u8 shift;
- u8 repeat;
- enum iio_endian endianness;
- } scan_type;
- long info_mask_separate;
- long info_mask_separate_available;
- long info_mask_shared_by_type;
- long info_mask_shared_by_type_available;
- long info_mask_shared_by_dir;
- long info_mask_shared_by_dir_available;
- long info_mask_shared_by_all;
- long info_mask_shared_by_all_available;
+ union {
+ struct iio_scan_type scan_type;
+ struct {
+ const struct iio_scan_type *ext_scan_type;
+ unsigned int num_ext_scan_type;
+ };
+ };
+ unsigned long info_mask_separate;
+ unsigned long info_mask_separate_available;
+ unsigned long info_mask_shared_by_type;
+ unsigned long info_mask_shared_by_type_available;
+ unsigned long info_mask_shared_by_dir;
+ unsigned long info_mask_shared_by_dir_available;
+ unsigned long info_mask_shared_by_all;
+ unsigned long info_mask_shared_by_all_available;
const struct iio_event_spec *event_spec;
unsigned int num_event_specs;
const struct iio_chan_spec_ext_info *ext_info;
const char *extend_name;
const char *datasheet_name;
- unsigned modified:1;
- unsigned indexed:1;
- unsigned output:1;
- unsigned differential:1;
+ unsigned int modified:1;
+ unsigned int indexed:1;
+ unsigned int output:1;
+ unsigned int differential:1;
+ unsigned int has_ext_scan_type:1;
};
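
The union keeps existing initializers source-compatible: a plain .scan_type still works, while multi-format channels fill ext_scan_type instead. A minimal sketch with invented scan types (see also iio_get_current_scan_type() later in this patch):

static const struct iio_scan_type my_scan_types[] = {
	{ .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_BE },
	{ .sign = 's', .realbits = 24, .storagebits = 32, .endianness = IIO_BE },
};

static const struct iio_chan_spec my_channel = {
	.type = IIO_VOLTAGE,
	.scan_index = 0,
	.ext_scan_type = my_scan_types,
	.num_ext_scan_type = ARRAY_SIZE(my_scan_types),
	.has_ext_scan_type = 1,
	/* get_current_scan_type must then be set in struct iio_info. */
};
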
@@ -344,9 +338,55 @@ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
}
s64 iio_get_time_ns(const struct iio_dev *indio_dev);
-unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
-/* Device operating modes */
+/*
+ * Device operating modes
+ * @INDIO_DIRECT_MODE: There is an access to either:
+ * a) The last single value available for devices that do not provide
+ * on-demand reads.
+ * b) A new value after performing an on-demand read otherwise.
+ * On most devices, this is a single-shot read. On some devices with data
+ * streams without an 'on-demand' function, this might also be the 'last value'
+ * feature. Above all, this mode internally means that we are not in any of the
+ * other modes, and sysfs reads should work.
+ * Device drivers should inform the core if they support this mode.
+ * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
+ * It indicates that an explicit trigger is required. This requests the core to
+ * attach a poll function when enabling the buffer, which is indicated by the
+ * _TRIGGERED suffix.
+ * The core will ensure this mode is set when registering a triggered buffer
+ * with iio_triggered_buffer_setup().
+ * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
+ * No poll function can be attached because there is no triggered infrastructure
+ * we can use to cause capture. There is a kfifo that the driver will fill, but
+ * not "only one scan at a time". Typically, hardware will have a buffer that
+ * can hold multiple scans. Software may read one or more scans at a single time
+ * and push the available data to a kfifo. This means the core will not attach
+ * any poll function when enabling the buffer.
+ * The core will ensure this mode is set when registering a simple kfifo buffer
+ * with devm_iio_kfifo_buffer_setup().
+ * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
+ * Same as above but this time the buffer is not a kfifo where we have direct
+ * access to the data. Instead, the consumer driver must access the data through
+ * non-software-visible channels (or DMA when there is no demux possible in
+ * software).
+ * The core will ensure this mode is set when registering a dmaengine buffer
+ * with devm_iio_dmaengine_buffer_setup().
+ * @INDIO_EVENT_TRIGGERED: Very unusual mode.
+ * Triggers usually refer to an external event which will start data capture.
+ * Here it is kind of the opposite: a particular state of the data itself
+ * produces the event. We don't necessarily
+ * have access to the data itself, but to the event produced. For example, this
+ * can be a threshold detector. The internal path of this mode is very close to
+ * the INDIO_BUFFER_TRIGGERED mode.
+ * The core will ensure this mode is set when registering a triggered event.
+ * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
+ * Here, triggers can result in data capture and can be routed to multiple
+ * hardware components, which make them close to regular triggers in the way
+ * they must be managed by the core, but without the entire interrupts/poll
+ * functions burden. Interrupts are irrelevant as the data flow is hardware
+ * mediated and distributed.
+ */
#define INDIO_DIRECT_MODE 0x01
#define INDIO_BUFFER_TRIGGERED 0x02
#define INDIO_BUFFER_SOFTWARE 0x04
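
In practice a driver only initializes the modes it supports before registration; the INDIO_BUFFER_* bits are ORed in by the core when the matching buffer setup helpers run. A one-line sketch of the common case:

	/* Sysfs (direct) access only; buffer setup helpers add the
	 * INDIO_BUFFER_* bits during registration. */
	indio_dev->modes = INDIO_DIRECT_MODE;
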
@@ -364,13 +404,15 @@ unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
#define INDIO_MAX_RAW_ELEMENTS 4
+struct iio_val_int_plus_micro {
+ int integer;
+ int micro;
+};
+
struct iio_trigger; /* forward declaration */
-struct iio_dev;
/**
* struct iio_info - constant information about device
- * @driver_module: module structure used to ensure correct
- * ownership of chrdevs etc
* @event_attrs: event control attributes
* @attrs: general purpose device attributes
* @read_raw: function to request a value from the device.
@@ -396,6 +438,8 @@ struct iio_dev;
* and max. For lists, all possible values are enumerated.
* @write_raw: function to write a value to the device.
* Parameters are the same as for read_raw.
+ * @read_label: function to request label name for a specified label,
+ * for better channel identification.
* @write_raw_get_fmt: callback function to query the expected
* format/precision. If not set by the driver, write_raw
* returns IIO_VAL_INT_PLUS_MICRO.
@@ -403,16 +447,17 @@ struct iio_dev;
* @write_event_config: set if the event is enabled.
* @read_event_value: read a configuration value associated with the event.
* @write_event_value: write a configuration value for the event.
+ * @read_event_label: function to request label name for a specified label,
+ * for better event identification.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
+ * @get_current_scan_type: must be implemented by drivers that use ext_scan_type
+ * in the channel spec to return the index of the currently
+ * active ext_scan type for a channel.
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
- * @of_xlate: function pointer to obtain channel specifier index.
- * When #iio-cells is greater than '0', the driver could
- * provide a custom of_xlate function that reads the
- * *args* and returns the appropriate index in registered
- * IIO channels array.
+ * @fwnode_xlate: fwnode based function pointer to obtain channel specifier index.
* @hwfifo_set_watermark: function pointer to set the current hardware
* fifo watermark level; see hwfifo_* entries in
* Documentation/ABI/testing/sysfs-bus-iio for details on
@@ -425,7 +470,6 @@ struct iio_dev;
* were flushed and there was an error.
**/
struct iio_info {
- struct module *driver_module;
const struct attribute_group *event_attrs;
const struct attribute_group *attrs;
@@ -455,6 +499,10 @@ struct iio_info {
int val2,
long mask);
+ int (*read_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label);
+
int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask);
@@ -468,7 +516,7 @@ struct iio_info {
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
- int state);
+ bool state);
int (*read_event_value)(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
@@ -482,18 +530,26 @@ struct iio_info {
enum iio_event_direction dir,
enum iio_event_info info, int val, int val2);
+ int (*read_event_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ char *label);
+
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
+ int (*get_current_scan_type)(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan);
int (*update_scan_mode)(struct iio_dev *indio_dev,
const unsigned long *scan_mask);
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
- unsigned reg, unsigned writeval,
- unsigned *readval);
- int (*of_xlate)(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec);
- int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval);
+ int (*fwnode_xlate)(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec);
+ int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned int val);
int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
- unsigned count);
+ unsigned int count);
};
/**
@@ -517,102 +573,124 @@ struct iio_buffer_setup_ops {
/**
* struct iio_dev - industrial I/O device
- * @id: [INTERN] used to identify device internally
- * @modes: [DRIVER] operating modes supported by device
- * @currentmode: [DRIVER] current operating mode
+ * @modes: [DRIVER] bitmask listing all the operating modes
+ * supported by the IIO device. This list should be
+ * initialized before registering the IIO device. It can
+ * also be filled in by the IIO core, as a result of
+ * enabling particular features in the driver
+ * (see iio_triggered_event_setup()).
* @dev: [DRIVER] device structure, should be assigned a parent
* and owner
- * @event_interface: [INTERN] event chrdevs associated with interrupt lines
* @buffer: [DRIVER] any buffer present
- * @buffer_list: [INTERN] list of all buffers currently attached
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @mlock: [DRIVER] lock used to prevent simultaneous device state
- * changes
- * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
+ * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
+ * array in order of preference, the most preferred
+ * masks first.
* @masklength: [INTERN] the length of the mask established from
* channels
* @active_scan_mask: [INTERN] union of all scan masks requested by buffers
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
- * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
- * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
* @num_channels: [DRIVER] number of channels specified in @channels.
- * @channel_attr_list: [INTERN] keep track of automatically created channel
- * attributes
- * @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
+ * @label: [DRIVER] unique name to identify which device this is
* @info: [DRIVER] callbacks and constant info from driver
- * @clock_id: [INTERN] timestamping clock posix identifier
- * @info_exist_lock: [INTERN] lock to prevent use during removal
* @setup_ops: [DRIVER] callbacks to call before and after buffer
* enable/disable
- * @chrdev: [INTERN] associated character device
- * @groups: [INTERN] attribute groups
- * @groupcounter: [INTERN] index of next attribute group
- * @flags: [INTERN] file ops related flags including busy flag.
- * @debugfs_dentry: [INTERN] device specific debugfs dentry.
- * @cached_reg_addr: [INTERN] cached register address for debugfs reads.
+ * @priv: [DRIVER] reference to driver's private information
+ * **MUST** be accessed **ONLY** via iio_priv() helper
*/
struct iio_dev {
- int id;
-
int modes;
- int currentmode;
struct device dev;
- struct iio_event_interface *event_interface;
-
struct iio_buffer *buffer;
- struct list_head buffer_list;
int scan_bytes;
- struct mutex mlock;
const unsigned long *available_scan_masks;
- unsigned masklength;
+ unsigned int __private masklength;
const unsigned long *active_scan_mask;
- bool scan_timestamp;
- unsigned scan_index_timestamp;
+ bool __private scan_timestamp;
struct iio_trigger *trig;
- bool trig_readonly;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
struct iio_chan_spec const *channels;
int num_channels;
- struct list_head channel_attr_list;
- struct attribute_group chan_attr_group;
const char *name;
+ const char *label;
const struct iio_info *info;
- clockid_t clock_id;
- struct mutex info_exist_lock;
const struct iio_buffer_setup_ops *setup_ops;
- struct cdev chrdev;
-#define IIO_MAX_GROUPS 6
- const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
- int groupcounter;
- unsigned long flags;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_dentry;
- unsigned cached_reg_addr;
-#endif
+ void *__private priv;
};
+int iio_device_id(struct iio_dev *indio_dev);
+int iio_device_get_current_mode(struct iio_dev *indio_dev);
+bool iio_buffer_enabled(struct iio_dev *indio_dev);
+
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
-int iio_device_register(struct iio_dev *indio_dev);
+/**
+ * iio_device_register() - register a device with the IIO subsystem
+ * @indio_dev: Device structure filled by the device driver
+ **/
+#define iio_device_register(indio_dev) \
+ __iio_device_register((indio_dev), THIS_MODULE)
+int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
-int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
-void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
+/**
+ * devm_iio_device_register - Resource-managed iio_device_register()
+ * @dev: Device to allocate iio_dev for
+ * @indio_dev: Device structure filled by the device driver
+ *
+ * Managed iio_device_register. The IIO device registered with this
+ * function is automatically unregistered on driver detach. This function
+ * calls iio_device_register() internally. Refer to that function for more
+ * information.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+#define devm_iio_device_register(dev, indio_dev) \
+ __devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
+int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
+ struct module *this_mod);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
-int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
-void iio_device_release_direct_mode(struct iio_dev *indio_dev);
+bool __iio_device_claim_direct(struct iio_dev *indio_dev);
+void __iio_device_release_direct(struct iio_dev *indio_dev);
+
+/*
+ * Helper functions that allow claim and release of direct mode
+ * in a fashion that doesn't generate many false positives from sparse.
+ * Note this must remain static inline in the header so that sparse
+ * can see the __acquire() marking. Revisit when sparse supports
+ * __cond_acquires()
+ */
+static inline bool iio_device_claim_direct(struct iio_dev *indio_dev)
+{
+ if (!__iio_device_claim_direct(indio_dev))
+ return false;
+
+ __acquire(iio_dev);
+
+ return true;
+}
+
+static inline void iio_device_release_direct(struct iio_dev *indio_dev)
+{
+ __iio_device_release_direct(indio_dev);
+ __release(iio_dev);
+}
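
The intended call pattern around direct-mode I/O is claim, do the bus access, release. A minimal sketch of a read_raw() implementation, assuming a hypothetical my_read_channel_hw() bus helper:

static int my_read_raw(struct iio_dev *indio_dev,
		       const struct iio_chan_spec *chan,
		       int *val, int *val2, long mask)
{
	int ret;

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;

	ret = my_read_channel_hw(indio_dev, chan, val); /* hypothetical */
	iio_device_release_direct(indio_dev);

	return ret ? ret : IIO_VAL_INT;
}
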
-extern struct bus_type iio_bus_type;
+int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
+void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
+
+extern const struct bus_type iio_bus_type;
/**
* iio_device_put() - reference counted deallocation of struct device
@@ -624,14 +702,8 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
put_device(&indio_dev->dev);
}
-/**
- * iio_device_get_clock() - Retrieve current timestamping clock for the device
- * @indio_dev: IIO device structure containing the device
- */
-static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
-{
- return indio_dev->clock_id;
-}
+clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
+int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
/**
* dev_to_iio_dev() - Get IIO device struct from a device struct
@@ -655,6 +727,26 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
}
+/**
+ * iio_device_set_parent() - assign parent device to the IIO device object
+ * @indio_dev: IIO device structure
+ * @parent: reference to parent device object
+ *
+ * This utility must be called between IIO device allocation
+ * (via devm_iio_device_alloc()) and IIO device registration
+ * (via iio_device_register() or devm_iio_device_register()).
+ * By default, the device allocation will also assign a parent device to
+ * the IIO device object. In cases where devm_iio_device_alloc() is used,
+ * sometimes the parent device must be different from the device used to
+ * manage the allocation.
+ * In that case, this helper should be used to change the parent, hence the
+ * requirement to call this between allocation & registration.
+ **/
+static inline void iio_device_set_parent(struct iio_dev *indio_dev,
+ struct device *parent)
+{
+ indio_dev->dev.parent = parent;
+}
/**
* iio_device_set_drvdata() - Set device driver data
@@ -675,60 +767,176 @@ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
*
* Returns the data previously set with iio_device_set_drvdata()
*/
-static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev)
+static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
{
return dev_get_drvdata(&indio_dev->dev);
}
-/* Can we make this smaller? */
-#define IIO_ALIGN L1_CACHE_BYTES
-struct iio_dev *iio_device_alloc(int sizeof_priv);
-
-static inline void *iio_priv(const struct iio_dev *indio_dev)
-{
- return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN);
-}
+/*
+ * Used to ensure the iio_priv() structure is aligned to allow that structure
+ * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
+ * must not share cachelines with the rest of the structure, thus making
+ * them safe for use with non-coherent DMA.
+ *
+ * A number of drivers also use this on buffers that include a 64-bit timestamp
+ * that is used with iio_push_to_buffers_with_ts(). Therefore, in the case where
+ * DMA alignment is not sufficient for proper timestamp alignment, we align to
+ * 8 bytes instead.
+ */
+#define IIO_DMA_MINALIGN MAX(ARCH_DMA_MINALIGN, sizeof(s64))
-static inline struct iio_dev *iio_priv_to_dev(void *priv)
-{
- return (struct iio_dev *)((char *)priv -
- ALIGN(sizeof(struct iio_dev), IIO_ALIGN));
-}
+#define __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
+ type name[ALIGN((count), sizeof(s64) / sizeof(type)) + sizeof(s64) / sizeof(type)]
-void iio_device_free(struct iio_dev *indio_dev);
-int devm_iio_device_match(struct device *dev, void *res, void *data);
-struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
-void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
-struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
- const char *fmt, ...);
-void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
+/**
+ * IIO_DECLARE_BUFFER_WITH_TS() - Declare a buffer with timestamp
+ * @type: element type of the buffer
+ * @name: identifier name of the buffer
+ * @count: number of elements in the buffer
+ *
+ * Declares a buffer that is safe to use with iio_push_to_buffers_with_ts(). In
+ * addition to allocating enough space for @count elements of @type, it also
+ * allocates space for an s64 timestamp at the end of the buffer and ensures
+ * proper alignment of the timestamp.
+ */
+#define IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
+ __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(sizeof(s64))
/**
- * iio_buffer_enabled() - helper function to test if the buffer is enabled
- * @indio_dev: IIO device structure for device
- **/
-static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
+ * IIO_DECLARE_DMA_BUFFER_WITH_TS() - Declare a DMA-aligned buffer with timestamp
+ * @type: element type of the buffer
+ * @name: identifier name of the buffer
+ * @count: number of elements in the buffer
+ *
+ * Same as IIO_DECLARE_BUFFER_WITH_TS(), but it uses __aligned(IIO_DMA_MINALIGN)
+ * to ensure that the buffer doesn't share cachelines with anything that comes
+ * before it in a struct. This should not be used for stack-allocated buffers
+ * as stack memory cannot generally be used for DMA.
+ */
+#define IIO_DECLARE_DMA_BUFFER_WITH_TS(type, name, count) \
+ __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(IIO_DMA_MINALIGN)
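
As a sketch, driver-side scan structures built with these helpers might look like this (struct and field names invented):

struct my_scan {
	/* Three 16-bit samples plus room for an aligned s64 timestamp. */
	IIO_DECLARE_BUFFER_WITH_TS(__le16, samples, 3);
};

struct my_state {
	/* DMA variant: the buffer starts on a fresh cacheline so it does
	 * not share lines with the fields that precede it. */
	IIO_DECLARE_DMA_BUFFER_WITH_TS(__le16, rx_buf, 8);
};
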
+
+struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
+
+/* The information at the returned address is guaranteed to be cacheline aligned */
+static inline void *iio_priv(const struct iio_dev *indio_dev)
{
- return indio_dev->currentmode
- & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
- INDIO_BUFFER_SOFTWARE);
+ return ACCESS_PRIVATE(indio_dev, priv);
}
+void iio_device_free(struct iio_dev *indio_dev);
+struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
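
Putting the pieces together, the expected allocation flow is: allocate with room for driver state, fetch it via iio_priv(), then register through the devm wrapper (which records THIS_MODULE). A minimal probe sketch with an invented state struct:

struct my_state {
	int last_raw; /* hypothetical driver state */
};

static int my_probe(struct device *dev)
{
	struct iio_dev *indio_dev;
	struct my_state *st;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev); /* cacheline-aligned private area */
	indio_dev->name = "my-sensor";
	indio_dev->modes = INDIO_DIRECT_MODE;

	return devm_iio_device_register(dev, indio_dev);
}
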
+
+#define devm_iio_trigger_alloc(parent, fmt, ...) \
+ __devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+__printf(3, 4)
+struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
/**
* iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
* @indio_dev: IIO device structure for device
**/
#if defined(CONFIG_DEBUG_FS)
+struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
+#else
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
- return indio_dev->debugfs_dentry;
+ return NULL;
}
+#endif
+
+/**
+ * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
+ * @indio_dev: iio_dev associated with the device that will have triggers suspended
+ *
+ * Return 0 if successful, negative otherwise
+ **/
+int iio_device_suspend_triggering(struct iio_dev *indio_dev);
+
+/**
+ * iio_device_resume_triggering() - resume trigger attached to an iio_dev
+ * that was previously suspended with iio_device_suspend_triggering()
+ * @indio_dev: iio_dev associated with the device that will have triggers resumed
+ *
+ * Return 0 if successful, negative otherwise
+ **/
+int iio_device_resume_triggering(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_ACPI
+bool iio_read_acpi_mount_matrix(struct device *dev,
+ struct iio_mount_matrix *orientation,
+ char *acpi_method);
+const char *iio_get_acpi_device_name_and_data(struct device *dev, const void **data);
#else
-static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
+static inline bool iio_read_acpi_mount_matrix(struct device *dev,
+ struct iio_mount_matrix *orientation,
+ char *acpi_method)
+{
+ return false;
+}
+static inline const char *
+iio_get_acpi_device_name_and_data(struct device *dev, const void **data)
{
return NULL;
}
#endif
+static inline const char *iio_get_acpi_device_name(struct device *dev)
+{
+ return iio_get_acpi_device_name_and_data(dev, NULL);
+}
+
+/**
+ * iio_get_current_scan_type - Get the current scan type for a channel
+ * @indio_dev: the IIO device to get the scan type for
+ * @chan: the channel to get the scan type for
+ *
+ * Most devices only have one scan type per channel and can just access it
+ * directly without calling this function. Core IIO code and drivers that
+ * implement ext_scan_type in the channel spec should use this function to
+ * get the current scan type for a channel.
+ *
+ * Returns: the current scan type for the channel or error.
+ */
+static inline const struct iio_scan_type
+*iio_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ int ret;
+
+ if (chan->has_ext_scan_type) {
+ ret = indio_dev->info->get_current_scan_type(indio_dev, chan);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (ret >= chan->num_ext_scan_type)
+ return ERR_PTR(-EINVAL);
+
+ return &chan->ext_scan_type[ret];
+ }
+
+ return &chan->scan_type;
+}
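
A driver using ext_scan_type returns the index of the currently active format from its callback. A hedged sketch, assuming a hypothetical high-resolution flag in the driver state:

static int my_get_current_scan_type(const struct iio_dev *indio_dev,
				    const struct iio_chan_spec *chan)
{
	struct my_state *st = iio_priv(indio_dev);

	/* Index into chan->ext_scan_type[]; st->high_res is invented. */
	return st->high_res ? 1 : 0;
}
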
+
+/**
+ * iio_get_masklength - Get length of the channels mask
+ * @indio_dev: the IIO device to get the masklength for
+ */
+static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
+{
+ return ACCESS_PRIVATE(indio_dev, masklength);
+}
+
+int iio_active_scan_mask_index(struct iio_dev *indio_dev);
+
+/**
+ * iio_for_each_active_channel - Iterate over the active channels
+ * @indio_dev: the IIO device
+ * @chan: Holds the index of the enabled channel
+ */
+#define iio_for_each_active_channel(indio_dev, chan) \
+ for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
+ iio_get_masklength(indio_dev))
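
Typical use is iterating the enabled scan indices, e.g. when demuxing in a trigger handler. A minimal sketch:

static void my_dump_active(struct iio_dev *indio_dev)
{
	unsigned int chan;

	iio_for_each_active_channel(indio_dev, chan)
		pr_debug("scan index %u is enabled\n", chan);
}
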
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 360da7d18a3d..bfb6df68e6c9 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -1,17 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Common library for ADIS16XXX devices
*
* Copyright 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __IIO_ADIS_H__
#define __IIO_ADIS_H__
+#include <linux/cleanup.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
@@ -21,70 +22,290 @@
#define ADIS_REG_PAGE_ID 0x00
struct adis;
+struct iio_dev_attr;
+
+/**
+ * struct adis_timeout - ADIS chip variant timeouts
+ * @reset_ms: Wait time after the RST pin goes inactive
+ * @sw_reset_ms: Wait time after the software reset command
+ * @self_test_ms: Wait time after the self-test command
+ */
+struct adis_timeout {
+ u16 reset_ms;
+ u16 sw_reset_ms;
+ u16 self_test_ms;
+};
/**
* struct adis_data - ADIS chip variant specific data
* @read_delay: SPI delay for read operations in us
* @write_delay: SPI delay for write operations in us
+ * @cs_change_delay: SPI delay between CS changes in us
* @glob_cmd_reg: Register address of the GLOB_CMD register
* @msc_ctrl_reg: Register address of the MSC_CTRL register
* @diag_stat_reg: Register address of the DIAG_STAT register
- * @status_error_msgs: Array of error messgaes
- * @status_error_mask:
+ * @diag_stat_size: Length (in bytes) of the DIAG_STAT register. If 0, a
+ * default length of 2 bytes is used.
+ * @prod_id_reg: Register address of the PROD_ID register
+ * @prod_id: Product ID code that should be expected when reading @prod_id_reg
+ * @self_test_mask: Bitmask of supported self-test operations
+ * @self_test_reg: Register address to request self test command
+ * @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg
+ * @status_error_msgs: Array of error messages
+ * @status_error_mask: Bitmask of errors supported by the device
+ * @timeouts: Chip specific delays
+ * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable
+ * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin
+ * @has_paging: True if ADIS device has paged registers
+ * @has_fifo: True if ADIS device has a hardware FIFO
+ * @burst_reg_cmd: Register command that triggers burst
+ * @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined,
+ * this should be the minimum size supported by the device.
+ * @burst_max_len: Holds the maximum burst size when the device supports
+ * more than one burst mode with different sizes
+ * @burst_max_speed_hz: Maximum spi speed that can be used in burst mode
*/
struct adis_data {
unsigned int read_delay;
unsigned int write_delay;
+ unsigned int cs_change_delay;
unsigned int glob_cmd_reg;
unsigned int msc_ctrl_reg;
unsigned int diag_stat_reg;
+ unsigned int diag_stat_size;
+ unsigned int prod_id_reg;
+
+ unsigned int prod_id;
unsigned int self_test_mask;
+ unsigned int self_test_reg;
bool self_test_no_autoclear;
- unsigned int startup_delay;
+ const struct adis_timeout *timeouts;
const char * const *status_error_msgs;
unsigned int status_error_mask;
int (*enable_irq)(struct adis *adis, bool enable);
+ bool unmasked_drdy;
bool has_paging;
+ bool has_fifo;
+
+ unsigned int burst_reg_cmd;
+ unsigned int burst_len;
+ unsigned int burst_max_len;
+ unsigned int burst_max_speed_hz;
};
+/**
+ * struct adis_ops: Custom ops for adis devices.
+ * @write: Custom spi write implementation.
+ * @read: Custom spi read implementation.
+ * @reset: Custom sw reset implementation. The custom implementation does not
+ * need to sleep after the reset. It's done by the library already.
+ */
+struct adis_ops {
+ int (*write)(struct adis *adis, unsigned int reg, unsigned int value,
+ unsigned int size);
+ int (*read)(struct adis *adis, unsigned int reg, unsigned int *value,
+ unsigned int size);
+ int (*reset)(struct adis *adis);
+};
+
+/**
+ * struct adis - ADIS device instance data
+ * @spi: Reference to SPI device which owns this ADIS IIO device
+ * @trig: IIO trigger object data
+ * @data: ADIS chip variant specific data
+ * @burst_extra_len: Burst extra length. Should only be used by devices that can
+ * dynamically change their burst mode length.
+ * @ops: ops struct for custom read and write functions
+ * @state_lock: Lock used by the device to protect state
+ * @msg: SPI message object
+ * @xfer: SPI transfer objects to be used for a @msg
+ * @current_page: Some ADIS devices have paged registers; this selects the current page
+ * @irq_flag: IRQ handling flags as passed to request_irq()
+ * @buffer: Data buffer for information read from the device
+ * @tx: DMA safe TX buffer for SPI transfers
+ * @rx: DMA safe RX buffer for SPI transfers
+ */
struct adis {
struct spi_device *spi;
struct iio_trigger *trig;
const struct adis_data *data;
-
- struct mutex txrx_lock;
+ unsigned int burst_extra_len;
+ const struct adis_ops *ops;
+ /*
+ * The state_lock is meant to be used during operations that require
+ * a sequence of SPI R/W in order to protect the SPI transfer
+ * information (fields 'xfer', 'msg' & 'current_page') between
+ * potential concurrent accesses.
+ * This lock is used by all "adis_{functions}" that have to read/write
+ * registers. These functions also have unlocked variants
+ * (see "__adis_{functions}"), which don't hold this lock.
+ * This allows users of the ADIS library to group SPI R/W into
+ * the drivers, but they also must manage this lock themselves.
+ */
+ struct mutex state_lock;
struct spi_message msg;
struct spi_transfer *xfer;
unsigned int current_page;
+ unsigned long irq_flag;
void *buffer;
- uint8_t tx[10] ____cacheline_aligned;
- uint8_t rx[4];
+ u8 tx[10] __aligned(IIO_DMA_MINALIGN);
+ u8 rx[4];
};
int adis_init(struct adis *adis, struct iio_dev *indio_dev,
- struct spi_device *spi, const struct adis_data *data);
-int adis_reset(struct adis *adis);
+ struct spi_device *spi, const struct adis_data *data);
+int __adis_reset(struct adis *adis);
-int adis_write_reg(struct adis *adis, unsigned int reg,
- unsigned int val, unsigned int size);
-int adis_read_reg(struct adis *adis, unsigned int reg,
- unsigned int *val, unsigned int size);
+/**
+ * adis_reset() - Reset the device
+ * @adis: The adis device
+ *
+ * Returns: %0 on success, a negative error code otherwise
+ */
+static inline int adis_reset(struct adis *adis)
+{
+ guard(mutex)(&adis->state_lock);
+ return __adis_reset(adis);
+}
+
+int __adis_write_reg(struct adis *adis, unsigned int reg,
+ unsigned int val, unsigned int size);
+int __adis_read_reg(struct adis *adis, unsigned int reg,
+ unsigned int *val, unsigned int size);
+
+/**
+ * __adis_write_reg_8() - Write single byte to a register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the register to be written
+ * @val: The value to write
+ *
+ * Returns: %0 on success, a negative error code otherwise
+ */
+static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
+ u8 val)
+{
+ return adis->ops->write(adis, reg, val, 1);
+}
+
+/**
+ * __adis_write_reg_16() - Write 2 bytes to a pair of registers (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
+ */
+static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
+ u16 val)
+{
+ return adis->ops->write(adis, reg, val, 2);
+}
+
+/**
+ * __adis_write_reg_32() - write 4 bytes to four registers (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the four register
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
+ */
+static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
+ u32 val)
+{
+ return adis->ops->write(adis, reg, val, 4);
+}
+
+/**
+ * __adis_read_reg_16() - read 2 bytes from a 16-bit register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
+ */
+static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
+ u16 *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = adis->ops->read(adis, reg, &tmp, 2);
+ if (ret == 0)
+ *val = tmp;
+
+ return ret;
+}
+
+/**
+ * __adis_read_reg_32() - read 4 bytes from a 32-bit register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
+ */
+static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
+ u32 *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = adis->ops->read(adis, reg, &tmp, 4);
+ if (ret == 0)
+ *val = tmp;
+
+ return ret;
+}
+
+/**
+ * adis_write_reg() - write N bytes to register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value to write to device (up to 4 bytes)
+ * @size: The size of @val (in bytes)
+ *
+ * Returns: %0 on success, a negative error code otherwise
+ */
+static inline int adis_write_reg(struct adis *adis, unsigned int reg,
+ unsigned int val, unsigned int size)
+{
+ guard(mutex)(&adis->state_lock);
+ return adis->ops->write(adis, reg, val, size);
+}
+
+/**
+ * adis_read_reg() - read N bytes from register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ * @size: The size of the @val buffer
+ *
+ * Returns: %0 on success, a negative error code otherwise
+ */
+static inline int adis_read_reg(struct adis *adis, unsigned int reg,
+ unsigned int *val, unsigned int size)
+{
+ guard(mutex)(&adis->state_lock);
+ return adis->ops->read(adis, reg, val, size);
+}
/**
* adis_write_reg_8() - Write single byte to a register
* @adis: The adis device
* @reg: The address of the register to be written
- * @value: The value to write
+ * @val: The value to write
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
- uint8_t val)
+ u8 val)
{
return adis_write_reg(adis, reg, val, 1);
}
@@ -93,10 +314,12 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
* adis_write_reg_16() - Write 2 bytes to a pair of registers
* @adis: The adis device
* @reg: The address of the lower of the two registers
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
- uint16_t val)
+ u16 val)
{
return adis_write_reg(adis, reg, val, 2);
}
@@ -105,10 +328,12 @@ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
* adis_write_reg_32() - write 4 bytes to four registers
* @adis: The adis device
* @reg: The address of the lower of the four register
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
- uint32_t val)
+ u32 val)
{
return adis_write_reg(adis, reg, val, 4);
}
@@ -118,15 +343,18 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
- uint16_t *val)
+ u16 *val)
{
unsigned int tmp;
int ret;
ret = adis_read_reg(adis, reg, &tmp, 2);
- *val = tmp;
+ if (ret == 0)
+ *val = tmp;
return ret;
}
@@ -136,27 +364,98 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
- uint32_t *val)
+ u32 *val)
{
unsigned int tmp;
int ret;
ret = adis_read_reg(adis, reg, &tmp, 4);
- *val = tmp;
+ if (ret == 0)
+ *val = tmp;
return ret;
}
-int adis_enable_irq(struct adis *adis, bool enable);
-int adis_check_status(struct adis *adis);
+int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
+ const u32 val, u8 size);
+/**
+ * adis_update_bits_base() - ADIS Update bits function - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @size: Size of the register to update
+ *
+ * Updates the desired bits of @reg in accordance with @mask and @val.
+ *
+ * Returns: %0 on success, a negative error code otherwise
+ */
+static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
+ const u32 mask, const u32 val, u8 size)
+{
+ guard(mutex)(&adis->state_lock);
+ return __adis_update_bits_base(adis, reg, mask, val, size);
+}
+
+/**
+ * adis_update_bits() - Wrapper macro for adis_update_bits_base - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates sizeof(@val) at compile time and calls
+ * adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for
+ * @val can lead to undesired behavior if the register to update is 16-bit.
+ */
+#define adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
+})
+
+/**
+ * __adis_update_bits() - Wrapper macro for __adis_update_bits_base - Unlocked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates sizeof(@val) at compile time and calls
+ * __adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for
+ * @val can lead to undesired behavior if the register to update is 16-bit.
+ */
+#define __adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ __adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
+})
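
The sizeof() dispatch is why the caveat about macros matters: a bare constant has type int, so sizeof evaluates to 4 and a 32-bit access is selected. A sketch of the safe pattern for a 16-bit register (register and bit names invented):

static int my_enable_drdy(struct adis *adis)
{
	const u16 val = MY_MSC_CTRL_DRDY_EN; /* hypothetical bit */

	/* Passing the define directly would select a 32-bit access;
	 * the u16 local forces the intended 16-bit update. */
	return adis_update_bits(adis, MY_MSC_CTRL, MY_MSC_CTRL_DRDY_EN, val);
}
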
+
+int __adis_check_status(struct adis *adis);
+int __adis_initial_startup(struct adis *adis);
+int __adis_enable_irq(struct adis *adis, bool enable);
+
+static inline int adis_enable_irq(struct adis *adis, bool enable)
+{
+ guard(mutex)(&adis->state_lock);
+ return __adis_enable_irq(adis, enable);
+}
+
+static inline int adis_check_status(struct adis *adis)
+{
+ guard(mutex)(&adis->state_lock);
+ return __adis_check_status(adis);
+}
-int adis_initial_startup(struct adis *adis);
+#define adis_dev_auto_lock(adis) guard(mutex)(&(adis)->state_lock)
+#define adis_dev_auto_scoped_lock(adis) \
+ scoped_guard(mutex, &(adis)->state_lock)
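
These guards implement the grouping pattern the state_lock comment above describes: take the lock once, then use only the unlocked __adis_* variants inside the locked region. A hedged sketch with invented register names:

static int my_read_pair(struct adis *adis, u16 *a, u16 *b)
{
	int ret;

	adis_dev_auto_lock(adis); /* held until function return */

	ret = __adis_read_reg_16(adis, MY_REG_A, a); /* hypothetical regs */
	if (ret)
		return ret;

	return __adis_read_reg_16(adis, MY_REG_B, b);
}
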
int adis_single_conversion(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int error_mask,
- int *val);
+ const struct iio_chan_spec *chan,
+ unsigned int error_mask, int *val);
#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \
.type = IIO_VOLTAGE, \
@@ -205,7 +504,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
.modified = 1, \
.channel2 = IIO_MOD_ ## mod, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- info_sep, \
+ (info_sep), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = info_all, \
.address = (addr), \
@@ -230,42 +529,43 @@ int adis_single_conversion(struct iio_dev *indio_dev,
#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \
ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits)
+#define devm_adis_setup_buffer_and_trigger(adis, indio_dev, trigger_handler) \
+ devm_adis_setup_buffer_and_trigger_with_attrs((adis), (indio_dev), \
+ (trigger_handler), NULL, \
+ NULL)
+
#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
-int adis_setup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
-void adis_cleanup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev);
+int
+devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis,
+ struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs);
-int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
-void adis_remove_trigger(struct adis *adis);
+int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
int adis_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask);
+ const unsigned long *scan_mask);
#else /* CONFIG_IIO_BUFFER */
-static inline int adis_setup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *))
+static inline int
+devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis,
+ struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs)
{
return 0;
}
-static inline void adis_cleanup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev)
-{
-}
-
-static inline int adis_probe_trigger(struct adis *adis,
- struct iio_dev *indio_dev)
+static inline int devm_adis_probe_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
{
return 0;
}
-static inline void adis_remove_trigger(struct adis *adis)
-{
-}
-
#define adis_update_scan_mode NULL
#endif /* CONFIG_IIO_BUFFER */
@@ -273,7 +573,8 @@ static inline void adis_remove_trigger(struct adis *adis)
#ifdef CONFIG_DEBUG_FS
int adis_debugfs_reg_access(struct iio_dev *indio_dev,
- unsigned int reg, unsigned int writeval, unsigned int *readval);
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval);
#else
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 027cfa9c3703..22874da0c8be 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -1,13 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_IIO_KFIFO_BUF_H__
#define __LINUX_IIO_KFIFO_BUF_H__
struct iio_buffer;
+struct iio_buffer_setup_ops;
+struct iio_dev;
+struct iio_dev_attr;
struct device;
struct iio_buffer *iio_kfifo_allocate(void);
void iio_kfifo_free(struct iio_buffer *r);
-struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
-void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
+int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const struct iio_buffer_setup_ops *setup_ops,
+ const struct iio_dev_attr **buffer_attrs);
+
+#define devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops) \
+ devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (setup_ops), NULL)
#endif
diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h
index 1601a2a63a72..fe7ccbb81184 100644
--- a/include/linux/iio/machine.h
+++ b/include/linux/iio/machine.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O in kernel access map definitions for board files.
*
* Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __LINUX_IIO_MACHINE_H__
@@ -28,4 +25,11 @@ struct iio_map {
void *consumer_data;
};
+#define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \
+{ \
+ .adc_channel_label = _provider_channel, \
+ .consumer_dev_name = _consumer_dev_name, \
+ .consumer_channel = _consumer_channel, \
+}
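
Board files can now build their consumer maps with the helper instead of open-coding each struct. A sketch with invented names:

static struct iio_map my_board_maps[] = {
	/* ADC channel "vbat" consumed by a hypothetical battery driver. */
	IIO_MAP("vbat", "my-battery", "voltage"),
	{ }
};
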
+
#endif
diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h
deleted file mode 100644
index c8400959d197..000000000000
--- a/include/linux/iio/magnetometer/ak8975.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __IIO_MAGNETOMETER_AK8975_H__
-#define __IIO_MAGNETOMETER_AK8975_H__
-
-#include <linux/iio/iio.h>
-
-/**
- * struct ak8975_platform_data - AK8975 magnetometer driver platform data
- * @eoc_gpio: data ready event gpio
- * @orientation: mounting matrix relative to main hardware
- */
-struct ak8975_platform_data {
- int eoc_gpio;
- struct iio_mount_matrix orientation;
-};
-
-#endif
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index fa7931933067..0f7fe7b522e3 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O software device interface
*
* Copyright (c) 2016 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __IIO_SW_DEVICE
@@ -54,13 +51,10 @@ void iio_unregister_sw_device_type(struct iio_sw_device_type *dt);
struct iio_sw_device *iio_sw_device_create(const char *, const char *);
void iio_sw_device_destroy(struct iio_sw_device *);
-int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt);
-void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
-
static inline
void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
- struct config_item_type *type)
+ const struct config_item_type *type)
{
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&d->group, name, type);
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index c97eab67558f..bc77f88df303 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O software trigger interface
*
* Copyright (c) 2015 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __IIO_SW_TRIGGER
@@ -54,13 +51,10 @@ void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt);
struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *);
void iio_sw_trigger_destroy(struct iio_sw_trigger *);
-int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt);
-void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt);
-
static inline
void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
- struct config_item_type *type)
+ const struct config_item_type *type)
{
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&t->group, name, type);
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index ce9426c507fd..de5bb125815c 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -1,17 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core
*
*Copyright (c) 2008 Jonathan Cameron
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
* General attributes
*/
#ifndef _INDUSTRIAL_IO_SYSFS_H_
#define _INDUSTRIAL_IO_SYSFS_H_
+struct iio_buffer;
struct iio_chan_spec;
/**
@@ -20,12 +18,14 @@ struct iio_chan_spec;
* @address: associated register address
* @l: list head for maintaining list of dynamically created attrs
* @c: specification for the underlying channel
+ * @buffer: the IIO buffer to which this attribute belongs to (if any)
*/
struct iio_dev_attr {
struct device_attribute dev_attr;
u64 address;
struct list_head l;
struct iio_chan_spec const *c;
+ struct iio_buffer *buffer;
};
#define to_iio_dev_attr(_dev_attr) \
@@ -97,6 +97,17 @@ struct iio_const_attr {
= { .string = _string, \
.dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
+#define IIO_STATIC_CONST_DEVICE_ATTR(_name, _string) \
+ static ssize_t iio_const_dev_attr_show_##_name( \
+ struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return sysfs_emit(buf, "%s\n", _string); \
+ } \
+ static IIO_DEVICE_ATTR(_name, 0444, \
+ iio_const_dev_attr_show_##_name, NULL, 0)
+
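
The macro replaces open-coded constant attributes: it generates a show() that just emits the fixed string, plus the matching static IIO_DEVICE_ATTR. A sketch (the values are invented):

/* Appears in sysfs as sampling_frequency_available. */
IIO_STATIC_CONST_DEVICE_ATTR(sampling_frequency_available, "100 200 400");
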
/* Generic attributes of onetype or another */
/**
diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h
new file mode 100644
index 000000000000..ce3cf0addb2e
--- /dev/null
+++ b/include/linux/iio/timer/stm32-lptim-trigger.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) STMicroelectronics 2017
+ *
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
+ */
+
+#ifndef _STM32_LPTIM_TRIGGER_H_
+#define _STM32_LPTIM_TRIGGER_H_
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+
+#define LPTIM1_OUT "lptim1_out"
+#define LPTIM2_OUT "lptim2_out"
+#define LPTIM3_OUT "lptim3_out"
+#define LPTIM4_OUT "lptim4_out"
+#define LPTIM5_OUT "lptim5_out"
+
+#define LPTIM1_CH1 "lptim1_ch1"
+#define LPTIM1_CH2 "lptim1_ch2"
+#define LPTIM2_CH1 "lptim2_ch1"
+#define LPTIM2_CH2 "lptim2_ch2"
+#define LPTIM3_CH1 "lptim3_ch1"
+#define LPTIM4_CH1 "lptim4_ch1"
+
+#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+bool is_stm32_lptim_trigger(struct iio_trigger *trig);
+#else
+static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)
+{
+#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+ pr_warn_once("stm32 lptim_trigger not linked in\n");
+#endif
+ return false;
+}
+#endif
+#endif
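A consumer would typically gate trigger selection on this helper from its validate_trigger callback; a minimal sketch (the my_-prefixed name is hypothetical):

	static int my_validate_trigger(struct iio_dev *indio_dev,
				       struct iio_trigger *trig)
	{
		/* only accept an STM32 LPTIM trigger for this device */
		if (!is_stm32_lptim_trigger(trig))
			return -EINVAL;
		return 0;
	}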
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
index d68add80ab86..1ee237b56183 100644
--- a/include/linux/iio/timer/stm32-timer-trigger.h
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) STMicroelectronics 2016
*
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STM32_TIMER_TRIGGER_H_
@@ -73,6 +72,21 @@
#define TIM17_OC1 "tim17_oc1"
-bool is_stm32_timer_trigger(struct iio_trigger *trig);
+#define TIM20_OC1 "tim20_oc1"
+#define TIM20_OC2 "tim20_oc2"
+#define TIM20_OC3 "tim20_oc3"
+#define TIM20_TRGO "tim20_trgo"
+#define TIM20_TRGO2 "tim20_trgo2"
+#if IS_REACHABLE(CONFIG_IIO_STM32_TIMER_TRIGGER)
+bool is_stm32_timer_trigger(struct iio_trigger *trig);
+#else
+static inline bool is_stm32_timer_trigger(struct iio_trigger *trig)
+{
+#if IS_ENABLED(CONFIG_IIO_STM32_TIMER_TRIGGER)
+ pr_warn_once("stm32-timer-trigger not linked in\n");
+#endif
+ return false;
+}
+#endif
#endif
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 7142d8d6e470..bce3b1788199 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core, trigger handling functions
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <linux/irq.h>
#include <linux/module.h>
@@ -23,9 +20,8 @@ struct iio_trigger;
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
- * @owner: used to monitor the usage count of the trigger.
* @set_trigger_state: switch on/off the trigger on demand
- * @try_reenable: function to reenable the trigger when the
+ * @reenable: function to reenable the trigger when the
* use count is zero (may be NULL)
* @validate_device: function to validate the device when the
* current trigger gets changed.
@@ -34,9 +30,8 @@ struct iio_trigger;
* instances of a given device.
**/
struct iio_trigger_ops {
- struct module *owner;
int (*set_trigger_state)(struct iio_trigger *trig, bool state);
- int (*try_reenable)(struct iio_trigger *trig);
+ void (*reenable)(struct iio_trigger *trig);
int (*validate_device)(struct iio_trigger *trig,
struct iio_dev *indio_dev);
};
@@ -45,12 +40,13 @@ struct iio_trigger_ops {
/**
* struct iio_trigger - industrial I/O trigger device
* @ops: [DRIVER] operations structure
+ * @owner: [INTERN] owner of this driver module
* @id: [INTERN] unique id number
* @name: [DRIVER] unique name
* @dev: [DRIVER] associated device (if relevant)
* @list: [INTERN] used in maintenance of global trigger list
* @alloc_list: [DRIVER] used for driver specific trigger list
- * @use_count: use count for the trigger
+ * @use_count: [INTERN] use count for the trigger.
* @subirq_chip: [INTERN] associate 'virtual' irq chip.
* @subirq_base: [INTERN] base number for irqs provided by trigger.
* @subirqs: [INTERN] information about the 'child' irqs.
@@ -59,9 +55,11 @@ struct iio_trigger_ops {
* @attached_own_device:[INTERN] if we are using our own device as trigger,
* i.e. if we registered a poll function to the same
* device as the one providing the trigger.
+ * @reenable_work: [INTERN] work item used to ensure reenable can sleep.
**/
struct iio_trigger {
const struct iio_trigger_ops *ops;
+ struct module *owner;
int id;
const char *name;
struct device dev;
@@ -77,6 +75,7 @@ struct iio_trigger {
unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
struct mutex pool_lock;
bool attached_own_device;
+ struct work_struct reenable_work;
};
@@ -87,20 +86,25 @@ static inline struct iio_trigger *to_iio_trigger(struct device *d)
static inline void iio_trigger_put(struct iio_trigger *trig)
{
- module_put(trig->ops->owner);
+ module_put(trig->owner);
put_device(&trig->dev);
}
static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
get_device(&trig->dev);
- __module_get(trig->ops->owner);
+
+ WARN_ONCE(list_empty(&trig->list),
+ "Getting non-registered iio trigger %s is prohibited\n",
+ trig->name);
+
+ __module_get(trig->owner);
return trig;
}
/**
- * iio_device_set_drvdata() - Set trigger driver data
+ * iio_trigger_set_drvdata() - Set trigger driver data
* @trig: IIO trigger structure
* @data: Driver specific data
*
@@ -138,9 +142,6 @@ int devm_iio_trigger_register(struct device *dev,
**/
void iio_trigger_unregister(struct iio_trigger *trig_info);
-void devm_iio_trigger_unregister(struct device *dev,
- struct iio_trigger *trig_info);
-
/**
* iio_trigger_set_immutable() - set an immutable trigger on destination
*
@@ -150,18 +151,18 @@ void devm_iio_trigger_unregister(struct device *dev,
**/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
-/**
- * iio_trigger_poll() - called on a trigger occurring
- * @trig: trigger which occurred
- *
- * Typically called in relevant hardware interrupt handler.
- **/
void iio_trigger_poll(struct iio_trigger *trig);
-void iio_trigger_poll_chained(struct iio_trigger *trig);
+void iio_trigger_poll_nested(struct iio_trigger *trig);
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
-__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...);
+#define iio_trigger_alloc(parent, fmt, ...) \
+ __iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+
+__printf(3, 4)
+struct iio_trigger *__iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
void iio_trigger_free(struct iio_trigger *trig);
/**
@@ -170,6 +171,7 @@ void iio_trigger_free(struct iio_trigger *trig);
*/
bool iio_trigger_using_own(struct iio_dev *indio_dev);
+int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig);
int iio_trigger_validate_own_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
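With the reworked allocator above, the parent device and owning module are captured at allocation time; a minimal sketch of driver-side use (my_* names hypothetical):

	static int my_setup_trigger(struct device *dev, struct iio_dev *indio_dev)
	{
		struct iio_trigger *trig;
		int ret;

		/* THIS_MODULE is passed implicitly by the iio_trigger_alloc macro */
		trig = iio_trigger_alloc(dev, "%s-trigger", indio_dev->name);
		if (!trig)
			return -ENOMEM;

		trig->ops = &my_trigger_ops;
		iio_trigger_set_drvdata(trig, indio_dev);

		ret = iio_trigger_register(trig);
		if (ret)
			iio_trigger_free(trig);
		return ret;
	}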
diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h
index c4f8c7409666..2c05dfad88d7 100644
--- a/include/linux/iio/trigger_consumer.h
+++ b/include/linux/iio/trigger_consumer.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core, trigger consumer functions
*
* Copyright (c) 2008-2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __LINUX_IIO_TRIGGER_CONSUMER_H__
@@ -41,7 +38,7 @@ struct iio_poll_func {
};
-struct iio_poll_func
+__printf(5, 6) struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
int type,
@@ -53,11 +50,4 @@ irqreturn_t iio_pollfunc_store_time(int irq, void *p);
void iio_trigger_notify_done(struct iio_trigger *trig);
-/*
- * Two functions for common case where all that happens is a pollfunc
- * is attached and detached from a trigger
- */
-int iio_triggered_buffer_postenable(struct iio_dev *indio_dev);
-int iio_triggered_buffer_predisable(struct iio_dev *indio_dev);
-
#endif
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index 30145616773d..29e1fe146879 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -1,23 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_
#define _LINUX_IIO_TRIGGERED_BUFFER_H_
+#include <linux/iio/buffer.h>
#include <linux/interrupt.h>
struct iio_dev;
+struct iio_dev_attr;
struct iio_buffer_setup_ops;
-int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
+int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *setup_ops);
+ enum iio_buffer_direction direction,
+ const struct iio_buffer_setup_ops *setup_ops,
+ const struct iio_dev_attr **buffer_attrs);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
-int devm_iio_triggered_buffer_setup(struct device *dev,
- struct iio_dev *indio_dev,
- irqreturn_t (*h)(int irq, void *p),
- irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *ops);
-void devm_iio_triggered_buffer_cleanup(struct device *dev,
- struct iio_dev *indio_dev);
+#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \
+ iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, (setup_ops), \
+ NULL)
+
+int devm_iio_triggered_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs);
+
+#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \
+ devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, \
+ (setup_ops), NULL)
#endif
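The _ext variants take a buffer direction and an optional NULL-terminated attribute array; a sketch of the devm form, reusing the constant attribute array from the sysfs.h sketch above (the handler and setup ops are hypothetical):

	ret = devm_iio_triggered_buffer_setup_ext(dev, indio_dev,
						  iio_pollfunc_store_time,
						  my_trigger_handler,
						  IIO_BUFFER_DIRECTION_IN,
						  &my_setup_ops, my_fifo_attrs);
	if (ret)
		return ret;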
diff --git a/include/linux/iio/triggered_event.h b/include/linux/iio/triggered_event.h
index 8fe8537085bb..13250fd99745 100644
--- a/include/linux/iio/triggered_event.h
+++ b/include/linux/iio/triggered_event.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IIO_TRIGGERED_EVENT_H_
#define _LINUX_IIO_TRIGGERED_EVENT_H_
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 2aa7b6384d64..34eebad12d2c 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* industrial I/O data types needed both in and out of kernel
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_TYPES_H_
@@ -19,6 +16,11 @@ enum iio_event_info {
IIO_EV_INFO_PERIOD,
IIO_EV_INFO_HIGH_PASS_FILTER_3DB,
IIO_EV_INFO_LOW_PASS_FILTER_3DB,
+ IIO_EV_INFO_TIMEOUT,
+ IIO_EV_INFO_RESET_TIMEOUT,
+ IIO_EV_INFO_TAP2_MIN_DELAY,
+ IIO_EV_INFO_RUNNING_PERIOD,
+ IIO_EV_INFO_RUNNING_COUNT,
};
#define IIO_VAL_INT 1
@@ -26,12 +28,49 @@ enum iio_event_info {
#define IIO_VAL_INT_PLUS_NANO 3
#define IIO_VAL_INT_PLUS_MICRO_DB 4
#define IIO_VAL_INT_MULTIPLE 5
+#define IIO_VAL_INT_64 6 /* 64-bit data, val is lower 32 bits */
#define IIO_VAL_FRACTIONAL 10
#define IIO_VAL_FRACTIONAL_LOG2 11
+#define IIO_VAL_CHAR 12
enum iio_available_type {
IIO_AVAIL_LIST,
IIO_AVAIL_RANGE,
};
+enum iio_chan_info_enum {
+ IIO_CHAN_INFO_RAW = 0,
+ IIO_CHAN_INFO_PROCESSED,
+ IIO_CHAN_INFO_SCALE,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_SAMP_FREQ,
+ IIO_CHAN_INFO_FREQUENCY,
+ IIO_CHAN_INFO_PHASE,
+ IIO_CHAN_INFO_HARDWAREGAIN,
+ IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_HYSTERESIS_RELATIVE,
+ IIO_CHAN_INFO_INT_TIME,
+ IIO_CHAN_INFO_ENABLE,
+ IIO_CHAN_INFO_CALIBHEIGHT,
+ IIO_CHAN_INFO_CALIBWEIGHT,
+ IIO_CHAN_INFO_DEBOUNCE_COUNT,
+ IIO_CHAN_INFO_DEBOUNCE_TIME,
+ IIO_CHAN_INFO_CALIBEMISSIVITY,
+ IIO_CHAN_INFO_OVERSAMPLING_RATIO,
+ IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
+ IIO_CHAN_INFO_CALIBAMBIENT,
+ IIO_CHAN_INFO_ZEROPOINT,
+ IIO_CHAN_INFO_TROUGH,
+ IIO_CHAN_INFO_CONVDELAY,
+ IIO_CHAN_INFO_POWERFACTOR,
+};
+
#endif /* _IIO_TYPES_H_ */
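For IIO_VAL_INT_64 the core reassembles the 64-bit value from val (low 32 bits) and val2 (high 32 bits); a read_raw sketch under that assumption (my_hw_read is hypothetical):

	static int my_read_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       int *val, int *val2, long mask)
	{
		u64 raw;

		switch (mask) {
		case IIO_CHAN_INFO_RAW:
			raw = my_hw_read(indio_dev);
			*val = lower_32_bits(raw);
			*val2 = upper_32_bits(raw);
			return IIO_VAL_INT_64;
		default:
			return -EINVAL;
		}
	}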
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e4647e0eb60..8e29cb4e6a01 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -1,71 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2008 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
*/
#ifndef _LINUX_IMA_H
#define _LINUX_IMA_H
+#include <linux/kernel_read_file.h>
#include <linux/fs.h>
+#include <linux/security.h>
#include <linux/kexec.h>
+#include <crypto/hash_info.h>
struct linux_binprm;
#ifdef CONFIG_IMA
-extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask, int opened);
-extern void ima_file_free(struct file *file);
-extern int ima_file_mmap(struct file *file, unsigned long prot);
-extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
-extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
- enum kernel_read_file_id id);
-extern void ima_post_path_mknod(struct dentry *dentry);
+extern enum hash_algo ima_get_current_hash_algo(void);
+extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
+extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size);
+extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size);
+extern int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest, size_t digest_len);
+
+#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
+extern void ima_appraise_parse_cmdline(void);
+#else
+static inline void ima_appraise_parse_cmdline(void) {}
+#endif
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
+extern void ima_kexec_post_load(struct kimage *image);
+#else
+static inline void ima_kexec_post_load(struct kimage *image) {}
#endif
#else
-static inline int ima_bprm_check(struct linux_binprm *bprm)
+static inline enum hash_algo ima_get_current_hash_algo(void)
{
- return 0;
+ return HASH_ALGO__LAST;
}
-static inline int ima_file_check(struct file *file, int mask, int opened)
+static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
{
- return 0;
+ return -EOPNOTSUPP;
}
-static inline void ima_file_free(struct file *file)
+static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size)
{
- return;
+ return -EOPNOTSUPP;
}
-static inline int ima_file_mmap(struct file *file, unsigned long prot)
-{
- return 0;
-}
+static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
-static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
+static inline int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest,
+ size_t digest_len)
{
- return 0;
+ return -ENOENT;
}
-static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
- enum kernel_read_file_id id)
+#endif /* CONFIG_IMA */
+
+#ifdef CONFIG_HAVE_IMA_KEXEC
+int __init ima_free_kexec_buffer(void);
+int __init ima_get_kexec_buffer(void **addr, size_t *size);
+#endif
+
+#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
+extern bool arch_ima_get_secureboot(void);
+extern const char * const *arch_get_ima_policy(void);
+#else
+static inline bool arch_ima_get_secureboot(void)
{
- return 0;
+ return false;
}
-static inline void ima_post_path_mknod(struct dentry *dentry)
+static inline const char * const *arch_get_ima_policy(void)
{
- return;
+ return NULL;
}
-
-#endif /* CONFIG_IMA */
+#endif
#ifndef CONFIG_IMA_KEXEC
struct kimage;
@@ -76,33 +95,19 @@ static inline void ima_add_kexec_buffer(struct kimage *image)
#ifdef CONFIG_IMA_APPRAISE
extern bool is_ima_appraise_enabled(void);
-extern void ima_inode_post_setattr(struct dentry *dentry);
-extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
- const void *xattr_value, size_t xattr_value_len);
-extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
#else
static inline bool is_ima_appraise_enabled(void)
{
return 0;
}
+#endif /* CONFIG_IMA_APPRAISE */
-static inline void ima_inode_post_setattr(struct dentry *dentry)
-{
- return;
-}
-
-static inline int ima_inode_setxattr(struct dentry *dentry,
- const char *xattr_name,
- const void *xattr_value,
- size_t xattr_value_len)
-{
- return 0;
-}
-
-static inline int ima_inode_removexattr(struct dentry *dentry,
- const char *xattr_name)
+#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING)
+extern bool ima_appraise_signature(enum kernel_read_file_id func);
+#else
+static inline bool ima_appraise_signature(enum kernel_read_file_id func)
{
- return 0;
+ return false;
}
-#endif /* CONFIG_IMA_APPRAISE */
+#endif /* CONFIG_IMA_APPRAISE && CONFIG_INTEGRITY_TRUSTED_KEYRING */
#endif /* _LINUX_IMA_H */
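ima_measure_critical_data() lets a subsystem record an arbitrary blob in the IMA measurement list; a minimal sketch (the label and event name are hypothetical, and no digest is requested back):

	static int my_measure_policy(const void *policy, size_t len)
	{
		/* hash=false: measure the buffer itself, don't return a digest */
		return ima_measure_critical_data("my_subsys", "my_policy",
						 policy, len, false, NULL, 0);
	}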
diff --git a/include/linux/imx-media.h b/include/linux/imx-media.h
index 77221ecad6fc..e017e1779118 100644
--- a/include/linux/imx-media.h
+++ b/include/linux/imx-media.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2014-2017 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version
*/
#ifndef __LINUX_IMX_MEDIA_H__
diff --git a/include/linux/in.h b/include/linux/in.h
index 31b493734763..1873ef642605 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -9,11 +10,6 @@
*
* Authors: Original taken from the GNU Project <netinet/in.h> file.
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IN_H
#define _LINUX_IN_H
@@ -60,9 +56,14 @@ static inline bool ipv4_is_lbcast(__be32 addr)
return addr == htonl(INADDR_BROADCAST);
}
+static inline bool ipv4_is_all_snoopers(__be32 addr)
+{
+ return addr == htonl(INADDR_ALLSNOOPERS_GROUP);
+}
+
static inline bool ipv4_is_zeronet(__be32 addr)
{
- return (addr & htonl(0xff000000)) == htonl(0x00000000);
+ return (addr == 0);
}
/* Special-Use IPv4 Addresses (RFC3330) */
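With the tightened ipv4_is_zeronet() above (only 0.0.0.0 itself is now treated as zeronet), a typical source-address sanity check might read as follows (sketch, my_-prefixed name hypothetical):

	static bool my_saddr_usable(__be32 addr)
	{
		/* reject 0.0.0.0 and 255.255.255.255 as source addresses */
		return !ipv4_is_zeronet(addr) && !ipv4_is_lbcast(addr);
	}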
diff --git a/include/linux/in6.h b/include/linux/in6.h
index 34edf1f6c9a3..403f926d33d8 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Types and definitions for AF_INET6
* Linux INET6 implementation
@@ -11,17 +12,19 @@
*
* Advanced Sockets API for IPv6
* <draft-stevens-advanced-api-00.txt>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IN6_H
#define _LINUX_IN6_H
#include <uapi/linux/in6.h>
+/* Large enough to hold both sockaddr_in and sockaddr_in6. */
+struct sockaddr_inet {
+ unsigned short sa_family;
+ char sa_data[sizeof(struct sockaddr_in6) -
+ sizeof(unsigned short)];
+};
+
/* IPv6 Wildcard Address (::) and Loopback Address (::1) defined in RFC2553
* NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined
* in network byte order, not in host byte order as are the IPv4 equivalents
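struct sockaddr_inet is a family-agnostic holder sized for either address family; a sketch of the intended pattern (do_v4/do_v6 are hypothetical helpers):

	struct sockaddr_inet ss;

	/* fill ss from userspace or a socket, then dispatch on the family */
	if (ss.sa_family == AF_INET)
		do_v4((struct sockaddr_in *)&ss);
	else if (ss.sa_family == AF_INET6)
		do_v6((struct sockaddr_in6 *)&ss);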
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
new file mode 100644
index 000000000000..35227d47cfc9
--- /dev/null
+++ b/include/linux/indirect_call_wrapper.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
+#define _LINUX_INDIRECT_CALL_WRAPPER_H
+
+#ifdef CONFIG_MITIGATION_RETPOLINE
+
+/*
+ * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtins
+ * @f: function pointer
+ * @f$NR: builtin functions names, up to $NR of them
+ * @__VA_ARGS__: arguments for @f
+ *
+ * Avoid retpoline overhead for known builtins by checking @f against each of
+ * them and, on a match, invoking the builtin function directly. The functions
+ * are checked in the given order; otherwise fall back to the indirect call.
+ */
+#define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
+ })
+#define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+ likely(f == f2) ? f2(__VA_ARGS__) : \
+ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
+ })
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+ likely(f == f3) ? f3(__VA_ARGS__) : \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
+ })
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+ likely(f == f4) ? f4(__VA_ARGS__) : \
+ INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
+ })
+
+#define INDIRECT_CALLABLE_DECLARE(f) f
+#define INDIRECT_CALLABLE_SCOPE
+#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f)
+
+#else
+#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALLABLE_DECLARE(f)
+#define INDIRECT_CALLABLE_SCOPE static
+#define EXPORT_INDIRECT_CALLABLE(f)
+#endif
+
+/*
+ * We can use INDIRECT_CALL_$NR for IPv6-related functions only if IPv6 is
+ * builtin; this macro simplifies dealing with indirect calls that have only
+ * ipv4/ipv6 alternatives.
+ */
+#if IS_BUILTIN(CONFIG_IPV6)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
+#elif IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
+#endif
+
+#if IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
+#endif
+
+#endif
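The wrappers compare the function pointer against each known builtin and only fall back to the (retpolined) indirect call on a miss; a dispatch sketch (the my_* handlers are hypothetical):

	/* prototypes, only emitted when retpolines are enabled */
	INDIRECT_CALLABLE_DECLARE(int my_v4_rcv(struct sk_buff *skb));
	INDIRECT_CALLABLE_DECLARE(int my_v6_rcv(struct sk_buff *skb));

	static int my_dispatch(int (*handler)(struct sk_buff *), struct sk_buff *skb)
	{
		/* direct call when handler is one of the two builtins */
		return INDIRECT_CALL_2(handler, my_v6_rcv, my_v4_rcv, skb);
	}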
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 636ebe87e6f8..9158772f3559 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Swansea University Computer Society NET3
*
@@ -33,11 +34,6 @@
* $Id: udp.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $
* $Id: we.c,v 0.8.4.10 1993/01/23 18:00:11 bir7 Exp $
* $Id: wereg.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_INET_H
#define _LINUX_INET_H
@@ -59,5 +55,6 @@ extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char
extern int inet_pton_with_scope(struct net *net, unsigned short af,
const char *src, const char *port, struct sockaddr_storage *addr);
+bool inet_addr_is_any(struct sockaddr_storage *addr);
#endif /* _LINUX_INET_H */
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 65da430e260f..704fd415c2b4 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -1,30 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INET_DIAG_H_
#define _INET_DIAG_H_ 1
+#include <net/netlink.h>
#include <uapi/linux/inet_diag.h>
-struct net;
-struct sock;
struct inet_hashinfo;
-struct nlattr;
-struct nlmsghdr;
-struct sk_buff;
-struct netlink_callback;
struct inet_diag_handler {
+ struct module *owner;
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ const struct inet_diag_req_v2 *r);
- int (*dump_one)(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+ int (*dump_one)(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req);
void (*idiag_get_info)(struct sock *sk,
struct inet_diag_msg *r,
void *info);
+ int (*idiag_get_aux)(struct sock *sk,
+ bool net_admin,
+ struct sk_buff *skb);
+
int (*destroy)(struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req);
@@ -32,28 +31,47 @@ struct inet_diag_handler {
__u16 idiag_info_size;
};
+struct bpf_sk_storage_diag;
+struct inet_diag_dump_data {
+ struct nlattr *req_nlas[__INET_DIAG_REQ_MAX];
+#define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE]
+#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES]
+
+ struct bpf_sk_storage_diag *bpf_stg_diag;
+ bool mark_needed; /* INET_DIAG_BC_MARK_COND present. */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ bool cgroup_needed; /* INET_DIAG_BC_CGROUP_COND present. */
+#endif
+ bool userlocks_needed; /* INET_DIAG_BC_AUTO present. */
+};
+
struct inet_connection_sock;
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, const struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh, bool net_admin);
-void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
-int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb, const struct nlmsghdr *nlh,
- const struct inet_diag_req_v2 *req);
-
-struct sock *inet_diag_find_one_icsk(struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct inet_diag_req_v2 *req);
-
-int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+ struct sk_buff *skb, struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *req,
+ u16 nlmsg_flags, bool net_admin);
+
+int inet_diag_bc_sk(const struct inet_diag_dump_data *cb_data, struct sock *sk);
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+static inline size_t inet_diag_msg_attrs_size(void)
+{
+ return nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ + nla_total_size(1) /* INET_DIAG_TOS */
+#if IS_ENABLED(CONFIG_IPV6)
+ + nla_total_size(1) /* INET_DIAG_TCLASS */
+ + nla_total_size(1) /* INET_DIAG_SKV6ONLY */
+#endif
+ + nla_total_size(4) /* INET_DIAG_MARK */
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ + nla_total_size_64bit(sizeof(u64)) /* INET_DIAG_CGROUP_ID */
+#endif
+ + nla_total_size(sizeof(struct inet_diag_sockopt))
+ /* INET_DIAG_SOCKOPT */
+ ;
+}
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
struct user_namespace *user_ns, bool net_admin);
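The new inet_diag_msg_attrs_size() helper centralizes the per-socket attribute space estimate; a diag module might size its reply skb like this (sketch, struct my_diag_info stands in for a protocol-specific payload):

	static size_t my_diag_msg_size(void)
	{
		return NLMSG_ALIGN(sizeof(struct inet_diag_msg))
			+ inet_diag_msg_attrs_size()
			+ nla_total_size(sizeof(struct my_diag_info));
	}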
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index fb3f809e34e4..5730ba6b1cfa 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INETDEVICE_H
#define _LINUX_INETDEVICE_H
@@ -23,9 +24,11 @@ struct ipv4_devconf {
struct in_device {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
refcount_t refcnt;
int dead;
- struct in_ifaddr *ifa_list; /* IP ifaddr chain */
+ struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */
struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */
struct ip_mc_list __rcu * __rcu *mc_hash;
@@ -36,9 +39,11 @@ struct in_device {
unsigned long mr_v1_seen;
unsigned long mr_v2_seen;
unsigned long mr_maxdelay;
- unsigned char mr_qrv;
+ unsigned long mr_qi; /* Query Interval */
+ unsigned long mr_qri; /* Query Response Interval */
+ unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
- unsigned char mr_ifc_count;
+ u32 mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */
@@ -48,13 +53,15 @@ struct in_device {
};
#define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1])
+#define IPV4_DEVCONF_RO(cnf, attr) READ_ONCE(IPV4_DEVCONF(cnf, attr))
#define IPV4_DEVCONF_ALL(net, attr) \
IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr)
+#define IPV4_DEVCONF_ALL_RO(net, attr) READ_ONCE(IPV4_DEVCONF_ALL(net, attr))
-static inline int ipv4_devconf_get(struct in_device *in_dev, int index)
+static inline int ipv4_devconf_get(const struct in_device *in_dev, int index)
{
index--;
- return in_dev->cnf.data[index];
+ return READ_ONCE(in_dev->cnf.data[index]);
}
static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
@@ -62,7 +69,7 @@ static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
{
index--;
set_bit(index, in_dev->cnf.state);
- in_dev->cnf.data[index] = val;
+ WRITE_ONCE(in_dev->cnf.data[index], val);
}
static inline void ipv4_devconf_setall(struct in_device *in_dev)
@@ -76,22 +83,23 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val))
#define IN_DEV_ANDCONF(in_dev, attr) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr) && \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
- (IPV4_DEVCONF_ALL(net, attr) || \
+ (IPV4_DEVCONF_ALL_RO(net, attr) || \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_ORCONF(in_dev, attr) \
IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
#define IN_DEV_MAXCONF(in_dev, attr) \
- (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
+ (max(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr), \
IN_DEV_CONF_GET((in_dev), attr)))
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
+#define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
@@ -101,7 +109,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP)
-#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
+#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP_PVLAN)
#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \
@@ -122,25 +130,29 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
- IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
+ IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
-#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
+#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ACCEPT)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+#define IN_DEV_ARP_EVICT_NOCARRIER(in_dev) IN_DEV_ANDCONF((in_dev), \
+ ARP_EVICT_NOCARRIER)
struct in_ifaddr {
- struct hlist_node hash;
- struct in_ifaddr *ifa_next;
+ struct hlist_node addr_lst;
+ struct in_ifaddr __rcu *ifa_next;
struct in_device *ifa_dev;
struct rcu_head rcu_head;
__be32 ifa_local;
__be32 ifa_address;
__be32 ifa_mask;
+ __u32 ifa_rt_priority;
__be32 ifa_broadcast;
unsigned char ifa_scope;
unsigned char ifa_prefixlen;
+ unsigned char ifa_proto;
__u32 ifa_flags;
char ifa_label[IFNAMSIZ];
@@ -154,6 +166,7 @@ struct in_ifaddr {
struct in_validator_info {
__be32 ivi_addr;
struct in_device *ivi_dev;
+ struct netlink_ext_ack *extack;
};
int register_inetaddr_notifier(struct notifier_block *nb);
@@ -171,7 +184,16 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
}
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
+#ifdef CONFIG_INET
+int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
+#else
+static inline int inet_gifconf(struct net_device *dev, char __user *buf,
+ int len, int size)
+{
+ return 0;
+}
+#endif
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
@@ -179,7 +201,8 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst,
__be32 local, int scope);
struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
__be32 mask);
-static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
+struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr);
+static inline bool inet_ifa_match(__be32 addr, const struct in_ifaddr *ifa)
{
return !((addr^ifa->ifa_address)&ifa->ifa_mask);
}
@@ -199,14 +222,17 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr)
return false;
}
-#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \
- for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next)
+#define in_dev_for_each_ifa_rtnl(ifa, in_dev) \
+ for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \
+ ifa = rtnl_dereference(ifa->ifa_next))
-#define for_ifa(in_dev) { struct in_ifaddr *ifa; \
- for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next)
+#define in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev) \
+ for (ifa = rtnl_net_dereference(net, (in_dev)->ifa_list); ifa; \
+ ifa = rtnl_net_dereference(net, ifa->ifa_next))
-
-#define endfor_ifa(in_dev) }
+#define in_dev_for_each_ifa_rcu(ifa, in_dev) \
+ for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \
+ ifa = rcu_dereference(ifa->ifa_next))
static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev)
{
@@ -230,6 +256,25 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->ip_ptr);
}
+static inline struct in_device *__in_dev_get_rtnl_net(const struct net_device *dev)
+{
+ return rtnl_net_dereference(dev_net(dev), dev->ip_ptr);
+}
+
+/* called with rcu_read_lock or rtnl held */
+static inline bool ip_ignore_linkdown(const struct net_device *dev)
+{
+ struct in_device *in_dev;
+ bool rc = false;
+
+ in_dev = rcu_dereference_rtnl(dev->ip_ptr);
+ if (in_dev &&
+ IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
+ rc = true;
+
+ return rc;
+}
+
static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
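The new iterators replace the old for_primary_ifa()/for_ifa() pairs and make the required locking explicit; an RCU-side sketch (my_-prefixed name hypothetical):

	static __be32 my_first_local_addr(const struct net_device *dev)
	{
		const struct in_ifaddr *ifa;
		struct in_device *in_dev;
		__be32 addr = 0;

		rcu_read_lock();
		in_dev = __in_dev_get_rcu(dev);
		if (in_dev) {
			in_dev_for_each_ifa_rcu(ifa, in_dev) {
				addr = ifa->ifa_local;
				break;
			}
		}
		rcu_read_unlock();
		return addr;
	}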
diff --git a/include/linux/init.h b/include/linux/init.h
index 94769d687cf0..40331923b9f4 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -1,7 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INIT_H
#define _LINUX_INIT_H
+#include <linux/build_bug.h>
#include <linux/compiler.h>
+#include <linux/stringify.h>
#include <linux/types.h>
/* These macros are used to mark some functions or
@@ -39,11 +42,12 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold __inittrace __latent_entropy
-#define __initdata __section(.init.data)
-#define __initconst __section(.init.rodata)
-#define __exitdata __section(.exit.data)
-#define __exit_call __used __section(.exitcall.exit)
+#define __init __section(".init.text") __cold __latent_entropy \
+ __no_kstack_erase
+#define __initdata __section(".init.data")
+#define __initconst __section(".init.rodata")
+#define __exitdata __section(".exit.data")
+#define __exit_call __used __section(".exitcall.exit")
/*
* modpost check for section mismatches during the kernel build.
@@ -62,28 +66,27 @@
*
* The markers follow same syntax rules as __init / __initdata.
*/
-#define __ref __section(.ref.text) noinline
-#define __refdata __section(.ref.data)
-#define __refconst __section(.ref.rodata)
+#define __ref __section(".ref.text") noinline
+#define __refdata __section(".ref.data")
+#define __refconst __section(".ref.rodata")
#ifdef MODULE
#define __exitused
-#define __inittrace notrace
#else
#define __exitused __used
-#define __inittrace
#endif
-#define __exit __section(.exit.text) __exitused __cold notrace
+#define __exit __section(".exit.text") __exitused __cold notrace
-/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(.meminit.text) __cold notrace \
- __latent_entropy
-#define __meminitdata __section(.meminit.data)
-#define __meminitconst __section(.meminit.rodata)
-#define __memexit __section(.memexit.text) __exitused __cold notrace
-#define __memexitdata __section(.memexit.data)
-#define __memexitconst __section(.memexit.rodata)
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define __meminit
+#define __meminitdata
+#define __meminitconst
+#else
+#define __meminit __init
+#define __meminitdata __initdata
+#define __meminitconst __initconst
+#endif
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
@@ -94,10 +97,6 @@
#define __INITRODATA .section ".init.rodata","a",%progbits
#define __FINITDATA .previous
-#define __MEMINIT .section ".meminit.text", "ax"
-#define __MEMINITDATA .section ".meminit.data", "aw"
-#define __MEMINITRODATA .section ".meminit.rodata", "a"
-
/* silence warnings when references are OK */
#define __REF .section ".ref.text", "ax"
#define __REFDATA .section ".ref.data", "aw"
@@ -110,35 +109,73 @@
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
-extern initcall_t __con_initcall_start[], __con_initcall_end[];
-extern initcall_t __security_initcall_start[], __security_initcall_end[];
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+typedef int initcall_entry_t;
+
+static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
+{
+ return offset_to_ptr(entry);
+}
+#else
+typedef initcall_t initcall_entry_t;
-/* Used for contructor calls. */
+static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
+{
+ return *entry;
+}
+#endif
+
+extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
+
+/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
+struct file_system_type;
+
/* Defined in init/main.c */
extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
extern char *saved_command_line;
+extern unsigned int saved_command_line_len;
extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
-void __init load_default_modules(void);
-int __init init_rootfs(void);
+void __init init_rootfs(void);
+
+void init_IRQ(void);
+void time_init(void);
+void poking_init(void);
+void pgtable_cache_init(void);
+
+extern initcall_entry_t __initcall_start[];
+extern initcall_entry_t __initcall0_start[];
+extern initcall_entry_t __initcall1_start[];
+extern initcall_entry_t __initcall2_start[];
+extern initcall_entry_t __initcall3_start[];
+extern initcall_entry_t __initcall4_start[];
+extern initcall_entry_t __initcall5_start[];
+extern initcall_entry_t __initcall6_start[];
+extern initcall_entry_t __initcall7_start[];
+extern initcall_entry_t __initcall_end[];
+
+extern struct file_system_type rootfs_fs_type;
-#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
extern bool rodata_enabled;
-#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void);
-#endif
extern void (*late_time_init)(void);
extern bool initcall_debug;
+#ifdef MODULE
+extern struct module __this_module;
+#define THIS_MODULE (&__this_module)
+#else
+#define THIS_MODULE ((struct module *)0)
+#endif
+
#endif
#ifndef MODULE
@@ -161,9 +198,83 @@ extern bool initcall_debug;
* as KEEP() in the linker script.
*/
-#define __define_initcall(fn, id) \
- static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(".initcall" #id ".init"))) = fn;
+/* Format: <modname>__<counter>_<line>_<fn> */
+#define __initcall_id(fn) \
+ __PASTE(kmod_, \
+ __PASTE(__KBUILD_MODNAME, \
+ __PASTE(__, \
+ __PASTE(__COUNTER__, \
+ __PASTE(_, \
+ __PASTE(__LINE__, \
+ __PASTE(_, fn)))))))
+
+/* Format: __<prefix>__<iid><id> */
+#define __initcall_name(prefix, __iid, id) \
+ __PASTE(__, \
+ __PASTE(prefix, \
+ __PASTE(__, \
+ __PASTE(__iid, id))))
+
+#ifdef CONFIG_LTO_CLANG
+/*
+ * With LTO, the compiler doesn't necessarily obey link order for
+ * initcalls. In order to preserve the correct order, we add each
+ * variable into its own section and generate a linker script (in
+ * scripts/link-vmlinux.sh) to specify the order of the sections.
+ */
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init.." #__iid
+
+/*
+ * With LTO, the compiler can rename static functions to avoid
+ * global naming collisions. We use a global stub function for
+ * initcalls to create a stable symbol name whose address can be
+ * taken in inline assembly when PREL32 relocations are used.
+ */
+#define __initcall_stub(fn, __iid, id) \
+ __initcall_name(initstub, __iid, id)
+
+#define __define_initcall_stub(__stub, fn) \
+ int __init __stub(void); \
+ int __init __stub(void) \
+ { \
+ return fn(); \
+ } \
+ __ADDRESSABLE(__stub)
+#else
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init"
+
+#define __initcall_stub(fn, __iid, id) fn
+
+#define __define_initcall_stub(__stub, fn) \
+ __ADDRESSABLE(fn)
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define ____define_initcall(fn, __stub, __name, __sec) \
+ __define_initcall_stub(__stub, fn) \
+ asm(".section \"" __sec "\", \"a\" \n" \
+ __stringify(__name) ": \n" \
+ ".long " __stringify(__stub) " - . \n" \
+ ".previous \n"); \
+ static_assert(__same_type(initcall_t, &fn));
+#else
+#define ____define_initcall(fn, __unused, __name, __sec) \
+ static initcall_t __name __used \
+ __attribute__((__section__(__sec))) = fn;
+#endif
+
+#define __unique_initcall(fn, id, __sec, __iid) \
+ ____define_initcall(fn, \
+ __initcall_stub(fn, __iid, id), \
+ __initcall_name(initcall, __iid, id), \
+ __initcall_section(__sec, __iid))
+
+#define ___define_initcall(fn, id, __sec) \
+ __unique_initcall(fn, id, __sec, __initcall_id(fn))
+
+#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id)
/*
* Early initcalls run before initializing SMP.
@@ -202,13 +313,7 @@ extern bool initcall_debug;
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
-#define console_initcall(fn) \
- static initcall_t __initcall_##fn \
- __used __section(.con_initcall.init) = fn
-
-#define security_initcall(fn) \
- static initcall_t __initcall_##fn \
- __used __section(.security_initcall.init) = fn
+#define console_initcall(fn) ___define_initcall(fn, con, .con_initcall)
struct obs_kernel_param {
const char *str;
@@ -216,6 +321,8 @@ struct obs_kernel_param {
int early;
};
+extern const struct obs_kernel_param __setup_start[], __setup_end[];
+
/*
* Only for really core code. See moduleparam.h for the normal way.
*
@@ -226,16 +333,23 @@ struct obs_kernel_param {
static const char __setup_str_##unique_id[] __initconst \
__aligned(1) = str; \
static struct obs_kernel_param __setup_##unique_id \
- __used __section(.init.setup) \
- __attribute__((aligned((sizeof(long))))) \
+ __used __section(".init.setup") \
+ __aligned(__alignof__(struct obs_kernel_param)) \
= { __setup_str_##unique_id, fn, early }
+/*
+ * NOTE: __setup functions return values:
+ * @fn returns 1 (or non-zero) if the option argument is "handled"
+ * and returns 0 if the option argument is "not handled".
+ */
#define __setup(str, fn) \
__setup_param(str, fn, fn, 0)
/*
- * NOTE: fn is as per module_param, not __setup!
- * Emits warning if fn returns non-zero.
+ * NOTE: @fn is as per module_param, not __setup!
+ * I.e., @fn returns 0 for no error or non-zero for error
+ * (possibly @fn returns a -errno value, but it does not matter).
+ * Emits warning if @fn returns non-zero.
*/
#define early_param(str, fn) \
__setup_param(str, fn, fn, 1)
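Under the convention documented above, a boot-option handler returns 1 once it has consumed its argument; a sketch (option and variable names hypothetical):

	static bool my_feature_on;

	static int __init my_feature_setup(char *str)
	{
		my_feature_on = str && !strcmp(str, "on");
		return 1;	/* "myfeature=" handled */
	}
	__setup("myfeature=", my_feature_setup);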
@@ -249,14 +363,14 @@ struct obs_kernel_param {
var = 1; \
return 0; \
} \
- __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \
+ early_param(str_on, parse_##var##_on); \
\
static int __init parse_##var##_off(char *arg) \
{ \
var = 0; \
return 0; \
} \
- __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1)
+ early_param(str_off, parse_##var##_off)
/* Relies on boot_command_line being set */
void __init parse_early_param(void);
@@ -270,7 +384,7 @@ void __init parse_early_options(char *cmdline);
#endif
/* Data marked not to be saved by software suspend */
-#define __nosavedata __section(.data..nosave)
+#define __nosavedata __section(".data..nosave")
#ifdef MODULE
#define __exit_p(x) x
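Drivers keep using the level wrappers (device_initcall() and friends), which now funnel into the unique-name machinery above; at boot the core walks each section and resolves entries back to function pointers via initcall_from_entry(). A sketch of both sides, assuming the usual level-6 wrapper:

	static int __init my_subsys_init(void)
	{
		return 0;
	}
	device_initcall(my_subsys_init);	/* __define_initcall(my_subsys_init, 6) */

	/* core-side walk, in the style of init/main.c's do_initcall_level() */
	initcall_entry_t *fn;

	for (fn = __initcall6_start; fn < __initcall7_start; fn++)
		do_one_initcall(initcall_from_entry(fn));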
diff --git a/include/linux/init_ohci1394_dma.h b/include/linux/init_ohci1394_dma.h
index 3c03a4bba5e4..228afca432ac 100644
--- a/include/linux/init_ohci1394_dma.h
+++ b/include/linux/init_ohci1394_dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
extern int __initdata init_ohci1394_dma_early;
extern void __init init_ohci1394_dma_on_all_controllers(void);
diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h
new file mode 100644
index 000000000000..92045d18cbfc
--- /dev/null
+++ b/include/linux/init_syscalls.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+int __init init_mount(const char *dev_name, const char *dir_name,
+ const char *type_page, unsigned long flags, void *data_page);
+int __init init_umount(const char *name, int flags);
+int __init init_chdir(const char *filename);
+int __init init_chroot(const char *filename);
+int __init init_chown(const char *filename, uid_t user, gid_t group, int flags);
+int __init init_chmod(const char *filename, umode_t mode);
+int __init init_eaccess(const char *filename);
+int __init init_stat(const char *filename, struct kstat *stat, int flags);
+int __init init_mknod(const char *filename, umode_t mode, unsigned int dev);
+int __init init_link(const char *oldname, const char *newname);
+int __init init_symlink(const char *oldname, const char *newname);
+int __init init_unlink(const char *pathname);
+int __init init_mkdir(const char *pathname, umode_t mode);
+int __init init_rmdir(const char *pathname);
+int __init init_utimes(char *filename, struct timespec64 *ts);
+int __init init_dup(struct file *file);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 0e849715e5be..a6cb241ea00c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX__INIT_TASK_H
#define _LINUX__INIT_TASK_H
@@ -12,6 +13,7 @@
#include <linux/securebits.h>
#include <linux/seqlock.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/sched/autogroup.h>
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
@@ -20,22 +22,9 @@
#include <asm/thread_info.h>
-#ifdef CONFIG_SMP
-# define INIT_PUSHABLE_TASKS(tsk) \
- .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
-#else
-# define INIT_PUSHABLE_TASKS(tsk)
-#endif
-
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-
-#ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ(tsk) \
- .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
-#else
-#define INIT_CPUSET_SEQ(tsk)
-#endif
+extern struct nsproxy init_nsproxy;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
@@ -45,265 +34,9 @@ extern struct fs_struct init_fs;
#define INIT_PREV_CPUTIME(x)
#endif
-#ifdef CONFIG_POSIX_TIMERS
-#define INIT_POSIX_TIMERS(s) \
- .posix_timers = LIST_HEAD_INIT(s.posix_timers),
-#define INIT_CPU_TIMERS(s) \
- .cpu_timers = { \
- LIST_HEAD_INIT(s.cpu_timers[0]), \
- LIST_HEAD_INIT(s.cpu_timers[1]), \
- LIST_HEAD_INIT(s.cpu_timers[2]), \
- },
-#define INIT_CPUTIMER(s) \
- .cputimer = { \
- .cputime_atomic = INIT_CPUTIME_ATOMIC, \
- .running = false, \
- .checking_timer = false, \
- },
-#else
-#define INIT_POSIX_TIMERS(s)
-#define INIT_CPU_TIMERS(s)
-#define INIT_CPUTIMER(s)
-#endif
-
-#define INIT_SIGNALS(sig) { \
- .nr_threads = 1, \
- .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
- .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
- .shared_pending = { \
- .list = LIST_HEAD_INIT(sig.shared_pending.list), \
- .signal = {{0}}}, \
- INIT_POSIX_TIMERS(sig) \
- INIT_CPU_TIMERS(sig) \
- .rlim = INIT_RLIMITS, \
- INIT_CPUTIMER(sig) \
- INIT_PREV_CPUTIME(sig) \
- .cred_guard_mutex = \
- __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
-}
-
-extern struct nsproxy init_nsproxy;
-
-#define INIT_SIGHAND(sighand) { \
- .count = ATOMIC_INIT(1), \
- .action = { { { .sa_handler = SIG_DFL, } }, }, \
- .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
- .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
-}
-
-extern struct group_info init_groups;
-
-#define INIT_STRUCT_PID { \
- .count = ATOMIC_INIT(1), \
- .tasks = { \
- { .first = NULL }, \
- { .first = NULL }, \
- { .first = NULL }, \
- }, \
- .level = 0, \
- .numbers = { { \
- .nr = 0, \
- .ns = &init_pid_ns, \
- .pid_chain = { .next = NULL, .pprev = NULL }, \
- }, } \
-}
-
-#define INIT_PID_LINK(type) \
-{ \
- .node = { \
- .next = NULL, \
- .pprev = NULL, \
- }, \
- .pid = &init_struct_pid, \
-}
-
-#ifdef CONFIG_AUDITSYSCALL
-#define INIT_IDS \
- .loginuid = INVALID_UID, \
- .sessionid = (unsigned int)-1,
-#else
-#define INIT_IDS
-#endif
-
-#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_PREEMPT(tsk) \
- .rcu_read_lock_nesting = 0, \
- .rcu_read_unlock_special.s = 0, \
- .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
- .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_TASKS_RCU
-#define INIT_TASK_RCU_TASKS(tsk) \
- .rcu_tasks_holdout = false, \
- .rcu_tasks_holdout_list = \
- LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
- .rcu_tasks_idle_cpu = -1,
-#else
-#define INIT_TASK_RCU_TASKS(tsk)
-#endif
-
-extern struct cred init_cred;
-
-#ifdef CONFIG_CGROUP_SCHED
-# define INIT_CGROUP_SCHED(tsk) \
- .sched_task_group = &root_task_group,
-#else
-# define INIT_CGROUP_SCHED(tsk)
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk) \
- .perf_event_mutex = \
- __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
- .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
-#else
-# define INIT_PERF_EVENTS(tsk)
-#endif
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-# define INIT_VTIME(tsk) \
- .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
- .vtime.starttime = 0, \
- .vtime.state = VTIME_SYS,
-#else
-# define INIT_VTIME(tsk)
-#endif
-
#define INIT_TASK_COMM "swapper"
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT, \
- .pi_top_task = NULL, \
- .pi_waiters_leftmost = NULL,
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
-#ifdef CONFIG_NUMA_BALANCING
-# define INIT_NUMA_BALANCING(tsk) \
- .numa_preferred_nid = -1, \
- .numa_group = NULL, \
- .numa_faults = NULL,
-#else
-# define INIT_NUMA_BALANCING(tsk)
-#endif
-
-#ifdef CONFIG_KASAN
-# define INIT_KASAN(tsk) \
- .kasan_depth = 1,
-#else
-# define INIT_KASAN(tsk)
-#endif
-
-#ifdef CONFIG_LIVEPATCH
-# define INIT_LIVEPATCH(tsk) \
- .patch_state = KLP_UNDEFINED,
-#else
-# define INIT_LIVEPATCH(tsk)
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-# define INIT_TASK_TI(tsk) \
- .thread_info = INIT_THREAD_INFO(tsk), \
- .stack_refcount = ATOMIC_INIT(1),
-#else
-# define INIT_TASK_TI(tsk)
-#endif
-
-#ifdef CONFIG_SECURITY
-#define INIT_TASK_SECURITY .security = NULL,
-#else
-#define INIT_TASK_SECURITY
-#endif
-
-/*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
- */
-#define INIT_TASK(tsk) \
-{ \
- INIT_TASK_TI(tsk) \
- .state = 0, \
- .stack = init_stack, \
- .usage = ATOMIC_INIT(2), \
- .flags = PF_KTHREAD, \
- .prio = MAX_PRIO-20, \
- .static_prio = MAX_PRIO-20, \
- .normal_prio = MAX_PRIO-20, \
- .policy = SCHED_NORMAL, \
- .cpus_allowed = CPU_MASK_ALL, \
- .nr_cpus_allowed= NR_CPUS, \
- .mm = NULL, \
- .active_mm = &init_mm, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
- .se = { \
- .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
- }, \
- .rt = { \
- .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
- .time_slice = RR_TIMESLICE, \
- }, \
- .tasks = LIST_HEAD_INIT(tsk.tasks), \
- INIT_PUSHABLE_TASKS(tsk) \
- INIT_CGROUP_SCHED(tsk) \
- .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
- .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
- .real_parent = &tsk, \
- .parent = &tsk, \
- .children = LIST_HEAD_INIT(tsk.children), \
- .sibling = LIST_HEAD_INIT(tsk.sibling), \
- .group_leader = &tsk, \
- RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
- RCU_POINTER_INITIALIZER(cred, &init_cred), \
- .comm = INIT_TASK_COMM, \
- .thread = INIT_THREAD, \
- .fs = &init_fs, \
- .files = &init_files, \
- .signal = &init_signals, \
- .sighand = &init_sighand, \
- .nsproxy = &init_nsproxy, \
- .pending = { \
- .list = LIST_HEAD_INIT(tsk.pending.list), \
- .signal = {{0}}}, \
- .blocked = {{0}}, \
- .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
- .journal_info = NULL, \
- INIT_CPU_TIMERS(tsk) \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
- [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
- }, \
- .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
- .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
- INIT_IDS \
- INIT_PERF_EVENTS(tsk) \
- INIT_TRACE_IRQFLAGS \
- INIT_LOCKDEP \
- INIT_FTRACE_GRAPH \
- INIT_TRACE_RECURSION \
- INIT_TASK_RCU_PREEMPT(tsk) \
- INIT_TASK_RCU_TASKS(tsk) \
- INIT_CPUSET_SEQ(tsk) \
- INIT_RT_MUTEXES(tsk) \
- INIT_PREV_CPUTIME(tsk) \
- INIT_VTIME(tsk) \
- INIT_NUMA_BALANCING(tsk) \
- INIT_KASAN(tsk) \
- INIT_LIVEPATCH(tsk) \
- INIT_TASK_SECURITY \
-}
-
-
-/* Attach to the init_task data structure for proper alignment */
-#define __init_task_data __attribute__((__section__(".data..init_task")))
-
+/* Attach to the thread_info data structure for proper alignment */
+#define __init_thread_info __section(".data..init_thread_info")
#endif
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index bc67b767f9ce..f1a1f4c92ded 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -1,11 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
-#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
-
-/* 1 = load ramdisk, 0 = don't load */
-extern int rd_doload;
+#ifndef __LINUX_INITRD_H
+#define __LINUX_INITRD_H
-/* 1 = prompt for ramdisk, 0 = don't prompt */
-extern int rd_prompt;
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
/* starting block # of image */
extern int rd_image_start;
@@ -20,4 +18,20 @@ extern int initrd_below_start_ok;
extern unsigned long initrd_start, initrd_end;
extern void free_initrd_mem(unsigned long, unsigned long);
-extern unsigned int real_root_dev;
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void __init reserve_initrd_mem(void);
+extern void wait_for_initramfs(void);
+#else
+static inline void __init reserve_initrd_mem(void) {}
+static inline void wait_for_initramfs(void) {}
+#endif
+
+extern phys_addr_t phys_initrd_start;
+extern unsigned long phys_initrd_size;
+
+extern char __initramfs_start[];
+extern unsigned long __initramfs_size;
+
+void console_on_rootfs(void);
+
+#endif /* __LINUX_INITRD_H */
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 23aede0b5843..8d20caa1b268 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Inode based directory notification for Linux
*
@@ -6,17 +7,14 @@
#ifndef _LINUX_INOTIFY_H
#define _LINUX_INOTIFY_H
-#include <linux/sysctl.h>
#include <uapi/linux/inotify.h>
-extern struct ctl_table inotify_table[]; /* for sysctl */
-
#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
IN_MOVED_TO | IN_CREATE | IN_DELETE | \
IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \
IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \
IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \
- IN_ISDIR | IN_ONESHOT)
+ IN_MASK_CREATE | IN_ISDIR | IN_ONESHOT)
#endif /* _LINUX_INOTIFY_H */
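
ALL_INOTIFY_BITS acts as the kernel-side validity mask for user-supplied event masks, so extending it with IN_MASK_CREATE is what lets the new flag pass syscall validation. A hedged sketch of the usual check (the function is illustrative; the real validation lives in fs/notify/inotify):

static int example_validate_mask(u32 mask)
{
	/* Reject any bit outside the known inotify set. */
	if (mask & ~ALL_INOTIFY_BITS)
		return -EINVAL;
	return 0;
}
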
diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h
deleted file mode 100644
index 2465182670db..000000000000
--- a/include/linux/input-polldev.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _INPUT_POLLDEV_H
-#define _INPUT_POLLDEV_H
-
-/*
- * Copyright (c) 2007 Dmitry Torokhov
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/input.h>
-#include <linux/workqueue.h>
-
-/**
- * struct input_polled_dev - simple polled input device
- * @private: private driver data.
- * @open: driver-supplied method that prepares device for polling
- * (enables the device and maybe flushes device state).
- * @close: driver-supplied method that is called when device is no
- * longer being polled. Used to put device into low power mode.
- * @poll: driver-supplied method that polls the device and posts
- * input events (mandatory).
- * @poll_interval: specifies how often the poll() method should be called.
- * Defaults to 500 msec unless overridden when registering the device.
- * @poll_interval_max: specifies upper bound for the poll interval.
- * Defaults to the initial value of @poll_interval.
- * @poll_interval_min: specifies lower bound for the poll interval.
- * Defaults to 0.
- * @input: input device structure associated with the polled device.
- * Must be properly initialized by the driver (id, name, phys, bits).
- *
- * Polled input device provides a skeleton for supporting simple input
- * devices that do not raise interrupts but have to be periodically
- * scanned or polled to detect changes in their state.
- */
-struct input_polled_dev {
- void *private;
-
- void (*open)(struct input_polled_dev *dev);
- void (*close)(struct input_polled_dev *dev);
- void (*poll)(struct input_polled_dev *dev);
- unsigned int poll_interval; /* msec */
- unsigned int poll_interval_max; /* msec */
- unsigned int poll_interval_min; /* msec */
-
- struct input_dev *input;
-
-/* private: */
- struct delayed_work work;
-
- bool devres_managed;
-};
-
-struct input_polled_dev *input_allocate_polled_device(void);
-struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev);
-void input_free_polled_device(struct input_polled_dev *dev);
-int input_register_polled_device(struct input_polled_dev *dev);
-void input_unregister_polled_device(struct input_polled_dev *dev);
-
-#endif
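
With input-polldev.h removed, polled devices use the polling support built into the input core instead (added in the input.h hunk below). A migration sketch, assuming a hypothetical foo driver:

/* Old: input_allocate_polled_device() + input_register_polled_device().
 * New: a plain input_dev switched into polling mode. */
static void foo_poll(struct input_dev *input)
{
	/* read the hardware and post input events here */
}

static int foo_register(struct input_dev *input)
{
	int error;

	error = input_setup_polling(input, foo_poll);
	if (error)
		return error;

	input_set_poll_interval(input, 50);	/* msec */
	return input_register_device(input);
}
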
diff --git a/include/linux/input.h b/include/linux/input.h
index a65e3b24fb18..7d7cb0593a63 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 1999-2002 Vojtech Pavlik
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _INPUT_H
#define _INPUT_H
@@ -24,6 +21,8 @@
#include <linux/timer.h>
#include <linux/mod_devicetable.h>
+struct input_dev_poller;
+
/**
* struct input_value - input value representation
* @type: type of value (EV_KEY, EV_ABS, etc)
@@ -36,6 +35,13 @@ struct input_value {
__s32 value;
};
+enum input_clock_type {
+ INPUT_CLK_REAL = 0,
+ INPUT_CLK_MONO,
+ INPUT_CLK_BOOT,
+ INPUT_CLK_MAX
+};
+
/**
* struct input_dev - represents an input device
* @name: name of the device
@@ -67,6 +73,8 @@ struct input_value {
* not sleep
* @ff: force feedback structure associated with the device if device
* supports force feedback effects
+ * @poller: poller structure associated with the device if device is
+ * set up to use polling mode
* @repeat_key: stores key code of the last key pressed; used to implement
* software autorepeat
* @timer: timer for software autorepeat
@@ -82,9 +90,11 @@ struct input_value {
* @open: this method is called when the very first user calls
* input_open_device(). The driver must prepare the device
* to start generating events (start polling thread,
- * request an IRQ, submit URB, etc.)
+ * request an IRQ, submit URB, etc.). The meaning of open() is
+ * to start providing events to the input core.
* @close: this method is called when the very last user calls
- * input_close_device().
+ * input_close_device(). The meaning of close() is to stop
+ * providing events to the input core.
* @flush: purges the device. Most commonly used to get rid of force
* feedback effects loaded into the device when disconnecting
* from it
@@ -117,6 +127,12 @@ struct input_value {
* @vals: array of values queued in the current frame
* @devres_managed: indicates that devices is managed with devres framework
* and needs not be explicitly unregistered or freed.
+ * @timestamp: storage for a timestamp set by input_set_timestamp called
+ * by a driver
+ * @inhibited: indicates that the input device is inhibited. If that is
+ * the case then input core ignores any events generated by the device.
+ * Device's close() is called when it is being inhibited and its open()
+ * is called when it is being uninhibited.
*/
struct input_dev {
const char *name;
@@ -150,6 +166,8 @@ struct input_dev {
struct ff_device *ff;
+ struct input_dev_poller *poller;
+
unsigned int repeat_key;
struct timer_list timer;
@@ -187,6 +205,10 @@ struct input_dev {
struct input_value *vals;
bool devres_managed;
+
+ ktime_t timestamp[INPUT_CLK_MAX];
+
+ bool inhibited;
};
#define to_input_dev(d) container_of(d, struct input_dev, dev)
@@ -234,6 +256,10 @@ struct input_dev {
#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
#endif
+#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX
+#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match"
+#endif
+
#define INPUT_DEVICE_ID_MATCH_DEVICE \
(INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
@@ -249,7 +275,8 @@ struct input_handle;
* it may not sleep
* @events: event sequence handler. This method is being called by
* input core with interrupts disabled and dev->event_lock
- * spinlock held and so it may not sleep
+ spinlock held and so it may not sleep. The method must return
+ the number of events passed to it.
* @filter: similar to @event; separates normal event handlers from
* "filters".
* @match: called after comparing device's id with handler's id_table
@@ -259,6 +286,10 @@ struct input_handle;
* @start: starts handler for given handle. This function is called by
* input core right after connect() method and also when a process
* that "grabbed" a device releases it
+ * @passive_observer: set to %true by drivers only interested in observing
+ * data stream from devices if there are other users present. Such
+ * drivers will not result in starting underlying hardware device
+ * when input_open_device() is called for their handles
* @legacy_minors: set to %true by drivers using legacy minor ranges
* @minor: beginning of range of 32 legacy minors for devices this driver
* can provide
@@ -286,14 +317,15 @@ struct input_handler {
void *private;
void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
- void (*events)(struct input_handle *handle,
- const struct input_value *vals, unsigned int count);
+ unsigned int (*events)(struct input_handle *handle,
+ struct input_value *vals, unsigned int count);
bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
bool (*match)(struct input_handler *handler, struct input_dev *dev);
int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id);
void (*disconnect)(struct input_handle *handle);
void (*start)(struct input_handle *handle);
+ bool passive_observer;
bool legacy_minors;
int minor;
const char *name;
@@ -312,12 +344,16 @@ struct input_handler {
* @name: name given to the handle by handler that created it
* @dev: input device the handle is attached to
* @handler: handler that works with the device through this handle
+ * @handle_events: event sequence handler. It is set up by the input core
+ * according to event handling method specified in the @handler. See
+ * input_handle_setup_event_handler().
+ * This method is being called by the input core with interrupts disabled
+ * and dev->event_lock spinlock held and so it may not sleep.
* @d_node: used to put the handle on device's list of attached handles
* @h_node: used to put the handle on handler's list of handles from which
* it gets events
*/
struct input_handle {
-
void *private;
int open;
@@ -326,6 +362,10 @@ struct input_handle {
struct input_dev *dev;
struct input_handler *handler;
+ unsigned int (*handle_events)(struct input_handle *handle,
+ struct input_value *vals,
+ unsigned int count);
+
struct list_head d_node;
struct list_head h_node;
};
@@ -360,6 +400,13 @@ void input_unregister_device(struct input_dev *);
void input_reset_device(struct input_dev *);
+int input_setup_polling(struct input_dev *dev,
+ void (*poll_fn)(struct input_dev *dev));
+void input_set_poll_interval(struct input_dev *dev, unsigned int interval);
+void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval);
+void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval);
+int input_get_poll_interval(struct input_dev *dev);
+
int __must_check input_register_handler(struct input_handler *);
void input_unregister_handler(struct input_handler *);
@@ -381,6 +428,9 @@ void input_close_device(struct input_handle *);
int input_flush_device(struct input_handle *handle, struct file *file);
+void input_set_timestamp(struct input_dev *dev, ktime_t timestamp);
+ktime_t *input_get_timestamp(struct input_dev *dev);
+
void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
@@ -439,6 +489,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even
void input_alloc_absinfo(struct input_dev *dev);
void input_set_abs_params(struct input_dev *dev, unsigned int axis,
int min, int max, int fuzz, int flat);
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+ const struct input_dev *src, unsigned int src_axis);
#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \
static inline int input_abs_get_##_suffix(struct input_dev *dev, \
@@ -469,9 +521,14 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
int input_set_keycode(struct input_dev *dev,
const struct input_keymap_entry *ke);
+bool input_match_device_id(const struct input_dev *dev,
+ const struct input_device_id *id);
+
void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
-extern struct class input_class;
+bool input_device_enabled(struct input_dev *dev);
+
+extern const struct class input_class;
/**
* struct ff_device - force-feedback part of an input device
@@ -519,7 +576,7 @@ struct ff_device {
int max_effects;
struct ff_effect *effects;
- struct file *effect_owners[];
+ struct file *effect_owners[] __counted_by(max_effects);
};
int input_ff_create(struct input_dev *dev, unsigned int max_effects);
@@ -529,6 +586,7 @@ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code,
int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file);
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file);
+int input_ff_flush(struct input_dev *dev, struct file *file);
int input_ff_create_memless(struct input_dev *dev, void *data,
int (*play_effect)(struct input_dev *, void *, struct ff_effect *));
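
Among the additions above, input_set_timestamp() lets a driver stamp events with a hardware-provided time instead of the core's default timestamp. A short sketch (driver names illustrative):

static void foo_report_sample(struct input_dev *input, ktime_t hw_ts, int x)
{
	/* Must be called before the events it is meant to describe. */
	input_set_timestamp(input, hw_ts);
	input_report_abs(input, ABS_X, x);
	input_sync(input);
}
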
diff --git a/include/linux/input/ad714x.h b/include/linux/input/ad714x.h
index d388d857bf14..20aea668b007 100644
--- a/include/linux/input/ad714x.h
+++ b/include/linux/input/ad714x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/input/ad714x.h
*
@@ -7,8 +8,6 @@
* information.
*
* Copyright 2009-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __LINUX_INPUT_AD714X_H__
diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h
index 1a05eee15e67..0e4742c8c81e 100644
--- a/include/linux/input/adp5589.h
+++ b/include/linux/input/adp5589.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Analog Devices ADP5589/ADP5585 I/O Expander and QWERTY Keypad Controller
*
* Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef _ADP5589_H
@@ -176,13 +175,6 @@ struct i2c_client; /* forward declaration */
struct adp5589_gpio_platform_data {
int gpio_start; /* GPIO Chip base # */
- int (*setup)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- void *context;
};
#endif
diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h
index 010d98175efa..7efc9008f316 100644
--- a/include/linux/input/adxl34x.h
+++ b/include/linux/input/adxl34x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/input/adxl34x.h
*
@@ -6,8 +7,6 @@
* device's "struct device" holds this information.
*
* Copyright 2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __LINUX_INPUT_ADXL34X_H__
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
index 1affd0ddfa9d..5705d5de3aea 100644
--- a/include/linux/input/as5011.h
+++ b/include/linux/input/as5011.h
@@ -1,16 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _AS5011_H
#define _AS5011_H
/*
* Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
struct as5011_platform_data {
- unsigned int button_gpio;
unsigned int axis_irq; /* irq number */
unsigned long axis_irqflags;
char xp, xn; /* threshold for x axis */
diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h
deleted file mode 100644
index 5049f21928e4..000000000000
--- a/include/linux/input/auo-pixcir-ts.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Driver for AUO in-cell touchscreens
- *
- * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
- *
- * based on auo_touch.h from Dell Streak kernel
- *
- * Copyright (c) 2008 QUALCOMM Incorporated.
- * Copyright (c) 2008 QUALCOMM USA, INC.
- *
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __AUO_PIXCIR_TS_H__
-#define __AUO_PIXCIR_TS_H__
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodically
- * compare coordinates: interrupt is asserted when coordinates change
- * indicate touch: interrupt is asserted during touch
- */
-#define AUO_PIXCIR_INT_PERIODICAL 0x00
-#define AUO_PIXCIR_INT_COMP_COORD 0x01
-#define AUO_PIXCIR_INT_TOUCH_IND 0x02
-
-/*
- * @gpio_int interrupt gpio
- * @int_setting one of AUO_PIXCIR_INT_*
- * @init_hw hardware-specific init
- * @exit_hw hardware-specific shutdown
- * @x_max x-resolution
- * @y_max y-resolution
- */
-struct auo_pixcir_ts_platdata {
- int gpio_int;
- int gpio_rst;
-
- int int_setting;
-
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h
deleted file mode 100644
index 6230d76bde5d..000000000000
--- a/include/linux/input/bu21013.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * License terms:GNU General Public License (GPL) version 2
- */
-
-#ifndef _BU21013_H
-#define _BU21013_H
-
-/**
- * struct bu21013_platform_device - Handle the platform data
- * @touch_x_max: touch x max
- * @touch_y_max: touch y max
- * @cs_pin: chip select pin
- * @touch_pin: touch gpio pin
- * @ext_clk: external clock flag
- * @x_flip: x flip flag
- * @y_flip: y flip flag
- * @wakeup: wakeup flag
- *
- * This is used to handle the platform data
- */
-struct bu21013_platform_device {
- int touch_x_max;
- int touch_y_max;
- unsigned int cs_pin;
- unsigned int touch_pin;
- bool ext_clk;
- bool x_flip;
- bool y_flip;
- bool wakeup;
-};
-
-#endif
diff --git a/include/linux/input/cma3000.h b/include/linux/input/cma3000.h
index cbbaac27d311..aaab51fa909f 100644
--- a/include/linux/input/cma3000.h
+++ b/include/linux/input/cma3000.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* VTI CMA3000_Dxx Accelerometer driver
*
* Copyright (C) 2010 Texas Instruments
* Author: Hemanth V <hemanthv@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _LINUX_CMA3000_H
diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h
deleted file mode 100644
index 09522cb59910..000000000000
--- a/include/linux/input/cy8ctmg110_pdata.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _LINUX_CY8CTMG110_PDATA_H
-#define _LINUX_CY8CTMG110_PDATA_H
-
-struct cy8ctmg110_pdata
-{
- int reset_pin; /* Reset pin is wired to this GPIO (optional) */
- int irq_pin; /* IRQ pin is wired to this GPIO */
-};
-
-#endif
diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h
deleted file mode 100644
index 586c8c95dcb0..000000000000
--- a/include/linux/input/cyttsp.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, and only version 2, as published by the
- * Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- *
- */
-#ifndef _CYTTSP_H_
-#define _CYTTSP_H_
-
-#define CY_SPI_NAME "cyttsp-spi"
-#define CY_I2C_NAME "cyttsp-i2c"
-/* Active Power state scanning/processing refresh interval */
-#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
-/* touch timeout for the Active power */
-#define CY_TCH_TMOUT_DFLT 0xFF /* ms */
-/* Low Power state scanning/processing refresh interval */
-#define CY_LP_INTRVL_DFLT 0x0A /* ms */
-/* Active distance in pixels for a gesture to be reported */
-#define CY_ACT_DIST_DFLT 0xF8 /* pixels */
-
-#endif /* _CYTTSP_H_ */
diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h
new file mode 100644
index 000000000000..51cca17ee94c
--- /dev/null
+++ b/include/linux/input/elan-i2c-ids.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Elan I2C/SMBus Touchpad device whitelist
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Author: KT Liao <kt.liao@emc.com.tw>
+ * Version: 1.6.3
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#ifndef __ELAN_I2C_IDS_H
+#define __ELAN_I2C_IDS_H
+
+#include <linux/mod_devicetable.h>
+
+static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
+ { "ELAN0601", 0 },
+ { "ELAN0602", 0 },
+ { "ELAN0603", 0 },
+ { "ELAN0604", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0606", 0 },
+ { "ELAN0607", 0 },
+ { "ELAN0608", 0 },
+ { "ELAN0609", 0 },
+ { "ELAN060B", 0 },
+ { "ELAN060C", 0 },
+ { "ELAN060F", 0 },
+ { "ELAN0610", 0 },
+ { "ELAN0611", 0 },
+ { "ELAN0612", 0 },
+ { "ELAN0615", 0 },
+ { "ELAN0616", 0 },
+ { "ELAN0617", 0 },
+ { "ELAN0618", 0 },
+ { "ELAN0619", 0 },
+ { "ELAN061A", 0 },
+/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
+ { "ELAN061C", 0 },
+ { "ELAN061D", 0 },
+ { "ELAN061E", 0 },
+ { "ELAN061F", 0 },
+ { "ELAN0620", 0 },
+ { "ELAN0621", 0 },
+ { "ELAN0622", 0 },
+ { "ELAN0623", 0 },
+ { "ELAN0624", 0 },
+ { "ELAN0625", 0 },
+ { "ELAN0626", 0 },
+ { "ELAN0627", 0 },
+ { "ELAN0628", 0 },
+ { "ELAN0629", 0 },
+ { "ELAN062A", 0 },
+ { "ELAN062B", 0 },
+ { "ELAN062C", 0 },
+ { "ELAN062D", 0 },
+ { "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */
+ { "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */
+ { "ELAN0631", 0 },
+ { "ELAN0632", 0 },
+ { "ELAN0633", 0 }, /* Lenovo S145 */
+ { "ELAN0634", 0 }, /* Lenovo V340 Ice lake */
+ { "ELAN0635", 0 }, /* Lenovo V1415-IIL */
+ { "ELAN0636", 0 }, /* Lenovo V1415-Dali */
+ { "ELAN0637", 0 }, /* Lenovo V1415-IGLR */
+ { "ELAN1000", 0 },
+ { }
+};
+
+#endif /* __ELAN_I2C_IDS_H */
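
The table lives in a header so the I2C and SMBus touchpad drivers can share one whitelist. A hedged sketch of how a consumer might wire it up (driver name illustrative; ACPI_PTR() comes from linux/acpi.h):

#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/input/elan-i2c-ids.h>

MODULE_DEVICE_TABLE(acpi, elan_acpi_id);

static struct i2c_driver elan_example_driver = {
	.driver = {
		.name			= "elan_example",
		.acpi_match_table	= ACPI_PTR(elan_acpi_id),
	},
	/* .probe etc. omitted */
};
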
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
deleted file mode 100644
index aad2fd44a61a..000000000000
--- a/include/linux/input/gp2ap002a00f.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _GP2AP002A00F_H_
-#define _GP2AP002A00F_H_
-
-#include <linux/i2c.h>
-
-#define GP2A_I2C_NAME "gp2ap002a00f"
-
-/**
- * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
- * @vout_gpio: The gpio connected to the object detected pin (VOUT)
- * @wakeup: Set to true if the proximity can wake the device from suspend
- * @hw_setup: Callback for setting up hardware such as gpios and vregs
- * @hw_shutdown: Callback for properly shutting down hardware
- */
-struct gp2a_platform_data {
- int vout_gpio;
- bool wakeup;
- int (*hw_setup)(struct i2c_client *client);
- int (*hw_shutdown)(struct i2c_client *client);
-};
-
-#endif
diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h
deleted file mode 100644
index c1cc52d380e0..000000000000
--- a/include/linux/input/gpio_tilt.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _INPUT_GPIO_TILT_H
-#define _INPUT_GPIO_TILT_H
-
-/**
- * struct gpio_tilt_axis - Axis used by the tilt switch
- * @axis: Constant describing the axis, e.g. ABS_X
- * @min: minimum value for abs_param
- * @max: maximum value for abs_param
- * @fuzz: fuzz value for abs_param
- * @flat: flat value for abs_param
- */
-struct gpio_tilt_axis {
- int axis;
- int min;
- int max;
- int fuzz;
- int flat;
-};
-
-/**
- * struct gpio_tilt_state - state description
- * @gpios: bitfield of gpio target-states for the value
- * @axes: array containing the axes settings for the gpio state
- * The array indices must correspond to the axes defined
- * in platform_data
- *
- * This structure describes a supported axis setting
- * and the necessary gpio-state which represents it.
- *
- * The n-th bit in the bitfield describes the state of the n-th GPIO
- * from the gpios-array defined in gpio_regulator_config below.
- */
-struct gpio_tilt_state {
- int gpios;
- int *axes;
-};
-
-/**
- * struct gpio_tilt_platform_data
- * @gpios: Array containing the gpios determining the tilt state
- * @nr_gpios: Number of gpios
- * @axes: Array of gpio_tilt_axis descriptions
- * @nr_axes: Number of axes
- * @states: Array of gpio_tilt_state entries describing
- * the gpio state for specific tilts
- * @nr_states: Number of states available
- * @debounce_interval: debounce ticks interval in msecs
- * @poll_interval: polling interval in msecs - for polling driver only
- * @enable: callback to enable the tilt switch
- * @disable: callback to disable the tilt switch
- *
- * This structure contains gpio-tilt-switch configuration
- * information that must be passed by platform code to the
- * gpio-tilt input driver.
- */
-struct gpio_tilt_platform_data {
- struct gpio *gpios;
- int nr_gpios;
-
- struct gpio_tilt_axis *axes;
- int nr_axes;
-
- struct gpio_tilt_state *states;
- int nr_states;
-
- int debounce_interval;
-
- unsigned int poll_interval;
- int (*enable)(struct device *dev);
- void (*disable)(struct device *dev);
-};
-
-#endif
diff --git a/include/linux/input/ili210x.h b/include/linux/input/ili210x.h
deleted file mode 100644
index a5471245a13c..000000000000
--- a/include/linux/input/ili210x.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ILI210X_H
-#define _ILI210X_H
-
-struct ili210x_platform_data {
- unsigned long irq_flags;
- unsigned int poll_period;
- bool (*get_pendown_state)(void);
-};
-
-#endif
diff --git a/include/linux/input/kxtj9.h b/include/linux/input/kxtj9.h
index d415579b56fe..46e231986fde 100644
--- a/include/linux/input/kxtj9.h
+++ b/include/linux/input/kxtj9.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Kionix, Inc.
* Written by Chris Hudson <chudson@kionix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#ifndef __KXTJ9_H__
diff --git a/include/linux/input/lm8333.h b/include/linux/input/lm8333.h
index 79f918c6e8c5..906da5fc06e0 100644
--- a/include/linux/input/lm8333.h
+++ b/include/linux/input/lm8333.h
@@ -1,6 +1,6 @@
/*
* public include for LM8333 keypad driver - same license as driver
- * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*/
#ifndef _LM8333_H
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 6174733a57eb..90867f44ab4d 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -1,9 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MATRIX_KEYPAD_H
#define _MATRIX_KEYPAD_H
#include <linux/types.h>
-#include <linux/input.h>
-#include <linux/of.h>
+
+struct device;
+struct input_dev;
#define MATRIX_MAX_ROWS 32
#define MATRIX_MAX_COLS 32
@@ -32,52 +34,6 @@ struct matrix_keymap_data {
unsigned int keymap_size;
};
-/**
- * struct matrix_keypad_platform_data - platform-dependent keypad data
- * @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: pointer to array of gpio numbers representing rows
- * @col_gpios: pointer to array of gpio numbers representing columns
- * @num_row_gpios: actual number of row gpios used by device
- * @num_col_gpios: actual number of col gpios used by device
- * @col_scan_delay_us: delay, measured in microseconds, that is
- * needed before we can scan the keypad after activating column gpio
- * @debounce_ms: debounce interval in milliseconds
- * @clustered_irq: may be specified if interrupts of all row/column GPIOs
- * are bundled to one single irq
- * @clustered_irq_flags: flags that are needed for the clustered irq
- * @active_low: gpio polarity
- * @wakeup: controls whether the device should be set up as wakeup
- * source
- * @no_autorepeat: disable key autorepeat
- * @drive_inactive_cols: drive inactive columns during scan, rather than
- * making them inputs.
- *
- * This structure represents platform-specific data that is used by
- * matrix_keypad driver to perform proper initialization.
- */
-struct matrix_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
-
- const unsigned int *row_gpios;
- const unsigned int *col_gpios;
-
- unsigned int num_row_gpios;
- unsigned int num_col_gpios;
-
- unsigned int col_scan_delay_us;
-
- /* key debounce interval in milli-second */
- unsigned int debounce_ms;
-
- unsigned int clustered_irq;
- unsigned int clustered_irq_flags;
-
- bool active_low;
- bool wakeup;
- bool no_autorepeat;
- bool drive_inactive_cols;
-};
-
int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
const char *keymap_name,
unsigned int rows, unsigned int cols,
@@ -86,6 +42,4 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
int matrix_keypad_parse_properties(struct device *dev,
unsigned int *rows, unsigned int *cols);
-#define matrix_keypad_parse_of_params matrix_keypad_parse_properties
-
#endif /* _MATRIX_KEYPAD_H */
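
With the platform data struct gone, row/column geometry and the keymap come from device properties. A sketch of the surviving API (function name illustrative):

static int example_keypad_setup(struct device *dev, struct input_dev *input)
{
	unsigned int rows, cols;
	int error;

	error = matrix_keypad_parse_properties(dev, &rows, &cols);
	if (error)
		return error;

	/* NULL keymap_data/name: use the default "linux,keymap" property. */
	return matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL, input);
}
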
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index d7188de4db96..d30286298a00 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _INPUT_MT_H
#define _INPUT_MT_H
@@ -5,10 +6,6 @@
* Input Multitouch Library
*
* Copyright (c) 2010 Henrik Rydberg
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <linux/input.h>
@@ -20,6 +17,7 @@
#define INPUT_MT_DROP_UNUSED 0x0004 /* drop contacts not seen in frame */
#define INPUT_MT_TRACK 0x0008 /* use in-kernel tracking */
#define INPUT_MT_SEMI_MT 0x0010 /* semi-mt device, finger count handled manually */
+#define INPUT_MT_TOTAL_FORCE 0x0020 /* calculate total force from slots pressure */
/**
* struct input_mt_slot - represents the state of an input MT slot
@@ -50,7 +48,7 @@ struct input_mt {
unsigned int flags;
unsigned int frame;
int *red;
- struct input_mt_slot slots[];
+ struct input_mt_slot slots[] __counted_by(num_slots);
};
static inline void input_mt_set_value(struct input_mt_slot *slot,
@@ -100,9 +98,14 @@ static inline bool input_is_mt_axis(int axis)
return axis == ABS_MT_SLOT || input_is_mt_value(axis);
}
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
+static inline void input_mt_report_slot_inactive(struct input_dev *dev)
+{
+ input_mt_report_slot_state(dev, 0, false);
+}
+
void input_mt_report_finger_count(struct input_dev *dev, int count);
void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
void input_mt_drop_unused(struct input_dev *dev);
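
input_mt_report_slot_state() now reports back whether the contact counts as active, and the new input_mt_report_slot_inactive() wraps the common "slot released" case. A per-slot reporting sketch (function name illustrative):

static void example_report_slot(struct input_dev *input, int slot,
				bool down, int x, int y)
{
	input_mt_slot(input, slot);
	if (down) {
		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
		input_report_abs(input, ABS_MT_POSITION_X, x);
		input_report_abs(input, ABS_MT_POSITION_Y, y);
	} else {
		input_mt_report_slot_inactive(input);
	}
}
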
diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h
deleted file mode 100644
index 45050eb34de3..000000000000
--- a/include/linux/input/navpoint.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-struct navpoint_platform_data {
- int port; /* PXA SSP port for pxa_ssp_request() */
- int gpio; /* GPIO for power on/off */
-};
diff --git a/include/linux/input/samsung-keypad.h b/include/linux/input/samsung-keypad.h
index f25619bfd8a8..ab6b97114c08 100644
--- a/include/linux/input/samsung-keypad.h
+++ b/include/linux/input/samsung-keypad.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Samsung Keypad platform data definitions
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __SAMSUNG_KEYPAD_H
diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h
index 5d253cd93691..b3c4f3b6679c 100644
--- a/include/linux/input/sh_keysc.h
+++ b/include/linux/input/sh_keysc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SH_KEYSC_H__
#define __SH_KEYSC_H__
diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h
index c7346e33d958..d0dddc14ebc8 100644
--- a/include/linux/input/sparse-keymap.h
+++ b/include/linux/input/sparse-keymap.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _SPARSE_KEYMAP_H
#define _SPARSE_KEYMAP_H
/*
* Copyright (c) 2009 Dmitry Torokhov
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#define KE_END 0 /* Indicates end of keymap */
@@ -23,6 +20,7 @@
* private definitions.
* @code: Device-specific data identifying the button/switch
* @keycode: KEY_* code assigned to a key/button
+ * @sw: struct with code/value used by KE_SW and KE_VSW
* @sw.code: SW_* code assigned to a switch
* @sw.value: Value that should be sent in an input event when KE_SW
* switch is toggled. KE_VSW switches ignore this field and
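
A sparse keymap is simply a KE_END-terminated array of struct key_entry values. An illustrative table (scancodes invented for the example):

static const struct key_entry example_keymap[] = {
	{ KE_KEY, 0x31, { KEY_VOLUMEUP } },
	{ KE_KEY, 0x32, { KEY_VOLUMEDOWN } },
	{ KE_SW,  0x35, { .sw = { .code = SW_LID, .value = 1 } } },
	{ KE_END }
};
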
diff --git a/include/linux/input/touch-overlay.h b/include/linux/input/touch-overlay.h
new file mode 100644
index 000000000000..0253e554d3cd
--- /dev/null
+++ b/include/linux/input/touch-overlay.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Javier Carrasco <javier.carrasco@wolfvision.net>
+ */
+
+#ifndef _TOUCH_OVERLAY
+#define _TOUCH_OVERLAY
+
+#include <linux/types.h>
+
+struct input_dev;
+
+int touch_overlay_map(struct list_head *list, struct input_dev *input);
+
+void touch_overlay_get_touchscreen_abs(struct list_head *list, u16 *x, u16 *y);
+
+bool touch_overlay_mapped_touchscreen(struct list_head *list);
+
+bool touch_overlay_process_contact(struct list_head *list,
+ struct input_dev *input,
+ struct input_mt_pos *pos, int slot);
+
+void touch_overlay_sync_frame(struct list_head *list, struct input_dev *input);
+
+#endif
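
The helpers let a touchscreen driver divert contacts that land on an overlay-defined button area before reporting them as regular touches. A hedged sketch of the per-contact flow (the surrounding driver state is assumed, not part of the header):

static void example_handle_contact(struct list_head *overlays,
				   struct input_dev *input,
				   struct input_mt_pos *pos, int slot)
{
	/* true: the overlay consumed the contact (e.g. a button press) */
	if (touch_overlay_process_contact(overlays, input, pos, slot))
		return;

	/* otherwise report it as an ordinary touchscreen contact ... */
}
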
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index 09d22ccb9e41..fe66e2b58f62 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 Sebastian Reichel <sre@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _TOUCHSCREEN_H
diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h
new file mode 100644
index 000000000000..7e4b7023bf04
--- /dev/null
+++ b/include/linux/input/vivaldi-fmap.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VIVALDI_FMAP_H
+#define _VIVALDI_FMAP_H
+
+#include <linux/types.h>
+
+#define VIVALDI_MAX_FUNCTION_ROW_KEYS 24
+
+/**
+ * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards
+ * @function_row_physmap: An array of scancodes or their equivalent (HID usage
+ * codes, encoded rows/columns, etc) for the top
+ * row function keys, in an order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
+ *
+ * This structure is supposed to be used by ChromeOS keyboards using
+ * the Vivaldi keyboard function row design.
+ */
+struct vivaldi_data {
+ u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS];
+ unsigned int num_function_row_keys;
+};
+
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+ char *buf);
+
+#endif /* _VIVALDI_FMAP_H */
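
Keyboards following the Vivaldi design expose the map through a function_row_physmap sysfs attribute, and the helper does the formatting. A sketch of a show routine built on it (the private struct is illustrative):

struct example_priv {
	struct vivaldi_data vivaldi;
};

static ssize_t function_row_physmap_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	const struct example_priv *priv = dev_get_drvdata(dev);

	return vivaldi_function_row_physmap_show(&priv->vivaldi, buf);
}
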
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
new file mode 100644
index 000000000000..aa0b3ffea935
--- /dev/null
+++ b/include/linux/instruction_pointer.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INSTRUCTION_POINTER_H
+#define _LINUX_INSTRUCTION_POINTER_H
+
+#include <asm/linkage.h>
+
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
+#ifndef _THIS_IP_
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
+
+#endif /* _LINUX_INSTRUCTION_POINTER_H */
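
_RET_IP_ evaluates to the caller's return address and _THIS_IP_ to an address inside the current function, which makes both handy with the %pS printk format. A small sketch (linux/printk.h assumed):

static void example_where_am_i(void)
{
	pr_debug("called from %pS, currently at %pS\n",
		 (void *)_RET_IP_, (void *)_THIS_IP_);
}
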
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
new file mode 100644
index 000000000000..bf675a8aef8a
--- /dev/null
+++ b/include/linux/instrumentation.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_INSTRUMENTATION_H
+#define __LINUX_INSTRUMENTATION_H
+
+#ifdef CONFIG_NOINSTR_VALIDATION
+
+#include <linux/objtool.h>
+#include <linux/stringify.h>
+
+/* Begin/end of an instrumentation safe region */
+#define __instrumentation_begin(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
+ ANNOTATE_INSTR_BEGIN(__ASM_BREF(c)) \
+ : : "i" (c)); \
+})
+#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
+
+/*
+ * Because instrumentation_{begin,end}() can nest, objtool validation considers
+ * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
+ * When the value is greater than 0, we consider instrumentation allowed.
+ *
+ * There is a problem with code like:
+ *
+ * noinstr void foo()
+ * {
+ * instrumentation_begin();
+ * ...
+ * if (cond) {
+ * instrumentation_begin();
+ * ...
+ * instrumentation_end();
+ * }
+ * bar();
+ * instrumentation_end();
+ * }
+ *
+ * If instrumentation_end() were an empty label, like all the other
+ * annotations, the inner _end(), which is at the end of a conditional block,
+ * would land on the instruction after the block.
+ *
+ * If we then consider the sum of the !cond path, we'll see that the call to
+ * bar() is with a 0-value, even though we meant it to happen with a positive
+ * value.
+ *
+ * To avoid this, have _end() be a NOP instruction; this ensures it will be
+ * part of the condition block and does not escape.
+ */
+#define __instrumentation_end(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
+ ANNOTATE_INSTR_END(__ASM_BREF(c)) \
+ : : "i" (c)); \
+})
+#define instrumentation_end() __instrumentation_end(__COUNTER__)
+#else /* !CONFIG_NOINSTR_VALIDATION */
+# define instrumentation_begin() do { } while(0)
+# define instrumentation_end() do { } while(0)
+#endif /* CONFIG_NOINSTR_VALIDATION */
+
+#endif /* __LINUX_INSTRUMENTATION_H */
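
In practice the pair brackets the window inside a noinstr function where calls into instrumentable code are legitimate, which is exactly what objtool's counting scheme above validates. A hedged sketch (the entry function name is illustrative):

noinstr void example_entry(void)
{
	/* no calls into instrumentable code allowed here */

	instrumentation_begin();
	trace_hardirqs_off();	/* instrumented code is fine in the window */
	instrumentation_end();

	/* back to the non-instrumentable region */
}
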
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 000000000000..711a1f0d1a73
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for memory access instrumentation that
+ * the compiler cannot emit for: KASAN, KCSAN, KMSAN.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ */
+static __always_inline void instrument_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_read(v, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ */
+static __always_inline void instrument_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_write(v, size);
+}
+
+/**
+ * instrument_read_write - instrument regular read-write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument a regular read-write access. The instrumentation should be inserted
+ * before the actual write happens.
+ */
+static __always_inline void instrument_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_read_write(v, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ */
+static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_atomic_read(v, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ */
+static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_write(v, size);
+}
+
+/**
+ * instrument_atomic_read_write - instrument atomic read-write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument an atomic read-write access. The instrumentation should be
+ * inserted before the actual write happens.
+ */
+static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_read_write(v, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ *
+ * Instrument reads from kernel memory that are due to copy_to_user (and
+ * variants). The instrumentation must be inserted before the accesses.
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ kcsan_check_read(from, n);
+ kmsan_copy_to_user(to, from, n, 0);
+}
+
+/**
+ * instrument_copy_from_user_before - add instrumentation before copy_from_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted before the accesses.
+ */
+static __always_inline void
+instrument_copy_from_user_before(const void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ kcsan_check_write(to, n);
+}
+
+/**
+ * instrument_copy_from_user_after - add instrumentation after copy_from_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ * @left: number of bytes not copied (as returned by copy_from_user)
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted after the accesses.
+ */
+static __always_inline void
+instrument_copy_from_user_after(const void *to, const void __user *from,
+ unsigned long n, unsigned long left)
+{
+ kmsan_unpoison_memory(to, n - left);
+}
+
+/**
+ * instrument_memcpy_before - add instrumentation before non-instrumented memcpy
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ *
+ * Instrument memory accesses that happen in custom memcpy implementations. The
+ * instrumentation should be inserted before the memcpy call.
+ */
+static __always_inline void instrument_memcpy_before(void *to, const void *from,
+ unsigned long n)
+{
+ kasan_check_write(to, n);
+ kasan_check_read(from, n);
+ kcsan_check_write(to, n);
+ kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_memcpy_after - add instrumentation after non-instrumented memcpy
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ * @left: number of bytes not copied (if known)
+ *
+ * Instrument memory accesses that happen in custom memcpy implementations. The
+ * instrumentation should be inserted after the memcpy call.
+ */
+static __always_inline void instrument_memcpy_after(void *to, const void *from,
+ unsigned long n,
+ unsigned long left)
+{
+ kmsan_memmove(to, from, n - left);
+}
+
+/**
+ * instrument_get_user() - add instrumentation to get_user()-like macros
+ * @to: destination variable, may not be address-taken
+ *
+ * get_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied from
+ * the userspace.
+ */
+#define instrument_get_user(to) \
+({ \
+ u64 __tmp = (u64)(to); \
+ kmsan_unpoison_memory(&__tmp, sizeof(__tmp)); \
+ to = __tmp; \
+})
+
+
+/**
+ * instrument_put_user() - add instrumentation to put_user()-like macros
+ * @from: source address
+ * @ptr: userspace pointer to copy to
+ * @size: number of bytes to copy
+ *
+ * put_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied to
+ * userspace.
+ */
+#define instrument_put_user(from, ptr, size) \
+({ \
+ kmsan_copy_to_user(ptr, &from, sizeof(from), 0); \
+})
+
+#endif /* _LINUX_INSTRUMENTED_H */
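
The before/after split exists because KASAN/KCSAN want to check the destination before the write happens, while KMSAN can only unpoison once the number of actually-copied bytes is known. A sketch of how a copy_from_user()-style wrapper uses the hooks (the wrapper name is illustrative):

static unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	instrument_copy_from_user_before(to, from, n);
	left = raw_copy_from_user(to, from, n);	/* arch copy primitive */
	instrument_copy_from_user_after(to, from, n, left);

	return left;	/* bytes NOT copied, as copy_from_user() reports */
}
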
diff --git a/include/linux/int_log.h b/include/linux/int_log.h
new file mode 100644
index 000000000000..0a6f58c38b61
--- /dev/null
+++ b/include/linux/int_log.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Provides fixed-point logarithm operations.
+ *
+ * Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
+ */
+
+#ifndef __LINUX_INT_LOG_H
+#define __LINUX_INT_LOG_H
+
+#include <linux/types.h>
+
+/**
+ * intlog2 - computes log2 of a value; the result is shifted left by 24 bits
+ *
+ * @value: The value (must be != 0)
+ *
+ * To use rational values you can use the following method:
+ *
+ * intlog2(value) = intlog2(value * 2^x) - x * 2^24
+ *
+ * Some use-case examples:
+ *
+ * intlog2(8) will give 3 << 24 = 3 * 2^24
+ *
+ * intlog2(9) will give 3 << 24 + ... = 3.16... * 2^24
+ *
+ * intlog2(1.5) = intlog2(3) - 2^24 = 0.584... * 2^24
+ *
+ *
+ * return: log2(value) * 2^24
+ */
+extern unsigned int intlog2(u32 value);
+
+/**
+ * intlog10 - computes log10 of a value; the result is shifted left by 24 bits
+ *
+ * @value: The value (must be != 0)
+ *
+ * To use rational values you can use the following method:
+ *
+ * intlog10(value) = intlog10(value * 10^x) - x * 2^24
+ *
+ * A use-case example:
+ *
+ * intlog10(1000) will give 3 << 24 = 3 * 2^24
+ *
+ * Due to the implementation, intlog10(1000) might not be exactly 3 * 2^24.
+ *
+ * Look at intlog2 for similar examples.
+ *
+ * return: log10(value) * 2^24
+ */
+extern unsigned int intlog10(u32 value);
+
+#endif
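
Both functions return Q8.24 fixed-point values: the bits above position 24 are the integer part of the logarithm and the low 24 bits the fraction. A worked sketch using the examples above:

static void example_intlog_usage(void)
{
	unsigned int l = intlog2(8);			/* == 3 << 24 */
	unsigned int whole = l >> 24;			/* integer part: 3 */
	unsigned int frac = l & ((1U << 24) - 1);	/* fractional part */

	(void)whole;
	(void)frac;
}
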
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index c2d6082a1a4c..f5842372359b 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -1,46 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2009 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
*/
#ifndef _LINUX_INTEGRITY_H
#define _LINUX_INTEGRITY_H
#include <linux/fs.h>
+#include <linux/iversion.h>
enum integrity_status {
INTEGRITY_PASS = 0,
+ INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
+ INTEGRITY_FAIL_IMMUTABLE,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
INTEGRITY_UNKNOWN,
};
-/* List of EVM protected security xattrs */
#ifdef CONFIG_INTEGRITY
-extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode);
-extern void integrity_inode_free(struct inode *inode);
extern void __init integrity_load_keys(void);
#else
-static inline struct integrity_iint_cache *
- integrity_inode_get(struct inode *inode)
+static inline void integrity_load_keys(void)
{
- return NULL;
}
+#endif /* CONFIG_INTEGRITY */
+
+/* An inode's attributes for detection of changes */
+struct integrity_inode_attributes {
+ u64 version; /* track inode changes */
+ unsigned long ino;
+ dev_t dev;
+};
-static inline void integrity_inode_free(struct inode *inode)
+/*
+ * On stacked filesystems the i_version alone is not enough to detect file data
+ * or metadata change. Additional metadata is required.
+ */
+static inline void
+integrity_inode_attrs_store(struct integrity_inode_attributes *attrs,
+ u64 i_version, const struct inode *inode)
{
- return;
+ attrs->version = i_version;
+ attrs->dev = inode->i_sb->s_dev;
+ attrs->ino = inode->i_ino;
}
-static inline void integrity_load_keys(void)
+/*
+ * On stacked filesystems detect whether the inode or its content has changed.
+ */
+static inline bool
+integrity_inode_attrs_changed(const struct integrity_inode_attributes *attrs,
+ const struct inode *inode)
{
+ return (inode->i_sb->s_dev != attrs->dev ||
+ inode->i_ino != attrs->ino ||
+ !inode_eq_iversion(inode, attrs->version));
}
-#endif /* CONFIG_INTEGRITY */
+
#endif /* _LINUX_INTEGRITY_H */
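
The snapshot/compare pair gives IMA/EVM a change detector that still works on stacked filesystems, where i_version alone is ambiguous. A hedged usage sketch (caller names illustrative; inode_query_iversion() comes from linux/iversion.h, which the header now includes):

static void example_snapshot(struct integrity_inode_attributes *attrs,
			     struct inode *inode)
{
	integrity_inode_attrs_store(attrs, inode_query_iversion(inode), inode);
}

static bool example_has_changed(const struct integrity_inode_attributes *attrs,
				const struct inode *inode)
{
	/* true if the device, inode number or i_version moved on */
	return integrity_inode_attrs_changed(attrs, inode);
}
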
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
deleted file mode 100644
index 485a5b48f038..000000000000
--- a/include/linux/intel-iommu.h
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * Copyright © 2006-2015, Intel Corporation.
- *
- * Authors: Ashok Raj <ashok.raj@intel.com>
- * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * David Woodhouse <David.Woodhouse@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef _INTEL_IOMMU_H_
-#define _INTEL_IOMMU_H_
-
-#include <linux/types.h>
-#include <linux/iova.h>
-#include <linux/io.h>
-#include <linux/idr.h>
-#include <linux/dma_remapping.h>
-#include <linux/mmu_notifier.h>
-#include <linux/list.h>
-#include <linux/iommu.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-#include <asm/cacheflush.h>
-#include <asm/iommu.h>
-
-/*
- * Intel IOMMU register specification per version 1.0 public spec.
- */
-
-#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
-#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
-#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
-#define DMAR_GCMD_REG 0x18 /* Global command register */
-#define DMAR_GSTS_REG 0x1c /* Global status register */
-#define DMAR_RTADDR_REG 0x20 /* Root entry table */
-#define DMAR_CCMD_REG 0x28 /* Context command reg */
-#define DMAR_FSTS_REG 0x34 /* Fault Status register */
-#define DMAR_FECTL_REG 0x38 /* Fault control register */
-#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
-#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
-#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
-#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
-#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
-#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
-#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
-#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
-#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
-#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
-#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
-#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
-#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
-#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
-#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
-#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
-#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
-#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
-#define DMAR_PRS_REG 0xdc /* Page request status register */
-#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
-#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
-#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
-#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
-
-#define OFFSET_STRIDE (9)
-
-#define dmar_readq(a) readq(a)
-#define dmar_writeq(a,v) writeq(v,a)
-
-#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
-#define DMAR_VER_MINOR(v) ((v) & 0x0f)
-
-/*
- * Decoding Capability Register
- */
-#define cap_pi_support(c) (((c) >> 59) & 1)
-#define cap_read_drain(c) (((c) >> 55) & 1)
-#define cap_write_drain(c) (((c) >> 54) & 1)
-#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
-#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
-#define cap_pgsel_inv(c) (((c) >> 39) & 1)
-
-#define cap_super_page_val(c) (((c) >> 34) & 0xf)
-#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
- * OFFSET_STRIDE) + 21)
-
-#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
-#define cap_max_fault_reg_offset(c) \
- (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
-
-#define cap_zlr(c) (((c) >> 22) & 1)
-#define cap_isoch(c) (((c) >> 23) & 1)
-#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
-#define cap_sagaw(c) (((c) >> 8) & 0x1f)
-#define cap_caching_mode(c) (((c) >> 7) & 1)
-#define cap_phmr(c) (((c) >> 6) & 1)
-#define cap_plmr(c) (((c) >> 5) & 1)
-#define cap_rwbf(c) (((c) >> 4) & 1)
-#define cap_afl(c) (((c) >> 3) & 1)
-#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
-/*
- * Extended Capability Register
- */
-
-#define ecap_pasid(e) ((e >> 40) & 0x1)
-#define ecap_pss(e) ((e >> 35) & 0x1f)
-#define ecap_eafs(e) ((e >> 34) & 0x1)
-#define ecap_nwfs(e) ((e >> 33) & 0x1)
-#define ecap_srs(e) ((e >> 31) & 0x1)
-#define ecap_ers(e) ((e >> 30) & 0x1)
-#define ecap_prs(e) ((e >> 29) & 0x1)
-#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
-#define ecap_dis(e) ((e >> 27) & 0x1)
-#define ecap_nest(e) ((e >> 26) & 0x1)
-#define ecap_mts(e) ((e >> 25) & 0x1)
-#define ecap_ecs(e) ((e >> 24) & 0x1)
-#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
-#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
-#define ecap_coherent(e) ((e) & 0x1)
-#define ecap_qis(e) ((e) & 0x2)
-#define ecap_pass_through(e) ((e >> 6) & 0x1)
-#define ecap_eim_support(e) ((e >> 4) & 0x1)
-#define ecap_ir_support(e) ((e >> 3) & 0x1)
-#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
-#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
-#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
-
-/* IOTLB_REG */
-#define DMA_TLB_FLUSH_GRANU_OFFSET 60
-#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
-#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
-#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
-#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
-#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
-#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
-#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
-#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
-#define DMA_TLB_IVT (((u64)1) << 63)
-#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_TLB_MAX_SIZE (0x3f)
-
-/* INVALID_DESC */
-#define DMA_CCMD_INVL_GRANU_OFFSET 61
-#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
-#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
-#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
-#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
-#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
-#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
-#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_ID_TLB_ADDR(addr) (addr)
-#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
-
-/* PMEN_REG */
-#define DMA_PMEN_EPM (((u32)1)<<31)
-#define DMA_PMEN_PRS (((u32)1)<<0)
-
-/* GCMD_REG */
-#define DMA_GCMD_TE (((u32)1) << 31)
-#define DMA_GCMD_SRTP (((u32)1) << 30)
-#define DMA_GCMD_SFL (((u32)1) << 29)
-#define DMA_GCMD_EAFL (((u32)1) << 28)
-#define DMA_GCMD_WBF (((u32)1) << 27)
-#define DMA_GCMD_QIE (((u32)1) << 26)
-#define DMA_GCMD_SIRTP (((u32)1) << 24)
-#define DMA_GCMD_IRE (((u32) 1) << 25)
-#define DMA_GCMD_CFI (((u32) 1) << 23)
-
-/* GSTS_REG */
-#define DMA_GSTS_TES (((u32)1) << 31)
-#define DMA_GSTS_RTPS (((u32)1) << 30)
-#define DMA_GSTS_FLS (((u32)1) << 29)
-#define DMA_GSTS_AFLS (((u32)1) << 28)
-#define DMA_GSTS_WBFS (((u32)1) << 27)
-#define DMA_GSTS_QIES (((u32)1) << 26)
-#define DMA_GSTS_IRTPS (((u32)1) << 24)
-#define DMA_GSTS_IRES (((u32)1) << 25)
-#define DMA_GSTS_CFIS (((u32)1) << 23)
-
-/* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
-
-/* CCMD_REG */
-#define DMA_CCMD_ICC (((u64)1) << 63)
-#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
-#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
-#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
-#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
-#define DMA_CCMD_MASK_NOBIT 0
-#define DMA_CCMD_MASK_1BIT 1
-#define DMA_CCMD_MASK_2BIT 2
-#define DMA_CCMD_MASK_3BIT 3
-#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
-#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
-
-/* FECTL_REG */
-#define DMA_FECTL_IM (((u32)1) << 31)
-
-/* FSTS_REG */
-#define DMA_FSTS_PPF ((u32)2)
-#define DMA_FSTS_PFO ((u32)1)
-#define DMA_FSTS_IQE (1 << 4)
-#define DMA_FSTS_ICE (1 << 5)
-#define DMA_FSTS_ITE (1 << 6)
-#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
-
-/* FRCD_REG, 32 bits access */
-#define DMA_FRCD_F (((u32)1) << 31)
-#define dma_frcd_type(d) ((d >> 30) & 1)
-#define dma_frcd_fault_reason(c) (c & 0xff)
-#define dma_frcd_source_id(c) (c & 0xffff)
-/* low 64 bit */
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
-
-/* PRS_REG */
-#define DMA_PRS_PPR ((u32)1)
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-do { \
- cycles_t start_time = get_cycles(); \
- while (1) { \
- sts = op(iommu->reg + offset); \
- if (cond) \
- break; \
- if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
- panic("DMAR hardware is malfunctioning\n"); \
- cpu_relax(); \
- } \
-} while (0)
-
-#define QI_LENGTH 256 /* queue length */
-
-enum {
- QI_FREE,
- QI_IN_USE,
- QI_DONE,
- QI_ABORT
-};
-
-#define QI_CC_TYPE 0x1
-#define QI_IOTLB_TYPE 0x2
-#define QI_DIOTLB_TYPE 0x3
-#define QI_IEC_TYPE 0x4
-#define QI_IWD_TYPE 0x5
-#define QI_EIOTLB_TYPE 0x6
-#define QI_PC_TYPE 0x7
-#define QI_DEIOTLB_TYPE 0x8
-#define QI_PGRP_RESP_TYPE 0x9
-#define QI_PSTRM_RESP_TYPE 0xa
-
-#define QI_IEC_SELECTIVE (((u64)1) << 4)
-#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
-#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
-
-#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
-#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
-
-#define QI_IOTLB_DID(did) (((u64)did) << 16)
-#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
-#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
-#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
-#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
-#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_IOTLB_AM(am) (((u8)am))
-
-#define QI_CC_FM(fm) (((u64)fm) << 48)
-#define QI_CC_SID(sid) (((u64)sid) << 32)
-#define QI_CC_DID(did) (((u64)did) << 16)
-#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
-
-#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
-#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
-#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_SIZE 1
-#define QI_DEV_IOTLB_MAX_INVS 32
-
-#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
-#define QI_PC_DID(did) (((u64)did) << 16)
-#define QI_PC_GRAN(gran) (((u64)gran) << 4)
-
-#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
-#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
-
-#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
-#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_EIOTLB_AM(am) (((u64)am))
-#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
-#define QI_EIOTLB_DID(did) (((u64)did) << 16)
-#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
-
-#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
-#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
-#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
-#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
-#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_MAX_INVS 32
-
-#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
-#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32)
-#define QI_PGRP_RESP_CODE(res) ((u64)(res))
-#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
-#define QI_PGRP_DID(did) (((u64)(did)) << 16)
-#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
-
-#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK)
-#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4)
-#define QI_PSTRM_RESP_CODE(res) ((u64)(res))
-#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55)
-#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32)
-#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24)
-#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4)
-
-#define QI_RESP_SUCCESS 0x0
-#define QI_RESP_INVALID 0x1
-#define QI_RESP_FAILURE 0xf
-
-#define QI_GRAN_ALL_ALL 0
-#define QI_GRAN_NONG_ALL 1
-#define QI_GRAN_NONG_PASID 2
-#define QI_GRAN_PSI_PASID 3
-
-struct qi_desc {
- u64 low, high;
-};
-
-struct q_inval {
- raw_spinlock_t q_lock;
- struct qi_desc *desc; /* invalidation queue */
- int *desc_status; /* desc status */
- int free_head; /* first free entry */
- int free_tail; /* last free entry */
- int free_cnt;
-};
-
-#ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER 8
-#define INTR_REMAP_TABLE_REG_SIZE 0xf
-#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
-
-#define INTR_REMAP_TABLE_ENTRIES 65536
-
-struct irq_domain;
-
-struct ir_table {
- struct irte *base;
- unsigned long *bitmap;
-};
-#endif
-
-struct iommu_flush {
- void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
- void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-};
-
-enum {
- SR_DMAR_FECTL_REG,
- SR_DMAR_FEDATA_REG,
- SR_DMAR_FEADDR_REG,
- SR_DMAR_FEUADDR_REG,
- MAX_SR_DMAR_REGS
-};
-
-#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
-#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
-
-struct pasid_entry;
-struct pasid_state_entry;
-struct page_req_dsc;
-
-struct intel_iommu {
- void __iomem *reg; /* Pointer to hardware regs, virtual addr */
- u64 reg_phys; /* physical address of hw register set */
- u64 reg_size; /* size of hw register set */
- u64 cap;
- u64 ecap;
- u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
- raw_spinlock_t register_lock; /* protect register handling */
- int seq_id; /* sequence id of the iommu */
- int agaw; /* agaw of this iommu */
- int msagaw; /* max sagaw of this iommu */
- unsigned int irq, pr_irq;
- u16 segment; /* PCI segment# */
- unsigned char name[13]; /* Device Name */
-
-#ifdef CONFIG_INTEL_IOMMU
- unsigned long *domain_ids; /* bitmap of domains */
- struct dmar_domain ***domains; /* ptr to domains */
- spinlock_t lock; /* protect context, domain ids */
- struct root_entry *root_entry; /* virtual address */
-
- struct iommu_flush flush;
-#endif
-#ifdef CONFIG_INTEL_IOMMU_SVM
- /* These are large and need to be contiguous, so we allocate just
- * one for now. We'll maybe want to rethink that if we truly give
- * devices away to userspace processes (e.g. for DPDK) and don't
- * want to trust that userspace will use *only* the PASID it was
- * told to. But while it's all driver-arbitrated, we're fine. */
- struct pasid_entry *pasid_table;
- struct pasid_state_entry *pasid_state_table;
- struct page_req_dsc *prq;
- unsigned char prq_name[16]; /* Name for PRQ interrupt */
- struct idr pasid_idr;
- u32 pasid_max;
-#endif
- struct q_inval *qi; /* Queued invalidation info */
- u32 *iommu_state; /* Store iommu states between suspend and resume.*/
-
-#ifdef CONFIG_IRQ_REMAP
- struct ir_table *ir_table; /* Interrupt remapping info */
- struct irq_domain *ir_domain;
- struct irq_domain *ir_msi_domain;
-#endif
- struct iommu_device iommu; /* IOMMU core code handle */
- int node;
- u32 flags; /* Software defined flags */
-};
-
-static inline void __iommu_flush_cache(
- struct intel_iommu *iommu, void *addr, int size)
-{
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(addr, size);
-}
-
-extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
-extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
-
-extern int dmar_enable_qi(struct intel_iommu *iommu);
-extern void dmar_disable_qi(struct intel_iommu *iommu);
-extern int dmar_reenable_qi(struct intel_iommu *iommu);
-extern void qi_global_iec(struct intel_iommu *iommu);
-
-extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
-extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask);
-
-extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
-
-extern int dmar_ir_support(void);
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu);
-extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
-extern int intel_svm_enable_prq(struct intel_iommu *iommu);
-extern int intel_svm_finish_prq(struct intel_iommu *iommu);
-
-struct svm_dev_ops;
-
-struct intel_svm_dev {
- struct list_head list;
- struct rcu_head rcu;
- struct device *dev;
- struct svm_dev_ops *ops;
- int users;
- u16 did;
- u16 dev_iotlb:1;
- u16 sid, qdep;
-};
-
-struct intel_svm {
- struct mmu_notifier notifier;
- struct mm_struct *mm;
- struct intel_iommu *iommu;
- int flags;
- int pasid;
- struct list_head devs;
-};
-
-extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
-extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
-#endif
-
-extern const struct attribute_group *intel_iommu_groups[];
-
-#endif
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
new file mode 100644
index 000000000000..2cd4f65aaa37
--- /dev/null
+++ b/include/linux/intel-ish-client-if.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel ISH client Interface definitions
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef _INTEL_ISH_CLIENT_IF_H_
+#define _INTEL_ISH_CLIENT_IF_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct ishtp_cl_device;
+struct ishtp_device;
+struct ishtp_cl;
+struct ishtp_fw_client;
+
+typedef __printf(2, 3) void (*ishtp_print_log)(struct ishtp_device *dev,
+ const char *format, ...);
+
+/* Client state */
+enum cl_state {
+ ISHTP_CL_INITIALIZING = 0,
+ ISHTP_CL_CONNECTING,
+ ISHTP_CL_CONNECTED,
+ ISHTP_CL_DISCONNECTING,
+ ISHTP_CL_DISCONNECTED
+};
+
+/**
+ * struct ishtp_cl_driver - ISHTP client driver
+ * @driver: driver instance on a bus
+ * @name: Name of the driver for probe
+ * @id: ishtp device id table this driver can handle
+ * @probe: driver callback for device probe
+ * @remove: driver callback on device removal
+ * @reset: driver callback on device reset
+ * @pm: device power management operations
+ *
+ * Client drivers define this to get probed/removed for an ISHTP client device.
+ */
+struct ishtp_cl_driver {
+ struct device_driver driver;
+ const char *name;
+ const struct ishtp_device_id *id;
+ int (*probe)(struct ishtp_cl_device *dev);
+ void (*remove)(struct ishtp_cl_device *dev);
+ int (*reset)(struct ishtp_cl_device *dev);
+ const struct dev_pm_ops *pm;
+};
+
+/**
+ * struct ishtp_msg_data - ISHTP message data struct
+ * @size: Size of data in the *data
+ * @data: Pointer to data
+ */
+struct ishtp_msg_data {
+ uint32_t size;
+ unsigned char *data;
+};
+
+/*
+ * struct ishtp_cl_rb - request block structure
+ * @list: Link to list members
+ * @cl: ISHTP client instance
+ * @buffer: message header
+ * @buf_idx: Index into buffer
+ * @read_time: unused at this time
+ */
+struct ishtp_cl_rb {
+ struct list_head list;
+ struct ishtp_cl *cl;
+ struct ishtp_msg_data buffer;
+ unsigned long buf_idx;
+ unsigned long read_time;
+};
+
+int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner);
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*read_cb)(struct ishtp_cl_device *));
+
+/* Get the device * from ishtp device instance */
+struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* wait for IPC resume */
+bool ishtp_wait_resume(struct ishtp_device *dev);
+/* Trace interface for clients */
+ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
+/* Get device pointer of PCI device for DMA access */
+struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
+/* Get the ISHTP workqueue */
+struct workqueue_struct *ishtp_get_workqueue(struct ishtp_cl_device *cl_device);
+
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device);
+void ishtp_cl_free(struct ishtp_cl *cl);
+int ishtp_cl_link(struct ishtp_cl *cl);
+void ishtp_cl_unlink(struct ishtp_cl *cl);
+int ishtp_cl_disconnect(struct ishtp_cl *cl);
+int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_establish_connection(struct ishtp_cl *cl, const guid_t *uuid,
+ int tx_size, int rx_size, bool reset);
+void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset);
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
+int ishtp_cl_flush_queues(struct ishtp_cl *cl);
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
+void *ishtp_get_client_data(struct ishtp_cl *cl);
+void ishtp_set_client_data(struct ishtp_cl *cl, void *data);
+struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl);
+void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size);
+void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size);
+void ishtp_set_connection_state(struct ishtp_cl *cl, int state);
+int ishtp_get_connection_state(struct ishtp_cl *cl);
+void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id);
+
+void ishtp_put_device(struct ishtp_cl_device *cl_dev);
+void ishtp_get_device(struct ishtp_cl_device *cl_dev);
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
+struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *dev);
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const guid_t *uuid);
+int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client);
+int ish_hw_reset(struct ishtp_device *dev);
+#endif /* _INTEL_ISH_CLIENT_IF_H_ */
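
As a rough illustration of the interface above, a minimal ISHTP client driver
might look like the sketch below. The driver name and callbacks are
hypothetical, not taken from an in-tree driver, and error handling is reduced
to the essentials.

#include <linux/intel-ish-client-if.h>
#include <linux/module.h>

static int example_ishtp_probe(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl;

	/* Allocate a client handle and stash it for the device lifetime. */
	cl = ishtp_cl_allocate(cl_device);
	if (!cl)
		return -ENOMEM;

	ishtp_set_drvdata(cl_device, cl);
	return 0;
}

static void example_ishtp_remove(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);

	ishtp_cl_free(cl);
}

static struct ishtp_cl_driver example_ishtp_driver = {
	.name	= "example-ishtp",
	.probe	= example_ishtp_probe,
	.remove	= example_ishtp_remove,
};

static int __init example_ishtp_init(void)
{
	return ishtp_cl_driver_register(&example_ishtp_driver, THIS_MODULE);
}
module_init(example_ishtp_init);

static void __exit example_ishtp_exit(void)
{
	ishtp_cl_driver_unregister(&example_ishtp_driver);
}
module_exit(example_ishtp_exit);

MODULE_LICENSE("GPL");
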
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
deleted file mode 100644
index 99bc5b3ae26e..000000000000
--- a/include/linux/intel-svm.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright © 2015 Intel Corporation.
- *
- * Authors: David Woodhouse <David.Woodhouse@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __INTEL_SVM_H__
-#define __INTEL_SVM_H__
-
-struct device;
-
-struct svm_dev_ops {
- void (*fault_cb)(struct device *dev, int pasid, u64 address,
- u32 private, int rwxp, int response);
-};
-
-/* Values for rxwp in fault_cb callback */
-#define SVM_REQ_READ (1<<3)
-#define SVM_REQ_WRITE (1<<2)
-#define SVM_REQ_EXEC (1<<1)
-#define SVM_REQ_PRIV (1<<0)
-
-
-/*
- * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
- * PASID for the current process. Even if a PASID already exists, a new one
- * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID
- * will not be given to subsequent callers. This facility allows a driver to
- * disambiguate between multiple device contexts which access the same MM,
- * if there is no other way to do so. It should be used sparingly, if at all.
- */
-#define SVM_FLAG_PRIVATE_PASID (1<<0)
-
-/*
- * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only
- * for access to kernel addresses. No IOTLB flushes are automatically done
- * for kernel mappings; it is valid only for access to the kernel's static
- * 1:1 mapping of physical memory — not to vmalloc or even module mappings.
- * A future API addition may permit the use of such ranges, by means of an
- * explicit IOTLB flush call (akin to the DMA API's unmap method).
- *
- * It is unlikely that we will ever hook into flush_tlb_kernel_range() to
- * do such IOTLB flushes automatically.
- */
-#define SVM_FLAG_SUPERVISOR_MODE (1<<1)
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-
-/**
- * intel_svm_bind_mm() - Bind the current process to a PASID
- * @dev: Device to be granted acccess
- * @pasid: Address for allocated PASID
- * @flags: Flags. Later for requesting supervisor mode, etc.
- * @ops: Callbacks to device driver
- *
- * This function attempts to enable PASID support for the given device.
- * If the @pasid argument is non-%NULL, a PASID is allocated for access
- * to the MM of the current process.
- *
- * By using a %NULL value for the @pasid argument, this function can
- * be used to simply validate that PASID support is available for the
- * given device — i.e. that it is behind an IOMMU which has the
- * requisite support, and is enabled.
- *
- * Page faults are handled transparently by the IOMMU code, and there
- * should be no need for the device driver to be involved. If a page
- * fault cannot be handled (i.e. is an invalid address rather than
- * just needs paging in), then the page request will be completed by
- * the core IOMMU code with appropriate status, and the device itself
- * can then report the resulting fault to its driver via whatever
- * mechanism is appropriate.
- *
- * Multiple calls from the same process may result in the same PASID
- * being re-used. A reference count is kept.
- */
-extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
- struct svm_dev_ops *ops);
-
-/**
- * intel_svm_unbind_mm() - Unbind a specified PASID
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be unbound
- *
- * This function allows a PASID to be retired when the device no
- * longer requires access to the address space of a given process.
- *
- * If the use count for the PASID in question reaches zero, the
- * PASID is revoked and may no longer be used by hardware.
- *
- * Device drivers are required to ensure that no access (including
- * page requests) is currently outstanding for the PASID in question,
- * before calling this function.
- */
-extern int intel_svm_unbind_mm(struct device *dev, int pasid);
-
-/**
- * intel_svm_is_pasid_valid() - check if pasid is valid
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be checked
- *
- * This function checks if the specified pasid is still valid. A
- * valid pasid means the backing mm is still having a valid user.
- * For kernel callers init_mm is always valid. for other mm, if mm->mm_users
- * is non-zero, it is valid.
- *
- * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid
- * 1 if pasid is valid.
- */
-extern int intel_svm_is_pasid_valid(struct device *dev, int pasid);
-
-#else /* CONFIG_INTEL_IOMMU_SVM */
-
-static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
- int flags, struct svm_dev_ops *ops)
-{
- return -ENOSYS;
-}
-
-static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
-{
- BUG();
-}
-
-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
-#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
-
-#endif /* __INTEL_SVM_H__ */
diff --git a/include/linux/intel_dg_nvm_aux.h b/include/linux/intel_dg_nvm_aux.h
new file mode 100644
index 000000000000..625d46a6b96e
--- /dev/null
+++ b/include/linux/intel_dg_nvm_aux.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_DG_NVM_AUX_H__
+#define __INTEL_DG_NVM_AUX_H__
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+
+#define INTEL_DG_NVM_REGIONS 13
+
+struct intel_dg_nvm_region {
+ const char *name;
+};
+
+struct intel_dg_nvm_dev {
+ struct auxiliary_device aux_dev;
+ bool writable_override;
+ bool non_posted_erase;
+ struct resource bar;
+ struct resource bar2;
+ const struct intel_dg_nvm_region *regions;
+};
+
+#define auxiliary_dev_to_intel_dg_nvm_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct intel_dg_nvm_dev, aux_dev)
+
+#endif /* __INTEL_DG_NVM_AUX_H__ */
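
A brief, hypothetical sketch of how an auxiliary driver bound to this device
would recover the intel_dg_nvm_dev in its probe callback via the
container_of() helper defined above (the function name and log text are
illustrative):

static int example_nvm_probe(struct auxiliary_device *aux_dev,
			     const struct auxiliary_device_id *aux_dev_id)
{
	/* Map back from the generic auxiliary device to the NVM wrapper. */
	struct intel_dg_nvm_dev *nvm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);

	dev_info(&aux_dev->dev, "NVM bar %pR, non-posted erase: %d\n",
		 &nvm->bar, nvm->non_posted_erase);
	return 0;
}
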
diff --git a/include/linux/intel_pmt_features.h b/include/linux/intel_pmt_features.h
new file mode 100644
index 000000000000..53573a4a49b7
--- /dev/null
+++ b/include/linux/intel_pmt_features.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FEATURES_H
+#define _FEATURES_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Common masks */
+#define PMT_CAP_TELEM BIT(0)
+#define PMT_CAP_WATCHER BIT(1)
+#define PMT_CAP_CRASHLOG BIT(2)
+#define PMT_CAP_STREAMING BIT(3)
+#define PMT_CAP_THRESHOLD BIT(4)
+#define PMT_CAP_WINDOW BIT(5)
+#define PMT_CAP_CONFIG BIT(6)
+#define PMT_CAP_TRACING BIT(7)
+#define PMT_CAP_INBAND BIT(8)
+#define PMT_CAP_OOB BIT(9)
+#define PMT_CAP_SECURED_CHAN BIT(10)
+
+#define PMT_CAP_PMT_SP BIT(11)
+#define PMT_CAP_PMT_SP_POLICY GENMASK(17, 12)
+
+/* Per Core Performance Telemetry (PCPT) specific masks */
+#define PMT_CAP_PCPT_CORE_PERF BIT(18)
+#define PMT_CAP_PCPT_CORE_C0_RES BIT(19)
+#define PMT_CAP_PCPT_CORE_ACTIVITY BIT(20)
+#define PMT_CAP_PCPT_CACHE_PERF BIT(21)
+#define PMT_CAP_PCPT_QUALITY_TELEM BIT(22)
+
+/* Per Core Environmental Telemetry (PCET) specific masks */
+#define PMT_CAP_PCET_WORKPOINT_HIST BIT(18)
+#define PMT_CAP_PCET_CORE_CURR_TEMP BIT(19)
+#define PMT_CAP_PCET_CORE_INST_RES BIT(20)
+#define PMT_CAP_PCET_QUALITY_TELEM BIT(21) /* Same as PMT_CAP_PCPT */
+#define PMT_CAP_PCET_CORE_CDYN_LVL BIT(22)
+#define PMT_CAP_PCET_CORE_STRESS_LVL BIT(23)
+#define PMT_CAP_PCET_CORE_DAS BIT(24)
+#define PMT_CAP_PCET_FIVR_HEALTH BIT(25)
+#define PMT_CAP_PCET_ENERGY BIT(26)
+#define PMT_CAP_PCET_PEM_STATUS BIT(27)
+#define PMT_CAP_PCET_CORE_C_STATE BIT(28)
+
+/* Per RMID Performance Telemetry specific masks */
+#define PMT_CAP_RMID_CORES_PERF BIT(18)
+#define PMT_CAP_RMID_CACHE_PERF BIT(19)
+#define PMT_CAP_RMID_PERF_QUAL BIT(20)
+
+/* Accelerator Telemetry specific masks */
+#define PMT_CAP_ACCEL_CPM_TELEM BIT(18)
+#define PMT_CAP_ACCEL_TIP_TELEM BIT(19)
+
+/* Uncore Telemetry specific masks */
+#define PMT_CAP_UNCORE_IO_CA_TELEM BIT(18)
+#define PMT_CAP_UNCORE_RMID_TELEM BIT(19)
+#define PMT_CAP_UNCORE_D2D_ULA_TELEM BIT(20)
+#define PMT_CAP_UNCORE_PKGC_TELEM BIT(21)
+
+/* Crash Log specific masks */
+#define PMT_CAP_CRASHLOG_MAN_TRIG BIT(11)
+#define PMT_CAP_CRASHLOG_CORE BIT(12)
+#define PMT_CAP_CRASHLOG_UNCORE BIT(13)
+#define PMT_CAP_CRASHLOG_TOR BIT(14)
+#define PMT_CAP_CRASHLOG_S3M BIT(15)
+#define PMT_CAP_CRASHLOG_PERSISTENCY BIT(16)
+#define PMT_CAP_CRASHLOG_CLIP_GPIO BIT(17)
+#define PMT_CAP_CRASHLOG_PRE_RESET BIT(18)
+#define PMT_CAP_CRASHLOG_POST_RESET BIT(19)
+
+/* PeTe Log specific masks */
+#define PMT_CAP_PETE_MAN_TRIG BIT(11)
+#define PMT_CAP_PETE_ENCRYPTION BIT(12)
+#define PMT_CAP_PETE_PERSISTENCY BIT(13)
+#define PMT_CAP_PETE_REQ_TOKENS BIT(14)
+#define PMT_CAP_PETE_PROD_ENABLED BIT(15)
+#define PMT_CAP_PETE_DEBUG_ENABLED BIT(16)
+
+/* TPMI control specific masks */
+#define PMT_CAP_TPMI_MAILBOX BIT(11)
+#define PMT_CAP_TPMI_LOCK BIT(12)
+
+/* Tracing specific masks */
+#define PMT_CAP_TRACE_SRAR BIT(11)
+#define PMT_CAP_TRACE_CORRECTABLE BIT(12)
+#define PMT_CAP_TRACE_MCTP BIT(13)
+#define PMT_CAP_TRACE_MRT BIT(14)
+
+/* Per RMID Energy Telemetry specific masks */
+#define PMT_CAP_RMID_ENERGY BIT(18)
+#define PMT_CAP_RMID_ACTIVITY BIT(19)
+#define PMT_CAP_RMID_ENERGY_QUAL BIT(20)
+
+enum pmt_feature_id {
+ FEATURE_INVALID = 0x0,
+ FEATURE_PER_CORE_PERF_TELEM = 0x1,
+ FEATURE_PER_CORE_ENV_TELEM = 0x2,
+ FEATURE_PER_RMID_PERF_TELEM = 0x3,
+ FEATURE_ACCEL_TELEM = 0x4,
+ FEATURE_UNCORE_TELEM = 0x5,
+ FEATURE_CRASH_LOG = 0x6,
+ FEATURE_PETE_LOG = 0x7,
+ FEATURE_TPMI_CTRL = 0x8,
+ FEATURE_RESERVED = 0x9,
+ FEATURE_TRACING = 0xA,
+ FEATURE_PER_RMID_ENERGY_TELEM = 0xB,
+ FEATURE_MAX = 0xB,
+};
+
+enum feature_layout {
+ LAYOUT_RMID,
+ LAYOUT_WATCHER,
+ LAYOUT_COMMAND,
+ LAYOUT_CAPS_ONLY,
+};
+
+struct pmt_cap {
+ u32 mask;
+ const char *name;
+};
+
+extern const char * const pmt_feature_names[];
+extern enum feature_layout feature_layout[];
+extern struct pmt_cap pmt_cap_common[];
+extern struct pmt_cap pmt_cap_pcpt[];
+extern struct pmt_cap *pmt_caps_pcpt[];
+extern struct pmt_cap pmt_cap_pcet[];
+extern struct pmt_cap *pmt_caps_pcet[];
+extern struct pmt_cap pmt_cap_rmid_perf[];
+extern struct pmt_cap *pmt_caps_rmid_perf[];
+extern struct pmt_cap pmt_cap_accel[];
+extern struct pmt_cap *pmt_caps_accel[];
+extern struct pmt_cap pmt_cap_uncore[];
+extern struct pmt_cap *pmt_caps_uncore[];
+extern struct pmt_cap pmt_cap_crashlog[];
+extern struct pmt_cap *pmt_caps_crashlog[];
+extern struct pmt_cap pmt_cap_pete[];
+extern struct pmt_cap *pmt_caps_pete[];
+extern struct pmt_cap pmt_cap_tpmi[];
+extern struct pmt_cap *pmt_caps_tpmi[];
+extern struct pmt_cap pmt_cap_s3m[];
+extern struct pmt_cap *pmt_caps_s3m[];
+extern struct pmt_cap pmt_cap_tracing[];
+extern struct pmt_cap *pmt_caps_tracing[];
+extern struct pmt_cap pmt_cap_rmid_energy[];
+extern struct pmt_cap *pmt_caps_rmid_energy[];
+
+static inline bool pmt_feature_id_is_valid(enum pmt_feature_id id)
+{
+ if (id > FEATURE_MAX)
+ return false;
+
+ if (id == FEATURE_INVALID || id == FEATURE_RESERVED)
+ return false;
+
+ return true;
+}
+#endif
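
To make the intent of pmt_feature_id_is_valid() and the capability masks
concrete, a consumer might scan the id space and test a capability word as
sketched below. The caps value would normally come from hardware discovery;
here it is purely illustrative.

static void example_dump_pmt_caps(u32 caps)
{
	enum pmt_feature_id id;

	/* FEATURE_INVALID and FEATURE_RESERVED are skipped by the helper. */
	for (id = FEATURE_PER_CORE_PERF_TELEM; id <= FEATURE_MAX; id++)
		if (pmt_feature_id_is_valid(id))
			pr_info("PMT feature 0x%x is defined\n", id);

	if (caps & PMT_CAP_TELEM)
		pr_info("telemetry capability present\n");
	if (caps & PMT_CAP_CRASHLOG)
		pr_info("crashlog capability present\n");
}
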
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
new file mode 100644
index 000000000000..e9ade2ff4af6
--- /dev/null
+++ b/include/linux/intel_rapl.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data types and headers for RAPL support
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ *
+ * Author: Zhang Rui <rui.zhang@intel.com>
+ */
+
+#ifndef __INTEL_RAPL_H__
+#define __INTEL_RAPL_H__
+
+#include <linux/types.h>
+#include <linux/powercap.h>
+#include <linux/cpuhotplug.h>
+
+enum rapl_if_type {
+ RAPL_IF_MSR, /* RAPL I/F using MSR registers */
+ RAPL_IF_MMIO, /* RAPL I/F using MMIO registers */
+ RAPL_IF_TPMI, /* RAPL I/F using TPMI registers */
+};
+
+enum rapl_domain_type {
+ RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+ RAPL_DOMAIN_PP0, /* core power plane */
+ RAPL_DOMAIN_PP1, /* graphics uncore */
+ RAPL_DOMAIN_DRAM, /* DRAM control_type */
+ RAPL_DOMAIN_PLATFORM, /* PSys control_type */
+ RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_reg_id {
+ RAPL_DOMAIN_REG_LIMIT,
+ RAPL_DOMAIN_REG_STATUS,
+ RAPL_DOMAIN_REG_PERF,
+ RAPL_DOMAIN_REG_POLICY,
+ RAPL_DOMAIN_REG_INFO,
+ RAPL_DOMAIN_REG_PL4,
+ RAPL_DOMAIN_REG_UNIT,
+ RAPL_DOMAIN_REG_PL2,
+ RAPL_DOMAIN_REG_MAX,
+};
+
+struct rapl_domain;
+
+enum rapl_primitives {
+ POWER_LIMIT1,
+ POWER_LIMIT2,
+ POWER_LIMIT4,
+ ENERGY_COUNTER,
+ FW_LOCK,
+ FW_HIGH_LOCK,
+ PL1_LOCK,
+ PL2_LOCK,
+ PL4_LOCK,
+
+ PL1_ENABLE, /* power limit 1, aka long term */
+ PL1_CLAMP, /* allow frequency to go below OS request */
+ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
+ PL2_CLAMP,
+ PL4_ENABLE, /* power limit 4, aka max peak power */
+
+ TIME_WINDOW1, /* long term */
+ TIME_WINDOW2, /* short term */
+ THERMAL_SPEC_POWER,
+ MAX_POWER,
+
+ MIN_POWER,
+ MAX_TIME_WINDOW,
+ THROTTLED_TIME,
+ PRIORITY_LEVEL,
+
+ PSYS_POWER_LIMIT1,
+ PSYS_POWER_LIMIT2,
+ PSYS_PL1_ENABLE,
+ PSYS_PL2_ENABLE,
+ PSYS_TIME_WINDOW1,
+ PSYS_TIME_WINDOW2,
+ /* below are not raw primitive data */
+ AVERAGE_POWER,
+ NR_RAPL_PRIMITIVES,
+};
+
+struct rapl_domain_data {
+ u64 primitives[NR_RAPL_PRIMITIVES];
+ unsigned long timestamp;
+};
+
+#define NR_POWER_LIMITS (POWER_LIMIT4 + 1)
+
+struct rapl_power_limit {
+ struct powercap_zone_constraint *constraint;
+ struct rapl_domain *domain;
+ const char *name;
+ bool locked;
+ u64 last_power_limit;
+};
+
+struct rapl_package;
+
+#define RAPL_DOMAIN_NAME_LENGTH 16
+
+union rapl_reg {
+ void __iomem *mmio;
+ u32 msr;
+ u64 val;
+};
+
+struct rapl_domain {
+ char name[RAPL_DOMAIN_NAME_LENGTH];
+ enum rapl_domain_type id;
+ union rapl_reg regs[RAPL_DOMAIN_REG_MAX];
+ struct powercap_zone power_zone;
+ struct rapl_domain_data rdd;
+ struct rapl_power_limit rpl[NR_POWER_LIMITS];
+ u64 attr_map; /* track capabilities */
+ unsigned int state;
+ unsigned int power_unit;
+ unsigned int energy_unit;
+ unsigned int time_unit;
+ struct rapl_package *rp;
+};
+
+struct reg_action {
+ union rapl_reg reg;
+ u64 mask;
+ u64 value;
+ int err;
+};
+
+/**
+ * struct rapl_if_priv: private data for different RAPL interfaces
+ * @type: RAPL interface type (MSR, MMIO or TPMI).
+ * @control_type: Each RAPL interface must have its own powercap
+ * control type.
+ * @pcap_rapl_online: CPU hotplug state for each RAPL interface.
+ * @reg_unit: Register for getting energy/power/time unit.
+ * @regs: Register sets for different RAPL Domains.
+ * @limits: Number of power limits supported by each domain.
+ * @read_raw: Callback for reading RAPL interface specific
+ * registers.
+ * @write_raw: Callback for writing RAPL interface specific
+ * registers.
+ * @defaults: internal pointer to interface default settings
+ * @rpi: internal pointer to interface primitive info
+ */
+struct rapl_if_priv {
+ enum rapl_if_type type;
+ struct powercap_control_type *control_type;
+ enum cpuhp_state pcap_rapl_online;
+ union rapl_reg reg_unit;
+ union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
+ int limits[RAPL_DOMAIN_MAX];
+ int (*read_raw)(int id, struct reg_action *ra, bool atomic);
+ int (*write_raw)(int id, struct reg_action *ra);
+ void *defaults;
+ void *rpi;
+};
+
+#ifdef CONFIG_PERF_EVENTS
+/**
+ * struct rapl_package_pmu_data: Per package data for PMU support
+ * @scale: Scale of 2^-32 Joules for each energy counter increase.
+ * @lock: Lock to protect n_active and active_list.
+ * @n_active: Number of active events.
+ * @active_list: List of active events.
+ * @timer_interval: Maximum timer expiration time before counter overflow.
+ * @hrtimer: Periodically update the counter to prevent overflow.
+ */
+struct rapl_package_pmu_data {
+ u64 scale[RAPL_DOMAIN_MAX];
+ raw_spinlock_t lock;
+ int n_active;
+ struct list_head active_list;
+ ktime_t timer_interval;
+ struct hrtimer hrtimer;
+};
+#endif
+
+/* maximum rapl package domain name: package-%d-die-%d */
+#define PACKAGE_DOMAIN_NAME_LENGTH 30
+
+struct rapl_package {
+	unsigned int id; /* logical die id, equals the physical die id on 1-die systems */
+ unsigned int nr_domains;
+ unsigned long domain_map; /* bit map of active domains */
+ struct rapl_domain *domains; /* array of domains, sized at runtime */
+ struct powercap_zone *power_zone; /* keep track of parent zone */
+ unsigned long power_limit_irq; /* keep track of package power limit
+ * notify interrupt enable status.
+ */
+ struct list_head plist;
+ int lead_cpu; /* one active cpu per package for access */
+ /* Track active cpus */
+ struct cpumask cpumask;
+ char name[PACKAGE_DOMAIN_NAME_LENGTH];
+ struct rapl_if_priv *priv;
+#ifdef CONFIG_PERF_EVENTS
+ bool has_pmu;
+ struct rapl_package_pmu_data pmu_data;
+#endif
+};
+
+struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu);
+struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu);
+void rapl_remove_package_cpuslocked(struct rapl_package *rp);
+
+struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu);
+struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu);
+void rapl_remove_package(struct rapl_package *rp);
+
+#ifdef CONFIG_PERF_EVENTS
+int rapl_package_add_pmu(struct rapl_package *rp);
+void rapl_package_remove_pmu(struct rapl_package *rp);
+#else
+static inline int rapl_package_add_pmu(struct rapl_package *rp) { return 0; }
+static inline void rapl_package_remove_pmu(struct rapl_package *rp) { }
+#endif
+
+#endif /* __INTEL_RAPL_H__ */
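
As a hedged sketch of how an interface driver might use these hooks: fill in
a struct rapl_if_priv with register accessors, then look up or add the
package for a CPU. The callback bodies, names, and the assumption that
rapl_find_package_domain() returns NULL when no package exists are
illustrative only.

static int example_read_raw(int id, struct reg_action *ra, bool atomic)
{
	/* Read the register described by ra->reg into ra->value here. */
	ra->value = 0;
	return 0;
}

static int example_write_raw(int id, struct reg_action *ra)
{
	/* Apply ra->mask/ra->value to the register described by ra->reg. */
	return 0;
}

static struct rapl_if_priv example_rapl_priv = {
	.type		= RAPL_IF_MSR,
	.read_raw	= example_read_raw,
	.write_raw	= example_write_raw,
};

static int example_add_rapl_package(int cpu)
{
	struct rapl_package *rp;

	rp = rapl_find_package_domain(cpu, &example_rapl_priv, true);
	if (!rp)
		rp = rapl_add_package(cpu, &example_rapl_priv, true);

	return IS_ERR(rp) ? PTR_ERR(rp) : 0;
}
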
diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h
new file mode 100644
index 000000000000..fa788817acfc
--- /dev/null
+++ b/include/linux/intel_tcc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * header for Intel TCC (thermal control circuitry) library
+ *
+ * Copyright (C) 2022 Intel Corporation.
+ */
+
+#ifndef __INTEL_TCC_H__
+#define __INTEL_TCC_H__
+
+#include <linux/types.h>
+
+int intel_tcc_get_tjmax(int cpu);
+int intel_tcc_get_offset(int cpu);
+int intel_tcc_set_offset(int cpu, int offset);
+int intel_tcc_get_temp(int cpu, int *temp, bool pkg);
+u32 intel_tcc_get_offset_mask(void);
+
+#endif /* __INTEL_TCC_H__ */
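
A small usage sketch: the temperature at which throttling begins is Tjmax
minus the programmed TCC offset. This assumes, as the rest of this library
suggests, that both helpers return a value in degrees Celsius or a negative
errno.

static int example_throttle_temp(int cpu)
{
	int tjmax, offset;

	tjmax = intel_tcc_get_tjmax(cpu);
	if (tjmax < 0)
		return tjmax;

	offset = intel_tcc_get_offset(cpu);
	if (offset < 0)
		return offset;

	/* Throttling starts at Tjmax - offset degrees Celsius. */
	return tjmax - offset;
}
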
diff --git a/include/linux/intel_th.h b/include/linux/intel_th.h
new file mode 100644
index 000000000000..9b7f4c22499c
--- /dev/null
+++ b/include/linux/intel_th.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel(R) Trace Hub data structures for implementing buffer sinks.
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ */
+
+#ifndef _INTEL_TH_H_
+#define _INTEL_TH_H_
+
+#include <linux/scatterlist.h>
+
+/* MSC operating modes (MSC_MODE) */
+enum {
+ MSC_MODE_SINGLE = 0,
+ MSC_MODE_MULTI,
+ MSC_MODE_EXI,
+ MSC_MODE_DEBUG,
+};
+
+struct msu_buffer {
+ const char *name;
+ /*
+ * ->assign() called when buffer 'mode' is set to this driver
+ * (aka mode_store())
+ * @dev: struct device * of the msc
+ * @mode: allows the driver to set HW mode (see the enum above)
+ * Returns: a pointer to a private structure associated with this
+ * msc or NULL in case of error. This private structure
+ * will then be passed into all other callbacks.
+ */
+ void *(*assign)(struct device *dev, int *mode);
+ /* ->unassign(): some other mode is selected, clean up */
+ void (*unassign)(void *priv);
+ /*
+ * ->alloc_window(): allocate memory for the window of a given
+ * size
+ * @sgt: pointer to sg_table, can be overridden by the buffer
+ * driver, or kept intact
+ * Returns: number of sg table entries <= number of pages;
+ * 0 is treated as an allocation failure.
+ */
+ int (*alloc_window)(void *priv, struct sg_table **sgt,
+ size_t size);
+ void (*free_window)(void *priv, struct sg_table *sgt);
+ /* ->activate(): trace has started */
+ void (*activate)(void *priv);
+ /* ->deactivate(): trace is about to stop */
+ void (*deactivate)(void *priv);
+ /*
+ * ->ready(): window @sgt is filled up to the last block OR
+ * tracing is stopped by the user; this window contains
+ * @bytes data. The window in question transitions into
+ * the "LOCKED" state, indicating that it can't be used
+ * by hardware. To clear this state and make the window
+ * available to the hardware again, call
+ * intel_th_msc_window_unlock().
+ */
+ int (*ready)(void *priv, struct sg_table *sgt, size_t bytes);
+};
+
+int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
+ struct module *owner);
+void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf);
+void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt);
+
+#define module_intel_th_msu_buffer(__buffer) \
+static int __init __buffer##_init(void) \
+{ \
+ return intel_th_msu_buffer_register(&(__buffer), THIS_MODULE); \
+} \
+module_init(__buffer##_init); \
+static void __exit __buffer##_exit(void) \
+{ \
+ intel_th_msu_buffer_unregister(&(__buffer)); \
+} \
+module_exit(__buffer##_exit);
+
+#endif /* _INTEL_TH_H_ */
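
Tying the callbacks and the helper macro together, a trivial buffer-sink
module might look like the sketch below. The names are hypothetical and the
callbacks do no real work; a real sink would allocate windows and consume
data in ->ready().

#include <linux/intel_th.h>
#include <linux/module.h>
#include <linux/slab.h>

static void *example_assign(struct device *dev, int *mode)
{
	*mode = MSC_MODE_MULTI;
	/* Placeholder private context passed to the other callbacks. */
	return kzalloc(sizeof(int), GFP_KERNEL);
}

static void example_unassign(void *priv)
{
	kfree(priv);
}

static int example_ready(void *priv, struct sg_table *sgt, size_t bytes)
{
	/* Consume @bytes of data from @sgt, then unlock the window. */
	return 0;
}

static const struct msu_buffer example_mbuf = {
	.name		= "example",
	.assign		= example_assign,
	.unassign	= example_unassign,
	.ready		= example_ready,
};

module_intel_th_msu_buffer(example_mbuf);
MODULE_LICENSE("GPL");
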
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
new file mode 100644
index 000000000000..94c06bf214fb
--- /dev/null
+++ b/include/linux/intel_tpmi.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * intel_tpmi.h: Intel TPMI core external interface
+ */
+
+#ifndef _INTEL_TPMI_H_
+#define _INTEL_TPMI_H_
+
+#include <linux/bitfield.h>
+
+struct oobmsm_plat_info;
+
+#define TPMI_VERSION_INVALID 0xff
+#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val)
+#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val)
+
+/*
+ * List of supported TPMI IDs.
+ * Some TPMI IDs are not used by Linux, so the numbers are not consecutive.
+ */
+enum intel_tpmi_id {
+ TPMI_ID_RAPL = 0, /* Running Average Power Limit */
+ TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
+ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
+ TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_ID_PLR = 0xc, /* Performance Limit Reasons */
+ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
+ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
+};
+
+struct oobmsm_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev);
+struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
+int tpmi_get_resource_count(struct auxiliary_device *auxdev);
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked,
+ bool *write_blocked);
+struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev);
+#endif
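
For illustration, decoding a TPMI version byte with the field macros above
(the value is made up):

static void example_tpmi_version(u8 val)
{
	if (val == TPMI_VERSION_INVALID)
		return;

	/* Bits 7:5 carry the major version, bits 4:0 the minor version. */
	pr_info("TPMI version %lu.%lu\n",
		TPMI_MAJOR_VERSION(val), TPMI_MINOR_VERSION(val));
}
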
diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h
new file mode 100644
index 000000000000..53f6fe88e369
--- /dev/null
+++ b/include/linux/intel_vsec.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_VSEC_H
+#define _INTEL_VSEC_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/intel_pmt_features.h>
+
+/*
+ * VSEC_CAP_UNUSED is reserved. It exists to prevent zero-initialized
+ * intel_vsec devices from being automatically set to a known
+ * capability with ID 0.
+ */
+#define VSEC_CAP_UNUSED BIT(0)
+#define VSEC_CAP_TELEMETRY BIT(1)
+#define VSEC_CAP_WATCHER BIT(2)
+#define VSEC_CAP_CRASHLOG BIT(3)
+#define VSEC_CAP_SDSI BIT(4)
+#define VSEC_CAP_TPMI BIT(5)
+#define VSEC_CAP_DISCOVERY BIT(6)
+#define VSEC_FEATURE_COUNT 7
+
+/* Intel DVSEC offsets */
+#define INTEL_DVSEC_ENTRIES 0xA
+#define INTEL_DVSEC_SIZE 0xB
+#define INTEL_DVSEC_TABLE 0xC
+#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
+#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
+#define TABLE_OFFSET_SHIFT 3
+
+struct pci_dev;
+struct resource;
+
+enum intel_vsec_id {
+ VSEC_ID_TELEMETRY = 2,
+ VSEC_ID_WATCHER = 3,
+ VSEC_ID_CRASHLOG = 4,
+ VSEC_ID_DISCOVERY = 12,
+ VSEC_ID_SDSI = 65,
+ VSEC_ID_TPMI = 66,
+};
+
+/**
+ * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
+ * @rev: Revision ID of the VSEC/DVSEC register space
+ * @length: Length of the VSEC/DVSEC register space
+ * @id: ID of the feature
+ * @num_entries: Number of instances of the feature
+ * @entry_size: Size of the discovery table for each feature
+ * @tbir: BAR containing the discovery tables
+ * @offset: BAR offset of start of the first discovery table
+ */
+struct intel_vsec_header {
+ u8 rev;
+ u16 length;
+ u16 id;
+ u8 num_entries;
+ u8 entry_size;
+ u8 tbir;
+ u32 offset;
+};
+
+enum intel_vsec_quirks {
+ /* Watcher feature not supported */
+ VSEC_QUIRK_NO_WATCHER = BIT(0),
+
+ /* Crashlog feature not supported */
+ VSEC_QUIRK_NO_CRASHLOG = BIT(1),
+
+ /* Use shift instead of mask to read discovery table offset */
+ VSEC_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ VSEC_QUIRK_NO_DVSEC = BIT(3),
+
+ /* Platforms requiring quirk in the auxiliary driver */
+ VSEC_QUIRK_EARLY_HW = BIT(4),
+};
+
+/**
+ * struct pmt_callbacks - Callback infrastructure for PMT devices
+ * @read_telem: when specified, called by the client driver to access PMT
+ *              data (instead of a direct copy). Its arguments are:
+ * @pdev: PCI device reference for the callback's use
+ * @guid: ID of the data to access
+ * @data: buffer for the data to be copied
+ * @off: offset into the requested buffer
+ * @count: size of buffer
+ */
+struct pmt_callbacks {
+ int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count);
+};
+
+struct vsec_feature_dependency {
+ unsigned long feature;
+ unsigned long supplier_bitmap;
+};
+
+/**
+ * struct intel_vsec_platform_info - Platform specific data
+ * @parent: parent device in the auxbus chain
+ * @headers: list of headers to define the PMT client devices to create
+ * @deps: array of feature dependencies
+ * @priv_data: private data, usable by parent devices, currently a callback
+ * @caps: bitmask of PMT capabilities for the given headers
+ * @quirks: bitmask of VSEC device quirks
+ * @base_addr: allow a base address to be specified (rather than derived)
+ * @num_deps: Number of feature dependencies in @deps
+ */
+struct intel_vsec_platform_info {
+ struct device *parent;
+ struct intel_vsec_header **headers;
+ const struct vsec_feature_dependency *deps;
+ void *priv_data;
+ unsigned long caps;
+ unsigned long quirks;
+ u64 base_addr;
+ int num_deps;
+};
+
+/**
+ * struct intel_vsec_device - Auxbus specific device information
+ * @auxdev: auxbus device struct for auxbus access
+ * @pcidev: pci device associated with the device
+ * @resource: any resources shared by the parent
+ * @ida: id reference
+ * @num_resources: number of resources
+ * @id: xarray id
+ * @priv_data: any private data needed
+ * @priv_data_size: size of the private data
+ * @quirks: specified quirks
+ * @base_addr: base address of entries (if specified)
+ * @cap_id: the enumerated id of the vsec feature
+ */
+struct intel_vsec_device {
+ struct auxiliary_device auxdev;
+ struct pci_dev *pcidev;
+ struct resource *resource;
+ struct ida *ida;
+ int num_resources;
+ int id; /* xa */
+ void *priv_data;
+ size_t priv_data_size;
+ unsigned long quirks;
+ u64 base_addr;
+ unsigned long cap_id;
+};
+
+/**
+ * struct oobmsm_plat_info - Platform information for a device instance
+ * @cdie_mask: Mask of all compute dies in the partition
+ * @package_id: CPU Package id
+ * @partition: Package partition id when multiple VSEC PCI devices per package
+ * @segment: PCI segment ID
+ * @bus_number: PCI bus number
+ * @device_number: PCI device number
+ * @function_number: PCI function number
+ *
+ * Structure to store platform data for an OOBMSM device instance.
+ */
+struct oobmsm_plat_info {
+ u16 cdie_mask;
+ u8 package_id;
+ u8 partition;
+ u8 segment;
+ u8 bus_number;
+ u8 device_number;
+ u8 function_number;
+};
+
+struct telemetry_region {
+ struct oobmsm_plat_info plat_info;
+ void __iomem *addr;
+ size_t size;
+ u32 guid;
+ u32 num_rmids;
+};
+
+struct pmt_feature_group {
+ enum pmt_feature_id id;
+ int count;
+ struct kref kref;
+ struct telemetry_region regions[];
+};
+
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name);
+
+static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
+{
+ return container_of(dev, struct intel_vsec_device, auxdev.dev);
+}
+
+static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
+{
+ return container_of(auxdev, struct intel_vsec_device, auxdev);
+}
+
+#if IS_ENABLED(CONFIG_INTEL_VSEC)
+int intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info);
+int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info,
+ struct intel_vsec_device *vsec_dev);
+struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev);
+#else
+static inline int intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ return -ENODEV;
+}
+static inline int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info,
+ struct intel_vsec_device *vsec_dev)
+{
+ return -ENODEV;
+}
+static inline struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_INTEL_PMT_TELEMETRY)
+struct pmt_feature_group *
+intel_pmt_get_regions_by_feature(enum pmt_feature_id id);
+
+void intel_pmt_put_feature_group(struct pmt_feature_group *feature_group);
+#else
+static inline struct pmt_feature_group *
+intel_pmt_get_regions_by_feature(enum pmt_feature_id id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void
+intel_pmt_put_feature_group(struct pmt_feature_group *feature_group) {}
+#endif
+
+#endif
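
As a usage sketch for the PMT lookup interface at the end of this header: a
consumer asks for all regions implementing a feature, walks them, and drops
the reference when done. The function name and printout are hypothetical;
IS_ERR_OR_NULL() is used because the lookup may return an ERR_PTR (as the
stub above does).

static void example_walk_crashlog_regions(void)
{
	struct pmt_feature_group *group;
	int i;

	group = intel_pmt_get_regions_by_feature(FEATURE_CRASH_LOG);
	if (IS_ERR_OR_NULL(group))
		return;

	for (i = 0; i < group->count; i++)
		pr_info("crashlog region guid 0x%x, %zu bytes\n",
			group->regions[i].guid, group->regions[i].size);

	/* Drop the kref taken on the group by the lookup. */
	intel_pmt_put_feature_group(group);
}
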
diff --git a/include/linux/interconnect-clk.h b/include/linux/interconnect-clk.h
new file mode 100644
index 000000000000..9bcee3e9c56c
--- /dev/null
+++ b/include/linux/interconnect-clk.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __LINUX_INTERCONNECT_CLK_H
+#define __LINUX_INTERCONNECT_CLK_H
+
+struct device;
+
+struct icc_clk_data {
+ struct clk *clk;
+ const char *name;
+ unsigned int master_id;
+ unsigned int slave_id;
+};
+
+struct icc_provider *icc_clk_register(struct device *dev,
+ unsigned int first_id,
+ unsigned int num_clocks,
+ const struct icc_clk_data *data);
+int devm_icc_clk_register(struct device *dev, unsigned int first_id,
+ unsigned int num_clocks, const struct icc_clk_data *data);
+void icc_clk_unregister(struct icc_provider *provider);
+
+#endif
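
A minimal sketch of a provider built around a single clock, assuming
hypothetical node ids and link name:

static int example_icc_clk_setup(struct device *dev, struct clk *clk)
{
	struct icc_clk_data data = {
		.clk		= clk,
		.name		= "mem",	/* hypothetical link name */
		.master_id	= 0,
		.slave_id	= 1,
	};

	/* Registers a master/slave node pair scaled through the clock. */
	return devm_icc_clk_register(dev, 0, 1, &data);
}
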
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
new file mode 100644
index 000000000000..8a2f652a05ff
--- /dev/null
+++ b/include/linux/interconnect-provider.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __LINUX_INTERCONNECT_PROVIDER_H
+#define __LINUX_INTERCONNECT_PROVIDER_H
+
+#include <linux/interconnect.h>
+
+#define icc_units_to_bps(bw) ((bw) * 1000ULL)
+
+struct icc_node;
+struct of_phandle_args;
+
+/**
+ * struct icc_node_data - icc node data
+ *
+ * @node: icc node
+ * @tag: tag
+ */
+struct icc_node_data {
+ struct icc_node *node;
+ u32 tag;
+};
+
+/**
+ * struct icc_onecell_data - driver data for onecell interconnect providers
+ *
+ * @num_nodes: number of nodes in this device
+ * @nodes: array of pointers to the nodes in this device
+ */
+struct icc_onecell_data {
+ unsigned int num_nodes;
+ struct icc_node *nodes[] __counted_by(num_nodes);
+};
+
+struct icc_node *of_icc_xlate_onecell(const struct of_phandle_args *spec,
+ void *data);
+
+/**
+ * struct icc_provider - interconnect provider (controller) entity that might
+ * provide multiple interconnect controls
+ *
+ * @provider_list: list of the registered interconnect providers
+ * @nodes: internal list of the interconnect provider nodes
+ * @set: pointer to device specific set operation function
+ * @aggregate: pointer to device specific aggregate operation function
+ * @pre_aggregate: pointer to device specific function that is called
+ * before the aggregation begins (optional)
+ * @get_bw: pointer to device specific function to get current bandwidth
+ * @xlate: provider-specific callback for mapping nodes from phandle arguments
+ * @xlate_extended: vendor-specific callback for mapping node data from phandle arguments
+ * @dev: the device this interconnect provider belongs to
+ * @users: count of active users
+ * @inter_set: whether inter-provider pairs will be configured with @set
+ * @data: pointer to private data
+ */
+struct icc_provider {
+ struct list_head provider_list;
+ struct list_head nodes;
+ int (*set)(struct icc_node *src, struct icc_node *dst);
+ int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+ void (*pre_aggregate)(struct icc_node *node);
+ int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
+ struct icc_node* (*xlate)(const struct of_phandle_args *spec, void *data);
+ struct icc_node_data* (*xlate_extended)(const struct of_phandle_args *spec,
+ void *data);
+ struct device *dev;
+ int users;
+ bool inter_set;
+ void *data;
+};
+
+/**
+ * struct icc_node - entity that is part of the interconnect topology
+ *
+ * @id: platform specific node id
+ * @name: node name used in debugfs
+ * @links: a list of targets pointing to where we can go next when traversing
+ * @num_links: number of links to other interconnect nodes
+ * @provider: points to the interconnect provider of this node
+ * @node_list: the list entry in the parent provider's "nodes" list
+ * @search_list: list used when walking the nodes graph
+ * @reverse: pointer to previous node when walking the nodes graph
+ * @is_traversed: flag that is used when walking the nodes graph
+ * @req_list: a list of QoS constraint requests associated with this node
+ * @avg_bw: aggregated value of average bandwidth requests from all consumers
+ * @peak_bw: aggregated value of peak bandwidth requests from all consumers
+ * @init_avg: average bandwidth value that is read from the hardware during init
+ * @init_peak: peak bandwidth value that is read from the hardware during init
+ * @data: pointer to private data
+ */
+struct icc_node {
+ int id;
+ const char *name;
+ struct icc_node **links;
+ size_t num_links;
+
+ struct icc_provider *provider;
+ struct list_head node_list;
+ struct list_head search_list;
+ struct icc_node *reverse;
+ u8 is_traversed:1;
+ struct hlist_head req_list;
+ u32 avg_bw;
+ u32 peak_bw;
+ u32 init_avg;
+ u32 init_peak;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+struct icc_node *icc_node_create_dyn(void);
+struct icc_node *icc_node_create(int id);
+void icc_node_destroy(int id);
+int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name);
+int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node);
+int icc_link_create(struct icc_node *node, const int dst_id);
+void icc_node_add(struct icc_node *node, struct icc_provider *provider);
+void icc_node_del(struct icc_node *node);
+int icc_nodes_remove(struct icc_provider *provider);
+void icc_provider_init(struct icc_provider *provider);
+int icc_provider_register(struct icc_provider *provider);
+void icc_provider_deregister(struct icc_provider *provider);
+struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec);
+void icc_sync_state(struct device *dev);
+
+#else
+
+static inline int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ return -ENOTSUPP;
+}
+
+static inline struct icc_node *icc_node_create_dyn(void)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct icc_node *icc_node_create(int id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void icc_node_destroy(int id)
+{
+}
+
+static inline int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider,
+ const char *name)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int icc_link_create(struct icc_node *node, const int dst_id)
+{
+ return -ENOTSUPP;
+}
+
+static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+{
+}
+
+static inline void icc_node_del(struct icc_node *node)
+{
+}
+
+static inline int icc_nodes_remove(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
+static inline void icc_provider_init(struct icc_provider *provider) { }
+
+static inline int icc_provider_register(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
+static inline void icc_provider_deregister(struct icc_provider *provider) { }
+
+static inline struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_PROVIDER_H */
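
Pulling the provider-side pieces together, a skeletal two-node provider could
be registered as below. The node ids, link topology, and empty set() are
illustrative, and error unwinding (icc_nodes_remove() etc.) is omitted for
brevity.

static int example_set(struct icc_node *src, struct icc_node *dst)
{
	/* Program the hardware for dst->avg_bw / dst->peak_bw here. */
	return 0;
}

static int example_register_provider(struct device *dev,
				     struct icc_provider *provider)
{
	struct icc_node *src, *dst;

	provider->dev = dev;
	provider->set = example_set;
	provider->aggregate = icc_std_aggregate;
	icc_provider_init(provider);

	src = icc_node_create(100);
	if (IS_ERR(src))
		return PTR_ERR(src);
	icc_node_add(src, provider);

	dst = icc_node_create(101);
	if (IS_ERR(dst))
		return PTR_ERR(dst);
	icc_node_add(dst, provider);

	icc_link_create(src, 101);

	return icc_provider_register(provider);
}
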
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
new file mode 100644
index 000000000000..4b12821528a6
--- /dev/null
+++ b/include/linux/interconnect.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __LINUX_INTERCONNECT_H
+#define __LINUX_INTERCONNECT_H
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* macros for converting to icc units */
+#define Bps_to_icc(x) ((x) / 1000)
+#define kBps_to_icc(x) (x)
+#define MBps_to_icc(x) ((x) * 1000)
+#define GBps_to_icc(x) ((x) * 1000 * 1000)
+#define bps_to_icc(x) (1)
+#define kbps_to_icc(x) (((x) + 7) / 8)
+#define Mbps_to_icc(x) ((x) * 1000 / 8)
+#define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8)
+
+/* macro to indicate dynamic id allocation */
+#define ICC_ALLOC_DYN_ID -1
+
+struct icc_path;
+struct device;
+
+/**
+ * struct icc_bulk_data - Data used for bulk icc operations.
+ *
+ * @path: reference to the interconnect path (internal use)
+ * @name: the name from the "interconnect-names" DT property
+ * @avg_bw: average bandwidth in icc units
+ * @peak_bw: peak bandwidth in icc units
+ */
+struct icc_bulk_data {
+ struct icc_path *path;
+ const char *name;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+struct icc_path *of_icc_get(struct device *dev, const char *name);
+struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
+int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths);
+struct icc_path *of_icc_get_by_index(struct device *dev, int idx);
+void icc_put(struct icc_path *path);
+int icc_enable(struct icc_path *path);
+int icc_disable(struct icc_path *path);
+int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
+void icc_set_tag(struct icc_path *path, u32 tag);
+const char *icc_get_name(struct icc_path *path);
+int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths);
+void icc_bulk_put(int num_paths, struct icc_bulk_data *paths);
+int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths);
+int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths);
+void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);
+
+#else
+
+static inline struct icc_path *of_icc_get(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct icc_path *devm_of_icc_get(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
+{
+ return NULL;
+}
+
+static inline void icc_put(struct icc_path *path)
+{
+}
+
+static inline int icc_enable(struct icc_path *path)
+{
+ return 0;
+}
+
+static inline int icc_disable(struct icc_path *path)
+{
+ return 0;
+}
+
+static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+{
+ return 0;
+}
+
+static inline void icc_set_tag(struct icc_path *path, u32 tag)
+{
+}
+
+static inline const char *icc_get_name(struct icc_path *path)
+{
+ return NULL;
+}
+
+static inline int of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int devm_of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
+{
+}
+
+static inline int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths)
+{
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_H */
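
And the consumer side, as a sketch: get a DT-described path, vote for
bandwidth using the conversion macros above (100 MB/s average, 200 MB/s
peak), and rely on devres for cleanup. The path name "memory" is
hypothetical.

static int example_vote_bandwidth(struct device *dev)
{
	struct icc_path *path;

	/* Looks up the "memory" entry in interconnect-names. */
	path = devm_of_icc_get(dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* MBps_to_icc() converts MB/s into icc units (kB/s). */
	return icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
}
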
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 59ba11661b6e..266f2b39213a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -1,19 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H
#include <linux/kernel.h>
-#include <linux/linkage.h>
#include <linux/bitops.h>
-#include <linux/preempt.h>
-#include <linux/cpumask.h>
+#include <linux/cleanup.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
+#include <linux/cpumask_types.h>
#include <linux/workqueue.h>
+#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
@@ -46,14 +47,14 @@
* IRQF_PERCPU - Interrupt is per cpu
* IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- * registered first in an shared interrupt is considered for
+ * registered first in a shared interrupt is considered for
* performance reasons)
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
* that this interrupt will wake the system from a suspended
- * state. See Documentation/power/suspend-and-interrupts.txt
+ * state. See Documentation/power/suspend-and-interrupts.rst
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
@@ -62,6 +63,13 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ * Users will enable it explicitly by enable_irq() or enable_nmi()
+ * later.
+ * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
+ * depends on IRQF_PERCPU.
+ * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared
+ * interrupt.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -75,6 +83,9 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
+#define IRQF_NO_AUTOEN 0x00080000
+#define IRQF_NO_DEBUG 0x00100000
+#define IRQF_COND_ONESHOT 0x00200000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
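The new IRQF_NO_AUTOEN flag above changes the request-time behaviour; a hedged sketch of the intended pattern (the irq number, handler and cookie are placeholders):

/* Request the line without enabling it. */
ret = request_irq(irq, my_handler, IRQF_NO_AUTOEN, "my-dev", my_dev);
if (ret)
	return ret;

/* ... complete hardware setup, then enable explicitly. */
enable_irq(irq);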
@@ -98,6 +109,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @name: name of the device
* @dev_id: cookie to identify the device
* @percpu_dev_id: cookie to identify the device
+ * @affinity: CPUs this irqaction is allowed to run on
* @next: pointer to the next irqaction for shared interrupts
* @irq: interrupt number
* @flags: flags (see IRQF_* above)
@@ -110,8 +122,11 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
*/
struct irqaction {
irq_handler_t handler;
- void *dev_id;
- void __percpu *percpu_dev_id;
+ union {
+ void *dev_id;
+ void __percpu *percpu_dev_id;
+ };
+ const struct cpumask *affinity;
struct irqaction *next;
irq_handler_t thread_fn;
struct task_struct *thread;
@@ -129,7 +144,7 @@ extern irqreturn_t no_action(int cpl, void *dev_id);
/*
* If a (PCI) device interrupt is not connected we set dev->irq to
* IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
- * can distingiush that case from other error returns.
+ * can distinguish that case from other error returns.
*
* 0x80000000 is guaranteed to be outside the available range of interrupts
* and easy to distinguish from other possible incorrect values.
@@ -141,11 +156,24 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
unsigned long flags, const char *name, void *dev);
+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq: The interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
+ * If NULL, the default primary handler is installed
+ * @flags: Handling flags
+ * @name: Name of the device generating this interrupt
+ * @dev: A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
{
- return request_threaded_irq(irq, handler, NULL, flags, name, dev);
+ return request_threaded_irq(irq, handler, NULL, flags | IRQF_COND_ONESHOT, name, dev);
}
extern int __must_check
@@ -155,19 +183,39 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
- void __percpu *percpu_dev_id);
+ const cpumask_t *affinity, void __percpu *percpu_dev_id);
+
+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev);
static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
{
return __request_percpu_irq(irq, handler, 0,
- devname, percpu_dev_id);
+ devname, NULL, percpu_dev_id);
}
+static inline int __must_check
+request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler,
+ const char *devname, const cpumask_t *affinity,
+ void __percpu *percpu_dev_id)
+{
+ return __request_percpu_irq(irq, handler, 0,
+ devname, affinity, percpu_dev_id);
+}
+
+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name,
+ const struct cpumask *affinity, void __percpu *dev_id);
+
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
+
struct device;
extern int __must_check
@@ -191,24 +239,7 @@ devm_request_any_context_irq(struct device *dev, unsigned int irq,
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
-/*
- * On lockdep we dont want to enable hardirqs in hardirq
- * context. Use local_irq_enable_in_hardirq() to annotate
- * kernel code that has to do this nevertheless (pretty much
- * the only valid case is for old/broken hardware that is
- * insanely slow).
- *
- * NOTE: in theory this might break fragile code that relies
- * on hardirq delivery - in practice we dont seem to have such
- * places left. So the only effect should be slightly increased
- * irqs-off latencies.
- */
-#ifdef CONFIG_LOCKDEP
-# define local_irq_enable_in_hardirq() do { } while (0)
-#else
-# define local_irq_enable_in_hardirq() local_irq_enable()
-#endif
-
+bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
@@ -218,9 +249,22 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);
+DEFINE_LOCK_GUARD_1(disable_irq, int,
+ disable_irq(*_T->lock), enable_irq(*_T->lock))
+
+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
+extern int irq_inject_interrupt(unsigned int irq);
+
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
+extern void rearm_wake_irq(unsigned int irq);
/**
* struct irq_affinity_notify - context for notification of IRQ affinity changes
@@ -242,66 +286,102 @@ struct irq_affinity_notify {
void (*release)(struct kref *ref);
};
+#define IRQ_AFFINITY_MAX_SETS 4
+
/**
- * struct irq_affinity - Description for automatic irq affinity assignements
+ * struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
+ * @nr_sets: The number of interrupt sets for which affinity
+ * spreading is required
+ * @set_size: Array holding the size of each interrupt set
+ * @calc_sets: Callback for calculating the number and size
+ * of interrupt sets
+ * @priv: Private data for usage by @calc_sets, usually a
+ * pointer to driver/device specific data.
*/
struct irq_affinity {
- int pre_vectors;
- int post_vectors;
+ unsigned int pre_vectors;
+ unsigned int post_vectors;
+ unsigned int nr_sets;
+ unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
+ void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+ void *priv;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask: cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+ struct cpumask mask;
+ unsigned int is_managed : 1;
};
#if defined(CONFIG_SMP)
extern cpumask_var_t irq_default_affinity;
-/* Internal implementation. Use the helpers below */
-extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
- bool force);
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+ bool setaffinity);
/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
+ * irq_update_affinity_hint - Update the affinity hint
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Fails if cpumask does not contain an online CPU
+ * Updates the affinity hint, but does not change the affinity of the interrupt.
*/
static inline int
-irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, false);
+ return __irq_apply_affinity_hint(irq, m, false);
}
/**
- * irq_force_affinity - Force the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
- *
- * Same as irq_set_affinity, but without checking the mask against
- * online cpus.
+ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
+ * cpumask to the interrupt
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Solely for low level cpu hotplug code, where we need to make per
- * cpu interrupts affine before the cpu becomes online.
+ * Updates the affinity hint and if @m is not NULL it applies it as the
+ * affinity of that interrupt.
*/
static inline int
-irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, true);
+ return __irq_apply_affinity_hint(irq, m, true);
}
-extern int irq_can_set_affinity(unsigned int irq);
-extern int irq_select_affinity(unsigned int irq);
+/*
+ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
+ * instead.
+ */
+static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+ return irq_set_affinity_and_hint(irq, m);
+}
-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+extern int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
+
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -322,26 +402,45 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
+static inline int irq_update_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
+static inline int irq_set_affinity_and_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
static inline int irq_set_affinity_hint(unsigned int irq,
const struct cpumask *m)
{
return -EINVAL;
}
+static inline int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity)
+{
+ return -EINVAL;
+}
+
static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
return 0;
}
-static inline struct cpumask *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+static inline struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
return NULL;
}
-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd)
{
return maxvec;
}
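To make the extended struct irq_affinity above concrete, a sketch of a PCI driver reserving one non-spread pre-vector (say, an admin queue interrupt) before spreading the remaining MSI-X vectors across CPUs; pdev and maxvecs are placeholders:

struct irq_affinity affd = {
	.pre_vectors	= 1,	/* admin vector, excluded from spreading */
	.post_vectors	= 0,
};
int nvecs;

nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, maxvecs,
				       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
				       &affd);
if (nvecs < 0)
	return nvecs;

Drivers with multiple queue types would instead fill in @nr_sets and a @calc_sets callback to size each interrupt set.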
@@ -362,7 +461,7 @@ irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *aff
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
disable_irq_nosync(irq);
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_disable();
#endif
}
@@ -370,22 +469,14 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq)
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
disable_irq_nosync(irq);
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_save(*flags);
#endif
}
-static inline void disable_irq_lockdep(unsigned int irq)
-{
- disable_irq(irq);
-#ifdef CONFIG_LOCKDEP
- local_irq_disable();
-#endif
-}
-
static inline void enable_irq_lockdep(unsigned int irq)
{
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_enable();
#endif
enable_irq(irq);
@@ -393,7 +484,7 @@ static inline void enable_irq_lockdep(unsigned int irq)
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_restore(*flags);
#endif
enable_irq(irq);
@@ -428,16 +519,28 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
-extern bool force_irqthreads;
+# ifdef CONFIG_PREEMPT_RT
+# define force_irqthreads() (true)
+# else
+DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
+# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key))
+# endif
#else
-#define force_irqthreads (0)
+#define force_irqthreads() (false)
#endif
-#ifndef __ARCH_SET_SOFTIRQ_PENDING
-#define set_softirq_pending(x) (local_softirq_pending() = (x))
-#define or_softirq_pending(x) (local_softirq_pending() |= (x))
+#ifndef local_softirq_pending
+
+#ifndef local_softirq_pending_ref
+#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif
+#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
+#define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x)))
+#define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x)))
+
+#endif /* local_softirq_pending */
+
/* Some architectures might implement lazy enabling/disabling of
* interrupts. In some cases, such as stop_machine, we might want
* to ensure that after a local_irq_disable(), interrupts have
@@ -464,14 +567,26 @@ enum
IRQ_POLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
- numbering. Sigh! */
+ HRTIMER_SOFTIRQ,
RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
NR_SOFTIRQS
};
-#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/*
+ * The following vectors can be safely ignored after ksoftirqd is parked:
+ *
+ * _ RCU:
+ * 1) rcutree_migrate_callbacks() migrates the queue.
+ * 2) rcutree_report_cpu_dead() reports the final quiescent states.
+ *
+ * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ *
+ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
+ */
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
+ BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
+
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
@@ -484,28 +599,75 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
struct softirq_action
{
- void (*action)(struct softirq_action *);
+ void (*action)(void);
};
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-#ifdef __ARCH_HAS_DO_SOFTIRQ
-void do_softirq_own_stack(void);
+#ifdef CONFIG_PREEMPT_RT
+extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
-static inline void do_softirq_own_stack(void)
+static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
- __do_softirq();
+ do_softirq();
}
#endif
-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+extern void open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+/*
+ * With forced-threaded interrupts enabled a raised softirq is deferred to
+ * ksoftirqd unless it can be handled within the threaded interrupt. This
+ * affects timer_list timers and hrtimers which are explicitly marked with
+ * HRTIMER_MODE_SOFT.
+ * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing
+ * which includes all timers which are not explicitly marked HRTIMER_MODE_HARD.
+ * Userspace-controlled timers (like the clock_nanosleep() interface) are
+ * divided into two categories: tasks with an elevated scheduling policy,
+ * i.e. SCHED_{FIFO|RR|DL}, and everything else. The tasks with the
+ * elevated scheduling policy are woken up directly from the HARDIRQ while all
+ * other wake ups are delayed to softirq and so to ksoftirqd.
+ *
+ * ksoftirqd runs at SCHED_OTHER policy, at which it should remain since it
+ * handles softirqs in an overloaded situation (i.e. when not everything was
+ * handled within its last run).
+ * If the timers are handled at SCHED_OTHER priority then they compete with
+ * all other SCHED_OTHER tasks for CPU resources and are possibly delayed.
+ * Moving timer softirqs to a low priority SCHED_FIFO thread instead ensures
+ * that timers are handled before scheduling any SCHED_OTHER thread.
+ */
+DECLARE_PER_CPU(struct task_struct *, ktimerd);
+DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
+void raise_ktimers_thread(unsigned int nr);
+
+static inline unsigned int local_timers_pending_force_th(void)
+{
+ return __this_cpu_read(pending_timer_softirq);
+}
+
+static inline void raise_timer_softirq(unsigned int nr)
+{
+ lockdep_assert_in_irq();
+ if (force_irqthreads())
+ raise_ktimers_thread(nr);
+ else
+ __raise_softirq_irqoff(nr);
+}
+
+static inline unsigned int local_timers_pending(void)
+{
+ if (force_irqthreads())
+ return local_timers_pending_force_th();
+ else
+ return local_softirq_pending();
+}
+
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
static inline struct task_struct *this_cpu_ksoftirqd(void)
@@ -515,6 +677,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
/* Tasklets --- multithreaded analogue of BHs.
+ This API is deprecated. Please consider using threaded IRQs instead:
+ https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
+
Main feature differing them from generic softirqs: a tasklet
runs on only one CPU at a time.
@@ -538,16 +703,42 @@ struct tasklet_struct
struct tasklet_struct *next;
unsigned long state;
atomic_t count;
- void (*func)(unsigned long);
+ bool use_callback;
+ union {
+ void (*func)(unsigned long data);
+ void (*callback)(struct tasklet_struct *t);
+ };
unsigned long data;
};
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
+
+#define DECLARE_TASKLET_DISABLED(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
+ container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+#define DECLARE_TASKLET_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .func = _func, \
+}
+
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .func = _func, \
+}
enum
{
@@ -555,26 +746,21 @@ enum
TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
- smp_mb__before_atomic();
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
+void tasklet_unlock(struct tasklet_struct *t);
+void tasklet_unlock_wait(struct tasklet_struct *t);
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
#else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
+static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+static inline void tasklet_unlock(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);
@@ -593,27 +779,23 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
__tasklet_hi_schedule(t);
}
-extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
-
-/*
- * This version avoids touching any other tasklets. Needed for kmemcheck
- * in order not to take any page faults while enqueueing this tasklet;
- * consider VERY carefully whether you really need this or
- * tasklet_hi_schedule()...
- */
-static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
-{
- if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
- __tasklet_hi_schedule_first(t);
-}
-
-
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
atomic_inc(&t->count);
smp_mb__after_atomic();
}
+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+ tasklet_disable_nosync(t);
+ tasklet_unlock_spin_wait(t);
+ smp_mb();
+}
+
static inline void tasklet_disable(struct tasklet_struct *t)
{
tasklet_disable_nosync(t);
@@ -628,34 +810,10 @@ static inline void tasklet_enable(struct tasklet_struct *t)
}
extern void tasklet_kill(struct tasklet_struct *t);
-extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
-
-struct tasklet_hrtimer {
- struct hrtimer timer;
- struct tasklet_struct tasklet;
- enum hrtimer_restart (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
- enum hrtimer_restart (*function)(struct hrtimer *),
- clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
-{
- hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
- hrtimer_cancel(&ttimer->timer);
- tasklet_kill(&ttimer->tasklet);
-}
+extern void tasklet_setup(struct tasklet_struct *t,
+ void (*callback)(struct tasklet_struct *));
/*
* Autoprobing for irqs:
@@ -730,8 +888,10 @@ extern int arch_early_irq_init(void);
/*
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
- __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __section(".irqentry.text")
+#endif
+
+#define __softirq_entry __section(".softirqentry.text")
#endif
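The DECLARE_TASKLET() conversion above moves tasklets to a callback that receives the tasklet itself; a hedged sketch of the new-style usage with tasklet_setup() and from_tasklet() (the driver structure is hypothetical):

struct my_dev {
	struct tasklet_struct tasklet;
	/* ... device state ... */
};

static void my_tasklet_fn(struct tasklet_struct *t)
{
	struct my_dev *md = from_tasklet(md, t, tasklet);

	/* bottom-half work for md goes here */
}

/* at init time: */
tasklet_setup(&md->tasklet, my_tasklet_fn);
/* from the hard interrupt handler: */
tasklet_schedule(&md->tasklet);

Note the header itself marks the whole tasklet API as deprecated in favour of threaded IRQs.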
diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h
index 724556aa3c95..9d5791e9f737 100644
--- a/include/linux/interval_tree.h
+++ b/include/linux/interval_tree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INTERVAL_TREE_H
#define _LINUX_INTERVAL_TREE_H
@@ -11,17 +12,81 @@ struct interval_tree_node {
};
extern void
-interval_tree_insert(struct interval_tree_node *node, struct rb_root *root);
+interval_tree_insert(struct interval_tree_node *node,
+ struct rb_root_cached *root);
extern void
-interval_tree_remove(struct interval_tree_node *node, struct rb_root *root);
+interval_tree_remove(struct interval_tree_node *node,
+ struct rb_root_cached *root);
extern struct interval_tree_node *
-interval_tree_iter_first(struct rb_root *root,
+interval_tree_subtree_search(struct interval_tree_node *node,
+ unsigned long start, unsigned long last);
+
+extern struct interval_tree_node *
+interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
extern struct interval_tree_node *
interval_tree_iter_next(struct interval_tree_node *node,
unsigned long start, unsigned long last);
+/**
+ * struct interval_tree_span_iter - Find used and unused spans.
+ * @start_hole: Start of an interval for a hole when is_hole == 1
+ * @last_hole: Inclusive end of an interval for a hole when is_hole == 1
+ * @start_used: Start of a used interval when is_hole == 0
+ * @last_used: Inclusive end of a used interval when is_hole == 0
+ * @is_hole: 0 == used, 1 == is_hole, -1 == done iteration
+ *
+ * This iterator travels over spans in an interval tree. It does not return
+ * nodes but classifies each span as either a hole, where no nodes intersect,
+ * or used, where it is fully covered by nodes. Each iteration step toggles
+ * between hole and used until the entire range is covered. The returned spans always
+ * fully cover the requested range.
+ *
+ * The iterator is greedy: it always returns the largest possible hole or used span,
+ * consolidating all consecutive nodes.
+ *
+ * Use interval_tree_span_iter_done() to detect end of iteration.
+ */
+struct interval_tree_span_iter {
+ /* private: not for use by the caller */
+ struct interval_tree_node *nodes[2];
+ unsigned long first_index;
+ unsigned long last_index;
+
+ /* public: */
+ union {
+ unsigned long start_hole;
+ unsigned long start_used;
+ };
+ union {
+ unsigned long last_hole;
+ unsigned long last_used;
+ };
+ int is_hole;
+};
+
+void interval_tree_span_iter_first(struct interval_tree_span_iter *state,
+ struct rb_root_cached *itree,
+ unsigned long first_index,
+ unsigned long last_index);
+void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long new_index);
+void interval_tree_span_iter_next(struct interval_tree_span_iter *state);
+
+static inline bool
+interval_tree_span_iter_done(struct interval_tree_span_iter *state)
+{
+ return state->is_hole == -1;
+}
+
+#define interval_tree_for_each_span(span, itree, first_index, last_index) \
+ for (interval_tree_span_iter_first(span, itree, \
+ first_index, last_index); \
+ !interval_tree_span_iter_done(span); \
+ interval_tree_span_iter_next(span))
+
#endif /* _LINUX_INTERVAL_TREE_H */
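As a usage sketch of the span iterator defined above; the tree and index range are placeholders:

struct rb_root_cached itree = RB_ROOT_CACHED;
struct interval_tree_span_iter span;

interval_tree_for_each_span(&span, &itree, 0, ULONG_MAX) {
	if (span.is_hole)
		pr_debug("hole: [%lx, %lx]\n",
			 span.start_hole, span.last_hole);
	else
		pr_debug("used: [%lx, %lx]\n",
			 span.start_used, span.last_used);
}

On an empty tree the first span is a single hole covering the whole requested range.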
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index 58370e1862ad..c5a2fed49eb0 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Interval Trees
(C) 2012 Michel Lespinasse <walken@google.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
include/linux/interval_tree_generic.h
*/
@@ -33,7 +21,7 @@
* ITSTATIC: 'static' or empty
* ITPREFIX: prefix to use for the inline tree definitions
*
- * Note - before using this, please consider if non-generic version
+ * Note - before using this, please consider if the generic version
* (interval_tree.h) would work for you...
*/
@@ -42,34 +30,18 @@
\
/* Callbacks for augmented rbtree insert and remove */ \
\
-static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
-{ \
- ITTYPE max = ITLAST(node), subtree_last; \
- if (node->ITRB.rb_left) { \
- subtree_last = rb_entry(node->ITRB.rb_left, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- if (node->ITRB.rb_right) { \
- subtree_last = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- return max; \
-} \
- \
-RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
- ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
+RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \
+ ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \
\
/* Insert / remove interval nodes from the tree */ \
\
-ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
+ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \
+ struct rb_root_cached *root) \
{ \
- struct rb_node **link = &root->rb_node, *rb_parent = NULL; \
+ struct rb_node **link = &root->rb_root.rb_node, *rb_parent = NULL; \
ITTYPE start = ITSTART(node), last = ITLAST(node); \
ITSTRUCT *parent; \
+ bool leftmost = true; \
\
while (*link) { \
rb_parent = *link; \
@@ -78,18 +50,22 @@ ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
parent->ITSUBTREE = last; \
if (start < ITSTART(parent)) \
link = &parent->ITRB.rb_left; \
- else \
+ else { \
link = &parent->ITRB.rb_right; \
+ leftmost = false; \
+ } \
} \
\
node->ITSUBTREE = last; \
rb_link_node(&node->ITRB, rb_parent, link); \
- rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
+ rb_insert_augmented_cached(&node->ITRB, root, \
+ leftmost, &ITPREFIX ## _augment); \
} \
\
-ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
+ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \
+ struct rb_root_cached *root) \
{ \
- rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
+ rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
/* \
@@ -101,7 +77,7 @@ ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
* Cond2: start <= ITLAST(node) \
*/ \
\
-static ITSTRUCT * \
+ITSTATIC ITSTRUCT * \
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
while (true) { \
@@ -128,27 +104,43 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
- if (node->ITRB.rb_right) { \
- node = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB); \
- if (start <= node->ITSUBTREE) \
- continue; \
- } \
+ node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \
+ continue; \
} \
return NULL; /* No match */ \
} \
} \
\
ITSTATIC ITSTRUCT * \
-ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \
+ITPREFIX ## _iter_first(struct rb_root_cached *root, \
+ ITTYPE start, ITTYPE last) \
{ \
- ITSTRUCT *node; \
+ ITSTRUCT *node, *leftmost; \
\
- if (!root->rb_node) \
+ if (!root->rb_root.rb_node) \
return NULL; \
- node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
+ \
+ /* \
+ * Fastpath range intersection/overlap between A: [a0, a1] and \
+ * B: [b0, b1] is given by: \
+ * \
+ * a0 <= b1 && b0 <= a1 \
+ * \
+ * ... where A holds the lock range and B holds the smallest \
+ * 'start' and largest 'last' in the tree. For the later, we \
+ * rely on the root node, which by augmented interval tree \
+ * property, holds the largest value in its last-in-subtree. \
+ * This allows mitigating some of the tree walk overhead for \
+ * for non-intersecting ranges, maintained and consulted in O(1). \
+ */ \
+ node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \
if (node->ITSUBTREE < start) \
return NULL; \
+ \
+ leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \
+ if (ITSTART(leftmost) > last) \
+ return NULL; \
+ \
return ITPREFIX ## _subtree_search(node, start, last); \
} \
\
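A hedged instantiation sketch for the template above, using a hypothetical node type; the INTERVAL_TREE_DEFINE() wrapper in this header supplies the IT* parameters:

struct my_node {
	struct rb_node rb;
	unsigned long start;		/* interval start, inclusive */
	unsigned long last;		/* interval end, inclusive */
	unsigned long __subtree_last;	/* augmented: max 'last' in subtree */
};

#define MY_START(n)	((n)->start)
#define MY_LAST(n)	((n)->last)

INTERVAL_TREE_DEFINE(struct my_node, rb, unsigned long, __subtree_last,
		     MY_START, MY_LAST, static, my_it)

This generates my_it_insert(), my_it_remove(), my_it_iter_first() and my_it_iter_next(), all now taking a struct rb_root_cached * per the conversion above.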
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index defcc4644ce3..d3eade7cf663 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IO_64_NONATOMIC_HI_LO_H_
#define _LINUX_IO_64_NONATOMIC_HI_LO_H_
@@ -54,4 +55,84 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#define writeq_relaxed hi_lo_writeq_relaxed
#endif
+#ifndef ioread64_hi_lo
+#define ioread64_hi_lo ioread64_hi_lo
+static inline u64 ioread64_hi_lo(const void __iomem *addr)
+{
+ u32 low, high;
+
+ high = ioread32(addr + sizeof(u32));
+ low = ioread32(addr);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64_hi_lo
+#define iowrite64_hi_lo iowrite64_hi_lo
+static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ iowrite32(val >> 32, addr + sizeof(u32));
+ iowrite32(val, addr);
+}
+#endif
+
+#ifndef ioread64be_hi_lo
+#define ioread64be_hi_lo ioread64be_hi_lo
+static inline u64 ioread64be_hi_lo(const void __iomem *addr)
+{
+ u32 low, high;
+
+ high = ioread32be(addr);
+ low = ioread32be(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64be_hi_lo
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ iowrite32be(val >> 32, addr);
+ iowrite32be(val, addr + sizeof(u32));
+}
+#endif
+
+#ifndef ioread64
+#define ioread64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64 __ioread64_hi_lo
+#else
+#define ioread64 ioread64_hi_lo
+#endif
+#endif
+
+#ifndef iowrite64
+#define iowrite64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64 __iowrite64_hi_lo
+#else
+#define iowrite64 iowrite64_hi_lo
+#endif
+#endif
+
+#ifndef ioread64be
+#define ioread64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64be __ioread64be_hi_lo
+#else
+#define ioread64be ioread64be_hi_lo
+#endif
+#endif
+
+#ifndef iowrite64be
+#define iowrite64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64be __iowrite64be_hi_lo
+#else
+#define iowrite64be iowrite64be_hi_lo
+#endif
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
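A small usage sketch: with this header included, a driver on a platform without native 64-bit accessors gets ioread64()/iowrite64() split into two 32-bit accesses, high word first; REG_STATUS and REG_ACK are hypothetical offsets:

#include <linux/io-64-nonatomic-hi-lo.h>

u64 stat;

stat = ioread64(base + REG_STATUS);	/* falls back to ioread64_hi_lo() */
iowrite64(stat, base + REG_ACK);	/* high word is written first */

The two halves are not atomic: a register that changes between the two 32-bit accesses can tear, so this is only safe for registers that are latched by the hardware or quiescent during the access.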
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index 084461a4e5ab..94e676ec3d3f 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IO_64_NONATOMIC_LO_HI_H_
#define _LINUX_IO_64_NONATOMIC_LO_HI_H_
@@ -54,4 +55,84 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#define writeq_relaxed lo_hi_writeq_relaxed
#endif
+#ifndef ioread64_lo_hi
+#define ioread64_lo_hi ioread64_lo_hi
+static inline u64 ioread64_lo_hi(const void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32(addr);
+ high = ioread32(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64_lo_hi
+#define iowrite64_lo_hi iowrite64_lo_hi
+static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+ iowrite32(val >> 32, addr + sizeof(u32));
+}
+#endif
+
+#ifndef ioread64be_lo_hi
+#define ioread64be_lo_hi ioread64be_lo_hi
+static inline u64 ioread64be_lo_hi(const void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32be(addr + sizeof(u32));
+ high = ioread32be(addr);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64be_lo_hi
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32be(val, addr + sizeof(u32));
+ iowrite32be(val >> 32, addr);
+}
+#endif
+
+#ifndef ioread64
+#define ioread64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64 __ioread64_lo_hi
+#else
+#define ioread64 ioread64_lo_hi
+#endif
+#endif
+
+#ifndef iowrite64
+#define iowrite64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64 __iowrite64_lo_hi
+#else
+#define iowrite64 iowrite64_lo_hi
+#endif
+#endif
+
+#ifndef ioread64be
+#define ioread64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64be __ioread64be_lo_hi
+#else
+#define ioread64be ioread64be_lo_hi
+#endif
+#endif
+
+#ifndef iowrite64be
+#define iowrite64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64be __iowrite64be_lo_hi
+#else
+#define iowrite64be iowrite64be_lo_hi
+#endif
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 58df02bd93c9..c16353cc6e3c 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright © 2008 Keith Packard <keithp@keithp.com>
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _LINUX_IO_MAPPING_H
@@ -22,13 +10,14 @@
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
/*
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
- * See Documentation/io-mapping.txt
+ * See Documentation/driver-api/io-mapping.rst
*/
struct io_mapping {
@@ -40,6 +29,7 @@ struct io_mapping {
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+#include <linux/pfn.h>
#include <asm/iomap.h>
/*
* For small address space machines, mapping large objects
@@ -76,18 +66,41 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
resource_size_t phys_addr;
- unsigned long pfn;
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
- return iomap_atomic_prot_pfn(pfn, mapping->prot);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ migrate_disable();
+ pagefault_disable();
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
- iounmap_atomic(vaddr);
+ kunmap_local_indexed((void __force *)vaddr);
+ pagefault_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ migrate_enable();
+}
+
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ resource_size_t phys_addr;
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ kunmap_local_indexed((void __force *)vaddr);
}
static inline void __iomem *
@@ -109,10 +122,9 @@ io_mapping_unmap(void __iomem *vaddr)
iounmap(vaddr);
}
-#else
+#else /* HAVE_ATOMIC_IOMAP */
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
/* Create the io_mapping object */
static inline struct io_mapping *
@@ -120,16 +132,13 @@ io_mapping_init_wc(struct io_mapping *iomap,
resource_size_t base,
unsigned long size)
{
+ iomap->iomem = ioremap_wc(base, size);
+ if (!iomap->iomem)
+ return NULL;
+
iomap->base = base;
iomap->size = size;
- iomap->iomem = ioremap_wc(base, size);
-#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
- iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
-#elif defined(pgprot_writecombine)
iomap->prot = pgprot_writecombine(PAGE_KERNEL);
-#else
- iomap->prot = pgprot_noncached(PAGE_KERNEL);
-#endif
return iomap;
}
@@ -159,7 +168,10 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
- preempt_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ migrate_disable();
pagefault_disable();
return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
@@ -169,10 +181,24 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
{
io_mapping_unmap(vaddr);
pagefault_enable();
- preempt_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ migrate_enable();
+}
+
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ io_mapping_unmap(vaddr);
}
-#endif /* HAVE_ATOMIC_IOMAP */
+#endif /* !HAVE_ATOMIC_IOMAP */
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
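A hedged sketch of the new local-mapping pair added above, which new code should prefer over the *_atomic variants; the base, size and offset are placeholders:

struct io_mapping *map;
void __iomem *vaddr;

map = io_mapping_create_wc(res_base, res_size);
if (!map)
	return -ENOMEM;

vaddr = io_mapping_map_local_wc(map, offset);
/* ... access one page of the BAR through vaddr ... */
io_mapping_unmap_local(vaddr);

io_mapping_free(map);

Unlike io_mapping_map_atomic_wc(), the local variant does not disable pagefaults or preemption, which is also what makes it the PREEMPT_RT-friendly choice.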
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
new file mode 100644
index 000000000000..7a1516011ccf
--- /dev/null
+++ b/include/linux/io-pgtable.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __IO_PGTABLE_H
+#define __IO_PGTABLE_H
+
+#include <linux/bitops.h>
+#include <linux/iommu.h>
+
+/*
+ * Public API for use by IOMMU drivers
+ */
+enum io_pgtable_fmt {
+ ARM_32_LPAE_S1,
+ ARM_32_LPAE_S2,
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ ARM_V7S,
+ ARM_MALI_LPAE,
+ APPLE_DART,
+ APPLE_DART2,
+ IO_PGTABLE_NUM_FMTS,
+};
+
+/**
+ * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
+ *
+ * @tlb_flush_all: Synchronously invalidate the entire TLB context.
+ * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
+ * (sometimes referred to as the "walk cache") for a virtual
+ * address range.
+ * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a
+ * single page. IOMMUs that cannot batch TLB invalidation
+ * operations efficiently will typically issue them here, but
+ * others may decide to update the iommu_iotlb_gather structure
+ * and defer the invalidation until iommu_iotlb_sync() instead.
+ *
+ * Note that these can all be called in atomic context and must therefore
+ * not block.
+ */
+struct iommu_flush_ops {
+ void (*tlb_flush_all)(void *cookie);
+ void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
+ void *cookie);
+ void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule, void *cookie);
+};
+
+/**
+ * struct io_pgtable_cfg - Configuration data for a set of page tables.
+ *
+ * @quirks: A bitmap of hardware quirks that require some special
+ * action by the low-level page table allocator.
+ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
+ * tables.
+ * @ias: Input address (iova) size, in bits.
+ * @oas: Output address (paddr) size, in bits.
+ * @coherent_walk: A flag to indicate whether or not page table walks made
+ * by the IOMMU are coherent with the CPU caches.
+ * @tlb: TLB management callbacks for this set of tables.
+ * @iommu_dev: The device representing the DMA configuration for the
+ * page table walker.
+ */
+struct io_pgtable_cfg {
+ /*
+ * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
+ * stage 1 PTEs, for hardware which insists on validating them
+ * even in non-secure state where they should normally be ignored.
+ *
+ * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
+ * IOMMU_NOEXEC flags and map everything with full access, for
+ * hardware which does not implement the permissions of a given
+ * format, and/or requires some format-specific default value.
+ *
+ * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
+ * to support up to 35 bits PA where the bit32, bit33 and bit34 are
+ * encoded in the bit9, bit4 and bit5 of the PTE respectively.
+ *
+ * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
+ * extend the translation table base support up to 35 bits PA, the
+ * encoding format is same with IO_PGTABLE_QUIRK_ARM_MTK_EXT.
+ *
+ * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
+ * for use in the upper half of a split address space.
+ *
+ * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
+ * attributes set in the TCR for a non-coherent page-table walker.
+ *
+ * IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable.
+ * IO_PGTABLE_QUIRK_ARM_S2FWB: Use the FWB format for the MemAttrs bits
+ *
+ * IO_PGTABLE_QUIRK_NO_WARN: Do not WARN_ON() on conflicting
+ * mappings, but silently return -EEXIST. Normally an attempt
+ * to map over an existing mapping would indicate some sort of
+ * kernel bug, which would justify the WARN_ON(). But for GPU
+ * drivers, this could be under control of userspace, which
+ * deserves an error return but should not spam dmesg.
+ */
+ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
+ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
+ #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
+ #define IO_PGTABLE_QUIRK_ARM_HD BIT(7)
+ #define IO_PGTABLE_QUIRK_ARM_S2FWB BIT(8)
+ #define IO_PGTABLE_QUIRK_NO_WARN BIT(9)
+ unsigned long quirks;
+ unsigned long pgsize_bitmap;
+ unsigned int ias;
+ unsigned int oas;
+ bool coherent_walk;
+ const struct iommu_flush_ops *tlb;
+ struct device *iommu_dev;
+
+ /**
+ * @alloc: Custom page allocator.
+ *
+ * Optional hook used to allocate page tables. If this function is NULL,
+ * @free must be NULL too.
+ *
+ * Memory returned should be zeroed and suitable for dma_map_single() and
+ * virt_to_phys().
+ *
+ * Not all formats support custom page allocators. Before considering
+ * passing a non-NULL value, make sure the chosen page format supports
+ * this feature.
+ */
+ void *(*alloc)(void *cookie, size_t size, gfp_t gfp);
+
+ /**
+ * @free: Custom page de-allocator.
+ *
+ * Optional hook used to free page tables allocated with the @alloc
+ * hook. Must be non-NULL if @alloc is not NULL, must be NULL
+ * otherwise.
+ */
+ void (*free)(void *cookie, void *pages, size_t size);
+
+ /* Low-level data specific to the table format */
+ union {
+ struct {
+ u64 ttbr;
+ struct {
+ u32 ips:3;
+ u32 tg:2;
+ u32 sh:2;
+ u32 orgn:2;
+ u32 irgn:2;
+ u32 tsz:6;
+ } tcr;
+ u64 mair;
+ } arm_lpae_s1_cfg;
+
+ struct {
+ u64 vttbr;
+ struct {
+ u32 ps:3;
+ u32 tg:2;
+ u32 sh:2;
+ u32 orgn:2;
+ u32 irgn:2;
+ u32 sl:2;
+ u32 tsz:6;
+ } vtcr;
+ } arm_lpae_s2_cfg;
+
+ struct {
+ u32 ttbr;
+ u32 tcr;
+ u32 nmrr;
+ u32 prrr;
+ } arm_v7s_cfg;
+
+ struct {
+ u64 transtab;
+ u64 memattr;
+ } arm_mali_lpae_cfg;
+
+ struct {
+ u64 ttbr[4];
+ u32 n_ttbrs;
+ u32 n_levels;
+ } apple_dart_cfg;
+
+ struct {
+ int nid;
+ } amd;
+ };
+};
+
+/**
+ * struct arm_lpae_io_pgtable_walk_data - information from a pgtable walk
+ *
+ * @ptes: The recorded PTE values from the walk
+ */
+struct arm_lpae_io_pgtable_walk_data {
+ u64 ptes[4];
+};
+
+/**
+ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
+ *
+ * @map_pages: Map a physically contiguous range of pages of the same size.
+ * @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
+ * @iova_to_phys: Translate iova to physical address.
+ * @pgtable_walk: (optional) Perform a page table walk for a given iova.
+ * @read_and_clear_dirty: (optional) Read and clear the dirty state of PTEs
+ * in a given iova range.
+ *
+ * These functions map directly onto the iommu_ops member functions with
+ * the same names.
+ */
+struct io_pgtable_ops {
+ int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
+ size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather);
+ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
+ unsigned long iova);
+ int (*pgtable_walk)(struct io_pgtable_ops *ops, unsigned long iova, void *wd);
+ int (*read_and_clear_dirty)(struct io_pgtable_ops *ops,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
+};
+
+/**
+ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
+ *
+ * @fmt: The page table format.
+ * @cfg: The page table configuration. This will be modified to represent
+ * the configuration actually provided by the allocator (e.g. the
+ * pgsize_bitmap may be restricted).
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * the callback routines in cfg->tlb.
+ */
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie);
+
+/**
+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
+ * *must* ensure that the page table is no longer
+ * live, but the TLB can be dirty.
+ *
+ * @ops: The ops returned from alloc_io_pgtable_ops.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops);
+
+
+/*
+ * Internal structures for page table allocator implementations.
+ */
+
+/**
+ * struct io_pgtable - Internal structure describing a set of page tables.
+ *
+ * @fmt: The page table format.
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * any callback routines.
+ * @cfg: A copy of the page table configuration.
+ * @ops: The page table operations in use for this set of page tables.
+ */
+struct io_pgtable {
+ enum io_pgtable_fmt fmt;
+ void *cookie;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops ops;
+};
+
+#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
+
+static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
+{
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+}
+
+static inline void
+io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
+ size_t size, size_t granule)
+{
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk)
+ iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
+}
+
+static inline void
+io_pgtable_tlb_add_page(struct io_pgtable *iop,
+ struct iommu_iotlb_gather *gather, unsigned long iova,
+ size_t granule)
+{
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
+ iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
+}
+
+/**
+ * enum io_pgtable_caps - IO page table backend capabilities.
+ */
+enum io_pgtable_caps {
+ /** @IO_PGTABLE_CAP_CUSTOM_ALLOCATOR: Backend accepts custom page table allocators. */
+ IO_PGTABLE_CAP_CUSTOM_ALLOCATOR = BIT(0),
+};
+
+/**
+ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
+ * particular format.
+ *
+ * @alloc: Allocate a set of page tables described by cfg.
+ * @free: Free the page tables associated with iop.
+ * @caps: Combination of @io_pgtable_caps flags encoding the backend capabilities.
+ */
+struct io_pgtable_init_fns {
+ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
+ void (*free)(struct io_pgtable *iop);
+ u32 caps;
+};
+
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;
+
+#endif /* __IO_PGTABLE_H */
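For illustration, a sketch of an IOMMU driver instantiating an ARM LPAE stage-1 table through the public API above; the flush callbacks, geometry, cookie and addresses are hypothetical:

static const struct iommu_flush_ops my_flush_ops = {
	.tlb_flush_all	= my_tlb_flush_all,
	.tlb_flush_walk	= my_tlb_flush_walk,
	.tlb_add_page	= my_tlb_add_page,
};

struct io_pgtable_cfg cfg = {
	.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
	.ias		= 48,
	.oas		= 48,
	.coherent_walk	= true,
	.tlb		= &my_flush_ops,
	.iommu_dev	= dev,
};
struct io_pgtable_ops *ops;
size_t mapped = 0;

ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_domain);
if (!ops)
	return -ENOMEM;

/* map one 4K page read/write, then tear the tables down */
ops->map_pages(ops, iova, paddr, SZ_4K, 1, IOMMU_READ | IOMMU_WRITE,
	       GFP_KERNEL, &mapped);
free_io_pgtable_ops(ops);

As documented above, alloc_io_pgtable_ops() may restrict cfg (e.g. pgsize_bitmap) to what the chosen format actually supports, so the driver should re-read cfg afterwards.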
diff --git a/include/linux/io.h b/include/linux/io.h
index 32e30e8fb9db..0642c7ee41db 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -1,54 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2006 PathScale, Inc. All Rights Reserved.
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _LINUX_IO_H
#define _LINUX_IO_H
+#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bug.h>
-#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>
struct device;
-struct resource;
-__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+#ifndef __iowrite32_copy
+void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+#endif
+
void __ioread32_copy(void *to, const void __iomem *from, size_t count);
+
+#ifndef __iowrite64_copy
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
+#endif
#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot);
+int vmap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot);
#else
static inline int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot)
{
return 0;
}
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-void __init ioremap_huge_init(void);
-int arch_ioremap_pud_supported(void);
-int arch_ioremap_pmd_supported(void);
-#else
-static inline void ioremap_huge_init(void) { }
+static inline int vmap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
+{
+ return 0;
+}
#endif
/*
@@ -71,11 +62,9 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
}
#endif
-#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
-
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
-void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
resource_size_t size);
@@ -88,25 +77,28 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
-void *__devm_memremap_pages(struct device *dev, struct resource *res);
+/* architectures can override this */
+pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
+ unsigned long size, pgprot_t prot);
+
#ifdef CONFIG_PCI
/*
* The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
- * Posting") mandate non-posted configuration transactions. There is
- * no ioremap API in the kernel that can guarantee non-posted write
- * semantics across arches so provide a default implementation for
- * mapping PCI config space that defaults to ioremap_nocache(); arches
- * should override it if they have memory mapping implementations that
- * guarantee non-posted writes semantics to make the memory mapping
- * compliant with the PCI specification.
+ * Posting") mandate non-posted configuration transactions. This default
+ * implementation attempts to use the ioremap_np() API to provide this
+ * on arches that support it, and falls back to ioremap() on those that
+ * don't. Overriding this function is deprecated; arches that properly
+ * support non-posted accesses should implement ioremap_np() instead, which
+ * this default implementation can then use to return mappings compliant with
+ * the PCI specification.
*/
#ifndef pci_remap_cfgspace
#define pci_remap_cfgspace pci_remap_cfgspace
static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
size_t size)
{
- return ioremap_nocache(offset, size);
+ return ioremap_np(offset, size) ?: ioremap(offset, size);
}
#endif
#endif
@@ -152,6 +144,8 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);
+
enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
@@ -186,4 +180,28 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
}
#endif
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size);
+
+#ifdef CONFIG_STRICT_DEVMEM
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ u64 from = ((u64)pfn) << PAGE_SHIFT;
+ u64 to = from + size;
+ u64 cursor = from;
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn))
+ return 0;
+ cursor += PAGE_SIZE;
+ pfn++;
+ }
+ return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ return 1;
+}
+#endif
#endif /* _LINUX_IO_H */
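The "?:" in the pci_remap_cfgspace() fallback above is the GNU "Elvis"
operator: ioremap_np() is evaluated once, and ioremap() is tried only when
the non-posted mapping returned NULL. A minimal open-coded sketch of the
same logic (the function name is illustrative, not part of the kernel API):

	static inline void __iomem *cfgspace_map_open_coded(phys_addr_t offset,
							    size_t size)
	{
		/* Prefer a non-posted mapping where the arch provides one. */
		void __iomem *base = ioremap_np(offset, size);

		if (!base)
			base = ioremap(offset, size);	/* posted fallback */
		return base;
	}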
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
new file mode 100644
index 000000000000..85fe4e6b275c
--- /dev/null
+++ b/include/linux/io_uring.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_H
+#define _LINUX_IO_URING_H
+
+#include <linux/sched.h>
+#include <linux/xarray.h>
+#include <uapi/linux/io_uring.h>
+
+#if defined(CONFIG_IO_URING)
+void __io_uring_cancel(bool cancel_all);
+void __io_uring_free(struct task_struct *tsk);
+void io_uring_unreg_ringfd(void);
+const char *io_uring_get_opcode(u8 opcode);
+bool io_is_uring_fops(struct file *file);
+
+static inline void io_uring_files_cancel(void)
+{
+ if (current->io_uring)
+ __io_uring_cancel(false);
+}
+static inline void io_uring_task_cancel(void)
+{
+ if (current->io_uring)
+ __io_uring_cancel(true);
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+ if (tsk->io_uring)
+ __io_uring_free(tsk);
+}
+#else
+static inline void io_uring_task_cancel(void)
+{
+}
+static inline void io_uring_files_cancel(void)
+{
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+}
+static inline const char *io_uring_get_opcode(u8 opcode)
+{
+ return "";
+}
+static inline bool io_is_uring_fops(struct file *file)
+{
+ return false;
+}
+#endif
+
+#endif
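The CONFIG_IO_URING=n stubs above exist so that core task-teardown code can
call these hooks unconditionally. A sketch of that pattern (the call site
shown is illustrative, not the exact kernel code):

	/*
	 * Illustrative teardown path; both hooks compile to no-ops when
	 * io_uring is configured out.
	 */
	static void example_task_exit(struct task_struct *tsk)
	{
		io_uring_files_cancel();	/* cancel file-bound requests */
		/* ... release files, detach mm, ... */
		io_uring_free(tsk);		/* free tsk->io_uring, if any */
	}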
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
new file mode 100644
index 000000000000..375fd048c4cb
--- /dev/null
+++ b/include/linux/io_uring/cmd.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_CMD_H
+#define _LINUX_IO_URING_CMD_H
+
+#include <uapi/linux/io_uring.h>
+#include <linux/io_uring_types.h>
+#include <linux/blk-mq.h>
+
+/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
+#define IORING_URING_CMD_CANCELABLE (1U << 30)
+/* io_uring_cmd is being issued again */
+#define IORING_URING_CMD_REISSUE (1U << 31)
+
+struct io_uring_cmd {
+ struct file *file;
+ const struct io_uring_sqe *sqe;
+ u32 cmd_op;
+ u32 flags;
+ u8 pdu[32]; /* available inline for free use */
+ u8 unused[8];
+};
+
+static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
+{
+ return sqe->cmd;
+}
+
+static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
+}
+#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
+ io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
+ ((pdu_type *)&(cmd)->pdu) \
+)
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter,
+ struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags);
+int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+ const struct iovec __user *uvec,
+ size_t uvec_segs,
+ int ddir, struct iov_iter *iter,
+ unsigned issue_flags);
+
+/*
+ * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
+ * and the corresponding io_uring request.
+ *
+ * Note: the caller should never hard code @issue_flags and is only allowed
+ * to pass the mask provided by the core io_uring code.
+ */
+void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
+ unsigned issue_flags, bool is_cqe32);
+
+void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ io_req_tw_func_t task_work_cb,
+ unsigned flags);
+
+/*
+ * Note: the caller should never hard code @issue_flags and only use the
+ * mask provided by the core io_uring code.
+ */
+void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags);
+
+/* Execute the request from a blocking context */
+void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);
+
+/*
+ * Select a buffer from the provided buffer group for multishot uring_cmd.
+ * Returns the selected buffer address and size.
+ */
+struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd,
+ unsigned buf_group, size_t *len,
+ unsigned int issue_flags);
+
+/*
+ * Complete a multishot uring_cmd event. This will post a CQE to the completion
+ * queue and update the provided buffer.
+ */
+bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+ struct io_br_sel *sel, unsigned int issue_flags);
+
+#else
+static inline int
+io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+ const struct iovec __user *uvec,
+ size_t uvec_segs,
+ int ddir, struct iov_iter *iter,
+ unsigned issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
+ u64 ret2, unsigned issue_flags, bool is_cqe32)
+{
+}
+static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ io_req_tw_func_t task_work_cb, unsigned flags)
+{
+}
+static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+}
+static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
+{
+}
+static inline struct io_br_sel
+io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, unsigned buf_group,
+ size_t *len, unsigned int issue_flags)
+{
+ return (struct io_br_sel) { .val = -EOPNOTSUPP };
+}
+static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+ struct io_br_sel *sel, unsigned int issue_flags)
+{
+ return true;
+}
+#endif
+
+static inline struct io_uring_cmd *io_uring_cmd_from_tw(struct io_tw_req tw_req)
+{
+ return io_kiocb_to_cmd(tw_req.req, struct io_uring_cmd);
+}
+
+/* task_work executor checks the deferred list completion */
+#define IO_URING_CMD_TASK_WORK_ISSUE_FLAGS IO_URING_F_COMPLETE_DEFER
+
+/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
+static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
+ io_req_tw_func_t task_work_cb)
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
+}
+
+static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ io_req_tw_func_t task_work_cb)
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
+}
+
+static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->tctx->task;
+}
+
+/*
+ * Return the uring_cmd's context reference as a context handle for drivers
+ * to track per-context resources, such as registered kernel IO buffers.
+ */
+static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->ctx;
+}
+
+static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
+ unsigned issue_flags)
+{
+ return __io_uring_cmd_done(ioucmd, ret, 0, issue_flags, false);
+}
+
+static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,
+ u64 res2, unsigned issue_flags)
+{
+ return __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, true);
+}
+
+int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
+ void (*release)(void *), unsigned int index,
+ unsigned int issue_flags);
+int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
+ unsigned int issue_flags);
+
+#endif /* _LINUX_IO_URING_CMD_H */
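To make the pdu accessor concrete: a driver overlays its per-command state on
the 32-byte pdu area, and io_uring_cmd_to_pdu() fails the build if the struct
outgrows it. A hypothetical sketch (the foo_* names are invented):

	/* Hypothetical per-command driver state; must fit in the 32-byte pdu. */
	struct foo_cmd_pdu {
		void *cookie;
		u32 status;
	};

	static void foo_cmd_complete(struct io_uring_cmd *ioucmd,
				     unsigned int issue_flags)
	{
		struct foo_cmd_pdu *pdu =
			io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);

		/* Post the CQE; pass issue_flags through from the core unchanged. */
		io_uring_cmd_done(ioucmd, pdu->status ? -EIO : 0, issue_flags);
	}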
diff --git a/include/linux/io_uring/net.h b/include/linux/io_uring/net.h
new file mode 100644
index 000000000000..b58f39fed4d5
--- /dev/null
+++ b/include/linux/io_uring/net.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_NET_H
+#define _LINUX_IO_URING_NET_H
+
+struct io_uring_cmd;
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+
+#else
+static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
new file mode 100644
index 000000000000..e1adb0d20a0a
--- /dev/null
+++ b/include/linux/io_uring_types.h
@@ -0,0 +1,744 @@
+#ifndef IO_URING_TYPES_H
+#define IO_URING_TYPES_H
+
+#include <linux/blkdev.h>
+#include <linux/hashtable.h>
+#include <linux/task_work.h>
+#include <linux/bitmap.h>
+#include <linux/llist.h>
+#include <uapi/linux/io_uring.h>
+
+enum {
+ /*
+ * A hint to not wake right away but delay until there are enough of
+ * tw's queued to match the number of CQEs the task is waiting for.
+ *
+ * Must not be used with requests generating more than one CQE.
+ * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
+ */
+ IOU_F_TWQ_LAZY_WAKE = 1,
+};
+
+enum io_uring_cmd_flags {
+ IO_URING_F_COMPLETE_DEFER = 1,
+ IO_URING_F_UNLOCKED = 2,
+ /* the request is executed from poll, it should not be freed */
+ IO_URING_F_MULTISHOT = 4,
+ /* executed by io-wq */
+ IO_URING_F_IOWQ = 8,
+ /* executed inline from syscall */
+ IO_URING_F_INLINE = 16,
+ /* int's last bit, sign checks are usually faster than a bit test */
+ IO_URING_F_NONBLOCK = INT_MIN,
+
+ /* ctx state flags, for URING_CMD */
+ IO_URING_F_SQE128 = (1 << 8),
+ IO_URING_F_CQE32 = (1 << 9),
+ IO_URING_F_IOPOLL = (1 << 10),
+
+ /* set when uring wants to cancel a previously issued command */
+ IO_URING_F_CANCEL = (1 << 11),
+ IO_URING_F_COMPAT = (1 << 12),
+};
+
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
+};
+
+struct io_wq_work {
+ struct io_wq_work_node list;
+ atomic_t flags;
+ /* place it here instead of io_kiocb as it fills padding and saves 4B */
+ int cancel_seq;
+};
+
+struct io_rsrc_data {
+ unsigned int nr;
+ struct io_rsrc_node **nodes;
+};
+
+struct io_file_table {
+ struct io_rsrc_data data;
+ unsigned long *bitmap;
+ unsigned int alloc_hint;
+};
+
+struct io_hash_bucket {
+ struct hlist_head list;
+} ____cacheline_aligned_in_smp;
+
+struct io_hash_table {
+ struct io_hash_bucket *hbs;
+ unsigned hash_bits;
+};
+
+struct io_mapped_region {
+ struct page **pages;
+ void *ptr;
+ unsigned nr_pages;
+ unsigned flags;
+};
+
+/*
+ * Return value from io_buffer_list selection, to avoid stashing it in
+ * struct io_kiocb. For legacy/classic provided buffers, keeping a reference
+ * across execution contexts is fine. But for ring provided buffers, the
+ * list may go away as soon as ->uring_lock is dropped. As the io_kiocb
+ * persists, it's better to just keep the buffer local for those cases.
+ */
+struct io_br_sel {
+ struct io_buffer_list *buf_list;
+ /*
+ * Some selection parts return the user address, others return an error.
+ */
+ union {
+ void __user *addr;
+ ssize_t val;
+ };
+};
+
+/*
+ * Arbitrary limit, can be raised if need be
+ */
+#define IO_RINGFD_REG_MAX 16
+
+struct io_uring_task {
+ /* submission side */
+ int cached_refs;
+ const struct io_ring_ctx *last;
+ struct task_struct *task;
+ struct io_wq *io_wq;
+ struct file *registered_rings[IO_RINGFD_REG_MAX];
+
+ struct xarray xa;
+ struct wait_queue_head wait;
+ atomic_t in_cancel;
+ atomic_t inflight_tracked;
+ struct percpu_counter inflight;
+
+ struct { /* task_work */
+ struct llist_head task_list;
+ struct callback_head task_work;
+ } ____cacheline_aligned_in_smp;
+};
+
+struct iou_vec {
+ union {
+ struct iovec *iovec;
+ struct bio_vec *bvec;
+ };
+ unsigned nr; /* number of struct iovec it can hold */
+};
+
+struct io_uring {
+ u32 head;
+ u32 tail;
+};
+
+/*
+ * This data is shared with the application through the mmap at offsets
+ * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_sqring_offsets when calling io_uring_setup.
+ */
+struct io_rings {
+ /*
+ * Head and tail offsets into the ring; the offsets need to be
+ * masked to get valid indices.
+ *
+ * The kernel controls head of the sq ring and the tail of the cq ring,
+ * and the application controls tail of the sq ring and the head of the
+ * cq ring.
+ */
+ struct io_uring sq, cq;
+ /*
+ * Bitmasks to apply to head and tail offsets (constant, equals
+ * ring_entries - 1)
+ */
+ u32 sq_ring_mask, cq_ring_mask;
+ /* Ring sizes (constant, power of 2) */
+ u32 sq_ring_entries, cq_ring_entries;
+ /*
+ * Number of invalid entries dropped by the kernel due to
+ * invalid index stored in array
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * After a new SQ head value was read by the application this
+ * counter includes all submissions that were dropped reaching
+ * the new SQ head (and possibly more).
+ */
+ u32 sq_dropped;
+ /*
+ * Runtime SQ flags
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application.
+ *
+ * The application needs a full memory barrier before checking
+ * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
+ */
+ atomic_t sq_flags;
+ /*
+ * Runtime CQ flags
+ *
+ * Written by the application, shouldn't be modified by the
+ * kernel.
+ */
+ u32 cq_flags;
+ /*
+ * Number of completion events lost because the queue was full;
+ * this should be avoided by the application by making sure
+ * there are not more requests pending than there is space in
+ * the completion queue.
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * As completion events come in out of order this counter is not
+ * ordered with any other data.
+ */
+ u32 cq_overflow;
+ /*
+ * Ring buffer of completion events.
+ *
+ * The kernel writes completion events fresh every time they are
+ * produced, so the application is allowed to modify pending
+ * entries.
+ */
+ struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
+};
+
+struct io_restriction {
+ DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
+ DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+ u8 sqe_flags_allowed;
+ u8 sqe_flags_required;
+ bool registered;
+};
+
+struct io_submit_link {
+ struct io_kiocb *head;
+ struct io_kiocb *last;
+};
+
+struct io_submit_state {
+ /* inline/task_work completion list, under ->uring_lock */
+ struct io_wq_work_node free_list;
+ /* batch completion logic */
+ struct io_wq_work_list compl_reqs;
+ struct io_submit_link link;
+
+ bool plug_started;
+ bool need_plug;
+ bool cq_flush;
+ unsigned short submit_nr;
+ struct blk_plug plug;
+};
+
+struct io_alloc_cache {
+ void **entries;
+ unsigned int nr_cached;
+ unsigned int max_cached;
+ unsigned int elem_size;
+ unsigned int init_clear;
+};
+
+struct io_ring_ctx {
+ /* const or read-mostly hot data */
+ struct {
+ unsigned int flags;
+ unsigned int drain_next: 1;
+ unsigned int restricted: 1;
+ unsigned int off_timeout_used: 1;
+ unsigned int drain_active: 1;
+ unsigned int has_evfd: 1;
+ /* all CQEs should be posted only by the submitter task */
+ unsigned int task_complete: 1;
+ unsigned int lockless_cq: 1;
+ unsigned int syscall_iopoll: 1;
+ unsigned int poll_activated: 1;
+ unsigned int drain_disabled: 1;
+ unsigned int compat: 1;
+ unsigned int iowq_limits_set : 1;
+
+ struct task_struct *submitter_task;
+ struct io_rings *rings;
+ struct percpu_ref refs;
+
+ clockid_t clockid;
+ enum tk_offsets clock_offset;
+
+ enum task_work_notify_mode notify_method;
+ unsigned sq_thread_idle;
+ } ____cacheline_aligned_in_smp;
+
+ /* submission data */
+ struct {
+ struct mutex uring_lock;
+
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+ * mmapped by the application using the IORING_OFF_SQES offset.
+ *
+ * This indirection could e.g. be used to assign fixed
+ * io_uring_sqe entries to operations and only submit them to
+ * the queue when needed.
+ *
+ * The kernel modifies neither the indices array nor the entries
+ * array.
+ */
+ u32 *sq_array;
+ struct io_uring_sqe *sq_sqes;
+ unsigned cached_sq_head;
+ unsigned sq_entries;
+
+ /*
+ * Fixed resources fast path, should be accessed only under
+ * uring_lock, and updated through io_uring_register(2)
+ */
+ atomic_t cancel_seq;
+
+ /*
+ * ->iopoll_list is protected by the ctx->uring_lock for
+ * io_uring instances that don't use IORING_SETUP_SQPOLL.
+ * For SQPOLL, only the single threaded io_sq_thread() will
+ * manipulate the list, hence no extra locking is needed there.
+ */
+ bool poll_multi_queue;
+ struct io_wq_work_list iopoll_list;
+
+ struct io_file_table file_table;
+ struct io_rsrc_data buf_table;
+ struct io_alloc_cache node_cache;
+ struct io_alloc_cache imu_cache;
+
+ struct io_submit_state submit_state;
+
+ /*
+ * Modifications are protected by ->uring_lock and ->mmap_lock.
+ * The buffer list's io mapped region should be stable once
+ * published.
+ */
+ struct xarray io_bl_xa;
+
+ struct io_hash_table cancel_table;
+ struct io_alloc_cache apoll_cache;
+ struct io_alloc_cache netmsg_cache;
+ struct io_alloc_cache rw_cache;
+ struct io_alloc_cache cmd_cache;
+
+ /*
+ * Any cancelable uring_cmd is added to this list in
+ * ->uring_cmd() by io_uring_cmd_insert_cancelable()
+ */
+ struct hlist_head cancelable_uring_cmd;
+ /*
+ * For hybrid IOPOLL: runtime spent in hybrid polling, not
+ * including scheduling time
+ */
+ u64 hybrid_poll_time;
+ } ____cacheline_aligned_in_smp;
+
+ struct {
+ /*
+ * We cache a range of free CQEs we can use, once exhausted it
+ * should go through a slower range setup, see __io_get_cqe()
+ */
+ struct io_uring_cqe *cqe_cached;
+ struct io_uring_cqe *cqe_sentinel;
+
+ unsigned cached_cq_tail;
+ unsigned cq_entries;
+ struct io_ev_fd __rcu *io_ev_fd;
+
+ void *cq_wait_arg;
+ size_t cq_wait_size;
+ } ____cacheline_aligned_in_smp;
+
+ /*
+ * task_work and async notification delivery cacheline. Expected to
+ * regularly bounce b/w CPUs.
+ */
+ struct {
+ struct llist_head work_llist;
+ struct llist_head retry_llist;
+ unsigned long check_cq;
+ atomic_t cq_wait_nr;
+ atomic_t cq_timeouts;
+ struct wait_queue_head cq_wait;
+ } ____cacheline_aligned_in_smp;
+
+ /* timeouts */
+ struct {
+ raw_spinlock_t timeout_lock;
+ struct list_head timeout_list;
+ struct list_head ltimeout_list;
+ unsigned cq_last_tm_flush;
+ } ____cacheline_aligned_in_smp;
+
+ spinlock_t completion_lock;
+
+ struct list_head cq_overflow_list;
+
+ struct hlist_head waitid_list;
+
+#ifdef CONFIG_FUTEX
+ struct hlist_head futex_list;
+ struct io_alloc_cache futex_cache;
+#endif
+
+ const struct cred *sq_creds; /* cred used for __io_sq_thread() */
+ struct io_sq_data *sq_data; /* if using sq thread polling */
+
+ struct wait_queue_head sqo_sq_wait;
+ struct list_head sqd_list;
+
+ unsigned int file_alloc_start;
+ unsigned int file_alloc_end;
+
+ /* Keep this last, we don't need it for the fast path */
+ struct wait_queue_head poll_wq;
+ struct io_restriction restrictions;
+
+ /* Stores zcrx object pointers of type struct io_zcrx_ifq */
+ struct xarray zcrx_ctxs;
+
+ u32 pers_next;
+ struct xarray personalities;
+
+ /* hashed buffered write serialization */
+ struct io_wq_hash *hash_map;
+
+ /* Only used for accounting purposes */
+ struct user_struct *user;
+ struct mm_struct *mm_account;
+
+ /* ctx exit and cancelation */
+ struct llist_head fallback_llist;
+ struct delayed_work fallback_work;
+ struct work_struct exit_work;
+ struct list_head tctx_list;
+ struct completion ref_comp;
+
+ /* io-wq management, e.g. thread count */
+ u32 iowq_limits[2];
+
+ struct callback_head poll_wq_task_work;
+ struct list_head defer_list;
+ unsigned nr_drained;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ struct list_head napi_list; /* track busy poll napi_id */
+ spinlock_t napi_lock; /* napi_list lock */
+
+ /* napi busy poll default timeout */
+ ktime_t napi_busy_poll_dt;
+ bool napi_prefer_busy_poll;
+ u8 napi_track_mode;
+
+ DECLARE_HASHTABLE(napi_ht, 4);
+#endif
+
+ /* protected by ->completion_lock */
+ unsigned evfd_last_cq_tail;
+ unsigned nr_req_allocated;
+
+ /*
+ * Protection for resize vs mmap races - both the mmap and resize
+ * side will need to grab this lock, to prevent either side from
+ * being run concurrently with the other.
+ */
+ struct mutex mmap_lock;
+
+ struct io_mapped_region sq_region;
+ struct io_mapped_region ring_region;
+ /* used for optimised request parameter and wait argument passing */
+ struct io_mapped_region param_region;
+};
+
+/*
+ * Token indicating function is called in task work context:
+ * ctx->uring_lock is held and any completions generated will be flushed.
+ * ONLY core io_uring.c should instantiate this struct.
+ */
+struct io_tw_state {
+ bool cancel;
+};
+/* Alias to use in code that doesn't instantiate struct io_tw_state */
+typedef struct io_tw_state io_tw_token_t;
+
+enum {
+ REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
+ REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
+ REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
+ REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
+ REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
+ REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
+ REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
+
+ /* first byte is taken by user flags, shift it to not overlap */
+ REQ_F_FAIL_BIT = 8,
+ REQ_F_INFLIGHT_BIT,
+ REQ_F_CUR_POS_BIT,
+ REQ_F_NOWAIT_BIT,
+ REQ_F_LINK_TIMEOUT_BIT,
+ REQ_F_NEED_CLEANUP_BIT,
+ REQ_F_POLLED_BIT,
+ REQ_F_HYBRID_IOPOLL_STATE_BIT,
+ REQ_F_BUFFER_SELECTED_BIT,
+ REQ_F_BUFFER_RING_BIT,
+ REQ_F_REISSUE_BIT,
+ REQ_F_CREDS_BIT,
+ REQ_F_REFCOUNT_BIT,
+ REQ_F_ARM_LTIMEOUT_BIT,
+ REQ_F_ASYNC_DATA_BIT,
+ REQ_F_SKIP_LINK_CQES_BIT,
+ REQ_F_SINGLE_POLL_BIT,
+ REQ_F_DOUBLE_POLL_BIT,
+ REQ_F_MULTISHOT_BIT,
+ REQ_F_APOLL_MULTISHOT_BIT,
+ REQ_F_CLEAR_POLLIN_BIT,
+ /* keep async read/write and isreg together and in order */
+ REQ_F_SUPPORT_NOWAIT_BIT,
+ REQ_F_ISREG_BIT,
+ REQ_F_POLL_NO_LAZY_BIT,
+ REQ_F_CAN_POLL_BIT,
+ REQ_F_BL_EMPTY_BIT,
+ REQ_F_BL_NO_RECYCLE_BIT,
+ REQ_F_BUFFERS_COMMIT_BIT,
+ REQ_F_BUF_NODE_BIT,
+ REQ_F_HAS_METADATA_BIT,
+ REQ_F_IMPORT_BUFFER_BIT,
+ REQ_F_SQE_COPIED_BIT,
+
+ /* not a real bit, just to check we're not overflowing the space */
+ __REQ_F_LAST_BIT,
+};
+
+typedef u64 __bitwise io_req_flags_t;
+#define IO_REQ_FLAG(bitno) ((__force io_req_flags_t) BIT_ULL((bitno)))
+
+enum {
+ /* ctx owns file */
+ REQ_F_FIXED_FILE = IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
+ /* drain existing IO first */
+ REQ_F_IO_DRAIN = IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
+ /* linked sqes */
+ REQ_F_LINK = IO_REQ_FLAG(REQ_F_LINK_BIT),
+ /* doesn't sever on completion < 0 */
+ REQ_F_HARDLINK = IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
+ /* IOSQE_ASYNC */
+ REQ_F_FORCE_ASYNC = IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
+ /* IOSQE_BUFFER_SELECT */
+ REQ_F_BUFFER_SELECT = IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
+ /* IOSQE_CQE_SKIP_SUCCESS */
+ REQ_F_CQE_SKIP = IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),
+
+ /* fail rest of links */
+ REQ_F_FAIL = IO_REQ_FLAG(REQ_F_FAIL_BIT),
+ /* on inflight list, should be cancelled and waited on exit reliably */
+ REQ_F_INFLIGHT = IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
+ /* read/write uses file position */
+ REQ_F_CUR_POS = IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
+ /* must not punt to workers */
+ REQ_F_NOWAIT = IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
+ /* has or had linked timeout */
+ REQ_F_LINK_TIMEOUT = IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
+ /* needs cleanup */
+ REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
+ /* already went through poll handler */
+ REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
+ /* every req only blocks once in hybrid poll */
+ REQ_F_IOPOLL_STATE = IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
+ /* buffer already selected */
+ REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
+ /* buffer selected from ring, needs commit */
+ REQ_F_BUFFER_RING = IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
+ /* caller should reissue async */
+ REQ_F_REISSUE = IO_REQ_FLAG(REQ_F_REISSUE_BIT),
+ /* supports async reads/writes */
+ REQ_F_SUPPORT_NOWAIT = IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
+ /* regular file */
+ REQ_F_ISREG = IO_REQ_FLAG(REQ_F_ISREG_BIT),
+ /* has creds assigned */
+ REQ_F_CREDS = IO_REQ_FLAG(REQ_F_CREDS_BIT),
+ /* skip refcounting if not set */
+ REQ_F_REFCOUNT = IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
+ /* there is a linked timeout that has to be armed */
+ REQ_F_ARM_LTIMEOUT = IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
+ /* ->async_data allocated */
+ REQ_F_ASYNC_DATA = IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
+ /* don't post CQEs while failing linked requests */
+ REQ_F_SKIP_LINK_CQES = IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
+ /* single poll may be active */
+ REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
+ /* double poll may be active */
+ REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
+ /* request posts multiple completions, should be set at prep time */
+ REQ_F_MULTISHOT = IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
+ /* fast poll multishot mode */
+ REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
+ /* recvmsg special flag, clear EPOLLIN */
+ REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
+ /* don't use lazy poll wake for this request */
+ REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
+ /* file is pollable */
+ REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
+ /* buffer list was empty after selection of buffer */
+ REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
+ /* don't recycle provided buffers for this request */
+ REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
+ /* buffer ring head needs incrementing on put */
+ REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
+ /* buf node is valid */
+ REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
+ /* request has read/write metadata assigned */
+ REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
+ /*
+ * For vectored fixed buffers, resolve iovec to registered buffers.
+ * For SEND_ZC, whether to import buffers (i.e. the first issue).
+ */
+ REQ_F_IMPORT_BUFFER = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
+ /* ->sqe_copy() has been called, if necessary */
+ REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
+};
+
+struct io_tw_req {
+ struct io_kiocb *req;
+};
+
+typedef void (*io_req_tw_func_t)(struct io_tw_req tw_req, io_tw_token_t tw);
+
+struct io_task_work {
+ struct llist_node node;
+ io_req_tw_func_t func;
+};
+
+struct io_cqe {
+ __u64 user_data;
+ __s32 res;
+ /* fd initially, then cflags for completion */
+ union {
+ __u32 flags;
+ int fd;
+ };
+};
+
+/*
+ * Each request type overlays its private data structure on top of this one.
+ * They must not exceed this one in size.
+ */
+struct io_cmd_data {
+ struct file *file;
+ /* each command gets 56 bytes of data */
+ __u8 data[56];
+};
+
+static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
+}
+#define io_kiocb_to_cmd(req, cmd_type) ( \
+ io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
+ ((cmd_type *)&(req)->cmd) \
+)
+
+static inline struct io_kiocb *cmd_to_io_kiocb(void *ptr)
+{
+ return ptr;
+}
+
+struct io_kiocb {
+ union {
+ /*
+ * NOTE! Each of the io_kiocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'file' in this struct.
+ */
+ struct file *file;
+ struct io_cmd_data cmd;
+ };
+
+ u8 opcode;
+ /* polled IO has completed */
+ u8 iopoll_completed;
+ /*
+ * Can be either a fixed buffer index, or used with provided buffers.
+ * For the latter, it points to the selected buffer ID.
+ */
+ u16 buf_index;
+
+ unsigned nr_tw;
+
+ /* REQ_F_* flags */
+ io_req_flags_t flags;
+
+ struct io_cqe cqe;
+
+ struct io_ring_ctx *ctx;
+ struct io_uring_task *tctx;
+
+ union {
+ /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
+ struct io_buffer *kbuf;
+
+ struct io_rsrc_node *buf_node;
+ };
+
+ union {
+ /* used by request caches, completion batching and iopoll */
+ struct io_wq_work_node comp_list;
+ /* cache ->apoll->events */
+ __poll_t apoll_events;
+ };
+
+ struct io_rsrc_node *file_node;
+
+ atomic_t refs;
+ bool cancel_seq_set;
+ struct io_task_work io_task_work;
+ union {
+ /*
+ * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
+ * poll
+ */
+ struct hlist_node hash_node;
+ /* For IOPOLL setup queues, with hybrid polling */
+ u64 iopoll_start;
+ /* for private io_kiocb freeing */
+ struct rcu_head rcu_head;
+ };
+ /* internal polling, see IORING_FEAT_FAST_POLL */
+ struct async_poll *apoll;
+ /* opcode allocated if it needs to store data for async defer */
+ void *async_data;
+ /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ atomic_t poll_refs;
+ struct io_kiocb *link;
+ /* custom credentials, valid IFF REQ_F_CREDS is set */
+ const struct cred *creds;
+ struct io_wq_work work;
+
+ struct io_big_cqe {
+ u64 extra1;
+ u64 extra2;
+ } big_cqe;
+};
+
+struct io_overflow_cqe {
+ struct list_head list;
+ struct io_uring_cqe cqe;
+};
+#endif
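As the io_rings comments above note, head and tail are free-running u32
counters that must be masked before indexing the CQE array. A minimal
kernel-style sketch of the consumer rule (the function name is illustrative):

	/* Sketch: peek the next CQE, or NULL when the CQ ring is empty. */
	static struct io_uring_cqe *example_peek_cqe(struct io_rings *rings)
	{
		u32 head = rings->cq.head;			/* consumer-owned */
		u32 tail = smp_load_acquire(&rings->cq.tail);	/* producer-owned */

		if (head == tail)
			return NULL;
		return &rings->cqes[head & rings->cq_ring_mask];
	}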
diff --git a/include/linux/ioam6.h b/include/linux/ioam6.h
new file mode 100644
index 000000000000..94a24b36998f
--- /dev/null
+++ b/include/linux/ioam6.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_H
+#define _LINUX_IOAM6_H
+
+#include <uapi/linux/ioam6.h>
+
+#endif /* _LINUX_IOAM6_H */
diff --git a/include/linux/ioam6_genl.h b/include/linux/ioam6_genl.h
new file mode 100644
index 000000000000..176e67919de3
--- /dev/null
+++ b/include/linux/ioam6_genl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Generic Netlink API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_GENL_H
+#define _LINUX_IOAM6_GENL_H
+
+#include <uapi/linux/ioam6_genl.h>
+
+#endif /* _LINUX_IOAM6_GENL_H */
diff --git a/include/linux/ioam6_iptunnel.h b/include/linux/ioam6_iptunnel.h
new file mode 100644
index 000000000000..07d9dfedd29d
--- /dev/null
+++ b/include/linux/ioam6_iptunnel.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Lightweight Tunnel API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_IPTUNNEL_H
+#define _LINUX_IOAM6_IPTUNNEL_H
+
+#include <uapi/linux/ioam6_iptunnel.h>
+
+#endif /* _LINUX_IOAM6_IPTUNNEL_H */
diff --git a/include/linux/ioc3.h b/include/linux/ioc3.h
deleted file mode 100644
index 38b286e9a46c..000000000000
--- a/include/linux/ioc3.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org>
- */
-
-#ifndef _LINUX_IOC3_H
-#define _LINUX_IOC3_H
-
-#include <asm/sn/ioc3.h>
-
-#define IOC3_MAX_SUBMODULES 32
-
-#define IOC3_CLASS_NONE 0
-#define IOC3_CLASS_BASE_IP27 1
-#define IOC3_CLASS_BASE_IP30 2
-#define IOC3_CLASS_MENET_123 3
-#define IOC3_CLASS_MENET_4 4
-#define IOC3_CLASS_CADDUO 5
-#define IOC3_CLASS_SERIAL 6
-
-/* One of these per IOC3 */
-struct ioc3_driver_data {
- struct list_head list;
- int id; /* IOC3 sequence number */
- /* PCI mapping */
- unsigned long pma; /* physical address */
- struct ioc3 __iomem *vma; /* pointer to registers */
- struct pci_dev *pdev; /* PCI device */
- /* IRQ stuff */
- int dual_irq; /* set if separate IRQs are used */
- int irq_io, irq_eth; /* IRQ numbers */
- /* GPIO magic */
- spinlock_t gpio_lock;
- unsigned int gpdr_shadow;
- /* NIC identifiers */
- char nic_part[32];
- char nic_serial[16];
- char nic_mac[6];
- /* submodule set */
- int class;
- void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */
- int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */
- /* is_ir_lock must be held while
- * modifying sio_ie values, so
- * we can be sure that sio_ie is
- * not changing when we read it
- * along with sio_ir.
- */
- spinlock_t ir_lock; /* SIO_IE[SC] mod lock */
-};
-
-/* One per submodule */
-struct ioc3_submodule {
- char *name; /* descriptive submodule name */
- struct module *owner; /* owning kernel module */
- int ethernet; /* set for ethernet drivers */
- int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *);
- int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *);
- int id; /* assigned by IOC3, index for the "data" array */
- /* IRQ stuff */
- unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */
- int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */
- int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
- /* private submodule data */
- void *data; /* assigned by submodule */
-};
-
-/**********************************
- * Functions needed by submodules *
- **********************************/
-
-#define IOC3_W_IES 0
-#define IOC3_W_IEC 1
-
-/* registers a submodule for all existing and future IOC3 chips */
-extern int ioc3_register_submodule(struct ioc3_submodule *);
-/* unregisters a submodule */
-extern void ioc3_unregister_submodule(struct ioc3_submodule *);
-/* enables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* ackowledges specified IRQs */
-extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* disables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* atomically sets GPCR bits */
-extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int);
-/* general ireg writer */
-extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg);
-
-#endif
diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h
deleted file mode 100644
index 51e2b9fb6372..000000000000
--- a/include/linux/ioc4.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-#ifndef _LINUX_IOC4_H
-#define _LINUX_IOC4_H
-
-#include <linux/interrupt.h>
-
-/***************
- * Definitions *
- ***************/
-
-/* Miscellaneous values inherent to hardware */
-
-#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */
-
-/***********************************
- * Structures needed by subdrivers *
- ***********************************/
-
-/* This structure fully describes the IOC4 miscellaneous registers which
- * appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding
- * PCI resource is managed by the main IOC4 driver because it contains
- * registers of interest to many different IOC4 subdrivers.
- */
-struct ioc4_misc_regs {
- /* Miscellaneous IOC4 registers */
- union ioc4_pci_err_addr_l {
- uint32_t raw;
- struct {
- uint32_t valid:1; /* Address captured */
- uint32_t master_id:4; /* Unit causing error
- * 0/1: Serial port 0 TX/RX
- * 2/3: Serial port 1 TX/RX
- * 4/5: Serial port 2 TX/RX
- * 6/7: Serial port 3 TX/RX
- * 8: ATA/ATAPI
- * 9-15: Undefined
- */
- uint32_t mul_err:1; /* Multiple errors occurred */
- uint32_t addr:26; /* Bits 31-6 of error addr */
- } fields;
- } pci_err_addr_l;
- uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */
- union ioc4_sio_int {
- uint32_t raw;
- struct {
- uint8_t tx_mt:1; /* TX ring buffer empty */
- uint8_t rx_full:1; /* RX ring buffer full */
- uint8_t rx_high:1; /* RX high-water exceeded */
- uint8_t rx_timer:1; /* RX timer has triggered */
- uint8_t delta_dcd:1; /* DELTA_DCD seen */
- uint8_t delta_cts:1; /* DELTA_CTS seen */
- uint8_t intr_pass:1; /* Interrupt pass-through */
- uint8_t tx_explicit:1; /* TX, MCW, or delay complete */
- } fields[4];
- } sio_ir; /* Serial interrupt state */
- union ioc4_other_int {
- uint32_t raw;
- struct {
- uint32_t ata_int:1; /* ATA port passthru */
- uint32_t ata_memerr:1; /* ATA halted by mem error */
- uint32_t memerr:4; /* Serial halted by mem err */
- uint32_t kbd_int:1; /* kbd/mouse intr asserted */
- uint32_t reserved:16; /* zero */
- uint32_t rt_int:1; /* INT_OUT section latch */
- uint32_t gen_int:8; /* Intr. from generic pins */
- } fields;
- } other_ir; /* Other interrupt state */
- union ioc4_sio_int sio_ies; /* Serial interrupt enable set */
- union ioc4_other_int other_ies; /* Other interrupt enable set */
- union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */
- union ioc4_other_int other_iec; /* Other interrupt enable clear */
- union ioc4_sio_cr {
- uint32_t raw;
- struct {
- uint32_t cmd_pulse:4; /* Bytebus strobe width */
- uint32_t arb_diag:3; /* PCI bus requester */
- uint32_t sio_diag_idle:1; /* Active ser req? */
- uint32_t ata_diag_idle:1; /* Active ATA req? */
- uint32_t ata_diag_active:1; /* ATA req is winner */
- uint32_t reserved:22; /* zero */
- } fields;
- } sio_cr;
- uint32_t unused1;
- union ioc4_int_out {
- uint32_t raw;
- struct {
- uint32_t count:16; /* Period control */
- uint32_t mode:3; /* Output signal shape */
- uint32_t reserved:11; /* zero */
- uint32_t diag:1; /* Timebase control */
- uint32_t int_out:1; /* Current value */
- } fields;
- } int_out; /* External interrupt output control */
- uint32_t unused2;
- union ioc4_gpcr {
- uint32_t raw;
- struct {
- uint32_t dir:8; /* Pin direction */
- uint32_t edge:8; /* Edge/level mode */
- uint32_t reserved1:4; /* zero */
- uint32_t int_out_en:1; /* INT_OUT enable */
- uint32_t reserved2:11; /* zero */
- } fields;
- } gpcr_s; /* Generic PIO control set */
- union ioc4_gpcr gpcr_c; /* Generic PIO control clear */
- union ioc4_gpdr {
- uint32_t raw;
- struct {
- uint32_t gen_pin:8; /* State of pins */
- uint32_t reserved:24;
- } fields;
- } gpdr; /* Generic PIO data */
- uint32_t unused3;
- union ioc4_gppr {
- uint32_t raw;
- struct {
- uint32_t gen_pin:1; /* Single pin state */
- uint32_t reserved:31;
- } fields;
- } gppr[8]; /* Generic PIO pins */
-};
-
-/* Masks for GPCR DIR pins */
-#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */
-#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */
-#define IOC4_GPCR_DIR_2 0x04
-#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */
-#define IOC4_GPCR_DIR_4 0x10 /* Ser. port 0 xcvr select (0=232, 1=422) */
-#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */
-#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */
-#define IOC4_GPCR_DIR_7 0x80 /* Ser. port 3 xcvr select (0=232, 1=422) */
-
-/* Masks for GPCR EDGE pins */
-#define IOC4_GPCR_EDGE_0 0x01
-#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */
-#define IOC4_GPCR_EDGE_2 0x04
-#define IOC4_GPCR_EDGE_3 0x08
-#define IOC4_GPCR_EDGE_4 0x10
-#define IOC4_GPCR_EDGE_5 0x20
-#define IOC4_GPCR_EDGE_6 0x40
-#define IOC4_GPCR_EDGE_7 0x80
-
-#define IOC4_VARIANT_IO9 0x0900
-#define IOC4_VARIANT_PCI_RT 0x0901
-#define IOC4_VARIANT_IO10 0x1000
-
-/* One of these per IOC4 */
-struct ioc4_driver_data {
- struct list_head idd_list;
- unsigned long idd_bar0;
- struct pci_dev *idd_pdev;
- const struct pci_device_id *idd_pci_id;
- struct ioc4_misc_regs __iomem *idd_misc_regs;
- unsigned long count_period;
- void *idd_serial_data;
- unsigned int idd_variant;
-};
-
-/* One per submodule */
-struct ioc4_submodule {
- struct list_head is_list;
- char *is_name;
- struct module *is_owner;
- int (*is_probe) (struct ioc4_driver_data *);
- int (*is_remove) (struct ioc4_driver_data *);
-};
-
-#define IOC4_NUM_CARDS 8 /* max cards per partition */
-
-/**********************************
- * Functions needed by submodules *
- **********************************/
-
-extern int ioc4_register_submodule(struct ioc4_submodule *);
-extern void ioc4_unregister_submodule(struct ioc4_submodule *);
-
-#endif /* _LINUX_IOC4_H */
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index df38db2ef45b..079d8773790c 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOCONTEXT_H
#define IOCONTEXT_H
@@ -7,6 +8,7 @@
enum {
ICQ_EXITED = 1 << 2,
+ ICQ_DESTROYED = 1 << 3,
};
/*
@@ -97,61 +99,40 @@ struct io_cq {
struct io_context {
atomic_long_t refcount;
atomic_t active_ref;
- atomic_t nr_tasks;
-
- /* all the fields below are protected by this lock */
- spinlock_t lock;
unsigned short ioprio;
- /*
- * For request batching
- */
- int nr_batch_requests; /* Number of requests left in the batch */
- unsigned long last_waited; /* Time last woken after wait for request */
+#ifdef CONFIG_BLK_ICQ
+ /* all the fields below are protected by this lock */
+ spinlock_t lock;
struct radix_tree_root icq_tree;
struct io_cq __rcu *icq_hint;
struct hlist_head icq_list;
struct work_struct release_work;
+#endif /* CONFIG_BLK_ICQ */
};
-/**
- * get_io_context_active - get active reference on ioc
- * @ioc: ioc of interest
- *
- * Only iocs with active reference can issue new IOs. This function
- * acquires an active reference on @ioc. The caller must already have an
- * active reference on @ioc.
- */
-static inline void get_io_context_active(struct io_context *ioc)
-{
- WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
- WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
- atomic_long_inc(&ioc->refcount);
- atomic_inc(&ioc->active_ref);
-}
-
-static inline void ioc_task_link(struct io_context *ioc)
-{
- get_io_context_active(ioc);
-
- WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
- atomic_inc(&ioc->nr_tasks);
-}
-
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
-void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
-struct io_context *get_task_io_context(struct task_struct *task,
- gfp_t gfp_flags, int node);
+int __copy_io(u64 clone_flags, struct task_struct *tsk);
+static inline int copy_io(u64 clone_flags, struct task_struct *tsk)
+{
+ if (!current->io_context)
+ return 0;
+ return __copy_io(clone_flags, tsk);
+}
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
-#endif
+static inline int copy_io(u64 clone_flags, struct task_struct *tsk)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
-#endif
+#endif /* IOCONTEXT_H */
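copy_io() above follows the inline fast-path idiom: the common "no
io_context" case is decided inline at the call site, and only the rare
sharing/copying work pays for an out-of-line call. The same shape,
generically (names are illustrative):

	extern int __slow_path(struct task_struct *tsk);	/* out of line */

	static inline int fast_path(struct task_struct *tsk)
	{
		if (!current->io_context)	/* common case stays inline */
			return 0;
		return __slow_path(tsk);	/* rare case takes the call */
	}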
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index f64dc6ce5161..520e967cb501 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -1,47 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/blk_types.h>
+#include <linux/mm.h>
#include <linux/types.h>
+#include <linux/mm_types.h>
+#include <linux/blkdev.h>
+#include <linux/pagevec.h>
+struct address_space;
struct fiemap_extent_info;
struct inode;
+struct iomap_iter;
+struct iomap_dio;
+struct iomap_writepage_ctx;
+struct iomap_read_folio_ctx;
struct iov_iter;
struct kiocb;
+struct page;
struct vm_area_struct;
struct vm_fault;
/*
* Types of block ranges for iomap mappings:
*/
-#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
-#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
-#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */
-#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
+#define IOMAP_HOLE 0 /* no blocks allocated, need allocation */
+#define IOMAP_DELALLOC 1 /* delayed allocation blocks */
+#define IOMAP_MAPPED 2 /* blocks allocated at @addr */
+#define IOMAP_UNWRITTEN 3 /* blocks allocated at @addr in unwritten state */
+#define IOMAP_INLINE 4 /* data inline in the inode */
/*
- * Flags for all iomap mappings:
+ * Flags reported by the file system from iomap_begin:
+ *
+ * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
+ * zeroing for areas that no data is copied to.
+ *
+ * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
+ * written data and requires fdatasync to commit them to persistent storage.
+ * This needs to take into account metadata changes that *may* be made at IO
+ * completion, such as file size updates from direct IO.
+ *
+ * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
+ * unshared as part of a write.
+ *
+ * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block
+ * mappings.
+ *
+ * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
+ * buffer heads for this mapping.
+ *
+ * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
+ * rather than a file data extent.
+ *
+ * IOMAP_F_BOUNDARY indicates that I/O and I/O completions for this iomap must
+ * never be merged with the mapping before it.
+ *
+ * IOMAP_F_ANON_WRITE indicates that (write) I/O does not have a target block
+ * assigned to it yet and the file system will do that in the bio submission
+ * handler, splitting the I/O as needed.
+ *
+ * IOMAP_F_ATOMIC_BIO indicates that (write) I/O will be issued as an atomic
+ * bio, i.e. set REQ_ATOMIC.
*/
-#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
+#define IOMAP_F_NEW (1U << 0)
+#define IOMAP_F_DIRTY (1U << 1)
+#define IOMAP_F_SHARED (1U << 2)
+#define IOMAP_F_MERGED (1U << 3)
+#ifdef CONFIG_BUFFER_HEAD
+#define IOMAP_F_BUFFER_HEAD (1U << 4)
+#else
+#define IOMAP_F_BUFFER_HEAD 0
+#endif /* CONFIG_BUFFER_HEAD */
+#define IOMAP_F_XATTR (1U << 5)
+#define IOMAP_F_BOUNDARY (1U << 6)
+#define IOMAP_F_ANON_WRITE (1U << 7)
+#define IOMAP_F_ATOMIC_BIO (1U << 8)
/*
- * Flags that only need to be reported for IOMAP_REPORT requests:
+ * Flag reserved for file system specific usage
*/
-#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x20 /* block shared with another file */
+#define IOMAP_F_PRIVATE (1U << 12)
/*
- * Magic value for blkno:
+ * Flags set by the core iomap code during operations:
+ *
+ * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
+ * has changed as the result of this write operation.
+ *
+ * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file
+ * range it covers needs to be remapped by the high level before the operation
+ * can proceed.
*/
-#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
+#define IOMAP_F_SIZE_CHANGED (1U << 14)
+#define IOMAP_F_STALE (1U << 15)
+
+/*
+ * Magic value for addr:
+ */
+#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
struct iomap {
- sector_t blkno; /* 1st sector of mapping, 512b units */
+ u64 addr; /* disk offset of mapping, bytes */
loff_t offset; /* file offset of mapping, bytes */
u64 length; /* length of mapping, bytes */
u16 type; /* type of mapping */
u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
struct dax_device *dax_dev; /* dax_dev for dax operations */
+ void *inline_data;
+ void *private; /* filesystem private */
+ u64 validity_cookie; /* used with .iomap_valid() */
+};
+
+static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
+{
+ if (iomap->flags & IOMAP_F_ANON_WRITE)
+ return U64_MAX; /* invalid */
+ return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
+}
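A worked example of the byte-to-sector translation above, with illustrative
values:

	/*
	 * iomap->addr = 0x10000 (disk bytes), iomap->offset = 0x4000 (file
	 * bytes). For pos = 0x5000 the disk byte is
	 * 0x10000 + (0x5000 - 0x4000) = 0x11000, i.e. sector
	 * 0x11000 >> 9 = 0x88.
	 */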
+
+/*
+ * Returns the inline data pointer for logical offset @pos.
+ */
+static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
+{
+ return iomap->inline_data + pos - iomap->offset;
+}
+
+/*
+ * Check if the mapping's length is within the valid range for inline data.
+ * This is used to guard against accessing data beyond the page inline_data
+ * points at.
+ */
+static inline bool iomap_inline_data_valid(const struct iomap *iomap)
+{
+ return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
+}
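For instance, with 4096-byte pages and inline data starting 0x100 bytes into
its page, the mapping may describe at most 0x1000 - 0x100 = 0xf00 inline
bytes before it would cross the page boundary.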
+
+/*
+ * When get_folio succeeds, put_folio will always be called to do any
+ * cleanup work necessary. put_folio is responsible for unlocking and putting
+ * @folio.
+ */
+struct iomap_write_ops {
+ struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
+ unsigned len);
+ void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
+ struct folio *folio);
+
+ /*
+ * Check that the cached iomap still maps correctly to the filesystem's
+ * internal extent map. FS internal extent maps can change while iomap
+ * is iterating a cached iomap, so this hook allows iomap to detect that
+ * the iomap needs to be refreshed during a long running write
+ * operation.
+ *
+ * The filesystem can store internal state (e.g. a sequence number) in
+ * iomap->validity_cookie when the iomap is first mapped to be able to
+ * detect changes between mapping time and whenever .iomap_valid() is
+ * called.
+ *
+ * This is called with the folio over the specified file position held
+ * locked by the iomap code.
+ */
+ bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
+
+ /*
+ * Optional if the filesystem wishes to provide a custom handler for
+ * reading in the contents of a folio, otherwise iomap will default to
+ * submitting a bio read request.
+ *
+ * The read must be done synchronously.
+ */
+ int (*read_folio_range)(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t len);
};
/*
@@ -52,7 +188,16 @@ struct iomap {
#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
-#define IOMAP_NOWAIT (1 << 5) /* Don't wait for writeback */
+#define IOMAP_NOWAIT (1 << 5) /* do not block */
+#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
+#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */
+#ifdef CONFIG_FS_DAX
+#define IOMAP_DAX (1 << 8) /* DAX mapping */
+#else
+#define IOMAP_DAX 0
+#endif /* CONFIG_FS_DAX */
+#define IOMAP_ATOMIC (1 << 9) /* torn-write protection */
+#define IOMAP_DONTCACHE (1 << 10)
struct iomap_ops {
/*
@@ -61,7 +206,8 @@ struct iomap_ops {
* The actual length is returned in iomap->length.
*/
int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, struct iomap *iomap);
+ unsigned flags, struct iomap *iomap,
+ struct iomap *srcmap);
/*
* Commit and/or unreserve space previous allocated using iomap_begin.
@@ -73,30 +219,395 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
+/**
+ * struct iomap_iter - Iterate through a range of a file
+ * @inode: Set at the start of the iteration and should not change.
+ * @pos: The current file position we are operating on. It is updated by
+ * calls to iomap_iter(). Treat as read-only in the body.
+ * @len: The remaining length of the file segment we're operating on.
+ * It is updated at the same time as @pos.
+ * @iter_start_pos: The original start pos for the current iomap. Used for
+ * incremental iter advance.
+ * @status: Status of the most recent iteration. Zero on success or a negative
+ * errno on error.
+ * @flags: Zero or more of the iomap_begin flags above.
+ * @iomap: Map describing the I/O iteration
+ * @srcmap: Source map for COW operations
+ * @fbatch: Optional batch of pre-looked-up folios to operate on
+ * @private: Caller-private data passed through to the operation
+ */
+struct iomap_iter {
+ struct inode *inode;
+ loff_t pos;
+ u64 len;
+ loff_t iter_start_pos;
+ int status;
+ unsigned flags;
+ struct iomap iomap;
+ struct iomap srcmap;
+ struct folio_batch *fbatch;
+ void *private;
+};
+
+int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
+int iomap_iter_advance(struct iomap_iter *iter, u64 count);
+
+/**
+ * iomap_length_trim - trimmed length of the current iomap iteration
+ * @iter: iteration structure
+ * @pos: File position to trim from.
+ * @len: Length of the mapping to trim to.
+ *
+ * Returns a trimmed length that the operation applies to for the current
+ * iteration.
+ */
+static inline u64 iomap_length_trim(const struct iomap_iter *iter, loff_t pos,
+ u64 len)
+{
+ u64 end = iter->iomap.offset + iter->iomap.length;
+
+ if (iter->srcmap.type != IOMAP_HOLE)
+ end = min(end, iter->srcmap.offset + iter->srcmap.length);
+ return min(len, end - pos);
+}
+
+/**
+ * iomap_length - length of the current iomap iteration
+ * @iter: iteration structure
+ *
+ * Returns the length that the operation applies to for the current iteration.
+ */
+static inline u64 iomap_length(const struct iomap_iter *iter)
+{
+ return iomap_length_trim(iter, iter->pos, iter->len);
+}
+
+/**
+ * iomap_iter_advance_full - advance by the full length of current map
+ */
+static inline int iomap_iter_advance_full(struct iomap_iter *iter)
+{
+ return iomap_iter_advance(iter, iomap_length(iter));
+}
+
+/**
+ * iomap_iter_srcmap - return the source map for the current iomap iteration
+ * @i: iteration structure
+ *
+ * Write operations on file systems with reflink support might require a
+ * source and a destination map. This function returns the source map
+ * for a given operation, which may or may not be identical to the destination
+ * map in &i->iomap.
+ */
+static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
+{
+ if (i->srcmap.type != IOMAP_HOLE)
+ return &i->srcmap;
+ return &i->iomap;
+}
+
+/*
+ * Return the file offset for the first unchanged block after a short write.
+ *
+ * If nothing was written, round @pos down to point at the first block in
+ * the range, else round up to include the partially written block.
+ */
+static inline loff_t iomap_last_written_block(struct inode *inode, loff_t pos,
+ ssize_t written)
+{
+ if (unlikely(!written))
+ return round_down(pos, i_blocksize(inode));
+ return round_up(pos + written, i_blocksize(inode));
+}
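Concretely, with a 4096-byte block size and pos = 5000: written = 0 rounds
down to byte 4096 (the start of the block containing pos), while
written = 2000 rounds 5000 + 2000 = 7000 up to 8192, so the partially
written final block stays inside the range treated as written.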
+
+/*
+ * Check if the range needs to be unshared for a FALLOC_FL_UNSHARE_RANGE
+ * operation.
+ *
+ * Don't bother with blocks that are not shared to start with, or mappings that
+ * cannot be shared, such as inline data, delalloc reservations, holes or
+ * unwritten extents.
+ *
+ * Note that we use srcmap directly instead of iomap_iter_srcmap as unsharing
+ * requires providing a separate source map, and the presence of one is a good
+ * indicator that unsharing is needed, unlike IOMAP_F_SHARED which can be set
+ * for any data that goes into the COW fork for XFS.
+ */
+static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
+{
+ return (iter->iomap.flags & IOMAP_F_SHARED) &&
+ iter->srcmap.type == IOMAP_MAPPED;
+}
+
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
- const struct iomap_ops *ops);
-int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
- const struct iomap_ops *ops);
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
+void iomap_read_folio(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx);
+void iomap_readahead(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx);
+bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
+bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
+int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops);
+loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
+ loff_t length);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
- bool *did_zero, const struct iomap_ops *ops);
+ bool *did_zero, const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops);
-int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops);
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
+ void *private);
+typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
+ struct iomap *iomap);
+void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+ loff_t end_byte, unsigned flags, struct iomap *iomap,
+ iomap_punch_t punch);
+
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len, const struct iomap_ops *ops);
+ u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
const struct iomap_ops *ops);
+sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
+ const struct iomap_ops *ops);
+
+/*
+ * Flags for iomap_ioend->io_flags.
+ */
+/* shared COW extent */
+#define IOMAP_IOEND_SHARED (1U << 0)
+/* unwritten extent */
+#define IOMAP_IOEND_UNWRITTEN (1U << 1)
+/* don't merge into previous ioend */
+#define IOMAP_IOEND_BOUNDARY (1U << 2)
+/* is direct I/O */
+#define IOMAP_IOEND_DIRECT (1U << 3)
+/* is DONTCACHE I/O */
+#define IOMAP_IOEND_DONTCACHE (1U << 4)
+
+/*
+ * Flags that if set on either ioend prevent the merge of two ioends.
+ * (IOMAP_IOEND_BOUNDARY also prevents merges, but only one-way)
+ */
+#define IOMAP_IOEND_NOMERGE_FLAGS \
+ (IOMAP_IOEND_SHARED | IOMAP_IOEND_UNWRITTEN | IOMAP_IOEND_DIRECT | \
+ IOMAP_IOEND_DONTCACHE)
+
+/*
+ * Structure for writeback I/O completions.
+ *
+ * File systems can split a bio generated by iomap. In that case the parent
+ * ioend it was split from is recorded in ioend->io_parent.
+ */
+struct iomap_ioend {
+ struct list_head io_list; /* next ioend in chain */
+ u16 io_flags; /* IOMAP_IOEND_* */
+ struct inode *io_inode; /* file being written to */
+ size_t io_size; /* size of the extent */
+ atomic_t io_remaining; /* completion defer count */
+ int io_error; /* stashed away status */
+ struct iomap_ioend *io_parent; /* parent for completions */
+ loff_t io_offset; /* offset in the file */
+ sector_t io_sector; /* start sector of ioend */
+ void *io_private; /* file system private data */
+ struct bio io_bio; /* MUST BE LAST! */
+};
+
+static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)
+{
+ return container_of(bio, struct iomap_ioend, io_bio);
+}
+
+struct iomap_writeback_ops {
+ /*
+ * Performs writeback on the passed in range
+ *
+ * Can map arbitrarily large regions, but we need to call into it at
+ * least once per folio to allow the file systems to synchronize with
+ * the write path that could be invalidating mappings.
+ *
+ * An existing mapping from a previous call to this method can be reused
+ * by the file system if it is still valid.
+ *
+ * If this succeeds, iomap_finish_folio_write() must be called once
+ * writeback completes for the range, regardless of whether the
+ * writeback succeeded or failed.
+ *
+ * Returns the number of bytes processed or a negative errno.
+ */
+ ssize_t (*writeback_range)(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 pos, unsigned int len,
+ u64 end_pos);
+
+ /*
+ * Submit a writeback context previously built up by ->writeback_range.
+ *
+ * Returns 0 if the context was successfully submitted, or a negative
+ * error code if not. If @error is non-zero a failure occurred, and
+ * the writeback context should be completed with an error.
+ */
+ int (*writeback_submit)(struct iomap_writepage_ctx *wpc, int error);
+};
+
+struct iomap_writepage_ctx {
+ struct iomap iomap;
+ struct inode *inode;
+ struct writeback_control *wbc;
+ const struct iomap_writeback_ops *ops;
+ u32 nr_folios; /* folios added to the ioend */
+ void *wb_ctx; /* pending writeback context */
+};
+
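
As a rough illustration of the contract above, a simple block-based filesystem might wire these ops up as follows. This is a hedged sketch, not from the patch: myfs_map_blocks() is a hypothetical helper, and the mapping-reuse test is simplified (a real implementation must also revalidate the mapping against racing invalidations):

	static ssize_t myfs_writeback_range(struct iomap_writepage_ctx *wpc,
			struct folio *folio, u64 pos, unsigned int len, u64 end_pos)
	{
		/* Build a new mapping unless the previous one still covers pos. */
		if (pos >= wpc->iomap.offset + wpc->iomap.length) {
			int error = myfs_map_blocks(wpc->inode, pos, len, &wpc->iomap);

			if (error)
				return error;
		}
		/* Add the dirty range to the pending ioend; bios are built here. */
		return iomap_add_to_ioend(wpc, folio, pos, end_pos, len);
	}

	static const struct iomap_writeback_ops myfs_writeback_ops = {
		.writeback_range	= myfs_writeback_range,
		.writeback_submit	= iomap_ioend_writeback_submit,
	};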
+struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio,
+ loff_t file_offset, u16 ioend_flags);
+struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
+ unsigned int max_len, bool is_append);
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
+void iomap_ioend_try_merge(struct iomap_ioend *ioend,
+ struct list_head *more_ioends);
+void iomap_sort_ioends(struct list_head *ioend_list);
+ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
+ loff_t pos, loff_t end_pos, unsigned int dirty_len);
+int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);
+
+void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
+ int error);
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+ size_t len);
+
+int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio);
+int iomap_writepages(struct iomap_writepage_ctx *wpc);
+
+struct iomap_read_folio_ctx {
+ const struct iomap_read_ops *ops;
+ struct folio *cur_folio;
+ struct readahead_control *rac;
+ void *read_ctx;
+};
+
+struct iomap_read_ops {
+ /*
+ * Read in a folio range.
+ *
+ * If this succeeds, iomap_finish_folio_read() must be called after the
+ * range is read in, regardless of whether the read succeeded or failed.
+ *
+ * Returns 0 on success or a negative error on failure.
+ */
+ int (*read_folio_range)(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t len);
+
+ /*
+ * Submit any pending read requests.
+ *
+ * This is optional.
+ */
+ void (*submit_read)(struct iomap_read_folio_ctx *ctx);
+};
/*
* Flags for direct I/O ->end_io:
*/
#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */
#define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */
-typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
- unsigned flags);
+
+struct iomap_dio_ops {
+ int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
+ unsigned flags);
+ void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
+ loff_t file_offset);
+
+ /*
+ * Filesystems wishing to attach private information to a direct io bio
+ * must provide a ->submit_io method that attaches the additional
+ * information to the bio and changes the ->bi_end_io callback to a
+ * custom function. This function should, at a minimum, perform any
+ * relevant post-processing of the bio and end with a call to
+ * iomap_dio_bio_end_io.
+ */
+ struct bio_set *bio_set;
+};
+
+/*
+ * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not
+ * synchronous.
+ */
+#define IOMAP_DIO_FORCE_WAIT (1 << 0)
+
+/*
+ * Do not allocate blocks or zero partial blocks, but instead fall back to
+ * the caller by returning -EAGAIN. Used to optimize direct I/O writes that
+ * are not aligned to the file system block size.
+ */
+#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1)
+
+/*
+ * When a page fault occurs, return a partial synchronous result and allow
+ * the caller to retry the rest of the operation after dealing with the page
+ * fault.
+ */
+#define IOMAP_DIO_PARTIAL (1 << 2)
+
+/*
+ * Ensure each bio is aligned to fs block size.
+ *
+ * Needed by filesystems that calculate or verify a checksum for each fs
+ * block, as they may not be able to handle bios that are not aligned to
+ * the fs block size.
+ */
+#define IOMAP_DIO_FSBLOCK_ALIGNED (1 << 3)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, iomap_dio_end_io_t end_io);
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ unsigned int dio_flags, void *private, size_t done_before);
+struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ unsigned int dio_flags, void *private, size_t done_before);
+ssize_t iomap_dio_complete(struct iomap_dio *dio);
+void iomap_dio_bio_end_io(struct bio *bio);
+
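With the expanded signature, a filesystem read path might call into the direct I/O machinery like this (a hedged sketch; myfs_iomap_ops is a hypothetical iomap_ops instance, and the simple case needs no iomap_dio_ops, flags, or private data):

	static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		if (iocb->ki_flags & IOCB_DIRECT)
			return iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL,
					    0, NULL, 0);
		return generic_file_read_iter(iocb, to);
	}
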
+#ifdef CONFIG_SWAP
+struct file;
+struct swap_info_struct;
+
+int iomap_swapfile_activate(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *pagespan,
+ const struct iomap_ops *ops);
+#else
+# define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO)
+#endif /* CONFIG_SWAP */
+
+extern struct bio_set iomap_ioend_bioset;
+
+#ifdef CONFIG_BLOCK
+extern const struct iomap_read_ops iomap_bio_read_ops;
+
+static inline void iomap_bio_read_folio(struct folio *folio,
+ const struct iomap_ops *ops)
+{
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &iomap_bio_read_ops,
+ .cur_folio = folio,
+ };
+
+ iomap_read_folio(ops, &ctx);
+}
+
+static inline void iomap_bio_readahead(struct readahead_control *rac,
+ const struct iomap_ops *ops)
+{
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &iomap_bio_read_ops,
+ .rac = rac,
+ };
+
+ iomap_readahead(ops, &ctx);
+}
+#endif /* CONFIG_BLOCK */
#endif /* LINUX_IOMAP_H */
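
For reference, the helpers above slot directly into a block-based filesystem's address_space_operations. A hedged sketch, with myfs_iomap_ops again hypothetical (note that iomap_read_folio() is void in this version, so .read_folio simply returns 0):

	static int myfs_read_folio(struct file *file, struct folio *folio)
	{
		iomap_bio_read_folio(folio, &myfs_iomap_ops);
		return 0;
	}

	static void myfs_readahead(struct readahead_control *rac)
	{
		iomap_bio_readahead(rac, &myfs_iomap_ops);
	}

	static const struct address_space_operations myfs_aops = {
		.read_folio		= myfs_read_folio,
		.readahead		= myfs_readahead,
		.release_folio		= iomap_release_folio,
		.invalidate_folio	= iomap_invalidate_folio,
		.dirty_folio		= iomap_dirty_folio,
		.is_partially_uptodate	= iomap_is_partially_uptodate,
	};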
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
deleted file mode 100644
index 376a27c9cc6a..000000000000
--- a/include/linux/iommu-common.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _LINUX_IOMMU_COMMON_H
-#define _LINUX_IOMMU_COMMON_H
-
-#include <linux/spinlock_types.h>
-#include <linux/device.h>
-#include <asm/page.h>
-
-#define IOMMU_POOL_HASHBITS 4
-#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
-#define IOMMU_ERROR_CODE (~(unsigned long) 0)
-
-struct iommu_pool {
- unsigned long start;
- unsigned long end;
- unsigned long hint;
- spinlock_t lock;
-};
-
-struct iommu_map_table {
- unsigned long table_map_base;
- unsigned long table_shift;
- unsigned long nr_pools;
- void (*lazy_flush)(struct iommu_map_table *);
- unsigned long poolsize;
- struct iommu_pool pools[IOMMU_NR_POOLS];
- u32 flags;
-#define IOMMU_HAS_LARGE_POOL 0x00000001
-#define IOMMU_NO_SPAN_BOUND 0x00000002
-#define IOMMU_NEED_FLUSH 0x00000004
- struct iommu_pool large_pool;
- unsigned long *map;
-};
-
-extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
- unsigned long num_entries,
- u32 table_shift,
- void (*lazy_flush)(struct iommu_map_table *),
- bool large_pool, u32 npools,
- bool skip_span_boundary_check);
-
-extern unsigned long iommu_tbl_range_alloc(struct device *dev,
- struct iommu_map_table *iommu,
- unsigned long npages,
- unsigned long *handle,
- unsigned long mask,
- unsigned int align_order);
-
-extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
- u64 dma_addr, unsigned long npages,
- unsigned long entry);
-
-#endif
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
new file mode 100644
index 000000000000..a92b3ff9b934
--- /dev/null
+++ b/include/linux/iommu-dma.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ *
+ * DMA operations that map physical memory through IOMMU.
+ */
+#ifndef _LINUX_IOMMU_DMA_H
+#define _LINUX_IOMMU_DMA_H
+
+#include <linux/dma-direction.h>
+
+#ifdef CONFIG_IOMMU_DMA
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return dev->dma_iommu;
+}
+#else
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_IOMMU_DMA */
+
+dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, unsigned long attrs);
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+unsigned long iommu_dma_get_merge_boundary(struct device *dev);
+size_t iommu_dma_opt_mapping_size(void);
+size_t iommu_dma_max_mapping_size(struct device *dev);
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs);
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
+ vunmap(vaddr);
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+
+#endif /* _LINUX_IOMMU_DMA_H */
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 86bdeffe43ad..74be34f3a20a 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -1,7 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IOMMU_HELPER_H
#define _LINUX_IOMMU_HELPER_H
-#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/types.h>
static inline unsigned long iommu_device_max_index(unsigned long size,
unsigned long offset,
@@ -13,9 +17,15 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
return size;
}
-extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
- unsigned long shift,
- unsigned long boundary_size);
+static inline int iommu_is_span_boundary(unsigned int index, unsigned int nr,
+ unsigned long shift, unsigned long boundary_size)
+{
+ BUG_ON(!is_power_of_2(boundary_size));
+
+ shift = (shift + index) & (boundary_size - 1);
+ return shift + nr > boundary_size;
+}
+
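For example, with boundary_size = 16 and shift = 0, an allocation of nr = 4 entries starting at index = 14 evaluates (0 + 14) & 15 = 14, and since 14 + 4 = 18 > 16 the range would straddle a 16-entry boundary, so iommu_is_span_boundary() returns true.
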
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
unsigned long shift,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 176f7569d874..8c66284a91a8 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_IOMMU_H
@@ -25,6 +13,8 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/iova_bitmap.h>
+#include <uapi/linux/iommufd.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -47,7 +37,122 @@ struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
+struct iommu_domain_ops;
+struct iommu_dirty_ops;
struct notifier_block;
+struct iommu_sva;
+struct iommu_dma_cookie;
+struct iommu_dma_msi_cookie;
+struct iommu_fault_param;
+struct iommufd_ctx;
+struct iommufd_viommu;
+struct msi_desc;
+struct msi_msg;
+
+#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */
+#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
+#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */
+#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */
+
+/* Generic fault types; can be expanded for IRQ remapping faults */
+enum iommu_fault_type {
+ IOMMU_FAULT_PAGE_REQ = 1, /* page request fault */
+};
+
+/**
+ * struct iommu_fault_page_request - Page Request data
+ * @flags: encodes whether the corresponding fields are valid and whether this
+ * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
+ * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
+ * must have the same PASID value as the page request. When it is clear,
+ * the page response should not have a PASID.
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
+ * @addr: page address
+ * @private_data: device-specific private information
+ */
+struct iommu_fault_page_request {
+#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
+#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
+#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 2)
+ u32 flags;
+ u32 pasid;
+ u32 grpid;
+ u32 perm;
+ u64 addr;
+ u64 private_data[2];
+};
+
+/**
+ * struct iommu_fault - Generic fault data
+ * @type: fault type from &enum iommu_fault_type
+ * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
+ */
+struct iommu_fault {
+ u32 type;
+ struct iommu_fault_page_request prm;
+};
+
+/**
+ * enum iommu_page_response_code - Return status of fault handlers
+ * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
+ * populated, retry the access. This is "Success" in PCI PRI.
+ * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
+ * this device if possible. This is "Response Failure" in PCI PRI.
+ * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
+ * access. This is "Invalid Request" in PCI PRI.
+ */
+enum iommu_page_response_code {
+ IOMMU_PAGE_RESP_SUCCESS = 0,
+ IOMMU_PAGE_RESP_INVALID,
+ IOMMU_PAGE_RESP_FAILURE,
+};
+
+/**
+ * struct iommu_page_response - Generic page response information
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @code: response code from &enum iommu_page_response_code
+ */
+struct iommu_page_response {
+ u32 pasid;
+ u32 grpid;
+ u32 code;
+};
+
+struct iopf_fault {
+ struct iommu_fault fault;
+ /* node for pending lists */
+ struct list_head list;
+};
+
+struct iopf_group {
+ struct iopf_fault last_fault;
+ struct list_head faults;
+ size_t fault_count;
+ /* list node for iommu_fault_param::faults */
+ struct list_head pending_node;
+ struct work_struct work;
+ struct iommu_attach_handle *attach_handle;
+ /* The device's fault data parameter. */
+ struct iommu_fault_param *fault_param;
+ /* Used by handler provider to hook the group on its own lists. */
+ struct list_head node;
+ u32 cookie;
+};
+
+/**
+ * struct iopf_queue - IO Page Fault queue
+ * @wq: the fault workqueue
+ * @devices: devices attached to this queue
+ * @lock: protects the device list
+ */
+struct iopf_queue {
+ struct workqueue_struct *wq;
+ struct list_head devices;
+ struct mutex lock;
+};
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
@@ -62,12 +167,29 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};
+enum iommu_domain_cookie_type {
+ IOMMU_COOKIE_NONE,
+ IOMMU_COOKIE_DMA_IOVA,
+ IOMMU_COOKIE_DMA_MSI,
+ IOMMU_COOKIE_FAULT_HANDLER,
+ IOMMU_COOKIE_SVA,
+ IOMMU_COOKIE_IOMMUFD,
+};
+
/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
+
+#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
+#define __IOMMU_DOMAIN_PLATFORM (1U << 5)
+#define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested
+ on a stage-2 translation */
+
+#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
* This are the possible domain-types
*
@@ -79,58 +201,88 @@ struct iommu_domain_geometry {
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
+ * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
+ * invalidation.
+ * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses
+ * represented by mm_struct's.
+ * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that do their own
+ * dma_api stuff. Do not use in new drivers.
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
+#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API | \
+ __IOMMU_DOMAIN_DMA_FQ)
+#define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA)
+#define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM)
+#define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED)
struct iommu_domain {
unsigned type;
- const struct iommu_ops *ops;
+ enum iommu_domain_cookie_type cookie_type;
+ const struct iommu_domain_ops *ops;
+ const struct iommu_dirty_ops *dirty_ops;
+ const struct iommu_ops *owner; /* Whose domain_alloc we came from */
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
- iommu_fault_handler_t handler;
- void *handler_token;
struct iommu_domain_geometry geometry;
- void *iova_cookie;
+ int (*iopf_handler)(struct iopf_group *group);
+
+ union { /* cookie */
+ struct iommu_dma_cookie *iova_cookie;
+ struct iommu_dma_msi_cookie *msi_cookie;
+ struct iommufd_hw_pagetable *iommufd_hwpt;
+ struct {
+ iommu_fault_handler_t handler;
+ void *handler_token;
+ };
+ struct { /* IOMMU_DOMAIN_SVA */
+ struct mm_struct *mm;
+ int users;
+ /*
+ * Next iommu_domain in mm->iommu_mm->sva-domains list
+ * protected by iommu_sva_lock.
+ */
+ struct list_head next;
+ };
+ };
};
+static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
+{
+ return domain->type & __IOMMU_DOMAIN_DMA_API;
+}
+
enum iommu_cap {
- IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
- transactions */
- IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
+ IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
-};
-
-/*
- * Following constraints are specifc to FSL_PAMUV1:
- * -aperture must be power of 2, and naturally aligned
- * -number of windows must be power of 2, and address space size
- * of each window is determined by aperture size / # of windows
- * -the actual size of the mapped region of a window must be power
- * of 2 starting with 4KB and physical address must be naturally
- * aligned.
- * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints.
- * The caller can invoke iommu_domain_get_attr to check if the underlying
- * iommu implementation supports these constraints.
- */
-
-enum iommu_attr {
- DOMAIN_ATTR_GEOMETRY,
- DOMAIN_ATTR_PAGING,
- DOMAIN_ATTR_WINDOWS,
- DOMAIN_ATTR_FSL_PAMU_STASH,
- DOMAIN_ATTR_FSL_PAMU_ENABLE,
- DOMAIN_ATTR_FSL_PAMUV1,
- DOMAIN_ATTR_NESTING, /* two stages of translation */
- DOMAIN_ATTR_MAX,
+ IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
+ DMA protection and we should too */
+ /*
+ * Per-device flag indicating if enforce_cache_coherency() will work on
+ * this device.
+ */
+ IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
+ /*
+ * IOMMU driver does not issue TLB maintenance during .unmap, so can
+ * usefully support the non-strict DMA flush queue.
+ */
+ IOMMU_CAP_DEFERRED_FLUSH,
+ IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */
};
/* These are the possible reserved region types */
enum iommu_resv_type {
/* Memory regions which must be mapped 1:1 at all times */
IOMMU_RESV_DIRECT,
+ /*
+ * Memory regions which are advertised to be 1:1 but are
+ * commonly considered relaxable in some conditions,
+ * for instance in device assignment use case (USB, Graphics)
+ */
+ IOMMU_RESV_DIRECT_RELAXABLE,
/* Arbitrary "never map this or give it to a device" address ranges */
IOMMU_RESV_RESERVED,
/* Hardware MSI region (untranslated) */
@@ -146,6 +298,7 @@ enum iommu_resv_type {
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
* @type: Type of the reserved region
+ * @free: Callback to free associated memory allocations
*/
struct iommu_resv_region {
struct list_head list;
@@ -153,80 +306,479 @@ struct iommu_resv_region {
size_t length;
int prot;
enum iommu_resv_type type;
+ void (*free)(struct device *dev, struct iommu_resv_region *region);
+};
+
+struct iommu_iort_rmr_data {
+ struct iommu_resv_region rr;
+
+ /* Stream IDs associated with IORT RMR entry */
+ const u32 *sids;
+ u32 num_sids;
+};
+
+#define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
+#define IOMMU_FIRST_GLOBAL_PASID (1U) /* starting range for allocation */
+#define IOMMU_PASID_INVALID (-1U)
+typedef unsigned int ioasid_t;
+
+/* Read but do not clear any dirty bits */
+#define IOMMU_DIRTY_NO_CLEAR (1 << 0)
+
+/*
+ * Pages allocated through iommu_alloc_pages_node_sz() can be placed on this
+ * list using iommu_pages_list_add(). Note: ONLY pages from
+ * iommu_alloc_pages_node_sz() can be used this way!
+ */
+struct iommu_pages_list {
+ struct list_head pages;
};
+#define IOMMU_PAGES_LIST_INIT(name) \
+ ((struct iommu_pages_list){ .pages = LIST_HEAD_INIT(name.pages) })
+
#ifdef CONFIG_IOMMU_API
/**
+ * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
+ *
+ * @start: IOVA representing the start of the range to be flushed
+ * @end: IOVA representing the end of the range to be flushed (inclusive)
+ * @pgsize: The interval at which to perform the flush
+ * @freelist: Removed pages to free after sync
+ * @queued: Indicates that the flush will be queued
+ *
+ * This structure is intended to be updated by multiple calls to the
+ * ->unmap() function in struct iommu_ops before eventually being passed
+ * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
+ * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
+ * them. @queued is set to indicate when ->iotlb_flush_all() will be called
+ * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
+ */
+struct iommu_iotlb_gather {
+ unsigned long start;
+ unsigned long end;
+ size_t pgsize;
+ struct iommu_pages_list freelist;
+ bool queued;
+};
+
+/**
+ * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
+ * @bitmap: IOVA bitmap
+ * @gather: Range information for a pending IOTLB flush
+ */
+struct iommu_dirty_bitmap {
+ struct iova_bitmap *bitmap;
+ struct iommu_iotlb_gather *gather;
+};
+
+/**
+ * struct iommu_dirty_ops - domain specific dirty tracking operations
+ * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
+ * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
+ * into a bitmap, with a bit represented as a page.
+ * Reads the dirty PTE bits and clears it from IO
+ * pagetables.
+ */
+struct iommu_dirty_ops {
+ int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
+ int (*read_and_clear_dirty)(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
+};
+
+/**
+ * struct iommu_user_data - iommu driver specific user space data info
+ * @type: The data type of the user buffer
+ * @uptr: Pointer to the user buffer for copy_from_user()
+ * @len: The length of the user buffer in bytes
+ *
+ * The user space data is a uAPI structure defined in
+ * include/uapi/linux/iommufd.h; @type, @uptr and @len should be copied
+ * directly from the iommufd core uAPI struct.
+ */
+struct iommu_user_data {
+ unsigned int type;
+ void __user *uptr;
+ size_t len;
+};
+
+/**
+ * struct iommu_user_data_array - iommu driver specific user space data array
+ * @type: The data type of all the entries in the user buffer array
+ * @uptr: Pointer to the user buffer array
+ * @entry_len: The fixed-width length of an entry in the array, in bytes
+ * @entry_num: The number of total entries in the array
+ *
+ * The user buffer includes an array of requests with format defined in
+ * include/uapi/linux/iommufd.h
+ */
+struct iommu_user_data_array {
+ unsigned int type;
+ void __user *uptr;
+ size_t entry_len;
+ u32 entry_num;
+};
+
+/**
+ * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @dst_data. Must match with @src_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user(
+ void *dst_data, const struct iommu_user_data *src_data,
+ unsigned int data_type, size_t data_len, size_t min_len)
+{
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
+ if (src_data->type != data_type)
+ return -EINVAL;
+ if (src_data->len < min_len || data_len < src_data->len)
+ return -EINVAL;
+ return copy_struct_from_user(dst_data, data_len, src_data->uptr,
+ src_data->len);
+}
+
+/**
+ * iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @kdst. Must match with @user_data->type
+ * @min_last: The last member of the data structure that @kdst points to in
+ * the initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
+ __iommu_copy_struct_from_user(kdst, user_data, data_type, \
+ sizeof(*kdst), \
+ offsetofend(typeof(*kdst), min_last))
+
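A consumer typically invokes the macro from one of the domain allocation paths. A hedged sketch follows, where struct iommu_hwpt_mydrv, IOMMU_HWPT_DATA_MYDRV and the trailing member 'last' stand in for a real uAPI struct and data type from include/uapi/linux/iommufd.h:

	static struct iommu_domain *
	mydrv_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
				  u32 flags, const struct iommu_user_data *user_data)
	{
		struct iommu_hwpt_mydrv arg;
		int ret;

		/* Version-tolerant copy: newer kernel-side fields are zero-filled. */
		ret = iommu_copy_struct_from_user(&arg, user_data,
						  IOMMU_HWPT_DATA_MYDRV, last);
		if (ret)
			return ERR_PTR(ret);

		/* ... allocate and initialize the nested domain from arg ... */
		return ERR_PTR(-EOPNOTSUPP);
	}
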
+/**
+ * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_array: Pointer to a struct iommu_user_data_array for a user space array
+ * @data_type: The data type of the @dst_data. Must match with @src_array.type
+ * @index: Index to the location in the array to copy user data from
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user_array(
+ void *dst_data, const struct iommu_user_data_array *src_array,
+ unsigned int data_type, unsigned int index, size_t data_len,
+ size_t min_len)
+{
+ struct iommu_user_data src_data;
+
+ if (WARN_ON(!src_array || index >= src_array->entry_num))
+ return -EINVAL;
+ if (!src_array->entry_num)
+ return -EINVAL;
+ src_data.uptr = src_array->uptr + src_array->entry_len * index;
+ src_data.len = src_array->entry_len;
+ src_data.type = src_array->type;
+
+ return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
+ data_len, min_len);
+}
+
+/**
+ * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ * array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ * @index: Index to the location in the array to copy user data from
+ * @min_last: The last member of the data structure that @kdst points to in
+ * the initial version.
+ *
+ * Copy a single entry from a user array. Return 0 for success, otherwise
+ * -error.
+ */
+#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
+ min_last) \
+ __iommu_copy_struct_from_user_array( \
+ kdst, user_array, data_type, index, sizeof(*(kdst)), \
+ offsetofend(typeof(*(kdst)), min_last))
+
+/**
+ * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
+ * space data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @kdst_entry_size: sizeof(*kdst)
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ * array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ *
+ * Copy the entire user array. kdst must have room for kdst_entry_size *
+ * user_array->entry_num bytes. Return 0 for success, otherwise -error.
+ */
+static inline int
+iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
+ struct iommu_user_data_array *user_array,
+ unsigned int data_type)
+{
+ unsigned int i;
+ int ret;
+
+ if (user_array->type != data_type)
+ return -EINVAL;
+ if (!user_array->entry_num)
+ return -EINVAL;
+ if (likely(user_array->entry_len == kdst_entry_size)) {
+ if (copy_from_user(kdst, user_array->uptr,
+ user_array->entry_num *
+ user_array->entry_len))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* Copy item by item */
+ for (i = 0; i != user_array->entry_num; i++) {
+ ret = copy_struct_from_user(
+ kdst + kdst_entry_size * i, kdst_entry_size,
+ user_array->uptr + user_array->entry_len * i,
+ user_array->entry_len);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * __iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @dst_data: Pointer to a struct iommu_user_data for user space data location
+ * @src_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @src_data. Must match with @dst_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _src)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int
+__iommu_copy_struct_to_user(const struct iommu_user_data *dst_data,
+ void *src_data, unsigned int data_type,
+ size_t data_len, size_t min_len)
+{
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
+ if (dst_data->type != data_type)
+ return -EINVAL;
+ if (dst_data->len < min_len || data_len < dst_data->len)
+ return -EINVAL;
+ return copy_struct_to_user(dst_data->uptr, dst_data->len, src_data,
+ data_len, NULL);
+}
+
+/**
+ * iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @user_data: Pointer to a struct iommu_user_data for user space data location
+ * @ksrc: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @ksrc. Must match with @user_data->type
+ * @min_last: The last member of the data structure that @ksrc points to in
+ * the initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_to_user(user_data, ksrc, data_type, min_last) \
+ __iommu_copy_struct_to_user(user_data, ksrc, data_type, sizeof(*ksrc), \
+ offsetofend(typeof(*ksrc), min_last))
+
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
- * @domain_alloc: allocate iommu domain
- * @domain_free: free iommu domain
- * @attach_dev: attach device to an iommu domain
- * @detach_dev: detach device from an iommu domain
- * @map: map a physically contiguous memory region to an iommu domain
- * @unmap: unmap a physically contiguous memory region from an iommu domain
- * @map_sg: map a scatter-gather list of physically contiguous memory chunks
- * to an iommu domain
- * @iova_to_phys: translate iova to physical address
- * @add_device: add device to iommu grouping
- * @remove_device: remove device from iommu grouping
+ * @hw_info: report iommu hardware information. The data buffer returned by this
+ * op is allocated in the iommu driver and freed by the caller after
+ * use. @type can input a requested type and output a supported type.
+ * Driver should reject an unsupported data @type input
+ * @domain_alloc: Do not use in new drivers
+ * @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to
+ * use identity_domain instead. This should only be used
+ * if dynamic logic is necessary.
+ * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
+ * input parameters as defined in
+ * include/uapi/linux/iommufd.h. The @user_data can be
+ * optionally provided, the new domain must support
+ * __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be
+ * returned.
+ * @domain_alloc_paging: Allocate an iommu_domain that can be used for
+ * UNMANAGED, DMA, and DMA_FQ domain types. This is the
+ * same as invoking domain_alloc_paging_flags() with
+ * @flags=0, @user_data=NULL. A driver should implement
+ * only one of the two ops.
+ * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
+ * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
+ * @probe_device: Add device to iommu driver handling
+ * @release_device: Remove device from iommu driver handling
+ * @probe_finalize: Do final setup work after the device is added to an IOMMU
+ * group and attached to the groups domain
* @device_group: find iommu group for a particular device
- * @domain_get_attr: Query domain attributes
- * @domain_set_attr: Change domain attributes
* @get_resv_regions: Request list of reserved regions for a device
- * @put_resv_regions: Free list of reserved regions for a device
- * @apply_resv_region: Temporary helper call-back for iova reserved ranges
- * @domain_window_enable: Configure and enable a particular window for a domain
- * @domain_window_disable: Disable a particular window for a domain
- * @domain_set_windows: Set the number of windows for a domain
- * @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
- * @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @is_attach_deferred: Check if domain attach should be deferred from iommu
+ * driver init to device driver init (default no)
+ * @page_response: handle page request response
+ * @def_domain_type: device default domain type, return value:
+ * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
+ * - IOMMU_DOMAIN_DMA: must use a dma domain
+ * - 0: use the default setting
+ * @default_domain_ops: the default ops for domains
+ * @get_viommu_size: Get the size of a driver-level vIOMMU structure for a given
+ * @dev corresponding to @viommu_type. The driver should return
+ * 0 if the vIOMMU type isn't supported. The driver is required
+ * to use the VIOMMU_STRUCT_SIZE macro to sanitize its
+ * driver-level vIOMMU structure in relation to the core one
+ * @viommu_init: Init the driver-level struct of an iommufd_viommu on a physical
+ * IOMMU instance @viommu->iommu_dev, as the set of virtualization
+ * resources shared with/passed to the user space IOMMU instance.
+ * Associate it with a nesting @parent_domain. The driver is
+ * required to set @viommu->ops to point at its own viommu_ops
+ * @owner: Driver module providing these ops
+ * @identity_domain: An always available, always attachable identity
+ * translation.
+ * @blocked_domain: An always available, always attachable blocking
+ * translation.
+ * @default_domain: If not NULL this will always be set as the default domain.
+ * This should be an IDENTITY/BLOCKED/PLATFORM domain.
+ * Do not use in new drivers.
+ * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
+ * no user domain for each PASID and the I/O page faults are
+ * forwarded through the user domain attached to the device
+ * RID.
*/
struct iommu_ops {
- bool (*capable)(enum iommu_cap);
+ bool (*capable)(struct device *dev, enum iommu_cap);
+ void *(*hw_info)(struct device *dev, u32 *length,
+ enum iommu_hw_info_type *type);
/* Domain allocation and freeing by the iommu driver */
+#if IS_ENABLED(CONFIG_FSL_PAMU)
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
- void (*domain_free)(struct iommu_domain *);
-
- int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
- void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
- int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
- size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
- size_t size);
- size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot);
- phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
- int (*add_device)(struct device *dev);
- void (*remove_device)(struct device *dev);
+#endif
+ struct iommu_domain *(*domain_alloc_identity)(struct device *dev);
+ struct iommu_domain *(*domain_alloc_paging_flags)(
+ struct device *dev, u32 flags,
+ const struct iommu_user_data *user_data);
+ struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
+ struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
+ struct mm_struct *mm);
+ struct iommu_domain *(*domain_alloc_nested)(
+ struct device *dev, struct iommu_domain *parent, u32 flags,
+ const struct iommu_user_data *user_data);
+
+ struct iommu_device *(*probe_device)(struct device *dev);
+ void (*release_device)(struct device *dev);
+ void (*probe_finalize)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
- int (*domain_get_attr)(struct iommu_domain *domain,
- enum iommu_attr attr, void *data);
- int (*domain_set_attr)(struct iommu_domain *domain,
- enum iommu_attr attr, void *data);
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
- void (*put_resv_regions)(struct device *dev, struct list_head *list);
- void (*apply_resv_region)(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region);
-
- /* Window handling functions */
- int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size, int prot);
- void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
- /* Set the number of windows per domain */
- int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
- /* Get the number of windows per domain */
- u32 (*domain_get_windows)(struct iommu_domain *domain);
-
- int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
-
- unsigned long pgsize_bitmap;
+
+ int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
+ bool (*is_attach_deferred)(struct device *dev);
+
+ /* Per device IOMMU features */
+ void (*page_response)(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg);
+
+ int (*def_domain_type)(struct device *dev);
+
+ size_t (*get_viommu_size)(struct device *dev,
+ enum iommu_viommu_type viommu_type);
+ int (*viommu_init)(struct iommufd_viommu *viommu,
+ struct iommu_domain *parent_domain,
+ const struct iommu_user_data *user_data);
+
+ const struct iommu_domain_ops *default_domain_ops;
+ struct module *owner;
+ struct iommu_domain *identity_domain;
+ struct iommu_domain *blocked_domain;
+ struct iommu_domain *release_domain;
+ struct iommu_domain *default_domain;
+ u8 user_pasid_table:1;
+};
+
+/**
+ * struct iommu_domain_ops - domain specific operations
+ * @attach_dev: attach an iommu domain to a device
+ * Return:
+ * * 0 - success
+ * * EINVAL - can indicate that device and domain are incompatible due to
+ * some previous configuration of the domain, in which case the
+ * driver shouldn't log an error, since it is legitimate for a
+ * caller to test reuse of existing domains. Otherwise, it may
+ * still represent some other fundamental problem
+ * * ENOMEM - out of memory
+ * * ENOSPC - non-ENOMEM type of resource allocation failures
+ * * EBUSY - device is attached to a domain and cannot be changed
+ * * ENODEV - device specific errors, not able to be attached
+ * * <others> - treated as ENODEV by the caller. Use is discouraged
+ * @set_dev_pasid: set or replace an iommu domain for a pasid of the device.
+ * On error, the pasid of the device should be left in its old
+ * configuration.
+ * @map_pages: map a physically contiguous set of pages of the same size to
+ * an iommu domain.
+ * @unmap_pages: unmap a number of pages of the same size from an iommu domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
+ * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * queue
+ * @cache_invalidate_user: Flush hardware cache for user space IO page table.
+ * The @domain must be IOMMU_DOMAIN_NESTED. The @array
+ * passes in the cache invalidation requests, in form
+ * of a driver data structure. The driver must update
+ * array->entry_num to report the number of handled
+ * invalidation requests. The driver data structure
+ * must be defined in include/uapi/linux/iommufd.h
+ * @iova_to_phys: translate iova to physical address
+ * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
+ * including no-snoop TLPs on PCIe or other platform
+ * specific mechanisms.
+ * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
+ * @free: Release the domain after use.
+ */
+struct iommu_domain_ops {
+ int (*attach_dev)(struct iommu_domain *domain, struct device *dev,
+ struct iommu_domain *old);
+ int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid, struct iommu_domain *old);
+
+ int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
+ size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+ int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather);
+ int (*cache_invalidate_user)(struct iommu_domain *domain,
+ struct iommu_user_data_array *array);
+
+ phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
+ dma_addr_t iova);
+
+ bool (*enforce_cache_coherency)(struct iommu_domain *domain);
+ int (*set_pgtable_quirks)(struct iommu_domain *domain,
+ unsigned long quirks);
+
+ void (*free)(struct iommu_domain *domain);
};
/**
@@ -235,15 +787,77 @@ struct iommu_ops {
* @list: Used by the iommu-core to keep a list of registered iommus
* @ops: iommu-ops for talking to this iommu
* @dev: struct device for sysfs handling
+ * @singleton_group: Used internally for drivers that have only one group
+ * @max_pasids: number of supported PASIDs
+ * @ready: set once iommu_device_register() has completed successfully
*/
struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
struct device *dev;
+ struct iommu_group *singleton_group;
+ u32 max_pasids;
+ bool ready;
+};
+
+/**
+ * struct iommu_fault_param - per-device IOMMU fault data
+ * @lock: protect pending faults list
+ * @users: user counter to manage the lifetime of the data
+ * @rcu: rcu head for kfree_rcu()
+ * @dev: the device that owns this param
+ * @queue: IOPF queue
+ * @queue_list: index into queue->devices
+ * @partial: faults that are part of a Page Request Group for which the last
+ * request hasn't been submitted yet.
+ * @faults: holds the pending faults which need response
+ */
+struct iommu_fault_param {
+ struct mutex lock;
+ refcount_t users;
+ struct rcu_head rcu;
+
+ struct device *dev;
+ struct iopf_queue *queue;
+ struct list_head queue_list;
+
+ struct list_head partial;
+ struct list_head faults;
};
-int iommu_device_register(struct iommu_device *iommu);
+/**
+ * struct dev_iommu - Collection of per-device IOMMU data
+ *
+ * @fault_param: IOMMU detected device fault reporting data
+ * @fwspec: IOMMU fwspec data
+ * @iommu_dev: IOMMU device this device is linked to
+ * @priv: IOMMU Driver private data
+ * @max_pasids: number of PASIDs this device can consume
+ * @attach_deferred: the dma domain attachment is deferred
+ * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
+ * @require_direct: device requires IOMMU_RESV_DIRECT regions
+ * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
+ *
+ * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
+ * struct iommu_group *iommu_group;
+ */
+struct dev_iommu {
+ struct mutex lock;
+ struct iommu_fault_param __rcu *fault_param;
+ struct iommu_fwspec *fwspec;
+ struct iommu_device *iommu_dev;
+ void *priv;
+ u32 max_pasids;
+ u32 attach_deferred:1;
+ u32 pci_32bit_workaround:1;
+ u32 require_direct:1;
+ u32 shadow_on_flush:1;
+};
+
+int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops,
+ struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
struct device *parent,
@@ -252,59 +866,77 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
+int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
- iommu->ops = ops;
+ return (struct iommu_device *)dev_get_drvdata(dev);
}
-static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
- struct fwnode_handle *fwnode)
+/**
+ * iommu_get_iommu_dev - Get iommu_device for a device
+ * @dev: an end-point device
+ *
+ * Note that this function must be called from within an iommu_ops
+ * callback to retrieve the iommu_device for a device; the core code
+ * guarantees it will not invoke the op without an attached iommu.
+ */
+static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
- iommu->fwnode = fwnode;
+ return dev->iommu->iommu_dev;
}
-static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+#define iommu_get_iommu_dev(dev, type, member) \
+ container_of(__iommu_get_iommu_dev(dev), type, member)
+
+static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
- return (struct iommu_device *)dev_get_drvdata(dev);
+ *gather = (struct iommu_iotlb_gather) {
+ .start = ULONG_MAX,
+ .freelist = IOMMU_PAGES_LIST_INIT(gather->freelist),
+ };
}
-#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
-#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
-#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
-#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
-#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
-#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
-
-extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
-extern bool iommu_present(struct bus_type *bus);
-extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
-extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
-extern struct iommu_group *iommu_group_get_by_id(int id);
+extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
+extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
+struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
+static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
+{
+ return iommu_paging_domain_alloc_flags(dev, 0);
+}
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size);
-extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg,unsigned int nents,
- int prot);
+ size_t size);
+extern size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *iotlb_gather);
+extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
-extern int iommu_request_dm_for_dev(struct device *dev);
+extern void iommu_set_default_passthrough(bool cmd_line);
+extern void iommu_set_default_translated(bool cmd_line);
+extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
- enum iommu_resv_type type);
+ enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);
@@ -326,87 +958,260 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
-extern int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb);
-extern int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb);
+
extern int iommu_group_id(struct iommu_group *group);
-extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
-extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
- void *data);
-extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
- void *data);
+int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks);
-/* Window handling function prototypes */
-extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t offset, u64 size,
- int prot);
-extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
+void iommu_set_dma_strict(void);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ if (domain->ops->flush_iotlb_all)
+ domain->ops->flush_iotlb_all(domain);
+}
+
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather)
{
- return domain->ops->map_sg(domain, iova, sg, nents, prot);
+ if (domain->ops->iotlb_sync)
+ domain->ops->iotlb_sync(domain, iotlb_gather);
+
+ iommu_iotlb_gather_init(iotlb_gather);
+}
+
+/**
+ * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
+ *
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to check whether a new range and the gathered range
+ * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
+ * than merging the two, which might lead to unnecessary invalidations.
+ */
+static inline
+bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long start = iova, end = start + size - 1;
+
+ return gather->end != 0 &&
+ (end + 1 < gather->start || start > gather->end + 1);
+}
+
+/**
+ * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
+ * where only the address range matters, and simply minimising intermediate
+ * syncs is preferred.
+ */
+static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long end = iova + size - 1;
+
+ if (gather->start > iova)
+ gather->start = iova;
+ if (gather->end < end)
+ gather->end = end;
+}
+
+/**
+ * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
+ * @domain: IOMMU domain to be invalidated
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build invalidation commands based on individual
+ * pages, or with page size/table level hints which cannot be gathered if they
+ * differ.
+ */
+static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ /*
+ * If the new page is disjoint from the current range or is mapped at
+ * a different granularity, then sync the TLB so that the gather
+ * structure can be rewritten.
+ */
+ if ((gather->pgsize && gather->pgsize != size) ||
+ iommu_iotlb_gather_is_disjoint(gather, iova, size))
+ iommu_iotlb_sync(domain, gather);
+
+ gather->pgsize = size;
+ iommu_iotlb_gather_add_range(gather, iova, size);
+}
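/*
 * A minimal sketch (not from this patch): a driver unmap path typically
 * batches per-page invalidations with the gather helpers above and issues
 * one final sync. All names other than the iommu_* helpers are hypothetical.
 */
static size_t example_unmap_pages(struct iommu_domain *domain,
				  unsigned long iova, size_t pgsize,
				  size_t pgcount)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped = 0;

	iommu_iotlb_gather_init(&gather);
	while (pgcount--) {
		/* Batches the page; syncs early if the range is disjoint. */
		iommu_iotlb_gather_add_page(domain, &gather, iova, pgsize);
		iova += pgsize;
		unmapped += pgsize;
	}
	iommu_iotlb_sync(domain, &gather);	/* one flush for the batch */
	return unmapped;
}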
+
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
+{
+ return gather && gather->queued;
+}
+
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
+{
+ if (gather)
+ iommu_iotlb_gather_init(gather);
+
+ dirty->bitmap = bitmap;
+ dirty->gather = gather;
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
+{
+ if (dirty->bitmap)
+ iova_bitmap_set(dirty->bitmap, iova, length);
+
+ if (dirty->gather)
+ iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
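/*
 * A minimal sketch, assuming a driver's read_and_clear_dirty() op found a
 * dirty region at a made-up IOVA: record it in the bitmap and batch the
 * invalidation through the embedded gather.
 */
static void example_report_dirty(struct iommu_dirty_bitmap *dirty)
{
	/* Hypothetical dirty region: 2 MiB at IOVA 0x200000. */
	iommu_dirty_bitmap_record(dirty, 0x200000, 0x200000);
}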
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
+/* FSL-MC device grouping function */
+struct iommu_group *fsl_mc_device_group(struct device *dev);
+extern struct iommu_group *generic_single_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
- * @ops: ops for this device's IOMMU
* @iommu_fwnode: firmware handle for this device's IOMMU
- * @iommu_priv: IOMMU driver private data for this device
+ * @flags: IOMMU_FWSPEC_* flags
* @num_ids: number of associated device IDs
* @ids: IDs which this device may present to the IOMMU
+ *
+ * Note that the IDs (and any other information, really) stored in this structure should be
+ * considered private to the IOMMU device driver and are not to be used directly by IOMMU
+ * consumers.
*/
struct iommu_fwspec {
- const struct iommu_ops *ops;
struct fwnode_handle *iommu_fwnode;
- void *iommu_priv;
+ u32 flags;
unsigned int num_ids;
- u32 ids[1];
+ u32 ids[];
+};
+
+/* ATS is supported */
+#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
+/* CANWBS is supported */
+#define IOMMU_FWSPEC_PCI_RC_CANWBS (1 << 1)
+
+/*
+ * An iommu attach handle represents a relationship between an iommu domain
+ * and a PASID or RID of a device. It is allocated and managed by the component
+ * that manages the domain and is stored in the iommu group during the time the
+ * domain is attached.
+ */
+struct iommu_attach_handle {
+ struct iommu_domain *domain;
+};
+
+/**
+ * struct iommu_sva - handle to a device-mm bond
+ */
+struct iommu_sva {
+ struct iommu_attach_handle handle;
+ struct device *dev;
+ refcount_t users;
+};
+
+struct iommu_mm_data {
+ u32 pasid;
+ struct mm_struct *mm;
+ struct list_head sva_domains;
+ struct list_head mm_list_elm;
};
-int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
- const struct iommu_ops *ops);
-void iommu_fwspec_free(struct device *dev);
-int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
+int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);
+
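/*
 * A minimal sketch of how bus/firmware glue might use the two calls above;
 * the fwnode and the ID value are assumptions, not part of this header.
 */
static int example_fwspec_setup(struct device *dev,
				struct fwnode_handle *iommu_fwnode)
{
	u32 id = 0x42;	/* hypothetical stream ID from firmware tables */
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode);
	if (ret)
		return ret;
	return iommu_fwspec_add_ids(dev, &id, 1);
}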
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+{
+ if (dev->iommu)
+ return dev->iommu->fwspec;
+ else
+ return NULL;
+}
+
+static inline void dev_iommu_fwspec_set(struct device *dev,
+ struct iommu_fwspec *fwspec)
+{
+ dev->iommu->fwspec = fwspec;
+}
+
+static inline void *dev_iommu_priv_get(struct device *dev)
+{
+ if (dev->iommu)
+ return dev->iommu->priv;
+ else
+ return NULL;
+}
+
+void dev_iommu_priv_set(struct device *dev, void *priv);
+
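/*
 * Illustration only: a driver pairs dev_iommu_priv_set() in its probe path
 * with dev_iommu_priv_get() in later ops. 'struct example_dev' is
 * hypothetical.
 */
struct example_dev {
	struct iommu_device iommu;
	void __iomem *regs;
};

static void example_store_priv(struct device *dev, struct example_dev *edev)
{
	dev_iommu_priv_set(dev, edev);
}

static struct example_dev *example_fetch_priv(struct device *dev)
{
	return dev_iommu_priv_get(dev);	/* e.g. from attach_dev or release */
}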
+extern struct mutex iommu_probe_device_lock;
+int iommu_probe_device(struct device *dev);
+int iommu_device_use_default_domain(struct device *dev);
+void iommu_device_unuse_default_domain(struct device *dev);
+
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
+void iommu_group_release_dma_owner(struct iommu_group *group);
+bool iommu_group_dma_owner_claimed(struct iommu_group *group);
+
+int iommu_device_claim_dma_owner(struct device *dev, void *owner);
+void iommu_device_release_dma_owner(struct device *dev);
+
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_attach_handle *handle);
+void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+ioasid_t iommu_alloc_global_pasid(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
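/*
 * A minimal sketch combining the PASID APIs above; the error codes and the
 * NULL attach handle (no fault routing needed) are assumptions.
 */
static int example_attach_pasid(struct iommu_domain *domain,
				struct device *dev)
{
	ioasid_t pasid = iommu_alloc_global_pasid(dev);
	int ret;

	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
	if (ret)
		iommu_free_global_pasid(pasid);
	return ret;
}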
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
+struct iommu_fault_param {};
+struct iommu_iotlb_gather {};
+struct iommu_dirty_bitmap {};
+struct iommu_dirty_ops {};
-static inline bool iommu_present(struct bus_type *bus)
+static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
return false;
}
-static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
+static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
+ unsigned int flags)
{
- return false;
-}
-
-static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
-{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
-static inline struct iommu_group *iommu_group_get_by_id(int id)
+static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void iommu_domain_free(struct iommu_domain *domain)
@@ -430,33 +1235,37 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
}
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
return -ENODEV;
}
-static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- int gfp_order)
+static inline size_t iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
- return -ENODEV;
+ return 0;
}
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
+static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *iotlb_gather)
{
- return -ENODEV;
+ return 0;
}
-static inline int iommu_domain_window_enable(struct iommu_domain *domain,
- u32 wnd_nr, phys_addr_t paddr,
- u64 size, int prot)
+static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot, gfp_t gfp)
{
return -ENODEV;
}
-static inline void iommu_domain_window_disable(struct iommu_domain *domain,
- u32 wnd_nr)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather)
{
}
@@ -486,9 +1295,17 @@ static inline int iommu_get_group_resv_regions(struct iommu_group *group,
return -ENODEV;
}
-static inline int iommu_request_dm_for_dev(struct device *dev)
+static inline void iommu_set_default_passthrough(bool cmd_line)
+{
+}
+
+static inline void iommu_set_default_translated(bool cmd_line)
{
- return -ENODEV;
+}
+
+static inline bool iommu_default_passthrough(void)
+{
+ return true;
}
static inline int iommu_attach_group(struct iommu_domain *domain,
@@ -550,53 +1367,54 @@ static inline void iommu_group_put(struct iommu_group *group)
{
}
-static inline int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb)
+static inline int iommu_group_id(struct iommu_group *group)
{
return -ENODEV;
}
-static inline int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb)
+static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks)
{
return 0;
}
-static inline int iommu_group_id(struct iommu_group *group)
+static inline int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops,
+ struct device *hwdev)
{
return -ENODEV;
}
-static inline int iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
- return -EINVAL;
+ return NULL;
}
-static inline int iommu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
- return -EINVAL;
}
-static inline int iommu_device_register(struct iommu_device *iommu)
+static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
{
- return -ENODEV;
}
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
+ return false;
}
-static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
- struct fwnode_handle *fwnode)
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
{
}
-static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
{
- return NULL;
}
static inline void iommu_device_unregister(struct iommu_device *iommu)
@@ -625,28 +1443,264 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link)
}
static inline int iommu_fwspec_init(struct device *dev,
- struct fwnode_handle *iommu_fwnode,
- const struct iommu_ops *ops)
+ struct fwnode_handle *iommu_fwnode)
{
return -ENODEV;
}
-static inline void iommu_fwspec_free(struct device *dev)
-{
-}
-
static inline int iommu_fwspec_add_ids(struct device *dev, const u32 *ids,
int num_ids)
{
return -ENODEV;
}
-static inline
-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
return NULL;
}
+static inline int iommu_device_use_default_domain(struct device *dev)
+{
+ return 0;
+}
+
+static inline void iommu_device_unuse_default_domain(struct device *dev)
+{
+}
+
+static inline int
+iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_group_release_dma_owner(struct iommu_group *group)
+{
+}
+
+static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
+{
+ return false;
+}
+
+static inline void iommu_device_release_dma_owner(struct device *dev)
+{
+}
+
+static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_attach_handle *handle)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+}
+
+static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */
+#ifdef CONFIG_IRQ_MSI_IOMMU
+#ifdef CONFIG_IOMMU_API
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
+#else
+static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
+ phys_addr_t msi_addr)
+{
+ return 0;
+}
+#endif /* CONFIG_IOMMU_API */
+#endif /* CONFIG_IRQ_MSI_IOMMU */
+
+#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
+void iommu_group_mutex_assert(struct device *dev);
+#else
+static inline void iommu_group_mutex_assert(struct device *dev)
+{
+}
+#endif
+
+/**
+ * iommu_map_sgtable - Map the given buffer to the IOMMU domain
+ * @domain: The IOMMU domain to perform the mapping
+ * @iova: The start address to map the buffer
+ * @sgt: The sg_table object describing the buffer
+ * @prot: IOMMU protection bits
+ *
+ * Creates a mapping at @iova for the buffer described by a scatterlist
+ * stored in the given sg_table object in the provided IOMMU domain.
+ */
+static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
+ unsigned long iova, struct sg_table *sgt, int prot)
+{
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
+ GFP_KERNEL);
+}
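/*
 * Usage sketch for the helper above; the domain, IOVA, and sg_table are
 * caller-supplied assumptions and error handling is trimmed.
 */
static int example_map_buffer(struct iommu_domain *domain,
			      unsigned long iova, struct sg_table *sgt)
{
	ssize_t mapped;

	mapped = iommu_map_sgtable(domain, iova, sgt,
				   IOMMU_READ | IOMMU_WRITE);
	return mapped < 0 ? (int)mapped : 0;
}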
+
+#ifdef CONFIG_IOMMU_DEBUGFS
+extern struct dentry *iommu_debugfs_dir;
+void iommu_debugfs_setup(void);
+#else
+static inline void iommu_debugfs_setup(void) {}
+#endif
+
+#ifdef CONFIG_IOMMU_DMA
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
+#else /* CONFIG_IOMMU_DMA */
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_IOMMU_DMA */
+
+/*
+ * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
+ * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
+ * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
+ */
+#define TEGRA_STREAM_ID_BYPASS 0x7f
+
+static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
+{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (fwspec && fwspec->num_ids == 1) {
+ *stream_id = fwspec->ids[0] & 0xffff;
+ return true;
+ }
+#endif
+
+ return false;
+}
+
+#ifdef CONFIG_IOMMU_MM_DATA
+static inline void mm_pasid_init(struct mm_struct *mm)
+{
+ /*
+ * During dup_mm(), a new mm will be memcpy'd from an old one and that makes
+ * the new mm and the old one point to the same iommu_mm instance. When either
+ * one of the two mms gets released, the iommu_mm instance is freed, leaving
+ * the other mm running into a use-after-free/double-free problem. To avoid
+ * the problem, zeroing the iommu_mm pointer of a new mm is needed here.
+ */
+ mm->iommu_mm = NULL;
+}
+
+static inline bool mm_valid_pasid(struct mm_struct *mm)
+{
+ return READ_ONCE(mm->iommu_mm);
+}
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+ struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);
+
+ if (!iommu_mm)
+ return IOMMU_PASID_INVALID;
+ return iommu_mm->pasid;
+}
+
+void mm_pasid_drop(struct mm_struct *mm);
+struct iommu_sva *iommu_sva_bind_device(struct device *dev,
+ struct mm_struct *mm);
+void iommu_sva_unbind_device(struct iommu_sva *handle);
+u32 iommu_sva_get_pasid(struct iommu_sva *handle);
+void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end);
+#else
+static inline struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+}
+
+static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+ return IOMMU_PASID_INVALID;
+}
+static inline void mm_pasid_init(struct mm_struct *mm) {}
+static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void mm_pasid_drop(struct mm_struct *mm) {}
+static inline void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end) {}
+#endif /* CONFIG_IOMMU_MM_DATA */
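/*
 * A hedged end-to-end sketch of the SVA API declared above: bind the current
 * process's mm, program the returned PASID into the device (elided), then
 * unbind. Error codes are assumptions.
 */
static int example_sva_use(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* ... program pasid into the device and run the workload ... */
	(void)pasid;

	iommu_sva_unbind_device(handle);
	return 0;
}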
+
+#ifdef CONFIG_IOMMU_IOPF
+int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
+void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
+int iopf_queue_flush_dev(struct device *dev);
+struct iopf_queue *iopf_queue_alloc(const char *name);
+void iopf_queue_free(struct iopf_queue *queue);
+int iopf_queue_discard_partial(struct iopf_queue *queue);
+void iopf_free_group(struct iopf_group *group);
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
+void iopf_group_response(struct iopf_group *group,
+ enum iommu_page_response_code status);
+#else
+static inline int
+iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline void
+iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+{
+}
+
+static inline int iopf_queue_flush_dev(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline struct iopf_queue *iopf_queue_alloc(const char *name)
+{
+ return NULL;
+}
+
+static inline void iopf_queue_free(struct iopf_queue *queue)
+{
+}
+
+static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
+{
+ return -ENODEV;
+}
+
+static inline void iopf_free_group(struct iopf_group *group)
+{
+}
+
+static inline int
+iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
+{
+ return -ENODEV;
+}
+
+static inline void iopf_group_response(struct iopf_group *group,
+ enum iommu_page_response_code status)
+{
+}
+#endif /* CONFIG_IOMMU_IOPF */
#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
new file mode 100644
index 000000000000..6e7efe83bc5d
--- /dev/null
+++ b/include/linux/iommufd.h
@@ -0,0 +1,400 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __LINUX_IOMMUFD_H
+#define __LINUX_IOMMUFD_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/iommu.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/linux/iommufd.h>
+
+struct device;
+struct file;
+struct iommu_group;
+struct iommu_user_data;
+struct iommu_user_data_array;
+struct iommufd_access;
+struct iommufd_ctx;
+struct iommufd_device;
+struct iommufd_viommu_ops;
+struct page;
+
+enum iommufd_object_type {
+ IOMMUFD_OBJ_NONE,
+ IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE,
+ IOMMUFD_OBJ_DEVICE,
+ IOMMUFD_OBJ_HWPT_PAGING,
+ IOMMUFD_OBJ_HWPT_NESTED,
+ IOMMUFD_OBJ_IOAS,
+ IOMMUFD_OBJ_ACCESS,
+ IOMMUFD_OBJ_FAULT,
+ IOMMUFD_OBJ_VIOMMU,
+ IOMMUFD_OBJ_VDEVICE,
+ IOMMUFD_OBJ_VEVENTQ,
+ IOMMUFD_OBJ_HW_QUEUE,
+#ifdef CONFIG_IOMMUFD_TEST
+ IOMMUFD_OBJ_SELFTEST,
+#endif
+ IOMMUFD_OBJ_MAX,
+};
+
+/* Base struct for all objects with a userspace ID handle. */
+struct iommufd_object {
+ /*
+ * Destroy will sleep and wait for wait_cnt to go to zero. This allows
+ * concurrent users of the ID to reliably avoid causing a spurious
+ * destroy failure. Holders of this count should either be short-lived
+ * or be revoked and blocked during pre_destroy().
+ */
+ refcount_t wait_cnt;
+ refcount_t users;
+ enum iommufd_object_type type;
+ unsigned int id;
+};
+
+struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
+ struct device *dev, u32 *id);
+void iommufd_device_unbind(struct iommufd_device *idev);
+
+int iommufd_device_attach(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id);
+int iommufd_device_replace(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id);
+void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid);
+
+struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
+u32 iommufd_device_to_id(struct iommufd_device *idev);
+
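/*
 * A minimal sketch of the bind/attach flow for a VFIO-like driver. The use
 * of IOMMU_NO_PASID (from iommu.h) for the device's default context is an
 * assumption of this example.
 */
static int example_bind_and_attach(struct iommufd_ctx *ictx,
				   struct device *dev, u32 *pt_id)
{
	struct iommufd_device *idev;
	u32 dev_id;
	int rc;

	idev = iommufd_device_bind(ictx, dev, &dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	rc = iommufd_device_attach(idev, IOMMU_NO_PASID, pt_id);
	if (rc)
		iommufd_device_unbind(idev);
	return rc;
}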
+struct iommufd_access_ops {
+ u8 needs_pin_pages : 1;
+ void (*unmap)(void *data, unsigned long iova, unsigned long length);
+};
+
+enum {
+ IOMMUFD_ACCESS_RW_READ = 0,
+ IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
+ /* Set if the caller is in a kthread then rw will use kthread_use_mm() */
+ IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,
+
+ /* Only for use by selftest */
+ __IOMMUFD_ACCESS_RW_SLOW_PATH = 1 << 2,
+};
+
+struct iommufd_access *
+iommufd_access_create(struct iommufd_ctx *ictx,
+ const struct iommufd_access_ops *ops, void *data, u32 *id);
+void iommufd_access_destroy(struct iommufd_access *access);
+int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
+int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
+void iommufd_access_detach(struct iommufd_access *access);
+
+void iommufd_ctx_get(struct iommufd_ctx *ictx);
+
+struct iommufd_viommu {
+ struct iommufd_object obj;
+ struct iommufd_ctx *ictx;
+ struct iommu_device *iommu_dev;
+ struct iommufd_hwpt_paging *hwpt;
+
+ const struct iommufd_viommu_ops *ops;
+
+ struct xarray vdevs;
+ struct list_head veventqs;
+ struct rw_semaphore veventqs_rwsem;
+
+ enum iommu_viommu_type type;
+};
+
+struct iommufd_vdevice {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_device *idev;
+
+ /*
+ * Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID of
+ * AMD IOMMU, and vRID of Intel VT-d
+ */
+ u64 virt_id;
+
+ /* Clean up all driver-specific parts of an iommufd_vdevice */
+ void (*destroy)(struct iommufd_vdevice *vdev);
+};
+
+struct iommufd_hw_queue {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_access *access;
+
+ u64 base_addr; /* in guest physical address space */
+ size_t length;
+
+ enum iommu_hw_queue_type type;
+
+ /* Clean up all driver-specific parts of an iommufd_hw_queue */
+ void (*destroy)(struct iommufd_hw_queue *hw_queue);
+};
+
+/**
+ * struct iommufd_viommu_ops - vIOMMU specific operations
+ * @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory
+ * of the vIOMMU will be freed by iommufd core after calling this op
+ * @alloc_domain_nested: Allocate an IOMMU_DOMAIN_NESTED on a vIOMMU that holds a
+ * nesting parent domain (IOMMU_DOMAIN_PAGING). @user_data
+ * must be defined in include/uapi/linux/iommufd.h.
+ * It must fully initialize the new iommu_domain before
+ * returning. Upon failure, ERR_PTR must be returned.
+ * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for
+ * any IOMMU hardware specific cache: TLB and device cache.
+ * The @array passes in the cache invalidation requests, in
+ * form of a driver data structure. A driver must update the
+ * array->entry_num to report the number of handled requests.
+ * The data structure of the array entry must be defined in
+ * include/uapi/linux/iommufd.h
+ * @vdevice_size: Size of the driver-defined vDEVICE structure per this vIOMMU
+ * @vdevice_init: Initialize the driver-level structure of a vDEVICE object, or
+ * related HW procedure. @vdev is already initialized by iommufd
+ * core: vdev->idev and vdev->viommu pointers; vdev->virt_id carries a
+ * per-vIOMMU virtual ID (refer to struct iommu_vdevice_alloc in
+ * include/uapi/linux/iommufd.h)
+ * If the driver has a deinit function to revert what the vdevice_init op
+ * does, it should set it to the @vdev->destroy function pointer
+ * @get_hw_queue_size: Get the size of a driver-defined HW queue structure for a
+ * given @viommu corresponding to @queue_type. The driver
+ * should return 0 if HW queues of @queue_type are not
+ * supported. The driver is required to use the
+ * HW_QUEUE_STRUCT_SIZE macro to validate its driver-level
+ * HW queue structure against the core one
+ * @hw_queue_init_phys: Initialize the driver-level structure of a HW queue whose
+ * core-level structure is already initialized and holds all
+ * the info about the guest queue memory.
+ * A driver providing this op indicates that the HW accesses
+ * the guest queue memory via physical addresses.
+ * @index carries the logical HW queue ID per vIOMMU in a
+ * guest VM, for a multi-queue model. @base_addr_pa carries
+ * the physical location of the guest queue.
+ * If the driver has a deinit function to revert what this op
+ * does, it should set it to the @hw_queue->destroy pointer
+ */
+struct iommufd_viommu_ops {
+ void (*destroy)(struct iommufd_viommu *viommu);
+ struct iommu_domain *(*alloc_domain_nested)(
+ struct iommufd_viommu *viommu, u32 flags,
+ const struct iommu_user_data *user_data);
+ int (*cache_invalidate)(struct iommufd_viommu *viommu,
+ struct iommu_user_data_array *array);
+ const size_t vdevice_size;
+ int (*vdevice_init)(struct iommufd_vdevice *vdev);
+ size_t (*get_hw_queue_size)(struct iommufd_viommu *viommu,
+ enum iommu_hw_queue_type queue_type);
+ /* AMD's HW will add hw_queue_init simply using @hw_queue->base_addr */
+ int (*hw_queue_init_phys)(struct iommufd_hw_queue *hw_queue, u32 index,
+ phys_addr_t base_addr_pa);
+};
+
+#if IS_ENABLED(CONFIG_IOMMUFD)
+struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
+struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
+void iommufd_ctx_put(struct iommufd_ctx *ictx);
+bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);
+
+int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
+ unsigned long length, struct page **out_pages,
+ unsigned int flags);
+void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova, unsigned long length);
+int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ void *data, size_t len, unsigned int flags);
+int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
+int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
+int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
+#else /* !CONFIG_IOMMUFD */
+static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
+{
+}
+
+static inline int iommufd_access_pin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length,
+ struct page **out_pages,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length)
+{
+}
+
+static inline int iommufd_access_rw(struct iommufd_access *access,
+ unsigned long iova, void *data, size_t len,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_IOMMUFD */
+
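/*
 * Illustration of iommufd_access_rw() for emulated DMA; the buffer and the
 * read-only flags are assumptions.
 */
static inline int example_emulated_read(struct iommufd_access *access,
					unsigned long iova, void *buf,
					size_t len)
{
	return iommufd_access_rw(access, iova, buf, len,
				 IOMMUFD_ACCESS_RW_READ);
}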
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE)
+int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset);
+void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner, unsigned long offset);
+struct device *iommufd_vdevice_to_device(struct iommufd_vdevice *vdev);
+struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
+ unsigned long vdev_id);
+int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
+ struct device *dev, unsigned long *vdev_id);
+int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type, void *event_data,
+ size_t data_len);
+#else /* !CONFIG_IOMMUFD_DRIVER_CORE */
+static inline int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+_iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+}
+
+static inline int _iommufd_alloc_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ unsigned long offset)
+{
+}
+
+static inline struct device *
+iommufd_vdevice_to_device(struct iommufd_vdevice *vdev)
+{
+ return NULL;
+}
+
+static inline struct device *
+iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
+{
+ return NULL;
+}
+
+static inline int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
+ struct device *dev,
+ unsigned long *vdev_id)
+{
+ return -ENOENT;
+}
+
+static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type,
+ void *event_data, size_t data_len)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_IOMMUFD_DRIVER_CORE */
+
+#define VIOMMU_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_viommu, \
+ ((drv_struct *)NULL)->member)))
+
+#define VDEVICE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_vdevice, \
+ ((drv_struct *)NULL)->member)))
+
+#define HW_QUEUE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_hw_queue, \
+ ((drv_struct *)NULL)->member)))
+
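/*
 * A sketch of the embedding contract that the *_STRUCT_SIZE macros above
 * enforce: the core object must be the first member of the driver structure.
 * 'struct example_viommu' is hypothetical.
 */
struct example_viommu {
	struct iommufd_viommu core;	/* must be the first member */
	unsigned long asid;		/* driver-private state */
};

static size_t example_viommu_size(void)
{
	return VIOMMU_STRUCT_SIZE(struct example_viommu, core);
}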
+/*
+ * Helpers for IOMMU driver to build/destroy a dependency between two sibling
+ * structures created by one of the allocators above
+ */
+#define iommufd_hw_queue_depend(dependent, depended, member) \
+ ({ \
+ int ret = -EINVAL; \
+ \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ if (!WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu)) \
+ ret = _iommufd_object_depend(&dependent->member.obj, \
+ &depended->member.obj); \
+ ret; \
+ })
+
+#define iommufd_hw_queue_undepend(dependent, depended, member) \
+ ({ \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu); \
+ _iommufd_object_undepend(&dependent->member.obj, \
+ &depended->member.obj); \
+ })
+
+/*
+ * Helpers for an IOMMU driver to allocate/destroy an mmappable area for a
+ * structure.
+ *
+ * To support an mmappable MMIO region, the kernel driver must first register it
+ * with the iommufd core to allocate an @offset, during driver-structure
+ * initialization (e.g. the viommu_init op). It should then report this @offset
+ * and the @length of the MMIO region to user space for the mmap syscall.
+ */
+static inline int iommufd_viommu_alloc_mmap(struct iommufd_viommu *viommu,
+ phys_addr_t mmio_addr,
+ size_t length,
+ unsigned long *offset)
+{
+ return _iommufd_alloc_mmap(viommu->ictx, &viommu->obj, mmio_addr,
+ length, offset);
+}
+
+static inline void iommufd_viommu_destroy_mmap(struct iommufd_viommu *viommu,
+ unsigned long offset)
+{
+ _iommufd_destroy_mmap(viommu->ictx, &viommu->obj, offset);
+}
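/*
 * Sketch of the registration flow described above, e.g. from a hypothetical
 * viommu_init op; *mmap_offset and @length are then reported to user space.
 */
static int example_expose_mmio(struct iommufd_viommu *viommu,
			       phys_addr_t mmio_pa, size_t length,
			       unsigned long *mmap_offset)
{
	int rc;

	rc = iommufd_viommu_alloc_mmap(viommu, mmio_pa, length, mmap_offset);
	if (rc)
		return rc;
	/* Report *mmap_offset and length to user space for mmap(). */
	return 0;
}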
+#endif
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index d29e1e21bf3f..bdd2e0652bc3 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _LINUX_IOPOLL_H
@@ -23,75 +14,203 @@
#include <linux/io.h>
/**
- * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
- * @op: accessor function (takes @addr as its only argument)
- * @addr: Address to poll
- * @val: Variable to read the value into
- * @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.txt).
- * @timeout_us: Timeout in us, 0 means never timeout
+ * poll_timeout_us - Periodically poll and perform an operation until
+ * a condition is met or a timeout occurs
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val. Must not
- * be called from atomic context if sleep_us or timeout_us are used.
+ * @op: Operation
+ * @cond: Break condition
+ * @sleep_us: Maximum time to sleep between operations in us (0 tight-loops).
+ * Please read usleep_range() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_op: if true, sleep for @sleep_us before the first operation.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
*/
-#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+#define poll_timeout_us(op, cond, sleep_us, timeout_us, sleep_before_op) \
({ \
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
- might_sleep_if(sleep_us); \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __sleep_us = (sleep_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int ___ret; \
+ might_sleep_if((__sleep_us) != 0); \
+ if ((sleep_before_op) && __sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
for (;;) { \
- (val) = op(addr); \
- if (cond) \
+ bool __expired = __timeout_us && \
+ ktime_compare(ktime_get(), __timeout) > 0; \
+ /* guarantee 'op' and 'cond' are evaluated after timeout expired */ \
+ barrier(); \
+ op; \
+ if (cond) { \
+ ___ret = 0; \
break; \
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- (val) = op(addr); \
+ } \
+ if (__expired) { \
+ ___ret = -ETIMEDOUT; \
break; \
} \
- if (sleep_us) \
- usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+ cpu_relax(); \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ ___ret; \
})
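/*
 * A hedged usage sketch: wait up to 10 ms for a made-up READY bit, sleeping
 * about 100 us between reads. Register, bit, and timings are assumptions.
 */
static int example_wait_ready(void __iomem *status_reg)
{
	u32 val;

	return poll_timeout_us(val = readl(status_reg),
			       val & BIT(0),	/* hypothetical READY bit */
			       100, 10000, false);
}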
/**
- * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
- * @op: accessor function (takes @addr as its only argument)
- * @addr: Address to poll
- * @val: Variable to read the value into
- * @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
- * be less than ~10us since udelay is used (see
- * Documentation/timers/timers-howto.txt).
+ * poll_timeout_us_atomic - Periodically poll and perform an operation until
+ * a condition is met or a timeout occurs
+ *
+ * @op: Operation
+ * @cond: Break condition
+ * @delay_us: Time to udelay between operations in us (0 tight-loops).
+ * Please read udelay() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
+ * @delay_before_op: if true, delay for @delay_us before the first operation.
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val.
+ * This macro does not rely on timekeeping. Hence it is safe to call even when
+ * timekeeping is suspended, at the expense of an underestimation of wall clock
+ * time, which is rather minimal with a non-zero delay_us.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout.
*/
-#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+#define poll_timeout_us_atomic(op, cond, delay_us, timeout_us, \
+ delay_before_op) \
({ \
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ u64 __timeout_us = (timeout_us); \
+ s64 __left_ns = __timeout_us * NSEC_PER_USEC; \
+ unsigned long __delay_us = (delay_us); \
+ u64 __delay_ns = __delay_us * NSEC_PER_USEC; \
+ int ___ret; \
+ if ((delay_before_op) && __delay_us) { \
+ udelay(__delay_us); \
+ if (__timeout_us) \
+ __left_ns -= __delay_ns; \
+ } \
for (;;) { \
- (val) = op(addr); \
- if (cond) \
+ bool __expired = __timeout_us && __left_ns < 0; \
+ /* guarantee 'op' and 'cond' are evaluated after timeout expired */ \
+ barrier(); \
+ op; \
+ if (cond) { \
+ ___ret = 0; \
break; \
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- (val) = op(addr); \
+ } \
+ if (__expired) { \
+ ___ret = -ETIMEDOUT; \
break; \
} \
- if (delay_us) \
- udelay(delay_us); \
+ if (__delay_us) { \
+ udelay(__delay_us); \
+ if (__timeout_us) \
+ __left_ns -= __delay_ns; \
+ } \
+ cpu_relax(); \
+ if (__timeout_us) \
+ __left_ns--; \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ ___ret; \
})
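/*
 * The equivalent sketch for atomic context: spin up to 50 us for a made-up
 * ACK bit with a 2 us udelay between reads.
 */
static int example_wait_ack_atomic(void __iomem *ack_reg)
{
	u32 val;

	return poll_timeout_us_atomic(val = readl(ack_reg),
				      val & BIT(1),	/* hypothetical ACK bit */
				      2, 50, false);
}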
+/**
+ * read_poll_timeout - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if true, sleep for @sleep_us before the first read.
+ * @args: arguments for @op poll
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
+#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
+ sleep_before_read, args...) \
+ poll_timeout_us((val) = op(args), cond, sleep_us, timeout_us, sleep_before_read)
+
+/**
+ * read_poll_timeout_atomic - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read udelay() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @delay_before_read: if true, delay for @delay_us before the first read.
+ * @args: arguments for @op poll
+ *
+ * This macro does not rely on timekeeping. Hence it is safe to call even when
+ * timekeeping is suspended, at the expense of an underestimation of wall clock
+ * time, which is rather minimal with a non-zero delay_us.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val.
+ */
+#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
+ delay_before_read, args...) \
+ poll_timeout_us_atomic((val) = op(args), cond, delay_us, timeout_us, delay_before_read)
+
+/**
+ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
+#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+ read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr)
+
+/**
+ * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read udelay() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val.
+ */
+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr)
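/*
 * For comparison, the first wait above expressed with the legacy readx
 * wrapper; the register and bit remain assumptions.
 */
static int example_wait_ready_readx(void __iomem *status_reg)
{
	u32 val;

	return readx_poll_timeout(readl, status_reg, val,
				  val & BIT(0), 100, 10000);
}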
#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 6230064d7f95..9afa30f9346f 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ioport.h Definitions of routines for detecting, reserving and
* allocating system resources.
@@ -9,7 +10,9 @@
#define _LINUX_IOPORT_H
#ifndef __ASSEMBLY__
+#include <linux/bits.h>
#include <linux/compiler.h>
+#include <linux/minmax.h>
#include <linux/types.h>
/*
* Resources are tree-like, allowing
@@ -56,6 +59,10 @@ struct resource {
#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
+/* IORESOURCE_SYSRAM specific bits. */
+#define IORESOURCE_SYSRAM_DRIVER_MANAGED 0x02000000 /* Always detected via a driver. */
+#define IORESOURCE_SYSRAM_MERGEABLE 0x04000000 /* Resource can be merged. */
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
@@ -72,7 +79,8 @@ struct resource {
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
-#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_WAKECAPABLE (1<<6)
/* PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -101,6 +109,7 @@ struct resource {
#define IORESOURCE_MEM_32BIT (3<<3)
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+#define IORESOURCE_MEM_NONPOSTED (1<<7)
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
@@ -130,18 +139,35 @@ enum {
IORES_DESC_ACPI_NV_STORAGE = 3,
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
+ IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
+ IORES_DESC_RESERVED = 7,
+ IORES_DESC_SOFT_RESERVED = 8,
+ IORES_DESC_CXL = 9,
+};
+
+/*
+ * Flags controlling ioremap() behavior.
+ */
+enum {
+ IORES_MAP_SYSTEM_RAM = BIT(0),
+ IORES_MAP_ENCRYPTED = BIT(1),
};
/* helpers to define resources */
-#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
- { \
+#define DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, _desc) \
+(struct resource) { \
.start = (_start), \
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
- .desc = IORES_DESC_NONE, \
+ .desc = (_desc), \
}
+#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
+ DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, IORES_DESC_NONE)
+#define DEFINE_RES(_start, _size, _flags) \
+ DEFINE_RES_NAMED(_start, _size, NULL, _flags)
+
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
#define DEFINE_RES_IO(_start, _size) \
@@ -152,6 +178,11 @@ enum {
#define DEFINE_RES_MEM(_start, _size) \
DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
+#define DEFINE_RES_REG_NAMED(_start, _size, _name) \
+ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_REG)
+#define DEFINE_RES_REG(_start, _size) \
+ DEFINE_RES_REG_NAMED((_start), (_size), NULL)
+
#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq) \
@@ -162,6 +193,42 @@ enum {
#define DEFINE_RES_DMA(_dma) \
DEFINE_RES_DMA_NAMED((_dma), NULL)
+/**
+ * typedef resource_alignf - Resource alignment callback
+ * @data: Private data used by the callback
+ * @res: Resource candidate range (an empty resource space)
+ * @size: The minimum size of the empty space
+ * @align: Alignment from the constraints
+ *
+ * Callback allows calculating resource placement and alignment beyond min,
+ * max, and align fields in the struct resource_constraint.
+ *
+ * Return: Start address for the resource.
+ */
+typedef resource_size_t (*resource_alignf)(void *data,
+ const struct resource *res,
+ resource_size_t size,
+ resource_size_t align);
+
+/**
+ * struct resource_constraint - constraints to be met while searching empty
+ * resource space
+ * @min: The minimum address for the memory range
+ * @max: The maximum address for the memory range
+ * @align: Alignment for the start address of the empty space
+ * @alignf: Additional alignment constraints callback
+ * @alignf_data: Data provided for @alignf callback
+ *
+ * Contains the range and alignment constraints that have to be met during
+ * find_resource_space(). @alignf can be NULL indicating no alignment beyond
+ * @align is necessary.
+ */
+struct resource_constraint {
+ resource_size_t min, max, align;
+ resource_alignf alignf;
+ void *alignf_data;
+};
+
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
@@ -181,15 +248,44 @@ extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data);
struct resource *lookup_resource(struct resource *root, resource_size_t start);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
+
+/**
+ * resource_set_size - Calculate resource end address from size and start
+ * @res: Resource descriptor
+ * @size: Size of the resource
+ *
+ * Calculate the end address for @res based on @size.
+ *
+ * Note: The start address of @res must be set when calling this function.
+ * Prefer resource_set_range() if setting both the start address and @size.
+ */
+static inline void resource_set_size(struct resource *res, resource_size_t size)
+{
+ res->end = res->start + size - 1;
+}
+
+/**
+ * resource_set_range - Set resource start and end addresses
+ * @res: Resource descriptor
+ * @start: Start address for the resource
+ * @size: Size of the resource
+ *
+ * Set @res start address and calculate the end address based on @size.
+ */
+static inline void resource_set_range(struct resource *res,
+ resource_size_t start,
+ resource_size_t size)
+{
+ res->start = start;
+ resource_set_size(res, size);
+}
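/*
 * Illustration only: describe a 4 KiB register window at a made-up address
 * using the helper above.
 */
static inline void example_init_res(struct resource *res)
{
	resource_set_range(res, 0x1000, 0x1000);	/* 4 KiB at 0x1000 */
	res->flags = IORESOURCE_MEM;
	res->name = "example-regs";
}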
+
static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
@@ -203,7 +299,7 @@ static inline unsigned long resource_ext_type(const struct resource *res)
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
-static inline bool resource_contains(struct resource *r1, struct resource *r2)
+static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
@@ -212,12 +308,51 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2)
return r1->start <= r2->start && r1->end >= r2->end;
}
+/* True if any part of r1 overlaps r2 */
+static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2)
+{
+ return r1->start <= r2->end && r1->end >= r2->start;
+}
+
+static inline bool resource_intersection(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
+{
+ if (!resource_overlaps(r1, r2))
+ return false;
+ r->start = max(r1->start, r2->start);
+ r->end = min(r1->end, r2->end);
+ return true;
+}
+
+static inline bool resource_union(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
+{
+ if (!resource_overlaps(r1, r2))
+ return false;
+ r->start = min(r1->start, r2->start);
+ r->end = max(r1->end, r2->end);
+ return true;
+}
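/*
 * Sketch: clamp a requested window to its parent with the helper above;
 * a false return means the request lies entirely outside the parent.
 */
static inline bool example_clamp_to_parent(const struct resource *parent,
					   const struct resource *want,
					   struct resource *out)
{
	return resource_intersection(parent, want, out);
}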
+
+/*
+ * Check if this resource is added to a resource tree or detached. Caller is
+ * responsible for not racing assignment.
+ */
+static inline bool resource_assigned(struct resource *res)
+{
+ return res->parent;
+}
+
+int find_resource_space(struct resource *root, struct resource *new,
+ resource_size_t size, struct resource_constraint *constraint);
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
+#define request_mem_region_muxed(start, n, name) \
+ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_MUXED)
#define request_mem_region_exclusive(start,n,name) \
__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
@@ -234,8 +369,10 @@ extern struct resource * __request_region(struct resource *,
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int release_mem_region_adjustable(struct resource *, resource_size_t,
- resource_size_t);
+extern void release_mem_region_adjustable(resource_size_t, resource_size_t);
+#endif
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void merge_system_ram_resource(struct resource *res);
#endif
/* Wrappers for managed devices */
@@ -262,24 +399,41 @@ extern struct resource * __devm_request_region(struct device *dev,
extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
-extern int iomem_is_exclusive(u64 addr);
+extern bool iomem_is_exclusive(u64 addr);
+extern bool resource_is_exclusive(struct resource *resource, u64 addr,
+ resource_size_t size);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg, int (*func)(unsigned long, unsigned long, void *));
extern int
+walk_mem_res(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
- int (*func)(u64, u64, void *));
+ int (*func)(struct resource *, void *));
+extern int
+walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
- void *arg, int (*func)(u64, u64, void *));
+ void *arg, int (*func)(struct resource *, void *));
-/* True if any part of r1 overlaps r2 */
-static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+struct resource *devm_request_free_mem_region(struct device *dev,
+ struct resource *base, unsigned long size);
+struct resource *request_free_mem_region(struct resource *base,
+ unsigned long size, const char *name);
+struct resource *alloc_free_mem_region(struct resource *base,
+ unsigned long size, unsigned long align, const char *name);
+
+static inline void irqresource_disabled(struct resource *res, u32 irq)
{
- return (r1->start <= r2->end && r1->end >= r2->start);
+ res->start = irq;
+ res->end = irq;
+ res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
}
+extern struct address_space *iomem_get_mapping(void);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 8c1239020d79..5210e8371238 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -1,49 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOPRIO_H
#define IOPRIO_H
#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/iocontext.h>
-/*
- * Gives us 8 prio classes with 13-bits of data for each class
- */
-#define IOPRIO_CLASS_SHIFT (13)
-#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
-
-#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
-#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
-#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
-
-#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+#include <uapi/linux/ioprio.h>
/*
- * These are the io priority groups as implemented by CFQ. RT is the realtime
- * class, it always gets premium service. BE is the best-effort scheduling
- * class, the default for any process. IDLE is the idle scheduling class, it
- * is only served when no one else is using the disk.
+ * Default IO priority.
*/
-enum {
- IOPRIO_CLASS_NONE,
- IOPRIO_CLASS_RT,
- IOPRIO_CLASS_BE,
- IOPRIO_CLASS_IDLE,
-};
+#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0)
/*
- * 8 best effort priority levels are supported
+ * Check that a priority value has a valid class.
*/
-#define IOPRIO_BE_NR (8)
-
-enum {
- IOPRIO_WHO_PROCESS = 1,
- IOPRIO_WHO_PGRP,
- IOPRIO_WHO_USER,
-};
+static inline bool ioprio_valid(unsigned short ioprio)
+{
+ unsigned short class = IOPRIO_PRIO_CLASS(ioprio);
-/*
- * Fallback BE priority
- */
-#define IOPRIO_NORM (4)
+ return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
+}
/*
* if process has set io priority explicitly, use that. if not, convert
@@ -62,17 +40,59 @@ static inline int task_nice_ioclass(struct task_struct *task)
{
if (task->policy == SCHED_IDLE)
return IOPRIO_CLASS_IDLE;
- else if (task->policy == SCHED_FIFO || task->policy == SCHED_RR)
+ else if (rt_or_dl_task_policy(task))
return IOPRIO_CLASS_RT;
else
return IOPRIO_CLASS_BE;
}
+#ifdef CONFIG_BLOCK
/*
- * For inheritance, return the highest of the two given priorities
+ * If the task has set an I/O priority, use that. Otherwise, return
+ * the default I/O priority.
+ *
+ * Expected to be called for the current task or with task_lock() held to keep
+ * io_context stable.
*/
-extern int ioprio_best(unsigned short aprio, unsigned short bprio);
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+ struct io_context *ioc = p->io_context;
+ int prio;
+
+ if (!ioc)
+ return IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+ task_nice_ioprio(p));
+
+ if (p != current)
+ lockdep_assert_held(&p->alloc_lock);
+
+ prio = ioc->ioprio;
+ if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
+ prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+ task_nice_ioprio(p));
+ return prio;
+}
+#else
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+ return IOPRIO_DEFAULT;
+}
+#endif /* CONFIG_BLOCK */
+
+static inline int get_current_ioprio(void)
+{
+ return __get_task_ioprio(current);
+}
extern int set_task_ioprio(struct task_struct *task, int ioprio);
+#ifdef CONFIG_BLOCK
+extern int ioprio_check_cap(int ioprio);
+#else
+static inline int ioprio_check_cap(int ioprio)
+{
+ return -ENOTBLK;
+}
+#endif /* CONFIG_BLOCK */
+
#endif
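As a rough sketch of how the pieces above compose (log_current_ioprio() is invented; the macros come from uapi/linux/ioprio.h), a block-layer user can resolve the effective priority of the current task like this:

#include <linux/ioprio.h>
#include <linux/printk.h>

/* Hypothetical: report the current task's effective I/O priority.
 * get_current_ioprio() falls back to the scheduling class and nice
 * value when no priority was set explicitly via ioprio_set(). */
static void log_current_ioprio(void)
{
	int prio = get_current_ioprio();

	pr_info("ioprio: class %d, level %d, valid=%d\n",
		IOPRIO_PRIO_CLASS(prio), IOPRIO_PRIO_DATA(prio),
		ioprio_valid(prio));
}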
diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h
new file mode 100644
index 000000000000..2bd1661fe9ad
--- /dev/null
+++ b/include/linux/ioremap.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IOREMAP_H
+#define _LINUX_IOREMAP_H
+
+#include <linux/kasan.h>
+#include <asm/pgtable.h>
+#include <asm/vmalloc.h>
+
+#if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP)
+/*
+ * Ioremap often, but not always, uses the generic vmalloc area. E.g. on
+ * Power, it can have a separate ioremap space.
+ */
+#ifndef IOREMAP_START
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+#endif
+static inline bool is_ioremap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)kasan_reset_tag(x);
+
+ return addr >= IOREMAP_START && addr < IOREMAP_END;
+}
+#else
+static inline bool is_ioremap_addr(const void *x)
+{
+ return false;
+}
+#endif
+
+#endif /* _LINUX_IOREMAP_H */
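A brief sketch of how is_ioremap_addr() might be used (map_regs(), phys_base and win_size are placeholders): it lets generic code recognize ioremap()ed pointers, defaulting to the vmalloc range on architectures without a dedicated ioremap space.

#include <linux/bug.h>
#include <linux/io.h>
#include <linux/ioremap.h>

/* Hypothetical probe: map a register window and sanity-check that
 * the returned cookie falls inside the ioremap range. */
static void __iomem *map_regs(phys_addr_t phys_base, size_t win_size)
{
	void __iomem *regs = ioremap(phys_base, win_size);

	if (regs && WARN_ON(!is_ioremap_addr((void __force *)regs))) {
		iounmap(regs);
		return NULL;
	}
	return regs;
}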
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
new file mode 100644
index 000000000000..3e85afe794c0
--- /dev/null
+++ b/include/linux/iosys-map.h
@@ -0,0 +1,511 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer abstraction for IO/system memory
+ */
+
+#ifndef __IOSYS_MAP_H__
+#define __IOSYS_MAP_H__
+
+#include <linux/compiler_types.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+/**
+ * DOC: overview
+ *
+ * When accessing a memory region, depending on its location, users may have to
+ * access it with I/O operations or memory load/store operations. For example,
+ * copying to system memory could be done with memcpy(), copying to I/O memory
+ * would be done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ * void *vaddr = ...; // pointer to system memory
+ * memcpy(vaddr, src, len);
+ *
+ * void *vaddr_iomem = ...; // pointer to I/O memory
+ * memcpy_toio(vaddr_iomem, src, len);
+ *
+ * The user of such a pointer may not have information about the mapping of
+ * that region, or may want a single code path to handle operations on that
+ * buffer regardless of whether it's located in system or I/O memory. The type
+ * :c:type:`struct iosys_map <iosys_map>` and its helpers abstract that so the
+ * buffer can be passed around to other drivers or have separate duties inside
+ * the same driver for allocation, read and write operations.
+ *
+ * Open-coding access to :c:type:`struct iosys_map <iosys_map>` is considered
+ * bad style. Rather than accessing its fields directly, use one of the provided
+ * helper functions, or implement your own. For example, instances of
+ * :c:type:`struct iosys_map <iosys_map>` can be initialized statically with
+ * IOSYS_MAP_INIT_VADDR(), or at runtime with iosys_map_set_vaddr(). These
+ * helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = IOSYS_MAP_INIT_VADDR(0xdeadbeaf);
+ *
+ * iosys_map_set_vaddr(&map, 0xdeadbeaf);
+ *
+ * To set an address in I/O memory, use IOSYS_MAP_INIT_VADDR_IOMEM() or
+ * iosys_map_set_vaddr_iomem().
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(0xdeadbeaf);
+ *
+ * iosys_map_set_vaddr_iomem(&map, 0xdeadbeaf);
+ *
+ * Instances of struct iosys_map do not have to be cleaned up, but
+ * can be cleared to NULL with iosys_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * .. code-block:: c
+ *
+ * iosys_map_clear(&map);
+ *
+ * Test if a mapping is valid with either iosys_map_is_set() or
+ * iosys_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ * if (iosys_map_is_set(&map) != iosys_map_is_null(&map))
+ * // always true
+ *
+ * Instances of :c:type:`struct iosys_map <iosys_map>` can be compared for
+ * equality with iosys_map_is_equal(). Mappings that point to different memory
+ * spaces, system or I/O, are never equal. That's even true if both spaces are
+ * located in the same address space, both mappings contain the same address
+ * value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map sys_map; // refers to system memory
+ * struct iosys_map io_map; // refers to I/O memory
+ *
+ * if (iosys_map_is_equal(&sys_map, &io_map))
+ * // always false
+ *
+ * A set up instance of struct iosys_map can be used to access or manipulate the
+ * buffer memory. Depending on the location of the memory, the provided helpers
+ * will pick the correct operations. Data can be copied into the memory with
+ * iosys_map_memcpy_to(). The address can be manipulated with iosys_map_incr().
+ *
+ * .. code-block:: c
+ *
+ * const void *src = ...; // source buffer
+ * size_t len = ...; // length of src
+ *
+ * iosys_map_memcpy_to(&map, src, len);
+ * iosys_map_incr(&map, len); // go to first byte after the memcpy
+ */
+
+/**
+ * struct iosys_map - Pointer to IO/system memory
+ * @vaddr_iomem: The buffer's address if in I/O memory
+ * @vaddr: The buffer's address if in system memory
+ * @is_iomem: True if the buffer is located in I/O memory, or false
+ * otherwise.
+ */
+struct iosys_map {
+ union {
+ void __iomem *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+/**
+ * IOSYS_MAP_INIT_VADDR - Initializes struct iosys_map to an address in system memory
+ * @vaddr_: A system-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR(vaddr_) \
+ { \
+ .vaddr = (vaddr_), \
+ .is_iomem = false, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_VADDR_IOMEM - Initializes struct iosys_map to an address in I/O memory
+ * @vaddr_iomem_: An I/O-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR_IOMEM(vaddr_iomem_) \
+ { \
+ .vaddr_iomem = (vaddr_iomem_), \
+ .is_iomem = true, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map
+ * @map_: The iosys_map structure to copy from
+ * @offset_: Offset to add to the other mapping
+ *
+ * Initializes a new iosys_map struct based on another passed as argument. It
+ * does a shallow copy of the struct so it's possible to update the back storage
+ * without changing where the original map points to. It is the equivalent of
+ * doing:
+ *
+ * .. code-block:: c
+ *
+ *	struct iosys_map map = other_map;
+ *	iosys_map_incr(&map, offset);
+ *
+ * Example usage:
+ *
+ * .. code-block:: c
+ *
+ * void foo(struct device *dev, struct iosys_map *base_map)
+ * {
+ * ...
+ * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET);
+ * ...
+ * }
+ *
+ * The advantage of using the initializer over just increasing the offset with
+ * iosys_map_incr() like above is that the new map will always point to the
+ * right place of the buffer during its scope. It reduces the risk of updating
+ * the wrong part of the buffer and having no compiler warning about that. If
+ * the assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can warn
+ * about the use of an uninitialized variable.
+ */
+#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \
+ struct iosys_map copy_ = *map_; \
+ iosys_map_incr(&copy_, offset_); \
+ copy_; \
+})
+
+/**
+ * iosys_map_set_vaddr - Sets an iosys mapping structure to an address in system memory
+ * @map: The iosys_map structure
+ * @vaddr: A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr)
+{
+ map->vaddr = vaddr;
+ map->is_iomem = false;
+}
+
+/**
+ * iosys_map_set_vaddr_iomem - Sets an iosys mapping structure to an address in I/O memory
+ * @map: The iosys_map structure
+ * @vaddr_iomem: An I/O-memory address
+ *
+ * Sets the address and the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map,
+ void __iomem *vaddr_iomem)
+{
+ map->vaddr_iomem = vaddr_iomem;
+ map->is_iomem = true;
+}
+
+/**
+ * iosys_map_is_equal - Compares two iosys mapping structures for equality
+ * @lhs: The iosys_map structure
+ * @rhs: An iosys_map structure to compare with
+ *
+ * Two iosys mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool iosys_map_is_equal(const struct iosys_map *lhs,
+ const struct iosys_map *rhs)
+{
+ if (lhs->is_iomem != rhs->is_iomem)
+ return false;
+ else if (lhs->is_iomem)
+ return lhs->vaddr_iomem == rhs->vaddr_iomem;
+ else
+ return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * iosys_map_is_null - Tests if an iosys mapping is NULL
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool iosys_map_is_null(const struct iosys_map *map)
+{
+ if (map->is_iomem)
+ return !map->vaddr_iomem;
+ return !map->vaddr;
+}
+
+/**
+ * iosys_map_is_set - Tests if the iosys mapping has been set
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool iosys_map_is_set(const struct iosys_map *map)
+{
+ return !iosys_map_is_null(map);
+}
+
+/**
+ * iosys_map_clear - Clears an iosys mapping structure
+ * @map: The iosys_map structure
+ *
+ * Clears all fields to zero, including struct iosys_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset for
+ * system memory. Pointers are cleared to NULL. This is the default.
+ */
+static inline void iosys_map_clear(struct iosys_map *map)
+{
+ memset(map, 0, sizeof(*map));
+}
+
+/**
+ * iosys_map_memcpy_to - Memcpy into offset of iosys_map
+ * @dst: The iosys_map structure
+ * @dst_offset: The offset into @dst at which to copy
+ * @src: The source buffer
+ * @len: The number of bytes to copy
+ *
+ * Copies data into an iosys_map at an offset. The source buffer is in
+ * system memory. Depending on the buffer's location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset,
+ const void *src, size_t len)
+{
+ if (dst->is_iomem)
+ memcpy_toio(dst->vaddr_iomem + dst_offset, src, len);
+ else
+ memcpy(dst->vaddr + dst_offset, src, len);
+}
+
+/**
+ * iosys_map_memcpy_from - Memcpy from iosys_map into system memory
+ * @dst: Destination in system memory
+ * @src: The iosys_map structure
+ * @src_offset: The offset from which to copy
+ * @len: The number of bytes to copy
+ *
+ * Copies data from an iosys_map at an offset. The destination buffer is in
+ * system memory. Depending on the mapping location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src,
+ size_t src_offset, size_t len)
+{
+ if (src->is_iomem)
+ memcpy_fromio(dst, src->vaddr_iomem + src_offset, len);
+ else
+ memcpy(dst, src->vaddr + src_offset, len);
+}
+
+/**
+ * iosys_map_incr - Increments the address stored in an iosys mapping
+ * @map: The iosys_map structure
+ * @incr: The number of bytes to increment
+ *
+ * Increments the address stored in an iosys mapping. Depending on the
+ * buffer's location, the correct pointer field is updated.
+ */
+static inline void iosys_map_incr(struct iosys_map *map, size_t incr)
+{
+ if (map->is_iomem)
+ map->vaddr_iomem += incr;
+ else
+ map->vaddr += incr;
+}
+
+/**
+ * iosys_map_memset - Memset iosys_map
+ * @dst: The iosys_map structure
+ * @offset: Offset from dst where to start setting value
+ * @value: The value to set
+ * @len: The number of bytes to set in dst
+ *
+ * Set value in iosys_map. Depending on the buffer's location, the helper
+ * picks the correct method of accessing the memory.
+ */
+static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
+ int value, size_t len)
+{
+ if (dst->is_iomem)
+ memset_io(dst->vaddr_iomem + offset, value, len);
+ else
+ memset(dst->vaddr + offset, value, len);
+}
+
+#ifdef CONFIG_64BIT
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: val_ = readq(vaddr_iomem_)
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: writeq(val_, vaddr_iomem_)
+#else
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_fromio(&(val_), vaddr_iomem_, sizeof(u64))
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_toio(vaddr_iomem_, &(val_), sizeof(u64))
+#endif
+
+#define __iosys_map_rd_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: val__ = readb(vaddr_iomem__), \
+ u16: val__ = readw(vaddr_iomem__), \
+ u32: val__ = readl(vaddr_iomem__), \
+ __iosys_map_rd_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_rd_sys(val__, vaddr__, type__) \
+ val__ = READ_ONCE(*(type__ *)(vaddr__))
+
+#define __iosys_map_wr_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: writeb(val__, vaddr_iomem__), \
+ u16: writew(val__, vaddr_iomem__), \
+ u32: writel(val__, vaddr_iomem__), \
+ __iosys_map_wr_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_wr_sys(val__, vaddr__, type__) \
+ WRITE_ONCE(*(type__ *)(vaddr__), val__)
+
+/**
+ * iosys_map_rd - Read a C-type value from the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from which to read
+ * @type__: Type of the value being read
+ *
+ * Read a C type value (u8, u16, u32 or u64) from the iosys_map. For other
+ * types, or if the pointer may be unaligned (which some architectures cannot
+ * handle), use iosys_map_memcpy_from().
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd(map__, offset__, type__) ({ \
+ type__ val_; \
+ if ((map__)->is_iomem) { \
+ __iosys_map_rd_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
+ val_; \
+})
+
+/**
+ * iosys_map_wr - Write a C-type value to the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from the mapping to write to
+ * @type__: Type of the value being written
+ * @val__: Value to write
+ *
+ * Write a C type value (u8, u16, u32 or u64) to the iosys_map. For other
+ * types, or if the pointer may be unaligned (which some architectures cannot
+ * handle), use iosys_map_memcpy_to().
+ */
+#define iosys_map_wr(map__, offset__, type__, val__) ({ \
+ type__ val_ = (val__); \
+ if ((map__)->is_iomem) { \
+ __iosys_map_wr_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
+})
+
+/**
+ * iosys_map_rd_field - Read a member from a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to read
+ *
+ * Read a value from the iosys_map, whose layout is described by a C struct
+ * starting at @struct_offset__. The field offset and size are calculated and
+ * the value is read. If the field access would incur an unaligned access,
+ * either iosys_map_memcpy_from() must be used or the architecture must
+ * support it.
+ * For example: suppose there is a @struct foo defined as below and the value
+ * ``foo.field2.inner2`` needs to be read from the iosys_map:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * int field1;
+ * struct {
+ * int inner1;
+ * int inner2;
+ * } field2;
+ * int field3;
+ * } __packed;
+ *
+ * This is the expected memory layout of a buffer using iosys_map_rd_field():
+ *
+ * +------------------------------+--------------------------+
+ * | Address | Content |
+ * +==============================+==========================+
+ * | buffer + 0000 | start of mmapped buffer |
+ * | | pointed by iosys_map |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + ``struct_offset__`` | start of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + wwww | ``foo.field2.inner2`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + yyyy | end of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + zzzz                | end of mmapped buffer    |
+ * +------------------------------+--------------------------+
+ *
+ * Values automatically calculated by this macro or not needed are denoted by
+ * wwww, yyyy and zzzz. This is the code to read that value:
+ *
+ * .. code-block:: c
+ *
+ * x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2);
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \
+ struct_type__ *s_; \
+ iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s_->field__)); \
+})
+
+/**
+ * iosys_map_wr_field - Write to a member of a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to write
+ * @val__: Value to write
+ *
+ * Write a value to the iosys_map, whose layout is described by a C struct
+ * starting at @struct_offset__. The field offset and size are calculated and
+ * @val__ is written. If the field access would incur an unaligned access,
+ * then either iosys_map_memcpy_to() must be used or the
+ * architecture must support it. Refer to iosys_map_rd_field() for expected
+ * usage and memory layout.
+ */
+#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \
+ struct_type__ *s_; \
+ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s_->field__), val__); \
+})
+
+#endif /* __IOSYS_MAP_H__ */
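To make the single-code-path idea concrete, a small sketch (fill_header() and the magic value are invented): the same writer serves a buffer in system memory and one behind an ioremap()ed BAR, and only the map initialization differs.

#include <linux/iosys-map.h>

/* Hypothetical writer: a u32 magic followed by a payload. The
 * helpers pick WRITE_ONCE()/memcpy() or writel()/memcpy_toio()
 * based on map->is_iomem, so either memory type works. */
static void fill_header(struct iosys_map *map, const void *payload,
			size_t len)
{
	iosys_map_wr(map, 0, u32, 0x12345678);
	iosys_map_memcpy_to(map, sizeof(u32), payload, len);
}

A caller backed by system memory would pass a map initialized with IOSYS_MAP_INIT_VADDR(vaddr), one backed by I/O memory would use IOSYS_MAP_INIT_VADDR_IOMEM(vaddr_iomem); fill_header() itself is unchanged.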
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
new file mode 100644
index 000000000000..f9a17fbbd398
--- /dev/null
+++ b/include/linux/iov_iter.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* I/O iterator iteration building functions.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_IOV_ITER_H
+#define _LINUX_IOV_ITER_H
+
+#include <linux/uio.h>
+#include <linux/bvec.h>
+#include <linux/folio_queue.h>
+
+typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+
+/*
+ * Handle ITER_UBUF.
+ */
+static __always_inline
+size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ void __user *base = iter->ubuf;
+ size_t progress = 0, remain;
+
+ remain = step(base + iter->iov_offset, 0, len, priv, priv2);
+ progress = len - remain;
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_IOVEC.
+ */
+static __always_inline
+size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ const struct iovec *p = iter->__iov;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->__iov;
+ iter->__iov = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_KVEC.
+ */
+static __always_inline
+size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct kvec *p = iter->kvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->kvec;
+ iter->kvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_BVEC.
+ */
+static __always_inline
+size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct bio_vec *p = iter->bvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t offset = p->bv_offset + skip, part;
+ void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE);
+
+ part = min3(len,
+ (size_t)(p->bv_len - skip),
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE));
+ remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2);
+ kunmap_local(kaddr);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ if (skip >= p->bv_len) {
+ skip = 0;
+ p++;
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->nr_segs -= p - iter->bvec;
+ iter->bvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_FOLIOQ.
+ */
+static __always_inline
+size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ unsigned int slot = iter->folioq_slot;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ if (slot == folioq_nr_slots(folioq)) {
+ /* The iterator may have been extended. */
+ folioq = folioq->next;
+ slot = 0;
+ }
+
+ do {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t part, remain = 0, consumed;
+ size_t fsize;
+ void *base;
+
+ if (!folio)
+ break;
+
+ fsize = folioq_folio_size(folioq, slot);
+ if (skip < fsize) {
+ base = kmap_local_folio(folio, skip);
+ part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ }
+ if (skip >= fsize) {
+ skip = 0;
+ slot++;
+ if (slot == folioq_nr_slots(folioq) && folioq->next) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->folioq_slot = slot;
+ iter->folioq = folioq;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_XARRAY.
+ */
+static __always_inline
+size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ struct folio *folio;
+ size_t progress = 0;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ XA_STATE(xas, iter->xarray, index);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ size_t remain, consumed, offset, part, flen;
+
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start + progress);
+ flen = min(folio_size(folio) - offset, len);
+
+ while (flen) {
+ void *base = kmap_local_folio(folio, offset);
+
+ part = min_t(size_t, flen,
+ PAGE_SIZE - offset_in_page(offset));
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+
+ consumed = part - remain;
+ progress += consumed;
+ len -= consumed;
+
+ if (remain || len == 0)
+ goto out;
+ flen -= consumed;
+ offset += consumed;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_DISCARD.
+ */
+static __always_inline
+size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ size_t progress = len;
+
+ iter->count -= progress;
+ return progress;
+}
+
+/**
+ * iterate_and_advance2 - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * Two step functions, @step and @ustep, must be provided: one is given
+ * mapped kernel addresses and the other user addresses, which have the
+ * potential to fault since no pinning is performed.
+ *
+ * The step functions are passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * functions should return the amount of the segment they didn't process (ie. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (ie. 0 means nothing was
+ * processed and a return equal to @len means processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_ustep_f ustep, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+
+ if (likely(iter_is_ubuf(iter)))
+ return iterate_ubuf(iter, len, priv, priv2, ustep);
+ if (likely(iter_is_iovec(iter)))
+ return iterate_iovec(iter, len, priv, priv2, ustep);
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
+/**
+ * iterate_and_advance - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * As iterate_and_advance2(), but priv2 is always NULL.
+ */
+static __always_inline
+size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
+ iov_ustep_f ustep, iov_step_f step)
+{
+ return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
+}
+
+/**
+ * iterate_and_advance_kernel - Iterate over a kernel-internal iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * [!] Note: this will only handle BVEC, KVEC, FOLIOQ, XARRAY and DISCARD-type
+ * iterators; it will not handle UBUF or IOVEC-type iterators.
+ *
+ * A single step function, @step, must be provided; it is called only with
+ * mapped kernel addresses, since user-backed iterators are not handled here.
+ *
+ * The step function is passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * function should return the amount of the segment it didn't process (ie. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (ie. 0 means nothing was
+ * processed and a return equal to @len means processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
+#endif /* _LINUX_IOV_ITER_H */
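A sketch of the step-function contract described above (xor_step() and xor_from_iter() are invented names): a step returns the number of bytes it did not process, so returning 0 consumes the whole segment.

#include <linux/iov_iter.h>

/* Hypothetical step: XOR the iterated data into the buffer at
 * @priv, using @progress to keep the output in step with the
 * iteration. Returns 0, i.e. the segment is fully processed. */
static size_t xor_step(void *iter_base, size_t progress, size_t len,
		       void *priv, void *priv2)
{
	u8 *out = (u8 *)priv + progress;
	const u8 *in = iter_base;
	size_t i;

	for (i = 0; i < len; i++)
		out[i] ^= in[i];
	return 0;
}

/* Kernel-only iterators (BVEC/KVEC/FOLIOQ/XARRAY/DISCARD). */
static size_t xor_from_iter(struct iov_iter *iter, void *buf, size_t len)
{
	return iterate_and_advance_kernel(iter, len, buf, NULL, xor_step);
}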
diff --git a/include/linux/iova.h b/include/linux/iova.h
index e0a892ae45c0..d2c4fd923efa 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -1,11 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2006, Intel Corporation.
*
- * This file is released under the GPLv2.
- *
* Copyright (C) 2006-2008 Intel Corporation
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- *
*/
#ifndef _IOVA_H_
@@ -23,28 +21,23 @@ struct iova {
unsigned long pfn_lo; /* Lowest allocated pfn */
};
-struct iova_magazine;
-struct iova_cpu_rcache;
-#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
-#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
-
-struct iova_rcache {
- spinlock_t lock;
- unsigned long depot_size;
- struct iova_magazine *depot[MAX_GLOBAL_MAGS];
- struct iova_cpu_rcache __percpu *cpu_rcaches;
-};
+struct iova_rcache;
/* holds all the iova translations for a domain */
struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
- struct rb_node *cached32_node; /* Save last alloced node */
+ struct rb_node *cached_node; /* Save last alloced node */
+ struct rb_node *cached32_node; /* Save last 32-bit alloced node */
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
- struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
+ unsigned long max32_alloc_size; /* Size of last failed allocation */
+ struct iova anchor; /* rbtree lookup anchor */
+
+ struct iova_rcache *rcaches;
+ struct hlist_node cpuhp_dead;
};
static inline unsigned long iova_size(struct iova *iova)
@@ -72,6 +65,11 @@ static inline size_t iova_align(struct iova_domain *iovad, size_t size)
return ALIGN(size, iovad->granule);
}
+static inline size_t iova_align_down(struct iova_domain *iovad, size_t size)
+{
+ return ALIGN_DOWN(size, iovad->granule);
+}
+
static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
@@ -82,12 +80,12 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
-#if IS_ENABLED(CONFIG_IOMMU_IOVA)
+#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
-struct iova *alloc_iova_mem(void);
-void free_iova_mem(struct iova *iova);
+unsigned long iova_rcache_range(void);
+
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
@@ -96,17 +94,14 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
- unsigned long limit_pfn);
+ unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
-void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
- unsigned long start_pfn, unsigned long pfn_32bit);
+ unsigned long start_pfn);
+int iova_domain_init_rcaches(struct iova_domain *iovad);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
-struct iova *split_and_remove_iova(struct iova_domain *iovad,
- struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
-void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
@@ -117,15 +112,6 @@ static inline void iova_cache_put(void)
{
}
-static inline struct iova *alloc_iova_mem(void)
-{
- return NULL;
-}
-
-static inline void free_iova_mem(struct iova *iova)
-{
-}
-
static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}
@@ -150,7 +136,8 @@ static inline void free_iova_fast(struct iova_domain *iovad,
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
- unsigned long limit_pfn)
+ unsigned long limit_pfn,
+ bool flush_rcache)
{
return 0;
}
@@ -162,15 +149,9 @@ static inline struct iova *reserve_iova(struct iova_domain *iovad,
return NULL;
}
-static inline void copy_reserved_iova(struct iova_domain *from,
- struct iova_domain *to)
-{
-}
-
static inline void init_iova_domain(struct iova_domain *iovad,
unsigned long granule,
- unsigned long start_pfn,
- unsigned long pfn_32bit)
+ unsigned long start_pfn)
{
}
@@ -184,18 +165,6 @@ static inline void put_iova_domain(struct iova_domain *iovad)
{
}
-static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
- struct iova *iova,
- unsigned long pfn_lo,
- unsigned long pfn_hi)
-{
- return NULL;
-}
-
-static inline void free_cpu_cached_iovas(unsigned int cpu,
- struct iova_domain *iovad)
-{
-}
#endif
#endif
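A sketch of the revised initialization flow (demo_iova_init() and its parameters are placeholders): rcaches are now allocated separately from init_iova_domain(), and alloc_iova_fast() takes an explicit flush_rcache flag controlling whether per-CPU caches are flushed and the allocation retried on failure.

#include <linux/errno.h>
#include <linux/iova.h>
#include <linux/sizes.h>

/* Hypothetical domain setup with a 4KiB granule starting at pfn 1. */
static int demo_iova_init(struct iova_domain *iovad, unsigned long limit_pfn)
{
	unsigned long pfn;
	int ret;

	init_iova_domain(iovad, SZ_4K, 1);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		return ret;

	/* Try the caches as-is first; flush and retry on failure. */
	pfn = alloc_iova_fast(iovad, 8, limit_pfn, false);
	if (!pfn)
		pfn = alloc_iova_fast(iovad, 8, limit_pfn, true);
	return pfn ? 0 : -ENOMEM;
}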
diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h
new file mode 100644
index 000000000000..1c338f5e5b7a
--- /dev/null
+++ b/include/linux/iova_bitmap.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#ifndef _IOVA_BITMAP_H_
+#define _IOVA_BITMAP_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+struct iova_bitmap;
+
+typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length,
+ void *opaque);
+
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER)
+struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+ unsigned long page_size,
+ u64 __user *data);
+void iova_bitmap_free(struct iova_bitmap *bitmap);
+int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn);
+void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length);
+#else
+static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova,
+ size_t length,
+ unsigned long page_size,
+ u64 __user *data)
+{
+ return NULL;
+}
+
+static inline void iova_bitmap_free(struct iova_bitmap *bitmap)
+{
+}
+
+static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length)
+{
+}
+#endif
+
+#endif
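A sketch of the intended calling pattern (report_dirty() is invented): with CONFIG_IOMMUFD_DRIVER enabled the allocator is assumed to return an ERR_PTR() on failure, while the stubs above turn the whole path into a no-op.

#include <linux/err.h>
#include <linux/iova_bitmap.h>

/* Hypothetical dirty-tracking report: wrap the user buffer @data,
 * let the driver's callback mark dirty IOVA ranges through
 * iova_bitmap_set(), then release the bitmap. */
static int report_dirty(unsigned long iova, size_t length,
			unsigned long page_size, u64 __user *data,
			iova_bitmap_fn_t fn, void *opaque)
{
	struct iova_bitmap *bitmap;
	int ret;

	bitmap = iova_bitmap_alloc(iova, length, page_size, data);
	if (IS_ERR_OR_NULL(bitmap))
		return bitmap ? PTR_ERR(bitmap) : -ENOMEM;

	ret = iova_bitmap_for_each(bitmap, opaque, fn);
	iova_bitmap_free(bitmap);
	return ret;
}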
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 492bc6513533..d11c25f5030a 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -8,11 +9,6 @@
* Version: @(#)ip.h 1.0.2 04/28/93
*
* Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IP_H
#define _LINUX_IP_H
@@ -34,4 +30,30 @@ static inline struct iphdr *ipip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_transport_header(skb);
}
+
+static inline unsigned int ip_transport_len(const struct sk_buff *skb)
+{
+ return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
+}
+
+static inline unsigned int iph_totlen(const struct sk_buff *skb, const struct iphdr *iph)
+{
+ u32 len = ntohs(iph->tot_len);
+
+ return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
+ len : skb->len - skb_network_offset(skb);
+}
+
+static inline unsigned int skb_ip_totlen(const struct sk_buff *skb)
+{
+ return iph_totlen(skb, ip_hdr(skb));
+}
+
+/* The IPv4 datagram length is stored in a 16-bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFFU
+
+static inline void iph_set_totlen(struct iphdr *iph, unsigned int len)
+{
+ iph->tot_len = len <= IP_MAX_MTU ? htons(len) : 0;
+}
#endif /* _LINUX_IP_H */
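A sketch of how the new length helpers interact for oversized (GSO "BIG TCP") IPv4 packets, where the true length no longer fits in tot_len (demo_totlen() is invented):

#include <linux/ip.h>
#include <linux/skbuff.h>

/* Hypothetical round trip: skb_ip_totlen() recovers the real length
 * even when tot_len was stored as 0 for an oversized GSO packet, and
 * iph_set_totlen() writes 0 back whenever the length exceeds
 * IP_MAX_MTU. */
static void demo_totlen(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	unsigned int len = skb_ip_totlen(skb);

	iph_set_totlen(iph, len);
}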
diff --git a/include/linux/ipack.h b/include/linux/ipack.h
index 8bddc3fbdddf..455f6c2a1903 100644
--- a/include/linux/ipack.h
+++ b/include/linux/ipack.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industry-pack bus.
*
* Copyright (C) 2011-2012 CERN (www.cern.ch)
* Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2 of the License.
*/
#include <linux/mod_devicetable.h>
@@ -73,15 +70,13 @@ enum ipack_space {
IPACK_SPACE_COUNT,
};
-/**
- */
struct ipack_region {
phys_addr_t start;
size_t size;
};
/**
- * struct ipack_device
+ * struct ipack_device - subsystem representation of an IPack device
*
* @slot: Slot where the device is plugged in the carrier board
* @bus: ipack_bus_device where the device is plugged to.
@@ -92,7 +87,7 @@ struct ipack_region {
*
* Warning: Direct access to mapped memory is possible but the endianness
* is not the same with PCI carrier or VME carrier. The endianness is managed
- * by the carrier board throught bus->ops.
+ * by the carrier board through bus->ops.
*/
struct ipack_device {
unsigned int slot;
@@ -127,6 +122,7 @@ struct ipack_driver_ops {
* struct ipack_driver -- Specific data to each ipack device driver
*
* @driver: Device driver kernel representation
+ * @id_table: Device ID table for this driver
* @ops: Callbacks provided by the IPack device driver
*/
struct ipack_driver {
@@ -164,7 +160,7 @@ struct ipack_bus_ops {
};
/**
- * struct ipack_bus_device
+ * struct ipack_bus_device - IPack bus representation
*
* @dev: pointer to carrier device
* @slots: number of slots available
@@ -188,6 +184,8 @@ struct ipack_bus_device {
*
* The carrier board device should call this function to register itself as
* available bus device in ipack.
+ *
+ * Return: %NULL on error or &struct ipack_bus_device on success
*/
struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
const struct ipack_bus_ops *ops,
@@ -195,6 +193,8 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
/**
* ipack_bus_unregister -- unregister an ipack bus
+ *
+ * Return: %0
*/
int ipack_bus_unregister(struct ipack_bus_device *bus);
@@ -203,6 +203,8 @@ int ipack_bus_unregister(struct ipack_bus_device *bus);
*
* Called by a ipack driver to register itself as a driver
* that can manage ipack devices.
+ *
+ * Return: zero on success or error code on failure.
*/
int ipack_driver_register(struct ipack_driver *edrv, struct module *owner,
const char *name);
@@ -218,7 +220,7 @@ void ipack_driver_unregister(struct ipack_driver *edrv);
* function. The rest of the fields will be allocated and populated
 * during initialization.
*
- * Return zero on success or error code on failure.
+ * Return: zero on success or error code on failure.
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use ipack_put_device() to give up the
@@ -233,7 +235,7 @@ int ipack_device_init(struct ipack_device *dev);
* Add a new IPack device. The call is done by the carrier driver
* after calling ipack_device_init().
*
- * Return zero on success or error code on failure.
+ * Return: zero on success or error code on failure.
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use ipack_put_device() to give up the
@@ -269,9 +271,11 @@ void ipack_put_device(struct ipack_device *dev);
.device = (dev)
/**
- * ipack_get_carrier - it increase the carrier ref. counter of
+ * ipack_get_carrier - try to increase the carrier ref. counter of
* the carrier module
* @dev: mezzanine device which wants to get the carrier
+ *
+ * Return: true on success.
*/
static inline int ipack_get_carrier(struct ipack_device *dev)
{
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index fadd579d577d..9b1434247aab 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -1,11 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IPC_H
#define _LINUX_IPC_H
-#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
#include <linux/uidgid.h>
+#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
-
-#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
+#include <linux/refcount.h>
/* used by in-kernel data structures */
struct kern_ipc_perm {
@@ -21,8 +22,10 @@ struct kern_ipc_perm {
unsigned long seq;
void *security;
+ struct rhash_head khtnode;
+
struct rcu_head rcu;
- atomic_t refcount;
+ refcount_t refcount;
} ____cacheline_aligned_in_smp __randomize_layout;
#endif /* _LINUX_IPC_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 65327ee0936b..12faca29bbb9 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IPC_NAMESPACE_H__
#define __IPC_NAMESPACE_H__
@@ -7,6 +8,10 @@
#include <linux/notifier.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable-types.h>
+#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
struct user_namespace;
@@ -15,11 +20,15 @@ struct ipc_ids {
unsigned short seq;
struct rw_semaphore rwsem;
struct idr ipcs_idr;
+ int max_idx;
+ int last_idx; /* For wrap around detection */
+#ifdef CONFIG_CHECKPOINT_RESTORE
int next_id;
+#endif
+ struct rhashtable key_ht;
};
struct ipc_namespace {
- atomic_t count;
struct ipc_ids ids[3];
int sem_ctls[4];
@@ -28,8 +37,8 @@ struct ipc_namespace {
unsigned int msg_ctlmax;
unsigned int msg_ctlmnb;
unsigned int msg_ctlmni;
- atomic_t msg_bytes;
- atomic_t msg_hdrs;
+ struct percpu_counter percpu_msg_bytes;
+ struct percpu_counter percpu_msg_hdrs;
size_t shm_ctlmax;
size_t shm_ctlall;
@@ -56,10 +65,18 @@ struct ipc_namespace {
unsigned int mq_msg_default;
unsigned int mq_msgsize_default;
+ struct ctl_table_set mq_set;
+ struct ctl_table_header *mq_sysctls;
+
+ struct ctl_table_set ipc_set;
+ struct ctl_table_header *ipc_sysctls;
+
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
struct ucounts *ucounts;
+ struct llist_node mnt_llist;
+
struct ns_common ns;
} __randomize_layout;
@@ -112,19 +129,34 @@ static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
#endif
#if defined(CONFIG_IPC_NS)
-extern struct ipc_namespace *copy_ipcs(unsigned long flags,
+static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct ipc_namespace, ns);
+}
+
+extern struct ipc_namespace *copy_ipcs(u64 flags,
struct user_namespace *user_ns, struct ipc_namespace *ns);
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ ns_ref_inc(ns);
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ if (ns) {
+ if (ns_ref_get(ns))
+ return ns;
+ }
+
+ return NULL;
+}
+
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
-static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
+static inline struct ipc_namespace *copy_ipcs(u64 flags,
struct user_namespace *user_ns, struct ipc_namespace *ns)
{
if (flags & CLONE_NEWIPC)
@@ -138,6 +170,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ return ns;
+}
+
static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}
@@ -145,15 +182,37 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
-struct ctl_table_header;
-extern struct ctl_table_header *mq_register_sysctl_table(void);
+void retire_mq_sysctls(struct ipc_namespace *ns);
+bool setup_mq_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
-static inline struct ctl_table_header *mq_register_sysctl_table(void)
+static inline void retire_mq_sysctls(struct ipc_namespace *ns)
{
- return NULL;
+}
+
+static inline bool setup_mq_sysctls(struct ipc_namespace *ns)
+{
+ return true;
}
#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
+
+#ifdef CONFIG_SYSVIPC_SYSCTL
+
+bool setup_ipc_sysctls(struct ipc_namespace *ns);
+void retire_ipc_sysctls(struct ipc_namespace *ns);
+
+#else /* CONFIG_SYSVIPC_SYSCTL */
+
+static inline void retire_ipc_sysctls(struct ipc_namespace *ns)
+{
+}
+
+static inline bool setup_ipc_sysctls(struct ipc_namespace *ns)
+{
+ return true;
+}
+
+#endif /* CONFIG_SYSVIPC_SYSCTL */
#endif
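A short sketch of why get_ipc_ns_not_zero() exists (demo_ns_ref() is invented): a lookup path that races with namespace teardown must not bump a reference count that has already reached zero, so it uses the conditional getter instead of get_ipc_ns().

#include <linux/ipc_namespace.h>

/* Hypothetical racy lookup: take a reference only if the namespace
 * is still live; NULL means it is already being torn down. */
static void demo_ns_ref(struct ipc_namespace *ns)
{
	struct ipc_namespace *ref = get_ipc_ns_not_zero(ns);

	if (!ref)
		return;

	/* ... safely use the namespace ... */

	put_ipc_ns(ref);
}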
diff --git a/include/linux/ipmi-fru.h b/include/linux/ipmi-fru.h
deleted file mode 100644
index 4d3a76380e32..000000000000
--- a/include/linux/ipmi-fru.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * Released according to the GNU GPL, version 2 or any later version.
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#ifndef __LINUX_IPMI_FRU_H__
-#define __LINUX_IPMI_FRU_H__
-#ifdef __KERNEL__
-# include <linux/types.h>
-# include <linux/string.h>
-#else
-# include <stdint.h>
-# include <string.h>
-#endif
-
-/*
- * These structures match the unaligned crap we have in FRU1011.pdf
- * (http://download.intel.com/design/servers/ipmi/FRU1011.pdf)
- */
-
-/* chapter 8, page 5 */
-struct fru_common_header {
- uint8_t format; /* 0x01 */
- uint8_t internal_use_off; /* multiple of 8 bytes */
- uint8_t chassis_info_off; /* multiple of 8 bytes */
- uint8_t board_area_off; /* multiple of 8 bytes */
- uint8_t product_area_off; /* multiple of 8 bytes */
- uint8_t multirecord_off; /* multiple of 8 bytes */
- uint8_t pad; /* must be 0 */
- uint8_t checksum; /* sum modulo 256 must be 0 */
-};
-
-/* chapter 9, page 5 -- internal_use: not used by us */
-
-/* chapter 10, page 6 -- chassis info: not used by us */
-
-/* chapter 13, page 9 -- used by board_info_area below */
-struct fru_type_length {
- uint8_t type_length;
- uint8_t data[0];
-};
-
-/* chapter 11, page 7 */
-struct fru_board_info_area {
- uint8_t format; /* 0x01 */
- uint8_t area_len; /* multiple of 8 bytes */
- uint8_t language; /* I hope it's 0 */
- uint8_t mfg_date[3]; /* LSB, minutes since 1996-01-01 */
- struct fru_type_length tl[0]; /* type-length stuff follows */
-
- /*
- * the TL there are in order:
- * Board Manufacturer
- * Board Product Name
- * Board Serial Number
- * Board Part Number
- * FRU File ID (may be null)
- * more manufacturer-specific stuff
- * 0xc1 as a terminator
- * 0x00 pad to a multiple of 8 bytes - 1
- * checksum (sum of all stuff module 256 must be zero)
- */
-};
-
-enum fru_type {
- FRU_TYPE_BINARY = 0x00,
- FRU_TYPE_BCDPLUS = 0x40,
- FRU_TYPE_ASCII6 = 0x80,
- FRU_TYPE_ASCII = 0xc0, /* not ascii: depends on language */
-};
-
-/*
- * some helpers
- */
-static inline struct fru_board_info_area *fru_get_board_area(
- const struct fru_common_header *header)
-{
- /* we know for sure that the header is 8 bytes in size */
- return (struct fru_board_info_area *)(header + header->board_area_off);
-}
-
-static inline int fru_type(struct fru_type_length *tl)
-{
- return tl->type_length & 0xc0;
-}
-
-static inline int fru_length(struct fru_type_length *tl)
-{
- return (tl->type_length & 0x3f) + 1; /* len of whole record */
-}
-
-/* assume ascii-latin1 encoding */
-static inline int fru_strlen(struct fru_type_length *tl)
-{
- return fru_length(tl) - 1;
-}
-
-static inline char *fru_strcpy(char *dest, struct fru_type_length *tl)
-{
- int len = fru_strlen(tl);
- memcpy(dest, tl->data, len);
- dest[len] = '\0';
- return dest;
-}
-
-static inline struct fru_type_length *fru_next_tl(struct fru_type_length *tl)
-{
- return tl + fru_length(tl);
-}
-
-static inline int fru_is_eof(struct fru_type_length *tl)
-{
- return tl->type_length == 0xc1;
-}
-
-/*
- * External functions defined in fru-parse.c.
- */
-extern int fru_header_cksum_ok(struct fru_common_header *header);
-extern int fru_bia_cksum_ok(struct fru_board_info_area *bia);
-
-/* All these 4 return allocated strings by calling fru_alloc() */
-extern char *fru_get_board_manufacturer(struct fru_common_header *header);
-extern char *fru_get_product_name(struct fru_common_header *header);
-extern char *fru_get_serial_number(struct fru_common_header *header);
-extern char *fru_get_part_number(struct fru_common_header *header);
-
-/* This must be defined by the caller of the above functions */
-extern void *fru_alloc(size_t size);
-
-#endif /* __LINUX_IMPI_FRU_H__ */
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index f1045b2c6a00..7da6602eab71 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ipmi.h
*
@@ -9,26 +10,6 @@
*
* Copyright 2002 MontaVista Software Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_IPMI_H
#define __LINUX_IPMI_H
@@ -42,9 +23,11 @@
struct module;
struct device;
-/* Opaque type for a IPMI message user. One of these is needed to
- send and receive messages. */
-typedef struct ipmi_user *ipmi_user_t;
+/*
+ * Opaque type for an IPMI message user. One of these is needed to
+ * send and receive messages.
+ */
+struct ipmi_user;
/*
* Stuff coming from the receive interface comes as one of these.
@@ -56,83 +39,120 @@ typedef struct ipmi_user *ipmi_user_t;
struct ipmi_recv_msg {
struct list_head link;
- /* The type of message as defined in the "Receive Types"
- defines above. */
+ /*
+ * The type of message as defined in the "Receive Types"
+ * defines above.
+ */
int recv_type;
- ipmi_user_t user;
+ struct ipmi_user *user;
struct ipmi_addr addr;
long msgid;
struct kernel_ipmi_msg msg;
- /* The user_msg_data is the data supplied when a message was
- sent, if this is a response to a sent message. If this is
- not a response to a sent message, then user_msg_data will
- be NULL. If the user above is NULL, then this will be the
- intf. */
+ /*
+ * The user_msg_data is the data supplied when a message was
+ * sent, if this is a response to a sent message. If this is
+ * not a response to a sent message, then user_msg_data will
+ * be NULL. If the user above is NULL, then this will be the
+ * intf.
+ */
void *user_msg_data;
- /* Call this when done with the message. It will presumably free
- the message and do any other necessary cleanup. */
+ /*
+ * Call this when done with the message. It will presumably free
+ * the message and do any other necessary cleanup.
+ */
void (*done)(struct ipmi_recv_msg *msg);
- /* Place-holder for the data, don't make any assumptions about
- the size or existence of this, since it may change. */
+ /*
+ * Place-holder for the data, don't make any assumptions about
+ * the size or existence of this, since it may change.
+ */
unsigned char msg_data[IPMI_MAX_MSG_LENGTH];
};
+#define INIT_IPMI_RECV_MSG(done_handler) \
+{ \
+ .done = done_handler \
+}
+
/* Allocate and free the receive message. */
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);
struct ipmi_user_hndl {
- /* Routine type to call when a message needs to be routed to
- the upper layer. This will be called with some locks held,
- the only IPMI routines that can be called are ipmi_request
- and the alloc/free operations. The handler_data is the
- variable supplied when the receive handler was registered. */
+ /*
+ * Routine type to call when a message needs to be routed to
+ * the upper layer. This will be called with some locks held,
+ * the only IPMI routines that can be called are ipmi_request
+ * and the alloc/free operations. The handler_data is the
+ * variable supplied when the receive handler was registered.
+ */
void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg,
void *user_msg_data);
- /* Called when the interface detects a watchdog pre-timeout. If
- this is NULL, it will be ignored for the user. */
+ /*
+ * Called when the interface detects a watchdog pre-timeout. If
+ * this is NULL, it will be ignored for the user. Note that you
+ * can't do any IPMI calls from here, it's called with locks held.
+ */
void (*ipmi_watchdog_pretimeout)(void *handler_data);
+
+ /*
+ * If not NULL, called at panic time after the interface has
+ * been set up to handle run to completion.
+ */
+ void (*ipmi_panic_handler)(void *handler_data);
+
+ /*
+ * Called when the interface has been removed. After this returns
+ * the user handle will be invalid. The interface may or may
+ * not be usable when this is called, but it will return errors
+ * if it is not usable.
+ */
+ void (*shutdown)(void *handler_data);
};
/* Create a new user of the IPMI layer on the given interface number. */
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
- ipmi_user_t *user);
+ struct ipmi_user **user);
-/* Destroy the given user of the IPMI layer. Note that after this
- function returns, the system is guaranteed to not call any
- callbacks for the user. Thus as long as you destroy all the users
- before you unload a module, you will be safe. And if you destroy
- the users before you destroy the callback structures, it should be
- safe, too. */
-int ipmi_destroy_user(ipmi_user_t user);
+/*
+ * Destroy the given user of the IPMI layer. Note that after this
+ * function returns, the system is guaranteed to not call any
+ * callbacks for the user. Thus as long as you destroy all the users
+ * before you unload a module, you will be safe. And if you destroy
+ * the users before you destroy the callback structures, it should be
+ * safe, too.
+ */
+void ipmi_destroy_user(struct ipmi_user *user);
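
A hedged sketch of the create/destroy pairing under the new struct ipmi_user signatures; the interface number, the handler, and every example_* name are placeholders:

static void example_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	/* Consume the message, then return it to the IPMI layer. */
	ipmi_free_recv_msg(msg);
}

static const struct ipmi_user_hndl example_hndl = {
	.ipmi_recv_hndl = example_recv,
};

static struct ipmi_user *example_user;

static int example_attach(void)
{
	return ipmi_create_user(0, &example_hndl, NULL, &example_user);
}

static void example_detach(void)
{
	/* No error to check: ipmi_destroy_user() now returns void. */
	ipmi_destroy_user(example_user);
}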
/* Get the IPMI version of the BMC we are talking to. */
-void ipmi_get_version(ipmi_user_t user,
- unsigned char *major,
- unsigned char *minor);
-
-/* Set and get the slave address and LUN that we will use for our
- source messages. Note that this affects the interface, not just
- this user, so it will affect all users of this interface. This is
- so some initialization code can come in and do the OEM-specific
- things it takes to determine your address (if not the BMC) and set
- it for everyone else. Note that each channel can have its own address. */
-int ipmi_set_my_address(ipmi_user_t user,
+int ipmi_get_version(struct ipmi_user *user,
+ unsigned char *major,
+ unsigned char *minor);
+
+/*
+ * Set and get the slave address and LUN that we will use for our
+ * source messages. Note that this affects the interface, not just
+ * this user, so it will affect all users of this interface. This is
+ * so some initialization code can come in and do the OEM-specific
+ * things it takes to determine your address (if not the BMC) and set
+ * it for everyone else. Note that each channel can have its own
+ * address.
+ */
+int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address);
-int ipmi_get_my_address(ipmi_user_t user,
+int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address);
-int ipmi_set_my_LUN(ipmi_user_t user,
+int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN);
-int ipmi_get_my_LUN(ipmi_user_t user,
+int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *LUN);
@@ -149,7 +169,7 @@ int ipmi_get_my_LUN(ipmi_user_t user,
* it makes no sense to do it here. However, this can be used if you
* have unusual requirements.
*/
-int ipmi_request_settime(ipmi_user_t user,
+int ipmi_request_settime(struct ipmi_user *user,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
@@ -167,7 +187,7 @@ int ipmi_request_settime(ipmi_user_t user,
* change as the system changes, so don't use it unless you REALLY
* have to.
*/
-int ipmi_request_supply_msgs(ipmi_user_t user,
+int ipmi_request_supply_msgs(struct ipmi_user *user,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
@@ -183,7 +203,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
* way. This is useful if you need to spin waiting for something to
* happen in the IPMI driver.
*/
-void ipmi_poll_interface(ipmi_user_t user);
+void ipmi_poll_interface(struct ipmi_user *user);
/*
* When commands come in to the SMS, the user can register to receive
@@ -194,11 +214,11 @@ void ipmi_poll_interface(ipmi_user_t user);
* error. Channels are specified as a bitfield, use IPMI_CHAN_ALL to
* mean all channels.
*/
-int ipmi_register_for_cmd(ipmi_user_t user,
+int ipmi_register_for_cmd(struct ipmi_user *user,
unsigned char netfn,
unsigned char cmd,
unsigned int chans);
-int ipmi_unregister_for_cmd(ipmi_user_t user,
+int ipmi_unregister_for_cmd(struct ipmi_user *user,
unsigned char netfn,
unsigned char cmd,
unsigned int chans);
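
For example, a driver that wants to see incoming Get Device ID commands on every channel could register as below (a sketch assuming a user created as above; IPMI_CHAN_ALL and the netfn/cmd defines come from the uapi headers):

static int example_watch_get_device_id(struct ipmi_user *user)
{
	return ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST,
				     IPMI_GET_DEVICE_ID_CMD, IPMI_CHAN_ALL);
}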
@@ -229,8 +249,8 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
*
* See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means.
*/
-int ipmi_get_maintenance_mode(ipmi_user_t user);
-int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
+int ipmi_get_maintenance_mode(struct ipmi_user *user);
+int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode);
/*
* When the user is created, it will not receive IPMI events by
@@ -238,7 +258,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
* The first user that sets this to TRUE will receive all events that
* have been queued while no one was waiting for events.
*/
-int ipmi_set_gets_events(ipmi_user_t user, bool val);
+int ipmi_set_gets_events(struct ipmi_user *user, bool val);
/*
* Called when a new SMI is registered. This will also be called on
@@ -248,14 +268,18 @@ int ipmi_set_gets_events(ipmi_user_t user, bool val);
struct ipmi_smi_watcher {
struct list_head link;
- /* You must set the owner to the current module, if you are in
- a module (generally just set it to "THIS_MODULE"). */
+ /*
+ * You must set the owner to the current module, if you are in
+ * a module (generally just set it to "THIS_MODULE").
+ */
struct module *owner;
- /* These two are called with read locks held for the interface
- the watcher list. So you can add and remove users from the
- IPMI interface, send messages, etc., but you cannot add
- or remove SMI watchers or SMI interfaces. */
+ /*
+ * These two are called with read locks held for the interface
+ * the watcher list. So you can add and remove users from the
+ * IPMI interface, send messages, etc., but you cannot add
+ * or remove SMI watchers or SMI interfaces.
+ */
void (*new_smi)(int if_num, struct device *dev);
void (*smi_gone)(int if_num);
};
@@ -263,8 +287,10 @@ struct ipmi_smi_watcher {
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher);
int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher);
-/* The following are various helper functions for dealing with IPMI
- addresses. */
+/*
+ * The following are various helper functions for dealing with IPMI
+ * addresses.
+ */
 /* Return the maximum length of an IPMI address given its type. */
unsigned int ipmi_addr_length(int addr_type);
@@ -277,7 +303,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len);
*/
enum ipmi_addr_src {
SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
- SI_PCI, SI_DEVICETREE, SI_LAST
+ SI_PCI, SI_DEVICETREE, SI_PLATFORM, SI_LAST
};
const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
@@ -310,7 +336,22 @@ struct ipmi_smi_info {
union ipmi_smi_info_union addr_info;
};
-/* This is to get the private info of ipmi_smi_t */
+/* This is to get the private info of struct ipmi_smi */
extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
+#define GET_DEVICE_ID_MAX_RETRY 5
+
+/* Helper function for computing the IPMB checksum of some data. */
+unsigned char ipmb_checksum(unsigned char *data, int size);
+
+/*
+ * For things that must send messages at panic time, like the IPMI watchdog
+ * driver that extends the reset time on a panic, use this to send messages
+ * from panic context. Note that this puts the driver into a mode that
+ * only works at panic time, so only use it then.
+ */
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg);
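
A sketch modeled on the watchdog case the comment describes; the reset-timer command value is an assumption (the watchdog driver carries its own define), while the system-interface address types come from the uapi header:

#define EXAMPLE_WDOG_RESET_TIMER	0x22	/* assumed, driver-local value */

static void example_extend_wdog_at_panic(struct ipmi_user *user)
{
	struct ipmi_system_interface_addr addr = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel   = IPMI_BMC_CHANNEL,
	};
	struct kernel_ipmi_msg msg = {
		.netfn    = IPMI_NETFN_APP_REQUEST,
		.cmd      = EXAMPLE_WDOG_RESET_TIMER,
		.data     = NULL,
		.data_len = 0,
	};

	ipmi_panic_request_and_wait(user, (struct ipmi_addr *)&addr, &msg);
}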
+
#endif /* __LINUX_IPMI_H */
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index f8cea14485dd..892e2d656e1e 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ipmi_smi.h
*
@@ -9,26 +10,6 @@
*
* Copyright 2002 MontaVista Software Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_IPMI_SMI_H
@@ -41,11 +22,74 @@
struct device;
-/* This files describes the interface for IPMI system management interface
- drivers to bind into the IPMI message handler. */
+/*
+ * This file describes the interface for IPMI system management interface
+ * drivers to bind into the IPMI message handler.
+ */
/* Structure for the low-level drivers. */
-typedef struct ipmi_smi *ipmi_smi_t;
+struct ipmi_smi;
+
+/*
+ * Flags for set_check_watch() below. Tells if the SMI should be
+ * waiting for watchdog timeouts, commands and/or messages.
+ */
+#define IPMI_WATCH_MASK_CHECK_MESSAGES (1 << 0)
+#define IPMI_WATCH_MASK_CHECK_WATCHDOG (1 << 1)
+#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 2)
+
+/*
+ * SMI messages
+ *
+ * When communicating with an SMI, messages come in two formats:
+ *
+ * * Normal (to a BMC over a BMC interface)
+ *
+ * * IPMB (over an IPMB to another MC)
+ *
+ * When normal, commands are sent using the format defined by a
+ * standard message over KCS (NetFn must be even):
+ *
+ * +-----------+-----+------+
+ * | NetFn/LUN | Cmd | Data |
+ * +-----------+-----+------+
+ *
+ * And responses, similarly, with a completion code added (NetFn must
+ * be odd):
+ *
+ * +-----------+-----+------+------+
+ * | NetFn/LUN | Cmd | CC | Data |
+ * +-----------+-----+------+------+
+ *
+ * With normal messages, only commands are sent and only responses are
+ * received.
+ *
+ * In IPMB mode, we are acting as an IPMB device. Commands will be in
+ * the following format (NetFn must be even):
+ *
+ * +-------------+------+-------------+-----+------+
+ * | NetFn/rsLUN | Addr | rqSeq/rqLUN | Cmd | Data |
+ * +-------------+------+-------------+-----+------+
+ *
+ * Responses will use the following format:
+ *
+ * +-------------+------+-------------+-----+------+------+
+ * | NetFn/rqLUN | Addr | rqSeq/rsLUN | Cmd | CC | Data |
+ * +-------------+------+-------------+-----+------+------+
+ *
+ * This is similar to the format defined in the IPMB manual section
+ * 2.11.1 with the checksums and the first address removed. Also, the
+ * address is always the remote address.
+ *
+ * IPMB messages can be commands and responses in both directions.
+ * Received commands are handled as received commands from the message
+ * queue.
+ */
+
+enum ipmi_smi_msg_type {
+ IPMI_SMI_MSG_TYPE_NORMAL = 0,
+ IPMI_SMI_MSG_TYPE_IPMB_DIRECT
+};
/*
* Messages to/from the lower layer. The smi interface will take one
@@ -63,8 +107,11 @@ typedef struct ipmi_smi *ipmi_smi_t;
struct ipmi_smi_msg {
struct list_head link;
- long msgid;
- void *user_data;
+ enum ipmi_smi_msg_type type;
+
+ long msgid;
+ /* Response to this message, will be NULL if not from a user request. */
+ struct ipmi_recv_msg *recv_msg;
int data_size;
unsigned char data[IPMI_MAX_MSG_LENGTH];
@@ -72,20 +119,40 @@ struct ipmi_smi_msg {
int rsp_size;
unsigned char rsp[IPMI_MAX_MSG_LENGTH];
- /* Will be called when the system is done with the message
- (presumably to free it). */
+ /*
+ * Will be called when the system is done with the message
+ * (presumably to free it).
+ */
void (*done)(struct ipmi_smi_msg *msg);
};
+#define INIT_IPMI_SMI_MSG(done_handler) \
+{ \
+ .done = done_handler, \
+ .type = IPMI_SMI_MSG_TYPE_NORMAL \
+}
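
As with INIT_IPMI_RECV_MSG in ipmi.h, this lets a lower layer pre-allocate a message for contexts where allocation is impossible; a sketch with placeholder names:

static void smi_msg_done(struct ipmi_smi_msg *msg)
{
	/* Statically allocated, so there is nothing to free here. */
}

static struct ipmi_smi_msg halt_smi_msg = INIT_IPMI_SMI_MSG(smi_msg_done);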
+
struct ipmi_smi_handlers {
struct module *owner;
- /* The low-level interface cannot start sending messages to
- the upper layer until this function is called. This may
- not be NULL, the lower layer must take the interface from
- this call. */
- int (*start_processing)(void *send_info,
- ipmi_smi_t new_intf);
+ /* Capabilities of the SMI. */
+#define IPMI_SMI_CAN_HANDLE_IPMB_DIRECT (1 << 0)
+ unsigned int flags;
+
+ /*
+ * The low-level interface cannot start sending messages to
+ * the upper layer until this function is called. This may
+ * not be NULL, the lower layer must take the interface from
+ * this call.
+ */
+ int (*start_processing)(void *send_info,
+ struct ipmi_smi *new_intf);
+
+ /*
+ * When called, the low-level interface should disable all
+ * processing; it should be completely shut down when it returns.
+ */
+ void (*shutdown)(void *send_info);
/*
* Get the detailed private info of the low level interface and store
@@ -94,56 +161,66 @@ struct ipmi_smi_handlers {
*/
int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data);
- /* Called to enqueue an SMI message to be sent. This
- operation is not allowed to fail. If an error occurs, it
- should report back the error in a received message. It may
- do this in the current call context, since no write locks
- are held when this is run. Message are delivered one at
- a time by the message handler, a new message will not be
- delivered until the previous message is returned. */
- void (*sender)(void *send_info,
- struct ipmi_smi_msg *msg);
-
- /* Called by the upper layer to request that we try to get
- events from the BMC we are attached to. */
+ /*
+ * Called to enqueue an SMI message to be sent. This
+ * operation is not allowed to fail. If an error occurs, it
+ * should report back the error in a received message. It may
+ * do this in the current call context, since no write locks
+ * are held when this is run. Messages are delivered one at
+ * a time by the message handler, a new message will not be
+ * delivered until the previous message is returned.
+ *
+ * This can return an error if the SMI is not in a state where it
+ * can send a message.
+ */
+ int (*sender)(void *send_info, struct ipmi_smi_msg *msg);
+
+ /*
+ * Called by the upper layer to request that we try to get
+ * events from the BMC we are attached to.
+ */
void (*request_events)(void *send_info);
- /* Called by the upper layer when some user requires that the
- interface watch for events, received messages, watchdog
- pretimeouts, or not. Used by the SMI to know if it should
- watch for these. This may be NULL if the SMI does not
- implement it. */
- void (*set_need_watch)(void *send_info, bool enable);
+ /*
+ * Called by the upper layer when some user requires that the
+ * interface watch for received messages and watchdog
+ * pretimeouts (basically do a "Get Flags"), or not. Used by
+ * the SMI to know if it should watch for these. This may be
+ * NULL if the SMI does not implement it. watch_mask is from
+ * IPMI_WATCH_MASK_xxx above. The interface should run slower
+ * timeouts for just watchdog checking or faster timeouts when
+ * waiting for the message queue.
+ */
+ void (*set_need_watch)(void *send_info, unsigned int watch_mask);
/*
* Called when flushing all pending messages.
*/
void (*flush_messages)(void *send_info);
- /* Called when the interface should go into "run to
- completion" mode. If this call sets the value to true, the
- interface should make sure that all messages are flushed
- out and that none are pending, and any new requests are run
- to completion immediately. */
+ /*
+ * Called when the interface should go into "run to
+ * completion" mode. If this call sets the value to true, the
+ * interface should make sure that all messages are flushed
+ * out and that none are pending, and any new requests are run
+ * to completion immediately.
+ */
void (*set_run_to_completion)(void *send_info, bool run_to_completion);
- /* Called to poll for work to do. This is so upper layers can
- poll for operations during things like crash dumps. */
+ /*
+ * Called to poll for work to do. This is so upper layers can
+ * poll for operations during things like crash dumps.
+ */
void (*poll)(void *send_info);
- /* Enable/disable firmware maintenance mode. Note that this
- is *not* the modes defined, this is simply an on/off
- setting. The message handler does the mode handling. Note
- that this is called from interrupt context, so it cannot
- block. */
+ /*
+ * Enable/disable firmware maintenance mode. Note that this
+ * is *not* one of the defined maintenance modes, this is simply an on/off
+ * setting. The message handler does the mode handling. Note
+ * that this is called from interrupt context, so it cannot
+ * block.
+ */
void (*set_maintenance_mode)(void *send_info, bool enable);
-
- /* Tell the handler that we are using it/not using it. The
- message handler get the modules that this handler belongs
- to; this function lets the SMI claim any modules that it
- uses. These may be NULL if this is not required. */
- int (*inc_usecount)(void *send_info);
- void (*dec_usecount)(void *send_info);
};
struct ipmi_device_id {
@@ -162,27 +239,28 @@ struct ipmi_device_id {
#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
-/* Take a pointer to a raw data buffer and a length and extract device
- id information from it. The first byte of data must point to the
- netfn << 2, the data should be of the format:
- netfn << 2, cmd, completion code, data
- as normally comes from a device interface. */
-static inline int ipmi_demangle_device_id(const unsigned char *data,
+/*
+ * Take a pointer to an IPMI response and extract device id information from
+ * it. @netfn is in the IPMI_NETFN_ format, so may need to be shifted from
+ * an SI response.
+ */
+static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd,
+ const unsigned char *data,
unsigned int data_len,
struct ipmi_device_id *id)
{
- if (data_len < 9)
+ if (data_len < 7)
return -EINVAL;
- if (data[0] != IPMI_NETFN_APP_RESPONSE << 2 ||
- data[1] != IPMI_GET_DEVICE_ID_CMD)
+ if (netfn != IPMI_NETFN_APP_RESPONSE || cmd != IPMI_GET_DEVICE_ID_CMD)
/* Strange, didn't get the response we expected. */
return -EINVAL;
- if (data[2] != 0)
+ if (data[0] != 0)
/* That's odd, it shouldn't be able to fail. */
return -EINVAL;
- data += 3;
- data_len -= 3;
+ data++;
+ data_len--;
+
id->device_id = data[0];
id->device_revision = data[1];
id->firmware_revision_1 = data[2];
@@ -206,23 +284,28 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
return 0;
}
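
Under the new signature a caller holding the raw response splits it up first; a sketch, assuming msg points at a struct ipmi_smi_msg whose rsp buffer holds the full response:

static int example_parse_device_id(struct ipmi_smi_msg *msg,
				   struct ipmi_device_id *id)
{
	/* rsp[0] is netfn << 2 (plus LUN bits), rsp[1] is the command. */
	return ipmi_demangle_device_id(msg->rsp[0] >> 2, msg->rsp[1],
				       msg->rsp + 2, msg->rsp_size - 2, id);
}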
-/* Add a low-level interface to the IPMI driver. Note that if the
- interface doesn't know its slave address, it should pass in zero.
- The low-level interface should not deliver any messages to the
- upper layer until the start_processing() function in the handlers
- is called, and the lower layer must get the interface from that
- call. */
-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
- void *send_info,
- struct ipmi_device_id *device_id,
- struct device *dev,
- unsigned char slave_addr);
+/*
+ * Add a low-level interface to the IPMI driver. Note that if the
+ * interface doesn't know its slave address, it should pass in zero.
+ * The low-level interface should not deliver any messages to the
+ * upper layer until the start_processing() function in the handlers
+ * is called, and the lower layer must get the interface from that
+ * call.
+ */
+int ipmi_add_smi(struct module *owner,
+ const struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct device *dev,
+ unsigned char slave_addr);
+
+#define ipmi_register_smi(handlers, send_info, dev, slave_addr) \
+ ipmi_add_smi(THIS_MODULE, handlers, send_info, dev, slave_addr)
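
A registration sketch for a hypothetical lower layer; only a few of the hooks are shown and every example_* name is a placeholder:

static const struct ipmi_smi_handlers example_handlers = {
	.owner            = THIS_MODULE,
	.start_processing = example_start_processing,
	.sender           = example_sender,
	.request_events   = example_request_events,
};

static int example_register(struct device *dev, void *send_info)
{
	/* Pass zero as the slave address when it is not known. */
	return ipmi_register_smi(&example_handlers, send_info, dev, 0);
}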
/*
* Remove a low-level interface from the IPMI driver. This will
* return an error if the interface is still in use by a user.
*/
-int ipmi_unregister_smi(ipmi_smi_t intf);
+void ipmi_unregister_smi(struct ipmi_smi *intf);
/*
* The lower layer reports received messages through this interface.
@@ -230,11 +313,11 @@ int ipmi_unregister_smi(ipmi_smi_t intf);
* the lower layer gets an error sending a message, it should format
* an error response in the message response.
*/
-void ipmi_smi_msg_received(ipmi_smi_t intf,
+void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
/* The lower layer received a watchdog pre-timeout on interface. */
-void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf);
+void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf);
struct ipmi_smi_msg *ipmi_alloc_smi_msg(void);
static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
@@ -242,11 +325,4 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
msg->done(msg);
}
-/* Allow the lower layer to add things to the proc filesystem
- directory for this interface. Note that the entry will
- automatically be dstroyed when the interface is destroyed. */
-int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
- const struct file_operations *proc_ops,
- void *data);
-
#endif /* __LINUX_IPMI_SMI_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 474d6bbc158c..7294e4e89b79 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_H
#define _IPV6_H
#include <uapi/linux/ipv6.h>
+#include <linux/cache.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
@@ -9,9 +11,17 @@
* This structure contains configuration options per IPv6 link.
*/
struct ipv6_devconf {
- __s32 forwarding;
+ /* RX & TX fastpath fields. */
+ __cacheline_group_begin(ipv6_devconf_read_txrx);
+ __s32 disable_ipv6;
__s32 hop_limit;
__s32 mtu6;
+ __s32 forwarding;
+ __s32 force_forwarding;
+ __s32 disable_policy;
+ __s32 proxy_ndp;
+ __cacheline_group_end(ipv6_devconf_read_txrx);
+
__s32 accept_ra;
__s32 accept_redirects;
__s32 autoconf;
@@ -26,11 +36,14 @@ struct ipv6_devconf {
__s32 use_tempaddr;
__s32 temp_valid_lft;
__s32 temp_prefered_lft;
+ __s32 regen_min_advance;
__s32 regen_max_retry;
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __u32 ra_defrtr_metric;
__s32 accept_ra_min_hop_limit;
+ __s32 accept_ra_min_lft;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
#ifdef CONFIG_IPV6_ROUTER_PREF
@@ -41,7 +54,6 @@ struct ipv6_devconf {
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
- __s32 proxy_ndp;
__s32 accept_source_route;
__s32 accept_ra_from_local;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -49,9 +61,8 @@ struct ipv6_devconf {
__s32 use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
- __s32 mc_forwarding;
+ atomic_t mc_forwarding;
#endif
- __s32 disable_ipv6;
__s32 drop_unicast_in_l2_multicast;
__s32 accept_dad;
__s32 force_tllao;
@@ -59,6 +70,7 @@ struct ipv6_devconf {
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
__s32 drop_unsolicited_na;
+ __s32 accept_untracked_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
@@ -71,7 +83,14 @@ struct ipv6_devconf {
#endif
__u32 enhanced_dad;
__u32 addr_gen_mode;
- __s32 disable_policy;
+ __s32 ndisc_tclass;
+ __s32 rpl_seg_enabled;
+ __u32 ioam6_id;
+ __u32 ioam6_id_wide;
+ __u8 ioam6_enabled;
+ __u8 ndisc_evict_nocarrier;
+ __u8 ra_honor_pio_life;
+ __u8 ra_honor_pio_pflag;
struct ctl_table_header *sysctl_header;
};
@@ -81,7 +100,6 @@ struct ipv6_params {
__s32 autoconf;
};
extern struct ipv6_params ipv6_defaults;
-#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -102,6 +120,12 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
return (struct ipv6hdr *)skb_transport_header(skb);
}
+static inline unsigned int ipv6_transport_len(const struct sk_buff *skb)
+{
+ return ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) -
+ skb_network_header_len(skb);
+}
+
/*
This structure contains results of exthdrs parsing
as offsets from skb->nh.
@@ -120,6 +144,7 @@ struct inet6_skb_parm {
__u16 dsthao;
#endif
__u16 frag_max_size;
+ __u16 srhoff;
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
@@ -129,6 +154,10 @@ struct inet6_skb_parm {
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
+#define IP6SKB_SEG6 256
+#define IP6SKB_FAKEJUMBO 512
+#define IP6SKB_MULTIPATH 1024
+#define IP6SKB_MCROUTE 2048
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -159,14 +188,13 @@ static inline bool inet6_is_jumbogram(const struct sk_buff *skb)
}
/* can not be used in TCP layer after tcp_v6_fill_cb */
-static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+static inline int inet6_sdif(const struct sk_buff *skb)
{
-#if defined(CONFIG_NET_L3_MASTER_DEV)
- if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
- return true;
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return IP6CB(skb)->iif;
#endif
- return false;
+ return 0;
}
struct tcp6_request_sock {
@@ -181,56 +209,34 @@ struct inet6_cork {
struct ipv6_txoptions *opt;
u8 hop_limit;
u8 tclass;
+ u8 dontfrag:1;
};
-/**
- * struct ipv6_pinfo - ipv6 private area
- *
- * In the struct sock hierarchy (tcp6_sock, upd6_sock, etc)
- * this _must_ be the last member, so that inet6_sk_generic
- * is able to calculate its offset from the base struct sock
- * by using the struct proto->slab_obj_size member. -acme
- */
+/* struct ipv6_pinfo - ipv6 private area */
struct ipv6_pinfo {
+ /* Used in tx path (inet6_csk_route_socket(), ip6_xmit()) */
struct in6_addr saddr;
- struct in6_pktinfo sticky_pktinfo;
- const struct in6_addr *daddr_cache;
+ __be32 flow_label;
+ u32 dst_cookie;
+ struct ipv6_txoptions __rcu *opt;
+ s16 hop_limit;
+ u8 pmtudisc;
+ u8 tclass;
#ifdef CONFIG_IPV6_SUBTREES
- const struct in6_addr *saddr_cache;
+ bool saddr_cache;
#endif
+ bool daddr_cache;
- __be32 flow_label;
- __u32 frag_size;
-
- /*
- * Packed in 16bits.
- * Omit one shift by by putting the signed field at MSB.
- */
-#if defined(__BIG_ENDIAN_BITFIELD)
- __s16 hop_limit:9;
- __u16 __unused_1:7;
-#else
- __u16 __unused_1:7;
- __s16 hop_limit:9;
-#endif
+ u8 mcast_hops;
+ u32 frag_size;
-#if defined(__BIG_ENDIAN_BITFIELD)
- /* Packed in 16bits. */
- __s16 mcast_hops:9;
- __u16 __unused_2:6,
- mc_loop:1;
-#else
- __u16 mc_loop:1,
- __unused_2:6;
- __s16 mcast_hops:9;
-#endif
int ucast_oif;
int mcast_oif;
/* pktoption flags */
union {
struct {
- __u16 srcrt:1,
+ u16 srcrt:1,
osrcrt:1,
rxinfo:1,
rxoinfo:1,
@@ -247,38 +253,38 @@ struct ipv6_pinfo {
recvfragsize:1;
/* 1 bits hole */
} bits;
- __u16 all;
+ u16 all;
} rxopt;
/* sockopt flags */
- __u16 recverr:1,
- sndflow:1,
- repflow:1,
- pmtudisc:3,
- padding:1, /* 1 bit hole */
- srcprefs:3, /* 001: prefer temporary address
+ u8 srcprefs; /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
- dontfrag:1,
- autoflowlabel:1;
- __u8 min_hopcount;
- __u8 tclass;
+ u8 min_hopcount;
__be32 rcv_flowinfo;
+ struct in6_pktinfo sticky_pktinfo;
- __u32 dst_cookie;
- __u32 rx_dst_cookie;
-
- struct ipv6_mc_socklist __rcu *ipv6_mc_list;
- struct ipv6_ac_socklist *ipv6_ac_list;
- struct ipv6_fl_socklist __rcu *ipv6_fl_list;
-
- struct ipv6_txoptions __rcu *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct inet6_cork cork;
+
+ struct ipv6_mc_socklist __rcu *ipv6_mc_list;
+ struct ipv6_ac_socklist *ipv6_ac_list;
};
+/* We currently use available bits from inet_sk(sk)->inet_flags,
+ * this could change in the future.
+ */
+#define inet6_test_bit(nr, sk) \
+ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_set_bit(nr, sk) \
+ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_clear_bit(nr, sk) \
+ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
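
For instance, code that previously read a bitfield out of struct ipv6_pinfo now tests an inet_flags bit; a sketch, assuming SNDFLOW is one of the INET_FLAGS_* enum values:

static __be32 example_flow_label(const struct sock *sk)
{
	const struct ipv6_pinfo *np = inet6_sk(sk);

	if (inet6_test_bit(SNDFLOW, sk))	/* INET_FLAGS_SNDFLOW assumed */
		return np->flow_label;
	return 0;
}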
+
/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
struct raw6_sock {
/* inet_sock has to be the first member of raw6_sock */
@@ -287,19 +293,19 @@ struct raw6_sock {
__u32 offset; /* checksum offset */
struct icmp6_filter filter;
__u32 ip6mr_table;
- /* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */
+ struct numa_drop_counters drop_counters;
struct ipv6_pinfo inet6;
};
struct udp6_sock {
struct udp_sock udp;
- /* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
struct tcp6_sock {
struct tcp_sock tcp;
- /* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
@@ -317,24 +323,9 @@ static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
}
-static inline struct raw6_sock *raw6_sk(const struct sock *sk)
-{
- return (struct raw6_sock *)sk;
-}
-
-static inline void inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from)
-{
- int ancestor_size = sizeof(struct inet_sock);
-
- if (sk_from->sk_family == PF_INET6)
- ancestor_size += sizeof(struct ipv6_pinfo);
+#define raw6_sk(ptr) container_of_const(ptr, struct raw6_sock, inet.sk)
- __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
-}
-
-#define __ipv6_only_sock(sk) (sk->sk_ipv6only)
-#define ipv6_only_sock(sk) (__ipv6_only_sock(sk))
+#define ipv6_only_sock(sk) (sk->sk_ipv6only)
#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \
inet6_sk(sk)->rxopt.bits.rxinfo)
@@ -351,7 +342,6 @@ static inline int inet_v6_ipv6only(const struct sock *sk)
return ipv6_only_sock(sk);
}
#else
-#define __ipv6_only_sock(sk) 0
#define ipv6_only_sock(sk) 0
#define ipv6_sk_rxinfo(sk) 0
@@ -365,19 +355,12 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
return NULL;
}
-static inline struct inet6_request_sock *
- inet6_rsk(const struct request_sock *rsk)
-{
- return NULL;
-}
-
static inline struct raw6_sock *raw6_sk(const struct sock *sk)
{
return NULL;
}
#define inet6_rcv_saddr(__sk) NULL
-#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _IPV6_H */
diff --git a/include/linux/ipv6_route.h b/include/linux/ipv6_route.h
index 25b5f1f5e780..5711e246c39c 100644
--- a/include/linux/ipv6_route.h
+++ b/include/linux/ipv6_route.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IPV6_ROUTE_H
#define _LINUX_IPV6_ROUTE_H
diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h
new file mode 100644
index 000000000000..6ab913e57da0
--- /dev/null
+++ b/include/linux/irq-entry-common.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_IRQENTRYCOMMON_H
+#define __LINUX_IRQENTRYCOMMON_H
+
+#include <linux/context_tracking.h>
+#include <linux/kmsan.h>
+#include <linux/rseq_entry.h>
+#include <linux/static_call_types.h>
+#include <linux/syscalls.h>
+#include <linux/tick.h>
+#include <linux/unwind_deferred.h>
+
+#include <asm/entry-common.h>
+
+/*
+ * Define dummy _TIF work flags if not defined by the architecture or for
+ * disabled functionality.
+ */
+#ifndef _TIF_PATCH_PENDING
+# define _TIF_PATCH_PENDING (0)
+#endif
+
+/*
+ * TIF flags handled in exit_to_user_mode_loop()
+ */
+#ifndef ARCH_EXIT_TO_USER_MODE_WORK
+# define ARCH_EXIT_TO_USER_MODE_WORK (0)
+#endif
+
+#define EXIT_TO_USER_MODE_WORK \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
+ _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | _TIF_RSEQ | \
+ ARCH_EXIT_TO_USER_MODE_WORK)
+
+/**
+ * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
+ * @regs: Pointer to current's pt_regs
+ *
+ * Defaults to an empty implementation. Can be replaced by architecture
+ * specific code.
+ *
+ * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
+ * section. Use __always_inline so the compiler cannot push it out of line
+ * and make it instrumentable.
+ */
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);
+
+#ifndef arch_enter_from_user_mode
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
+#endif
+
+/**
+ * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent
+ * states.
+ *
+ * Returns: true if the CPU is potentially in an RCU EQS, false otherwise.
+ *
+ * Architectures only need to define this if threads other than the idle thread
+ * may have an interruptible EQS. This does not need to handle idle threads. It
+ * is safe to over-estimate at the cost of redundant RCU management work.
+ *
+ * Invoked from irqentry_enter()
+ */
+#ifndef arch_in_rcu_eqs
+static __always_inline bool arch_in_rcu_eqs(void) { return false; }
+#endif
+
+/**
+ * enter_from_user_mode - Establish state when coming from user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Syscall/interrupt entry disables interrupts, but user mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct and interrupts are still
+ * disabled. The subsequent functions can be instrumented.
+ *
+ * This is invoked when there is architecture specific functionality to be
+ * done between establishing state and enabling interrupts. The caller must
+ * enable interrupts before invoking syscall_enter_from_user_mode_work().
+ */
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+{
+ arch_enter_from_user_mode(regs);
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
+ CT_WARN_ON(__ct_state() != CT_STATE_USER);
+ user_exit_irqoff();
+
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
+
+/**
+ * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Defaults to local_irq_enable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
+
+#ifndef local_irq_enable_exit_to_user
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
+{
+ local_irq_enable();
+}
+#endif
+
+/**
+ * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
+ *
+ * Defaults to local_irq_disable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_disable_exit_to_user(void);
+
+#ifndef local_irq_disable_exit_to_user
+static inline void local_irq_disable_exit_to_user(void)
+{
+ local_irq_disable();
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
+ * to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_loop() with interrupts enabled
+ *
+ * Defaults to NOOP. Can be supplied by architecture specific code.
+ */
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_work
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_prepare - Architecture specific preparation for
+ * exit to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ */
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_prepare
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode - Architecture specific final work before
+ * exit to user mode.
+ *
+ * Invoked from exit_to_user_mode() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ *
+ * This needs to be __always_inline because it is non-instrumentable code
+ * invoked after context tracking switched to user mode.
+ *
+ * An architecture implementation must not do anything complex, no locking
+ * etc. The main purpose is for speculation mitigations.
+ */
+static __always_inline void arch_exit_to_user_mode(void);
+
+#ifndef arch_exit_to_user_mode
+static __always_inline void arch_exit_to_user_mode(void) { }
+#endif
+
+/**
+ * arch_do_signal_or_restart - Architecture specific signal delivery function
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from exit_to_user_mode_loop().
+ */
+void arch_do_signal_or_restart(struct pt_regs *regs);
+
+/* Handle pending TIF work */
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work);
+
+/**
+ * __exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * 1) check that interrupts are disabled
+ * 2) call tick_nohz_user_enter_prepare()
+ * 3) call exit_to_user_mode_loop() if any flags from
+ * EXIT_TO_USER_MODE_WORK are set
+ * 4) check that interrupts are still disabled
+ *
+ * Don't invoke directly, use the syscall/irqentry_ prefixed variants below
+ */
+static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ unsigned long ti_work;
+
+ lockdep_assert_irqs_disabled();
+
+ /* Flush pending rcuog wakeup before the last need_resched() check */
+ tick_nohz_user_enter_prepare();
+
+ ti_work = read_thread_flags();
+ if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+ arch_exit_to_user_mode_prepare(regs, ti_work);
+}
+
+static __always_inline void __exit_to_user_mode_validate(void)
+{
+ /* Ensure that kernel state is sane for a return to userspace */
+ kmap_assert_nomap();
+ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+}
+
+/* Temporary workaround to keep ARM64 alive */
+static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_exit_to_user_mode_legacy();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * syscall_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_syscall_exit_to_user_mode();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * irqentry_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_irqentry_exit_to_user_mode();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall/interrupt exit enables interrupts, but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Invoke architecture specific last minute exit code, e.g. speculation
+ * mitigations, etc.: arch_exit_to_user_mode()
+ * 4) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code when syscall_exit_to_user_mode()
+ * is not suitable as the last step before returning to userspace. Must be
+ * invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke syscall_exit_to_user_mode_work() before this.
+ */
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ unwind_reset_info();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ user_enter_irqoff();
+ arch_exit_to_user_mode();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/**
+ * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from architecture specific entry code with interrupts disabled.
+ * Can only be called when the interrupt entry came from user mode. The
+ * calling code must be non-instrumentable. When the function returns all
+ * state is correct and the subsequent functions can be instrumented.
+ *
+ * The function establishes state (lockdep, RCU (context tracking), tracing)
+ */
+static __always_inline void irqentry_enter_from_user_mode(struct pt_regs *regs)
+{
+ enter_from_user_mode(regs);
+ rseq_note_user_irq_entry();
+}
+
+/**
+ * irqentry_exit_to_user_mode - Interrupt exit work
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts disabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific interrupt
+ * handling code.
+ *
+ * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
+ * Interrupt exit is not invoking #1 which is the syscall specific one time
+ * work.
+ */
+static __always_inline void irqentry_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ irqentry_exit_to_user_mode_prepare(regs);
+ instrumentation_end();
+ exit_to_user_mode();
+}
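
Taken together, an architecture handler for an entry that can only come from user mode would follow this pattern (a hedged sketch; handle_the_event is a placeholder):

void noinstr arch_handle_user_event(struct pt_regs *regs)
{
	irqentry_enter_from_user_mode(regs);

	instrumentation_begin();
	handle_the_event(regs);		/* instrumentable work goes here */
	instrumentation_end();

	irqentry_exit_to_user_mode(regs);
}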
+
+#ifndef irqentry_state
+/**
+ * struct irqentry_state - Opaque object for exception state storage
+ * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
+ * exit path has to invoke ct_irq_exit().
+ * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
+ * lockdep state is restored correctly on exit from nmi.
+ *
+ * This opaque object is filled in by the irqentry_*_enter() functions and
+ * must be passed back into the corresponding irqentry_*_exit() functions
+ * when the exception is complete.
+ *
+ * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
+ * and all members private. Descriptions of the members are provided to aid in
+ * the maintenance of the irqentry_*() functions.
+ */
+typedef struct irqentry_state {
+ union {
+ bool exit_rcu;
+ bool lockdep;
+ };
+} irqentry_state_t;
+#endif
+
+/**
+ * irqentry_enter - Handle state tracking on ordinary interrupt entries
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes:
+ * - lockdep irqflag state tracking as low level ASM entry disabled
+ * interrupts.
+ *
+ * - Context tracking if the exception hit user mode.
+ *
+ * - The hardirq tracer to keep the state consistent as low level ASM
+ * entry disabled interrupts.
+ *
+ * As a precondition, this requires that the entry came from user mode,
+ * idle, or a kernel context in which RCU is watching.
+ *
+ * For kernel mode entries RCU handling is done conditionally. If RCU is
+ * watching then the only RCU requirement is to check whether the tick has
+ * to be restarted. If RCU is not watching then ct_irq_enter() has to be
+ * invoked on entry and ct_irq_exit() on exit.
+ *
+ * Avoiding the ct_irq_enter/exit() calls is an optimization but also
+ * solves the problem of kernel mode pagefaults which can schedule, which
+ * is not possible after invoking ct_irq_enter() without undoing it.
+ *
+ * For user mode entries irqentry_enter_from_user_mode() is invoked to
+ * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
+ * would not be possible.
+ *
+ * Returns: An opaque object that must be passed to irqentry_exit()
+ */
+irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
+ *
+ * Conditional reschedule with additional sanity checks.
+ */
+void raw_irqentry_exit_cond_resched(void);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_disabled NULL
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
+#endif
+#else /* CONFIG_PREEMPT_DYNAMIC */
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+/**
+ * irqentry_exit - Handle return from exception that used irqentry_enter()
+ * @regs: Pointer to pt_regs (exception entry regs)
+ * @state: Return value from matching call to irqentry_enter()
+ *
+ * Depending on the return target (kernel/user) this runs the necessary
+ * preemption and work checks if possible and required and returns to
+ * the caller with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to irqentry_enter().
+ */
+void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
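
For exceptions that can hit either kernel or user mode, the enter/exit pair brackets the handler and threads the opaque state through; a sketch with handle_the_exception as a placeholder:

void noinstr arch_handle_exception(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	handle_the_exception(regs);	/* instrumentable work goes here */
	instrumentation_end();

	irqentry_exit(regs, state);
}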
+
+/**
+ * irqentry_nmi_enter - Handle NMI entry
+ * @regs: Pointer to current's pt_regs
+ *
+ * Similar to irqentry_enter() but taking care of the NMI constraints.
+ */
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_nmi_exit - Handle return from NMI handling
+ * @regs: Pointer to pt_regs (NMI entry regs)
+ * @irq_state: Return value from matching call to irqentry_nmi_enter()
+ *
+ * Last action before returning to the low level assembly code.
+ *
+ * Counterpart to irqentry_nmi_enter().
+ */
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+
+#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b99a784635ff..4a9f1d7b08c3 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H
@@ -9,18 +10,13 @@
* Thanks. --rmk
*/
-#include <linux/smp.h>
-#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
-#include <linux/gfp.h>
#include <linux/irqhandler.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
-#include <linux/errno.h>
#include <linux/topology.h>
-#include <linux/wait.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -31,6 +27,7 @@
struct seq_file;
struct module;
struct msi_msg;
+struct irq_affinity_desc;
enum irqchip_irq_state;
/*
@@ -67,13 +64,14 @@ enum irqchip_irq_state;
* IRQ_NOAUTOEN - Interrupt is not automatically enabled in
* request/setup_irq()
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
- * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
* IRQ_NESTED_THREAD - Interrupt nests into another thread
* IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
* it from the spurious interrupt detection
* mechanism and from core side polling.
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
+ * IRQ_HIDDEN - Don't show up in /proc/interrupts
+ * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -94,19 +92,20 @@ enum {
IRQ_NOREQUEST = (1 << 11),
IRQ_NOAUTOEN = (1 << 12),
IRQ_NO_BALANCING = (1 << 13),
- IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
+ IRQ_HIDDEN = (1 << 20),
+ IRQ_NO_DEBUG = (1 << 21),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
- IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+ IRQ_NOAUTOEN | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -114,10 +113,10 @@ enum {
* Return value for chip->irq_set_affinity()
*
* IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
- * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
+ * IRQ_SET_MASK_NOCOPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
- * all descendent irqchips.
+ * all descendant irqchips.
*/
enum {
IRQ_SET_MASK_OK = 0,
@@ -150,7 +149,9 @@ struct irq_common_data {
#endif
void *handler_data;
struct msi_desc *msi_desc;
+#ifdef CONFIG_SMP
cpumask_var_t affinity;
+#endif
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
cpumask_var_t effective_affinity;
#endif
@@ -176,7 +177,7 @@ struct irq_common_data {
struct irq_data {
u32 mask;
unsigned int irq;
- unsigned long hwirq;
+ irq_hw_number_t hwirq;
struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
@@ -198,8 +199,6 @@ struct irq_data {
* IRQD_LEVEL - Interrupt is level triggered
* IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
* from suspend
- * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process
- * context
* IRQD_IRQ_DISABLED - Disabled state of the interrupt
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
@@ -210,26 +209,41 @@ struct irq_data {
* IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity
* mask. Applies only to affinity managed irqs.
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET - Expected trigger has already been set
+ * IRQD_CAN_RESERVE - Can use reservation mode
+ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
+ * from actual interrupt context.
+ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
+ * irq_chip::irq_set_affinity() when deactivated.
+ * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if
+ * the irqchip has the IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND flag set.
+ * IRQD_RESEND_WHEN_IN_PROGRESS - Interrupt may fire when already in progress in which
+ * case it must be resent at the next available opportunity.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
- IRQD_SETAFFINITY_PENDING = (1 << 8),
- IRQD_ACTIVATED = (1 << 9),
- IRQD_NO_BALANCING = (1 << 10),
- IRQD_PER_CPU = (1 << 11),
- IRQD_AFFINITY_SET = (1 << 12),
- IRQD_LEVEL = (1 << 13),
- IRQD_WAKEUP_STATE = (1 << 14),
- IRQD_MOVE_PCNTXT = (1 << 15),
- IRQD_IRQ_DISABLED = (1 << 16),
- IRQD_IRQ_MASKED = (1 << 17),
- IRQD_IRQ_INPROGRESS = (1 << 18),
- IRQD_WAKEUP_ARMED = (1 << 19),
- IRQD_FORWARDED_TO_VCPU = (1 << 20),
- IRQD_AFFINITY_MANAGED = (1 << 21),
- IRQD_IRQ_STARTED = (1 << 22),
- IRQD_MANAGED_SHUTDOWN = (1 << 23),
- IRQD_SINGLE_TARGET = (1 << 24),
+ IRQD_SETAFFINITY_PENDING = BIT(8),
+ IRQD_ACTIVATED = BIT(9),
+ IRQD_NO_BALANCING = BIT(10),
+ IRQD_PER_CPU = BIT(11),
+ IRQD_AFFINITY_SET = BIT(12),
+ IRQD_LEVEL = BIT(13),
+ IRQD_WAKEUP_STATE = BIT(14),
+ IRQD_IRQ_DISABLED = BIT(16),
+ IRQD_IRQ_MASKED = BIT(17),
+ IRQD_IRQ_INPROGRESS = BIT(18),
+ IRQD_WAKEUP_ARMED = BIT(19),
+ IRQD_FORWARDED_TO_VCPU = BIT(20),
+ IRQD_AFFINITY_MANAGED = BIT(21),
+ IRQD_IRQ_STARTED = BIT(22),
+ IRQD_MANAGED_SHUTDOWN = BIT(23),
+ IRQD_SINGLE_TARGET = BIT(24),
+ IRQD_DEFAULT_TRIGGER_SET = BIT(25),
+ IRQD_CAN_RESERVE = BIT(26),
+ IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27),
+ IRQD_AFFINITY_ON_ACTIVATE = BIT(28),
+ IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29),
+ IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -259,18 +273,25 @@ static inline void irqd_mark_affinity_was_set(struct irq_data *d)
__irqd_to_state(d) |= IRQD_AFFINITY_SET;
}
+static inline bool irqd_trigger_type_was_set(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
+}
+
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}
/*
- * Must only be called inside irq_chip.irq_set_type() functions.
+ * Must only be called inside irq_chip.irq_set_type() functions or
+ * from the DT/ACPI setup code.
*/
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
__irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
__irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
}
static inline bool irqd_is_level_type(struct irq_data *d)
@@ -280,7 +301,7 @@ static inline bool irqd_is_level_type(struct irq_data *d)
/*
 * Must only be called from irqchip.irq_set_affinity() or low level
- * hieararchy domain allocation functions.
+ * hierarchy domain allocation functions.
*/
static inline void irqd_set_single_target(struct irq_data *d)
{
@@ -292,14 +313,24 @@ static inline bool irqd_is_single_target(struct irq_data *d)
return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}
-static inline bool irqd_is_wakeup_set(struct irq_data *d)
+static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
{
- return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
+ __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
}
-static inline bool irqd_can_move_in_process_context(struct irq_data *d)
+static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
{
- return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
+ return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
+}
+
+static inline bool irqd_is_enabled_on_suspend(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND;
+}
+
+static inline bool irqd_is_wakeup_set(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}
static inline bool irqd_irq_disabled(struct irq_data *d)
@@ -367,6 +398,41 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}
+static inline void irqd_set_can_reserve(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_CAN_RESERVE;
+}
+
+static inline void irqd_clr_can_reserve(struct irq_data *d)
+{
+ __irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
+}
+
+static inline bool irqd_can_reserve(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+}
+
+static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+}
+
+static inline bool irqd_affinity_on_activate(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
+}
+
+static inline void irqd_set_resend_when_in_progress(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS;
+}
+
+static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS;
+}
+
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -377,7 +443,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
/**
* struct irq_chip - hardware interrupt chip descriptor
*
- * @parent_device: pointer to parent device for irqchip
* @name: name for /proc/interrupts
* @irq_startup: start up the interrupt (defaults to ->enable if NULL)
* @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
@@ -419,10 +484,12 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
* @ipi_send_single: send a single IPI to destination cpus
* @ipi_send_mask: send an IPI to destination cpus in cpumask
+ * @irq_nmi_setup: function called from core code before enabling an NMI
+ * @irq_nmi_teardown: function called from core code after disabling an NMI
+ * @irq_force_complete_move: optional function to force complete pending irq move
* @flags: chip specific flags
*/
struct irq_chip {
- struct device *parent_device;
const char *name;
unsigned int (*irq_startup)(struct irq_data *data);
void (*irq_shutdown)(struct irq_data *data);
@@ -443,9 +510,10 @@ struct irq_chip {
void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
-
+#endif
void (*irq_suspend)(struct irq_data *data);
void (*irq_resume)(struct irq_data *data);
void (*irq_pm_shutdown)(struct irq_data *data);
@@ -467,29 +535,47 @@ struct irq_chip {
void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
+ int (*irq_nmi_setup)(struct irq_data *data);
+ void (*irq_nmi_teardown)(struct irq_data *data);
+
+ void (*irq_force_complete_move)(struct irq_data *data);
+
unsigned long flags;
};
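As a sketch, a hypothetical "foo" driver (not part of this patch) might wire up the new NMI callbacks like this; they are only invoked for chips advertising NMI support via the IRQCHIP_SUPPORTS_NMI flag shown below:

#include <linux/irq.h>

static void foo_mask(struct irq_data *d)   { /* mask in hardware */ }
static void foo_unmask(struct irq_data *d) { /* unmask in hardware */ }

static int foo_irq_nmi_setup(struct irq_data *d)
{
	/* switch the line to NMI delivery, if the hardware can */
	return 0;
}

static void foo_irq_nmi_teardown(struct irq_data *d)
{
	/* revert the line to normal IRQ delivery */
}

static struct irq_chip foo_chip = {
	.name			= "FOO",
	.irq_mask		= foo_mask,
	.irq_unmask		= foo_unmask,
	.irq_nmi_setup		= foo_irq_nmi_setup,
	.irq_nmi_teardown	= foo_irq_nmi_teardown,
};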
/*
* irq_chip specific flags
*
- * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
- * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
- * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
- * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
- * when irq enabled
- * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
- * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
- * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
+ * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
+ * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
+ * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
+ * when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
+ * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
+ * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
+ * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
+ * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs
+ * in the suspend path if they are in disabled state
+ * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
+ * IRQCHIP_IMMUTABLE: Don't ever change anything in this chip
+ * IRQCHIP_MOVE_DEFERRED: Move the interrupt in actual interrupt context
*/
enum {
- IRQCHIP_SET_TYPE_MASKED = (1 << 0),
- IRQCHIP_EOI_IF_HANDLED = (1 << 1),
- IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
- IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
- IRQCHIP_SKIP_SET_WAKE = (1 << 4),
- IRQCHIP_ONESHOT_SAFE = (1 << 5),
- IRQCHIP_EOI_THREADED = (1 << 6),
+ IRQCHIP_SET_TYPE_MASKED = (1 << 0),
+ IRQCHIP_EOI_IF_HANDLED = (1 << 1),
+ IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
+ IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
+ IRQCHIP_SKIP_SET_WAKE = (1 << 4),
+ IRQCHIP_ONESHOT_SAFE = (1 << 5),
+ IRQCHIP_EOI_THREADED = (1 << 6),
+ IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
+ IRQCHIP_SUPPORTS_NMI = (1 << 8),
+ IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9),
+ IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
+ IRQCHIP_IMMUTABLE = (1 << 11),
+ IRQCHIP_MOVE_DEFERRED = (1 << 12),
};
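The flags are OR-ed together into irq_chip::flags. A minimal sketch with a hypothetical chip:

static struct irq_chip bar_chip = {
	.name	= "BAR",
	.flags	= IRQCHIP_SET_TYPE_MASKED |
		  IRQCHIP_MASK_ON_SUSPEND |
		  IRQCHIP_SUPPORTS_NMI,
};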
#include <linux/irqdesc.h>
@@ -510,13 +596,12 @@ enum {
#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
struct irqaction;
-extern int setup_irq(unsigned int irq, struct irqaction *new);
-extern void remove_irq(unsigned int irq, struct irqaction *act);
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
-extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
+#endif
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
@@ -529,13 +614,18 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
-void irq_move_irq(struct irq_data *data);
+bool irq_can_move_in_process_context(struct irq_data *data);
+void __irq_move_irq(struct irq_data *data);
+static inline void irq_move_irq(struct irq_data *data)
+{
+ if (unlikely(irqd_is_setaffinity_pending(data)))
+ __irq_move_irq(data);
+}
void irq_move_masked_irq(struct irq_data *data);
-void irq_force_complete_move(struct irq_desc *desc);
#else
+static inline bool irq_can_move_in_process_context(struct irq_data *data) { return true; }
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
-static inline void irq_force_complete_move(struct irq_desc *desc) { }
#endif
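The new wrapper keeps the common case (no pending affinity change) as a cheap inline test and only takes the out-of-line slow path when a move is pending. A hedged sketch of how a chip's ->irq_ack callback might use it (all "baz" names are illustrative, and hwirq < 32 is assumed):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irq.h>

static void __iomem *baz_ack_reg;	/* assumed mapped during probe */

static void baz_irq_ack(struct irq_data *d)
{
	/* retire a pending affinity move, then ack in hardware */
	irq_move_irq(d);
	writel(BIT(irqd_to_hwirq(d)), baz_ack_reg);
}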
extern int no_irq_affinity;
@@ -564,17 +654,28 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
+extern void handle_fasteoi_nmi(struct irq_desc *desc);
+
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
+extern int irq_chip_set_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool val);
+extern int irq_chip_get_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool *state);
+extern void irq_chip_shutdown_parent(struct irq_data *data);
+extern unsigned int irq_chip_startup_parent(struct irq_data *data);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
+extern void irq_chip_mask_ack_parent(struct irq_data *data);
extern void irq_chip_unmask_parent(struct irq_data *data);
extern void irq_chip_eoi_parent(struct irq_data *data);
extern int irq_chip_set_affinity_parent(struct irq_data *data,
@@ -584,8 +685,13 @@ extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
+extern int irq_chip_request_resources_parent(struct irq_data *data);
+extern void irq_chip_release_resources_parent(struct irq_data *data);
#endif
+/* Disable or mask interrupts during a kernel kexec */
+extern void machine_kexec_mask_interrupts(void);
+
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
@@ -594,27 +700,24 @@ extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
extern int noirqdebug_setup(char *str);
/* Checks whether the interrupt can be requested by request_irq(): */
-extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+extern bool can_request_irq(unsigned int irq, unsigned long irqflags);
/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;
extern void
-irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
irq_flow_handler_t handle, const char *name);
-static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+static inline void irq_set_chip_and_handler(unsigned int irq,
+ const struct irq_chip *chip,
irq_flow_handler_t handle)
{
irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
extern int irq_set_percpu_devid(unsigned int irq);
-extern int irq_set_percpu_devid_partition(unsigned int irq,
- const struct cpumask *affinity);
-extern int irq_get_percpu_devid_partition(unsigned int irq,
- struct cpumask *affinity);
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
@@ -694,7 +797,7 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq)
}
/* Set/get chip/data for an IRQ: */
-extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
@@ -767,26 +870,36 @@ static inline int irq_data_get_node(struct irq_data *d)
return irq_common_data_get_node(d->common);
}
-static inline struct cpumask *irq_get_affinity_mask(int irq)
+static inline
+const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
- struct irq_data *d = irq_get_irq_data(irq);
+#ifdef CONFIG_SMP
+ return d->common->affinity;
+#else
+ return cpumask_of(0);
+#endif
+}
- return d ? d->common->affinity : NULL;
+static inline void irq_data_update_affinity(struct irq_data *d,
+ const struct cpumask *m)
+{
+#ifdef CONFIG_SMP
+ cpumask_copy(d->common->affinity, m);
+#endif
}
-static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+static inline const struct cpumask *irq_get_affinity_mask(int irq)
{
- return d->common->affinity;
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? irq_data_get_affinity_mask(d) : NULL;
}
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
- if (!cpumask_empty(d->common->effective_affinity))
- return d->common->effective_affinity;
-
- return d->common->affinity;
+ return d->common->effective_affinity;
}
static inline void irq_data_update_effective_affinity(struct irq_data *d,
const struct cpumask *m)
@@ -799,27 +912,36 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d,
{
}
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
- return d->common->affinity;
+ return irq_data_get_affinity_mask(d);
}
#endif
+static inline
+const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? irq_data_get_effective_affinity_mask(d) : NULL;
+}
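Since the accessors now return const masks, callers can only inspect affinity. A sketch of a driver picking the CPU currently servicing an IRQ, with a fallback when the mask is unavailable or empty:

#include <linux/cpumask.h>
#include <linux/irq.h>

static unsigned int example_irq_cpu(unsigned int irq)
{
	const struct cpumask *mask = irq_get_effective_affinity_mask(irq);
	unsigned int cpu = mask ? cpumask_first(mask) : nr_cpu_ids;

	return cpu < nr_cpu_ids ? cpu : 0;	/* fall back to CPU 0 */
}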
+
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner, const struct cpumask *affinity);
+ struct module *owner,
+ const struct irq_affinity_desc *affinity);
int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node) \
__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
#define irq_alloc_desc(node) \
- irq_alloc_descs(-1, 0, 1, node)
+ irq_alloc_descs(-1, 1, 1, node)
#define irq_alloc_desc_at(at, node) \
irq_alloc_descs(at, at, 1, node)
@@ -834,7 +956,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
__devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
#define devm_irq_alloc_desc(dev, node) \
- devm_irq_alloc_descs(dev, -1, 0, 1, node)
+ devm_irq_alloc_descs(dev, -1, 1, 1, node)
#define devm_irq_alloc_desc_at(dev, at, node) \
devm_irq_alloc_descs(dev, at, at, 1, node)
@@ -851,25 +973,6 @@ static inline void irq_free_desc(unsigned int irq)
irq_free_descs(irq, 1);
}
-#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
-unsigned int irq_alloc_hwirqs(int cnt, int node);
-static inline unsigned int irq_alloc_hwirq(int node)
-{
- return irq_alloc_hwirqs(1, node);
-}
-void irq_free_hwirqs(unsigned int from, int cnt);
-static inline void irq_free_hwirq(unsigned int irq)
-{
- return irq_free_hwirqs(irq, 1);
-}
-int arch_setup_hwirq(unsigned int irq, int node);
-void arch_teardown_hwirq(unsigned int irq);
-#endif
-
-#ifdef CONFIG_GENERIC_IRQ_LEGACY
-void irq_init_desc(unsigned int irq);
-#endif
-
/**
* struct irq_chip_regs - register offsets for struct irq_gci
* @enable: Enable register offset to reg_base
@@ -878,7 +981,6 @@ void irq_init_desc(unsigned int irq);
* @ack: Ack register offset to reg_base
* @eoi: Eoi register offset to reg_base
* @type: Type configuration register offset to reg_base
- * @polarity: Polarity configuration register offset to reg_base
*/
struct irq_chip_regs {
unsigned long enable;
@@ -887,7 +989,6 @@ struct irq_chip_regs {
unsigned long ack;
unsigned long eoi;
unsigned long type;
- unsigned long polarity;
};
/**
@@ -927,8 +1028,6 @@ struct irq_chip_type {
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
- * @type_cache: Cached type register
- * @polarity_cache: Cached polarity register
* @wake_enabled: Interrupt can wakeup from suspend
* @wake_active: Interrupt is marked as a wakeup from suspend source
* @num_ct: Number of available irq_chip_type instances (usually 1)
@@ -955,8 +1054,6 @@ struct irq_chip_generic {
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
- u32 type_cache;
- u32 polarity_cache;
u32 wake_enabled;
u32 wake_active;
unsigned int num_ct;
@@ -965,7 +1062,7 @@ struct irq_chip_generic {
unsigned long unused;
struct irq_domain *domain;
struct list_head list;
- struct irq_chip_type chip_types[0];
+ struct irq_chip_type chip_types[];
};
/**
@@ -993,6 +1090,7 @@ enum irq_gc_flags {
* @irq_flags_to_set: IRQ* flags to set on irq setup
* @irq_flags_to_clear: IRQ* flags to clear on irq setup
* @gc_flags: Generic chip specific setup flags
+ * @exit: Function called on each chip when it is destroyed.
* @gc: Array of pointers to generic interrupt chips
*/
struct irq_domain_chip_generic {
@@ -1001,7 +1099,35 @@ struct irq_domain_chip_generic {
unsigned int irq_flags_to_clear;
unsigned int irq_flags_to_set;
enum irq_gc_flags gc_flags;
- struct irq_chip_generic *gc[0];
+ void (*exit)(struct irq_chip_generic *gc);
+ struct irq_chip_generic *gc[];
+};
+
+/**
+ * struct irq_domain_chip_generic_info - Generic chip information structure
+ * @name: Name of the generic interrupt chip
+ * @handler: Interrupt handler used by the generic interrupt chip
+ * @irqs_per_chip: Number of interrupts each chip handles (max 32)
+ * @num_ct: Number of irq_chip_type instances associated with each
+ * chip
+ * @irq_flags_to_clear: IRQ_* bits to clear in the mapping function
+ * @irq_flags_to_set: IRQ_* bits to set in the mapping function
+ * @gc_flags: Generic chip specific setup flags
+ * @init: Function called on each chip when it is created.
+ * Allows additional chip initialisation.
+ * @exit: Function called on each chip when it is destroyed.
+ * Allows chip cleanup operations.
+ */
+struct irq_domain_chip_generic_info {
+ const char *name;
+ irq_flow_handler_t handler;
+ unsigned int irqs_per_chip;
+ unsigned int num_ct;
+ unsigned int irq_flags_to_clear;
+ unsigned int irq_flags_to_set;
+ enum irq_gc_flags gc_flags;
+ int (*init)(struct irq_chip_generic *gc);
+ void (*exit)(struct irq_chip_generic *gc);
};
/* Generic chip callback functions */
@@ -1012,13 +1138,14 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);
/* Setup functions for irq_chip_generic */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq);
+void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq);
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler);
@@ -1039,6 +1166,20 @@ int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
+#ifdef CONFIG_GENERIC_IRQ_CHIP
+int irq_domain_alloc_generic_chips(struct irq_domain *d,
+ const struct irq_domain_chip_generic_info *info);
+void irq_domain_remove_generic_chips(struct irq_domain *d);
+#else
+static inline int
+irq_domain_alloc_generic_chips(struct irq_domain *d,
+ const struct irq_domain_chip_generic_info *info)
+{
+ return -EINVAL;
+}
+static inline void irq_domain_remove_generic_chips(struct irq_domain *d) { }
+#endif /* CONFIG_GENERIC_IRQ_CHIP */
+
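A hedged sketch of the table-driven setup these declarations enable (hypothetical "foo" driver; contrast with the older __irq_alloc_domain_generic_chips() below, which takes the same parameters individually):

static const struct irq_domain_chip_generic_info foo_info = {
	.name			= "foo",
	.handler		= handle_level_irq,
	.irqs_per_chip		= 32,
	.num_ct			= 1,
	.irq_flags_to_clear	= IRQ_NOREQUEST | IRQ_NOPROBE,
};

static int foo_setup_chips(struct irq_domain *d)
{
	/* returns -EINVAL when CONFIG_GENERIC_IRQ_CHIP is disabled */
	return irq_domain_alloc_generic_chips(d, &foo_info);
}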
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
int num_ct, const char *name,
irq_flow_handler_t handler,
@@ -1073,31 +1214,6 @@ static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
-#ifdef CONFIG_SMP
-static inline void irq_gc_lock(struct irq_chip_generic *gc)
-{
- raw_spin_lock(&gc->lock);
-}
-
-static inline void irq_gc_unlock(struct irq_chip_generic *gc)
-{
- raw_spin_unlock(&gc->lock);
-}
-#else
-static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
-static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
-#endif
-
-/*
- * The irqsave variants are for usage in non interrupt code. Do not use
- * them in irq_chip callbacks. Use irq_gc_lock() instead.
- */
-#define irq_gc_lock_irqsave(gc, flags) \
- raw_spin_lock_irqsave(&(gc)->lock, flags)
-
-#define irq_gc_unlock_irqrestore(gc, flags) \
- raw_spin_unlock_irqrestore(&(gc)->lock, flags)
-
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
@@ -1116,6 +1232,29 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
return readl(gc->reg_base + reg_offset);
}
+struct irq_matrix;
+struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
+ unsigned int alloc_start,
+ unsigned int alloc_end);
+void irq_matrix_online(struct irq_matrix *m);
+void irq_matrix_offline(struct irq_matrix *m);
+void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
+int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
+void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int *mapped_cpu);
+void irq_matrix_reserve(struct irq_matrix *m);
+void irq_matrix_remove_reserved(struct irq_matrix *m);
+int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
+ bool reserved, unsigned int *mapped_cpu);
+void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
+ unsigned int bit, bool managed);
+void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
+unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
+unsigned int irq_matrix_allocated(struct irq_matrix *m);
+unsigned int irq_matrix_reserved(struct irq_matrix *m);
+void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
+
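A sketch of the per-CPU allocation flow, modelled loosely on the x86 vector domain (all numbers and "foo" names are illustrative):

static struct irq_matrix *foo_matrix;

static int __init foo_matrix_init(void)
{
	/* 256 bits per CPU, dynamic allocations in [32, 236) */
	foo_matrix = irq_alloc_matrix(256, 32, 236);
	if (!foo_matrix)
		return -ENOMEM;
	irq_matrix_online(foo_matrix);
	return 0;
}

static int foo_vector_alloc(unsigned int *cpu)
{
	/* returns the allocated bit, or a negative error code */
	return irq_matrix_alloc(foo_matrix, cpu_online_mask, false, cpu);
}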
/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
#define INVALID_HWIRQ (~0UL)
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
@@ -1124,4 +1263,34 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
int ipi_send_single(unsigned int virq, unsigned int cpu);
int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
+void ipi_mux_process(void);
+int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu));
+
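A sketch of how architecture code might build four software IPIs on top of a single hardware IPI (the "foo" doorbell function is hypothetical):

static void foo_ipi_send(unsigned int cpu)
{
	/* ring the single hardware IPI on 'cpu' */
}

static int __init foo_smp_init(void)
{
	int base_ipi = ipi_mux_create(4, foo_ipi_send);

	if (base_ipi < 0)
		return base_ipi;
	/* the parent IPI handler then calls ipi_mux_process() */
	return 0;
}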
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
+/*
+ * Registers a generic IRQ handling function as the top-level IRQ handler in
+ * the system, which is generally the first C code called from an assembly
+ * architecture-specific interrupt handler.
+ *
+ * Returns 0 on success, or -EBUSY if an IRQ handler has already been
+ * registered.
+ */
+int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
+
+/*
+ * Allows interrupt handlers to find the irqchip that's been registered as the
+ * top-level IRQ handler.
+ */
+extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
+asmlinkage void generic_handle_arch_irq(struct pt_regs *regs);
+#else
+#ifndef set_handle_irq
+#define set_handle_irq(handle_irq) \
+ do { \
+ (void)handle_irq; \
+ WARN_ON(1); \
+ } while (0)
+#endif
+#endif
+
#endif /* _LINUX_IRQ_H */
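A sketch of architecture init code registering its root handler via this interface (the "foo" names are illustrative):

static void foo_handle_irq(struct pt_regs *regs)
{
	/* read the hardware cause register, then dispatch,
	 * e.g. via generic_handle_domain_irq() */
}

static int __init foo_irqchip_init(void)
{
	return set_handle_irq(foo_handle_irq);
}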
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
deleted file mode 100644
index 77e4bac29287..000000000000
--- a/include/linux/irq_cpustat.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __irq_cpustat_h
-#define __irq_cpustat_h
-
-/*
- * Contains default mappings for irq_cpustat_t, used by almost every
- * architecture. Some arch (like s390) have per cpu hardware pages and
- * they define their own mappings for irq_stat.
- *
- * Keith Owens <kaos@ocs.com.au> July 2000.
- */
-
-
-/*
- * Simple wrappers reducing source bloat. Define all irq_stat fields
- * here, even ones that are arch dependent. That way we get common
- * definitions instead of differing sets for each arch.
- */
-
-#ifndef __ARCH_IRQ_STAT
-extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */
-#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
-#endif
-
- /* arch independent irq_stat fields */
-#define local_softirq_pending() \
- __IRQ_STAT(smp_processor_id(), __softirq_pending)
-
- /* arch dependent irq_stat fields */
-#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */
-
-#endif /* __irq_cpustat_h */
diff --git a/include/linux/irq_poll.h b/include/linux/irq_poll.h
index 3e8c1b8fb9be..16aaeccb65cb 100644
--- a/include/linux/irq_poll.h
+++ b/include/linux/irq_poll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IRQ_POLL_H
#define IRQ_POLL_H
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
index 0380d899b955..89b4d8ff274b 100644
--- a/include/linux/irq_sim.h
+++ b/include/linux/irq_sim.h
@@ -1,44 +1,43 @@
-#ifndef _LINUX_IRQ_SIM_H
-#define _LINUX_IRQ_SIM_H
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
+ * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com>
*/
-#include <linux/irq_work.h>
+#ifndef _LINUX_IRQ_SIM_H
+#define _LINUX_IRQ_SIM_H
+
#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/irqdomain.h>
/*
* Provides a framework for allocating simulated interrupts which can be
* requested like normal irqs and enqueued from process context.
*/
-struct irq_sim_work_ctx {
- struct irq_work work;
- int irq;
-};
-
-struct irq_sim_irq_ctx {
- int irqnum;
- bool enabled;
-};
-
-struct irq_sim {
- struct irq_sim_work_ctx work_ctx;
- int irq_base;
- unsigned int irq_count;
- struct irq_sim_irq_ctx *irqs;
+struct irq_sim_ops {
+ int (*irq_sim_irq_requested)(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data);
+ void (*irq_sim_irq_released)(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data);
};
-int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs);
-int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
- unsigned int num_irqs);
-void irq_sim_fini(struct irq_sim *sim);
-void irq_sim_fire(struct irq_sim *sim, unsigned int offset);
-int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset);
+struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
+ unsigned int num_irqs);
+struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
+ struct fwnode_handle *fwnode,
+ unsigned int num_irqs);
+struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data);
+struct irq_domain *
+devm_irq_domain_create_sim_full(struct device *dev,
+ struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data);
+void irq_domain_remove_sim(struct irq_domain *domain);
#endif /* _LINUX_IRQ_SIM_H */
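A sketch of the reworked interface, assuming 'fwnode' was obtained elsewhere (e.g. from irq_domain_alloc_named_fwnode()): create an 8-line simulated domain and fire line 0 from process context by setting its pending state, the pattern used by consumers such as gpio-mockup:

#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>

static int foo_sim_demo(struct fwnode_handle *fwnode)
{
	struct irq_domain *d = irq_domain_create_sim(fwnode, 8);
	unsigned int virq;

	if (IS_ERR(d))
		return PTR_ERR(d);

	virq = irq_create_mapping(d, 0);
	if (!virq)
		return -EINVAL;

	/* enqueue the interrupt; it can be requested like a normal irq */
	return irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);
}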
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 47b9ebd4a74f..c5afd053ae32 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -1,7 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H
-#include <linux/llist.h>
+#include <linux/irq_work_types.h>
+#include <linux/rcuwait.h>
+#include <linux/smp_types.h>
/*
* An entry can be in one of four states:
@@ -12,31 +15,42 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-#define IRQ_WORK_PENDING 1UL
-#define IRQ_WORK_BUSY 2UL
-#define IRQ_WORK_FLAGS 3UL
-#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
+ .node = { .u_flags = (_flags), }, \
+ .func = (_func), \
+ .irqwait = __RCUWAIT_INITIALIZER(irqwait), \
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
-struct irq_work {
- unsigned long flags;
- struct llist_node llnode;
- void (*func)(struct irq_work *);
-};
+#define DEFINE_IRQ_WORK(name, _f) \
+ struct irq_work name = IRQ_WORK_INIT(_f)
static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
- work->flags = 0;
- work->func = func;
+ *work = IRQ_WORK_INIT(func);
}
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+static inline bool irq_work_is_pending(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
+}
-bool irq_work_queue(struct irq_work *work);
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
+}
-#ifdef CONFIG_SMP
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
+}
+
+bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
-#endif
void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);
@@ -46,9 +60,14 @@ void irq_work_sync(struct irq_work *work);
void irq_work_run(void);
bool irq_work_needs_cpu(void);
+void irq_work_single(void *arg);
+
+void arch_irq_work_raise(void);
+
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
+static inline void irq_work_single(void *arg) { }
#endif
#endif /* _LINUX_IRQ_WORK_H */
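A sketch of the static-initializer style this rework enables (names illustrative): the work item can be queued from NMI-safe context and the callback runs from the irq_work interrupt.

#include <linux/irq_work.h>
#include <linux/printk.h>

static void foo_work_fn(struct irq_work *work)
{
	pr_info("irq_work ran\n");
}

static DEFINE_IRQ_WORK(foo_work, foo_work_fn);

void foo_poke(void)	/* e.g. called from a perf/NMI path */
{
	irq_work_queue(&foo_work);
}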
diff --git a/include/linux/irq_work_types.h b/include/linux/irq_work_types.h
new file mode 100644
index 000000000000..73abec5bb06e
--- /dev/null
+++ b/include/linux/irq_work_types.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQ_WORK_TYPES_H
+#define _LINUX_IRQ_WORK_TYPES_H
+
+#include <linux/smp_types.h>
+#include <linux/types.h>
+
+struct irq_work {
+ struct __call_single_node node;
+ void (*func)(struct irq_work *);
+ struct rcuwait irqwait;
+};
+
+#endif
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
index f0f5d2671509..ede1fa938152 100644
--- a/include/linux/irqbypass.h
+++ b/include/linux/irqbypass.h
@@ -1,18 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* IRQ offload/bypass manager
*
* Copyright (C) 2015 Red Hat, Inc.
* Copyright (c) 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef IRQBYPASS_H
#define IRQBYPASS_H
#include <linux/list.h>
+struct eventfd_ctx;
struct irq_bypass_consumer;
/*
@@ -21,20 +19,22 @@ struct irq_bypass_consumer;
* The IRQ bypass manager is a simple set of lists and callbacks that allows
* IRQ producers (ex. physical interrupt sources) to be matched to IRQ
* consumers (ex. virtualization hardware that allows IRQ bypass or offload)
- * via a shared token (ex. eventfd_ctx). Producers and consumers register
- * independently. When a token match is found, the optional @stop callback
- * will be called for each participant. The pair will then be connected via
- * the @add_* callbacks, and finally the optional @start callback will allow
- * any final coordination. When either participant is unregistered, the
- * process is repeated using the @del_* callbacks in place of the @add_*
- * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings
- * are not supported.
+ * via a shared eventfd_ctx. Producers and consumers register independently.
+ * When a producer and consumer are paired, i.e. an eventfd match is found, the
+ * optional @stop callback will be called for each participant. The pair will
+ * then be connected via the @add_* callbacks, and finally the optional @start
+ * callback will allow any final coordination. When either participant is
+ * unregistered, the process is repeated using the @del_* callbacks in place of
+ * the @add_* callbacks. eventfds must be unique per producer/consumer; 1:N
+ * pairings are not supported.
*/
+struct irq_bypass_consumer;
+
/**
* struct irq_bypass_producer - IRQ bypass producer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @consumer: The connected consumer (NULL if no connection)
* @irq: Linux IRQ number for the producer device
* @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
* @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
@@ -46,8 +46,8 @@ struct irq_bypass_consumer;
* for a physical device assigned to a VM.
*/
struct irq_bypass_producer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_consumer *consumer;
int irq;
int (*add_consumer)(struct irq_bypass_producer *,
struct irq_bypass_consumer *);
@@ -59,8 +59,8 @@ struct irq_bypass_producer {
/**
* struct irq_bypass_consumer - IRQ bypass consumer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @producer: The connected producer (NULL if no connection)
* @add_producer: Connect the IRQ consumer to an IRQ producer
* @del_producer: Disconnect the IRQ consumer from an IRQ producer
* @stop: Perform any quiesce operations necessary prior to add/del (optional)
@@ -72,8 +72,9 @@ struct irq_bypass_producer {
* portions of the interrupt handling to the VM.
*/
struct irq_bypass_consumer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_producer *producer;
+
int (*add_producer)(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void (*del_producer)(struct irq_bypass_consumer *,
@@ -82,9 +83,11 @@ struct irq_bypass_consumer {
void (*start)(struct irq_bypass_consumer *);
};
-int irq_bypass_register_producer(struct irq_bypass_producer *);
-void irq_bypass_unregister_producer(struct irq_bypass_producer *);
-int irq_bypass_register_consumer(struct irq_bypass_consumer *);
-void irq_bypass_unregister_consumer(struct irq_bypass_consumer *);
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+ struct eventfd_ctx *eventfd, int irq);
+void irq_bypass_unregister_producer(struct irq_bypass_producer *producer);
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+ struct eventfd_ctx *eventfd);
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer);
#endif /* IRQBYPASS_H */
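A hedged sketch of the reworked producer-side registration; the eventfd context and IRQ number now come from the caller (e.g. a VFIO-like driver), rather than being matched via an opaque token:

static int foo_register_bypass(struct irq_bypass_producer *prod,
			       struct eventfd_ctx *eventfd, int irq)
{
	/* optional ->add_consumer/->del_consumer/->stop/->start
	 * callbacks may be filled in before registering */
	return irq_bypass_register_producer(prod, eventfd, irq);
}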
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 89c34b200671..bc4ddacd6ddc 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -12,25 +12,67 @@
#define _LINUX_IRQCHIP_H
#include <linux/acpi.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+typedef int (*platform_irq_probe_t)(struct platform_device *, struct device_node *);
+
+/* Undefined on purpose */
+extern of_irq_init_cb_t typecheck_irq_init_cb;
+extern platform_irq_probe_t typecheck_irq_probe;
+
+#define typecheck_irq_init_cb(fn) \
+ (__typecheck(typecheck_irq_init_cb, &fn) ? fn : fn)
+
+#define typecheck_irq_probe(fn) \
+ (__typecheck(typecheck_irq_probe, &fn) ? fn : fn)
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their DT compatible string and their
* initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_DECLARE of the
* same file.
- * @compstr: compatible string of the irqchip driver
+ * @compat: compatible string of the irqchip driver
* @fn: initialization function
*/
-#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+#define IRQCHIP_DECLARE(name, compat, fn) \
+ OF_DECLARE_2(irqchip, name, compat, typecheck_irq_init_cb(fn))
+
+extern int platform_irqchip_probe(struct platform_device *pdev);
+
+#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \
+static const struct of_device_id drv_name##_irqchip_match_table[] = {
+
+#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \
+ .data = typecheck_irq_probe(fn), },
+
+
+#define IRQCHIP_PLATFORM_DRIVER_END(drv_name, ...) \
+ {}, \
+}; \
+MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \
+static struct platform_driver drv_name##_driver = { \
+ .probe = IS_ENABLED(CONFIG_IRQCHIP) ? \
+ platform_irqchip_probe : NULL, \
+ .driver = { \
+ .name = #drv_name, \
+ .owner = THIS_MODULE, \
+ .of_match_table = drv_name##_irqchip_match_table, \
+ .suppress_bind_attrs = true, \
+ __VA_ARGS__ \
+ }, \
+}; \
+builtin_platform_driver(drv_name##_driver)
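A sketch of how a platform irqchip driver uses these macros (the "foo" names are illustrative; the probe signature must match platform_irq_probe_t so the typecheck above compiles):

static int foo_intc_probe(struct platform_device *pdev,
			  struct device_node *parent)
{
	/* map registers, create the irq domain, etc. */
	return 0;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(foo_intc)
IRQCHIP_MATCH("vendor,foo-intc", foo_intc_probe)
IRQCHIP_PLATFORM_DRIVER_END(foo_intc)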
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their version and their initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the
* same file.
* @subtable: Subtable to be identified in MADT
* @validate: Function to be called on that subtable to check its validity.
@@ -39,8 +81,9 @@
* @fn: initialization function
*/
#define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \
- ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \
- subtable, validate, data, fn)
+ ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(irqchip, name, \
+ ACPI_SIG_MADT, subtable, \
+ validate, data, fn)
#ifdef CONFIG_IRQCHIP
void irqchip_init(void);
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index 0a83b4379f34..fc0246cc05ac 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -1,36 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/irqchip/arm-gic-common.h
*
* Copyright (C) 2016 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H
#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H
-#include <linux/types.h>
-#include <linux/ioport.h>
-
-enum gic_type {
- GIC_V2,
- GIC_V3,
-};
+#include <linux/irqchip/arm-vgic-info.h>
-struct gic_kvm_info {
- /* GIC type */
- enum gic_type type;
- /* Virtual CPU interface */
- struct resource vcpu;
- /* Interrupt number */
- unsigned int maint_irq;
- /* Virtual control interface */
- struct resource vctrl;
- /* vlpi support */
- bool has_v4;
-};
+#define GICD_INT_DEF_PRI 0xa0
-const struct gic_kvm_info *gic_get_kvm_info(void);
+struct irq_domain;
+struct fwnode_handle;
+int gicv2m_init(struct fwnode_handle *parent_handle,
+ struct irq_domain *parent);
#endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */
diff --git a/include/linux/irqchip/arm-gic-v3-prio.h b/include/linux/irqchip/arm-gic-v3-prio.h
new file mode 100644
index 000000000000..44157c9abb78
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3-prio.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+
+/*
+ * GIC priorities from the view of the PMR/RPR.
+ *
+ * These values are chosen to be valid in either the absolute priority space or
+ * the NS view of the priority space. The value programmed into the distributor
+ * and ITS will be chosen at boot time such that these values appear in the
+ * PMR/RPR.
+ *
+ * GICV3_PRIO_UNMASKED is the PMR view of the priority to use to permit both
+ * IRQs and pseudo-NMIs.
+ *
+ * GICV3_PRIO_IRQ is the PMR view of the priority of regular interrupts. This
+ * can be written to the PMR to mask regular IRQs.
+ *
+ * GICV3_PRIO_NMI is the PMR view of the priority of pseudo-NMIs. This can be
+ * written to the PMR to mask pseudo-NMIs.
+ *
+ * On arm64 some code sections either automatically switch back to PSR.I or
+ * explicitly require that priority masking not be used. If bit GICV3_PRIO_PSR_I_SET
+ * is included in the priority mask, it indicates that PSR.I should be set and
+ * interrupt disabling temporarily does not rely on IRQ priorities.
+ */
+#define GICV3_PRIO_UNMASKED 0xe0
+#define GICV3_PRIO_IRQ 0xc0
+#define GICV3_PRIO_NMI 0x80
+
+#define GICV3_PRIO_PSR_I_SET (1 << 4)
+
+#ifndef __ASSEMBLER__
+
+#define __gicv3_prio_to_ns(p) (0xff & ((p) << 1))
+#define __gicv3_ns_to_prio(ns) (0x80 | ((ns) >> 1))
+
+#define __gicv3_prio_valid_ns(p) \
+ (__gicv3_ns_to_prio(__gicv3_prio_to_ns(p)) == (p))
+
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_NMI));
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_IRQ));
+
+static_assert(GICV3_PRIO_NMI < GICV3_PRIO_IRQ);
+static_assert(GICV3_PRIO_IRQ < GICV3_PRIO_UNMASKED);
+
+static_assert(GICV3_PRIO_IRQ < (GICV3_PRIO_IRQ | GICV3_PRIO_PSR_I_SET));
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H */
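Because the helpers are constant expressions, the NS-view round trip can be sanity-checked at compile time in the same style as the assertions above (values follow directly from the definitions in this header: 0xc0 << 1 is 0x180, which truncates to 0x80 in the NS view):

static_assert(__gicv3_prio_to_ns(GICV3_PRIO_IRQ) == 0x80);
static_assert(__gicv3_ns_to_prio(0x80) == GICV3_PRIO_IRQ);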
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1ea576c8126f..70c0948f978e 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
#define __LINUX_IRQCHIP_ARM_GIC_V3_H
@@ -25,12 +13,12 @@
#define GICD_CTLR 0x0000
#define GICD_TYPER 0x0004
#define GICD_IIDR 0x0008
+#define GICD_TYPER2 0x000C
#define GICD_STATUSR 0x0010
#define GICD_SETSPI_NSR 0x0040
#define GICD_CLRSPI_NSR 0x0048
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
-#define GICD_SEIR 0x0068
#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
@@ -42,10 +30,22 @@
#define GICD_ICFGR 0x0C00
#define GICD_IGRPMODR 0x0D00
#define GICD_NSACR 0x0E00
+#define GICD_IGROUPRnE 0x1000
+#define GICD_ISENABLERnE 0x1200
+#define GICD_ICENABLERnE 0x1400
+#define GICD_ISPENDRnE 0x1600
+#define GICD_ICPENDRnE 0x1800
+#define GICD_ISACTIVERnE 0x1A00
+#define GICD_ICACTIVERnE 0x1C00
+#define GICD_IPRIORITYRnE 0x2000
+#define GICD_ICFGRnE 0x3000
#define GICD_IROUTER 0x6000
+#define GICD_IROUTERnE 0x8000
#define GICD_IDREGS 0xFFD0
#define GICD_PIDR2 0xFFE8
+#define ESPI_BASE_INTID 4096
+
/*
* Those registers are actually from GICv2, but the spec demands that they
* are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3).
@@ -56,11 +56,22 @@
#define GICD_SPENDSGIR 0x0F20
#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_IIDR_IMPLEMENTER_SHIFT 0
+#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
+#define GICD_IIDR_REVISION_SHIFT 12
+#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT)
+#define GICD_IIDR_VARIANT_SHIFT 16
+#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT)
+#define GICD_IIDR_PRODUCT_ID_SHIFT 24
+#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT)
+
+
/*
* In systems with a single security state (what we emulate in KVM)
* the meaning of the interrupt group enable bits is slightly different
@@ -68,11 +79,20 @@
#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
+#define GICD_TYPER_RSS (1U << 26)
#define GICD_TYPER_LPIS (1U << 17)
#define GICD_TYPER_MBIS (1U << 16)
+#define GICD_TYPER_ESPI (1U << 8)
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
-#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1)
+#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_TYPER_ESPIS(typer) \
+ (((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
+
+#define GICD_TYPER2_nASSGIcap (1U << 8)
+#define GICD_TYPER2_VIL (1U << 7)
+#define GICD_TYPER2_VID GENMASK(4, 0)
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -83,6 +103,11 @@
#define GIC_V3_DIST_SIZE 0x10000
+#define GIC_PAGE_SIZE_4K 0ULL
+#define GIC_PAGE_SIZE_16K 1ULL
+#define GIC_PAGE_SIZE_64K 2ULL
+#define GIC_PAGE_SIZE_MASK 3ULL
+
/*
* Re-Distributor registers, offsets from RD_base
*/
@@ -93,21 +118,33 @@
#define GICR_WAKER 0x0014
#define GICR_SETLPIR 0x0040
#define GICR_CLRLPIR 0x0048
-#define GICR_SEIR GICD_SEIR
#define GICR_PROPBASER 0x0070
#define GICR_PENDBASER 0x0078
#define GICR_INVLPIR 0x00A0
#define GICR_INVALLR 0x00B0
#define GICR_SYNCR 0x00C0
-#define GICR_MOVLPIR 0x0100
-#define GICR_MOVALLR 0x0110
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+#define GICR_CTLR_CES (1UL << 1)
+#define GICR_CTLR_IR (1UL << 2)
+#define GICR_CTLR_RWP (1UL << 3)
#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
+#define EPPI_BASE_INTID 1056
+
+#define GICR_TYPER_NR_PPIS(r) \
+ ({ \
+ unsigned int __ppinum = ((r) >> 27) & 0x1f; \
+ unsigned int __nr_ppis = 16; \
+ if (__ppinum == 1 || __ppinum == 2) \
+ __nr_ppis += __ppinum * 32; \
+ \
+ __nr_ppis; \
+ })
+
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
@@ -152,7 +189,7 @@
#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB)
#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC)
#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
-#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
+#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)
#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt)
#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb)
#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt)
@@ -179,7 +216,7 @@
#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB)
#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC)
#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
-#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
+#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)
#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt)
#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb)
#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt)
@@ -204,8 +241,19 @@
#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_DIRTY (1U << 2)
#define GICR_TYPER_DirectLPIS (1U << 3)
#define GICR_TYPER_LAST (1U << 4)
+#define GICR_TYPER_RVPEID (1U << 7)
+#define GICR_TYPER_COMMON_LPI_AFF GENMASK_ULL(25, 24)
+#define GICR_TYPER_AFFINITY GENMASK_ULL(63, 32)
+
+#define GICR_INVLPIR_INTID GENMASK_ULL(31, 0)
+#define GICR_INVLPIR_VPEID GENMASK_ULL(47, 32)
+#define GICR_INVLPIR_V GENMASK_ULL(63, 63)
+
+#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID
+#define GICR_INVALLR_V GICR_INVLPIR_V
#define GIC_V3_REDIST_SIZE 0x20000
@@ -238,12 +286,24 @@
#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
-#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
+#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb)
#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)
+/*
+ * GICv4.1 VPROPBASER reinvention. A subtle mix between the old
+ * VPROPBASER and ITS_BASER. Just not quite either of the two.
+ */
+#define GICR_VPROPBASER_4_1_VALID (1ULL << 63)
+#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59)
+#define GICR_VPROPBASER_4_1_INDIRECT (1ULL << 55)
+#define GICR_VPROPBASER_4_1_PAGE_SIZE GENMASK_ULL(54, 53)
+#define GICR_VPROPBASER_4_1_Z (1ULL << 52)
+#define GICR_VPROPBASER_4_1_ADDR GENMASK_ULL(51, 12)
+#define GICR_VPROPBASER_4_1_SIZE GENMASK_ULL(6, 0)
+
#define GICR_VPENDBASER 0x0078
#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10)
@@ -261,10 +321,13 @@
#define GICR_VPENDBASER_NonShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
+#define GICR_VPENDBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable)
+
#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
-#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
+#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb)
#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
@@ -276,11 +339,30 @@
#define GICR_VPENDBASER_Valid (1ULL << 63)
/*
+ * GICv4.1 VPENDBASER, used for VPE residency. On top of these fields,
+ * also use the above Valid, PendingLast and Dirty.
+ */
+#define GICR_VPENDBASER_4_1_DB (1ULL << 62)
+#define GICR_VPENDBASER_4_1_VGRP0EN (1ULL << 59)
+#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58)
+#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0)
+
+#define GICR_VSGIR 0x0080
+
+#define GICR_VSGIR_VPEID GENMASK(15, 0)
+
+#define GICR_VSGIPENDR 0x0088
+
+#define GICR_VSGIPENDR_BUSY (1U << 31)
+#define GICR_VSGIPENDR_PENDING GENMASK(15, 0)
+
+/*
* ITS registers, offsets from ITS_base
*/
#define GITS_CTLR 0x0000
#define GITS_IIDR 0x0004
#define GITS_TYPER 0x0008
+#define GITS_MPIDR 0x0018
#define GITS_CBASER 0x0080
#define GITS_CWRITER 0x0088
#define GITS_CREADR 0x0090
@@ -297,6 +379,11 @@
#define GITS_TRANSLATER 0x10040
+#define GITS_SGIR 0x20020
+
+#define GITS_SGIR_VPEID GENMASK_ULL(47, 32)
+#define GITS_SGIR_VINTID GENMASK_ULL(3, 0)
+
#define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_ImDe (1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT 4
@@ -306,13 +393,16 @@
#define GITS_TYPER_PLPIS (1UL << 0)
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
-#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
-#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13)
#define GITS_TYPER_PTA (1UL << 19)
-#define GITS_TYPER_HWCOLLCNT_SHIFT 24
+#define GITS_TYPER_HCC_SHIFT 24
+#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff)
#define GITS_TYPER_VMOVP (1ULL << 37)
+#define GITS_TYPER_VMAPP (1ULL << 40)
+#define GITS_TYPER_SVPET GENMASK_ULL(42, 41)
#define GITS_IIDR_REV_SHIFT 12
#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
@@ -337,12 +427,14 @@
#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB)
#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC)
#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
-#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
+#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb)
#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt)
#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb)
#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb)
+#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12))
+
#define GITS_BASER_NR_REGS 8
#define GITS_BASER_VALID (1ULL << 63)
@@ -361,7 +453,7 @@
#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB)
#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC)
#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
-#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
+#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)
#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt)
#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb)
#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt)
@@ -372,14 +464,20 @@
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
+#define GITS_BASER_PHYS_52_to_48(phys) \
+ (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
+#define GITS_BASER_ADDR_48_to_52(baser) \
+ (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48)
+
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
#define GITS_BASER_PAGE_SIZE_SHIFT (8)
-#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define __GITS_BASER_PSZ(sz) (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K __GITS_BASER_PSZ(4K)
+#define GITS_BASER_PAGE_SIZE_16K __GITS_BASER_PSZ(16K)
+#define GITS_BASER_PAGE_SIZE_64K __GITS_BASER_PSZ(64K)
+#define GITS_BASER_PAGE_SIZE_MASK __GITS_BASER_PSZ(MASK)
#define GITS_BASER_PAGES_MAX 256
#define GITS_BASER_PAGES_SHIFT (0)
#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1)
@@ -420,8 +518,10 @@
#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
-/* VMOVP is the odd one, as it doesn't have a physical counterpart */
+/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */
#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
+#define GITS_CMD_VSGI GITS_CMD_GICv4(3)
+#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe)
/*
* ITS error numbers
@@ -451,6 +551,8 @@
#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT)
#define ICC_CTLR_EL1_CBPR_SHIFT 0
#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT)
+#define ICC_CTLR_EL1_PMHE_SHIFT 6
+#define ICC_CTLR_EL1_PMHE_MASK (1 << ICC_CTLR_EL1_PMHE_SHIFT)
#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8
#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT)
#define ICC_CTLR_EL1_ID_BITS_SHIFT 11
@@ -459,6 +561,8 @@
#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT)
#define ICC_CTLR_EL1_A3V_SHIFT 15
#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT)
+#define ICC_CTLR_EL1_RSS (0x1 << 18)
+#define ICC_CTLR_EL1_ExtRange (0x1 << 19)
#define ICC_PMR_EL1_SHIFT 0
#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT)
#define ICC_BPR0_EL1_SHIFT 0
@@ -473,66 +577,11 @@
#define ICC_SRE_EL1_DFB (1U << 1)
#define ICC_SRE_EL1_SRE (1U << 0)
-/*
- * Hypervisor interface registers (SRE only)
- */
-#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
-
-#define ICH_LR_EOI (1ULL << 41)
-#define ICH_LR_GROUP (1ULL << 60)
-#define ICH_LR_HW (1ULL << 61)
-#define ICH_LR_STATE (3ULL << 62)
-#define ICH_LR_PENDING_BIT (1ULL << 62)
-#define ICH_LR_ACTIVE_BIT (1ULL << 63)
-#define ICH_LR_PHYS_ID_SHIFT 32
-#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
-#define ICH_LR_PRIORITY_SHIFT 48
-#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
-
/* These are for GICv2 emulation only */
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
-#define ICH_MISR_EOI (1 << 0)
-#define ICH_MISR_U (1 << 1)
-
-#define ICH_HCR_EN (1 << 0)
-#define ICH_HCR_UIE (1 << 1)
-#define ICH_HCR_TC (1 << 10)
-#define ICH_HCR_TALL0 (1 << 11)
-#define ICH_HCR_TALL1 (1 << 12)
-#define ICH_HCR_EOIcount_SHIFT 27
-#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
-
-#define ICH_VMCR_ACK_CTL_SHIFT 2
-#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
-#define ICH_VMCR_FIQ_EN_SHIFT 3
-#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
-#define ICH_VMCR_CBPR_SHIFT 4
-#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
-#define ICH_VMCR_EOIM_SHIFT 9
-#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT)
-#define ICH_VMCR_BPR1_SHIFT 18
-#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
-#define ICH_VMCR_BPR0_SHIFT 21
-#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
-#define ICH_VMCR_PMR_SHIFT 24
-#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
-#define ICH_VMCR_ENG0_SHIFT 0
-#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT)
-#define ICH_VMCR_ENG1_SHIFT 1
-#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
-
-#define ICH_VTR_PRI_BITS_SHIFT 29
-#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
-#define ICH_VTR_ID_BITS_SHIFT 23
-#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
-#define ICH_VTR_SEIS_SHIFT 22
-#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
-#define ICH_VTR_A3V_SHIFT 21
-#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
-
#define ICC_IAR1_EL1_SPURIOUS 0x3ff
#define ICC_SRE_EL2_SRE (1 << 0)
@@ -547,6 +596,8 @@
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
+#define ICC_SGI1R_RS_SHIFT 44
+#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT)
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
@@ -562,22 +613,33 @@
struct rdists {
struct {
+ raw_spinlock_t rd_lock;
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
+ u64 flags;
+ cpumask_t *vpe_table_mask;
+ void *vpe_l1_base;
} __percpu *rdist;
- struct page *prop_page;
- int id_bits;
+ phys_addr_t prop_table_pa;
+ void *prop_table_va;
u64 flags;
+ u32 gicd_typer;
+ u32 gicd_typer2;
+ int cpuhp_memreserve_state;
bool has_vlpis;
+ bool has_rvpeid;
bool has_direct_lpi;
+ bool has_vpend_valid_dirty;
};
struct irq_domain;
struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
int its_cpu_init(void);
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
- struct irq_domain *domain);
+ struct irq_domain *domain, u8 irq_prio);
+int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
static inline bool gic_enable_sre(void)
{
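The hunk above adds the Range Selector (RS) field to the SGI generation register; per the GIC architecture, RS selects which window of sixteen Affinity-0 values the target list refers to, letting SGIs reach PEs with Aff0 >= 16. A minimal sketch (not the in-tree implementation; the function name is hypothetical and the Aff1/target-list handling is omitted) of folding the new field into an ICC_SGI1R_EL1 value using only the masks defined above:

	/* Sketch only: compose the affinity and range-selector bits */
	static u64 sgi1r_affinity_bits(u64 aff3, u64 aff2, u64 rs)
	{
		u64 val = 0;

		val |= (aff3 << ICC_SGI1R_AFFINITY_3_SHIFT) & ICC_SGI1R_AFFINITY_3_MASK;
		val |= (aff2 << ICC_SGI1R_AFFINITY_2_SHIFT) & ICC_SGI1R_AFFINITY_2_MASK;
		val |= (rs << ICC_SGI1R_RS_SHIFT) & ICC_SGI1R_RS_MASK;

		return val;
	}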
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 58a4d89aa82c..0b0887099fd7 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H
@@ -20,6 +9,12 @@
struct its_vpe;
+/*
+ * Maximum number of ITTs when GITS_TYPER.VMOVP == 0, using the
+ * ITSList mechanism to perform inter-ITS synchronization.
+ */
+#define GICv4_ITS_LIST_MAX 16
+
/* Embedded in kvm.arch */
struct its_vm {
struct fwnode_handle *fwnode;
@@ -30,17 +25,58 @@ struct its_vm {
irq_hw_number_t db_lpi_base;
unsigned long *db_bitmap;
int nr_db_lpis;
+ /*
+ * Ensures mutual exclusion between updates to vlpi_count[]
+ * and map/unmap when using the ITSList mechanism.
+ *
+ * The lock order for any sequence involving the ITSList is
+ * vmapp_lock -> vpe_lock -> vmovp_lock.
+ */
+ raw_spinlock_t vmapp_lock;
+ u32 vlpi_count[GICv4_ITS_LIST_MAX];
};
/* Embedded in kvm_vcpu.arch */
struct its_vpe {
struct page *vpt_page;
struct its_vm *its_vm;
+ /* per-vPE VLPI tracking */
+ atomic_t vlpi_count;
/* Doorbell interrupt */
int irq;
irq_hw_number_t vpe_db_lpi;
- /* VPE proxy mapping */
- int vpe_proxy_event;
+ /* VPE resident */
+ bool resident;
+ /* VPT parse complete */
+ bool ready;
+ union {
+ /* GICv4.0 implementations */
+ struct {
+ /* VPE proxy mapping */
+ int vpe_proxy_event;
+ /* Implementation Defined Area Invalid */
+ bool idai;
+ };
+ /* GICv4.1 implementations */
+ struct {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *sgi_domain;
+ struct {
+ u8 priority;
+ bool enabled;
+ bool group;
+ } sgi_config[16];
+ };
+ };
+
+ /* Track the VPE being mapped */
+ atomic_t vmapp_count;
+
+ /*
+ * Ensures mutual exclusion between affinity setting of the
+ * vPE and vLPI operations using vpe->col_idx.
+ */
+ raw_spinlock_t vpe_lock;
/*
* This collection ID is used to indirect the target
* redistributor for this VPE. The ID itself isn't involved in
@@ -49,8 +85,6 @@ struct its_vpe {
u16 col_idx;
/* Unique (system-wide) VPE identifier */
u16 vpe_id;
- /* Implementation Defined Area Invalid */
- bool idai;
/* Pending VLPIs on schedule out? */
bool pending_last;
};
@@ -64,12 +98,14 @@ struct its_vpe {
* @vm: Pointer to the GICv4 notion of a VM
* @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
* @vintid: Virtual LPI number
+ * @properties: Priority and enable bits (as written in the prop table)
* @db_enabled: Is the VPE doorbell to be generated?
*/
struct its_vlpi_map {
struct its_vm *vm;
struct its_vpe *vpe;
u32 vintid;
+ u8 properties;
bool db_enabled;
};
@@ -80,7 +116,9 @@ enum its_vcpu_info_cmd_type {
PROP_UPDATE_AND_INV_VLPI,
SCHEDULE_VPE,
DESCHEDULE_VPE,
+ COMMIT_VPE,
INVALL_VPE,
+ PROP_UPDATE_VSGI,
};
struct its_cmd_info {
@@ -88,18 +126,35 @@ struct its_cmd_info {
union {
struct its_vlpi_map *map;
u8 config;
+ bool req_db;
+ struct {
+ bool g0en;
+ bool g1en;
+ };
+ struct {
+ u8 priority;
+ bool group;
+ };
};
};
int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm);
-int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
+int its_commit_vpe(struct its_vpe *vpe);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
-int its_unmap_vlpi(int irq);
+void its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
+int its_prop_update_vsgi(int irq, u8 priority, bool group);
+
+struct irq_domain_ops;
+int its_init_v4(struct irq_domain *domain,
+ const struct irq_domain_ops *vpe_ops,
+ const struct irq_domain_ops *sgi_ops);
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+bool gic_cpuif_has_vsgi(void);
#endif
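The GICv4.1 rework above replaces its_schedule_vpe() with explicit residency calls and turns its_unmap_vlpi() into a void function. Below is a hedged sketch of the direct-injection mapping step a hypervisor-side caller might perform; host_irq, vm, vpe and vintid are hypothetical caller state:

	static int vlpi_forward_sketch(int host_irq, struct its_vm *vm,
				       struct its_vpe *vpe, u32 vintid)
	{
		struct its_vlpi_map map = {
			.vm = vm,
			.vpe = vpe,
			.vintid = vintid,	/* vLPI number seen by the guest */
			.properties = 0,	/* priority/enable, prop-table encoding */
			.db_enabled = true,	/* doorbell when the vPE is not resident */
		};

		/* Redirect the host interrupt into the guest as a vLPI */
		return its_map_vlpi(host_irq, &map);
	}

On vCPU entry and exit the caller would then pair its_make_vpe_resident() with its_make_vpe_non_resident(), requesting a doorbell on exit when pending vLPIs must wake the vCPU.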
diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h
new file mode 100644
index 000000000000..68ddcdb1cec5
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v5.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V5_H
+#define __LINUX_IRQCHIP_ARM_GIC_V5_H
+
+#include <linux/iopoll.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <asm/sysreg.h>
+
+#define GICV5_IPIS_PER_CPU MAX_IPI
+
+/*
+ * INTID handling
+ */
+#define GICV5_HWIRQ_ID GENMASK(23, 0)
+#define GICV5_HWIRQ_TYPE GENMASK(31, 29)
+#define GICV5_HWIRQ_INTID GENMASK_ULL(31, 0)
+
+#define GICV5_HWIRQ_TYPE_PPI UL(0x1)
+#define GICV5_HWIRQ_TYPE_LPI UL(0x2)
+#define GICV5_HWIRQ_TYPE_SPI UL(0x3)
+
+/*
+ * Table attributes
+ */
+#define GICV5_NO_READ_ALLOC 0b0
+#define GICV5_READ_ALLOC 0b1
+#define GICV5_NO_WRITE_ALLOC 0b0
+#define GICV5_WRITE_ALLOC 0b1
+
+#define GICV5_NON_CACHE 0b00
+#define GICV5_WB_CACHE 0b01
+#define GICV5_WT_CACHE 0b10
+
+#define GICV5_NON_SHARE 0b00
+#define GICV5_OUTER_SHARE 0b10
+#define GICV5_INNER_SHARE 0b11
+
+/*
+ * IRS registers and table structures
+ */
+#define GICV5_IRS_IDR1 0x0004
+#define GICV5_IRS_IDR2 0x0008
+#define GICV5_IRS_IDR5 0x0014
+#define GICV5_IRS_IDR6 0x0018
+#define GICV5_IRS_IDR7 0x001c
+#define GICV5_IRS_CR0 0x0080
+#define GICV5_IRS_CR1 0x0084
+#define GICV5_IRS_SYNCR 0x00c0
+#define GICV5_IRS_SYNC_STATUSR 0x00c4
+#define GICV5_IRS_SPI_SELR 0x0108
+#define GICV5_IRS_SPI_CFGR 0x0114
+#define GICV5_IRS_SPI_STATUSR 0x0118
+#define GICV5_IRS_PE_SELR 0x0140
+#define GICV5_IRS_PE_STATUSR 0x0144
+#define GICV5_IRS_PE_CR0 0x0148
+#define GICV5_IRS_IST_BASER 0x0180
+#define GICV5_IRS_IST_CFGR 0x0190
+#define GICV5_IRS_IST_STATUSR 0x0194
+#define GICV5_IRS_MAP_L2_ISTR 0x01c0
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20)
+#define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16)
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS_1BITS 0b000
+#define GICV5_IRS_IDR1_PRIORITY_BITS_2BITS 0b001
+#define GICV5_IRS_IDR1_PRIORITY_BITS_3BITS 0b010
+#define GICV5_IRS_IDR1_PRIORITY_BITS_4BITS 0b011
+#define GICV5_IRS_IDR1_PRIORITY_BITS_5BITS 0b100
+
+#define GICV5_IRS_IDR2_ISTMD_SZ GENMASK(19, 15)
+#define GICV5_IRS_IDR2_ISTMD BIT(14)
+#define GICV5_IRS_IDR2_IST_L2SZ GENMASK(13, 11)
+#define GICV5_IRS_IDR2_IST_LEVELS BIT(10)
+#define GICV5_IRS_IDR2_MIN_LPI_ID_BITS GENMASK(9, 6)
+#define GICV5_IRS_IDR2_LPI BIT(5)
+#define GICV5_IRS_IDR2_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IDR5_SPI_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR6_SPI_IRS_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR7_SPI_BASE GENMASK(23, 0)
+
+#define GICV5_IRS_IST_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(11), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(12), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(13), (r))
+
+#define GICV5_IRS_CR0_IDLE BIT(1)
+#define GICV5_IRS_CR0_IRSEN BIT(0)
+
+#define GICV5_IRS_CR1_VPED_WA BIT(15)
+#define GICV5_IRS_CR1_VPED_RA BIT(14)
+#define GICV5_IRS_CR1_VMD_WA BIT(13)
+#define GICV5_IRS_CR1_VMD_RA BIT(12)
+#define GICV5_IRS_CR1_VPET_WA BIT(11)
+#define GICV5_IRS_CR1_VPET_RA BIT(10)
+#define GICV5_IRS_CR1_VMT_WA BIT(9)
+#define GICV5_IRS_CR1_VMT_RA BIT(8)
+#define GICV5_IRS_CR1_IST_WA BIT(7)
+#define GICV5_IRS_CR1_IST_RA BIT(6)
+#define GICV5_IRS_CR1_IC GENMASK(5, 4)
+#define GICV5_IRS_CR1_OC GENMASK(3, 2)
+#define GICV5_IRS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_IRS_SYNCR_SYNC BIT(31)
+
+#define GICV5_IRS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_STATUSR_V BIT(1)
+#define GICV5_IRS_SPI_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_SELR_ID GENMASK(23, 0)
+
+#define GICV5_IRS_SPI_CFGR_TM BIT(0)
+
+#define GICV5_IRS_PE_SELR_IAFFID GENMASK(15, 0)
+
+#define GICV5_IRS_PE_STATUSR_V BIT(1)
+#define GICV5_IRS_PE_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_PE_CR0_DPS BIT(0)
+
+#define GICV5_IRS_IST_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE BIT(16)
+#define GICV5_IRS_IST_CFGR_ISTSZ GENMASK(8, 7)
+#define GICV5_IRS_IST_CFGR_L2SZ GENMASK(6, 5)
+#define GICV5_IRS_IST_CFGR_LPI_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR 0b0
+#define GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL 0b1
+
+#define GICV5_IRS_IST_CFGR_ISTSZ_4 0b00
+#define GICV5_IRS_IST_CFGR_ISTSZ_8 0b01
+#define GICV5_IRS_IST_CFGR_ISTSZ_16 0b10
+
+#define GICV5_IRS_IST_CFGR_L2SZ_4K 0b00
+#define GICV5_IRS_IST_CFGR_L2SZ_16K 0b01
+#define GICV5_IRS_IST_CFGR_L2SZ_64K 0b10
+
+#define GICV5_IRS_IST_BASER_ADDR_MASK GENMASK_ULL(55, 6)
+#define GICV5_IRS_IST_BASER_VALID BIT_ULL(0)
+
+#define GICV5_IRS_MAP_L2_ISTR_ID GENMASK(23, 0)
+
+#define GICV5_ISTL1E_VALID BIT_ULL(0)
+
+#define GICV5_ISTL1E_L2_ADDR_MASK GENMASK_ULL(55, 12)
+
+/*
+ * ITS registers and table structures
+ */
+#define GICV5_ITS_IDR1 0x0004
+#define GICV5_ITS_IDR2 0x0008
+#define GICV5_ITS_CR0 0x0080
+#define GICV5_ITS_CR1 0x0084
+#define GICV5_ITS_DT_BASER 0x00c0
+#define GICV5_ITS_DT_CFGR 0x00d0
+#define GICV5_ITS_DIDR 0x0100
+#define GICV5_ITS_EIDR 0x0108
+#define GICV5_ITS_INV_EVENTR 0x010c
+#define GICV5_ITS_INV_DEVICER 0x0110
+#define GICV5_ITS_STATUSR 0x0120
+#define GICV5_ITS_SYNCR 0x0140
+#define GICV5_ITS_SYNC_STATUSR 0x0148
+
+#define GICV5_ITS_IDR1_L2SZ GENMASK(10, 8)
+#define GICV5_ITS_IDR1_ITT_LEVELS BIT(7)
+#define GICV5_ITS_IDR1_DT_LEVELS BIT(6)
+#define GICV5_ITS_IDR1_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(8), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(9), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(10), (r))
+
+#define GICV5_ITS_IDR2_XDMN_EVENTs GENMASK(6, 5)
+#define GICV5_ITS_IDR2_EVENTID_BITS GENMASK(4, 0)
+
+#define GICV5_ITS_CR0_IDLE BIT(1)
+#define GICV5_ITS_CR0_ITSEN BIT(0)
+
+#define GICV5_ITS_CR1_ITT_RA BIT(7)
+#define GICV5_ITS_CR1_DT_RA BIT(6)
+#define GICV5_ITS_CR1_IC GENMASK(5, 4)
+#define GICV5_ITS_CR1_OC GENMASK(3, 2)
+#define GICV5_ITS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_ITS_DT_CFGR_STRUCTURE BIT(16)
+#define GICV5_ITS_DT_CFGR_L2SZ GENMASK(7, 6)
+#define GICV5_ITS_DT_CFGR_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_DT_BASER_ADDR_MASK GENMASK_ULL(55, 3)
+
+#define GICV5_ITS_INV_DEVICER_I BIT(31)
+#define GICV5_ITS_INV_DEVICER_EVENTID_BITS GENMASK(5, 1)
+#define GICV5_ITS_INV_DEVICER_L1 BIT(0)
+
+#define GICV5_ITS_DIDR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_EIDR_EVENTID GENMASK(15, 0)
+
+#define GICV5_ITS_INV_EVENTR_I BIT(31)
+#define GICV5_ITS_INV_EVENTR_ITT_L2SZ GENMASK(2, 1)
+#define GICV5_ITS_INV_EVENTR_L1 BIT(0)
+
+#define GICV5_ITS_STATUSR_IDLE BIT(0)
+
+#define GICV5_ITS_SYNCR_SYNC BIT_ULL(63)
+#define GICV5_ITS_SYNCR_SYNCALL BIT_ULL(32)
+#define GICV5_ITS_SYNCR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_DTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_DTL2E_VALID BIT_ULL(0)
+#define GICV5_DTL2E_ITT_L2SZ GENMASK_ULL(2, 1)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL2E_ITT_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL2E_ITT_DSWE BIT_ULL(57)
+#define GICV5_DTL2E_ITT_STRUCTURE BIT_ULL(58)
+#define GICV5_DTL2E_EVENT_ID_BITS GENMASK_ULL(63, 59)
+
+#define GICV5_ITTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_ITTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_ITTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_ITTL2E_LPI_ID GENMASK_ULL(23, 0)
+#define GICV5_ITTL2E_DAC GENMASK_ULL(29, 28)
+#define GICV5_ITTL2E_VIRTUAL BIT_ULL(30)
+#define GICV5_ITTL2E_VALID BIT_ULL(31)
+#define GICV5_ITTL2E_VM_ID GENMASK_ULL(47, 32)
+
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_4k 0b00
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_16k 0b01
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_64k 0b10
+
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR 0
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL 1
+
+#define GICV5_ITS_HWIRQ_DEVICE_ID GENMASK_ULL(31, 0)
+#define GICV5_ITS_HWIRQ_EVENT_ID GENMASK_ULL(63, 32)
+
+/*
+ * IWB registers
+ */
+#define GICV5_IWB_IDR0 0x0000
+#define GICV5_IWB_CR0 0x0080
+#define GICV5_IWB_WENABLE_STATUSR 0x00c0
+#define GICV5_IWB_WENABLER 0x2000
+#define GICV5_IWB_WTMR 0x4000
+
+#define GICV5_IWB_IDR0_INT_DOMS GENMASK(14, 11)
+#define GICV5_IWB_IDR0_IW_RANGE GENMASK(10, 0)
+
+#define GICV5_IWB_CR0_IDLE BIT(1)
+#define GICV5_IWB_CR0_IWBEN BIT(0)
+
+#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0)
+
+/*
+ * Global Data structures and functions
+ */
+struct gicv5_chip_data {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *ppi_domain;
+ struct irq_domain *spi_domain;
+ struct irq_domain *lpi_domain;
+ struct irq_domain *ipi_domain;
+ u32 global_spi_count;
+ u8 cpuif_pri_bits;
+ u8 cpuif_id_bits;
+ u8 irs_pri_bits;
+ struct {
+ __le64 *l1ist_addr;
+ u32 l2_size;
+ u8 l2_bits;
+ bool l2;
+ } ist;
+};
+
+extern struct gicv5_chip_data gicv5_global_data __read_mostly;
+
+struct gicv5_irs_chip_data {
+ struct list_head entry;
+ struct fwnode_handle *fwnode;
+ void __iomem *irs_base;
+ u32 flags;
+ u32 spi_min;
+ u32 spi_range;
+ raw_spinlock_t spi_config_lock;
+};
+
+static inline int gicv5_wait_for_op_s_atomic(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask,
+ u32 *val)
+{
+ void __iomem *reg = addr + offset;
+ u32 tmp;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(reg, tmp, tmp & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ if (val)
+ *val = tmp;
+
+ return 0;
+}
+
+static inline int gicv5_wait_for_op_s(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask)
+{
+ void __iomem *reg = addr + offset;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(reg, val, val & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define gicv5_wait_for_op_atomic(base, reg, mask, val) \
+ gicv5_wait_for_op_s_atomic(base, reg, #reg, mask, val)
+
+#define gicv5_wait_for_op(base, reg, mask) \
+ gicv5_wait_for_op_s(base, reg, #reg, mask)
+
+void __init gicv5_init_lpi_domain(void);
+void __init gicv5_free_lpi_domain(void);
+
+int gicv5_irs_of_probe(struct device_node *parent);
+void gicv5_irs_remove(void);
+int gicv5_irs_enable(void);
+void gicv5_irs_its_probe(void);
+int gicv5_irs_register_cpu(int cpuid);
+int gicv5_irs_cpu_to_iaffid(int cpu_id, u16 *iaffid);
+struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id);
+int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type);
+int gicv5_irs_iste_alloc(u32 lpi);
+void gicv5_irs_syncr(void);
+
+struct gicv5_its_devtab_cfg {
+ union {
+ struct {
+ __le64 *devtab;
+ } linear;
+ struct {
+ __le64 *l1devtab;
+ __le64 **l2ptrs;
+ } l2;
+ };
+ u32 cfgr;
+};
+
+struct gicv5_its_itt_cfg {
+ union {
+ struct {
+ __le64 *itt;
+ unsigned int num_ents;
+ } linear;
+ struct {
+ __le64 *l1itt;
+ __le64 **l2ptrs;
+ unsigned int num_l1_ents;
+ u8 l2sz;
+ } l2;
+ };
+ u8 event_id_bits;
+ bool l2itt;
+};
+
+void gicv5_init_lpis(u32 max);
+void gicv5_deinit_lpis(void);
+
+int gicv5_alloc_lpi(void);
+void gicv5_free_lpi(u32 lpi);
+
+void __init gicv5_its_of_probe(struct device_node *parent);
+#endif
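gicv5_wait_for_op() stringifies the register macro for its ratelimited error message, so callers pass only the base, the register offset and the bit to poll. A minimal sketch, assuming <linux/bitfield.h> is available for FIELD_GET and irs_base is a mapped IRS frame:

	static int irs_enable_sketch(void __iomem *irs_base)
	{
		u32 idr1;
		int ret;

		writel_relaxed(GICV5_IRS_CR0_IRSEN, irs_base + GICV5_IRS_CR0);

		/* Poll CR0.IDLE; on timeout the macro prints "GICV5_IRS_CR0" */
		ret = gicv5_wait_for_op(irs_base, GICV5_IRS_CR0, GICV5_IRS_CR0_IDLE);
		if (ret)
			return ret;

		idr1 = readl_relaxed(irs_base + GICV5_IRS_IDR1);
		pr_info("IRS PRIORITY_BITS encoding: %lu\n",
			FIELD_GET(GICV5_IRS_IDR1_PRIORITY_BITS, idr1));
		return 0;
	}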
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index d3453ee072fc..d45fa19f9e47 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/irqchip/arm-gic.h
*
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_IRQCHIP_ARM_GIC_H
#define __LINUX_IRQCHIP_ARM_GIC_H
@@ -65,11 +62,16 @@
#define GICD_INT_EN_CLR_X32 0xffffffff
#define GICD_INT_EN_SET_SGI 0x0000ffff
#define GICD_INT_EN_CLR_PPI 0xffff0000
-#define GICD_INT_DEF_PRI 0xa0
-#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
- (GICD_INT_DEF_PRI << 16) |\
- (GICD_INT_DEF_PRI << 8) |\
- GICD_INT_DEF_PRI)
+
+#define GICD_IIDR_IMPLEMENTER_SHIFT 0
+#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
+#define GICD_IIDR_REVISION_SHIFT 12
+#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT)
+#define GICD_IIDR_VARIANT_SHIFT 16
+#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT)
+#define GICD_IIDR_PRODUCT_ID_SHIFT 24
+#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT)
+
#define GICH_HCR 0x0
#define GICH_VTR 0x4
@@ -84,6 +86,13 @@
#define GICH_HCR_EN (1 << 0)
#define GICH_HCR_UIE (1 << 1)
+#define GICH_HCR_LRENPIE (1 << 2)
+#define GICH_HCR_NPIE (1 << 3)
+#define GICH_HCR_VGrp0EIE (1 << 4)
+#define GICH_HCR_VGrp0DIE (1 << 5)
+#define GICH_HCR_VGrp1EIE (1 << 6)
+#define GICH_HCR_VGrp1DIE (1 << 7)
+#define GICH_HCR_EOICOUNT GENMASK(31, 27)
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
@@ -93,6 +102,7 @@
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
+#define GICH_LR_GROUP1 (1 << 30)
#define GICH_LR_HW (1 << 31)
#define GICH_VMCR_ENABLE_GRP0_SHIFT 0
@@ -147,16 +157,6 @@ int gic_of_init(struct device_node *node, struct device_node *parent);
*/
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
-/*
- * Legacy platforms not converted to DT yet must use this to init
- * their GIC
- */
-void gic_init(unsigned int nr, int start,
- void __iomem *dist , void __iomem *cpu);
-
-int gicv2m_init(struct fwnode_handle *parent_handle,
- struct irq_domain *parent);
-
void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
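The new GICD_IIDR masks let drivers identify the distributor implementation, typically to key errata workarounds. A small decoder sketch using only the fields defined above (function name hypothetical):

	static void gicd_iidr_decode_sketch(u32 iidr)
	{
		u32 impl = (iidr & GICD_IIDR_IMPLEMENTER_MASK) >> GICD_IIDR_IMPLEMENTER_SHIFT;
		u32 rev = (iidr & GICD_IIDR_REVISION_MASK) >> GICD_IIDR_REVISION_SHIFT;
		u32 var = (iidr & GICD_IIDR_VARIANT_MASK) >> GICD_IIDR_VARIANT_SHIFT;
		u32 prod = (iidr & GICD_IIDR_PRODUCT_ID_MASK) >> GICD_IIDR_PRODUCT_ID_SHIFT;

		pr_info("GICD implementer %#x variant %u revision %u product id %#x\n",
			impl, var, rev, prod);
	}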
diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h
new file mode 100644
index 000000000000..67d9d960273b
--- /dev/null
+++ b/include/linux/irqchip/arm-vgic-info.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/linux/irqchip/arm-vgic-info.h
+ *
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+#define __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+enum gic_type {
+ /* Full GICv2 */
+ GIC_V2,
+ /* Full GICv3, optionally with v2 compat */
+ GIC_V3,
+ /* Full GICv5, optionally with v3 compat */
+ GIC_V5,
+};
+
+struct gic_kvm_info {
+ /* GIC type */
+ enum gic_type type;
+ /* Virtual CPU interface */
+ struct resource vcpu;
+ /* GICv2 GICC VA */
+ void __iomem *gicc_base;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* No interrupt mask, no need to use the above field */
+ bool no_maint_irq_mask;
+ /* Virtual control interface */
+ struct resource vctrl;
+ /* vlpi support */
+ bool has_v4;
+ /* rvpeid support */
+ bool has_v4_1;
+ /* Deactivation impaired, subpar stuff */
+ bool no_hw_deactivation;
+};
+
+#ifdef CONFIG_KVM
+void vgic_set_kvm_info(const struct gic_kvm_info *info);
+#else
+static inline void vgic_set_kvm_info(const struct gic_kvm_info *info) {}
+#endif
+
+#endif
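This header is the hand-off point between the GIC irqchip drivers and KVM's vGIC: the driver fills in a gic_kvm_info and publishes it, and the stub makes the call vanish when KVM is out of the build. A sketch of what a GICv2 driver might publish (resources and IRQ number are hypothetical caller state):

	static struct gic_kvm_info kvm_info_sketch;

	static void gic_publish_kvm_info_sketch(const struct resource *vctrl,
						const struct resource *vcpu,
						unsigned int maint_irq)
	{
		kvm_info_sketch.type = GIC_V2;
		kvm_info_sketch.vctrl = *vctrl;		/* GICH frame */
		kvm_info_sketch.vcpu = *vcpu;		/* GICV frame */
		kvm_info_sketch.maint_irq = maint_irq;	/* maintenance interrupt */

		vgic_set_kvm_info(&kvm_info_sketch);
	}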
diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h
index ba46c794b4e5..f2b11d1df23d 100644
--- a/include/linux/irqchip/arm-vic.h
+++ b/include/linux/irqchip/arm-vic.h
@@ -1,38 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* arch/arm/include/asm/hardware/vic.h
*
* Copyright (c) ARM Limited 2003. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_ARM_HARDWARE_VIC_H
#define __ASM_ARM_HARDWARE_VIC_H
#include <linux/types.h>
-#define VIC_RAW_STATUS 0x08
-#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */
-#define VIC_INT_ENABLE_CLEAR 0x14
-
-struct device_node;
-struct pt_regs;
-
-void __vic_init(void __iomem *base, int parent_irq, int irq_start,
- u32 vic_sources, u32 resume_sources, struct device_node *node);
void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-int vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
- u32 vic_sources, u32 resume_sources);
#endif
diff --git a/include/linux/irqchip/chained_irq.h b/include/linux/irqchip/chained_irq.h
index adf4c30f3af6..dd8b3c476666 100644
--- a/include/linux/irqchip/chained_irq.h
+++ b/include/linux/irqchip/chained_irq.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Chained IRQ handlers support.
*
* Copyright (C) 2011 ARM Ltd.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __IRQCHIP_CHAINED_IRQ_H
#define __IRQCHIP_CHAINED_IRQ_H
diff --git a/include/linux/irqchip/ingenic.h b/include/linux/irqchip/ingenic.h
deleted file mode 100644
index 0ee319a4029d..000000000000
--- a/include/linux/irqchip/ingenic.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#ifndef __LINUX_IRQCHIP_INGENIC_H__
-#define __LINUX_IRQCHIP_INGENIC_H__
-
-#include <linux/irq.h>
-
-extern void ingenic_intc_irq_suspend(struct irq_data *data);
-extern void ingenic_intc_irq_resume(struct irq_data *data);
-
-#endif
diff --git a/include/linux/irqchip/irq-bcm2836.h b/include/linux/irqchip/irq-bcm2836.h
new file mode 100644
index 000000000000..ac5719d8f56b
--- /dev/null
+++ b/include/linux/irqchip/irq-bcm2836.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Root interrupt controller for the BCM2836 (Raspberry Pi 2).
+ *
+ * Copyright 2015 Broadcom
+ */
+
+#define LOCAL_CONTROL 0x000
+#define LOCAL_PRESCALER 0x008
+
+/*
+ * The low 2 bits identify the CPU that the GPU IRQ goes to, and the
+ * next 2 bits identify the CPU that the GPU FIQ goes to.
+ */
+#define LOCAL_GPU_ROUTING 0x00c
+/* When setting bits 0-3, enables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_SET 0x010
+/* When setting bits 0-3, disables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_CLR 0x014
+/*
+ * The low 4 bits of this are the CPU's timer IRQ enables, and the
+ * next 4 bits are the CPU's timer FIQ enables (which override the IRQ
+ * bits).
+ */
+#define LOCAL_TIMER_INT_CONTROL0 0x040
+/*
+ * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and
+ * the next 4 bits are the CPU's per-mailbox FIQ enables (which
+ * override the IRQ bits).
+ */
+#define LOCAL_MAILBOX_INT_CONTROL0 0x050
+/*
+ * The CPU's interrupt status register. Bits are defined by the
+ * LOCAL_IRQ_* bits below.
+ */
+#define LOCAL_IRQ_PENDING0 0x060
+/* Same status bits as above, but for FIQ. */
+#define LOCAL_FIQ_PENDING0 0x070
+/*
+ * Mailbox write-to-set bits. There are 16 mailboxes, 4 per CPU, and
+ * these bits are organized by mailbox number and then CPU number. We
+ * use mailbox 0 for IPIs. The mailbox's interrupt is raised while
+ * any bit is set.
+ */
+#define LOCAL_MAILBOX0_SET0 0x080
+#define LOCAL_MAILBOX3_SET0 0x08c
+/* Mailbox write-to-clear bits. */
+#define LOCAL_MAILBOX0_CLR0 0x0c0
+#define LOCAL_MAILBOX3_CLR0 0x0cc
+
+#define LOCAL_IRQ_CNTPSIRQ 0
+#define LOCAL_IRQ_CNTPNSIRQ 1
+#define LOCAL_IRQ_CNTHPIRQ 2
+#define LOCAL_IRQ_CNTVIRQ 3
+#define LOCAL_IRQ_MAILBOX0 4
+#define LOCAL_IRQ_MAILBOX1 5
+#define LOCAL_IRQ_MAILBOX2 6
+#define LOCAL_IRQ_MAILBOX3 7
+#define LOCAL_IRQ_GPU_FAST 8
+#define LOCAL_IRQ_PMU_FAST 9
+#define LAST_IRQ LOCAL_IRQ_PMU_FAST
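The comments above pin down the mailbox layout: sixteen write-to-set registers, four per CPU, so CPU n's mailbox 0 sits 16 * n bytes past LOCAL_MAILBOX0_SET0 (four mailboxes times four bytes per CPU). A hedged IPI sketch, with intc_base standing in for a hypothetical ioremap() of the local-peripheral block:

	static void __iomem *intc_base;	/* hypothetical mapping */

	static void bcm2836_ipi_sketch(int cpu, unsigned int ipi)
	{
		/* Set bit "ipi" in mailbox 0 of the target CPU */
		writel(BIT(ipi), intc_base + LOCAL_MAILBOX0_SET0 + 16 * cpu);
	}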
diff --git a/include/linux/irqchip/irq-madera.h b/include/linux/irqchip/irq-madera.h
new file mode 100644
index 000000000000..1160fa3769ae
--- /dev/null
+++ b/include/linux/irqchip/irq-madera.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interrupt support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2016-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef IRQCHIP_MADERA_H
+#define IRQCHIP_MADERA_H
+
+#include <linux/interrupt.h>
+#include <linux/mfd/madera/core.h>
+
+#define MADERA_IRQ_FLL1_LOCK 0
+#define MADERA_IRQ_FLL2_LOCK 1
+#define MADERA_IRQ_FLL3_LOCK 2
+#define MADERA_IRQ_FLLAO_LOCK 3
+#define MADERA_IRQ_CLK_SYS_ERR 4
+#define MADERA_IRQ_CLK_ASYNC_ERR 5
+#define MADERA_IRQ_CLK_DSP_ERR 6
+#define MADERA_IRQ_HPDET 7
+#define MADERA_IRQ_MICDET1 8
+#define MADERA_IRQ_MICDET2 9
+#define MADERA_IRQ_JD1_RISE 10
+#define MADERA_IRQ_JD1_FALL 11
+#define MADERA_IRQ_JD2_RISE 12
+#define MADERA_IRQ_JD2_FALL 13
+#define MADERA_IRQ_MICD_CLAMP_RISE 14
+#define MADERA_IRQ_MICD_CLAMP_FALL 15
+#define MADERA_IRQ_DRC2_SIG_DET 16
+#define MADERA_IRQ_DRC1_SIG_DET 17
+#define MADERA_IRQ_ASRC1_IN1_LOCK 18
+#define MADERA_IRQ_ASRC1_IN2_LOCK 19
+#define MADERA_IRQ_ASRC2_IN1_LOCK 20
+#define MADERA_IRQ_ASRC2_IN2_LOCK 21
+#define MADERA_IRQ_DSP_IRQ1 22
+#define MADERA_IRQ_DSP_IRQ2 23
+#define MADERA_IRQ_DSP_IRQ3 24
+#define MADERA_IRQ_DSP_IRQ4 25
+#define MADERA_IRQ_DSP_IRQ5 26
+#define MADERA_IRQ_DSP_IRQ6 27
+#define MADERA_IRQ_DSP_IRQ7 28
+#define MADERA_IRQ_DSP_IRQ8 29
+#define MADERA_IRQ_DSP_IRQ9 30
+#define MADERA_IRQ_DSP_IRQ10 31
+#define MADERA_IRQ_DSP_IRQ11 32
+#define MADERA_IRQ_DSP_IRQ12 33
+#define MADERA_IRQ_DSP_IRQ13 34
+#define MADERA_IRQ_DSP_IRQ14 35
+#define MADERA_IRQ_DSP_IRQ15 36
+#define MADERA_IRQ_DSP_IRQ16 37
+#define MADERA_IRQ_HP1L_SC 38
+#define MADERA_IRQ_HP1R_SC 39
+#define MADERA_IRQ_HP2L_SC 40
+#define MADERA_IRQ_HP2R_SC 41
+#define MADERA_IRQ_HP3L_SC 42
+#define MADERA_IRQ_HP3R_SC 43
+#define MADERA_IRQ_SPKOUTL_SC 44
+#define MADERA_IRQ_SPKOUTR_SC 45
+#define MADERA_IRQ_HP1L_ENABLE_DONE 46
+#define MADERA_IRQ_HP1R_ENABLE_DONE 47
+#define MADERA_IRQ_HP2L_ENABLE_DONE 48
+#define MADERA_IRQ_HP2R_ENABLE_DONE 49
+#define MADERA_IRQ_HP3L_ENABLE_DONE 50
+#define MADERA_IRQ_HP3R_ENABLE_DONE 51
+#define MADERA_IRQ_SPKOUTL_ENABLE_DONE 52
+#define MADERA_IRQ_SPKOUTR_ENABLE_DONE 53
+#define MADERA_IRQ_SPK_SHUTDOWN 54
+#define MADERA_IRQ_SPK_OVERHEAT 55
+#define MADERA_IRQ_SPK_OVERHEAT_WARN 56
+#define MADERA_IRQ_GPIO1 57
+#define MADERA_IRQ_GPIO2 58
+#define MADERA_IRQ_GPIO3 59
+#define MADERA_IRQ_GPIO4 60
+#define MADERA_IRQ_GPIO5 61
+#define MADERA_IRQ_GPIO6 62
+#define MADERA_IRQ_GPIO7 63
+#define MADERA_IRQ_GPIO8 64
+#define MADERA_IRQ_DSP1_BUS_ERR 65
+#define MADERA_IRQ_DSP2_BUS_ERR 66
+#define MADERA_IRQ_DSP3_BUS_ERR 67
+#define MADERA_IRQ_DSP4_BUS_ERR 68
+#define MADERA_IRQ_DSP5_BUS_ERR 69
+#define MADERA_IRQ_DSP6_BUS_ERR 70
+#define MADERA_IRQ_DSP7_BUS_ERR 71
+
+#define MADERA_NUM_IRQ 72
+
+/*
+ * These wrapper functions are for use by other child drivers of the
+ * same parent MFD.
+ */
+static inline int madera_get_irq_mapping(struct madera *madera, int irq)
+{
+ if (!madera->irq_dev)
+ return -ENODEV;
+
+ return regmap_irq_get_virq(madera->irq_data, irq);
+}
+
+static inline int madera_request_irq(struct madera *madera, int irq,
+ const char *name,
+ irq_handler_t handler, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT, name,
+ data);
+}
+
+static inline void madera_free_irq(struct madera *madera, int irq, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+
+static inline int madera_set_irq_wake(struct madera *madera, int irq, int on)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return irq_set_irq_wake(irq, on);
+}
+
+#endif
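The wrappers map a MADERA_IRQ_* index to the virtual IRQ of the shared regmap-irq chip before calling the generic helpers, so MFD children never deal in raw hardware numbers. A usage sketch for a hypothetical child driver (handler body and priv pointer are placeholders):

	static irqreturn_t hpdet_thread_fn(int irq, void *data)
	{
		/* headphone-detect handling would go here */
		return IRQ_HANDLED;
	}

	static int hpdet_attach_sketch(struct madera *madera, void *priv)
	{
		/* Threaded, IRQF_ONESHOT request via the wrapper above */
		return madera_request_irq(madera, MADERA_IRQ_HPDET, "HPDET",
					  hpdet_thread_fn, priv);
	}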
diff --git a/include/linux/irqchip/irq-msi-lib.h b/include/linux/irqchip/irq-msi-lib.h
new file mode 100644
index 000000000000..224ac28e88d7
--- /dev/null
+++ b/include/linux/irqchip/irq-msi-lib.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#ifndef _IRQCHIP_IRQ_MSI_LIB_H
+#define _IRQCHIP_IRQ_MSI_LIB_H
+
+#include <linux/bits.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+
+#ifdef CONFIG_PCI_MSI
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+#else
+#define MATCH_PCI_MSI (0)
+#endif
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+struct msi_domain_info;
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info);
+
+#endif /* _IRQCHIP_IRQ_MSI_LIB_H */
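msi_lib_irq_domain_select() matches the signature of irq_domain_ops::select, so a parent MSI domain can plug it in verbatim, while msi_lib_init_dev_msi_info() is intended for the init_dev_msi_info callback of struct msi_parent_ops. A hookup sketch patterned on in-tree users; the msi_parent_ops field names are assumed from <linux/msi.h> and the flag choices are illustrative:

	static const struct irq_domain_ops parent_domain_ops_sketch = {
		/* .alloc/.free/.translate elided */
		.select = msi_lib_irq_domain_select,
	};

	static const struct msi_parent_ops parent_msi_ops_sketch = {
		.supported_flags = MSI_GENERIC_FLAGS_MASK,
		.bus_select_token = DOMAIN_BUS_NEXUS,
		.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
		.init_dev_msi_info = msi_lib_init_dev_msi_info,
	};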
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
index 2e3d1afeb674..dca379c0d7eb 100644
--- a/include/linux/irqchip/irq-omap-intc.h
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -1,25 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/**
* irq-omap-intc.h - INTC Idle Functions
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
-void omap3_init_irq(void);
-
int omap_irq_pending(void);
void omap_intc_save_context(void);
void omap_intc_restore_context(void);
diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h
deleted file mode 100644
index 87433a5d1285..000000000000
--- a/include/linux/irqchip/irq-partition-percpu.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2016 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/fwnode.h>
-#include <linux/cpumask.h>
-#include <linux/irqdomain.h>
-
-struct partition_affinity {
- cpumask_t mask;
- void *partition_id;
-};
-
-struct partition_desc;
-
-#ifdef CONFIG_PARTITION_PERCPU
-int partition_translate_id(struct partition_desc *desc, void *partition_id);
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops);
-struct irq_domain *partition_get_domain(struct partition_desc *dsc);
-#else
-static inline int partition_translate_id(struct partition_desc *desc,
- void *partition_id)
-{
- return -EINVAL;
-}
-
-static inline
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops)
-{
- return NULL;
-}
-
-static inline
-struct irq_domain *partition_get_domain(struct partition_desc *dsc)
-{
- return NULL;
-}
-#endif
diff --git a/include/linux/irqchip/irq-renesas-rzv2h.h b/include/linux/irqchip/irq-renesas-rzv2h.h
new file mode 100644
index 000000000000..618a60d2eac0
--- /dev/null
+++ b/include/linux/irqchip/irq-renesas-rzv2h.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ/V2H(P) Interrupt Control Unit (ICU)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef __LINUX_IRQ_RENESAS_RZV2H
+#define __LINUX_IRQ_RENESAS_RZV2H
+
+#include <linux/platform_device.h>
+
+#define RZV2H_ICU_DMAC_REQ_NO_DEFAULT 0x3ff
+
+#ifdef CONFIG_RENESAS_RZV2H_ICU
+void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
+ u16 req_no);
+#else
+static inline void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index,
+ u8 dmac_channel, u16 req_no) { }
+#endif
+
+#endif /* __LINUX_IRQ_RENESAS_RZV2H */
diff --git a/include/linux/irqchip/irq-sa11x0.h b/include/linux/irqchip/irq-sa11x0.h
index 15db6829c1e4..68fd2d73b683 100644
--- a/include/linux/irqchip/irq-sa11x0.h
+++ b/include/linux/irqchip/irq-sa11x0.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Generic IRQ handling for the SA11x0.
*
* Copyright (C) 2015 Dmitry Eremin-Solenikov
* Copyright (C) 1999-2001 Nicolas Pitre
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H
diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h
deleted file mode 100644
index 697af0fe7c5a..000000000000
--- a/include/linux/irqchip/metag-ext.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2012 Imagination Technologies
- */
-
-#ifndef _LINUX_IRQCHIP_METAG_EXT_H_
-#define _LINUX_IRQCHIP_METAG_EXT_H_
-
-struct irq_data;
-struct platform_device;
-
-/* called from core irq code at init */
-int init_external_IRQ(void);
-
-/*
- * called from SoC init_irq() callback to dynamically indicate the lack of
- * HWMASKEXT registers.
- */
-void meta_intc_no_mask(void);
-
-/*
- * These allow SoCs to specialise the interrupt controller from their init_irq
- * callbacks.
- */
-
-extern struct irq_chip meta_intc_edge_chip;
-extern struct irq_chip meta_intc_level_chip;
-
-/* this should be called in the mask callback */
-void meta_intc_mask_irq_simple(struct irq_data *data);
-/* this should be called in the unmask callback */
-void meta_intc_unmask_irq_simple(struct irq_data *data);
-
-#endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */
diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h
deleted file mode 100644
index 4ebdfb3101ab..000000000000
--- a/include/linux/irqchip/metag.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2011 Imagination Technologies
- */
-
-#ifndef _LINUX_IRQCHIP_METAG_H_
-#define _LINUX_IRQCHIP_METAG_H_
-
-#include <linux/errno.h>
-
-#ifdef CONFIG_METAG_PERFCOUNTER_IRQS
-extern int init_internal_IRQ(void);
-extern int internal_irq_map(unsigned int hw);
-#else
-static inline int init_internal_IRQ(void)
-{
- return 0;
-}
-static inline int internal_irq_map(unsigned int hw)
-{
- return -EINVAL;
-}
-#endif
-
-#endif /* _LINUX_IRQCHIP_METAG_H_ */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
deleted file mode 100644
index 2b0e56619e53..000000000000
--- a/include/linux/irqchip/mips-gic.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000, 07 MIPS Technologies, Inc.
- */
-#ifndef __LINUX_IRQCHIP_MIPS_GIC_H
-#define __LINUX_IRQCHIP_MIPS_GIC_H
-
-#include <linux/clocksource.h>
-#include <linux/ioport.h>
-
-#define GIC_MAX_INTRS 256
-
-/* Constants */
-#define GIC_POL_POS 1
-#define GIC_POL_NEG 0
-#define GIC_TRIG_EDGE 1
-#define GIC_TRIG_LEVEL 0
-#define GIC_TRIG_DUAL_ENABLE 1
-#define GIC_TRIG_DUAL_DISABLE 0
-
-#define MSK(n) ((1 << (n)) - 1)
-
-/* Accessors */
-#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS)
-
-/* GIC Address Space */
-#define SHARED_SECTION_OFS 0x0000
-#define SHARED_SECTION_SIZE 0x8000
-#define VPE_LOCAL_SECTION_OFS 0x8000
-#define VPE_LOCAL_SECTION_SIZE 0x4000
-#define VPE_OTHER_SECTION_OFS 0xc000
-#define VPE_OTHER_SECTION_SIZE 0x4000
-#define USM_VISIBLE_SECTION_OFS 0x10000
-#define USM_VISIBLE_SECTION_SIZE 0x10000
-
-/* Register Map for Shared Section */
-
-#define GIC_SH_CONFIG_OFS 0x0000
-
-/* Shared Global Counter */
-#define GIC_SH_COUNTER_31_00_OFS 0x0010
-/* 64-bit counter register for CM3 */
-#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS
-#define GIC_SH_COUNTER_63_32_OFS 0x0014
-#define GIC_SH_REVISIONID_OFS 0x0020
-
-/* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr) ({ \
- unsigned bits = mips_cm_is64 ? 64 : 32; \
- unsigned reg_idx = (intr) / bits; \
- unsigned reg_width = bits / 8; \
- \
- reg_idx * reg_width; \
-})
-#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))
-
-/* Polarity : Reset Value is always 0 */
-#define GIC_SH_SET_POLARITY_OFS 0x0100
-
-/* Triggering : Reset Value is always 0 */
-#define GIC_SH_SET_TRIGGER_OFS 0x0180
-
-/* Dual edge triggering : Reset Value is always 0 */
-#define GIC_SH_SET_DUAL_OFS 0x0200
-
-/* Set/Clear corresponding bit in Edge Detect Register */
-#define GIC_SH_WEDGE_OFS 0x0280
-
-/* Mask manipulation */
-#define GIC_SH_RMASK_OFS 0x0300
-#define GIC_SH_SMASK_OFS 0x0380
-
-/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
-#define GIC_SH_MASK_OFS 0x0400
-
-/* Pending Global Interrupts (RO) */
-#define GIC_SH_PEND_OFS 0x0480
-
-/* Maps Interrupt X to a Pin */
-#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
-#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr))
-
-/* Maps Interrupt X to a VPE */
-#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
-#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
- ((32 * (intr)) + (((vpe) / 32) * 4))
-#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
-
-/* Register Map for Local Section */
-#define GIC_VPE_CTL_OFS 0x0000
-#define GIC_VPE_PEND_OFS 0x0004
-#define GIC_VPE_MASK_OFS 0x0008
-#define GIC_VPE_RMASK_OFS 0x000c
-#define GIC_VPE_SMASK_OFS 0x0010
-#define GIC_VPE_WD_MAP_OFS 0x0040
-#define GIC_VPE_COMPARE_MAP_OFS 0x0044
-#define GIC_VPE_TIMER_MAP_OFS 0x0048
-#define GIC_VPE_FDC_MAP_OFS 0x004c
-#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
-#define GIC_VPE_SWINT0_MAP_OFS 0x0054
-#define GIC_VPE_SWINT1_MAP_OFS 0x0058
-#define GIC_VPE_OTHER_ADDR_OFS 0x0080
-#define GIC_VP_IDENT_OFS 0x0088
-#define GIC_VPE_WD_CONFIG0_OFS 0x0090
-#define GIC_VPE_WD_COUNT0_OFS 0x0094
-#define GIC_VPE_WD_INITIAL0_OFS 0x0098
-#define GIC_VPE_COMPARE_LO_OFS 0x00a0
-/* 64-bit Compare register on CM3 */
-#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS
-#define GIC_VPE_COMPARE_HI_OFS 0x00a4
-
-#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
-#define GIC_VPE_EIC_SS(intr) (4 * (intr))
-
-#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800
-#define GIC_VPE_EIC_VEC(intr) (4 * (intr))
-
-#define GIC_VPE_TENABLE_NMI_OFS 0x1000
-#define GIC_VPE_TENABLE_YQ_OFS 0x1004
-#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080
-#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084
-
-/* User Mode Visible Section Register Map */
-#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000
-#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004
-
-/* Masks */
-#define GIC_SH_CONFIG_COUNTSTOP_SHF 28
-#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
-
-#define GIC_SH_CONFIG_COUNTBITS_SHF 24
-#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
-
-#define GIC_SH_CONFIG_NUMINTRS_SHF 16
-#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
-
-#define GIC_SH_CONFIG_NUMVPES_SHF 0
-#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
-
-#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31))
-#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31))
-
-#define GIC_MAP_TO_PIN_SHF 31
-#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF)
-#define GIC_MAP_TO_NMI_SHF 30
-#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF)
-#define GIC_MAP_TO_YQ_SHF 29
-#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF)
-#define GIC_MAP_SHF 0
-#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF)
-
-/* GIC_VPE_CTL Masks */
-#define GIC_VPE_CTL_FDC_RTBL_SHF 4
-#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF)
-#define GIC_VPE_CTL_SWINT_RTBL_SHF 3
-#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF)
-#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2
-#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
-#define GIC_VPE_CTL_TIMER_RTBL_SHF 1
-#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
-#define GIC_VPE_CTL_EIC_MODE_SHF 0
-#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
-
-/* GIC_VPE_PEND Masks */
-#define GIC_VPE_PEND_WD_SHF 0
-#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF)
-#define GIC_VPE_PEND_CMP_SHF 1
-#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF)
-#define GIC_VPE_PEND_TIMER_SHF 2
-#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF)
-#define GIC_VPE_PEND_PERFCOUNT_SHF 3
-#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
-#define GIC_VPE_PEND_SWINT0_SHF 4
-#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
-#define GIC_VPE_PEND_SWINT1_SHF 5
-#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
-#define GIC_VPE_PEND_FDC_SHF 6
-#define GIC_VPE_PEND_FDC_MSK (MSK(1) << GIC_VPE_PEND_FDC_SHF)
-
-/* GIC_VPE_RMASK Masks */
-#define GIC_VPE_RMASK_WD_SHF 0
-#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF)
-#define GIC_VPE_RMASK_CMP_SHF 1
-#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF)
-#define GIC_VPE_RMASK_TIMER_SHF 2
-#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
-#define GIC_VPE_RMASK_PERFCNT_SHF 3
-#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
-#define GIC_VPE_RMASK_SWINT0_SHF 4
-#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
-#define GIC_VPE_RMASK_SWINT1_SHF 5
-#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
-#define GIC_VPE_RMASK_FDC_SHF 6
-#define GIC_VPE_RMASK_FDC_MSK (MSK(1) << GIC_VPE_RMASK_FDC_SHF)
-
-/* GIC_VPE_SMASK Masks */
-#define GIC_VPE_SMASK_WD_SHF 0
-#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF)
-#define GIC_VPE_SMASK_CMP_SHF 1
-#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF)
-#define GIC_VPE_SMASK_TIMER_SHF 2
-#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
-#define GIC_VPE_SMASK_PERFCNT_SHF 3
-#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
-#define GIC_VPE_SMASK_SWINT0_SHF 4
-#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
-#define GIC_VPE_SMASK_SWINT1_SHF 5
-#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
-#define GIC_VPE_SMASK_FDC_SHF 6
-#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
-
-/* GIC_VP_IDENT fields */
-#define GIC_VP_IDENT_VCNUM_SHF 0
-#define GIC_VP_IDENT_VCNUM_MSK (MSK(6) << GIC_VP_IDENT_VCNUM_SHF)
-
-/* GIC nomenclature for Core Interrupt Pins. */
-#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
-#define GIC_CPU_INT1 1 /* . */
-#define GIC_CPU_INT2 2 /* . */
-#define GIC_CPU_INT3 3 /* . */
-#define GIC_CPU_INT4 4 /* . */
-#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
-
-/* Add 2 to convert GIC CPU pin to core interrupt */
-#define GIC_CPU_PIN_OFFSET 2
-
-/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
-#define GIC_CPU_TO_VEC_OFFSET 2
-
-/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
-#define GIC_PIN_TO_VEC_OFFSET 1
-
-/* Local GIC interrupts. */
-#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
-#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */
-#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */
-#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */
-#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */
-#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */
-#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */
-#define GIC_NUM_LOCAL_INTRS 7
-
-/* Convert between local/shared IRQ number and GIC HW IRQ number. */
-#define GIC_LOCAL_HWIRQ_BASE 0
-#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
-#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
-#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
-#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
-#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
-
-#ifdef CONFIG_MIPS_GIC
-
-extern unsigned int gic_present;
-
-extern void gic_init(unsigned long gic_base_addr,
- unsigned long gic_addrspace_size, unsigned int cpu_vec,
- unsigned int irqbase);
-extern u64 gic_read_count(void);
-extern unsigned int gic_get_count_width(void);
-extern u64 gic_read_compare(void);
-extern void gic_write_compare(u64 cnt);
-extern void gic_write_cpu_compare(u64 cnt, int cpu);
-extern void gic_start_count(void);
-extern void gic_stop_count(void);
-extern int gic_get_c0_compare_int(void);
-extern int gic_get_c0_perfcount_int(void);
-extern int gic_get_c0_fdc_int(void);
-extern int gic_get_usm_range(struct resource *gic_usm_res);
-
-#else /* CONFIG_MIPS_GIC */
-
-#define gic_present 0
-
-static inline int gic_get_usm_range(struct resource *gic_usm_res)
-{
- /* Shouldn't be called. */
- return -1;
-}
-
-#endif /* CONFIG_MIPS_GIC */
-
-/**
- * gic_read_local_vp_id() - read the local VPs VCNUM
- *
- * Read the VCNUM of the local VP from the GIC_VP_IDENT register and
- * return it to the caller. This ID should be used to refer to the VP
- * via the GICs VP-other region, or when calculating an offset to a
- * bit representing the VP in interrupt masks.
- *
- * Return: The VCNUM value for the local VP.
- */
-extern unsigned gic_read_local_vp_id(void);
-
-#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
deleted file mode 100644
index c78a8921185d..000000000000
--- a/include/linux/irqchip/mmp.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __IRQCHIP_MMP_H
-#define __IRQCHIP_MMP_H
-
-extern struct irq_chip icu_irq_chip;
-
-#endif /* __IRQCHIP_MMP_H */
diff --git a/include/linux/irqchip/mxs.h b/include/linux/irqchip/mxs.h
deleted file mode 100644
index 9039a538a919..000000000000
--- a/include/linux/irqchip/mxs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_IRQCHIP_MXS_H
-#define __LINUX_IRQCHIP_MXS_H
-
-extern void icoll_handle_irq(struct pt_regs *);
-
-#endif
diff --git a/include/linux/irqchip/riscv-aplic.h b/include/linux/irqchip/riscv-aplic.h
new file mode 100644
index 000000000000..ec8f7df50583
--- /dev/null
+++ b/include/linux/irqchip/riscv-aplic.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __LINUX_IRQCHIP_RISCV_APLIC_H
+#define __LINUX_IRQCHIP_RISCV_APLIC_H
+
+#include <linux/bitops.h>
+
+#define APLIC_MAX_IDC BIT(14)
+#define APLIC_MAX_SOURCE 1024
+
+#define APLIC_DOMAINCFG 0x0000
+#define APLIC_DOMAINCFG_RDONLY 0x80000000
+#define APLIC_DOMAINCFG_IE BIT(8)
+#define APLIC_DOMAINCFG_DM BIT(2)
+#define APLIC_DOMAINCFG_BE BIT(0)
+
+#define APLIC_SOURCECFG_BASE 0x0004
+#define APLIC_SOURCECFG_D BIT(10)
+#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff
+#define APLIC_SOURCECFG_SM_MASK 0x00000007
+#define APLIC_SOURCECFG_SM_INACTIVE 0x0
+#define APLIC_SOURCECFG_SM_DETACH 0x1
+#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4
+#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5
+#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6
+#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7
+
+#define APLIC_MMSICFGADDR 0x1bc0
+#define APLIC_MMSICFGADDRH 0x1bc4
+#define APLIC_SMSICFGADDR 0x1bc8
+#define APLIC_SMSICFGADDRH 0x1bcc
+
+#ifdef CONFIG_RISCV_M_MODE
+#define APLIC_xMSICFGADDR APLIC_MMSICFGADDR
+#define APLIC_xMSICFGADDRH APLIC_MMSICFGADDRH
+#else
+#define APLIC_xMSICFGADDR APLIC_SMSICFGADDR
+#define APLIC_xMSICFGADDRH APLIC_SMSICFGADDRH
+#endif
+
+#define APLIC_xMSICFGADDRH_L BIT(31)
+#define APLIC_xMSICFGADDRH_HHXS_MASK 0x1f
+#define APLIC_xMSICFGADDRH_HHXS_SHIFT 24
+#define APLIC_xMSICFGADDRH_HHXS (APLIC_xMSICFGADDRH_HHXS_MASK << \
+ APLIC_xMSICFGADDRH_HHXS_SHIFT)
+#define APLIC_xMSICFGADDRH_LHXS_MASK 0x7
+#define APLIC_xMSICFGADDRH_LHXS_SHIFT 20
+#define APLIC_xMSICFGADDRH_LHXS (APLIC_xMSICFGADDRH_LHXS_MASK << \
+ APLIC_xMSICFGADDRH_LHXS_SHIFT)
+#define APLIC_xMSICFGADDRH_HHXW_MASK 0x7
+#define APLIC_xMSICFGADDRH_HHXW_SHIFT 16
+#define APLIC_xMSICFGADDRH_HHXW (APLIC_xMSICFGADDRH_HHXW_MASK << \
+ APLIC_xMSICFGADDRH_HHXW_SHIFT)
+#define APLIC_xMSICFGADDRH_LHXW_MASK 0xf
+#define APLIC_xMSICFGADDRH_LHXW_SHIFT 12
+#define APLIC_xMSICFGADDRH_LHXW (APLIC_xMSICFGADDRH_LHXW_MASK << \
+ APLIC_xMSICFGADDRH_LHXW_SHIFT)
+#define APLIC_xMSICFGADDRH_BAPPN_MASK 0xfff
+#define APLIC_xMSICFGADDRH_BAPPN_SHIFT 0
+#define APLIC_xMSICFGADDRH_BAPPN (APLIC_xMSICFGADDRH_BAPPN_MASK << \
+ APLIC_xMSICFGADDRH_BAPPN_SHIFT)
+
+#define APLIC_xMSICFGADDR_PPN_SHIFT 12
+
+#define APLIC_xMSICFGADDR_PPN_HART(__lhxs) \
+ (BIT(__lhxs) - 1)
+
+#define APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) \
+ (BIT(__lhxw) - 1)
+#define APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs) \
+ ((__lhxs))
+#define APLIC_xMSICFGADDR_PPN_LHX(__lhxw, __lhxs) \
+ (APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) << \
+ APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs))
+
+#define APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) \
+ (BIT(__hhxw) - 1)
+#define APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs) \
+ ((__hhxs) + APLIC_xMSICFGADDR_PPN_SHIFT)
+#define APLIC_xMSICFGADDR_PPN_HHX(__hhxw, __hhxs) \
+ (APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) << \
+ APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs))
+
+#define APLIC_IRQBITS_PER_REG 32
+
+#define APLIC_SETIP_BASE 0x1c00
+#define APLIC_SETIPNUM 0x1cdc
+
+#define APLIC_CLRIP_BASE 0x1d00
+#define APLIC_CLRIPNUM 0x1ddc
+
+#define APLIC_SETIE_BASE 0x1e00
+#define APLIC_SETIENUM 0x1edc
+
+#define APLIC_CLRIE_BASE 0x1f00
+#define APLIC_CLRIENUM 0x1fdc
+
+#define APLIC_SETIPNUM_LE 0x2000
+#define APLIC_SETIPNUM_BE 0x2004
+
+#define APLIC_GENMSI 0x3000
+
+#define APLIC_TARGET_BASE 0x3004
+#define APLIC_TARGET_HART_IDX_SHIFT 18
+#define APLIC_TARGET_HART_IDX_MASK 0x3fff
+#define APLIC_TARGET_HART_IDX (APLIC_TARGET_HART_IDX_MASK << \
+ APLIC_TARGET_HART_IDX_SHIFT)
+#define APLIC_TARGET_GUEST_IDX_SHIFT 12
+#define APLIC_TARGET_GUEST_IDX_MASK 0x3f
+#define APLIC_TARGET_GUEST_IDX (APLIC_TARGET_GUEST_IDX_MASK << \
+ APLIC_TARGET_GUEST_IDX_SHIFT)
+#define APLIC_TARGET_IPRIO_SHIFT 0
+#define APLIC_TARGET_IPRIO_MASK 0xff
+#define APLIC_TARGET_IPRIO (APLIC_TARGET_IPRIO_MASK << \
+ APLIC_TARGET_IPRIO_SHIFT)
+#define APLIC_TARGET_EIID_SHIFT 0
+#define APLIC_TARGET_EIID_MASK 0x7ff
+#define APLIC_TARGET_EIID (APLIC_TARGET_EIID_MASK << \
+ APLIC_TARGET_EIID_SHIFT)
+
+#define APLIC_IDC_BASE 0x4000
+#define APLIC_IDC_SIZE 32
+
+#define APLIC_IDC_IDELIVERY 0x00
+
+#define APLIC_IDC_IFORCE 0x04
+
+#define APLIC_IDC_ITHRESHOLD 0x08
+
+#define APLIC_IDC_TOPI 0x18
+#define APLIC_IDC_TOPI_ID_SHIFT 16
+#define APLIC_IDC_TOPI_ID_MASK 0x3ff
+#define APLIC_IDC_TOPI_ID (APLIC_IDC_TOPI_ID_MASK << \
+ APLIC_IDC_TOPI_ID_SHIFT)
+#define APLIC_IDC_TOPI_PRIO_SHIFT 0
+#define APLIC_IDC_TOPI_PRIO_MASK 0xff
+#define APLIC_IDC_TOPI_PRIO (APLIC_IDC_TOPI_PRIO_MASK << \
+ APLIC_IDC_TOPI_PRIO_SHIFT)
+
+#define APLIC_IDC_CLAIMI 0x1c
+
+#endif
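The PPN helper macros capture how a flat hart index is split between the low (LHXW/LHXS) and high (HHXW/HHXS) hierarchy fields of the MSI target address. A hedged sketch of composing a target physical address; extraction of base_ppn and the geometry fields from the xMSICFGADDR registers is omitted:

	static phys_addr_t aplic_target_pa_sketch(u64 base_ppn, u32 hart_index,
						  u32 lhxw, u32 lhxs,
						  u32 hhxw, u32 hhxs)
	{
		u64 group, hart, ppn = base_ppn;

		group = (hart_index >> lhxw) & APLIC_xMSICFGADDR_PPN_HHX_MASK(hhxw);
		hart = hart_index & APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw);

		ppn |= group << APLIC_xMSICFGADDR_PPN_HHX_SHIFT(hhxs);
		ppn |= hart << APLIC_xMSICFGADDR_PPN_LHX_SHIFT(lhxs);

		/* PPN counts 4KiB pages */
		return (phys_addr_t)(ppn << APLIC_xMSICFGADDR_PPN_SHIFT);
	}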
diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h
new file mode 100644
index 000000000000..7f3ff5c5ea53
--- /dev/null
+++ b/include/linux/irqchip/riscv-imsic.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __LINUX_IRQCHIP_RISCV_IMSIC_H
+#define __LINUX_IRQCHIP_RISCV_IMSIC_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
+
+#define IMSIC_MMIO_PAGE_SHIFT 12
+#define IMSIC_MMIO_PAGE_SZ BIT(IMSIC_MMIO_PAGE_SHIFT)
+#define IMSIC_MMIO_PAGE_LE 0x00
+#define IMSIC_MMIO_PAGE_BE 0x04
+
+#define IMSIC_MIN_ID 63
+#define IMSIC_MAX_ID 2048
+
+#define IMSIC_EIDELIVERY 0x70
+
+#define IMSIC_EITHRESHOLD 0x72
+
+#define IMSIC_EIP0 0x80
+#define IMSIC_EIP63 0xbf
+#define IMSIC_EIPx_BITS 32
+
+#define IMSIC_EIE0 0xc0
+#define IMSIC_EIE63 0xff
+#define IMSIC_EIEx_BITS 32
+
+#define IMSIC_FIRST IMSIC_EIDELIVERY
+#define IMSIC_LAST IMSIC_EIE63
+
+#define IMSIC_MMIO_SETIPNUM_LE 0x00
+#define IMSIC_MMIO_SETIPNUM_BE 0x04
+
+struct imsic_local_config {
+ phys_addr_t msi_pa;
+ void __iomem *msi_va;
+};
+
+struct imsic_global_config {
+ /*
+ * MSI Target Address Scheme
+ *
+ * XLEN-1 12 0
+ * | | |
+ * -------------------------------------------------------------
+ * |xxxxxx|Group Index|xxxxxxxxxxx|HART Index|Guest Index| 0 |
+ * -------------------------------------------------------------
+ */
+
+ /* Bits representing Guest index, HART index, and Group index */
+ u32 guest_index_bits;
+ u32 hart_index_bits;
+ u32 group_index_bits;
+ u32 group_index_shift;
+
+ /* Global base address matching all target MSI addresses */
+ phys_addr_t base_addr;
+
+ /* Number of interrupt identities */
+ u32 nr_ids;
+
+ /* Number of guest interrupt identities */
+ u32 nr_guest_ids;
+
+ /* Per-CPU IMSIC addresses */
+ struct imsic_local_config __percpu *local;
+};
+
+#ifdef CONFIG_RISCV_IMSIC
+
+const struct imsic_global_config *imsic_get_global_config(void);
+
+#else
+
+static inline const struct imsic_global_config *imsic_get_global_config(void)
+{
+ return NULL;
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_RISCV_IMSIC)
+int imsic_platform_acpi_probe(struct fwnode_handle *fwnode);
+struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev);
+#else
+static inline struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) { return NULL; }
+#endif
+
+#endif
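Consumers such as an APLIC driver running in MSI mode discover the IMSIC geometry through this single accessor; a NULL return means no IMSIC was probed. A small sketch:

	static int imsic_consumer_sketch(void)
	{
		const struct imsic_global_config *gc = imsic_get_global_config();

		if (!gc)
			return -ENODEV;	/* no IMSIC available */

		pr_info("IMSIC base %pa, %u ids (%u per guest)\n",
			&gc->base_addr, gc->nr_ids, gc->nr_guest_ids);
		return 0;
	}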
diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h
deleted file mode 100644
index 1fac9651d3ca..000000000000
--- a/include/linux/irqchip/versatile-fpga.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef PLAT_FPGA_IRQ_H
-#define PLAT_FPGA_IRQ_H
-
-struct device_node;
-struct pt_regs;
-
-void fpga_handle_irq(struct pt_regs *regs);
-void fpga_irq_init(void __iomem *, const char *, int, int, u32,
- struct device_node *node);
-int fpga_irq_of_init(struct device_node *node,
- struct device_node *parent);
-
-#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 3e90a094798d..37e0b5b5600a 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H
@@ -17,21 +18,33 @@ struct irq_domain;
struct pt_regs;
/**
+ * struct irqstat - interrupt statistics
+ * @cnt: real-time interrupt count
+ * @ref: snapshot of interrupt count
+ */
+struct irqstat {
+ unsigned int cnt;
+#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
+ unsigned int ref;
+#endif
+};
+
+/**
* struct irq_desc - interrupt descriptor
* @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler
- * @preflow_handler: handler called before the flow handler (currently used by sparc)
* @action: the irq action chain
- * @status: status information
+ * @status_use_accessors: status information
* @core_internal_state__do_not_mess_with_it: core internal status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
+ * @tot_count: stats field for non-percpu irqs
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @threads_handled: stats field for deferred spurious detection of threaded handlers
- * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
* @lock: locking for SMP
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
@@ -54,16 +67,14 @@ struct pt_regs;
struct irq_desc {
struct irq_common_data irq_common_data;
struct irq_data irq_data;
- unsigned int __percpu *kstat_irqs;
+ struct irqstat __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
-#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
- irq_preflow_handler_t preflow_handler;
-#endif
struct irqaction *action; /* IRQ action list */
unsigned int status_use_accessors;
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
+ unsigned int tot_count;
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
@@ -71,7 +82,6 @@ struct irq_desc {
int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
- const struct cpumask *percpu_affinity;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
@@ -93,6 +103,7 @@ struct irq_desc {
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
struct dentry *debugfs_file;
+ const char *dev_name;
#endif
#ifdef CONFIG_SPARSE_IRQ
struct rcu_head rcu;
@@ -102,6 +113,9 @@ struct irq_desc {
int parent_irq;
struct module *owner;
const char *name;
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+ struct hlist_node resend_node;
+#endif
} ____cacheline_internodealigned_in_smp;
#ifdef CONFIG_SPARSE_IRQ
@@ -113,6 +127,12 @@ static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif
+static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
+ unsigned int cpu)
+{
+ return desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
+}
+
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
return container_of(data->common, struct irq_desc, irq_common_data);
@@ -143,11 +163,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
return desc->irq_common_data.handler_data;
}
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
- return desc->irq_common_data.msi_desc;
-}
-
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt.
@@ -157,34 +172,25 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc)
desc->handle_irq(desc);
}
+int handle_irq_desc(struct irq_desc *desc);
int generic_handle_irq(unsigned int irq);
+int generic_handle_irq_safe(unsigned int irq);
-#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+#ifdef CONFIG_IRQ_DOMAIN
/*
 * Convert a HW interrupt number to a logical one using an IRQ domain,
 * and handle the resulting interrupt number. Return -EINVAL if
- * conversion failed. Providing a NULL domain indicates that the
- * conversion has already been done.
+ * conversion failed.
*/
-int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
- bool lookup, struct pt_regs *regs);
-
-static inline int handle_domain_irq(struct irq_domain *domain,
- unsigned int hwirq, struct pt_regs *regs)
-{
- return __handle_domain_irq(domain, hwirq, true, regs);
-}
+int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);
#endif
/* Test to see if a driver has successfully requested an irq */
static inline int irq_desc_has_action(struct irq_desc *desc)
{
- return desc->action != NULL;
-}
-
-static inline int irq_has_action(unsigned int irq)
-{
- return irq_desc_has_action(irq_to_desc(irq));
+ return desc && desc->action != NULL;
}
/**
@@ -218,50 +224,42 @@ static inline void irq_set_handler_locked(struct irq_data *data,
* Must be called with irq_desc locked and valid parameters.
*/
static inline void
-irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
+irq_set_chip_handler_name_locked(struct irq_data *data,
+ const struct irq_chip *chip,
irq_flow_handler_t handler, const char *name)
{
struct irq_desc *desc = irq_data_to_desc(data);
desc->handle_irq = handler;
desc->name = name;
- data->chip = chip;
+ data->chip = (struct irq_chip *)chip;
}
-static inline int irq_balancing_disabled(unsigned int irq)
-{
- struct irq_desc *desc;
+bool irq_check_status_bit(unsigned int irq, unsigned int bitmask);
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
+static inline bool irq_balancing_disabled(unsigned int irq)
+{
+ return irq_check_status_bit(irq, IRQ_NO_BALANCING_MASK);
}
-static inline int irq_is_percpu(unsigned int irq)
+static inline bool irq_is_percpu(unsigned int irq)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_PER_CPU;
+ return irq_check_status_bit(irq, IRQ_PER_CPU);
}
-static inline void
-irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
+static inline bool irq_is_percpu_devid(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (desc)
- lockdep_set_class(&desc->lock, class);
+ return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID);
}
-#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+ struct lock_class_key *request_class);
static inline void
-__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
+irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+ struct lock_class_key *request_class)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- desc->preflow_handler = handler;
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ __irq_set_lockdep_class(irq, lock_class, request_class);
}
-#endif
#endif
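
With handle_domain_irq() gone, demultiplexing chained interrupts goes through generic_handle_domain_irq() instead. A hedged sketch of such a flow handler; the my_chip structure, the MY_CHIP_PENDING register, and all names are hypothetical:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

#define MY_CHIP_PENDING	0x10	/* hypothetical pending-status register */

struct my_chip {			/* illustrative driver state */
	void __iomem		*base;
	struct irq_domain	*domain;
};

static void my_chip_demux(struct irq_desc *desc)
{
	struct my_chip *chip = irq_desc_get_handler_data(desc);
	struct irq_chip *parent = irq_desc_get_chip(desc);
	unsigned long pending;
	unsigned int hwirq;

	chained_irq_enter(parent, desc);
	pending = readl_relaxed(chip->base + MY_CHIP_PENDING);
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_domain_irq(chip->domain, hwirq);
	chained_irq_exit(parent, desc);
}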
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 81e4889ca6dd..952d3c8dd6b7 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -1,48 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * irq_domain - IRQ translation domains
+ * irq_domain - IRQ Translation Domains
*
- * Translation infrastructure between hw and linux irq numbers. This is
- * helpful for interrupt controllers to implement mapping between hardware
- * irq numbers and the Linux irq number space.
- *
- * irq_domains also have hooks for translating device tree or other
- * firmware interrupt representations into a hardware irq number that
- * can be mapped back to a Linux irq number without any extra platform
- * support code.
- *
- * Interrupt controller "domain" data structure. This could be defined as a
- * irq domain controller. That is, it handles the mapping between hardware
- * and virtual interrupt numbers for a given interrupt domain. The domain
- * structure is generally created by the PIC code for a given PIC instance
- * (though a domain can cover more than one PIC if they have a flat number
- * model). It's the domain callbacks that are responsible for setting the
- * irq_chip on a given irq_desc after it's been mapped.
- *
- * The host code and data structures use a fwnode_handle pointer to
- * identify the domain. In some cases, and in order to preserve source
- * code compatibility, this fwnode pointer is "upgraded" to a DT
- * device_node. For those firmware infrastructures that do not provide
- * a unique identifier for an interrupt controller, the irq_domain
- * code offers a fwnode allocator.
+ * See Documentation/core-api/irq/irq-domain.rst for the details.
*/
#ifndef _LINUX_IRQDOMAIN_H
#define _LINUX_IRQDOMAIN_H
#include <linux/types.h>
+#include <linux/irqdomain_defs.h>
#include <linux/irqhandler.h>
#include <linux/of.h>
+#include <linux/mutex.h>
#include <linux/radix-tree.h>
struct device_node;
+struct fwnode_handle;
struct irq_domain;
-struct of_device_id;
struct irq_chip;
struct irq_data;
+struct irq_desc;
struct cpumask;
-
-/* Number of irqs reserved for a legacy isa controller */
-#define NUM_ISA_INTERRUPTS 16
+struct seq_file;
+struct irq_affinity_desc;
+struct msi_parent_ops;
#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
@@ -57,37 +39,57 @@ struct cpumask;
* pass a device-specific description of an interrupt.
*/
struct irq_fwspec {
- struct fwnode_handle *fwnode;
- int param_count;
- u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
+ struct fwnode_handle *fwnode;
+ int param_count;
+ u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
-/*
- * Should several domains have the same device node, but serve
- * different purposes (for example one domain is for PCI/MSI, and the
- * other for wired IRQs), they can be distinguished using a
- * bus-specific token. Most domains are expected to only carry
- * DOMAIN_BUS_ANY.
+/**
+ * struct irq_fwspec_info - firmware provided IRQ information structure
+ *
+ * @flags: Information validity flags
+ * @affinity: Affinity mask for this interrupt
+ *
+ * This structure reports firmware-specific information about an
+ * interrupt. The only significant information is the affinity of a
+ * per-CPU interrupt, but this is designed to be extended as required.
*/
-enum irq_domain_bus_token {
- DOMAIN_BUS_ANY = 0,
- DOMAIN_BUS_WIRED,
- DOMAIN_BUS_PCI_MSI,
- DOMAIN_BUS_PLATFORM_MSI,
- DOMAIN_BUS_NEXUS,
- DOMAIN_BUS_IPI,
- DOMAIN_BUS_FSL_MC_MSI,
+struct irq_fwspec_info {
+ unsigned long flags;
+ const struct cpumask *affinity;
};
+#define IRQ_FWSPEC_INFO_AFFINITY_VALID BIT(0)
+
+/* Conversion function from of_phandle_args fields to fwspec */
+void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
+ unsigned int count, struct irq_fwspec *fwspec);
+
/**
* struct irq_domain_ops - Methods for irq_domain objects
- * @match: Match an interrupt controller device node to a host, returns
- * 1 on a match
- * @map: Create or update a mapping between a virtual irq number and a hw
- * irq number. This is called only once for a given mapping.
- * @unmap: Dispose of such a mapping
- * @xlate: Given a device tree node and interrupt specifier, decode
- * the hardware irq number and linux irq type value.
+ * @match: Match an interrupt controller device node to a domain, returns
+ * 1 on a match
+ * @select: Match an interrupt controller fw specification. It is more generic
+ * than @match as it receives a complete struct irq_fwspec. Therefore,
+ * @select is preferred if provided. Returns 1 on a match.
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ * irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ * the hardware irq number and linux irq type value.
+ * @alloc: Allocate @nr_irqs interrupts starting from @virq.
+ * @free: Free @nr_irqs interrupts starting from @virq.
+ * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only
+ * reserve the vector. If unset, assign the vector (called from
+ * request_irq()).
+ * @deactivate: Disarm one interrupt (@irqd).
+ * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and
+ * linux irq type value (@out_type). This is a generalised @xlate
+ * (over struct irq_fwspec) and is preferred if provided.
+ * @get_fwspec_info:
+ * Given @fwspec, report additional firmware-provided information in
+ * @info. Optional.
+ * @debug_show: For domains to show specific data for an interrupt in debugfs.
*
* Functions below are provided by the driver and called whenever a new mapping
* is created or an old mapping is disposed. The driver can then proceed to
@@ -95,84 +97,103 @@ enum irq_domain_bus_token {
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node,
- enum irq_domain_bus_token bus_token);
- int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
- int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
- void (*unmap)(struct irq_domain *d, unsigned int virq);
- int (*xlate)(struct irq_domain *d, struct device_node *node,
- const u32 *intspec, unsigned int intsize,
- unsigned long *out_hwirq, unsigned int *out_type);
-
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
+ int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+ int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+ void (*unmap)(struct irq_domain *d, unsigned int virq);
+ int (*xlate)(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* extended V2 interfaces to support hierarchy irq_domains */
- int (*alloc)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs, void *arg);
- void (*free)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs);
- void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
- void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
- int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
- unsigned long *out_hwirq, unsigned int *out_type);
+ int (*alloc)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg);
+ void (*free)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs);
+ int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+ int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+ int (*get_fwspec_info)(struct irq_fwspec *fwspec, struct irq_fwspec_info *info);
+#endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ void (*debug_show)(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind);
#endif
};
-extern struct irq_domain_ops irq_generic_chip_ops;
+extern const struct irq_domain_ops irq_generic_chip_ops;
struct irq_domain_chip_generic;
/**
* struct irq_domain - Hardware interrupt number translation object
- * @link: Element in global irq_domain list.
- * @name: Name of interrupt domain
- * @ops: pointer to irq_domain methods
- * @host_data: private data pointer for use by owner. Not touched by irq_domain
- * core code.
- * @flags: host per irq_domain flags
- * @mapcount: The number of mapped interrupts
+ * @link: Element in global irq_domain list.
+ * @name: Name of interrupt domain
+ * @ops: Pointer to irq_domain methods
+ * @host_data: Private data pointer for use by owner. Not touched by irq_domain
+ * core code.
+ * @flags: Per irq_domain flags
+ * @mapcount: The number of mapped interrupts
+ * @mutex: Domain lock, hierarchical domains use root domain's lock
+ * @root: Pointer to root domain, or containing structure if non-hierarchical
*
- * Optional elements
- * @of_node: Pointer to device tree nodes associated with the irq_domain. Used
- * when decoding device tree interrupt specifiers.
- * @gc: Pointer to a list of generic chips. There is a helper function for
- * setting up one or more generic chips for interrupt controllers
- * drivers using the generic chip library which uses this pointer.
- * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
- * @debugfs_file: dentry for the domain debugfs file
+ * Optional elements:
+ * @fwnode: Pointer to firmware node associated with the irq_domain. Easily
+ * swapped for the of_node via the irq_domain_get_of_node() accessor.
+ * @bus_token: @fwnode's device_node might be used for several irq domains. But
+ * in connection with @bus_token, the pair shall be unique in a
+ * system.
+ * @gc: Pointer to a list of generic chips. There is a helper function for
+ * setting up one or more generic chips for interrupt controllers
+ * drivers using the generic chip library which uses this pointer.
+ * @dev: Pointer to the device which instantiated the irqdomain
+ * With per device irq domains this is not necessarily the same
+ * as @pm_dev.
+ * @pm_dev: Pointer to a device that can be utilized for power management
+ * purposes related to the irq domain.
+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * @msi_parent_ops: Pointer to MSI parent domain methods for per device domain init
+ * @exit: Function called when the domain is destroyed
*
- * Revmap data, used internally by irq_domain
- * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
- * support direct mapping
- * @revmap_size: Size of the linear map table @linear_revmap[]
- * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
- * @linear_revmap: Linear table of hwirq->virq reverse mappings
+ * Revmap data, used internally by the irq domain code:
+ * @hwirq_max: Top limit for the HW irq number, chiefly to avoid
+ * conflicts/failures with reserved HW irqs. Can be ~0.
+ * @revmap_size: Size of the linear map table @revmap
+ * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
+ * @revmap: Linear table of irq_data pointers
*/
struct irq_domain {
- struct list_head link;
- const char *name;
- const struct irq_domain_ops *ops;
- void *host_data;
- unsigned int flags;
- unsigned int mapcount;
+ struct list_head link;
+ const char *name;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ unsigned int flags;
+ unsigned int mapcount;
+ struct mutex mutex;
+ struct irq_domain *root;
/* Optional data */
- struct fwnode_handle *fwnode;
- enum irq_domain_bus_token bus_token;
- struct irq_domain_chip_generic *gc;
+ struct fwnode_handle *fwnode;
+ enum irq_domain_bus_token bus_token;
+ struct irq_domain_chip_generic *gc;
+ struct device *dev;
+ struct device *pm_dev;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
- struct irq_domain *parent;
+ struct irq_domain *parent;
#endif
-#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
- struct dentry *debugfs_file;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ const struct msi_parent_ops *msi_parent_ops;
#endif
+ void (*exit)(struct irq_domain *d);
/* reverse map data. The linear map gets appended to the irq_domain */
- irq_hw_number_t hwirq_max;
- unsigned int revmap_direct_max_irq;
- unsigned int revmap_size;
- struct radix_tree_root revmap_tree;
- unsigned int linear_revmap[];
+ irq_hw_number_t hwirq_max;
+ unsigned int revmap_size;
+ struct radix_tree_root revmap_tree;
+ struct irq_data __rcu *revmap[] __counted_by(revmap_size);
};
/* Irq domain flags */
@@ -180,8 +201,8 @@ enum {
/* Irq domain is hierarchical */
IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
- /* Irq domain name was allocated in __irq_domain_add() */
- IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
+ /* Irq domain name was allocated internally */
+ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
/* Irq domain is an IPI domain with virq per cpu */
IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
@@ -192,8 +213,28 @@ enum {
/* Irq domain implements MSIs */
IRQ_DOMAIN_FLAG_MSI = (1 << 4),
- /* Irq domain implements MSI remapping */
- IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
+ /*
+ * Irq domain implements isolated MSI, see msi_device_has_isolated_msi()
+ */
+ IRQ_DOMAIN_FLAG_ISOLATED_MSI = (1 << 5),
+
+ /* Irq domain doesn't translate anything */
+ IRQ_DOMAIN_FLAG_NO_MAP = (1 << 6),
+
+ /* Irq domain is a MSI parent domain */
+ IRQ_DOMAIN_FLAG_MSI_PARENT = (1 << 8),
+
+ /* Irq domain is a MSI device domain */
+ IRQ_DOMAIN_FLAG_MSI_DEVICE = (1 << 9),
+
+ /* Irq domain must destroy generic chips when removed */
+ IRQ_DOMAIN_FLAG_DESTROY_GC = (1 << 10),
+
+ /* Address and data pair is mutable when irq_set_affinity() */
+ IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11),
+
+ /* IRQ domain requires parent fwnode matching */
+ IRQ_DOMAIN_FLAG_FWNODE_PARENT = (1 << 12),
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
@@ -208,9 +249,15 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
return to_of_node(d->fwnode);
}
+static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device *dev)
+{
+ if (d)
+ d->pm_dev = dev;
+}
+
#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
- const char *name, void *data);
+ const char *name, phys_addr_t *pa);
enum {
IRQCHIP_FWNODE_REAL,
@@ -218,66 +265,104 @@ enum {
IRQCHIP_FWNODE_NAMED_ID,
};
-static inline
-struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
+static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
}
-static inline
-struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
+static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
NULL);
}
-static inline struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
+static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
{
- return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, data);
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa);
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
- irq_hw_number_t hwirq_max, int direct_max,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- irq_hw_number_t first_hwirq,
- const struct irq_domain_ops *ops,
- void *host_data);
-extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
-extern bool irq_domain_check_msi_remap(void);
-extern void irq_set_default_host(struct irq_domain *host);
-extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
- irq_hw_number_t hwirq, int node,
- const struct cpumask *affinity);
-
-static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
-{
- return node ? &node->fwnode : NULL;
-}
+
+DEFINE_FREE(irq_domain_free_fwnode, struct fwnode_handle *, if (_T) irq_domain_free_fwnode(_T))
+
+struct irq_domain_chip_generic_info;
+
+/**
+ * struct irq_domain_info - Domain information structure
+ * @fwnode: firmware node for the interrupt controller
+ * @domain_flags: Additional flags to add to the domain flags
+ * @size: Size of linear map; 0 for radix mapping only
+ * @hwirq_max: Maximum number of interrupts supported by controller
+ * @direct_max: Maximum value of direct maps;
+ * use ~0 for no limit and 0 for no direct mapping
+ * @hwirq_base: The first hardware interrupt number (legacy domains only)
+ * @virq_base: The first Linux interrupt number for legacy domains to
+ * immediately associate the interrupts after domain creation
+ * @bus_token: Domain bus token
+ * @name_suffix: Optional name suffix to avoid collisions when multiple
+ * domains are added using same fwnode
+ * @ops: Domain operation callbacks
+ * @host_data: Controller private data pointer
+ * @dev: Device which creates the domain
+ * @dgc_info: Generic chip information structure pointer used to
+ * create generic chips for the domain if not NULL.
+ * @init: Function called when the domain is created.
+ * Allows additional domain initialisation.
+ * @exit: Function called when the domain is destroyed.
+ * Allows additional cleanup operations.
+ */
+struct irq_domain_info {
+ struct fwnode_handle *fwnode;
+ unsigned int domain_flags;
+ unsigned int size;
+ irq_hw_number_t hwirq_max;
+ int direct_max;
+ unsigned int hwirq_base;
+ unsigned int virq_base;
+ enum irq_domain_bus_token bus_token;
+ const char *name_suffix;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ struct device *dev;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ /**
+ * @parent: Pointer to the parent irq domain used in a hierarchy domain
+ */
+ struct irq_domain *parent;
+#endif
+ struct irq_domain_chip_generic_info *dgc_info;
+ int (*init)(struct irq_domain *d);
+ void (*exit)(struct irq_domain *d);
+};
+
+struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info);
+struct irq_domain *devm_irq_domain_instantiate(struct device *dev,
+ const struct irq_domain_info *info);
+
+struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops, void *host_data);
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size,
+ unsigned int first_irq, irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops, void *host_data);
+struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+void irq_set_default_domain(struct irq_domain *domain);
+struct irq_domain *irq_get_default_domain(void);
+int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node,
+ const struct irq_affinity_desc *affinity);
extern const struct fwnode_operations irqchip_fwnode_ops;
-static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
+static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode)
{
return fwnode && fwnode->ops == &irqchip_fwnode_ops;
}
-extern void irq_domain_update_bus_token(struct irq_domain *domain,
- enum irq_domain_bus_token bus_token);
+void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token);
-static inline
-struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
- enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
struct irq_fwspec fwspec = {
.fwnode = fwnode,
@@ -289,189 +374,256 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
enum irq_domain_bus_token bus_token)
{
- return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token);
+ return irq_find_matching_fwnode(of_fwnode_handle(node), bus_token);
}
static inline struct irq_domain *irq_find_host(struct device_node *node)
{
- return irq_find_matching_host(node, DOMAIN_BUS_ANY);
-}
+ struct irq_domain *d;
-/**
- * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
- * @of_node: pointer to interrupt controller's device tree node.
- * @size: Number of interrupts in the domain.
- * @ops: map/unmap domain callbacks
- * @host_data: Controller private data pointer
- */
-static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
-}
-static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
- unsigned int max_irq,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
-}
-static inline struct irq_domain *irq_domain_add_legacy_isa(
- struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
- host_data);
+ d = irq_find_matching_host(node, DOMAIN_BUS_WIRED);
+ if (!d)
+ d = irq_find_matching_host(node, DOMAIN_BUS_ANY);
+
+ return d;
}
-static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
+
+#ifdef CONFIG_IRQ_DOMAIN_NOMAP
+static inline struct irq_domain *irq_domain_create_nomap(struct fwnode_handle *fwnode,
+ unsigned int max_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
- return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data);
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .hwirq_max = max_irq,
+ .direct_max = max_irq,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
}
+unsigned int irq_create_direct_mapping(struct irq_domain *domain);
+#endif
+
+/**
+ * irq_domain_create_linear - Allocate and register a linear revmap irq_domain.
+ * @fwnode: pointer to interrupt controller's FW node.
+ * @size: Number of interrupts in the domain.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Returns: Newly created irq_domain
+ */
static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(fwnode, size, size, 0, ops, host_data);
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = size,
+ .hwirq_max = size,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
}
static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data);
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .hwirq_max = ~0,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
}
-extern void irq_domain_remove(struct irq_domain *host);
+void irq_domain_remove(struct irq_domain *domain);
-extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq);
-extern void irq_domain_associate_many(struct irq_domain *domain,
- unsigned int irq_base,
- irq_hw_number_t hwirq_base, int count);
-extern void irq_domain_disassociate(struct irq_domain *domain,
- unsigned int irq);
+int irq_domain_associate(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq);
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count);
-extern unsigned int irq_create_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
-extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
-extern void irq_dispose_mapping(unsigned int virq);
+unsigned int irq_create_mapping_affinity(struct irq_domain *domain, irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity);
+unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
+void irq_dispose_mapping(unsigned int virq);
/**
- * irq_linear_revmap() - Find a linux irq from a hw irq number.
- * @domain: domain owning this hardware interrupt
- * @hwirq: hardware irq number in that domain space
+ * irq_create_mapping - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
*
- * This is a fast path alternative to irq_find_mapping() that can be
- * called directly by irq controller code to save a handful of
- * instructions. It is always safe to call, but won't find irqs mapped
- * using the radix tree.
+ * Only one mapping per hardware interrupt is permitted.
+ *
+ * If the sense/trigger is to be specified, irq_set_irq_type() should be
+ * called on the number returned from that call.
+ *
+ * Returns: Linux irq number or 0 on error
+ */
+static inline unsigned int irq_create_mapping(struct irq_domain *domain, irq_hw_number_t hwirq)
+{
+ return irq_create_mapping_affinity(domain, hwirq, NULL);
+}
+
+struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ unsigned int *irq);
+
+/**
+ * irq_resolve_mapping - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Returns: Interrupt descriptor
*/
-static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0;
+ return __irq_resolve_mapping(domain, hwirq, NULL);
}
-extern unsigned int irq_find_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
-extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-extern int irq_create_strict_mappings(struct irq_domain *domain,
- unsigned int irq_base,
- irq_hw_number_t hwirq_base, int count);
-static inline int irq_create_identity_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq)
+/**
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Returns: Linux irq number or 0 if not found
+ */
+static inline unsigned int irq_find_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- return irq_create_strict_mappings(host, hwirq, hwirq, 1);
+ unsigned int irq;
+
+ if (__irq_resolve_mapping(domain, hwirq, &irq))
+ return irq;
+
+ return 0;
}
extern const struct irq_domain_ops irq_domain_simple_ops;
/* stock xlate functions */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+
+int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
/* IPI functions */
int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
/* V2 interfaces to support hierarchy IRQ domains. */
-extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
- unsigned int virq);
-extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq, struct irq_chip *chip,
- void *chip_data, irq_flow_handler_t handler,
- void *handler_data, const char *handler_name);
+struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq);
+void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq,
+ const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
+void irq_domain_reset_irq_data(struct irq_data *irq_data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
- unsigned int flags, unsigned int size,
- struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops, void *host_data);
-
-static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
- unsigned int flags,
- unsigned int size,
- struct device_node *node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_create_hierarchy(parent, flags, size,
- of_node_to_fwnode(node),
- ops, host_data);
-}
-
-extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
- unsigned int nr_irqs, int node, void *arg,
- bool realloc, const struct cpumask *affinity);
-extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
-extern void irq_domain_activate_irq(struct irq_data *irq_data);
-extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
-
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
-{
- return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false,
- NULL);
-}
-
-extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
- unsigned int irq_base,
- unsigned int nr_irqs, void *arg);
-extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
- unsigned int virq,
- irq_hw_number_t hwirq,
- struct irq_chip *chip,
- void *chip_data);
-extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
-extern void irq_domain_free_irqs_common(struct irq_domain *domain,
- unsigned int virq,
- unsigned int nr_irqs);
-extern void irq_domain_free_irqs_top(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs);
-
-extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
-extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
-
-extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
- unsigned int nr_irqs, void *arg);
-
-extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
- unsigned int nr_irqs);
+/**
+ * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
+ * @parent: Parent irq domain to associate with the new domain
+ * @flags: Irq domain flags associated to the domain
+ * @size: Size of the domain. See below
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @ops: Pointer to the interrupt domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * If @size is 0 a tree domain is created, otherwise a linear domain.
+ *
+ * If successful the parent is associated to the new domain and the
+ * domain flags are set.
+ *
+ * Returns: A pointer to IRQ domain, or %NULL on failure.
+ */
+static inline struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
+ unsigned int flags, unsigned int size,
+ struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = size,
+ .hwirq_max = size ? : ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ .domain_flags = flags,
+ .parent = parent,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
+}
+
+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs,
+ int node, void *arg, bool realloc,
+ const struct irq_affinity_desc *affinity);
+void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
+int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
+void irq_domain_deactivate_irq(struct irq_data *irq_data);
+
+/**
+ * irq_domain_alloc_irqs - Allocate IRQs from domain
+ * @domain: domain to allocate from
+ * @nr_irqs: number of IRQs to allocate
+ * @node: NUMA node id for memory allocation
+ * @arg: domain specific argument
+ *
+ * See __irq_domain_alloc_irqs()' documentation.
+ */
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
+{
+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, NULL);
+}
+
+int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, const struct irq_chip *chip,
+ void *chip_data);
+void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs);
+
+int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
+int irq_domain_pop_irq(struct irq_domain *domain, int virq);
+
+int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
+ unsigned int nr_irqs, void *arg);
+
+void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
+ unsigned int nr_irqs);
+
+int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq);
+
+int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info);
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -480,8 +632,7 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
static inline bool irq_domain_is_ipi(struct irq_domain *domain)
{
- return domain->flags &
- (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
+ return domain->flags & (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
}
static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
@@ -499,24 +650,28 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return domain->flags & IRQ_DOMAIN_FLAG_MSI;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
- return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_PARENT;
}
-extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE;
+}
+static inline bool irq_domain_is_msi_immutable(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
+}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
-static inline void irq_domain_activate_irq(struct irq_data *data) { }
-static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
{
return -1;
}
-static inline void irq_domain_free_irqs(unsigned int virq,
- unsigned int nr_irqs) { }
+static inline void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { }
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -543,31 +698,79 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return false;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
return false;
}
-static inline bool
-irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
{
return false;
}
+
+static inline int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+#ifdef CONFIG_GENERIC_MSI_IRQ
+int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, unsigned int type);
+void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq);
+#else
+static inline int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
+ unsigned int type)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq)
+{
+ WARN_ON_ONCE(1);
+}
+#endif
+
+static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .hwirq_max = ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
+
+static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .size = size,
+ .hwirq_max = size,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
+
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
-static inline void irq_domain_activate_irq(struct irq_data *data) { }
-static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
-static inline struct irq_domain *irq_find_matching_fwnode(
- struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
return NULL;
}
-static inline bool irq_domain_check_msi_remap(void)
-{
- return false;
-}
#endif /* !CONFIG_IRQ_DOMAIN */
#endif /* _LINUX_IRQDOMAIN_H */
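
The create_* wrappers above all funnel into irq_domain_instantiate(); new code can also fill an irq_domain_info directly. A hedged sketch with illustrative names and an arbitrary 32-interrupt size; note that irq_domain_instantiate() returns ERR_PTR() on failure, unlike the older NULL-returning helpers:

/* Create a linear domain via irq_domain_info (illustrative wrapper). */
static struct irq_domain *my_create_domain(struct fwnode_handle *fwnode,
					   const struct irq_domain_ops *ops,
					   void *priv)
{
	const struct irq_domain_info info = {
		.fwnode		= fwnode,
		.size		= 32,		/* linear map of 32 hwirqs */
		.hwirq_max	= 32,
		.ops		= ops,
		.host_data	= priv,
	};

	return irq_domain_instantiate(&info);	/* ERR_PTR() on failure */
}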
diff --git a/include/linux/irqdomain_defs.h b/include/linux/irqdomain_defs.h
new file mode 100644
index 000000000000..36653e2ee1c9
--- /dev/null
+++ b/include/linux/irqdomain_defs.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQDOMAIN_DEFS_H
+#define _LINUX_IRQDOMAIN_DEFS_H
+
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_WIRED,
+ DOMAIN_BUS_GENERIC_MSI,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+ DOMAIN_BUS_IPI,
+ DOMAIN_BUS_FSL_MC_MSI,
+ DOMAIN_BUS_TI_SCI_INTA_MSI,
+ DOMAIN_BUS_WAKEUP,
+ DOMAIN_BUS_VMD_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSIX,
+ DOMAIN_BUS_DMAR,
+ DOMAIN_BUS_AMDVI,
+ DOMAIN_BUS_DEVICE_MSI,
+ DOMAIN_BUS_WIRED_TO_MSI,
+};
+
+#endif /* _LINUX_IRQDOMAIN_DEFS_H */
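
A hedged usage sketch: when one fwnode backs several domains, the bus token disambiguates the lookup; the function name here is illustrative:

/* Prefer the wired-IRQ domain over e.g. a PCI/MSI domain on the same fwnode. */
static struct irq_domain *pick_wired_domain(struct fwnode_handle *fwnode)
{
	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_WIRED);
}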
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5fdd93bb9300..57b074e0cfbb 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/irqflags.h
*
@@ -11,53 +12,134 @@
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H
+#include <linux/irqflags_types.h>
#include <linux/typecheck.h>
+#include <linux/cleanup.h>
#include <asm/irqflags.h>
+#include <asm/percpu.h>
+
+struct task_struct;
+
+/* Currently lockdep_softirqs_on/off is used only by lockdep */
+#ifdef CONFIG_PROVE_LOCKING
+ extern void lockdep_softirqs_on(unsigned long ip);
+ extern void lockdep_softirqs_off(unsigned long ip);
+ extern void lockdep_hardirqs_on_prepare(void);
+ extern void lockdep_hardirqs_on(unsigned long ip);
+ extern void lockdep_hardirqs_off(unsigned long ip);
+ extern void lockdep_cleanup_dead_cpu(unsigned int cpu,
+ struct task_struct *idle);
+#else
+ static inline void lockdep_softirqs_on(unsigned long ip) { }
+ static inline void lockdep_softirqs_off(unsigned long ip) { }
+ static inline void lockdep_hardirqs_on_prepare(void) { }
+ static inline void lockdep_hardirqs_on(unsigned long ip) { }
+ static inline void lockdep_hardirqs_off(unsigned long ip) { }
+ static inline void lockdep_cleanup_dead_cpu(unsigned int cpu,
+ struct task_struct *idle) {}
+#endif
#ifdef CONFIG_TRACE_IRQFLAGS
- extern void trace_softirqs_on(unsigned long ip);
- extern void trace_softirqs_off(unsigned long ip);
- extern void trace_hardirqs_on(void);
- extern void trace_hardirqs_off(void);
-# define trace_hardirq_context(p) ((p)->hardirq_context)
-# define trace_softirq_context(p) ((p)->softirq_context)
-# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
-# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
-# define trace_hardirq_enter() \
+
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
+
+extern void trace_hardirqs_on_prepare(void);
+extern void trace_hardirqs_off_finish(void);
+extern void trace_hardirqs_on(void);
+extern void trace_hardirqs_off(void);
+
+# define lockdep_hardirq_context() (raw_cpu_read(hardirq_context))
+# define lockdep_softirq_context(p) ((p)->softirq_context)
+# define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled))
+# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
+# define lockdep_hardirq_enter() \
+do { \
+ if (__this_cpu_inc_return(hardirq_context) == 1)\
+ current->hardirq_threaded = 0; \
+} while (0)
+# define lockdep_hardirq_threaded() \
do { \
- current->hardirq_context++; \
- crossrelease_hist_start(XHLOCK_HARD); \
+ current->hardirq_threaded = 1; \
} while (0)
-# define trace_hardirq_exit() \
+# define lockdep_hardirq_exit() \
do { \
- current->hardirq_context--; \
- crossrelease_hist_end(XHLOCK_HARD); \
+ __this_cpu_dec(hardirq_context); \
} while (0)
+
+# define lockdep_hrtimer_enter(__hrtimer) \
+({ \
+ bool __expires_hardirq = true; \
+ \
+ if (!__hrtimer->is_hard) { \
+ current->irq_config = 1; \
+ __expires_hardirq = false; \
+ } \
+ __expires_hardirq; \
+})
+
+# define lockdep_hrtimer_exit(__expires_hardirq) \
+ do { \
+ if (!__expires_hardirq) \
+ current->irq_config = 0; \
+ } while (0)
+
+# define lockdep_posixtimer_enter() \
+ do { \
+ current->irq_config = 1; \
+ } while (0)
+
+# define lockdep_posixtimer_exit() \
+ do { \
+ current->irq_config = 0; \
+ } while (0)
+
+# define lockdep_irq_work_enter(_flags) \
+ do { \
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
+ current->irq_config = 1; \
+ } while (0)
+# define lockdep_irq_work_exit(_flags) \
+ do { \
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
+ current->irq_config = 0; \
+ } while (0)
+
+#else
+# define trace_hardirqs_on_prepare() do { } while (0)
+# define trace_hardirqs_off_finish() do { } while (0)
+# define trace_hardirqs_on() do { } while (0)
+# define trace_hardirqs_off() do { } while (0)
+# define lockdep_hardirq_context() 0
+# define lockdep_softirq_context(p) 0
+# define lockdep_hardirqs_enabled() 0
+# define lockdep_softirqs_enabled(p) 0
+# define lockdep_hardirq_enter() do { } while (0)
+# define lockdep_hardirq_threaded() do { } while (0)
+# define lockdep_hardirq_exit() do { } while (0)
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
+# define lockdep_hrtimer_enter(__hrtimer) false
+# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
+# define lockdep_posixtimer_enter() do { } while (0)
+# define lockdep_posixtimer_exit() do { } while (0)
+# define lockdep_irq_work_enter(__work) do { } while (0)
+# define lockdep_irq_work_exit(__work) do { } while (0)
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
# define lockdep_softirq_enter() \
do { \
current->softirq_context++; \
- crossrelease_hist_start(XHLOCK_SOFT); \
} while (0)
# define lockdep_softirq_exit() \
do { \
current->softirq_context--; \
- crossrelease_hist_end(XHLOCK_SOFT); \
} while (0)
-# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
+
#else
-# define trace_hardirqs_on() do { } while (0)
-# define trace_hardirqs_off() do { } while (0)
-# define trace_softirqs_on(ip) do { } while (0)
-# define trace_softirqs_off(ip) do { } while (0)
-# define trace_hardirq_context(p) 0
-# define trace_softirq_context(p) 0
-# define trace_hardirqs_enabled(p) 0
-# define trace_softirqs_enabled(p) 0
-# define trace_hardirq_enter() do { } while (0)
-# define trace_hardirq_exit() do { } while (0)
-# define lockdep_softirq_enter() do { } while (0)
-# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
@@ -69,6 +151,17 @@ do { \
# define start_critical_timings() do { } while (0)
#endif
+#ifdef CONFIG_DEBUG_IRQFLAGS
+extern void warn_bogus_irq_restore(void);
+#define raw_check_bogus_irq_restore() \
+ do { \
+ if (unlikely(!arch_irqs_disabled())) \
+ warn_bogus_irq_restore(); \
+ } while (0)
+#else
+#define raw_check_bogus_irq_restore() do { } while (0)
+#endif
+
/*
* Wrap the arch provided IRQ routines to provide appropriate checks.
*/
@@ -82,6 +175,7 @@ do { \
#define raw_local_irq_restore(flags) \
do { \
typecheck(unsigned long, flags); \
+ raw_check_bogus_irq_restore(); \
arch_local_irq_restore(flags); \
} while (0)
#define raw_local_save_flags(flags) \
@@ -102,26 +196,33 @@ do { \
* if !TRACE_IRQFLAGS.
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-#define local_irq_enable() \
- do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
-#define local_irq_disable() \
- do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+
+#define local_irq_enable() \
+ do { \
+ trace_hardirqs_on(); \
+ raw_local_irq_enable(); \
+ } while (0)
+
+#define local_irq_disable() \
+ do { \
+ bool was_disabled = raw_irqs_disabled();\
+ raw_local_irq_disable(); \
+ if (!was_disabled) \
+ trace_hardirqs_off(); \
+ } while (0)
+
#define local_irq_save(flags) \
do { \
raw_local_irq_save(flags); \
- trace_hardirqs_off(); \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_off(); \
} while (0)
-
#define local_irq_restore(flags) \
do { \
- if (raw_irqs_disabled_flags(flags)) { \
- raw_local_irq_restore(flags); \
- trace_hardirqs_off(); \
- } else { \
+ if (!raw_irqs_disabled_flags(flags)) \
trace_hardirqs_on(); \
- raw_local_irq_restore(flags); \
- } \
+ raw_local_irq_restore(flags); \
} while (0)
#define safe_halt() \
@@ -135,10 +236,7 @@ do { \
#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
#define local_irq_disable() do { raw_local_irq_disable(); } while (0)
-#define local_irq_save(flags) \
- do { \
- raw_local_irq_save(flags); \
- } while (0)
+#define local_irq_save(flags) do { raw_local_irq_save(flags); } while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define safe_halt() do { raw_safe_halt(); } while (0)
@@ -164,4 +262,10 @@ do { \
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
+DEFINE_LOCK_GUARD_0(irqsave,
+ local_irq_save(_T->flags),
+ local_irq_restore(_T->flags),
+ unsigned long flags)
+
#endif
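
The DEFINE_LOCK_GUARD_0() definitions at the end plug these IRQ primitives into the scope-based guards from <linux/cleanup.h>. A hedged usage sketch with an illustrative counter:

#include <linux/cleanup.h>
#include <linux/irqflags.h>

static unsigned long events;	/* illustrative state touched with IRQs off */

static void note_event(void)
{
	guard(irqsave)();	/* local_irq_save() here, restored at scope exit */
	events++;
}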
diff --git a/include/linux/irqflags_types.h b/include/linux/irqflags_types.h
new file mode 100644
index 000000000000..c13f0d915097
--- /dev/null
+++ b/include/linux/irqflags_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQFLAGS_TYPES_H
+#define _LINUX_IRQFLAGS_TYPES_H
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+};
+
+#endif
+
+#endif /* _LINUX_IRQFLAGS_TYPES_H */
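
Note: moving struct irqtrace_events into a dedicated _types header follows the kernel convention of
letting other headers embed the type without pulling in the full irqflags.h API. A sketch of the
intended kind of use (the embedding struct is hypothetical):

    #include <linux/irqflags_types.h>

    struct my_context {                             /* hypothetical embedder */
    #ifdef CONFIG_TRACE_IRQFLAGS
            struct irqtrace_events irqtrace;        /* per-context trace state */
    #endif
            unsigned long other_state;
    };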
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index 661bed0ed1f3..72dd1eb3a0e7 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQHANDLER_H
#define _LINUX_IRQHANDLER_H
@@ -7,8 +8,7 @@
*/
struct irq_desc;
-struct irq_data;
+
typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
-typedef void (*irq_preflow_handler_t)(struct irq_data *data);
#endif
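
Note: with irq_preflow_handler_t removed, irq_flow_handler_t is the only callback type left in this
header. For reference, the shape of a flow handler (a non-functional sketch; real handlers ack,
run the action chain, and unmask via the irq chip):

    #include <linux/irqhandler.h>

    static void my_flow_handler(struct irq_desc *desc)
    {
            /* ack the interrupt, invoke the handlers, unmask --
             * the details depend on the irq chip in use. */
    }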
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 9669bf9d4f48..e97206c721a0 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -1,33 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQNR_H
#define _LINUX_IRQNR_H
#include <uapi/linux/irqnr.h>
-extern int nr_irqs;
+unsigned int irq_get_nr_irqs(void) __pure;
+unsigned int irq_set_nr_irqs(unsigned int nr);
extern struct irq_desc *irq_to_desc(unsigned int irq);
unsigned int irq_get_next_irq(unsigned int offset);
-# define for_each_irq_desc(irq, desc) \
- for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
- irq++, desc = irq_to_desc(irq)) \
- if (!desc) \
- ; \
- else
-
+#define for_each_irq_desc(irq, desc) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0, desc = irq_to_desc(irq); irq < __nr_irqs__; \
+ irq++, desc = irq_to_desc(irq)) \
+ if (!desc) \
+ ; \
+ else
# define for_each_irq_desc_reverse(irq, desc) \
- for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
- irq--, desc = irq_to_desc(irq)) \
+ for (irq = irq_get_nr_irqs() - 1, desc = irq_to_desc(irq); \
+ irq >= 0; irq--, desc = irq_to_desc(irq)) \
if (!desc) \
; \
else
-# define for_each_active_irq(irq) \
- for (irq = irq_get_next_irq(0); irq < nr_irqs; \
- irq = irq_get_next_irq(irq + 1))
+#define for_each_active_irq(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = irq_get_next_irq(0); irq < __nr_irqs__; \
+ irq = irq_get_next_irq(irq + 1))
-#define for_each_irq_nr(irq) \
- for (irq = 0; irq < nr_irqs; irq++)
+#define for_each_irq_nr(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0; irq < __nr_irqs__; irq++)
#endif
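
Note: the rewritten macros use a one-shot outer for loop to snapshot irq_get_nr_irqs() into the
scoped __nr_irqs__ variable, so iteration no longer reads a writable global nr_irqs. Call sites
stay source-compatible; a usage sketch (the counting function is illustrative):

    #include <linux/irqnr.h>
    #include <linux/printk.h>

    static void count_irq_descs(void)
    {
            struct irq_desc *desc;
            unsigned int irq, n = 0;

            for_each_irq_desc(irq, desc)
                    n++;            /* desc is guaranteed non-NULL here */

            pr_info("%u irq descriptors\n", n);
    }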
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index eb1bdcf95f2e..d426c7ad92bf 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -1,11 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQRETURN_H
#define _LINUX_IRQRETURN_H
/**
- * enum irqreturn
- * @IRQ_NONE interrupt was not from this device or was not handled
- * @IRQ_HANDLED interrupt was handled by this device
- * @IRQ_WAKE_THREAD handler requests to wake the handler thread
+ * enum irqreturn - irqreturn type values
+ * @IRQ_NONE: interrupt was not from this device or was not handled
+ * @IRQ_HANDLED: interrupt was handled by this device
+ * @IRQ_WAKE_THREAD: handler requests to wake the handler thread
*/
enum irqreturn {
IRQ_NONE = (0 << 0),
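
Note: the hunk above only repairs the kernel-doc (summary line and missing colons); the semantics
are unchanged. For reference, the classic threaded-handler pattern exercises all three values
(the device helpers here are hypothetical):

    #include <linux/interrupt.h>

    static irqreturn_t my_quick_check(int irq, void *dev_id)
    {
            if (!device_raised_irq(dev_id))         /* hypothetical register check */
                    return IRQ_NONE;                /* shared line: not our device */
            return IRQ_WAKE_THREAD;                 /* defer heavy work to the thread */
    }

    static irqreturn_t my_thread_fn(int irq, void *dev_id)
    {
            /* sleepable context: process the event */
            return IRQ_HANDLED;
    }

These two would be passed together to request_threaded_irq().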
diff --git a/include/linux/isa-dma.h b/include/linux/isa-dma.h
new file mode 100644
index 000000000000..61504a8c1b9e
--- /dev/null
+++ b/include/linux/isa-dma.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_ISA_DMA_H
+#define __LINUX_ISA_DMA_H
+
+#include <asm/dma.h>
+
+#if defined(CONFIG_PCI) && defined(CONFIG_X86_32)
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* __LINUX_ISA_DMA_H */
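
Note: the new header gives isa_dma_bridge_buggy, previously declared in <linux/pci.h>, a home of
its own; on configurations other than PCI on x86-32 it constant-folds to 0 so the check compiles
away. Illustrative use (the wrapper function is hypothetical):

    #include <linux/isa-dma.h>

    static bool can_use_isa_dma(void)
    {
            return !isa_dma_bridge_buggy;   /* compile-time 0 on most configs */
    }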
diff --git a/include/linux/isa.h b/include/linux/isa.h
index f2d0258414cf..4fbbf5e36e08 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ISA bus.
*/
@@ -12,7 +13,7 @@
struct isa_driver {
int (*match)(struct device *, unsigned int);
int (*probe)(struct device *, unsigned int);
- int (*remove)(struct device *, unsigned int);
+ void (*remove)(struct device *, unsigned int);
void (*shutdown)(struct device *, unsigned int);
int (*suspend)(struct device *, unsigned int, pm_message_t);
int (*resume)(struct device *, unsigned int);
@@ -37,6 +38,32 @@ static inline void isa_unregister_driver(struct isa_driver *d)
}
#endif
+#define module_isa_driver_init(__isa_driver, __num_isa_dev) \
+static int __init __isa_driver##_init(void) \
+{ \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq) \
+static int __init __isa_driver##_init(void) \
+{ \
+ if (__num_irq != __num_isa_dev) { \
+ pr_err("%s: Number of irq (%u) does not match number of base (%u)\n", \
+ __isa_driver.driver.name, __num_irq, __num_isa_dev); \
+ return -EINVAL; \
+ } \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_exit(__isa_driver) \
+static void __exit __isa_driver##_exit(void) \
+{ \
+ isa_unregister_driver(&(__isa_driver)); \
+} \
+module_exit(__isa_driver##_exit)
+
/**
* module_isa_driver() - Helper macro for registering a ISA driver
* @__isa_driver: isa_driver struct
@@ -47,16 +74,22 @@ static inline void isa_unregister_driver(struct isa_driver *d)
* use this macro once, and calling it replaces module_init and module_exit.
*/
#define module_isa_driver(__isa_driver, __num_isa_dev) \
-static int __init __isa_driver##_init(void) \
-{ \
- return isa_register_driver(&(__isa_driver), __num_isa_dev); \
-} \
-module_init(__isa_driver##_init); \
-static void __exit __isa_driver##_exit(void) \
-{ \
- isa_unregister_driver(&(__isa_driver)); \
-} \
-module_exit(__isa_driver##_exit);
+module_isa_driver_init(__isa_driver, __num_isa_dev); \
+module_isa_driver_exit(__isa_driver)
+
+/**
+ * module_isa_driver_with_irq() - Helper macro for registering an ISA driver with irq
+ * @__isa_driver: isa_driver struct
+ * @__num_isa_dev: number of devices to register
+ * @__num_irq: number of IRQ to register
+ *
+ * Helper macro for ISA drivers with irq that do not do anything special in
+ * module init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init and module_exit.
+ */
+#define module_isa_driver_with_irq(__isa_driver, __num_isa_dev, __num_irq) \
+module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq); \
+module_isa_driver_exit(__isa_driver)
/**
* max_num_isa_dev() - Maximum possible number registered of an ISA device
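
Note: the refactor splits the old init/exit bodies into reusable module_isa_driver_init()/_exit()
pieces so the new with-irq variant can share them while adding the device/IRQ count check at module
init. A hypothetical driver using the new macro (all foo_* names are illustrative):

    #include <linux/isa.h>
    #include <linux/module.h>

    #define FOO_NUM_DEV 2
    #define FOO_NUM_IRQ 2

    static int foo_match(struct device *dev, unsigned int id)   { return 1; }
    static int foo_probe(struct device *dev, unsigned int id)   { return 0; }
    static void foo_remove(struct device *dev, unsigned int id) { }  /* returns void after this patch */

    static struct isa_driver foo_driver = {
            .match  = foo_match,
            .probe  = foo_probe,
            .remove = foo_remove,
            .driver = { .name = "foo" },
    };

    module_isa_driver_with_irq(foo_driver, FOO_NUM_DEV, FOO_NUM_IRQ);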
diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h
index 3c77bf9b1efd..dba18c95844b 100644
--- a/include/linux/isapnp.h
+++ b/include/linux/isapnp.h
@@ -1,22 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* ISA Plug & Play support
* Copyright (c) by Jaroslav Kysela <perex@suse.cz>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef LINUX_ISAPNP_H
@@ -90,9 +75,6 @@ static inline int isapnp_proc_done(void) { return 0; }
#endif
/* compat */
-struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
- struct pnp_card *from);
struct pnp_dev *pnp_find_dev(struct pnp_card *card,
unsigned short vendor,
unsigned short function,
@@ -107,9 +89,6 @@ static inline int isapnp_cfg_end(void) { return -ENODEV; }
static inline unsigned char isapnp_read_byte(unsigned char idx) { return 0xff; }
static inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; }
-static inline struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
- struct pnp_card *from) { return NULL; }
static inline struct pnp_dev *pnp_find_dev(struct pnp_card *card,
unsigned short vendor,
unsigned short function,
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index 10923d730486..5f244d3f1472 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Export the iSCSI boot info to userland via sysfs.
*
* Copyright (C) 2010 Red Hat, Inc. All rights reserved.
* Copyright (C) 2010 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _ISCSI_BOOT_SYSFS_
#define _ISCSI_BOOT_SYSFS_
diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h
index 605cc5c333d9..e2742748104d 100644
--- a/include/linux/iscsi_ibft.h
+++ b/include/linux/iscsi_ibft.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2007 Red Hat, Inc.
* by Peter Jones <pjones@redhat.com>
@@ -7,40 +8,35 @@
* by Konrad Rzeszutek <ketuzsezr@darnok.org>
*
* This code exposes the iSCSI Boot Format Table to userland via sysfs.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef ISCSI_IBFT_H
#define ISCSI_IBFT_H
-#include <linux/acpi.h>
+#include <linux/types.h>
/*
- * Logical location of iSCSI Boot Format Table.
- * If the value is NULL there is no iBFT on the machine.
+ * Physical location of iSCSI Boot Format Table.
+ * If the value is 0 there is no iBFT on the machine.
*/
-extern struct acpi_table_ibft *ibft_addr;
+extern phys_addr_t ibft_phys_addr;
+
+#ifdef CONFIG_ISCSI_IBFT_FIND
/*
* Routine used to find and reserve the iSCSI Boot Format Table. The
- * mapped address is set in the ibft_addr variable.
+ * physical address is set in the ibft_phys_addr variable.
*/
-#ifdef CONFIG_ISCSI_IBFT_FIND
-unsigned long find_ibft_region(unsigned long *sizep);
+void reserve_ibft_region(void);
+
+/*
+ * Physical bounds to search for the iSCSI Boot Format Table.
+ */
+#define IBFT_START 0x80000 /* 512kB */
+#define IBFT_END 0x100000 /* 1MB */
+
#else
-static inline unsigned long find_ibft_region(unsigned long *sizep)
-{
- *sizep = 0;
- return 0;
-}
+static inline void reserve_ibft_region(void) {}
#endif
#endif /* ISCSI_IBFT_H */
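
Note: the interface change replaces the find-and-return-address scheme with reserve_ibft_region(),
which scans the IBFT_START..IBFT_END window, reserves the table, and records its physical address
in ibft_phys_addr (left at 0 if no table is found); consumers map it themselves later. Sketch of
the early-boot call (the surrounding function is hypothetical -- the real call sites live in arch
setup code):

    #include <linux/init.h>
    #include <linux/iscsi_ibft.h>

    void __init hypothetical_setup(void)
    {
            reserve_ibft_region();  /* ibft_phys_addr stays 0 if absent */
    }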
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
deleted file mode 100644
index df97c8444f5d..000000000000
--- a/include/linux/isdn.h
+++ /dev/null
@@ -1,473 +0,0 @@
-/* $Id: isdn.h,v 1.125.2.3 2004/02/10 01:07:14 keil Exp $
- *
- * Main header for the Linux ISDN subsystem (linklevel).
- *
- * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#ifndef __ISDN_H__
-#define __ISDN_H__
-
-
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/major.h>
-#include <asm/io.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/fcntl.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/mutex.h>
-#include <uapi/linux/isdn.h>
-
-#define ISDN_TTY_MAJOR 43
-#define ISDN_TTYAUX_MAJOR 44
-#define ISDN_MAJOR 45
-
-/* The minor-devicenumbers for Channel 0 and 1 are used as arguments for
- * physical Channel-Mapping, so they MUST NOT be changed without changing
- * the correspondent code in isdn.c
- */
-
-#define ISDN_MINOR_B 0
-#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1)
-#define ISDN_MINOR_CTRL 64
-#define ISDN_MINOR_CTRLMAX (64 + (ISDN_MAX_CHANNELS-1))
-#define ISDN_MINOR_PPP 128
-#define ISDN_MINOR_PPPMAX (128 + (ISDN_MAX_CHANNELS-1))
-#define ISDN_MINOR_STATUS 255
-
-#ifdef CONFIG_ISDN_PPP
-
-#ifdef CONFIG_ISDN_PPP_VJ
-# include <net/slhc_vj.h>
-#endif
-
-#include <linux/ppp_defs.h>
-#include <linux/ppp-ioctl.h>
-
-#include <linux/isdn_ppp.h>
-#endif
-
-#ifdef CONFIG_ISDN_X25
-# include <linux/concap.h>
-#endif
-
-#include <linux/isdnif.h>
-
-#define ISDN_DRVIOCTL_MASK 0x7f /* Mask for Device-ioctl */
-
-/* Until now unused */
-#define ISDN_SERVICE_VOICE 1
-#define ISDN_SERVICE_AB 1<<1
-#define ISDN_SERVICE_X21 1<<2
-#define ISDN_SERVICE_G4 1<<3
-#define ISDN_SERVICE_BTX 1<<4
-#define ISDN_SERVICE_DFUE 1<<5
-#define ISDN_SERVICE_X25 1<<6
-#define ISDN_SERVICE_TTX 1<<7
-#define ISDN_SERVICE_MIXED 1<<8
-#define ISDN_SERVICE_FW 1<<9
-#define ISDN_SERVICE_GTEL 1<<10
-#define ISDN_SERVICE_BTXN 1<<11
-#define ISDN_SERVICE_BTEL 1<<12
-
-/* Macros checking plain usage */
-#define USG_NONE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NONE)
-#define USG_RAW(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_RAW)
-#define USG_MODEM(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM)
-#define USG_VOICE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE)
-#define USG_NET(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NET)
-#define USG_FAX(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_FAX)
-#define USG_OUTGOING(x) ((x & ISDN_USAGE_OUTGOING)==ISDN_USAGE_OUTGOING)
-#define USG_MODEMORVOICE(x) (((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) || \
- ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) )
-
-/* Timer-delays and scheduling-flags */
-#define ISDN_TIMER_RES 4 /* Main Timer-Resolution */
-#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 .2 sec */
-#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 1 sec */
-#define ISDN_TIMER_RINGING 5 /* tty RINGs = ISDN_TIMER_1SEC * this factor */
-#define ISDN_TIMER_KEEPINT 10 /* Cisco-Keepalive = ISDN_TIMER_1SEC * this factor */
-#define ISDN_TIMER_MODEMREAD 1
-#define ISDN_TIMER_MODEMPLUS 2
-#define ISDN_TIMER_MODEMRING 4
-#define ISDN_TIMER_MODEMXMIT 8
-#define ISDN_TIMER_NETDIAL 16
-#define ISDN_TIMER_NETHANGUP 32
-#define ISDN_TIMER_CARRIER 256 /* Wait for Carrier */
-#define ISDN_TIMER_FAST (ISDN_TIMER_MODEMREAD | ISDN_TIMER_MODEMPLUS | \
- ISDN_TIMER_MODEMXMIT)
-#define ISDN_TIMER_SLOW (ISDN_TIMER_MODEMRING | ISDN_TIMER_NETHANGUP | \
- ISDN_TIMER_NETDIAL | ISDN_TIMER_CARRIER)
-
-/* Timeout-Values for isdn_net_dial() */
-#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
-#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
-#define ISDN_TIMER_DTIMEOUT60 (60*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
-
-/* GLOBAL_FLAGS */
-#define ISDN_GLOBAL_STOPPED 1
-
-/*=================== Start of ip-over-ISDN stuff =========================*/
-
-/* Feature- and status-flags for a net-interface */
-#define ISDN_NET_CONNECTED 0x01 /* Bound to ISDN-Channel */
-#define ISDN_NET_SECURE 0x02 /* Accept calls from phonelist only */
-#define ISDN_NET_CALLBACK 0x04 /* activate callback */
-#define ISDN_NET_CBHUP 0x08 /* hangup before callback */
-#define ISDN_NET_CBOUT 0x10 /* remote machine does callback */
-
-#define ISDN_NET_MAGIC 0x49344C02 /* for paranoia-checking */
-
-/* Phone-list-element */
-typedef struct {
- void *next;
- char num[ISDN_MSNLEN];
-} isdn_net_phone;
-
-/*
- Principles when extending structures for generic encapsulation protocol
- ("concap") support:
- - Stuff which is hardware specific (here i4l-specific) goes in
- the netdev -> local structure (here: isdn_net_local)
- - Stuff which is encapsulation protocol specific goes in the structure
- which holds the linux device structure (here: isdn_net_device)
-*/
-
-/* Local interface-data */
-typedef struct isdn_net_local_s {
- ulong magic;
- struct net_device_stats stats; /* Ethernet Statistics */
- int isdn_device; /* Index to isdn-device */
- int isdn_channel; /* Index to isdn-channel */
- int ppp_slot; /* PPPD device slot number */
- int pre_device; /* Preselected isdn-device */
- int pre_channel; /* Preselected isdn-channel */
- int exclusive; /* If non-zero idx to reserved chan.*/
- int flags; /* Connection-flags */
- int dialretry; /* Counter for Dialout-retries */
- int dialmax; /* Max. Number of Dial-retries */
- int cbdelay; /* Delay before Callback starts */
- int dtimer; /* Timeout-counter for dialing */
- char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */
- u_char cbhup; /* Flag: Reject Call before Callback*/
- u_char dialstate; /* State for dialing */
- u_char p_encap; /* Packet encapsulation */
- /* 0 = Ethernet over ISDN */
- /* 1 = RAW-IP */
- /* 2 = IP with type field */
- u_char l2_proto; /* Layer-2-protocol */
- /* See ISDN_PROTO_L2..-constants in */
- /* isdnif.h */
- /* 0 = X75/LAPB with I-Frames */
- /* 1 = X75/LAPB with UI-Frames */
- /* 2 = X75/LAPB with BUI-Frames */
- /* 3 = HDLC */
- u_char l3_proto; /* Layer-3-protocol */
- /* See ISDN_PROTO_L3..-constants in */
- /* isdnif.h */
- /* 0 = Transparent */
- int huptimer; /* Timeout-counter for auto-hangup */
- int charge; /* Counter for charging units */
- ulong chargetime; /* Timer for Charging info */
- int hupflags; /* Flags for charge-unit-hangup: */
- /* bit0: chargeint is invalid */
- /* bit1: Getting charge-interval */
- /* bit2: Do charge-unit-hangup */
- /* bit3: Do hangup even on incoming */
- int outgoing; /* Flag: outgoing call */
- int onhtime; /* Time to keep link up */
- int chargeint; /* Interval between charge-infos */
- int onum; /* Flag: at least 1 outgoing number */
- int cps; /* current speed of this interface */
- int transcount; /* byte-counter for cps-calculation */
- int sqfull; /* Flag: netdev-queue overloaded */
- ulong sqfull_stamp; /* Start-Time of overload */
- ulong slavedelay; /* Dynamic bundling delaytime */
- int triggercps; /* BogoCPS needed for trigger slave */
- isdn_net_phone *phone[2]; /* List of remote-phonenumbers */
- /* phone[0] = Incoming Numbers */
- /* phone[1] = Outgoing Numbers */
- isdn_net_phone *dial; /* Pointer to dialed number */
- struct net_device *master; /* Ptr to Master device for slaves */
- struct net_device *slave; /* Ptr to Slave device for masters */
- struct isdn_net_local_s *next; /* Ptr to next link in bundle */
- struct isdn_net_local_s *last; /* Ptr to last link in bundle */
- struct isdn_net_dev_s *netdev; /* Ptr to netdev */
- struct sk_buff_head super_tx_queue; /* List of supervisory frames to */
- /* be transmitted asap */
- atomic_t frame_cnt; /* number of frames currently */
- /* queued in HL driver */
- /* Ptr to orig. hard_header_cache */
- spinlock_t xmit_lock; /* used to protect the xmit path of */
- /* a particular channel (including */
- /* the frame_cnt */
-
- int pppbind; /* ippp device for bindings */
- int dialtimeout; /* How long shall we try on dialing? (jiffies) */
- int dialwait; /* How long shall we wait after failed attempt? (jiffies) */
- ulong dialstarted; /* jiffies of first dialing-attempt */
- ulong dialwait_timer; /* jiffies of earliest next dialing-attempt */
- int huptimeout; /* How long will the connection be up? (seconds) */
-#ifdef CONFIG_ISDN_X25
- struct concap_device_ops *dops; /* callbacks used by encapsulator */
-#endif
- /* use an own struct for that in later versions */
- ulong cisco_myseq; /* Local keepalive seq. for Cisco */
- ulong cisco_mineseen; /* returned keepalive seq. from remote */
- ulong cisco_yourseq; /* Remote keepalive seq. for Cisco */
- int cisco_keepalive_period; /* keepalive period */
- ulong cisco_last_slarp_in; /* jiffie of last keepalive packet we received */
- char cisco_line_state; /* state of line according to keepalive packets */
- char cisco_debserint; /* debugging flag of cisco hdlc with slarp */
- struct timer_list cisco_timer;
- struct work_struct tqueue;
-} isdn_net_local;
-
-/* the interface itself */
-typedef struct isdn_net_dev_s {
- isdn_net_local *local;
- isdn_net_local *queue; /* circular list of all bundled
- channels, which are currently
- online */
- spinlock_t queue_lock; /* lock to protect queue */
- void *next; /* Pointer to next isdn-interface */
- struct net_device *dev; /* interface to upper levels */
-#ifdef CONFIG_ISDN_PPP
- ippp_bundle * pb; /* pointer to the common bundle structure
- * with the per-bundle data */
-#endif
-#ifdef CONFIG_ISDN_X25
- struct concap_proto *cprot; /* connection oriented encapsulation protocol */
-#endif
-
-} isdn_net_dev;
-
-/*===================== End of ip-over-ISDN stuff ===========================*/
-
-/*======================= Start of ISDN-tty stuff ===========================*/
-
-#define ISDN_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */
-#define ISDN_SERIAL_XMIT_SIZE 1024 /* Default bufsize for write */
-#define ISDN_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */
-
-#ifdef CONFIG_ISDN_AUDIO
-/* For using sk_buffs with audio we need some private variables
- * within each sk_buff. For this purpose, we declare a struct here,
- * and put it always at the private skb->cb data array. A few macros help
- * accessing the variables.
- */
-typedef struct _isdn_audio_data {
- unsigned short dle_count;
- unsigned char lock;
-} isdn_audio_data_t;
-
-#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdn_audio_data_t *)&skb->cb[0])->dle_count)
-#define ISDN_AUDIO_SKB_LOCK(skb) (((isdn_audio_data_t *)&skb->cb[0])->lock)
-#endif
-
-/* Private data of AT-command-interpreter */
-typedef struct atemu {
- u_char profile[ISDN_MODEM_NUMREG]; /* Modem-Regs. Profile 0 */
- u_char mdmreg[ISDN_MODEM_NUMREG]; /* Modem-Registers */
- char pmsn[ISDN_MSNLEN]; /* EAZ/MSNs Profile 0 */
- char msn[ISDN_MSNLEN]; /* EAZ/MSN */
- char plmsn[ISDN_LMSNLEN]; /* Listening MSNs Profile 0 */
- char lmsn[ISDN_LMSNLEN]; /* Listening MSNs */
- char cpn[ISDN_MSNLEN]; /* CalledPartyNumber on incoming call */
- char connmsg[ISDN_CMSGLEN]; /* CONNECT-Msg from HL-Driver */
-#ifdef CONFIG_ISDN_AUDIO
- u_char vpar[10]; /* Voice-parameters */
- int lastDLE; /* Flag for voice-coding: DLE seen */
-#endif
- int mdmcmdl; /* Length of Modem-Commandbuffer */
- int pluscount; /* Counter for +++ sequence */
- u_long lastplus; /* Timestamp of last + */
- int carrierwait; /* Seconds of carrier waiting */
- char mdmcmd[255]; /* Modem-Commandbuffer */
- unsigned int charge; /* Charge units of current connection */
-} atemu;
-
-/* Private data (similar to async_struct in <linux/serial.h>) */
-typedef struct modem_info {
- int magic;
- struct tty_port port;
- int x_char; /* xon/xoff character */
- int mcr; /* Modem control register */
- int msr; /* Modem status register */
- int lsr; /* Line status register */
- int line;
- int online; /* 1 = B-Channel is up, drop data */
- /* 2 = B-Channel is up, deliver d.*/
- int dialing; /* Dial in progress or ATA */
- int closing;
- int rcvsched; /* Receive needs schedule */
- int isdn_driver; /* Index to isdn-driver */
- int isdn_channel; /* Index to isdn-channel */
- int drv_index; /* Index to dev->usage */
- int ncarrier; /* Flag: schedule NO CARRIER */
- unsigned char last_cause[8]; /* Last cause message */
- unsigned char last_num[ISDN_MSNLEN];
- /* Last phone-number */
- unsigned char last_l2; /* Last layer-2 protocol */
- unsigned char last_si; /* Last service */
- unsigned char last_lhup; /* Last hangup local? */
- unsigned char last_dir; /* Last direction (in or out) */
- struct timer_list nc_timer; /* Timer for delayed NO CARRIER */
- int send_outstanding;/* # of outstanding send-requests */
- int xmit_size; /* max. # of chars in xmit_buf */
- int xmit_count; /* # of chars in xmit_buf */
- struct sk_buff_head xmit_queue; /* transmit queue */
- atomic_t xmit_lock; /* Semaphore for isdn_tty_write */
-#ifdef CONFIG_ISDN_AUDIO
- int vonline; /* Voice-channel status */
- /* Bit 0 = recording */
- /* Bit 1 = playback */
- /* Bit 2 = playback, DLE-ETX seen */
- struct sk_buff_head dtmf_queue; /* queue for dtmf results */
- void *adpcms; /* state for adpcm decompression */
- void *adpcmr; /* state for adpcm compression */
- void *dtmf_state; /* state for dtmf decoder */
- void *silence_state; /* state for silence detection */
-#endif
-#ifdef CONFIG_ISDN_TTY_FAX
- struct T30_s *fax; /* T30 Fax Group 3 data/interface */
- int faxonline; /* Fax-channel status */
-#endif
- atemu emu; /* AT-emulator data */
- spinlock_t readlock;
-} modem_info;
-
-#define ISDN_MODEM_WINSIZE 8
-
-/* Description of one ISDN-tty */
-typedef struct _isdn_modem {
- int refcount; /* Number of opens */
- struct tty_driver *tty_modem; /* tty-device */
- struct tty_struct *modem_table[ISDN_MAX_CHANNELS]; /* ?? copied from Orig */
- struct ktermios *modem_termios[ISDN_MAX_CHANNELS];
- struct ktermios *modem_termios_locked[ISDN_MAX_CHANNELS];
- modem_info info[ISDN_MAX_CHANNELS]; /* Private data */
-} isdn_modem_t;
-
-/*======================= End of ISDN-tty stuff ============================*/
-
-/*======================== Start of V.110 stuff ============================*/
-#define V110_BUFSIZE 1024
-
-typedef struct {
- int nbytes; /* 1 Matrixbyte -> nbytes in stream */
- int nbits; /* Number of used bits in streambyte */
- unsigned char key; /* Bitmask in stream eg. 11 (nbits=2) */
- int decodelen; /* Amount of data in decodebuf */
- int SyncInit; /* Number of sync frames to send */
- unsigned char *OnlineFrame; /* Precalculated V110 idle frame */
- unsigned char *OfflineFrame; /* Precalculated V110 sync Frame */
- int framelen; /* Length of frames */
- int skbuser; /* Number of unacked userdata skbs */
- int skbidle; /* Number of unacked idle/sync skbs */
- int introducer; /* Local vars for decoder */
- int dbit;
- unsigned char b;
- int skbres; /* space to reserve in outgoing skb */
- int maxsize; /* maxbufsize of lowlevel driver */
- unsigned char *encodebuf; /* temporary buffer for encoding */
- unsigned char decodebuf[V110_BUFSIZE]; /* incomplete V110 matrices */
-} isdn_v110_stream;
-
-/*========================= End of V.110 stuff =============================*/
-
-/*======================= Start of general stuff ===========================*/
-
-typedef struct {
- char *next;
- char *private;
-} infostruct;
-
-#define DRV_FLAG_RUNNING 1
-#define DRV_FLAG_REJBUS 2
-#define DRV_FLAG_LOADED 4
-
-/* Description of hardware-level-driver */
-typedef struct _isdn_driver {
- ulong online; /* Channel-Online flags */
- ulong flags; /* Misc driver Flags */
- int locks; /* Number of locks for this driver */
- int channels; /* Number of channels */
- wait_queue_head_t st_waitq; /* Wait-Queue for status-read's */
- int maxbufsize; /* Maximum Buffersize supported */
- unsigned long pktcount; /* Until now: unused */
- int stavail; /* Chars avail on Status-device */
- isdn_if *interface; /* Interface to driver */
- int *rcverr; /* Error-counters for B-Ch.-receive */
- int *rcvcount; /* Byte-counters for B-Ch.-receive */
-#ifdef CONFIG_ISDN_AUDIO
- unsigned long DLEflag; /* Flags: Insert DLE at next read */
-#endif
- struct sk_buff_head *rpqueue; /* Pointers to start of Rcv-Queue */
- wait_queue_head_t *rcv_waitq; /* Wait-Queues for B-Channel-Reads */
- wait_queue_head_t *snd_waitq; /* Wait-Queue for B-Channel-Send's */
- char msn2eaz[10][ISDN_MSNLEN]; /* Mapping-Table MSN->EAZ */
-} isdn_driver_t;
-
-/* Main driver-data */
-typedef struct isdn_devt {
- struct module *owner;
- spinlock_t lock;
- unsigned short flags; /* Bitmapped Flags: */
- int drivers; /* Current number of drivers */
- int channels; /* Current number of channels */
- int net_verbose; /* Verbose-Flag */
- int modempoll; /* Flag: tty-read active */
- spinlock_t timerlock;
- int tflags; /* Timer-Flags: */
- /* see ISDN_TIMER_..defines */
- int global_flags;
- infostruct *infochain; /* List of open info-devs. */
- wait_queue_head_t info_waitq; /* Wait-Queue for isdninfo */
- struct timer_list timer; /* Misc.-function Timer */
- int chanmap[ISDN_MAX_CHANNELS]; /* Map minor->device-channel */
- int drvmap[ISDN_MAX_CHANNELS]; /* Map minor->driver-index */
- int usage[ISDN_MAX_CHANNELS]; /* Used by tty/ip/voice */
- char num[ISDN_MAX_CHANNELS][ISDN_MSNLEN];
- /* Remote number of active ch.*/
- int m_idx[ISDN_MAX_CHANNELS]; /* Index for mdm.... */
- isdn_driver_t *drv[ISDN_MAX_DRIVERS]; /* Array of drivers */
- isdn_net_dev *netdev; /* Linked list of net-if's */
- char drvid[ISDN_MAX_DRIVERS][20];/* Driver-ID */
- struct task_struct *profd; /* For iprofd */
- isdn_modem_t mdm; /* tty-driver-data */
- isdn_net_dev *rx_netdev[ISDN_MAX_CHANNELS]; /* rx netdev-pointers */
- isdn_net_dev *st_netdev[ISDN_MAX_CHANNELS]; /* stat netdev-pointers */
- ulong ibytes[ISDN_MAX_CHANNELS]; /* Statistics incoming bytes */
- ulong obytes[ISDN_MAX_CHANNELS]; /* Statistics outgoing bytes */
- int v110emu[ISDN_MAX_CHANNELS]; /* V.110 emulator-mode 0=none */
- atomic_t v110use[ISDN_MAX_CHANNELS]; /* Usage-Semaphore for stream */
- isdn_v110_stream *v110[ISDN_MAX_CHANNELS]; /* V.110 private data */
- struct mutex mtx; /* serialize list access*/
- unsigned long global_features;
-} isdn_dev;
-
-extern isdn_dev *dev;
-
-
-#endif /* __ISDN_H__ */
diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h
index 11b57c485854..12be09b6883b 100644
--- a/include/linux/isdn/capilli.h
+++ b/include/linux/isdn/capilli.h
@@ -50,7 +50,7 @@ struct capi_ctr {
u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb);
char *(*procinfo)(struct capi_ctr *);
- const struct file_operations *proc_fops;
+ int (*proc_show)(struct seq_file *, void *);
/* filled in before calling ready callback */
u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */
@@ -69,7 +69,6 @@ struct capi_ctr {
unsigned short state; /* controller state */
int blocked; /* output blocked */
int traceflag; /* capi trace */
- wait_queue_head_t state_wait_queue;
struct proc_dir_entry *procent;
char procfn[128];
@@ -80,8 +79,6 @@ int detach_capi_ctr(struct capi_ctr *);
void capi_ctr_ready(struct capi_ctr * card);
void capi_ctr_down(struct capi_ctr * card);
-void capi_ctr_suspend_output(struct capi_ctr * card);
-void capi_ctr_resume_output(struct capi_ctr * card);
void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb);
// ---------------------------------------------------------------------------
@@ -91,23 +88,8 @@ struct capi_driver {
char name[32]; /* driver name */
char revision[32];
- int (*add_card)(struct capi_driver *driver, capicardparams *data);
-
/* management information for kcapi */
struct list_head list;
};
-void register_capi_driver(struct capi_driver *driver);
-void unregister_capi_driver(struct capi_driver *driver);
-
-// ---------------------------------------------------------------------------
-// library functions for use by hardware controller drivers
-
-void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize);
-void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci);
-void capilib_release_appl(struct list_head *head, u16 applid);
-void capilib_release(struct list_head *head);
-void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
-u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
-
#endif /* __CAPILLI_H__ */
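
Note: replacing proc_fops with a bare proc_show callback matches the tree-wide single_open()-style
conversions: the core creates the /proc entry itself and the driver only formats output. A sketch
of the new style (assuming, as in similar conversions, that the seq_file's private pointer is the
capi_ctr):

    #include <linux/seq_file.h>
    #include <linux/isdn/capilli.h>

    static int my_ctr_proc_show(struct seq_file *m, void *v)
    {
            struct capi_ctr *ctr = m->private;      /* assumption: set up by kcapi */

            seq_printf(m, "%-16s %s\n", "name", ctr->name);
            return 0;
    }

A driver would then hook this up as .proc_show = my_ctr_proc_show in its struct capi_ctr.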
diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h
index 44bd6046e6e2..953fd500dff7 100644
--- a/include/linux/isdn/capiutil.h
+++ b/include/linux/isdn/capiutil.h
@@ -57,460 +57,4 @@ static inline void capimsg_setu32(void *m, int off, __u32 val)
#define CAPIMSG_SETCONTROL(m, contr) capimsg_setu32(m, 8, contr)
#define CAPIMSG_SETDATALEN(m, len) capimsg_setu16(m, 16, len)
-/*----- basic-type definitions -----*/
-
-typedef __u8 *_cstruct;
-
-typedef enum {
- CAPI_COMPOSE,
- CAPI_DEFAULT
-} _cmstruct;
-
-/*
- The _cmsg structure contains all possible CAPI 2.0 parameter.
- All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE
- assembles the parameter and builds CAPI2.0 conform messages.
- CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the
- parameter in the _cmsg structure
- */
-
-typedef struct {
- /* Header */
- __u16 ApplId;
- __u8 Command;
- __u8 Subcommand;
- __u16 Messagenumber;
-
- /* Parameter */
- union {
- __u32 adrController;
- __u32 adrPLCI;
- __u32 adrNCCI;
- } adr;
-
- _cmstruct AdditionalInfo;
- _cstruct B1configuration;
- __u16 B1protocol;
- _cstruct B2configuration;
- __u16 B2protocol;
- _cstruct B3configuration;
- __u16 B3protocol;
- _cstruct BC;
- _cstruct BChannelinformation;
- _cmstruct BProtocol;
- _cstruct CalledPartyNumber;
- _cstruct CalledPartySubaddress;
- _cstruct CallingPartyNumber;
- _cstruct CallingPartySubaddress;
- __u32 CIPmask;
- __u32 CIPmask2;
- __u16 CIPValue;
- __u32 Class;
- _cstruct ConnectedNumber;
- _cstruct ConnectedSubaddress;
- __u32 Data;
- __u16 DataHandle;
- __u16 DataLength;
- _cstruct FacilityConfirmationParameter;
- _cstruct Facilitydataarray;
- _cstruct FacilityIndicationParameter;
- _cstruct FacilityRequestParameter;
- __u16 FacilitySelector;
- __u16 Flags;
- __u32 Function;
- _cstruct HLC;
- __u16 Info;
- _cstruct InfoElement;
- __u32 InfoMask;
- __u16 InfoNumber;
- _cstruct Keypadfacility;
- _cstruct LLC;
- _cstruct ManuData;
- __u32 ManuID;
- _cstruct NCPI;
- __u16 Reason;
- __u16 Reason_B3;
- __u16 Reject;
- _cstruct Useruserdata;
-
- /* intern */
- unsigned l, p;
- unsigned char *par;
- __u8 *m;
-
- /* buffer to construct message */
- __u8 buf[180];
-
-} _cmsg;
-
-/*
- * capi_cmsg2message() assembles the parameter from _cmsg to a CAPI 2.0
- * conform message
- */
-unsigned capi_cmsg2message(_cmsg * cmsg, __u8 * msg);
-
-/*
- * capi_message2cmsg disassembles a CAPI message an writes the parameter
- * into _cmsg for easy access
- */
-unsigned capi_message2cmsg(_cmsg * cmsg, __u8 * msg);
-
-/*
- * capi_cmsg_header() fills the _cmsg structure with default values, so only
- * parameter with non default values must be changed before sending the
- * message.
- */
-unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId,
- __u8 _Command, __u8 _Subcommand,
- __u16 _Messagenumber, __u32 _Controller);
-
-/*-----------------------------------------------------------------------*/
-
-/*
- * Debugging / Tracing functions
- */
-
-char *capi_cmd2str(__u8 cmd, __u8 subcmd);
-
-typedef struct {
- u_char *buf;
- u_char *p;
- size_t size;
- size_t pos;
-} _cdebbuf;
-
-#define CDEBUG_SIZE 1024
-#define CDEBUG_GSIZE 4096
-
-void cdebbuf_free(_cdebbuf *cdb);
-int cdebug_init(void);
-void cdebug_exit(void);
-
-_cdebbuf *capi_cmsg2str(_cmsg *cmsg);
-_cdebbuf *capi_message2str(__u8 *msg);
-
-/*-----------------------------------------------------------------------*/
-
-static inline void capi_cmsg_answer(_cmsg * cmsg)
-{
- cmsg->Subcommand |= 0x01;
-}
-
-/*-----------------------------------------------------------------------*/
-
-static inline void capi_fill_CONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct NCPI)
-{
- capi_cmsg_header(cmsg, ApplId, 0x82, 0x80, Messagenumber, adr);
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_FACILITY_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 FacilitySelector,
- _cstruct FacilityRequestParameter)
-{
- capi_cmsg_header(cmsg, ApplId, 0x80, 0x80, Messagenumber, adr);
- cmsg->FacilitySelector = FacilitySelector;
- cmsg->FacilityRequestParameter = FacilityRequestParameter;
-}
-
-static inline void capi_fill_INFO_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct CalledPartyNumber,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
- capi_cmsg_header(cmsg, ApplId, 0x08, 0x80, Messagenumber, adr);
- cmsg->CalledPartyNumber = CalledPartyNumber;
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_LISTEN_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 InfoMask,
- __u32 CIPmask,
- __u32 CIPmask2,
- _cstruct CallingPartyNumber,
- _cstruct CallingPartySubaddress)
-{
- capi_cmsg_header(cmsg, ApplId, 0x05, 0x80, Messagenumber, adr);
- cmsg->InfoMask = InfoMask;
- cmsg->CIPmask = CIPmask;
- cmsg->CIPmask2 = CIPmask2;
- cmsg->CallingPartyNumber = CallingPartyNumber;
- cmsg->CallingPartySubaddress = CallingPartySubaddress;
-}
-
-static inline void capi_fill_ALERT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
- capi_cmsg_header(cmsg, ApplId, 0x01, 0x80, Messagenumber, adr);
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_CONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 CIPValue,
- _cstruct CalledPartyNumber,
- _cstruct CallingPartyNumber,
- _cstruct CalledPartySubaddress,
- _cstruct CallingPartySubaddress,
- __u16 B1protocol,
- __u16 B2protocol,
- __u16 B3protocol,
- _cstruct B1configuration,
- _cstruct B2configuration,
- _cstruct B3configuration,
- _cstruct BC,
- _cstruct LLC,
- _cstruct HLC,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x02, 0x80, Messagenumber, adr);
- cmsg->CIPValue = CIPValue;
- cmsg->CalledPartyNumber = CalledPartyNumber;
- cmsg->CallingPartyNumber = CallingPartyNumber;
- cmsg->CalledPartySubaddress = CalledPartySubaddress;
- cmsg->CallingPartySubaddress = CallingPartySubaddress;
- cmsg->B1protocol = B1protocol;
- cmsg->B2protocol = B2protocol;
- cmsg->B3protocol = B3protocol;
- cmsg->B1configuration = B1configuration;
- cmsg->B2configuration = B2configuration;
- cmsg->B3configuration = B3configuration;
- cmsg->BC = BC;
- cmsg->LLC = LLC;
- cmsg->HLC = HLC;
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_DATA_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 Data,
- __u16 DataLength,
- __u16 DataHandle,
- __u16 Flags)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x86, 0x80, Messagenumber, adr);
- cmsg->Data = Data;
- cmsg->DataLength = DataLength;
- cmsg->DataHandle = DataHandle;
- cmsg->Flags = Flags;
-}
-
-static inline void capi_fill_DISCONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x04, 0x80, Messagenumber, adr);
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_DISCONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct NCPI)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x84, 0x80, Messagenumber, adr);
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_MANUFACTURER_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 ManuID,
- __u32 Class,
- __u32 Function,
- _cstruct ManuData)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0xff, 0x80, Messagenumber, adr);
- cmsg->ManuID = ManuID;
- cmsg->Class = Class;
- cmsg->Function = Function;
- cmsg->ManuData = ManuData;
-}
-
-static inline void capi_fill_RESET_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct NCPI)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x87, 0x80, Messagenumber, adr);
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_SELECT_B_PROTOCOL_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 B1protocol,
- __u16 B2protocol,
- __u16 B3protocol,
- _cstruct B1configuration,
- _cstruct B2configuration,
- _cstruct B3configuration)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x41, 0x80, Messagenumber, adr);
- cmsg->B1protocol = B1protocol;
- cmsg->B2protocol = B2protocol;
- cmsg->B3protocol = B3protocol;
- cmsg->B1configuration = B1configuration;
- cmsg->B2configuration = B2configuration;
- cmsg->B3configuration = B3configuration;
-}
-
-static inline void capi_fill_CONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 Reject,
- __u16 B1protocol,
- __u16 B2protocol,
- __u16 B3protocol,
- _cstruct B1configuration,
- _cstruct B2configuration,
- _cstruct B3configuration,
- _cstruct ConnectedNumber,
- _cstruct ConnectedSubaddress,
- _cstruct LLC,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
- capi_cmsg_header(cmsg, ApplId, 0x02, 0x83, Messagenumber, adr);
- cmsg->Reject = Reject;
- cmsg->B1protocol = B1protocol;
- cmsg->B2protocol = B2protocol;
- cmsg->B3protocol = B3protocol;
- cmsg->B1configuration = B1configuration;
- cmsg->B2configuration = B2configuration;
- cmsg->B3configuration = B3configuration;
- cmsg->ConnectedNumber = ConnectedNumber;
- cmsg->ConnectedSubaddress = ConnectedSubaddress;
- cmsg->LLC = LLC;
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_CONNECT_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x03, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_CONNECT_B3_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x83, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_CONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 Reject,
- _cstruct NCPI)
-{
- capi_cmsg_header(cmsg, ApplId, 0x82, 0x83, Messagenumber, adr);
- cmsg->Reject = Reject;
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_CONNECT_B3_T90_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x88, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_DATA_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 DataHandle)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x86, 0x83, Messagenumber, adr);
- cmsg->DataHandle = DataHandle;
-}
-
-static inline void capi_fill_DISCONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x84, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_DISCONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x04, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_FACILITY_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 FacilitySelector)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x80, 0x83, Messagenumber, adr);
- cmsg->FacilitySelector = FacilitySelector;
-}
-
-static inline void capi_fill_INFO_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x08, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_MANUFACTURER_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 ManuID,
- __u32 Class,
- __u32 Function,
- _cstruct ManuData)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0xff, 0x83, Messagenumber, adr);
- cmsg->ManuID = ManuID;
- cmsg->Class = Class;
- cmsg->Function = Function;
- cmsg->ManuData = ManuData;
-}
-
-static inline void capi_fill_RESET_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x87, 0x83, Messagenumber, adr);
-}
-
#endif /* __CAPIUTIL_H__ */
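
Note: after this removal only the flat capimsg_setu16()/capimsg_setu32() accessors and the
CAPIMSG_SET* wrappers survive; the _cmsg assembly layer apparently had no remaining users once the
old isdn4linux code above was deleted. A minimal sketch of patching a raw CAPI message buffer
(the function name is hypothetical):

    #include <linux/isdn/capiutil.h>

    static void stamp_message(u8 *msg, u32 contr, u16 datalen)
    {
            CAPIMSG_SETCONTROL(msg, contr);         /* u32 at offset 8 */
            CAPIMSG_SETDATALEN(msg, datalen);       /* u16 at offset 16 */
    }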
diff --git a/include/linux/isdn/hdlc.h b/include/linux/isdn/hdlc.h
deleted file mode 100644
index 96521370c782..000000000000
--- a/include/linux/isdn/hdlc.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * hdlc.h -- General purpose ISDN HDLC decoder.
- *
- * Implementation of a HDLC decoder/encoder in software.
- * Necessary because some ISDN devices don't have HDLC
- * controllers.
- *
- * Copyright (C)
- * 2009 Karsten Keil <keil@b1-systems.de>
- * 2002 Wolfgang Mües <wolfgang@iksw-muees.de>
- * 2001 Frode Isaksen <fisaksen@bewan.com>
- * 2001 Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __ISDNHDLC_H__
-#define __ISDNHDLC_H__
-
-struct isdnhdlc_vars {
- int bit_shift;
- int hdlc_bits1;
- int data_bits;
- int ffbit_shift; /* encoding only */
- int state;
- int dstpos;
-
- u16 crc;
-
- u8 cbin;
- u8 shift_reg;
- u8 ffvalue;
-
- /* set if transferring data */
- u32 data_received:1;
- /* set if D channel (send idle instead of flags) */
- u32 dchannel:1;
- /* set if 56K adaptation */
- u32 do_adapt56:1;
- /* set if in closing phase (need to send CRC + flag) */
- u32 do_closing:1;
- /* set if data is bitreverse */
- u32 do_bitreverse:1;
-};
-
-/* Feature Flags */
-#define HDLC_56KBIT 0x01
-#define HDLC_DCHANNEL 0x02
-#define HDLC_BITREVERSE 0x04
-
-/*
- The return value from isdnhdlc_decode is
- the frame length, 0 if no complete frame was decoded,
- or a negative error number
-*/
-#define HDLC_FRAMING_ERROR 1
-#define HDLC_CRC_ERROR 2
-#define HDLC_LENGTH_ERROR 3
-
-extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features);
-
-extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src,
- int slen, int *count, u8 *dst, int dsize);
-
-extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features);
-
-extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src,
- u16 slen, int *count, u8 *dst, int dsize);
-
-#endif /* __ISDNHDLC_H__ */
diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h
deleted file mode 100644
index 19ab361f9f07..000000000000
--- a/include/linux/isdn_divertif.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* $Id: isdn_divertif.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp $
- *
- * Header for the diversion supplementary interface for i4l.
- *
- * Author Werner Cornelius (werner@titro.de)
- * Copyright by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#ifndef _LINUX_ISDN_DIVERTIF_H
-#define _LINUX_ISDN_DIVERTIF_H
-
-#include <linux/isdnif.h>
-#include <linux/types.h>
-#include <uapi/linux/isdn_divertif.h>
-
-/***************************************************************/
-/* structure exchanging data between isdn hl and divert module */
-/***************************************************************/
-typedef struct
- { ulong if_magic; /* magic info and version */
- int cmd; /* command */
- int (*stat_callback)(isdn_ctrl *); /* supplied by divert module when calling */
- int (*ll_cmd)(isdn_ctrl *); /* supplied by hl on return */
- char * (*drv_to_name)(int); /* map a driver id to name, supplied by hl */
- int (*name_to_drv)(char *); /* map a driver id to name, supplied by hl */
- } isdn_divert_if;
-
-/*********************/
-/* function register */
-/*********************/
-extern int DIVERT_REG_NAME(isdn_divert_if *);
-#endif /* _LINUX_ISDN_DIVERTIF_H */
diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
deleted file mode 100644
index a0070c6dfaf8..000000000000
--- a/include/linux/isdn_ppp.h
+++ /dev/null
@@ -1,194 +0,0 @@
-/* Linux ISDN subsystem, sync PPP, interface to ipppd
- *
- * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- * Copyright 2000-2002 by Kai Germaschewski (kai@germaschewski.name)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#ifndef _LINUX_ISDN_PPP_H
-#define _LINUX_ISDN_PPP_H
-
-
-
-
-#ifdef CONFIG_IPPP_FILTER
-#include <linux/filter.h>
-#endif
-#include <uapi/linux/isdn_ppp.h>
-
-#define DECOMP_ERR_NOMEM (-10)
-
-#define MP_END_FRAG 0x40
-#define MP_BEGIN_FRAG 0x80
-
-#define MP_MAX_QUEUE_LEN 16
-
-/*
- * We need a way for the decompressor to influence the generation of CCP
- * Reset-Requests in a variety of ways. The decompressor is already returning
- * a lot of information (generated skb length, error conditions) so we use
- * another parameter. This parameter is a pointer to a structure which is
- * to be marked valid by the decompressor and only in this case is ever used.
- * Furthermore, the only case where this data is used is when the decom-
- * pressor returns DECOMP_ERROR.
- *
- * We use this same struct for the reset entry of the compressor to commu-
- * nicate to its caller how to deal with sending of a Reset Ack. In this
- * case, expra is not used, but other options still apply (suppressing
- * sending with rsend, appending arbitrary data, etc).
- */
-
-#define IPPP_RESET_MAXDATABYTES 32
-
-struct isdn_ppp_resetparams {
- unsigned char valid:1; /* rw Is this structure filled at all ? */
- unsigned char rsend:1; /* rw Should we send one at all ? */
- unsigned char idval:1; /* rw Is the id field valid ? */
- unsigned char dtval:1; /* rw Is the data field valid ? */
- unsigned char expra:1; /* rw Is an Ack expected for this Req ? */
- unsigned char id; /* wo Send CCP ResetReq with this id */
- unsigned short maxdlen; /* ro Max bytes to be stored in data field */
- unsigned short dlen; /* rw Bytes stored in data field */
- unsigned char *data; /* wo Data for ResetReq info field */
-};
-
-/*
- * this is an 'old friend' from ppp-comp.h under a new name
- * check the original include for more information
- */
-struct isdn_ppp_compressor {
- struct isdn_ppp_compressor *next, *prev;
- struct module *owner;
- int num; /* CCP compression protocol number */
-
- void *(*alloc) (struct isdn_ppp_comp_data *);
- void (*free) (void *state);
- int (*init) (void *state, struct isdn_ppp_comp_data *,
- int unit,int debug);
-
- /* The reset entry needs to get more exact information about the
- ResetReq or ResetAck it was called with. The parameters are
- obvious. If reset is called without a Req or Ack frame which
- could be handed into it, code MUST be set to 0. Using rsparm,
- the reset entry can control if and how a ResetAck is returned. */
-
- void (*reset) (void *state, unsigned char code, unsigned char id,
- unsigned char *data, unsigned len,
- struct isdn_ppp_resetparams *rsparm);
-
- int (*compress) (void *state, struct sk_buff *in,
- struct sk_buff *skb_out, int proto);
-
- int (*decompress) (void *state,struct sk_buff *in,
- struct sk_buff *skb_out,
- struct isdn_ppp_resetparams *rsparm);
-
- void (*incomp) (void *state, struct sk_buff *in,int proto);
- void (*stat) (void *state, struct compstat *stats);
-};
-
-extern int isdn_ppp_register_compressor(struct isdn_ppp_compressor *);
-extern int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *);
-extern int isdn_ppp_dial_slave(char *);
-extern int isdn_ppp_hangup_slave(char *);
-
-typedef struct {
- unsigned long seqerrs;
- unsigned long frame_drops;
- unsigned long overflows;
- unsigned long max_queue_len;
-} isdn_mppp_stats;
-
-typedef struct {
- int mp_mrru; /* unused */
- struct sk_buff * frags; /* fragments sl list -- use skb->next */
- long frames; /* number of frames in the frame list */
- unsigned int seq; /* last processed packet seq #: any packets
- * with smaller seq # will be dropped
- * unconditionally */
- spinlock_t lock;
- int ref_ct;
- /* statistics */
- isdn_mppp_stats stats;
-} ippp_bundle;
-
-#define NUM_RCV_BUFFS 64
-
-struct ippp_buf_queue {
- struct ippp_buf_queue *next;
- struct ippp_buf_queue *last;
- char *buf; /* NULL here indicates end of queue */
- int len;
-};
-
-/* The data structure for one CCP reset transaction */
-enum ippp_ccp_reset_states {
- CCPResetIdle,
- CCPResetSentReq,
- CCPResetRcvdReq,
- CCPResetSentAck,
- CCPResetRcvdAck
-};
-
-struct ippp_ccp_reset_state {
- enum ippp_ccp_reset_states state; /* State of this transaction */
- struct ippp_struct *is; /* Backlink to device stuff */
- unsigned char id; /* Backlink id index */
- unsigned char ta:1; /* The timer is active (flag) */
- unsigned char expra:1; /* We expect a ResetAck at all */
- int dlen; /* Databytes stored in data */
- struct timer_list timer; /* For timeouts/retries */
- /* This is a hack but seems sufficient for the moment. We do not want
- to have this be yet another allocation for some bytes, it is more
- memory management overhead than the whole mess is worth. */
- unsigned char data[IPPP_RESET_MAXDATABYTES];
-};
-
-/* The data structure keeping track of the currently outstanding CCP Reset
- transactions. */
-struct ippp_ccp_reset {
- struct ippp_ccp_reset_state *rs[256]; /* One per possible id */
- unsigned char lastid; /* Last id allocated by the engine */
-};
-
-struct ippp_struct {
- struct ippp_struct *next_link;
- int state;
- spinlock_t buflock;
- struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */
- struct ippp_buf_queue *first; /* pointer to (current) first packet */
- struct ippp_buf_queue *last; /* pointer to (current) last used packet in queue */
- wait_queue_head_t wq;
- struct task_struct *tk;
- unsigned int mpppcfg;
- unsigned int pppcfg;
- unsigned int mru;
- unsigned int mpmru;
- unsigned int mpmtu;
- unsigned int maxcid;
- struct isdn_net_local_s *lp;
- int unit;
- int minor;
- unsigned int last_link_seqno;
- long mp_seqno;
-#ifdef CONFIG_ISDN_PPP_VJ
- unsigned char *cbuf;
- struct slcompress *slcomp;
-#endif
-#ifdef CONFIG_IPPP_FILTER
- struct bpf_prog *pass_filter; /* filter for packets to pass */
- struct bpf_prog *active_filter; /* filter for pkts to reset idle */
-#endif
- unsigned long debug;
- struct isdn_ppp_compressor *compressor,*decompressor;
- struct isdn_ppp_compressor *link_compressor,*link_decompressor;
- void *decomp_stat,*comp_stat,*link_decomp_stat,*link_comp_stat;
- struct ippp_ccp_reset *reset; /* Allocated on demand, may never be needed */
- unsigned long compflags;
-};
-
-#endif /* _LINUX_ISDN_PPP_H */
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h
deleted file mode 100644
index 8d80fdc68647..000000000000
--- a/include/linux/isdnif.h
+++ /dev/null
@@ -1,505 +0,0 @@
-/* $Id: isdnif.h,v 1.43.2.2 2004/01/12 23:08:35 keil Exp $
- *
- * Linux ISDN subsystem
- * Definition of the interface between the subsystem and its low-level drivers.
- *
- * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#ifndef __ISDNIF_H__
-#define __ISDNIF_H__
-
-
-#include <linux/skbuff.h>
-#include <uapi/linux/isdnif.h>
-
-/***************************************************************************/
-/* Extensions made by Werner Cornelius (werner@ikt.de) */
-/* */
-/* The proceed command holds a incoming call in a state to leave processes */
-/* enough time to check whether ist should be accepted. */
-/* The PROT_IO Command extends the interface to make protocol dependent */
-/* features available (call diversion, call waiting...). */
-/* */
-/* The PROT_IO Command is executed with the desired driver id and the arg */
-/* parameter coded as follows: */
-/* The lower 8 bits of arg contain the desired protocol from ISDN_PTYPE */
-/* definitions. The upper 24 bits represent the protocol specific cmd/stat.*/
-/* Any additional data is protocol and command specific. */
-/* This mechanism also applies to the statcallb callback STAT_PROT. */
-/* */
-/* This suggested extension permits an easy expansion of protocol specific */
-/* handling. Extensions may be added at any time without changing the HL */
-/* driver code and not getting conflicts without certifications. */
-/* The well known CAPI 2.0 interface handles such extensions in a similar */
-/* way. Perhaps a protocol specific module may be added and separately */
-/* loaded and linked to the basic isdn module for handling. */
-/***************************************************************************/
-
-/*****************/
-/* DSS1 commands */
-/*****************/
-#define DSS1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_EURO) /* invoke a supplementary service */
-#define DSS1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_EURO) /* abort a invoke cmd */
-
-/*******************************/
-/* DSS1 Status callback values */
-/*******************************/
-#define DSS1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_EURO) /* Result for invocation */
-#define DSS1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_EURO) /* Error Return for invocation */
-#define DSS1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_EURO) /* Deliver invoke broadcast info */
-
-
-/*********************************************************************/
-/* structures for DSS1 commands and callback */
-/* */
-/* An action is invoked by sending a DSS1_CMD_INVOKE. The ll_id, proc*/
-/* timeout, datalen and data fields must be set before calling. */
-/* */
-/* The return value is a positive hl_id value also delivered in the */
-/* hl_id field. A value of zero signals no more left hl_id capacitys.*/
-/* A negative return value signals errors in LL. So if the return */
-/* value is <= 0 no action in LL will be taken -> request ignored */
-/* */
-/* The timeout field must be filled with a positive value specifying */
-/* the amount of time the INVOKED process waits for a reaction from */
-/* the network. */
-/* If a response (either error or result) is received during this */
-/* intervall, a reporting callback is initiated and the process will */
-/* be deleted, the hl identifier will be freed. */
-/* If no response is received during the specified intervall, a error*/
-/* callback is initiated with timeout set to -1 and a datalen set */
-/* to 0. */
-/* If timeout is set to a value <= 0 during INVOCATION the process is*/
-/* immediately deleted after sending the data. No callback occurs ! */
-/* */
-/* A currently waiting process may be aborted with INVOKE_ABORT. No */
-/* callback will occur when a process has been aborted. */
-/* */
-/* Broadcast invoke frames from the network are reported via the */
-/* STAT_INVOKE_BRD callback. The ll_id is set to 0, the other fields */
-/* are supplied by the network and not by the HL. */
-/*********************************************************************/
-
-/*****************/
-/* NI1 commands */
-/*****************/
-#define NI1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_NI1) /* invoke a supplementary service */
-#define NI1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_NI1) /* abort a invoke cmd */
-
-/*******************************/
-/* NI1 Status callback values */
-/*******************************/
-#define NI1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_NI1) /* Result for invocation */
-#define NI1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_NI1) /* Error Return for invocation */
-#define NI1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_NI1) /* Deliver invoke broadcast info */
-
-typedef struct
- { ulong ll_id; /* ID supplied by LL when executing */
- /* a command and returned by HL for */
- /* INVOKE_RES and INVOKE_ERR */
- int hl_id; /* ID supplied by HL when called */
- /* for executing a cmd and delivered */
- /* for results and errors */
- /* must be supplied by LL when aborting*/
- int proc; /* invoke procedure used by CMD_INVOKE */
- /* returned by callback and broadcast */
- int timeout; /* timeout for INVOKE CMD in ms */
- /* -1 in stat callback when timed out */
- /* error value when error callback */
- int datalen; /* length of cmd or stat data */
- u_char *data;/* pointer to data delivered or send */
- } isdn_cmd_stat;
-
-/*
- * Commands from linklevel to lowlevel
- *
- */
-#define ISDN_CMD_IOCTL 0 /* Perform ioctl */
-#define ISDN_CMD_DIAL 1 /* Dial out */
-#define ISDN_CMD_ACCEPTD 2 /* Accept an incoming call on D-Chan. */
-#define ISDN_CMD_ACCEPTB 3 /* Request B-Channel connect. */
-#define ISDN_CMD_HANGUP 4 /* Hangup */
-#define ISDN_CMD_CLREAZ 5 /* Clear EAZ(s) of channel */
-#define ISDN_CMD_SETEAZ 6 /* Set EAZ(s) of channel */
-#define ISDN_CMD_GETEAZ 7 /* Get EAZ(s) of channel */
-#define ISDN_CMD_SETSIL 8 /* Set Service-Indicator-List of channel */
-#define ISDN_CMD_GETSIL 9 /* Get Service-Indicator-List of channel */
-#define ISDN_CMD_SETL2 10 /* Set B-Chan. Layer2-Parameter */
-#define ISDN_CMD_GETL2 11 /* Get B-Chan. Layer2-Parameter */
-#define ISDN_CMD_SETL3 12 /* Set B-Chan. Layer3-Parameter */
-#define ISDN_CMD_GETL3 13 /* Get B-Chan. Layer3-Parameter */
-// #define ISDN_CMD_LOCK 14 /* Signal usage by upper levels */
-// #define ISDN_CMD_UNLOCK 15 /* Release usage-lock */
-#define ISDN_CMD_SUSPEND 16 /* Suspend connection */
-#define ISDN_CMD_RESUME 17 /* Resume connection */
-#define ISDN_CMD_PROCEED 18 /* Proceed with call establishment */
-#define ISDN_CMD_ALERT 19 /* Alert after Proceeding */
-#define ISDN_CMD_REDIR 20 /* Redir a incoming call */
-#define ISDN_CMD_PROT_IO 21 /* Protocol specific commands */
-#define CAPI_PUT_MESSAGE 22 /* CAPI message send down or up */
-#define ISDN_CMD_FAXCMD 23 /* FAX commands to HL-driver */
-#define ISDN_CMD_AUDIO 24 /* DSP, DTMF, ... settings */
-
-/*
- * Status-Values delivered from lowlevel to linklevel via
- * statcallb().
- *
- */
-#define ISDN_STAT_STAVAIL 256 /* Raw status-data available */
-#define ISDN_STAT_ICALL 257 /* Incoming call detected */
-#define ISDN_STAT_RUN 258 /* Signal protocol-code is running */
-#define ISDN_STAT_STOP 259 /* Signal halt of protocol-code */
-#define ISDN_STAT_DCONN 260 /* Signal D-Channel connect */
-#define ISDN_STAT_BCONN 261 /* Signal B-Channel connect */
-#define ISDN_STAT_DHUP 262 /* Signal D-Channel disconnect */
-#define ISDN_STAT_BHUP 263 /* Signal B-Channel disconnect */
-#define ISDN_STAT_CINF 264 /* Charge-Info */
-#define ISDN_STAT_LOAD 265 /* Signal new lowlevel-driver is loaded */
-#define ISDN_STAT_UNLOAD 266 /* Signal unload of lowlevel-driver */
-#define ISDN_STAT_BSENT 267 /* Signal packet sent */
-#define ISDN_STAT_NODCH 268 /* Signal no D-Channel */
-#define ISDN_STAT_ADDCH 269 /* Add more Channels */
-#define ISDN_STAT_CAUSE 270 /* Cause-Message */
-#define ISDN_STAT_ICALLW 271 /* Incoming call without B-chan waiting */
-#define ISDN_STAT_REDIR 272 /* Redir result */
-#define ISDN_STAT_PROT 273 /* protocol IO specific callback */
-#define ISDN_STAT_DISPLAY 274 /* deliver a received display message */
-#define ISDN_STAT_L1ERR 275 /* Signal Layer-1 Error */
-#define ISDN_STAT_FAXIND 276 /* FAX indications from HL-driver */
-#define ISDN_STAT_AUDIO 277 /* DTMF, DSP indications */
-#define ISDN_STAT_DISCH 278 /* Disable/Enable channel usage */
-
-/*
- * Audio commands
- */
-#define ISDN_AUDIO_SETDD 0 /* Set DTMF detection */
-#define ISDN_AUDIO_DTMF 1 /* Rx/Tx DTMF */
-
-/*
- * Values for errcode field
- */
-#define ISDN_STAT_L1ERR_SEND 1
-#define ISDN_STAT_L1ERR_RECV 2
-
-/*
- * Values for feature-field of interface-struct.
- */
-/* Layer 2 */
-#define ISDN_FEATURE_L2_X75I (0x0001 << ISDN_PROTO_L2_X75I)
-#define ISDN_FEATURE_L2_X75UI (0x0001 << ISDN_PROTO_L2_X75UI)
-#define ISDN_FEATURE_L2_X75BUI (0x0001 << ISDN_PROTO_L2_X75BUI)
-#define ISDN_FEATURE_L2_HDLC (0x0001 << ISDN_PROTO_L2_HDLC)
-#define ISDN_FEATURE_L2_TRANS (0x0001 << ISDN_PROTO_L2_TRANS)
-#define ISDN_FEATURE_L2_X25DTE (0x0001 << ISDN_PROTO_L2_X25DTE)
-#define ISDN_FEATURE_L2_X25DCE (0x0001 << ISDN_PROTO_L2_X25DCE)
-#define ISDN_FEATURE_L2_V11096 (0x0001 << ISDN_PROTO_L2_V11096)
-#define ISDN_FEATURE_L2_V11019 (0x0001 << ISDN_PROTO_L2_V11019)
-#define ISDN_FEATURE_L2_V11038 (0x0001 << ISDN_PROTO_L2_V11038)
-#define ISDN_FEATURE_L2_MODEM (0x0001 << ISDN_PROTO_L2_MODEM)
-#define ISDN_FEATURE_L2_FAX (0x0001 << ISDN_PROTO_L2_FAX)
-#define ISDN_FEATURE_L2_HDLC_56K (0x0001 << ISDN_PROTO_L2_HDLC_56K)
-
-#define ISDN_FEATURE_L2_MASK (0x0FFFF) /* Max. 16 protocols */
-#define ISDN_FEATURE_L2_SHIFT (0)
-
-/* Layer 3 */
-#define ISDN_FEATURE_L3_TRANS (0x10000 << ISDN_PROTO_L3_TRANS)
-#define ISDN_FEATURE_L3_TRANSDSP (0x10000 << ISDN_PROTO_L3_TRANSDSP)
-#define ISDN_FEATURE_L3_FCLASS2 (0x10000 << ISDN_PROTO_L3_FCLASS2)
-#define ISDN_FEATURE_L3_FCLASS1 (0x10000 << ISDN_PROTO_L3_FCLASS1)
-
-#define ISDN_FEATURE_L3_MASK (0x0FF0000) /* Max. 8 Protocols */
-#define ISDN_FEATURE_L3_SHIFT (16)
-
-/* Signaling */
-#define ISDN_FEATURE_P_UNKNOWN (0x1000000 << ISDN_PTYPE_UNKNOWN)
-#define ISDN_FEATURE_P_1TR6 (0x1000000 << ISDN_PTYPE_1TR6)
-#define ISDN_FEATURE_P_EURO (0x1000000 << ISDN_PTYPE_EURO)
-#define ISDN_FEATURE_P_NI1 (0x1000000 << ISDN_PTYPE_NI1)
-
-#define ISDN_FEATURE_P_MASK (0x0FF000000) /* Max. 8 Protocols */
-#define ISDN_FEATURE_P_SHIFT (24)
-
-typedef struct setup_parm {
- unsigned char phone[32]; /* Remote Phone-Number */
- unsigned char eazmsn[32]; /* Local EAZ or MSN */
- unsigned char si1; /* Service Indicator 1 */
- unsigned char si2; /* Service Indicator 2 */
- unsigned char plan; /* Numbering plan */
- unsigned char screen; /* Screening info */
-} setup_parm;
-
-
-#ifdef CONFIG_ISDN_TTY_FAX
-/* T.30 Fax G3 */
-
-#define FAXIDLEN 21
-
-typedef struct T30_s {
- /* session parameters */
- __u8 resolution;
- __u8 rate;
- __u8 width;
- __u8 length;
- __u8 compression;
- __u8 ecm;
- __u8 binary;
- __u8 scantime;
- __u8 id[FAXIDLEN];
- /* additional parameters */
- __u8 phase;
- __u8 direction;
- __u8 code;
- __u8 badlin;
- __u8 badmul;
- __u8 bor;
- __u8 fet;
- __u8 pollid[FAXIDLEN];
- __u8 cq;
- __u8 cr;
- __u8 ctcrty;
- __u8 minsp;
- __u8 phcto;
- __u8 rel;
- __u8 nbc;
- /* remote station parameters */
- __u8 r_resolution;
- __u8 r_rate;
- __u8 r_width;
- __u8 r_length;
- __u8 r_compression;
- __u8 r_ecm;
- __u8 r_binary;
- __u8 r_scantime;
- __u8 r_id[FAXIDLEN];
- __u8 r_code;
-} __packed T30_s;
-
-#define ISDN_TTY_FAX_CONN_IN 0
-#define ISDN_TTY_FAX_CONN_OUT 1
-
-#define ISDN_TTY_FAX_FCON 0
-#define ISDN_TTY_FAX_DIS 1
-#define ISDN_TTY_FAX_FTT 2
-#define ISDN_TTY_FAX_MCF 3
-#define ISDN_TTY_FAX_DCS 4
-#define ISDN_TTY_FAX_TRAIN_OK 5
-#define ISDN_TTY_FAX_EOP 6
-#define ISDN_TTY_FAX_EOM 7
-#define ISDN_TTY_FAX_MPS 8
-#define ISDN_TTY_FAX_DTC 9
-#define ISDN_TTY_FAX_RID 10
-#define ISDN_TTY_FAX_HNG 11
-#define ISDN_TTY_FAX_DT 12
-#define ISDN_TTY_FAX_FCON_I 13
-#define ISDN_TTY_FAX_DR 14
-#define ISDN_TTY_FAX_ET 15
-#define ISDN_TTY_FAX_CFR 16
-#define ISDN_TTY_FAX_PTS 17
-#define ISDN_TTY_FAX_SENT 18
-
-#define ISDN_FAX_PHASE_IDLE 0
-#define ISDN_FAX_PHASE_A 1
-#define ISDN_FAX_PHASE_B 2
-#define ISDN_FAX_PHASE_C 3
-#define ISDN_FAX_PHASE_D 4
-#define ISDN_FAX_PHASE_E 5
-
-#endif /* TTY_FAX */
-
-#define ISDN_FAX_CLASS1_FAE 0
-#define ISDN_FAX_CLASS1_FTS 1
-#define ISDN_FAX_CLASS1_FRS 2
-#define ISDN_FAX_CLASS1_FTM 3
-#define ISDN_FAX_CLASS1_FRM 4
-#define ISDN_FAX_CLASS1_FTH 5
-#define ISDN_FAX_CLASS1_FRH 6
-#define ISDN_FAX_CLASS1_CTRL 7
-
-#define ISDN_FAX_CLASS1_OK 0
-#define ISDN_FAX_CLASS1_CONNECT 1
-#define ISDN_FAX_CLASS1_NOCARR 2
-#define ISDN_FAX_CLASS1_ERROR 3
-#define ISDN_FAX_CLASS1_FCERROR 4
-#define ISDN_FAX_CLASS1_QUERY 5
-
-typedef struct {
- __u8 cmd;
- __u8 subcmd;
- __u8 para[50];
-} aux_s;
-
-#define AT_COMMAND 0
-#define AT_EQ_VALUE 1
-#define AT_QUERY 2
-#define AT_EQ_QUERY 3
-
-/* CAPI structs */
-
-/* this is compatible to the old union size */
-#define MAX_CAPI_PARA_LEN 50
-
-typedef struct {
- /* Header */
- __u16 Length;
- __u16 ApplId;
- __u8 Command;
- __u8 Subcommand;
- __u16 Messagenumber;
-
- /* Parameter */
- union {
- __u32 Controller;
- __u32 PLCI;
- __u32 NCCI;
- } adr;
- __u8 para[MAX_CAPI_PARA_LEN];
-} capi_msg;
-
-/*
- * Structure for exchanging above infos
- *
- */
-typedef struct {
- int driver; /* Lowlevel-Driver-ID */
- int command; /* Command or Status (see above) */
- ulong arg; /* Additional Data */
- union {
- ulong errcode; /* Type of error with STAT_L1ERR */
- int length; /* Amount of bytes sent with STAT_BSENT */
- u_char num[50]; /* Additional Data */
- setup_parm setup;/* For SETUP msg */
- capi_msg cmsg; /* For CAPI like messages */
- char display[85];/* display message data */
- isdn_cmd_stat isdn_io; /* ISDN IO-parameter/result */
- aux_s aux; /* for modem commands/indications */
-#ifdef CONFIG_ISDN_TTY_FAX
- T30_s *fax; /* Pointer to ttys fax struct */
-#endif
- ulong userdata; /* User Data */
- } parm;
-} isdn_ctrl;
-
-#define dss1_io isdn_io
-#define ni1_io isdn_io
-
-/*
- * The interface-struct itself (initialized at load-time of lowlevel-driver)
- *
- * See Documentation/isdn/INTERFACE for a description, how the communication
- * between the ISDN subsystem and its drivers is done.
- *
- */
-typedef struct {
- struct module *owner;
-
- /* Number of channels supported by this driver
- */
- int channels;
-
- /*
- * Maximum Size of transmit/receive-buffer this driver supports.
- */
- int maxbufsize;
-
- /* Feature-Flags for this driver.
- * See defines ISDN_FEATURE_... for Values
- */
- unsigned long features;
-
- /*
- * Needed for calculating
- * dev->hard_header_len = linklayer header + hl_hdrlen;
- * Drivers, not supporting sk_buff's should set this to 0.
- */
- unsigned short hl_hdrlen;
-
- /*
- * Receive-Callback using sk_buff's
- * Parameters:
- * int Driver-ID
- * int local channel-number (0 ...)
- * struct sk_buff *skb received Data
- */
- void (*rcvcallb_skb)(int, int, struct sk_buff *);
-
- /* Status-Callback
- * Parameters:
- * isdn_ctrl*
- * driver = Driver ID.
- * command = One of above ISDN_STAT_... constants.
- * arg = depending on status-type.
- * num = depending on status-type.
- */
- int (*statcallb)(isdn_ctrl*);
-
- /* Send command
- * Parameters:
- * isdn_ctrl*
- * driver = Driver ID.
- * command = One of above ISDN_CMD_... constants.
- * arg = depending on command.
- * num = depending on command.
- */
- int (*command)(isdn_ctrl*);
-
- /*
- * Send data using sk_buff's
- * Parameters:
- * int driverId
- * int local channel-number (0...)
- * int Flag: Need ACK for this packet.
- * struct sk_buff *skb Data to send
- */
- int (*writebuf_skb) (int, int, int, struct sk_buff *);
-
- /* Send raw D-Channel-Commands
- * Parameters:
- * u_char pointer data
- * int length of data
- * int driverId
- * int local channel-number (0 ...)
- */
- int (*writecmd)(const u_char __user *, int, int, int);
-
- /* Read raw Status replies
- * u_char pointer data (volatile)
- * int length of buffer
- * int driverId
- * int local channel-number (0 ...)
- */
- int (*readstat)(u_char __user *, int, int, int);
-
- char id[20];
-} isdn_if;
-
-/*
- * Function which must be called by lowlevel-driver at loadtime with
- * the following fields of above struct set:
- *
- * channels Number of channels that will be supported.
- * hl_hdrlen Space to preserve in sk_buff's when sending. Drivers, not
- * supporting sk_buff's should set this to 0.
- * command Address of Command-Handler.
- * features Bitwise coded Features of this driver. (use ISDN_FEATURE_...)
- * writebuf_skb Address of Skbuff-Send-Handler.
- * writecmd " " D-Channel " which accepts raw D-Ch-Commands.
- * readstat " " D-Channel " which delivers raw Status-Data.
- *
- * The linklevel-driver fills the following fields:
- *
- * channels Driver-ID assigned to this driver. (Must be used on all
- * subsequent callbacks.
- * rcvcallb_skb Address of handler for received Skbuff's.
- * statcallb " " " for status-changes.
- *
- */
-extern int register_isdn(isdn_if*);
-#include <linux/uaccess.h>
-
-#endif /* __ISDNIF_H__ */
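For reference, a sketch of the registration sequence described by the comment block above, written against this now-removed interface. The mydrv_* callbacks, channel count, and feature mask are hypothetical placeholders, and the register_isdn() return convention is noted as historical rather than guaranteed.

/* Sketch only: a low-level driver filling in isdn_if and registering. */
#include <linux/module.h>
#include <linux/isdnif.h>

static int mydrv_command(isdn_ctrl *c)
{
	return 0;			/* dispatch on c->command here */
}

static int mydrv_writebuf(int drv, int ch, int ack, struct sk_buff *skb)
{
	return skb->len;		/* pretend the whole buffer was sent */
}

static int mydrv_writecmd(const u_char __user *buf, int len, int drv, int ch)
{
	return len;
}

static int mydrv_readstat(u_char __user *buf, int len, int drv, int ch)
{
	return 0;
}

static isdn_if mydrv_iface;

static int mydrv_register(void)
{
	mydrv_iface.owner	 = THIS_MODULE;
	mydrv_iface.channels	 = 2;	/* e.g. one BRI with two B-channels */
	mydrv_iface.maxbufsize	 = 4000;
	mydrv_iface.hl_hdrlen	 = 0;	/* no extra sk_buff headroom needed */
	mydrv_iface.features	 = ISDN_FEATURE_L2_HDLC | ISDN_FEATURE_L2_TRANS |
				   ISDN_FEATURE_L3_TRANS | ISDN_FEATURE_P_EURO;
	mydrv_iface.command	 = mydrv_command;
	mydrv_iface.writebuf_skb = mydrv_writebuf;
	mydrv_iface.writecmd	 = mydrv_writecmd;
	mydrv_iface.readstat	 = mydrv_readstat;
	strcpy(mydrv_iface.id, "mydrv");

	/* register_isdn() historically returned nonzero on success. */
	if (!register_isdn(&mydrv_iface))
		return -EIO;

	/* Per the comment above, the linklevel now holds the assigned
	 * driver ID in .channels and has filled in .rcvcallb_skb and
	 * .statcallb. */
	return 0;
}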
diff --git a/include/linux/isicom.h b/include/linux/isicom.h
deleted file mode 100644
index b92e05650639..000000000000
--- a/include/linux/isicom.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef _LINUX_ISICOM_H
-#define _LINUX_ISICOM_H
-
-#define YES 1
-#define NO 0
-
-/*
- * ISICOM Driver definitions ...
- *
- */
-
-#define ISICOM_NAME "ISICom"
-
-/*
- * PCI definitions
- */
-
-#define DEVID_COUNT 9
-#define VENDOR_ID 0x10b5
-
-/*
- * These are now officially allocated numbers
- */
-
-#define ISICOM_NMAJOR 112 /* normal */
-#define ISICOM_CMAJOR 113 /* callout */
-#define ISICOM_MAGIC (('M' << 8) | 'T')
-
-#define WAKEUP_CHARS 256 /* hard coded for now */
-#define TX_SIZE 254
-
-#define BOARD_COUNT 4
-#define PORT_COUNT (BOARD_COUNT*16)
-
-/* character sizes */
-
-#define ISICOM_CS5 0x0000
-#define ISICOM_CS6 0x0001
-#define ISICOM_CS7 0x0002
-#define ISICOM_CS8 0x0003
-
-/* stop bits */
-
-#define ISICOM_1SB 0x0000
-#define ISICOM_2SB 0x0004
-
-/* parity */
-
-#define ISICOM_NOPAR 0x0000
-#define ISICOM_ODPAR 0x0008
-#define ISICOM_EVPAR 0x0018
-
-/* flow control */
-
-#define ISICOM_CTSRTS 0x03
-#define ISICOM_INITIATE_XONXOFF 0x04
-#define ISICOM_RESPOND_XONXOFF 0x08
-
-#define BOARD(line) (((line) >> 4) & 0x3)
-
- /* isi kill queue bitmap */
-
-#define ISICOM_KILLTX 0x01
-#define ISICOM_KILLRX 0x02
-
- /* isi_board status bitmap */
-
-#define FIRMWARE_LOADED 0x0001
-#define BOARD_ACTIVE 0x0002
-#define BOARD_INIT 0x0004
-
- /* isi_port status bitmap */
-
-#define ISI_CTS 0x1000
-#define ISI_DSR 0x2000
-#define ISI_RI 0x4000
-#define ISI_DCD 0x8000
-#define ISI_DTR 0x0100
-#define ISI_RTS 0x0200
-
-
-#define ISI_TXOK 0x0001
-
-#endif /* ISICOM_H */
diff --git a/include/linux/ism.h b/include/linux/ism.h
new file mode 100644
index 000000000000..b7feb4dcd5a8
--- /dev/null
+++ b/include/linux/ism.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internal Shared Memory
+ *
+ * Definitions for the ISM module
+ *
+ * Copyright IBM Corp. 2022
+ */
+#ifndef _ISM_H
+#define _ISM_H
+
+#include <linux/workqueue.h>
+
+/* Unless we gain unexpected popularity, this limit should hold for a while */
+#define MAX_CLIENTS 8
+#define ISM_NR_DMBS 1920
+
+struct ism_dev {
+ spinlock_t lock; /* protects the ism device */
+ spinlock_t cmd_lock; /* serializes cmds */
+ struct list_head list;
+ struct dibs_dev *dibs;
+ struct pci_dev *pdev;
+
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+ void *priv[MAX_CLIENTS];
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+
+ int ieq_idx;
+
+ struct ism_client *subs[MAX_CLIENTS];
+};
+
+struct ism_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct ism_client {
+ const char *name;
+ void (*handle_event)(struct ism_dev *dev, struct ism_event *event);
+ /* Private area - don't touch! */
+ u8 id;
+};
+
+int ism_register_client(struct ism_client *client);
+int ism_unregister_client(struct ism_client *client);
+static inline void *ism_get_priv(struct ism_dev *dev,
+ struct ism_client *client) {
+ return dev->priv[client->id];
+}
+
+static inline void ism_set_priv(struct ism_dev *dev, struct ism_client *client,
+ void *priv) {
+ dev->priv[client->id] = priv;
+}
+
+const struct smcd_ops *ism_get_smcd_ops(void);
+
+#endif /* _ISM_H */
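A short sketch of the client side of this new API: an ism_client supplies a name and an event handler, and per-device state can be attached with ism_set_priv() and read back with ism_get_priv(). The "demo" client and its state struct below are hypothetical; pairing ism_register_client() with ism_unregister_client() on module exit is implied.

/* Hypothetical "demo" client of the ISM API declared above. */
#include <linux/init.h>
#include <linux/ism.h>

struct demo_state {
	int events_seen;
};

static struct ism_client demo_client;	/* tentative definition; see below */

static void demo_handle_event(struct ism_dev *dev, struct ism_event *event)
{
	struct demo_state *st = ism_get_priv(dev, &demo_client);

	if (st)
		st->events_seen++;	/* event->type/code say what happened */
}

static struct ism_client demo_client = {
	.name		= "demo",
	.handle_event	= demo_handle_event,
};

static int __init demo_init(void)
{
	/* Registration assigns the private demo_client.id; per-device state
	 * would later be attached with ism_set_priv(dev, &demo_client, st). */
	return ism_register_client(&demo_client);
}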
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
new file mode 100644
index 000000000000..8f972eaca2ed
--- /dev/null
+++ b/include/linux/iversion.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IVERSION_H
+#define _LINUX_IVERSION_H
+
+#include <linux/fs.h>
+
+/*
+ * The inode->i_version field:
+ * ---------------------------
+ * The change attribute (i_version) is mandated by NFSv4 and is mostly for
+ * knfsd, but is also used for other purposes (e.g. IMA). The i_version must
+ * appear larger to observers if there was an explicit change to the inode's
+ * data or metadata since it was last queried.
+ *
+ * An explicit change is one that would ordinarily result in a change to the
+ * inode status change time (aka ctime). i_version must appear to change, even
+ * if the ctime does not (since the whole point is to avoid missing updates due
+ * to timestamp granularity). If POSIX or other relevant spec mandates that the
+ * ctime must change due to an operation, then the i_version counter must be
+ * incremented as well.
+ *
+ * Making the i_version update completely atomic with the operation itself would
+ * be prohibitively expensive. Traditionally the kernel has updated the times on
+ * directories after an operation that changes their contents. For regular files,
+ * the ctime is usually updated before the data is copied into the cache for a
+ * write. This means that there is a window of time when an observer can
+ * associate a new timestamp with old file contents. Since the purpose of the
+ * i_version is to allow for better cache coherency, the i_version must always
+ * be updated after the results of the operation are visible. Updating it before
+ * and after a change is also permitted. (Note that no filesystems currently do
+ * this. Fixing that is a work-in-progress).
+ *
+ * Observers see the i_version as a 64-bit number that never decreases. If it
+ * remains the same since it was last checked, then nothing has changed in the
+ * inode. If it's different then something has changed. Observers cannot infer
+ * anything about the nature or magnitude of the changes from the value, only
+ * that the inode has changed in some fashion.
+ *
+ * Not all filesystems properly implement the i_version counter. Subsystems that
+ * want to use i_version field on an inode should first check whether the
+ * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro).
+ *
+ * Those that set SB_I_VERSION will automatically have their i_version counter
+ * incremented on writes to normal files. If SB_I_VERSION is not set, then
+ * the VFS will not touch it on writes, and the filesystem can use it how it
+ * wishes. Note that the filesystem is always responsible for updating the
+ * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.).
+ * We consider these sorts of filesystems to have a kernel-managed i_version.
+ *
+ * It may be impractical for filesystems to keep i_version updates atomic with
+ * respect to the changes that cause them. They should, however, guarantee
+ * that i_version updates are never visible before the changes that caused
+ * them. Also, i_version updates should never be delayed longer than it takes
+ * the original change to reach disk.
+ *
+ * This implementation uses the low bit in the i_version field as a flag to
+ * track when the value has been queried. If it has not been queried since it
+ * was last incremented, we can skip the increment in most cases.
+ *
+ * In the event that we're updating the ctime, we will usually go ahead and
+ * bump the i_version anyway. Since that has to go to stable storage in some
+ * fashion, we might as well increment it as well.
+ *
+ * With this implementation, the value should always appear to observers to
+ * increase over time if the file has changed. It's recommended to use the
+ * inode_eq_iversion() helper to compare values.
+ *
+ * Note that some filesystems (e.g. NFS and AFS) just use the field to store
+ * a server-provided value (for the most part). For that reason, those
+ * filesystems do not set SB_I_VERSION. These filesystems are considered to
+ * have a self-managed i_version.
+ *
+ * Persistently storing the i_version
+ * ----------------------------------
+ * Queries of the i_version field are not gated on them hitting the backing
+ * store. It's always possible that the host could crash after allowing
+ * a query of the value but before it has made it to disk.
+ *
+ * To mitigate this problem, filesystems should always use
+ * inode_set_iversion_queried when loading an existing inode from disk. This
+ * ensures that the next attempted inode increment will result in the value
+ * changing.
+ *
+ * Storing the value to disk therefore does not count as a query, so those
+ * filesystems should use inode_peek_iversion to grab the value to be stored.
+ * There is no need to flag the value as having been queried in that case.
+ */
+
+/*
+ * We borrow the lowest bit in the i_version to use as a flag to tell whether
+ * it has been queried since we last incremented it. If it has, then we must
+ * increment it on the next change. After that, we can clear the flag and
+ * avoid incrementing it again until it has again been queried.
+ */
+#define I_VERSION_QUERIED_SHIFT (1)
+#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1))
+#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT)
+
+/**
+ * inode_set_iversion_raw - set i_version to the specified raw value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for use by
+ * filesystems that self-manage the i_version.
+ *
+ * For example, the NFS client stores its NFSv4 change attribute in this way,
+ * and the AFS client stores the data_version from the server here.
+ */
+static inline void
+inode_set_iversion_raw(struct inode *inode, u64 val)
+{
+ atomic64_set(&inode->i_version, val);
+}
+
+/**
+ * inode_peek_iversion_raw - grab a "raw" iversion value
+ * @inode: inode from which i_version should be read
+ *
+ * Grab a "raw" inode->i_version value and return it. The i_version is not
+ * flagged or converted in any way. This is mostly used to access a self-managed
+ * i_version.
+ *
+ * With those filesystems, we want to treat the i_version as an entirely
+ * opaque value.
+ */
+static inline u64
+inode_peek_iversion_raw(const struct inode *inode)
+{
+ return atomic64_read(&inode->i_version);
+}
+
+/**
+ * inode_set_max_iversion_raw - update i_version if the new value is larger
+ * @inode: inode to set
+ * @val: new i_version to set
+ *
+ * Some self-managed filesystems (e.g. Ceph) will only update the i_version
+ * value if the new value is larger than the one we already have.
+ */
+static inline void
+inode_set_max_iversion_raw(struct inode *inode, u64 val)
+{
+ u64 cur = inode_peek_iversion_raw(inode);
+
+ do {
+ if (cur > val)
+ break;
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, val));
+}
+
+/**
+ * inode_set_iversion - set i_version to a particular value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for filesystems with
+ * a kernel-managed i_version, for initializing a newly-created inode from
+ * scratch.
+ *
+ * In this case, we do not set the QUERIED flag since we know that this value
+ * has never been queried.
+ */
+static inline void
+inode_set_iversion(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT);
+}
+
+/**
+ * inode_set_iversion_queried - set i_version to a particular value as queried
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val, and flag it for increment on the next
+ * change.
+ *
+ * Filesystems that persistently store the i_version on disk should use this
+ * when loading an existing inode from disk.
+ *
+ * When loading in an i_version value from a backing store, we can't be certain
+ * that it wasn't previously viewed before being stored. Thus, we must assume
+ * that it was, to ensure that we don't end up handing out the same value for
+ * different versions of the same inode.
+ */
+static inline void
+inode_set_iversion_queried(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) |
+ I_VERSION_QUERIED);
+}
+
+bool inode_maybe_inc_iversion(struct inode *inode, bool force);
+
+/**
+ * inode_inc_iversion - forcibly increment i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the i_version field. This always results in a change to
+ * the observable value.
+ */
+static inline void
+inode_inc_iversion(struct inode *inode)
+{
+ inode_maybe_inc_iversion(inode, true);
+}
+
+/**
+ * inode_iversion_need_inc - is the i_version in need of being incremented?
+ * @inode: inode to check
+ *
+ * Returns whether the inode->i_version counter needs incrementing on the next
+ * change. Just fetch the value and check the QUERIED flag.
+ */
+static inline bool
+inode_iversion_need_inc(struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED;
+}
+
+/**
+ * inode_inc_iversion_raw - forcibly increment raw i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the raw i_version field. This always results in a change
+ * to the raw value.
+ *
+ * NFS will use the i_version field to store the value from the server. It
+ * mostly treats it as opaque, but in the case where it holds a write
+ * delegation, it must increment the value itself. This function does that.
+ */
+static inline void
+inode_inc_iversion_raw(struct inode *inode)
+{
+ atomic64_inc(&inode->i_version);
+}
+
+/**
+ * inode_peek_iversion - read i_version without flagging it to be incremented
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter for an inode without registering it as a
+ * query.
+ *
+ * This is typically used by local filesystems that need to store an i_version
+ * on disk. In that situation, it's not necessary to flag it as having been
+ * viewed, as the result won't be used to gauge changes from that point.
+ */
+static inline u64
+inode_peek_iversion(const struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
+}
+
+/*
+ * For filesystems without any sort of change attribute, the best we can
+ * do is fake one up from the ctime:
+ */
+static inline u64 time_to_chattr(const struct timespec64 *t)
+{
+ u64 chattr = t->tv_sec;
+
+ chattr <<= 32;
+ chattr += t->tv_nsec;
+ return chattr;
+}
+
+u64 inode_query_iversion(struct inode *inode);
+
+/**
+ * inode_eq_iversion_raw - check whether the raw i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare the current raw i_version counter with a previous one. Returns true
+ * if they are the same or false if they are different.
+ */
+static inline bool
+inode_eq_iversion_raw(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion_raw(inode) == old;
+}
+
+/**
+ * inode_eq_iversion - check whether the i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare an i_version counter with a previous one. Returns true if they are
+ * the same, and false if they are different.
+ *
+ * Note that we don't need to set the QUERIED flag in this case, as the value
+ * in the inode is not being recorded for later use.
+ */
+static inline bool
+inode_eq_iversion(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion(inode) == old;
+}
+#endif
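Taken together, the helpers above imply the following call pattern for a filesystem with a kernel-managed, persistently stored i_version. The demo_* wrappers are hypothetical.

/* Hypothetical demo_* wrappers showing the intended call pattern. */
#include <linux/iversion.h>

static void demo_load_inode(struct inode *inode, u64 ondisk_version)
{
	/* Loading from disk: the value may already have been handed out,
	 * so mark it queried and force a bump on the next change. */
	inode_set_iversion_queried(inode, ondisk_version);
}

static void demo_change_inode(struct inode *inode)
{
	/* An explicit change: this bumps the counter only if it was
	 * queried since the last increment (pass true to force it). */
	inode_maybe_inc_iversion(inode, false);
}

static u64 demo_store_inode(struct inode *inode)
{
	/* Writing back to disk is not a query, so peek instead. */
	return inode_peek_iversion(inode);
}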
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 606b6bce3a5b..f5eaf76198f3 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/include/linux/jbd2.h
*
@@ -5,10 +6,6 @@
*
* Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
*
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
* Definitions for transaction data structures for the buffer cache
* filesystem journaling support.
*/
@@ -30,7 +27,8 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/bit_spinlock.h>
-#include <crypto/hash.h>
+#include <linux/blkdev.h>
+#include <linux/crc32c.h>
#endif
#define journal_oom_retry 1
@@ -56,20 +54,20 @@
* CONFIG_JBD2_DEBUG is on.
*/
#define JBD2_EXPENSIVE_CHECKING
-extern ushort jbd2_journal_enable_debug;
void __jbd2_debug(int level, const char *file, const char *func,
unsigned int line, const char *fmt, ...);
-#define jbd_debug(n, fmt, a...) \
+#define jbd2_debug(n, fmt, a...) \
__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
-#define jbd_debug(n, fmt, a...) /**/
+#define jbd2_debug(n, fmt, a...) no_printk(fmt, ##a)
#endif
extern void *jbd2_alloc(size_t size, gfp_t flags);
extern void jbd2_free(void *ptr, size_t size);
#define JBD2_MIN_JOURNAL_BLOCKS 1024
+#define JBD2_DEFAULT_FAST_COMMIT_BLOCKS 256
#ifdef __KERNEL__
@@ -265,7 +263,12 @@ typedef struct journal_superblock_s
/* 0x0050 */
__u8 s_checksum_type; /* checksum type */
__u8 s_padding2[3];
- __u32 s_padding[42];
+/* 0x0054 */
+ __be32 s_num_fc_blks; /* Number of fast commit blocks */
+ __be32 s_head; /* blocknr of head of log, only uptodate
+ * while the filesystem is clean */
+/* 0x005C */
+ __u32 s_padding[40];
__be32 s_checksum; /* crc32c(superblock) */
/* 0x0100 */
@@ -273,17 +276,6 @@ typedef struct journal_superblock_s
/* 0x0400 */
} journal_superblock_t;
-/* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */
-#define JBD2_HAS_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001
#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
@@ -291,6 +283,7 @@ typedef struct journal_superblock_s
#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
+#define JBD2_FEATURE_INCOMPAT_FAST_COMMIT 0x00000020
/* See "journal feature predicate functions" below */
@@ -301,7 +294,8 @@ typedef struct journal_superblock_s
JBD2_FEATURE_INCOMPAT_64BIT | \
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
- JBD2_FEATURE_INCOMPAT_CSUM_V3)
+ JBD2_FEATURE_INCOMPAT_CSUM_V3 | \
+ JBD2_FEATURE_INCOMPAT_FAST_COMMIT)
#ifdef __KERNEL__
@@ -316,7 +310,6 @@ enum jbd_state_bits {
BH_Revoked, /* Has been revoked from the log */
BH_RevokeValid, /* Revoked flag is valid */
BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
BH_Shadow, /* IO on shadow buffer is running */
BH_Verified, /* Metadata block has been verified ok */
@@ -345,26 +338,6 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
return bh->b_private;
}
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
- bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
- return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
- return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_State, &bh->b_state);
-}
-
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
bit_spin_lock(BH_JournalHead, &bh->b_state);
@@ -418,41 +391,82 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
/**
- * struct jbd_inode is the structure linking inodes in ordered mode
- * present in a transaction so that we can sync them during commit.
+ * struct jbd2_inode - The jbd2_inode type is the structure linking inodes in
+ * ordered mode present in a transaction so that we can sync them during commit.
*/
struct jbd2_inode {
- /* Which transaction does this inode belong to? Either the running
- * transaction or the committing one. [j_list_lock] */
+ /**
+ * @i_transaction:
+ *
+ * Which transaction does this inode belong to? Either the running
+ * transaction or the committing one. [j_list_lock]
+ */
transaction_t *i_transaction;
- /* Pointer to the running transaction modifying inode's data in case
- * there is already a committing transaction touching it. [j_list_lock] */
+ /**
+ * @i_next_transaction:
+ *
+ * Pointer to the running transaction modifying inode's data in case
+ * there is already a committing transaction touching it. [j_list_lock]
+ */
transaction_t *i_next_transaction;
- /* List of inodes in the i_transaction [j_list_lock] */
+ /**
+ * @i_list: List of inodes in the i_transaction [j_list_lock]
+ */
struct list_head i_list;
- /* VFS inode this inode belongs to [constant during the lifetime
- * of the structure] */
+ /**
+ * @i_vfs_inode:
+ *
+ * VFS inode this inode belongs to [constant for lifetime of structure]
+ */
struct inode *i_vfs_inode;
- /* Flags of inode [j_list_lock] */
+ /**
+ * @i_flags: Flags of inode [j_list_lock]
+ */
unsigned long i_flags;
+
+ /**
+ * @i_dirty_start:
+ *
+ * Offset in bytes where the dirty range for this inode starts.
+ * [j_list_lock]
+ */
+ loff_t i_dirty_start;
+
+ /**
+ * @i_dirty_end:
+ *
+ * Inclusive offset in bytes where the dirty range for this inode
+ * ends. [j_list_lock]
+ */
+ loff_t i_dirty_end;
};
struct jbd2_revoke_table_s;
/**
- * struct handle_s - The handle_s type is the concrete type associated with
- * handle_t.
+ * struct jbd2_journal_handle - The jbd2_journal_handle type is the concrete
+ * type associated with handle_t.
* @h_transaction: Which compound transaction is this update a part of?
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
+ * @h_journal: Which journal handle belongs to - used iff h_reserved set.
+ * @h_rsv_handle: Handle reserved for finishing the logical operation.
+ * @h_total_credits: Number of remaining buffers we are allowed to add to
+ * journal. These are dirty buffers and revoke descriptor blocks.
+ * @h_revoke_credits: Number of remaining revoke records available for handle
+ * @h_ref: Reference count on this handle.
+ * @h_err: Field for caller's use to track errors through large fs operations.
+ * @h_sync: Flag for sync-on-close.
+ * @h_reserved: Flag for handle for reserved credits.
+ * @h_aborted: Flag indicating fatal error on handle.
+ * @h_type: For handle statistics.
+ * @h_line_no: For handle statistics.
+ * @h_start_jiffies: Handle Start time.
+ * @h_requested_credits: Holds @h_total_credits after handle is started.
+ * @h_revoke_credits_requested: Holds @h_revoke_credits after handle is started.
+ * @saved_alloc_context: Saved context while transaction is open.
**/
/* Docbook can't yet cope with the bit fields, but will leave the documentation
@@ -462,32 +476,24 @@ struct jbd2_revoke_table_s;
struct jbd2_journal_handle
{
union {
- /* Which compound transaction is this update a part of? */
transaction_t *h_transaction;
/* Which journal handle belongs to - used iff h_reserved set */
journal_t *h_journal;
};
- /* Handle reserved for finishing the logical operation */
handle_t *h_rsv_handle;
-
- /* Number of remaining buffers we are allowed to dirty: */
- int h_buffer_credits;
-
- /* Reference count on this handle */
+ int h_total_credits;
+ int h_revoke_credits;
+ int h_revoke_credits_requested;
int h_ref;
-
- /* Field for caller's use to track errors through large fs */
- /* operations */
int h_err;
/* Flags [no locking] */
- unsigned int h_sync: 1; /* sync-on-close */
- unsigned int h_jdata: 1; /* force data journaling */
- unsigned int h_reserved: 1; /* handle with reserved credits */
- unsigned int h_aborted: 1; /* fatal error on handle */
- unsigned int h_type: 8; /* for handle statistics */
- unsigned int h_line_no: 16; /* for handle statistics */
+ unsigned int h_sync: 1;
+ unsigned int h_reserved: 1;
+ unsigned int h_aborted: 1;
+ unsigned int h_type: 8;
+ unsigned int h_line_no: 16;
unsigned long h_start_jiffies;
unsigned int h_requested_credits;
@@ -520,6 +526,7 @@ struct transaction_chp_stats_s {
* The transaction keeps track of all of the buffers modified by a
* running transaction, and all of the buffers committed but not yet
* flushed to home for finished transactions.
+ * (Locking Documentation improved by LockDoc)
*/
/*
@@ -529,15 +536,12 @@ struct transaction_chp_stats_s {
* ->jbd_lock_bh_journal_head() (This is "innermost")
*
* j_state_lock
- * ->jbd_lock_bh_state()
+ * ->b_state_lock
*
- * jbd_lock_bh_state()
+ * b_state_lock
* ->j_list_lock
*
* j_state_lock
- * ->t_handle_lock
- *
- * j_state_lock
* ->j_list_lock (journal_unmap_buffer)
*
*/
@@ -561,6 +565,7 @@ struct transaction_s
enum {
T_RUNNING,
T_LOCKED,
+ T_SWITCH,
T_FLUSH,
T_COMMIT,
T_COMMIT_DFLUSH,
@@ -574,18 +579,22 @@ struct transaction_s
*/
unsigned long t_log_start;
- /* Number of buffers on the t_buffers list [j_list_lock] */
+ /*
+ * Number of buffers on the t_buffers list [j_list_lock, no locks
+ * needed for jbd2 thread]
+ */
int t_nr_buffers;
/*
* Doubly-linked circular list of all buffers reserved but not yet
- * modified by this transaction [j_list_lock]
+	 * modified by this transaction [j_list_lock, no locks needed for
+ * jbd2 thread]
*/
struct journal_head *t_reserved_list;
/*
* Doubly-linked circular list of all metadata buffers owned by this
- * transaction [j_list_lock]
+ * transaction [j_list_lock, no locks needed for jbd2 thread]
*/
struct journal_head *t_buffers;
@@ -603,30 +612,23 @@ struct transaction_s
struct journal_head *t_checkpoint_list;
/*
- * Doubly-linked circular list of all buffers submitted for IO while
- * checkpointing. [j_list_lock]
- */
- struct journal_head *t_checkpoint_io_list;
-
- /*
- * Doubly-linked circular list of metadata buffers being shadowed by log
- * IO. The IO buffers on the iobuf list and the shadow buffers on this
- * list match each other one for one at all times. [j_list_lock]
+ * Doubly-linked circular list of metadata buffers being
+ * shadowed by log IO. The IO buffers on the iobuf list and
+ * the shadow buffers on this list match each other one for
+ * one at all times. [j_list_lock, no locks needed for jbd2
+ * thread]
*/
struct journal_head *t_shadow_list;
/*
- * List of inodes whose data we've modified in data=ordered mode.
+ * List of inodes associated with the transaction; e.g., ext4 uses
+ * this to track inodes in data=ordered and data=journal mode that
+ * need special handling on transaction commit; also used by ocfs2.
* [j_list_lock]
*/
struct list_head t_inode_list;
/*
- * Protects info related to handles
- */
- spinlock_t t_handle_lock;
-
- /*
* Longest time some handle had to wait for running transaction
*/
unsigned long t_max_wait;
@@ -637,28 +639,41 @@ struct transaction_s
unsigned long t_start;
/*
- * When commit was requested
+ * When commit was requested [j_state_lock]
*/
unsigned long t_requested;
/*
- * Checkpointing stats [j_checkpoint_sem]
+ * Checkpointing stats [j_list_lock]
*/
struct transaction_chp_stats_s t_chp_stats;
/*
* Number of outstanding updates running on this transaction
- * [t_handle_lock]
+ * [none]
*/
atomic_t t_updates;
/*
- * Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [t_handle_lock]
+ * Number of blocks reserved for this transaction in the journal.
+ * This is including all credits reserved when starting transaction
+ * handles as well as all journal descriptor blocks needed for this
+ * transaction. [none]
*/
atomic_t t_outstanding_credits;
/*
+ * Number of revoke records for this transaction added by already
+ * stopped handles. [none]
+ */
+ atomic_t t_outstanding_revokes;
+
+ /*
+ * How many handles used this transaction? [none]
+ */
+ atomic_t t_handle_count;
+
+ /*
* Forward and backward links for the circular list of all transactions
* awaiting checkpoint. [j_list_lock]
*/
@@ -676,11 +691,6 @@ struct transaction_s
ktime_t t_start_time;
/*
- * How many handles used this transaction? [t_handle_lock]
- */
- atomic_t t_handle_count;
-
- /*
* This transaction is being forced and some process is
* waiting for it to finish.
*/
@@ -688,12 +698,6 @@ struct transaction_s
/* Disk flush needs to be sent to fs partition [no locking] */
int t_need_data_flush;
-
- /*
- * For use by the filesystem to store fs-specific data
- * structures associated with the transaction
- */
- struct list_head t_private_list;
};
struct transaction_run_stats_s {
@@ -726,231 +730,326 @@ jbd2_time_diff(unsigned long start, unsigned long end)
#define JBD2_NR_BATCH 64
+enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
+
+#define JBD2_FC_REPLAY_STOP 0
+#define JBD2_FC_REPLAY_CONTINUE 1
+
/**
* struct journal_s - The journal_s type is the concrete type associated with
* journal_t.
- * @j_flags: General journaling state flags
- * @j_errno: Is there an outstanding uncleared error on the journal (from a
- * prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count: Number of processes waiting to create a barrier lock
- * @j_barrier: The barrier lock itself
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- * waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- * to start committing, or for a barrier lock to be released
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- * journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset for into the device where we store the
- * journal
- * @j_fs_dev: Device which holds the client fs. For internal journal this will
- * be equal to j_dev
- * @j_reserved_credits: Number of buffers reserved from the running transaction
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal. If present, all journal
- * block numbers are mapped into this inode via bmap().
- * @j_tail_sequence: Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- * transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- * commit
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
- * single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- * a commit?
- * @j_commit_timer: The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- * current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- * number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_history_lock: Protect the transactions statistics history
- * @j_proc_entry: procfs entry for the jbd statistics directory
- * @j_stats: Overall statistics
- * @j_private: An opaque pointer to fs-private information.
- * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies
*/
-
struct journal_s
{
- /* General journaling state flags [j_state_lock] */
+ /**
+ * @j_flags: General journaling state flags [j_state_lock,
+ * no lock for quick racy checks]
+ */
unsigned long j_flags;
- /*
+ /**
+ * @j_errno:
+ *
* Is there an outstanding uncleared error on the journal (from a prior
* abort)? [j_state_lock]
*/
int j_errno;
- /* The superblock buffer */
+ /**
+ * @j_abort_mutex: Lock the whole aborting procedure.
+ */
+ struct mutex j_abort_mutex;
+
+ /**
+ * @j_sb_buffer: The first part of the superblock buffer.
+ */
struct buffer_head *j_sb_buffer;
- journal_superblock_t *j_superblock;
- /* Version of the superblock format */
- int j_format_version;
+ /**
+ * @j_superblock: The second part of the superblock buffer.
+ */
+ journal_superblock_t *j_superblock;
- /*
- * Protect the various scalars in the journal
+ /**
+ * @j_state_lock: Protect the various scalars in the journal.
*/
rwlock_t j_state_lock;
- /*
- * Number of processes waiting to create a barrier lock [j_state_lock]
+ /**
+ * @j_barrier_count:
+ *
+ * Number of processes waiting to create a barrier lock [j_state_lock,
+ * no lock for quick racy checks]
*/
int j_barrier_count;
- /* The barrier lock itself */
+ /**
+ * @j_barrier: The barrier lock itself.
+ */
struct mutex j_barrier;
- /*
+ /**
+ * @j_running_transaction:
+ *
* Transactions: The current running transaction...
- * [j_state_lock] [caller holding open handle]
+ * [j_state_lock, no lock for quick racy checks] [caller holding
+ * open handle]
*/
transaction_t *j_running_transaction;
- /*
+ /**
+ * @j_committing_transaction:
+ *
* the transaction we are pushing to disk
* [j_state_lock] [caller holding open handle]
*/
transaction_t *j_committing_transaction;
- /*
+ /**
+ * @j_checkpoint_transactions:
+ *
* ... and a linked circular list of all transactions waiting for
* checkpointing. [j_list_lock]
*/
transaction_t *j_checkpoint_transactions;
- /*
+ /**
+ * @j_wait_transaction_locked:
+ *
* Wait queue for waiting for a locked transaction to start committing,
- * or for a barrier lock to be released
+ * or for a barrier lock to be released.
*/
wait_queue_head_t j_wait_transaction_locked;
- /* Wait queue for waiting for commit to complete */
+ /**
+ * @j_wait_done_commit: Wait queue for waiting for commit to complete.
+ */
wait_queue_head_t j_wait_done_commit;
- /* Wait queue to trigger commit */
+ /**
+ * @j_wait_commit: Wait queue to trigger commit.
+ */
wait_queue_head_t j_wait_commit;
- /* Wait queue to wait for updates to complete */
+ /**
+ * @j_wait_updates: Wait queue to wait for updates to complete.
+ */
wait_queue_head_t j_wait_updates;
- /* Wait queue to wait for reserved buffer credits to drop */
+ /**
+ * @j_wait_reserved:
+ *
+ * Wait queue to wait for reserved buffer credits to drop.
+ */
wait_queue_head_t j_wait_reserved;
- /* Semaphore for locking against concurrent checkpoints */
+ /**
+ * @j_fc_wait:
+ *
+ * Wait queue to wait for completion of async fast commits.
+ */
+ wait_queue_head_t j_fc_wait;
+
+ /**
+ * @j_checkpoint_mutex:
+ *
+ * Semaphore for locking against concurrent checkpoints.
+ */
struct mutex j_checkpoint_mutex;
- /*
+ /**
+ * @j_chkpt_bhs:
+ *
* List of buffer heads used by the checkpoint routine. This
* was moved from jbd2_log_do_checkpoint() to reduce stack
* usage. Access to this array is controlled by the
- * j_checkpoint_mutex. [j_checkpoint_mutex]
+ * @j_checkpoint_mutex. [j_checkpoint_mutex]
*/
struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
-
- /*
+
+ /**
+ * @j_shrinker:
+ *
+	 * Journal head shrinker; reclaims the journal heads of buffers
+	 * that have been written back.
+ */
+ struct shrinker *j_shrinker;
+
+ /**
+ * @j_checkpoint_jh_count:
+ *
+ * Number of journal buffers on the checkpoint list. [j_list_lock]
+ */
+ struct percpu_counter j_checkpoint_jh_count;
+
+ /**
+ * @j_shrink_transaction:
+ *
+	 * Records the next transaction to be shrunk on the checkpoint list.
+ * [j_list_lock]
+ */
+ transaction_t *j_shrink_transaction;
+
+ /**
+ * @j_head:
+ *
* Journal head: identifies the first unused block in the journal.
* [j_state_lock]
*/
unsigned long j_head;
- /*
+ /**
+ * @j_tail:
+ *
* Journal tail: identifies the oldest still-used block in the journal.
* [j_state_lock]
*/
unsigned long j_tail;
- /*
+ /**
+ * @j_free:
+ *
* Journal free: how many free blocks are there in the journal?
* [j_state_lock]
*/
unsigned long j_free;
- /*
- * Journal start and end: the block numbers of the first usable block
- * and one beyond the last usable block in the journal. [j_state_lock]
+ /**
+ * @j_first:
+ *
+ * The block number of the first usable block in the journal
+ * [j_state_lock].
*/
unsigned long j_first;
+
+ /**
+ * @j_last:
+ *
+ * The block number one beyond the last usable block in the journal
+ * [j_state_lock].
+ */
unsigned long j_last;
- /*
- * Device, blocksize and starting block offset for the location where we
- * store the journal.
+ /**
+ * @j_fc_first:
+ *
+ * The block number of the first fast commit block in the journal
+ * [j_state_lock].
+ */
+ unsigned long j_fc_first;
+
+ /**
+ * @j_fc_off:
+ *
+ * Number of fast commit blocks currently allocated. Accessed only
+	 * during fast commit. Currently only one process can do fast commit, so
+ * this field is not protected by any lock.
+ */
+ unsigned long j_fc_off;
+
+ /**
+ * @j_fc_last:
+ *
+ * The block number one beyond the last fast commit block in the journal
+ * [j_state_lock].
+ */
+ unsigned long j_fc_last;
+
+ /**
+ * @j_dev: Device where we store the journal.
*/
struct block_device *j_dev;
+
+ /**
+ * @j_blocksize: Block size for the location where we store the journal.
+ */
int j_blocksize;
+
+ /**
+ * @j_blk_offset:
+ *
+ * Starting block offset into the device where we store the journal.
+ */
unsigned long long j_blk_offset;
+
+ /**
+ * @j_devname: Journal device name.
+ */
char j_devname[BDEVNAME_SIZE+24];
- /*
+ /**
+ * @j_fs_dev:
+ *
* Device which holds the client fs. For internal journal this will be
* equal to j_dev.
*/
struct block_device *j_fs_dev;
- /* Total maximum capacity of the journal region on disk. */
- unsigned int j_maxlen;
+ /**
+ * @j_fs_dev_wb_err:
+ *
+ * Records the errseq of the client fs's backing block device.
+ */
+ errseq_t j_fs_dev_wb_err;
+
+ /**
+ * @j_total_len: Total maximum capacity of the journal region on disk.
+ */
+ unsigned int j_total_len;
- /* Number of buffers reserved from the running transaction */
+ /**
+ * @j_reserved_credits:
+ *
+ * Number of buffers reserved from the running transaction.
+ */
atomic_t j_reserved_credits;
- /*
- * Protects the buffer lists and internal buffer state.
+ /**
+ * @j_list_lock: Protects the buffer lists and internal buffer state.
*/
spinlock_t j_list_lock;
- /* Optional inode where we store the journal. If present, all */
- /* journal block numbers are mapped into this inode via */
- /* bmap(). */
+ /**
+ * @j_inode:
+ *
+ * Optional inode where we store the journal. If present, all
+ * journal block numbers are mapped into this inode via bmap().
+ */
struct inode *j_inode;
- /*
+ /**
+ * @j_tail_sequence:
+ *
* Sequence number of the oldest transaction in the log [j_state_lock]
*/
tid_t j_tail_sequence;
- /*
+ /**
+ * @j_transaction_sequence:
+ *
* Sequence number of the next transaction to grant [j_state_lock]
*/
tid_t j_transaction_sequence;
- /*
+ /**
+ * @j_commit_sequence:
+ *
* Sequence number of the most recently committed transaction
- * [j_state_lock].
+ * [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_sequence;
- /*
+ /**
+ * @j_commit_request:
+ *
* Sequence number of the most recent transaction wanting commit
- * [j_state_lock]
+ * [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_request;
- /*
+ /**
+ * @j_uuid:
+ *
* Journal uuid: identifies the object (filesystem, LVM volume etc)
* backed by this journal. This will eventually be replaced by an array
* of uuids, allowing us to index multiple devices within a single
@@ -958,85 +1057,193 @@ struct journal_s
*/
__u8 j_uuid[16];
- /* Pointer to the current commit thread for this journal */
+ /**
+ * @j_task: Pointer to the current commit thread for this journal.
+ */
struct task_struct *j_task;
- /*
+ /**
+ * @j_max_transaction_buffers:
+ *
* Maximum number of metadata buffers to allow in a single compound
- * commit transaction
+ * commit transaction.
*/
int j_max_transaction_buffers;
- /*
+ /**
+ * @j_revoke_records_per_block:
+ *
+ * Number of revoke records that fit in one descriptor block.
+ */
+ int j_revoke_records_per_block;
+
+ /**
+ * @j_transaction_overhead_buffers:
+ *
+	 * Number of blocks each transaction needs for its own bookkeeping.
+ */
+ int j_transaction_overhead_buffers;
+
+ /**
+ * @j_commit_interval:
+ *
* What is the maximum transaction lifetime before we begin a commit?
*/
unsigned long j_commit_interval;
- /* The timer used to wakeup the commit thread: */
+ /**
+	 * @j_commit_timer: The timer used to wake up the commit thread.
+ */
struct timer_list j_commit_timer;
- /*
- * The revoke table: maintains the list of revoked blocks in the
- * current transaction. [j_revoke_lock]
+ /**
+ * @j_revoke_lock: Protect the revoke table.
*/
spinlock_t j_revoke_lock;
+
+ /**
+ * @j_revoke:
+ *
+ * The revoke table - maintains the list of revoked blocks in the
+ * current transaction.
+ */
struct jbd2_revoke_table_s *j_revoke;
+
+ /**
+ * @j_revoke_table: Alternate revoke tables for j_revoke.
+ */
struct jbd2_revoke_table_s *j_revoke_table[2];
- /*
- * array of bhs for jbd2_journal_commit_transaction
+ /**
+ * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction.
*/
struct buffer_head **j_wbuf;
+
+ /**
+	 * @j_fc_wbuf: Array of fast commit buffer heads. Accessed only
+	 * during a fast commit. Currently only one process can do a fast commit, so
+ * this field is not protected by any lock.
+ */
+ struct buffer_head **j_fc_wbuf;
+
+ /**
+ * @j_wbufsize:
+ *
+ * Size of @j_wbuf array.
+ */
int j_wbufsize;
- /*
- * this is the pid of hte last person to run a synchronous operation
- * through the journal
+ /**
+ * @j_fc_wbufsize:
+ *
+ * Size of @j_fc_wbuf array.
+ */
+ int j_fc_wbufsize;
+
+ /**
+ * @j_last_sync_writer:
+ *
+ * The pid of the last person to run a synchronous operation
+ * through the journal.
*/
pid_t j_last_sync_writer;
- /*
- * the average amount of time in nanoseconds it takes to commit a
+ /**
+ * @j_average_commit_time:
+ *
+ * The average amount of time in nanoseconds it takes to commit a
* transaction to disk. [j_state_lock]
*/
u64 j_average_commit_time;
- /*
- * minimum and maximum times that we should wait for
- * additional filesystem operations to get batched into a
- * synchronous handle in microseconds
+ /**
+ * @j_min_batch_time:
+ *
+	 * Minimum time, in microseconds, that we should wait for additional
+	 * filesystem operations to get batched into a synchronous handle.
*/
u32 j_min_batch_time;
+
+ /**
+ * @j_max_batch_time:
+ *
+	 * Maximum time, in microseconds, that we should wait for additional
+	 * filesystem operations to get batched into a synchronous handle.
+ */
u32 j_max_batch_time;
- /* This function is called when a transaction is closed */
+ /**
+ * @j_commit_callback:
+ *
+ * This function is called when a transaction is closed.
+ */
void (*j_commit_callback)(journal_t *,
transaction_t *);
+ /**
+ * @j_submit_inode_data_buffers:
+ *
+ * This function is called for all inodes associated with the
+ * committing transaction marked with JI_WRITE_DATA flag
+ * before we start to write out the transaction to the journal.
+ */
+ int (*j_submit_inode_data_buffers)
+ (struct jbd2_inode *);
+
+ /**
+ * @j_finish_inode_data_buffers:
+ *
+ * This function is called for all inodes associated with the
+ * committing transaction marked with JI_WAIT_DATA flag
+ * after we have written the transaction to the journal
+ * but before we write out the commit block.
+ */
+ int (*j_finish_inode_data_buffers)
+ (struct jbd2_inode *);
+
/*
* Journal statistics
*/
+
+ /**
+ * @j_history_lock: Protect the transactions statistics history.
+ */
spinlock_t j_history_lock;
+
+ /**
+ * @j_proc_entry: procfs entry for the jbd statistics directory.
+ */
struct proc_dir_entry *j_proc_entry;
+
+ /**
+ * @j_stats: Overall statistics.
+ */
struct transaction_stats_s j_stats;
- /* Failed journal commit ID */
+ /**
+ * @j_failed_commit: Failed journal commit ID.
+ */
unsigned int j_failed_commit;
- /*
+ /**
+ * @j_private:
+ *
* An opaque pointer to fs-private information. ext3 puts its
- * superblock pointer here
+ * superblock pointer here.
*/
void *j_private;
- /* Reference to checksum algorithm driver via cryptoapi */
- struct crypto_shash *j_chksum_driver;
-
- /* Precomputed journal UUID checksum for seeding other checksums */
+ /**
+ * @j_csum_seed:
+ *
+ * Precomputed journal UUID checksum for seeding other checksums.
+ */
__u32 j_csum_seed;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
+ /**
+ * @j_trans_commit_map:
+ *
* Lockdep entity to track transaction commit dependencies. Handles
* hold this "lock" for read, when we wait for commit, we acquire the
* "lock" for writing. This matches the properties of jbd2 journalling
@@ -1046,19 +1253,68 @@ struct journal_s
*/
struct lockdep_map j_trans_commit_map;
#endif
+ /**
+ * @jbd2_trans_commit_key:
+ *
+ * "struct lock_class_key" for @j_trans_commit_map
+ */
+ struct lock_class_key jbd2_trans_commit_key;
+
+ /**
+ * @j_fc_cleanup_callback:
+ *
+ * Clean-up after fast commit or full commit. JBD2 calls this function
+ * after every commit operation.
+ */
+ void (*j_fc_cleanup_callback)(struct journal_s *journal, int full, tid_t tid);
+
+ /**
+ * @j_fc_replay_callback:
+ *
+ * File-system specific function that performs replay of a fast
+ * commit. JBD2 calls this function for each fast commit block found in
+ * the journal. This function should return JBD2_FC_REPLAY_CONTINUE
+ * to indicate that the block was processed correctly and more fast
+ * commit replay should continue. Return value of JBD2_FC_REPLAY_STOP
+ * indicates the end of replay (no more blocks remaining). A negative
+ * return value indicates error.
+ */
+ int (*j_fc_replay_callback)(struct journal_s *journal,
+ struct buffer_head *bh,
+ enum passtype pass, int off,
+ tid_t expected_commit_id);
+
+ /**
+ * @j_bmap:
+ *
+ * Bmap function that should be used instead of the generic
+ * VFS bmap function.
+ */
+ int (*j_bmap)(struct journal_s *journal, sector_t *block);
};
#define jbd2_might_wait_for_commit(j) \
do { \
rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \
- rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \
+ rwsem_release(&j->j_trans_commit_map, _THIS_IP_); \
} while (0)
+/*
+ * We can support any known requested features iff the
+ * superblock is not in version 1. Otherwise we fail to support any
+ * extended sb features.
+ */
+static inline bool jbd2_format_support_feature(journal_t *j)
+{
+ return j->j_superblock->s_header.h_blocktype !=
+ cpu_to_be32(JBD2_SUPERBLOCK_V1);
+}
+
/* journal feature predicate functions */
#define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_compat & \
cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \
} \
@@ -1076,7 +1332,7 @@ static inline void jbd2_clear_feature_##name(journal_t *j) \
#define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_ro_compat & \
cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \
} \
@@ -1094,7 +1350,7 @@ static inline void jbd2_clear_feature_##name(journal_t *j) \
#define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_incompat & \
cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \
} \
@@ -1116,6 +1372,10 @@ JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT)
JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT)
JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2)
JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
+JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
+
+/* Journal high priority write IO operation flags */
+#define JBD2_JOURNAL_REQ_FLAGS (REQ_META | REQ_SYNC | REQ_IDLE)
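
Each JBD2_FEATURE_*_FUNCS() invocation above expands to a
jbd2_has_feature_<name>() predicate plus, by the same pattern, set and
clear helpers (jbd2_clear_feature_<name>() is visible in the elided
hunks). A minimal usage sketch — the wrapper function is invented for
illustration:

static void example_enable_fast_commit(journal_t *journal)
{
	/* Helpers generated by JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT). */
	if (!jbd2_has_feature_fast_commit(journal))
		jbd2_set_feature_fast_commit(journal);
}
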
/*
* Journal flag definitions
@@ -1126,10 +1386,15 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */
#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */
#define JBD2_BARRIER 0x020 /* Use IDE barriers */
-#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
- * data write error in ordered
- * mode */
-#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
+#define JBD2_CYCLE_RECORD 0x080 /* Journal cycled record log on
+ * clean and empty filesystem
+ * logging area */
+#define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */
+#define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */
+#define JBD2_JOURNAL_FLUSH_DISCARD 0x0001
+#define JBD2_JOURNAL_FLUSH_ZEROOUT 0x0002
+#define JBD2_JOURNAL_FLUSH_VALID (JBD2_JOURNAL_FLUSH_DISCARD | \
+ JBD2_JOURNAL_FLUSH_ZEROOUT)
/*
* Function declarations for the journaling transaction and buffer
@@ -1137,13 +1402,10 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
*/
/* Filing buffers */
-extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __jbd2_journal_refile_buffer(struct journal_head *);
+extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
{
list_add_tail(&bh->b_assoc_buffers, head);
@@ -1166,8 +1428,12 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+enum jbd2_shrink_type {JBD2_SHRINK_DESTROY, JBD2_SHRINK_BUSY_STOP, JBD2_SHRINK_BUSY_SKIP};
+
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, enum jbd2_shrink_type type);
+unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
+int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
@@ -1207,12 +1473,9 @@ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr);
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
/* Transaction cache support */
extern void jbd2_journal_destroy_transaction_cache(void);
-extern int jbd2_journal_init_transaction_cache(void);
+extern int __init jbd2_journal_init_transaction_cache(void);
extern void jbd2_journal_free_transaction(transaction_t *);
/*
@@ -1239,14 +1502,16 @@ static inline handle_t *journal_current_handle(void)
extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks,
- gfp_t gfp_mask, unsigned int type,
- unsigned int line_no);
+ int revoke_records, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no);
extern int jbd2_journal_restart(handle_t *, int nblocks);
-extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
+extern int jbd2__journal_restart(handle_t *, int nblocks,
+ int revoke_records, gfp_t gfp_mask);
extern int jbd2_journal_start_reserved(handle_t *handle,
unsigned int type, unsigned int line_no);
extern void jbd2_journal_free_reserved(handle_t *handle);
-extern int jbd2_journal_extend (handle_t *, int nblocks);
+extern int jbd2_journal_extend(handle_t *handle, int nblocks,
+ int revoke_records);
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
@@ -1254,15 +1519,16 @@ void jbd2_journal_set_triggers(struct buffer_head *,
struct jbd2_buffer_trigger_type *type);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
-extern void journal_sync_buffer (struct buffer_head *);
-extern int jbd2_journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
-extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+int jbd2_journal_invalidate_folio(journal_t *, struct folio *,
+ size_t offset, size_t length);
+bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio);
extern int jbd2_journal_stop(handle_t *);
-extern int jbd2_journal_flush (journal_t *);
+extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);
extern void jbd2_journal_lock_updates (journal_t *);
extern void jbd2_journal_unlock_updates (journal_t *);
+void jbd2_journal_wait_updates(journal_t *);
+
extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int bsize);
@@ -1283,8 +1549,7 @@ extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
- unsigned long, int);
-extern void __jbd2_journal_abort_hard (journal_t *);
+ unsigned long, blk_opf_t);
extern void jbd2_journal_abort (journal_t *, int);
extern int jbd2_journal_errno (journal_t *);
extern void jbd2_journal_ack_err (journal_t *);
@@ -1292,8 +1557,14 @@ extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
extern int jbd2_journal_force_commit(journal_t *);
extern int jbd2_journal_force_commit_nested(journal_t *);
-extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
-extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
+extern int jbd2_journal_inode_ranged_write(handle_t *handle,
+ struct jbd2_inode *inode, loff_t start_byte,
+ loff_t length);
+extern int jbd2_journal_inode_ranged_wait(handle_t *handle,
+ struct jbd2_inode *inode, loff_t start_byte,
+ loff_t length);
+extern int jbd2_journal_finish_inode_data_buffers(
+ struct jbd2_inode *jinode);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
struct jbd2_inode *inode, loff_t new_size);
extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
@@ -1311,10 +1582,13 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
*/
extern struct kmem_cache *jbd2_handle_cache;
-static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags)
-{
- return kmem_cache_zalloc(jbd2_handle_cache, gfp_flags);
-}
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
+ */
+#define jbd2_alloc_handle(_gfp_flags) \
+ ((handle_t *)kmem_cache_zalloc(jbd2_handle_cache, _gfp_flags))
static inline void jbd2_free_handle(handle_t *handle)
{
@@ -1327,10 +1601,13 @@ static inline void jbd2_free_handle(handle_t *handle)
*/
extern struct kmem_cache *jbd2_inode_cache;
-static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
-{
- return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
-}
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
+ */
+#define jbd2_alloc_inode(_gfp_flags) \
+ ((struct jbd2_inode *)kmem_cache_alloc(jbd2_inode_cache, _gfp_flags))
static inline void jbd2_free_inode(struct jbd2_inode *jinode)
{
@@ -1340,12 +1617,16 @@ static inline void jbd2_free_inode(struct jbd2_inode *jinode)
/* Primary revoke support */
#define JOURNAL_REVOKE_DEFAULT_HASH 256
extern int jbd2_journal_init_revoke(journal_t *, int);
-extern void jbd2_journal_destroy_revoke_caches(void);
-extern int jbd2_journal_init_revoke_caches(void);
+extern void jbd2_journal_destroy_revoke_record_cache(void);
+extern void jbd2_journal_destroy_revoke_table_cache(void);
+extern int __init jbd2_journal_init_revoke_record_cache(void);
+extern int __init jbd2_journal_init_revoke_table_cache(void);
+struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size);
+void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table);
extern void jbd2_journal_destroy_revoke(journal_t *);
extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
-extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
+extern void jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
extern void jbd2_journal_write_revoke_records(transaction_t *transaction,
struct list_head *log_bufs);
@@ -1364,9 +1645,9 @@ extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);
*/
int jbd2_log_start_commit(journal_t *journal, tid_t tid);
-int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
+int jbd2_transaction_committed(journal_t *journal, tid_t tid);
int jbd2_complete_transaction(journal_t *journal, tid_t tid);
int jbd2_log_do_checkpoint(journal_t *journal);
int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
@@ -1375,6 +1656,16 @@ void __jbd2_log_wait_for_space(journal_t *journal);
extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
extern int jbd2_cleanup_journal_tail(journal_t *);
+/* Fast commit related APIs */
+int jbd2_fc_begin_commit(journal_t *journal, tid_t tid);
+int jbd2_fc_end_commit(journal_t *journal);
+int jbd2_fc_end_commit_fallback(journal_t *journal);
+int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
+int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode);
+int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
+int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
+void jbd2_fc_release_bufs(journal_t *journal);
+
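
A rough sketch of how a client filesystem might chain these fast commit
hooks; the wrapper and the "fill the buffer" step are illustrative, and
real callers (e.g. ext4) add locking and richer error handling:

static int example_fast_commit(journal_t *journal, tid_t tid)
{
	struct buffer_head *bh;
	int ret;

	ret = jbd2_fc_begin_commit(journal, tid);
	if (ret)
		return ret;

	ret = jbd2_fc_get_buf(journal, &bh);
	if (ret)
		goto fallback;
	/* ... fill bh with fs-specific fast commit data and submit it ... */

	ret = jbd2_fc_wait_bufs(journal, 1);
	if (ret)
		goto fallback;

	return jbd2_fc_end_commit(journal);

fallback:
	/* Give the blocks back and fall back to a full commit. */
	jbd2_fc_release_bufs(journal);
	return jbd2_fc_end_commit_fallback(journal);
}
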
/*
* is_journal_abort
*
@@ -1402,6 +1693,25 @@ static inline void jbd2_journal_abort_handle(handle_t *handle)
handle->h_aborted = 1;
}
+static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_mapping;
+
+ /*
+	 * Save the current wb_err value of the client fs's bdev mapping so
+	 * it can later be used to detect the client fs's metadata async
+	 * write errors.
+ */
+ errseq_check_and_advance(&mapping->wb_err, &journal->j_fs_dev_wb_err);
+}
+
+static inline int jbd2_check_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_mapping;
+
+ return errseq_check(&mapping->wb_err,
+ READ_ONCE(journal->j_fs_dev_wb_err));
+}
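
The intended pairing, as a sketch — the call sites are the client
filesystem's choice, and the abort reaction is just one example:

/* Once, when the journal is loaded: */
jbd2_init_fs_dev_write_error(journal);

/* Later, e.g. before writing a commit block: */
if (jbd2_check_fs_dev_write_error(journal))
	jbd2_journal_abort(journal, -EIO);
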
+
#endif /* __KERNEL__ */
/* Comparison functions for transaction IDs: perform comparisons using
@@ -1419,36 +1729,20 @@ static inline int tid_geq(tid_t x, tid_t y)
return (difference >= 0);
}
-extern int jbd2_journal_blocks_per_page(struct inode *inode);
+extern int jbd2_journal_blocks_per_folio(struct inode *inode);
extern size_t journal_tag_bytes(journal_t *journal);
-static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j)
-{
- return jbd2_has_feature_csum2(j) || jbd2_has_feature_csum3(j);
-}
-
static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
{
- WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) &&
- journal->j_chksum_driver == NULL);
-
- return journal->j_chksum_driver != NULL;
+ return jbd2_has_feature_csum2(journal) ||
+ jbd2_has_feature_csum3(journal);
}
-/*
- * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
- * transaction control blocks.
- */
-#define JBD2_CONTROL_BLOCKS_SHIFT 5
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started. Must be called under j_state_lock.
- */
-static inline int jbd2_space_needed(journal_t *journal)
+static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb)
{
- int nblocks = journal->j_max_transaction_buffers;
- return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
+ int num_fc_blocks = be32_to_cpu(jsb->s_num_fc_blks);
+
+ return num_fc_blocks ? num_fc_blocks : JBD2_DEFAULT_FAST_COMMIT_BLOCKS;
}
/*
@@ -1457,16 +1751,13 @@ static inline int jbd2_space_needed(journal_t *journal)
static inline unsigned long jbd2_log_space_left(journal_t *journal)
{
/* Allow for rounding errors */
- unsigned long free = journal->j_free - 32;
+ long free = journal->j_free - 32;
if (journal->j_committing_transaction) {
- unsigned long committing = atomic_read(&journal->
- j_committing_transaction->t_outstanding_credits);
-
- /* Transaction + control blocks */
- free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
+ free -= atomic_read(&journal->
+ j_committing_transaction->t_outstanding_credits);
}
- return free;
+ return max_t(long, free, 0);
}
/*
@@ -1481,31 +1772,9 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-extern int jbd_blocks_per_page(struct inode *inode);
-
-/* JBD uses a CRC32 checksum */
-#define JBD_MAX_CHECKSUM_SIZE 4
-
-static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
- const void *address, unsigned int length)
+static inline u32 jbd2_chksum(u32 crc, const void *address, unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[JBD_MAX_CHECKSUM_SIZE];
- } desc;
- int err;
-
- BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) >
- JBD_MAX_CHECKSUM_SIZE);
-
- desc.shash.tfm = journal->j_chksum_driver;
- desc.shash.flags = 0;
- *(u32 *)desc.ctx = crc;
-
- err = crypto_shash_update(&desc.shash, address, length);
- BUG_ON(err);
-
- return *(u32 *)desc.ctx;
+ return crc32c(crc, address, length);
}
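
A sketch of how this helper composes with @j_csum_seed from journal_s
above; the buffer head is assumed to hold one journal block:

u32 csum = jbd2_chksum(journal->j_csum_seed, bh->b_data,
		       journal->j_blocksize);
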
/* Return most recent uncommitted transaction */
@@ -1521,6 +1790,20 @@ static inline tid_t jbd2_get_latest_transaction(journal_t *journal)
return tid;
}
+static inline int jbd2_handle_buffer_credits(handle_t *handle)
+{
+ journal_t *journal;
+
+ if (!handle->h_reserved)
+ journal = handle->h_transaction->t_journal;
+ else
+ journal = handle->h_journal;
+
+ return handle->h_total_credits -
+ DIV_ROUND_UP(handle->h_revoke_credits_requested,
+ journal->j_revoke_records_per_block);
+}
+
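
As a usage sketch, a caller can compare the remaining buffer credits
against an upcoming update and grow the handle when short; "needed" and
"revoke_records" are illustrative, and the positive-return convention of
jbd2_journal_extend() (transaction too full, restart instead) is assumed:

if (jbd2_handle_buffer_credits(handle) < needed) {
	err = jbd2_journal_extend(handle, needed, revoke_records);
	if (err > 0)	/* could not extend: start a fresh transaction */
		err = jbd2_journal_restart(handle, needed);
}
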
#ifdef __KERNEL__
#define buffer_trace_init(bh) do {} while (0)
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 8037850f3104..7c1c1821c694 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
*
- * http://burtleburtle.net/bob/hash/
+ * https://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
@@ -17,21 +17,21 @@
* if SELF_TEST is defined. You can use this free for any purpose. It's in
* the public domain. It has no warranty.
*
- * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@netfilter.org)
*
* I've modified Bob's hash to be useful in the Linux kernel, and
* any bugs present are my fault.
* Jozsef
*/
#include <linux/bitops.h>
-#include <linux/unaligned/packed_struct.h>
+#include <linux/unaligned.h>
/* Best hash sizes are of power of two */
#define jhash_size(n) ((u32)1<<(n))
/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
#define jhash_mask(n) (jhash_size(n)-1)
-/* __jhash_mix -- mix 3 32-bit values reversibly. */
+/* __jhash_mix - mix 3 32-bit values reversibly. */
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
@@ -60,7 +60,7 @@
/* jhash - hash an arbitrary key
* @k: sequence of bytes as key
* @length: the length of the key
- * @initval: the previous hash, or an arbitray value
+ * @initval: the previous hash, or an arbitrary value
*
* The generic version, hashes an arbitrary sequence of bytes.
* No alignment or length assumptions are made about the input key.
@@ -77,28 +77,29 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
/* All but the last block: affect some 32 bits of (a,b,c) */
while (length > 12) {
- a += __get_unaligned_cpu32(k);
- b += __get_unaligned_cpu32(k + 4);
- c += __get_unaligned_cpu32(k + 8);
+ a += get_unaligned((u32 *)k);
+ b += get_unaligned((u32 *)(k + 4));
+ c += get_unaligned((u32 *)(k + 8));
__jhash_mix(a, b, c);
length -= 12;
k += 12;
}
/* Last block: affect all 32 bits of (c) */
switch (length) {
- case 12: c += (u32)k[11]<<24; /* fall through */
- case 11: c += (u32)k[10]<<16; /* fall through */
- case 10: c += (u32)k[9]<<8; /* fall through */
- case 9: c += k[8]; /* fall through */
- case 8: b += (u32)k[7]<<24; /* fall through */
- case 7: b += (u32)k[6]<<16; /* fall through */
- case 6: b += (u32)k[5]<<8; /* fall through */
- case 5: b += k[4]; /* fall through */
- case 4: a += (u32)k[3]<<24; /* fall through */
- case 3: a += (u32)k[2]<<16; /* fall through */
- case 2: a += (u32)k[1]<<8; /* fall through */
+ case 12: c += (u32)k[11]<<24; fallthrough;
+ case 11: c += (u32)k[10]<<16; fallthrough;
+ case 10: c += (u32)k[9]<<8; fallthrough;
+ case 9: c += k[8]; fallthrough;
+ case 8: b += (u32)k[7]<<24; fallthrough;
+ case 7: b += (u32)k[6]<<16; fallthrough;
+ case 6: b += (u32)k[5]<<8; fallthrough;
+ case 5: b += k[4]; fallthrough;
+ case 4: a += (u32)k[3]<<24; fallthrough;
+ case 3: a += (u32)k[2]<<16; fallthrough;
+ case 2: a += (u32)k[1]<<8; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
+ break;
case 0: /* Nothing left to add */
break;
}
@@ -109,7 +110,7 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
/* jhash2 - hash an array of u32's
* @k: the key which must be an array of u32's
* @length: the number of u32's in the key
- * @initval: the previous hash, or an arbitray value
+ * @initval: the previous hash, or an arbitrary value
*
* Returns the hash value of the key.
*/
@@ -132,10 +133,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
/* Handle the last 3 u32's */
switch (length) {
- case 3: c += k[2]; /* fall through */
- case 2: b += k[1]; /* fall through */
+ case 3: c += k[2]; fallthrough;
+ case 2: b += k[1]; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
+ break;
case 0: /* Nothing left to add */
break;
}
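
A minimal sketch of both entry points; the struct and seed are arbitrary:

struct flow_key { u32 saddr, daddr, ports; };

static u32 example_flow_hash(const struct flow_key *k, u32 seed)
{
	/* jhash2() takes a u32 array and a length in words ... */
	u32 h = jhash2((const u32 *)k, sizeof(*k) / sizeof(u32), seed);

	/* ... while jhash() takes raw bytes and a length in bytes. */
	return jhash(k, sizeof(*k), h);
}
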
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 734377ad42e9..fdef2c155c27 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,12 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
#include <linux/cache.h>
+#include <linux/limits.h>
#include <linux/math64.h>
-#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/timex.h>
+#include <vdso/jiffies.h>
#include <asm/param.h> /* for HZ */
#include <generated/timeconst.h>
@@ -56,22 +59,28 @@
/* LATCH is used in the interval timer and ftape setup. */
#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-extern int register_refined_jiffies(long clock_tick_rate);
+extern void register_refined_jiffies(long clock_tick_rate);
-/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
-#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
+/* TICK_USEC is the time between ticks in usec */
+#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
-/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
-#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
#ifndef __jiffy_arch_data
#define __jiffy_arch_data
#endif
/*
- * The 64-bit value is not atomic - you MUST NOT read it
+ * The 64-bit value is not atomic on 32-bit systems - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
+ *
+ * jiffies and jiffies_64 are at the same address for little-endian systems
+ * and for 64-bit big-endian systems.
+ * On 32-bit big-endian systems, jiffies is the lower 32 bits of jiffies_64
+ * (i.e., at address @jiffies_64 + 4).
+ * See arch/ARCH/kernel/vmlinux.lds.S
*/
extern u64 __cacheline_aligned_in_smp jiffies_64;
extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
@@ -79,46 +88,94 @@ extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffi
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
+/**
+ * get_jiffies_64 - read the 64-bit non-atomic jiffies_64 value
+ *
+ * When BITS_PER_LONG < 64, this uses sequence number sampling under
+ * jiffies_lock to protect the 64-bit read.
+ *
+ * Return: current 64-bit jiffies value
+ */
static inline u64 get_jiffies_64(void)
{
return (u64)jiffies;
}
#endif
-/*
- * These inlines deal with timer wrapping correctly. You are
- * strongly encouraged to use them
- * 1. Because people otherwise forget
- * 2. Because if the timer wrap changes in future you won't have to
- * alter your driver code.
+/**
+ * DOC: General information about time_* inlines
+ *
+ * These inlines deal with timer wrapping correctly. You are strongly encouraged
+ * to use them:
*
- * time_after(a,b) returns true if the time a is after time b.
+ * #. Because people otherwise forget
+ * #. Because if the timer wrap changes in future you won't have to alter your
+ * driver code.
+ */
+
+/**
+ * time_after - returns true if the time a is after time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
*
* Do this with "<0" and ">=0" to only test the sign of the result. A
* good compiler would generate better code (and a really good compiler
* wouldn't care). Gcc is currently neither.
+ *
+ * Return: %true if time a is after time b, otherwise %false.
*/
#define time_after(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((b) - (a)) < 0))
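
The wrap-safe polling pattern these macros exist for, as a sketch —
try_hardware() is hypothetical:

static int example_wait_ready(void)
{
	unsigned long timeout = jiffies + HZ;	/* give up after ~1 second */

	while (!try_hardware()) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* correct even across a jiffies wrap */
		cpu_relax();
	}
	return 0;
}
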
+/**
+ * time_before - returns true if the time a is before time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before(a,b) time_after(b,a)
+/**
+ * time_after_eq - returns true if the time a is after or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((a) - (b)) >= 0))
+/**
+ * time_before_eq - returns true if the time a is before or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq(a,b) time_after_eq(b,a)
-/*
- * Calculate whether a is in the range of [b, c].
+/**
+ * time_in_range - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
*/
#define time_in_range(a,b,c) \
(time_after_eq(a,b) && \
time_before_eq(a,c))
-/*
- * Calculate whether a is in the range of [b, c).
+/**
+ * time_in_range_open - Calculate whether a is in the range of [b, c).
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c), otherwise %false.
*/
#define time_in_range_open(a,b,c) \
(time_after_eq(a,b) && \
@@ -126,45 +183,138 @@ static inline u64 get_jiffies_64(void)
/* Same as above, but does so with platform independent 64bit types.
* These must be used when utilizing jiffies_64 (i.e. return value of
- * get_jiffies_64() */
+ * get_jiffies_64()). */
+
+/**
+ * time_after64 - returns true if the time a is after time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after time b, otherwise %false.
+ */
#define time_after64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((b) - (a)) < 0))
+/**
+ * time_before64 - returns true if the time a is before time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before64(a,b) time_after64(b,a)
+/**
+ * time_after_eq64 - returns true if the time a is after or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((a) - (b)) >= 0))
+/**
+ * time_before_eq64 - returns true if the time a is before or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true is time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq64(a,b) time_after_eq64(b,a)
+/**
+ * time_in_range64 - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
+ */
#define time_in_range64(a, b, c) \
(time_after_eq64(a, b) && \
time_before_eq64(a, c))
/*
- * These four macros compare jiffies and 'a' for convenience.
+ * These eight macros compare jiffies[_64] and 'a' for convenience.
*/
-/* time_is_before_jiffies(a) return true if a is before jiffies */
+/**
+ * time_is_before_jiffies - return true if a is before jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before jiffies, otherwise %false.
+ */
#define time_is_before_jiffies(a) time_after(jiffies, a)
+/**
+ * time_is_before_jiffies64 - return true if a is before jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before jiffies_64, otherwise %false.
+ */
#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
-/* time_is_after_jiffies(a) return true if a is after jiffies */
+/**
+ * time_is_after_jiffies - return true if a is after jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after jiffies, otherwise %false.
+ */
#define time_is_after_jiffies(a) time_before(jiffies, a)
+/**
+ * time_is_after_jiffies64 - return true if a is after jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is after jiffies_64, otherwise %false.
+ */
#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a)
-/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
+/**
+ * time_is_before_eq_jiffies - return true if a is before or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before or the same as jiffies, otherwise %false.
+ */
#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+/**
+ * time_is_before_eq_jiffies64 - return true if a is before or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before or the same as jiffies_64, otherwise %false.
+ */
#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a)
-/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
+/**
+ * time_is_after_eq_jiffies - return true if a is after or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after or the same as jiffies, otherwise %false.
+ */
#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+/**
+ * time_is_after_eq_jiffies64 - return true if a is after or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true is time a is after or the same as jiffies_64, otherwise %false.
+ */
#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
/*
- * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * Have the 32-bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -268,14 +418,14 @@ extern unsigned long preset_lpj;
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
/*
- * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
+ * The maximum jiffy value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,
* so use the messy SH_DIV macro to do it. Still all constants.
*/
#if BITS_PER_LONG < 64
# define MAX_SEC_IN_JIFFIES \
(long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
-#else /* take care of overflow on 64 bits machines */
+#else /* take care of overflow on 64-bit machines */
# define MAX_SEC_IN_JIFFIES \
(SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
@@ -287,12 +437,19 @@ extern unsigned long preset_lpj;
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
+/**
+ * jiffies_to_nsecs - Convert jiffies to nanoseconds
+ * @j: jiffies value
+ *
+ * Return: nanoseconds value
+ */
static inline u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
extern u64 jiffies64_to_nsecs(u64 j);
+extern u64 jiffies64_to_msecs(u64 j);
extern unsigned long __msecs_to_jiffies(const unsigned int m);
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
@@ -345,16 +502,18 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows.
- * for the details see __msecs_to_jiffies()
+ * for the details see _msecs_to_jiffies()
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __msecs_to_jiffies() is called if the value passed does not
+ * code. __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * The HZ range specific helpers _msecs_to_jiffies() are called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
@@ -367,6 +526,19 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
}
}
+/**
+ * secs_to_jiffies - convert seconds to jiffies
+ * @_secs: time in seconds
+ *
+ * Conversion is done by simple multiplication with HZ.
+ *
+ * secs_to_jiffies() is defined as a macro rather than a static inline
+ * function so it can be used in static initializers.
+ *
+ * Return: jiffies value
+ */
+#define secs_to_jiffies(_secs) (unsigned long)((_secs) * HZ)
+
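
A short sketch contrasting the two conversions; the sleep call is just
one common consumer:

/* Constant input: msecs_to_jiffies() folds to a compile-time value. */
schedule_timeout_interruptible(msecs_to_jiffies(1500));

/* Whole seconds: being a macro, this works in static initializers. */
static unsigned long poll_interval = secs_to_jiffies(5);
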
extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
@@ -396,12 +568,14 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
*
* usecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __usecs_to_jiffies() is called if the value passed does not
+ * code. __usecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * The HZ range specific helpers _usecs_to_jiffies() are called both
 * directly here and from __usecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
@@ -417,30 +591,16 @@ static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
-static inline unsigned long timespec_to_jiffies(const struct timespec *value)
-{
- struct timespec64 ts = timespec_to_timespec64(*value);
-
- return timespec64_to_jiffies(&ts);
-}
+extern clock_t jiffies_to_clock_t(unsigned long x);
-static inline void jiffies_to_timespec(const unsigned long jiffies,
- struct timespec *value)
+static inline clock_t jiffies_delta_to_clock_t(long delta)
{
- struct timespec64 ts;
-
- jiffies_to_timespec64(jiffies, &ts);
- *value = timespec64_to_timespec(ts);
+ return jiffies_to_clock_t(max(0L, delta));
}
-extern unsigned long timeval_to_jiffies(const struct timeval *value);
-extern void jiffies_to_timeval(const unsigned long jiffies,
- struct timeval *value);
-
-extern clock_t jiffies_to_clock_t(unsigned long x);
-static inline clock_t jiffies_delta_to_clock_t(long delta)
+static inline unsigned int jiffies_delta_to_msecs(long delta)
{
- return jiffies_to_clock_t(max(0L, delta));
+ return jiffies_to_msecs(max(0L, delta));
}
extern unsigned long clock_t_to_jiffies(unsigned long x);
@@ -451,4 +611,16 @@ extern unsigned long nsecs_to_jiffies(u64 n);
#define TIMESTAMP_SIZE 30
+struct ctl_table;
+int proc_dointvec_jiffies(const struct ctl_table *table, int dir, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int proc_dointvec_ms_jiffies(const struct ctl_table *table, int dir, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos);
+
#endif
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
index 98cd41bb39c8..75bc56109031 100644
--- a/include/linux/journal-head.h
+++ b/include/linux/journal-head.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/journal-head.h
*
@@ -10,6 +11,8 @@
#ifndef JOURNAL_HEAD_H_INCLUDED
#define JOURNAL_HEAD_H_INCLUDED
+#include <linux/spinlock.h>
+
typedef unsigned int tid_t; /* Unique transaction ID */
typedef struct transaction_s transaction_t; /* Compound transaction type */
@@ -23,13 +26,18 @@ struct journal_head {
struct buffer_head *b_bh;
/*
+ * Protect the buffer head state
+ */
+ spinlock_t b_state_lock;
+
+ /*
* Reference count - see description in journal.c
* [jbd_lock_bh_journal_head()]
*/
int b_jcount;
/*
- * Journalling list for this buffer [jbd_lock_bh_state()]
+ * Journalling list for this buffer [b_state_lock]
* NOTE: We *cannot* combine this with b_modified into a bitfield
* as gcc would then (which the C standard allows but which is
* very unuseful) make 64-bit accesses to the bitfield and clobber
@@ -40,20 +48,20 @@ struct journal_head {
/*
* This flag signals the buffer has been modified by
* the currently running transaction
- * [jbd_lock_bh_state()]
+ * [b_state_lock]
*/
unsigned b_modified;
/*
* Copy of the buffer data frozen for writing to the log.
- * [jbd_lock_bh_state()]
+ * [b_state_lock]
*/
char *b_frozen_data;
/*
* Pointer to a saved copy of the buffer containing no uncommitted
* deallocation references, so that allocations can avoid overwriting
- * uncommitted deletes. [jbd_lock_bh_state()]
+ * uncommitted deletes. [b_state_lock]
*/
char *b_committed_data;
@@ -62,7 +70,7 @@ struct journal_head {
* metadata: either the running transaction or the committing
* transaction (if there is one). Only applies to buffers on a
* transaction's data or metadata journaling list.
- * [j_list_lock] [jbd_lock_bh_state()]
+ * [j_list_lock] [b_state_lock]
* Either of these locks is enough for reading, both are needed for
* changes.
*/
@@ -72,13 +80,13 @@ struct journal_head {
* Pointer to the running compound transaction which is currently
* modifying the buffer's metadata, if there was already a transaction
* committing it when the new transaction touched it.
- * [t_list_lock] [jbd_lock_bh_state()]
+ * [t_list_lock] [b_state_lock]
*/
transaction_t *b_next_transaction;
/*
* Doubly-linked list of buffers on a transaction's data, metadata or
- * forget queue. [t_list_lock] [jbd_lock_bh_state()]
+ * forget queue. [t_list_lock] [b_state_lock]
*/
struct journal_head *b_tnext, *b_tprev;
diff --git a/include/linux/joystick.h b/include/linux/joystick.h
index cbf2aa9e93b9..41b833b012f5 100644
--- a/include/linux/joystick.h
+++ b/include/linux/joystick.h
@@ -1,26 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 1996-2000 Vojtech Pavlik
*
* Sponsored by SuSE
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
- * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
*/
#ifndef _LINUX_JOYSTICK_H
#define _LINUX_JOYSTICK_H
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index cd5861651b17..fdb79dd1ebd8 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H
@@ -67,28 +68,24 @@
* Lacking toolchain and or architecture support, static keys fall back to a
* simple conditional branch.
*
- * Additional babbling in: Documentation/static-keys.txt
+ * Additional babbling in: Documentation/staging/static-keys.rst
*/
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
-# define HAVE_JUMP_LABEL
-#endif
-
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/cleanup.h>
extern bool static_key_initialized;
-#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
- "%s used before call to jump_label_init", \
- __func__)
-
-#ifdef HAVE_JUMP_LABEL
+#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \
+ "%s(): static key '%pS' used before call to jump_label_init()", \
+ __func__, (key))
struct static_key {
atomic_t enabled;
+#ifdef CONFIG_JUMP_LABEL
/*
* Note:
* To make anonymous unions work with old compilers, the static
@@ -107,17 +104,87 @@ struct static_key {
struct jump_entry *entries;
struct static_key_mod *next;
};
+#endif /* CONFIG_JUMP_LABEL */
};
-#else
-struct static_key {
- atomic_t enabled;
-};
-#endif /* HAVE_JUMP_LABEL */
#endif /* __ASSEMBLY__ */
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#include <asm/jump_label.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
+
+struct jump_entry {
+ s32 code;
+ s32 target;
+ long key; // key may be far away from the core kernel under KASLR
+};
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+ return (unsigned long)&entry->code + entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+ return (unsigned long)&entry->target + entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+ long offset = entry->key & ~3L;
+
+ return (struct static_key *)((unsigned long)&entry->key + offset);
+}
+
+#else
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+ return entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+ return entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+ return (struct static_key *)((unsigned long)entry->key & ~3UL);
+}
+
+#endif
+
+static inline bool jump_entry_is_branch(const struct jump_entry *entry)
+{
+ return (unsigned long)entry->key & 1UL;
+}
+
+static inline bool jump_entry_is_init(const struct jump_entry *entry)
+{
+ return (unsigned long)entry->key & 2UL;
+}
+
+static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
+{
+ if (set)
+ entry->key |= 2;
+ else
+ entry->key &= ~2;
+}
+
+static inline int jump_entry_size(struct jump_entry *entry)
+{
+#ifdef JUMP_LABEL_NOP_SIZE
+ return JUMP_LABEL_NOP_SIZE;
+#else
+ return arch_jump_entry_size(entry);
+#endif
+}
+
+#endif
#endif
#ifndef __ASSEMBLY__
@@ -129,7 +196,7 @@ enum jump_label_type {
struct module;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
@@ -150,21 +217,26 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
+extern void jump_label_init_ro(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type);
-extern void arch_jump_label_transform_static(struct jump_entry *entry,
- enum jump_label_type type);
+extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
+ enum jump_label_type type);
+extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
-extern void static_key_slow_inc(struct static_key *key);
+extern bool static_key_slow_inc(struct static_key *key);
+extern bool static_key_fast_inc_not_disabled(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
-extern void jump_label_apply_nops(struct module *mod);
+extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
+extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
+extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -175,19 +247,19 @@ extern void static_key_disable_cpuslocked(struct static_key *key);
*/
#define STATIC_KEY_INIT_TRUE \
{ .enabled = { 1 }, \
- { .entries = (void *)JUMP_TYPE_TRUE } }
+ { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE \
{ .enabled = { 0 }, \
- { .entries = (void *)JUMP_TYPE_FALSE } }
+ { .type = JUMP_TYPE_FALSE } }
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
#include <linux/atomic.h>
#include <linux/bug.h>
-static inline int static_key_count(struct static_key *key)
+static __always_inline int static_key_count(struct static_key *key)
{
- return atomic_read(&key->enabled);
+ return raw_atomic_read(&key->enabled);
}
static __always_inline void jump_label_init(void)
@@ -195,32 +267,49 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
+static __always_inline void jump_label_init_ro(void) { }
+
static __always_inline bool static_key_false(struct static_key *key)
{
- if (unlikely(static_key_count(key) > 0))
+ if (unlikely_notrace(static_key_count(key) > 0))
return true;
return false;
}
static __always_inline bool static_key_true(struct static_key *key)
{
- if (likely(static_key_count(key) > 0))
+ if (likely_notrace(static_key_count(key) > 0))
return true;
return false;
}
-static inline void static_key_slow_inc(struct static_key *key)
+static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
- atomic_inc(&key->enabled);
+ int v;
+
+ STATIC_KEY_CHECK_USE(key);
+ /*
+ * Prevent key->enabled getting negative to follow the same semantics
+ * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
+ */
+ v = atomic_read(&key->enabled);
+ do {
+ if (v < 0 || (v + 1) < 0)
+ return false;
+ } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
+ return true;
}
+#define static_key_slow_inc(key) static_key_fast_inc_not_disabled(key)
static inline void static_key_slow_dec(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
atomic_dec(&key->enabled);
}
+#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
+#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -229,14 +318,9 @@ static inline int jump_label_text_reserved(void *start, void *end)
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
-static inline int jump_label_apply_nops(struct module *mod)
-{
- return 0;
-}
-
static inline void static_key_enable(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) != 0) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
@@ -247,7 +331,7 @@ static inline void static_key_enable(struct static_key *key)
static inline void static_key_disable(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) != 1) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
@@ -262,7 +346,9 @@ static inline void static_key_disable(struct static_key *key)
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
+
+DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
@@ -290,12 +376,18 @@ struct static_key_false {
#define DEFINE_STATIC_KEY_TRUE(name) \
struct static_key_true name = STATIC_KEY_TRUE_INIT
+#define DEFINE_STATIC_KEY_TRUE_RO(name) \
+ struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT
+
#define DECLARE_STATIC_KEY_TRUE(name) \
extern struct static_key_true name
#define DEFINE_STATIC_KEY_FALSE(name) \
struct static_key_false name = STATIC_KEY_FALSE_INIT
+#define DEFINE_STATIC_KEY_FALSE_RO(name) \
+ struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT
+
#define DECLARE_STATIC_KEY_FALSE(name) \
extern struct static_key_false name
@@ -309,6 +401,21 @@ struct static_key_false {
[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \
}
+#define _DEFINE_STATIC_KEY_1(name) DEFINE_STATIC_KEY_TRUE(name)
+#define _DEFINE_STATIC_KEY_0(name) DEFINE_STATIC_KEY_FALSE(name)
+#define DEFINE_STATIC_KEY_MAYBE(cfg, name) \
+ __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
+#define _DEFINE_STATIC_KEY_RO_1(name) DEFINE_STATIC_KEY_TRUE_RO(name)
+#define _DEFINE_STATIC_KEY_RO_0(name) DEFINE_STATIC_KEY_FALSE_RO(name)
+#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name) \
+ __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)
+
+#define _DECLARE_STATIC_KEY_1(name) DECLARE_STATIC_KEY_TRUE(name)
+#define _DECLARE_STATIC_KEY_0(name) DECLARE_STATIC_KEY_FALSE(name)
+#define DECLARE_STATIC_KEY_MAYBE(cfg, name) \
+ __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
extern bool ____wrong_branch_error(void);
#define static_key_enabled(x) \
@@ -320,7 +427,7 @@ extern bool ____wrong_branch_error(void);
static_key_count((struct static_key *)x) > 0; \
})
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
/*
* Combine the right initial value (type) with the right branch order
@@ -387,7 +494,7 @@ extern bool ____wrong_branch_error(void);
branch = !arch_static_branch_jump(&(x)->key, true); \
else \
branch = ____wrong_branch_error(); \
- branch; \
+ likely_notrace(branch); \
})
#define static_branch_unlikely(x) \
@@ -399,15 +506,19 @@ extern bool ____wrong_branch_error(void);
branch = arch_static_branch(&(x)->key, false); \
else \
branch = ____wrong_branch_error(); \
- branch; \
+ unlikely_notrace(branch); \
})
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
+
+#define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key))
-#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
-#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
+#endif /* CONFIG_JUMP_LABEL */
-#endif /* HAVE_JUMP_LABEL */
+#define static_branch_maybe(config, x) \
+ (IS_ENABLED(config) ? static_branch_likely(x) \
+ : static_branch_unlikely(x))
/*
* Advanced usage; refcount, branch is enabled when: count != 0
@@ -415,6 +526,8 @@ extern bool ____wrong_branch_error(void);
#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
+#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
+#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
/*
* Normal usage; boolean enable/disable.
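For reference, a minimal sketch of the refcounted interface above in use;
the key name and call sites here are illustrative, not part of this patch:

#include <linux/jump_label.h>
#include <linux/printk.h>

DEFINE_STATIC_KEY_FALSE(my_feature_key);

/* Each registered user bumps the count; the branch flips on 0 -> 1. */
static void my_feature_register(void)
{
	static_branch_inc(&my_feature_key);
}

static void my_feature_unregister(void)
{
	static_branch_dec(&my_feature_key);
}

static void my_hot_path(void)
{
	/* Patched to a jump only while the count is non-zero. */
	if (static_branch_unlikely(&my_feature_key))
		pr_debug("feature active\n");
}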
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 23da3af459fe..8c3ee291b2d8 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -1,41 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_RATELIMIT_H
#define _LINUX_JUMP_LABEL_RATELIMIT_H
#include <linux/jump_label.h>
#include <linux/workqueue.h>
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#if defined(CONFIG_JUMP_LABEL)
struct static_key_deferred {
struct static_key key;
unsigned long timeout;
struct delayed_work work;
};
-#endif
-#ifdef HAVE_JUMP_LABEL
-extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
-extern void static_key_deferred_flush(struct static_key_deferred *key);
+struct static_key_true_deferred {
+ struct static_key_true key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
+struct static_key_false_deferred {
+ struct static_key_false key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
+#define static_key_slow_dec_deferred(x) \
+ __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
+#define static_branch_slow_dec_deferred(x) \
+ __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
+
+#define static_key_deferred_flush(x) \
+ __static_key_deferred_flush((x), &(x)->work)
+
+extern void
+__static_key_slow_dec_deferred(struct static_key *key,
+ struct delayed_work *work,
+ unsigned long timeout);
+extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
-#else /* !HAVE_JUMP_LABEL */
+extern void jump_label_update_timeout(struct work_struct *work);
+
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \
+ struct static_key_true_deferred name = { \
+ .key = { STATIC_KEY_INIT_TRUE }, \
+ .timeout = (rl), \
+ .work = __DELAYED_WORK_INITIALIZER((name).work, \
+ jump_label_update_timeout, \
+ 0), \
+ }
+
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \
+ struct static_key_false_deferred name = { \
+ .key = { STATIC_KEY_INIT_FALSE }, \
+ .timeout = (rl), \
+ .work = __DELAYED_WORK_INITIALIZER((name).work, \
+ jump_label_update_timeout, \
+ 0), \
+ }
+
+#else /* !CONFIG_JUMP_LABEL */
struct static_key_deferred {
struct static_key key;
};
+struct static_key_true_deferred {
+ struct static_key_true key;
+};
+struct static_key_false_deferred {
+ struct static_key_false key;
+};
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \
+ struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT }
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \
+ struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT }
+
+#define static_branch_slow_dec_deferred(x) static_branch_dec(&(x)->key)
+
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
static_key_slow_dec(&key->key);
}
-static inline void static_key_deferred_flush(struct static_key_deferred *key)
+static inline void static_key_deferred_flush(void *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
}
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
+
+#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key)
+
#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
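A hedged sketch of the deferred variant: increments take effect immediately,
while decrements are rate-limited, so a caller that toggles quickly does not
thrash text patching. The key name and timeout are illustrative:

#include <linux/jump_label_ratelimit.h>

/* Decrements are batched; the branch drops at most once per second. */
DEFINE_STATIC_KEY_DEFERRED_FALSE(my_deferred_key, HZ);

static void my_user_add(void)
{
	static_branch_deferred_inc(&my_deferred_key);
}

static void my_user_remove(void)
{
	static_branch_slow_dec_deferred(&my_deferred_key);
}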
diff --git a/include/linux/jz4740-adc.h b/include/linux/jz4740-adc.h
index 8184578fbfa4..19d995c8bf06 100644
--- a/include/linux/jz4740-adc.h
+++ b/include/linux/jz4740-adc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_JZ4740_ADC
#define __LINUX_JZ4740_ADC
diff --git a/include/linux/jz4780-nemc.h b/include/linux/jz4780-nemc.h
index e7f1cc7a2284..bd7fad910242 100644
--- a/include/linux/jz4780-nemc.h
+++ b/include/linux/jz4780-nemc.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* JZ4780 NAND/external memory controller (NEMC)
*
* Copyright (c) 2015 Imagination Technologies
* Author: Alex Smith <alex@alex-smith.me.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_JZ4780_NEMC_H__
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 6883e197acb9..d5dd54c53ace 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Rewritten and vastly simplified by Rusty Russell for in-kernel
* module loader:
* Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
@@ -6,24 +7,76 @@
#define _LINUX_KALLSYMS_H
#include <linux/errno.h>
+#include <linux/buildid.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
+#include <linux/mm.h>
+#include <linux/module.h>
-#define KSYM_NAME_LEN 128
-#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
- 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+#include <asm/sections.h>
+#define KSYM_NAME_LEN 512
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
+ (KSYM_NAME_LEN - 1) + \
+ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
+ (BUILD_ID_SIZE_MAX * 2) + 1)
+
+struct cred;
struct module;
+static inline int is_kernel_text(unsigned long addr)
+{
+ if (__is_kernel_text(addr))
+ return 1;
+ return in_gate_area_no_mm(addr);
+}
+
+static inline int is_kernel(unsigned long addr)
+{
+ if (__is_kernel(addr))
+ return 1;
+ return in_gate_area_no_mm(addr);
+}
+
+static inline int is_ksym_addr(unsigned long addr)
+{
+ if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
+ return is_kernel(addr);
+
+ return is_kernel_text(addr) || is_kernel_inittext(addr);
+}
+
+static inline void *dereference_symbol_descriptor(void *ptr)
+{
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
+ struct module *mod;
+
+ ptr = dereference_kernel_function_descriptor(ptr);
+ if (is_ksym_addr((unsigned long)ptr))
+ return ptr;
+
+ guard(rcu)();
+ mod = __module_address((unsigned long)ptr);
+
+ if (mod)
+ ptr = dereference_module_function_descriptor(mod, ptr);
+#endif
+ return ptr;
+}
+
+/* How and when do we show kallsyms values? */
+extern bool kallsyms_show_value(const struct cred *cred);
+
#ifdef CONFIG_KALLSYMS
+unsigned long kallsyms_sym_address(int idx);
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
+ void *data);
+int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
+ const char *name, void *data);
+
/* Lookup the address for a symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name);
-/* Call a function on each kallsyms symbol in the core kernel */
-int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
- unsigned long),
- void *data);
-
extern int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset);
@@ -36,14 +89,12 @@ const char *kallsyms_lookup(unsigned long addr,
/* Look up a kernel symbol and return it in a text buffer. */
extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_build_id(char *buffer, unsigned long address);
extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
-
-/* Look up a kernel symbol and print it to the kernel messages. */
-extern void __print_symbol(const char *fmt, unsigned long address);
+extern int sprint_backtrace_build_id(char *buffer, unsigned long address);
int lookup_symbol_name(unsigned long addr, char *symname);
-int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
#else /* !CONFIG_KALLSYMS */
@@ -52,14 +103,6 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
return 0;
}
-static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *,
- unsigned long),
- void *data)
-{
- return 0;
-}
-
static inline int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset)
@@ -81,6 +124,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_symbol_build_id(char *buffer, unsigned long address)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
{
*buffer = '\0';
@@ -93,36 +142,33 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr)
return 0;
}
-static inline int lookup_symbol_name(unsigned long addr, char *symname)
+static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr)
{
- return -ERANGE;
+ *buffer = '\0';
+ return 0;
}
-static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
+static inline int lookup_symbol_name(unsigned long addr, char *symname)
{
return -ERANGE;
}
-/* Stupid that this does nothing, but I didn't create this mess. */
-#define __print_symbol(fmt, addr)
-#endif /*CONFIG_KALLSYMS*/
-
-/* This macro allows us to keep printk typechecking */
-static __printf(1, 2)
-void __check_printsym_format(const char *fmt, ...)
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
+ void *data)
{
+ return -EOPNOTSUPP;
}
-static inline void print_symbol(const char *fmt, unsigned long addr)
+static inline int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
+ const char *name, void *data)
{
- __check_printsym_format(fmt, "");
- __print_symbol(fmt, (unsigned long)
- __builtin_extract_return_addr((void *)addr));
+ return -EOPNOTSUPP;
}
+#endif /*CONFIG_KALLSYMS*/
-static inline void print_ip_sym(unsigned long ip)
+static inline void print_ip_sym(const char *loglvl, unsigned long ip)
{
- printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
+ printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip);
}
#endif /*_LINUX_KALLSYMS_H*/
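The lookup helpers above are typically used to render addresses for
diagnostics. An illustrative (not authoritative) helper:

#include <linux/kallsyms.h>
#include <linux/printk.h>

static void my_show_address(unsigned long addr)
{
	char sym[KSYM_SYMBOL_LEN];

	/* Resolves to "name+off/size [mod buildid]" when available, and
	 * degrades to an empty string with CONFIG_KALLSYMS=n. */
	sprint_symbol(sym, addr);
	pr_info("addr %lx resolves to %s\n", addr, sym);
}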
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index 41960fecf783..3d6d22a25bdc 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -1,14 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_CHECKS_H
#define _LINUX_KASAN_CHECKS_H
-#ifdef CONFIG_KASAN
-void kasan_check_read(const volatile void *p, unsigned int size);
-void kasan_check_write(const volatile void *p, unsigned int size);
+#include <linux/types.h>
+
+/*
+ * The annotations present in this file are only relevant for the software
+ * KASAN modes that rely on compiler instrumentation, and will be optimized
+ * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead.
+ */
+
+/*
+ * __kasan_check_*: Always available when KASAN is enabled. This may be used
+ * even in compilation units that selectively disable KASAN, but must use KASAN
+ * to validate access to an address. Never use these in header files!
+ */
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+bool __kasan_check_read(const volatile void *p, unsigned int size);
+bool __kasan_check_write(const volatile void *p, unsigned int size);
+#else
+static inline bool __kasan_check_read(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+static inline bool __kasan_check_write(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+#endif
+
+/*
+ * kasan_check_*: Only available when the particular compilation unit has KASAN
+ * instrumentation enabled. May be used in header files.
+ */
+#ifdef __SANITIZE_ADDRESS__
+#define kasan_check_read __kasan_check_read
+#define kasan_check_write __kasan_check_write
#else
-static inline void kasan_check_read(const volatile void *p, unsigned int size)
-{ }
-static inline void kasan_check_write(const volatile void *p, unsigned int size)
-{ }
+static inline bool kasan_check_read(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+static inline bool kasan_check_write(const volatile void *p, unsigned int size)
+{
+ return true;
+}
#endif
#endif
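The distinction above matters for code that disables instrumentation for
itself but still wants its accesses validated. A sketch under that
assumption (the helper is hypothetical):

#include <linux/kasan-checks.h>

static bool my_copy_word_checked(unsigned long *dst, const unsigned long *src)
{
	/* Explicit checks; both return true when the access is valid
	 * (or when the software KASAN modes are not enabled). */
	if (!__kasan_check_read(src, sizeof(*src)) ||
	    !__kasan_check_write(dst, sizeof(*dst)))
		return false;
	*dst = *src;
	return true;
}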
diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
new file mode 100644
index 000000000000..9eca967d8526
--- /dev/null
+++ b/include/linux/kasan-enabled.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_ENABLED_H
+#define _LINUX_KASAN_ENABLED_H
+
+#include <linux/static_key.h>
+
+#if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS)
+/*
+ * Global runtime flag for KASAN modes that need runtime control.
+ * Used by ARCH_DEFER_KASAN architectures and HW_TAGS mode.
+ */
+DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
+
+/*
+ * Runtime control for shadow memory initialization or HW_TAGS mode.
+ * Uses a static key for architectures that need deferred KASAN or HW_TAGS.
+ */
+static __always_inline bool kasan_enabled(void)
+{
+ return static_branch_likely(&kasan_flag_enabled);
+}
+
+static inline void kasan_enable(void)
+{
+ static_branch_enable(&kasan_flag_enabled);
+}
+#else
+/* For architectures that can enable KASAN early, use compile-time check. */
+static __always_inline bool kasan_enabled(void)
+{
+ return IS_ENABLED(CONFIG_KASAN);
+}
+
+static inline void kasan_enable(void) {}
+#endif /* CONFIG_ARCH_DEFER_KASAN || CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return kasan_enabled();
+}
+#else
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return false;
+}
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#endif /* _LINUX_KASAN_ENABLED_H */
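The static key above exists so hot-path hooks cost a single patched branch
when KASAN is compiled in but not enabled at runtime. A sketch of the guard
pattern kasan.h applies throughout (the hook name is hypothetical):

#include <linux/kasan-enabled.h>

void __my_kasan_hook(const void *addr);	/* out-of-line slow path */

static __always_inline void my_kasan_hook(const void *addr)
{
	/*
	 * With ARCH_DEFER_KASAN or HW_TAGS this is a patched NOP until
	 * kasan_enable() flips the key; otherwise it folds to a
	 * compile-time constant.
	 */
	if (kasan_enabled())
		__my_kasan_hook(addr);
}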
diff --git a/include/linux/kasan-tags.h b/include/linux/kasan-tags.h
new file mode 100644
index 000000000000..4f85f562512c
--- /dev/null
+++ b/include/linux/kasan-tags.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_TAGS_H
+#define _LINUX_KASAN_TAGS_H
+
+#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */
+#else
+#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */
+#endif
+
+#endif /* _LINUX_KASAN_TAGS_H */
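These values only make sense together with where the tag lives: the
tag-based modes keep it in the pointer's top byte (arm64 Top Byte Ignore).
The arithmetic below is illustrative only, assuming a 64-bit unsigned long;
the real helpers live in architecture code:

#include <linux/kasan-tags.h>
#include <linux/types.h>

#define MY_TAG_SHIFT	56	/* assumed top-byte placement */

static inline void *my_set_tag(const void *addr, u8 tag)
{
	unsigned long p = (unsigned long)addr;

	p &= ~(0xffUL << MY_TAG_SHIFT);
	p |= (unsigned long)tag << MY_TAG_SHIFT;
	return (void *)p;
}

static inline u8 my_get_tag(const void *addr)
{
	return (u8)((unsigned long)addr >> MY_TAG_SHIFT);
}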
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index a5c7046f26b4..f335c1d7b61d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -1,34 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H
+#include <linux/bug.h>
+#include <linux/kasan-enabled.h>
+#include <linux/kasan-tags.h>
+#include <linux/kernel.h>
+#include <linux/static_key.h>
#include <linux/types.h>
struct kmem_cache;
struct page;
+struct slab;
struct vm_struct;
struct task_struct;
#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SCALE_SHIFT 3
-
+#include <linux/linkage.h>
#include <asm/kasan.h>
-#include <asm/pgtable.h>
-extern unsigned char kasan_zero_page[PAGE_SIZE];
-extern pte_t kasan_zero_pte[PTRS_PER_PTE];
-extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
-extern pud_t kasan_zero_pud[PTRS_PER_PUD];
-extern p4d_t kasan_zero_p4d[PTRS_PER_P4D];
+#endif
+
+typedef unsigned int __bitwise kasan_vmalloc_flags_t;
+
+#define KASAN_VMALLOC_NONE ((__force kasan_vmalloc_flags_t)0x00u)
+#define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u)
+#define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)
+#define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u)
+
+#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
+#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+#include <linux/pgtable.h>
-void kasan_populate_zero_shadow(const void *shadow_start,
+/* Software KASAN implementations use shadow memory. */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+/* This matches KASAN_TAG_INVALID. */
+#define KASAN_SHADOW_INIT 0xFE
+#else
+#define KASAN_SHADOW_INIT 0
+#endif
+
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
+
+extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
+extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
+extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
+extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
+
+int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
+#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET;
}
+#endif
+
+int kasan_add_zero_shadow(void *start, unsigned long size);
+void kasan_remove_zero_shadow(void *start, unsigned long size);
/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);
@@ -36,97 +75,609 @@ extern void kasan_enable_current(void);
/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
-void kasan_unpoison_shadow(const void *address, size_t size);
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-void kasan_unpoison_task_stack(struct task_struct *task);
-void kasan_unpoison_stack_above_sp_to(const void *watermark);
+static inline int kasan_add_zero_shadow(void *start, unsigned long size)
+{
+ return 0;
+}
+static inline void kasan_remove_zero_shadow(void *start,
+ unsigned long size)
+{}
-void kasan_alloc_pages(struct page *page, unsigned int order);
-void kasan_free_pages(struct page *page, unsigned int order);
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
- unsigned long *flags);
-void kasan_cache_shrink(struct kmem_cache *cache);
-void kasan_cache_shutdown(struct kmem_cache *cache);
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-void kasan_poison_slab(struct page *page);
-void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-void kasan_poison_object_data(struct kmem_cache *cache, void *object);
-void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
+#ifdef CONFIG_KASAN_HW_TAGS
-void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
-void kasan_kfree_large(const void *ptr);
-void kasan_poison_kfree(void *ptr);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
- gfp_t flags);
-void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
+#else /* CONFIG_KASAN_HW_TAGS */
-void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
-bool kasan_slab_free(struct kmem_cache *s, void *object);
+#endif /* CONFIG_KASAN_HW_TAGS */
-struct kasan_cache {
- int alloc_meta_offset;
- int free_meta_offset;
-};
+static inline bool kasan_has_integrated_init(void)
+{
+ return kasan_hw_tags_enabled();
+}
+
+#ifdef CONFIG_KASAN
+void __kasan_unpoison_range(const void *addr, size_t size);
+static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_range(addr, size);
+}
+
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
+ unsigned int order, bool init)
+{
+ if (kasan_enabled())
+ __kasan_poison_pages(page, order, init);
+}
+
+bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline bool kasan_unpoison_pages(struct page *page,
+ unsigned int order, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_unpoison_pages(page, order, init);
+ return false;
+}
+
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
+{
+ if (kasan_enabled())
+ __kasan_poison_slab(slab);
+}
+
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
+ void *object)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_new_object(cache, object);
+}
+
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belong to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
+ void *object)
+{
+ if (kasan_enabled())
+ __kasan_poison_new_object(cache, object);
+}
+
+void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object);
+static __always_inline void * __must_check kasan_init_slab_obj(
+ struct kmem_cache *cache, const void *object)
+{
+ if (kasan_enabled())
+ return __kasan_init_slab_obj(cache, object);
+ return (void *)object;
+}
+
+bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
+ unsigned long ip);
+/**
+ * kasan_slab_pre_free - Check whether freeing a slab object is safe.
+ * @object: Object to be freed.
+ *
+ * This function checks whether freeing the given object is safe. It may
+ * check for double-free and invalid-free bugs and report them.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if freeing the object is unsafe; false otherwise.
+ */
+static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
+ void *object)
+{
+ if (kasan_enabled())
+ return __kasan_slab_pre_free(s, object, _RET_IP_);
+ return false;
+}
+
+bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
+ bool still_accessible, bool no_quarantine);
+/**
+ * kasan_slab_free - Poison, initialize, and quarantine a slab object.
+ * @object: Object to be freed.
+ * @init: Whether to initialize the object.
+ * @still_accessible: Whether the object contents are still accessible.
+ * @no_quarantine: Whether to bypass the quarantine for this object.
+ *
+ * This function informs that a slab object has been freed and is not
+ * supposed to be accessed anymore, except when @still_accessible is set
+ * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
+ * grace period might not have passed yet).
+ *
+ * For KASAN modes that have integrated memory initialization
+ * (kasan_has_integrated_init() == true), this function also initializes
+ * the object's memory. For other modes, the @init argument is ignored.
+ *
+ * This function might also take ownership of the object to quarantine it.
+ * When this happens, KASAN will defer freeing the object to a later
+ * stage and handle it internally until then. The return value indicates
+ * whether KASAN took ownership of the object.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if KASAN took ownership of the object; false otherwise.
+ */
+static __always_inline bool kasan_slab_free(struct kmem_cache *s,
+ void *object, bool init,
+ bool still_accessible,
+ bool no_quarantine)
+{
+ if (kasan_enabled())
+ return __kasan_slab_free(s, object, init, still_accessible,
+ no_quarantine);
+ return false;
+}
-int kasan_module_alloc(void *addr, size_t size);
-void kasan_free_shadow(const struct vm_struct *vm);
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr)
+{
+ if (kasan_enabled())
+ __kasan_kfree_large(ptr, _RET_IP_);
+}
-size_t ksize(const void *);
-static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
-size_t kasan_metadata_size(struct kmem_cache *cache);
+void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
+ void *object, gfp_t flags, bool init);
+static __always_inline void * __must_check kasan_slab_alloc(
+ struct kmem_cache *s, void *object, gfp_t flags, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_slab_alloc(s, object, flags, init);
+ return object;
+}
-bool kasan_save_enable_multi_shot(void);
-void kasan_restore_multi_shot(bool enabled);
+void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
+ const void *object, size_t size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_kmalloc(s, object, size, flags);
+ return (void *)object;
+}
+
+void * __must_check __kasan_kmalloc_large(const void *ptr,
+ size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
+ size_t size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_kmalloc_large(ptr, size, flags);
+ return (void *)ptr;
+}
+
+void * __must_check __kasan_krealloc(const void *object,
+ size_t new_size, gfp_t flags);
+static __always_inline void * __must_check kasan_krealloc(const void *object,
+ size_t new_size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_krealloc(object, new_size, flags);
+ return (void *)object;
+}
+
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function is similar to kasan_mempool_poison_object() but operates on
+ * page allocations.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_pages().
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_pages(page, order, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function unpoisons a page allocation that was previously poisoned by
+ * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
+ * the tag-based modes, this function assigns a new tag to the allocation.
+ */
+static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_pages(page, order, _RET_IP_);
+}
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+/**
+ * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function poisons a slab allocation and saves a free stack trace for it
+ * without initializing the allocation's memory and without putting it into the
+ * quarantine (for the Generic mode).
+ *
+ * This function also performs checks to detect double-free and invalid-free
+ * bugs and reports them. The caller can use the return value of this function
+ * to find out if the allocation is buggy.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_object().
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_CACHE_SIZE).
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_object(void *ptr)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_object(ptr, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
+/**
+ * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ * @size: Size to be unpoisoned.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function unpoisons a slab allocation that was previously poisoned via
+ * kasan_mempool_poison_object() and saves an alloc stack trace for it without
+ * initializing the allocation's memory. For the tag-based modes, this function
+ * does not assign a new tag to the allocation and instead restores the
+ * original tags based on the pointer value.
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_CACHE_SIZE).
+ */
+static __always_inline void kasan_mempool_unpoison_object(void *ptr,
+ size_t size)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
+}
+
+/*
+ * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
+ * the hardware tag-based mode that doesn't rely on compiler instrumentation.
+ */
+bool __kasan_check_byte(const void *addr, unsigned long ip);
+static __always_inline bool kasan_check_byte(const void *addr)
+{
+ if (kasan_enabled())
+ return __kasan_check_byte(addr, _RET_IP_);
+ return true;
+}
#else /* CONFIG_KASAN */
-static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+static inline void kasan_unpoison_range(const void *address, size_t size) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+ bool init) {}
+static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
+ bool init)
+{
+ return false;
+}
+static inline void kasan_poison_slab(struct slab *slab) {}
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
+ void *object) {}
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
+ void *object) {}
+static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object)
+{
+ return (void *)object;
+}
+static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
+{
+ return false;
+}
+
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+ bool init, bool still_accessible,
+ bool no_quarantine)
+{
+ return false;
+}
+static inline void kasan_kfree_large(void *ptr) {}
+static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags, bool init)
+{
+ return object;
+}
+static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size, gfp_t flags)
+{
+ return (void *)object;
+}
+static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+{
+ return (void *)ptr;
+}
+static inline void *kasan_krealloc(const void *object, size_t new_size,
+ gfp_t flags)
+{
+ return (void *)object;
+}
+static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
+static inline bool kasan_mempool_poison_object(void *ptr)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}
+
+static inline bool kasan_check_byte(const void *address)
+{
+ return true;
+}
+
+#endif /* CONFIG_KASAN */
+
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+void kasan_unpoison_task_stack(struct task_struct *task);
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
+#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
-static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
+static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
+#endif
-static inline void kasan_enable_current(void) {}
-static inline void kasan_disable_current(void) {}
+#ifdef CONFIG_KASAN_GENERIC
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+};
+
+size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+ slab_flags_t *flags);
+
+void kasan_cache_shrink(struct kmem_cache *cache);
+void kasan_cache_shutdown(struct kmem_cache *cache);
+void kasan_record_aux_stack(void *ptr);
+
+#else /* CONFIG_KASAN_GENERIC */
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline size_t kasan_metadata_size(struct kmem_cache *cache,
+ bool in_object)
+{
+ return 0;
+}
+/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
- size_t *size,
- unsigned long *flags) {}
+ unsigned int *size,
+ slab_flags_t *flags) {}
+
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
+static inline void kasan_record_aux_stack(void *ptr) {}
-static inline void kasan_poison_slab(struct page *page) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
- void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
- void *object) {}
-static inline void kasan_init_slab_obj(struct kmem_cache *cache,
- const void *object) {}
+#endif /* CONFIG_KASAN_GENERIC */
-static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
-static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_poison_kfree(void *ptr) {}
-static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
- size_t size, gfp_t flags) {}
-static inline void kasan_krealloc(const void *object, size_t new_size,
- gfp_t flags) {}
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
- gfp_t flags) {}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
+static inline void *kasan_reset_tag(const void *addr)
{
- return false;
+ return (void *)arch_kasan_reset_tag(addr);
}
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+/**
+ * kasan_report - print a report about a bad memory access detected by KASAN
+ * @addr: address of the bad access
+ * @size: size of the bad access
+ * @is_write: whether the bad access is a write or a read
+ * @ip: instruction pointer for the accessibility check or the bad access itself
+ */
+bool kasan_report(const void *addr, size_t size,
+ bool is_write, unsigned long ip);
-static inline void kasan_unpoison_slab(const void *ptr) { }
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
-#endif /* CONFIG_KASAN */
+static inline void *kasan_reset_tag(const void *addr)
+{
+ return (void *)addr;
+}
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
+void kasan_report_async(void);
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_GENERIC
+void __init kasan_init_generic(void);
+#else
+static inline void kasan_init_generic(void) { }
+#endif
+
+#ifdef CONFIG_KASAN_SW_TAGS
+void __init kasan_init_sw_tags(void);
+#else
+static inline void kasan_init_sw_tags(void) { }
+#endif
+
+#ifdef CONFIG_KASAN_HW_TAGS
+void kasan_init_hw_tags_cpu(void);
+void __init kasan_init_hw_tags(void);
+#else
+static inline void kasan_init_hw_tags_cpu(void) { }
+static inline void kasan_init_hw_tags(void) { }
+#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
+static inline int kasan_populate_vmalloc(unsigned long addr,
+ unsigned long size, gfp_t gfp_mask)
+{
+ if (kasan_enabled())
+ return __kasan_populate_vmalloc(addr, size, gfp_mask);
+ return 0;
+}
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end,
+ unsigned long flags);
+static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end,
+ unsigned long flags)
+{
+ if (kasan_enabled())
+ return __kasan_release_vmalloc(start, end, free_region_start,
+ free_region_end, flags);
+}
+
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size)
+{ }
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size, gfp_t gfp_mask)
+{
+ return 0;
+}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end,
+ unsigned long flags) { }
+
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ kasan_vmalloc_flags_t flags);
+static __always_inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_unpoison_vmalloc(start, size, flags);
+ return (void *)start;
+}
+
+void __kasan_poison_vmalloc(const void *start, unsigned long size);
+static __always_inline void kasan_poison_vmalloc(const void *start,
+ unsigned long size)
+{
+ if (kasan_enabled())
+ __kasan_poison_vmalloc(start, size);
+}
+
+#else /* CONFIG_KASAN_VMALLOC */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size) { }
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size, gfp_t gfp_mask)
+{
+ return 0;
+}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end,
+ unsigned long flags) { }
+
+static inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ return (void *)start;
+}
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
+
+#endif /* CONFIG_KASAN_VMALLOC */
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+
+/*
+ * These functions allocate and free shadow memory for kernel modules.
+ * They are only required when KASAN_VMALLOC is not supported, as otherwise
+ * shadow memory is allocated by the generic vmalloc handlers.
+ */
+int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
+void kasan_free_module_shadow(const struct vm_struct *vm);
+
+#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+
+static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
+static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
+
+#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+void kasan_non_canonical_hook(unsigned long addr);
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+static inline void kasan_non_canonical_hook(unsigned long addr) { }
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#endif /* LINUX_KASAN_H */
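The mempool contract documented above is easiest to see end to end. A
hedged sketch of a one-slot cache of slab objects (names hypothetical):

#include <linux/kasan.h>

static void *my_cached_obj;

static bool my_cache_put(void *obj)
{
	/* Rejects double-free/invalid-free; KASAN reports the bug. */
	if (!kasan_mempool_poison_object(obj))
		return false;
	my_cached_obj = obj;	/* poisoned while parked */
	return true;
}

static void *my_cache_get(size_t size)
{
	void *obj = my_cached_obj;

	if (obj) {
		my_cached_obj = NULL;
		/* Unpoison (and save an alloc stack) before reuse. */
		kasan_mempool_unpoison_object(obj, size);
	}
	return obj;
}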
diff --git a/include/linux/kbd_diacr.h b/include/linux/kbd_diacr.h
index 7274ec68c246..738c7340c151 100644
--- a/include/linux/kbd_diacr.h
+++ b/include/linux/kbd_diacr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DIACR_H
#define _DIACR_H
#include <linux/kd.h>
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index cbfb171bbcba..c40811d79769 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KBD_KERN_H
#define _KBD_KERN_H
@@ -5,12 +6,7 @@
#include <linux/interrupt.h>
#include <linux/keyboard.h>
-extern struct tasklet_struct keyboard_tasklet;
-
extern char *func_table[MAX_NR_FUNC];
-extern char func_buf[];
-extern char *funcbufptr;
-extern int funcbufsize, funcbufleft;
/*
* kbd->xxx contains the VC-local things (flag settings etc..)
@@ -73,12 +69,6 @@ extern void (*kbd_ledfunc)(unsigned int led);
extern int set_console(int nr);
extern void schedule_console_callback(void);
-/* FIXME: review locking for vt.c callers */
-static inline void set_leds(void)
-{
- tasklet_schedule(&keyboard_tasklet);
-}
-
static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag)
{
return ((kbd->modeflags >> flag) & 1);
@@ -137,7 +127,7 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag)
struct console;
-void compute_shiftstate(void);
+void vt_set_leds_compute_shiftstate(void);
/* defkeymap.c */
diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h
index 4e80f3a9ad58..e7be517aaaf6 100644
--- a/include/linux/kbuild.h
+++ b/include/linux/kbuild.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KBUILD_H
#define __LINUX_KBUILD_H
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 4d748603e818..20d1079e92b4 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -1,8 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KCONFIG_H
#define __LINUX_KCONFIG_H
#include <generated/autoconf.h>
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#else
+#define __LITTLE_ENDIAN 1234
+#endif
+
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
@@ -44,7 +51,8 @@
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
- * otherwise.
+ * otherwise. CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1" in
+ * autoconf.h.
*/
#define IS_MODULE(option) __is_defined(option##_MODULE)
@@ -59,7 +67,8 @@
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
- * 0 otherwise.
+ * 0 otherwise. Note that CONFIG_FOO=y results in "#define CONFIG_FOO 1" in
+ * autoconf.h, while CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1".
*/
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
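The note above has a practical consequence: both arms of an IS_ENABLED()
check are parsed and type-checked, so referenced symbols must at least be
declared. A sketch (CONFIG_MY_FEATURE and the init function are
hypothetical):

#include <linux/kconfig.h>

int my_feature_init(void);	/* must be visible even when disabled */

static int my_init(void)
{
	/* The dead branch is eliminated, unlike #ifdef, but still compiled. */
	if (IS_ENABLED(CONFIG_MY_FEATURE))
		return my_feature_init();
	return 0;
}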
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index d92762286645..9a2fa013c91d 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* /proc/kcore definitions
*/
@@ -9,7 +10,7 @@ enum kcore_type {
KCORE_VMALLOC,
KCORE_RAM,
KCORE_VMEMMAP,
- KCORE_OTHER,
+ KCORE_USER,
};
struct kcore_list {
@@ -19,15 +20,10 @@ struct kcore_list {
int type;
};
-struct vmcore {
- struct list_head list;
- unsigned long long paddr;
- unsigned long long size;
- loff_t offset;
-};
-
#ifdef CONFIG_PROC_KCORE
-extern void kclist_add(struct kcore_list *, void *, size_t, int type);
+void __init kclist_add(struct kcore_list *, void *, size_t, int type);
+
+extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
#else
static inline
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 2883ac98c280..0143358874b0 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -1,29 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KCOV_H
#define _LINUX_KCOV_H
+#include <linux/sched.h>
#include <uapi/linux/kcov.h>
struct task_struct;
#ifdef CONFIG_KCOV
-void kcov_task_init(struct task_struct *t);
-void kcov_task_exit(struct task_struct *t);
-
enum kcov_mode {
/* Coverage collection is not enabled yet. */
KCOV_MODE_DISABLED = 0,
+ /* KCOV was initialized, but tracing mode hasn't been chosen yet. */
+ KCOV_MODE_INIT = 1,
/*
* Tracing coverage collection mode.
* Covered PCs are collected in a per-task buffer.
*/
- KCOV_MODE_TRACE = 1,
+ KCOV_MODE_TRACE_PC = 2,
+ /* Collecting comparison operands mode. */
+ KCOV_MODE_TRACE_CMP = 3,
+ /* The process owns a KCOV remote reference. */
+ KCOV_MODE_REMOTE = 4,
};
+#define KCOV_IN_CTXSW (1 << 30)
+
+void kcov_task_init(struct task_struct *t);
+void kcov_task_exit(struct task_struct *t);
+
+#define kcov_prepare_switch(t) \
+do { \
+ (t)->kcov_mode |= KCOV_IN_CTXSW; \
+} while (0)
+
+#define kcov_finish_switch(t) \
+do { \
+ (t)->kcov_mode &= ~KCOV_IN_CTXSW; \
+} while (0)
+
+/* See Documentation/dev-tools/kcov.rst for usage details. */
+void kcov_remote_start(u64 handle);
+void kcov_remote_stop(void);
+u64 kcov_common_handle(void);
+
+static inline void kcov_remote_start_common(u64 id)
+{
+ kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, id));
+}
+
+static inline void kcov_remote_start_usb(u64 id)
+{
+ kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
+}
+
+/*
+ * The softirq flavor of kcov_remote_*() functions is introduced as a
+ * temporary workaround for kcov's lack of support for nested remote
+ * coverage sections in task context. Adding support for nested sections is
+ * tracked in:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ */
+
+static inline void kcov_remote_start_usb_softirq(u64 id)
+{
+ if (in_serving_softirq() && !in_hardirq())
+ kcov_remote_start_usb(id);
+}
+
+static inline void kcov_remote_stop_softirq(void)
+{
+ if (in_serving_softirq() && !in_hardirq())
+ kcov_remote_stop();
+}
+
+#ifdef CONFIG_64BIT
+typedef unsigned long kcov_u64;
+#else
+typedef unsigned long long kcov_u64;
+#endif
+
+void __sanitizer_cov_trace_pc(void);
+void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2);
+void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2);
+void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2);
+void __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2);
+void __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2);
+void __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2);
+void __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2);
+void __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2);
+void __sanitizer_cov_trace_switch(kcov_u64 val, void *cases);
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
static inline void kcov_task_exit(struct task_struct *t) {}
+static inline void kcov_prepare_switch(struct task_struct *t) {}
+static inline void kcov_finish_switch(struct task_struct *t) {}
+static inline void kcov_remote_start(u64 handle) {}
+static inline void kcov_remote_stop(void) {}
+static inline u64 kcov_common_handle(void)
+{
+ return 0;
+}
+static inline void kcov_remote_start_common(u64 id) {}
+static inline void kcov_remote_start_usb(u64 id) {}
+static inline void kcov_remote_start_usb_softirq(u64 id) {}
+static inline void kcov_remote_stop_softirq(void) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
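A sketch of the remote interface above as a background worker might use it;
the worker and the id plumbing are hypothetical:

#include <linux/kcov.h>

static void my_bg_work(u64 id)
{
	/* Credit the coverage below to the process that registered @id. */
	kcov_remote_start_common(id);
	/* ... work performed on behalf of the remote process ... */
	kcov_remote_stop();
}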
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
new file mode 100644
index 000000000000..92f3843d9ebb
--- /dev/null
+++ b/include/linux/kcsan-checks.h
@@ -0,0 +1,533 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KCSAN access checks and modifiers. These can be used to explicitly check
+ * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
+
+#ifndef _LINUX_KCSAN_CHECKS_H
+#define _LINUX_KCSAN_CHECKS_H
+
+/* Note: Only include what is already included by compiler.h. */
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
+#define KCSAN_ACCESS_WRITE (1 << 0) /* Access is a write. */
+#define KCSAN_ACCESS_COMPOUND (1 << 1) /* Compounded read-write instrumentation. */
+#define KCSAN_ACCESS_ATOMIC (1 << 2) /* Access is atomic. */
+/* The following are special, and never due to compiler instrumentation. */
+#define KCSAN_ACCESS_ASSERT (1 << 3) /* Access is an assertion. */
+#define KCSAN_ACCESS_SCOPED (1 << 4) /* Access is a scoped access. */
+
+/*
+ * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
+ * even in compilation units that selectively disable KCSAN, but must use KCSAN
+ * to validate access to an address. Never use these in header files!
+ */
+#ifdef CONFIG_KCSAN
+/**
+ * __kcsan_check_access - check generic access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ */
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
+
+/*
+ * See definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ * Note: The mappings are arbitrary, and do not reflect any real mappings of C11
+ * memory orders to the LKMM memory orders and vice-versa!
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb __ATOMIC_SEQ_CST
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb __ATOMIC_ACQ_REL
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb __ATOMIC_ACQUIRE
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release __ATOMIC_RELEASE
+
+/**
+ * __kcsan_mb - full memory barrier instrumentation
+ */
+void __kcsan_mb(void);
+
+/**
+ * __kcsan_wmb - write memory barrier instrumentation
+ */
+void __kcsan_wmb(void);
+
+/**
+ * __kcsan_rmb - read memory barrier instrumentation
+ */
+void __kcsan_rmb(void);
+
+/**
+ * __kcsan_release - release barrier instrumentation
+ */
+void __kcsan_release(void);
+
+/**
+ * kcsan_disable_current - disable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_disable_current(void);
+
+/**
+ * kcsan_enable_current - re-enable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_enable_current(void);
+void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
+
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n: number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask: bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
+/* Scoped access information. */
+struct kcsan_scoped_access {
+ union {
+ struct list_head list; /* scoped_accesses list */
+ /*
+ * Not an entry in scoped_accesses list; stack depth from where
+ * the access was initialized.
+ */
+ int stack_depth;
+ };
+
+ /* Access information. */
+ const volatile void *ptr;
+ size_t size;
+ int type;
+ /* Location where scoped access was set up. */
+ unsigned long ip;
+};
+/*
+ * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
+ * out of scope; relies on attribute "cleanup", which is supported by all
+ * compilers that support KCSAN.
+ */
+#define __kcsan_cleanup_scoped \
+ __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
+
+/**
+ * kcsan_begin_scoped_access - begin scoped access
+ *
+ * Begin scoped access and initialize @sa, which will cause KCSAN to
+ * continuously check the memory range in the current thread until
+ * kcsan_end_scoped_access() is called for @sa.
+ *
+ * Scoped accesses are implemented by appending @sa to an internal list for the
+ * current execution context, and then checked on every call into the KCSAN
+ * runtime.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ * @sa: struct kcsan_scoped_access to use for the scope of the access
+ */
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa);
+
+/**
+ * kcsan_end_scoped_access - end scoped access
+ *
+ * End a scoped access, which will stop KCSAN checking the memory range.
+ * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
+ *
+ * @sa: a previously initialized struct kcsan_scoped_access
+ */
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
+
+
+#else /* CONFIG_KCSAN */
+
+static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+
+static inline void __kcsan_mb(void) { }
+static inline void __kcsan_wmb(void) { }
+static inline void __kcsan_rmb(void) { }
+static inline void __kcsan_release(void) { }
+static inline void kcsan_disable_current(void) { }
+static inline void kcsan_enable_current(void) { }
+static inline void kcsan_enable_current_nowarn(void) { }
+static inline void kcsan_nestable_atomic_begin(void) { }
+static inline void kcsan_nestable_atomic_end(void) { }
+static inline void kcsan_flat_atomic_begin(void) { }
+static inline void kcsan_flat_atomic_end(void) { }
+static inline void kcsan_atomic_next(int n) { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
+
+struct kcsan_scoped_access { };
+#define __kcsan_cleanup_scoped __maybe_unused
+static inline struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa) { return sa; }
+static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
+
+#endif /* CONFIG_KCSAN */
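+
+/*
+ * Illustration only, not part of this header: a scoped access keeps KCSAN
+ * checking a range until explicitly ended (or, with __kcsan_cleanup_scoped,
+ * until the enclosing scope exits). The structure below is hypothetical.
+ */
+#if 0
+static void my_update(struct my_obj *obj)
+{
+	struct kcsan_scoped_access sa;
+
+	/* Report any racing access to *obj for the span of this function. */
+	kcsan_begin_scoped_access(obj, sizeof(*obj), KCSAN_ACCESS_ASSERT, &sa);
+	obj->a = 1;
+	obj->b = 2;
+	kcsan_end_scoped_access(&sa);
+}
+#endif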
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Only calls into the runtime when the particular compilation unit has KCSAN
+ * instrumentation enabled. May be used in header files.
+ */
+#define kcsan_check_access __kcsan_check_access
+
+/*
+ * Only use these to disable KCSAN for accesses in the current compilation unit;
+ * calls into libraries may still perform KCSAN checks.
+ */
+#define __kcsan_disable_current kcsan_disable_current
+#define __kcsan_enable_current kcsan_enable_current_nowarn
+#else /* __SANITIZE_THREAD__ */
+static inline void kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+static inline void __kcsan_enable_current(void) { }
+static inline void __kcsan_disable_current(void) { }
+#endif /* __SANITIZE_THREAD__ */
+
+#if defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__SANITIZE_THREAD__)
+/*
+ * Normal barrier instrumentation is not done via explicit calls, but by mapping
+ * to a repurposed __atomic_signal_fence(), which normally does not generate any
+ * real instructions, but is still intercepted by -fsanitize=thread. This means,
+ * like any other compile-time instrumentation, barrier instrumentation can be
+ * disabled with the __no_kcsan function attribute.
+ *
+ * Also see definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ *
+ * These are all macros, like <asm/barrier.h>, since some architectures use them
+ * in non-static inline functions.
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE(name) \
+ do { \
+ barrier(); \
+ __atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_##name); \
+ barrier(); \
+ } while (0)
+#define kcsan_mb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(mb)
+#define kcsan_wmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(wmb)
+#define kcsan_rmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(rmb)
+#define kcsan_release() __KCSAN_BARRIER_TO_SIGNAL_FENCE(release)
+#elif defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__KCSAN_INSTRUMENT_BARRIERS__)
+#define kcsan_mb __kcsan_mb
+#define kcsan_wmb __kcsan_wmb
+#define kcsan_rmb __kcsan_rmb
+#define kcsan_release __kcsan_release
+#else /* CONFIG_KCSAN_WEAK_MEMORY && ... */
+#define kcsan_mb() do { } while (0)
+#define kcsan_wmb() do { } while (0)
+#define kcsan_rmb() do { } while (0)
+#define kcsan_release() do { } while (0)
+#endif /* CONFIG_KCSAN_WEAK_MEMORY && ... */
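For context, an editorial sketch of how these hooks are typically consumed:
an architecture's barrier definitions prepend the kcsan_*() call so KCSAN can
model the ordering the real barrier provides (the arch primitive below is
hypothetical; the wrapping style follows asm-generic/barrier.h):

    #define __smp_mb()  arch_smp_mb()   /* hypothetical arch primitive */
    #define smp_mb()    do { kcsan_mb(); __smp_mb(); } while (0)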
+
+/**
+ * __kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
+
+/**
+ * __kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * __kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
+
+/**
+ * kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
+/*
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(); otherwise it becomes a no-op.
+ */
+#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#define kcsan_check_atomic_read_write(...) do { } while (0)
+#else
+#define kcsan_check_atomic_read(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
+#define kcsan_check_atomic_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#define kcsan_check_atomic_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
+#endif
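An editorial sketch of the shape of instrumentation an atomic wrapper emits
(the wrapper is hypothetical; in-tree atomics generate equivalent calls via
include/linux/instrumented.h):

    static inline int example_atomic_read(const atomic_t *v)
    {
            /* Marked access; checked only if atomics are not ignored. */
            kcsan_check_atomic_read(&v->counter, sizeof(v->counter));
            return READ_ONCE(v->counter);
    }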
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
+ *
+ * Assert that there are no concurrent writes to @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent code,
+ * where violation cannot be detected as a normal data race.
+ *
+ * For example, if we only have a single writer, but multiple concurrent
+ * readers, to avoid data races, all these accesses must be marked; even
+ * concurrent marked writes racing with the single writer are bugs.
+ * Unfortunately, due to being marked, they are no longer data races. For cases
+ * like these, we can use the macro as follows:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ * WRITE_ONCE(shared_foo, ...);
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void reader(void) {
+ * // update_foo_lock does not need to be held!
+ * ... = READ_ONCE(shared_foo);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
+ * checking if there is a clear scope where no concurrent writes are expected.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/*
+ * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
+ * expected to be unique for the scope in which instances of kcsan_scoped_access
+ * are declared.
+ */
+#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
+#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \
+ struct kcsan_scoped_access __kcsan_scoped_name(id, _) \
+ __kcsan_cleanup_scoped; \
+ struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \
+ __maybe_unused = kcsan_begin_scoped_access( \
+ &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
+ &__kcsan_scoped_name(id, _))
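An editorial expansion of the helper with @id = 0 (a value __COUNTER__ may
yield) shows why the cleanup attribute alone suffices to end the scope when
the enclosing block exits:

    struct kcsan_scoped_access __kcsan_scoped_0_ __kcsan_cleanup_scoped;
    struct kcsan_scoped_access *__kcsan_scoped_0_dummy_p __maybe_unused =
            kcsan_begin_scoped_access(&(var), sizeof(var),
                                      KCSAN_ACCESS_SCOPED | (type),
                                      &__kcsan_scoped_0_);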
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to @var for the duration of the
+ * scope in which it is introduced. This provides a better way to fully cover
+ * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
+ * increases the likelihood for KCSAN to detect racing accesses.
+ *
+ * For example, it allows finding race-condition bugs that only occur due to
+ * state changes within the scope itself:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * {
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
+ * WRITE_ONCE(shared_foo, 42);
+ * ...
+ * // shared_foo should still be 42 here!
+ * }
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void buggy(void) {
+ * if (READ_ONCE(shared_foo) == 42)
+ * WRITE_ONCE(shared_foo, 1); // bug!
+ * }
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
+ *
+ * For example, exclusive access may be expected after determining that no
+ * other users of an object are left, while the object is not actually freed
+ * yet. We can check that this property actually holds as follows:
+ *
+ * .. code-block:: c
+ *
+ * if (refcount_dec_and_test(&obj->refcnt)) {
+ * ASSERT_EXCLUSIVE_ACCESS(*obj);
+ * do_some_cleanup(obj);
+ * release_for_reuse(obj);
+ * }
+ *
+ * Note:
+ *
+ * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
+ * checking if there is a clear scope where no concurrent accesses are expected.
+ *
+ * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
+ * fit to detect use-after-free bugs.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor writers)
+ * for the entire duration of the scope in which it is introduced. This provides
+ * a better way to fully cover the enclosing scope, compared to multiple
+ * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
+ * racing accesses.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. This assertion captures more detailed
+ * bit-level properties, compared to the other (word granularity) assertions.
+ * Only the bits set in @mask are checked for concurrent modifications, while
+ * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
+ * are ignored.
+ *
+ * Use this for variables where some bits must not be modified concurrently,
+ * yet other bits are expected to be modified concurrently.
+ *
+ * For example, variables where, after initialization, some bits are read-only,
+ * but other bits may still be modified concurrently. A reader may wish to
+ * assert that this is true as follows:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
+ * to access the masked bits only, and KCSAN optimistically assumes it is
+ * therefore safe, even in the presence of data races, and marking it with
+ * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
+ * it may still be advisable to do so, since we cannot reason about all compiler
+ * optimizations when it comes to bit manipulations (on the reader and writer
+ * side). If we are sure nothing can go wrong, the above can be written simply
+ * as:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Another example where this may be used is when certain bits of @var may
+ * only be modified when holding the appropriate lock, but other bits may still
+ * be modified concurrently. Writers, where other bits may change concurrently,
+ * could use the assertion as follows:
+ *
+ * .. code-block:: c
+ *
+ * spin_lock(&foo_lock);
+ * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ * old_flags = flags;
+ * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ * spin_unlock(&foo_lock);
+ *
+ * @var: variable to assert on
+ * @mask: only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask) \
+ do { \
+ kcsan_set_access_mask(mask); \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+ kcsan_set_access_mask(0); \
+ kcsan_atomic_next(1); \
+ } while (0)
+
+#endif /* _LINUX_KCSAN_CHECKS_H */
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
new file mode 100644
index 000000000000..c07c71f5ba4f
--- /dev/null
+++ b/include/linux/kcsan.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
+ * data structures to set up the runtime. See kcsan-checks.h for explicit checks and
+ * modifiers. For more info please see Documentation/dev-tools/kcsan.rst.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
+
+#ifndef _LINUX_KCSAN_H
+#define _LINUX_KCSAN_H
+
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KCSAN
+
+/*
+ * Context for each thread of execution: for tasks, this is stored in
+ * task_struct, and interrupts access internal per-CPU storage.
+ */
+struct kcsan_ctx {
+ int disable_count; /* disable counter */
+ int disable_scoped; /* disable scoped access counter */
+ int atomic_next; /* number of following atomic ops */
+
+ /*
+ * We distinguish between: (a) nestable atomic regions that may contain
+ * other nestable regions; and (b) flat atomic regions that do not keep
+ * track of nesting. Both (a) and (b) are entirely independent of each
+ * other, and a flat region may be started in a nestable region or
+ * vice-versa.
+ *
+ * This is required because, for example, in the annotations for
+ * seqlocks, we declare seqlock writer critical sections as (a) nestable
+ * atomic regions, but reader critical sections as (b) flat atomic
+ * regions; we have encountered cases where seqlock reader critical
+ * sections are contained within writer critical sections (the opposite
+ * may be possible, too).
+ *
+ * To support these cases, we independently track the depth of nesting
+ * for (a), and whether the leaf level is flat for (b).
+ */
+ int atomic_nest_count;
+ bool in_flat_atomic;
+
+ /*
+ * Access mask for all accesses if non-zero.
+ */
+ unsigned long access_mask;
+
+ /* List of scoped accesses; likely to be empty. */
+ struct list_head scoped_accesses;
+
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ /*
+ * Scoped access for modeling access reordering to detect missing memory
+ * barriers; only keep 1 to keep fast-path complexity manageable.
+ */
+ struct kcsan_scoped_access reorder_access;
+#endif
+};
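An editorial sketch of the two region flavours tracked above, using the
public annotations from kcsan-checks.h:

    kcsan_nestable_atomic_begin();   /* e.g. seqlock write section     */
    kcsan_nestable_atomic_begin();   /* nestable regions may nest ...  */
    kcsan_flat_atomic_begin();       /* e.g. seqlock read section      */
    kcsan_flat_atomic_end();         /* ... flat: no nesting tracked   */
    kcsan_nestable_atomic_end();
    kcsan_nestable_atomic_end();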
+
+/**
+ * kcsan_init - initialize KCSAN runtime
+ */
+void kcsan_init(void);
+
+#else /* CONFIG_KCSAN */
+
+static inline void kcsan_init(void) { }
+
+#endif /* CONFIG_KCSAN */
+
+#endif /* _LINUX_KCSAN_H */
diff --git a/include/linux/kd.h b/include/linux/kd.h
deleted file mode 100644
index 25bd17fad239..000000000000
--- a/include/linux/kd.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _LINUX_KD_H
-#define _LINUX_KD_H
-
-#include <uapi/linux/kd.h>
-
-#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */
-#endif /* _LINUX_KD_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 68bd88223417..741c58e86431 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,6 +13,9 @@
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
+#include <linux/list.h>
+#include <linux/smp.h>
+
/* Shifted versions of the command enable bits are used if the command
* has no arguments (see kdb_check_flags). This allows commands, such as
* go, to have different permissions depending upon whether it is called
@@ -64,6 +67,17 @@ typedef enum {
typedef int (*kdb_func_t)(int, const char **);
+/* The KDB shell command table */
+typedef struct _kdbtab {
+ char *name; /* Command name */
+ kdb_func_t func; /* Function to execute command */
+ char *usage; /* Usage String for this command */
+ char *help; /* Help message for this command */
+ short minlen; /* Minimum legal # cmd chars required */
+ kdb_cmdflags_t flags; /* Command behaviour flags */
+ struct list_head list_node; /* Command list */
+} kdbtab_t;
+
#ifdef CONFIG_KGDB_KDB
#include <linux/init.h>
#include <linux/sched.h>
@@ -91,7 +105,7 @@ extern int kdb_initial_cpu;
#define KDB_NOENVVALUE (-6)
#define KDB_NOTIMP (-7)
#define KDB_ENVFULL (-8)
-#define KDB_ENVBUFFULL (-9)
+#define KDB_KMALLOCFAILED (-9)
#define KDB_TOOMANYBPT (-10)
#define KDB_TOOMANYDBREGS (-11)
#define KDB_DUPBPT (-12)
@@ -125,10 +139,7 @@ extern const char *kdb_diemsg;
#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do
* not use keyboard */
-extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */
-
-extern void kdb_save_flags(void);
-extern void kdb_restore_flags(void);
+extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */
#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag)
#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag))
@@ -183,8 +194,8 @@ int kdb_process_cpu(const struct task_struct *p)
return cpu;
}
-/* kdb access to register set for stack dumping */
-extern struct pt_regs *kdb_current_regs;
+extern void kdb_send_sig(struct task_struct *p, int sig);
+
#ifdef CONFIG_KALLSYMS
extern const char *kdb_walk_kallsyms(loff_t *pos);
#else /* ! CONFIG_KALLSYMS */
@@ -195,19 +206,28 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
#endif /* ! CONFIG_KALLSYMS */
/* Dynamic kdb shell command registration */
-extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
- short, kdb_cmdflags_t);
-extern int kdb_unregister(char *);
+extern int kdb_register(kdbtab_t *cmd);
+extern void kdb_unregister(kdbtab_t *cmd);
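With the table-based interface, a command is described once and passed by
pointer; an editorial sketch (the command name and handler are hypothetical):

    static int kdb_hello(int argc, const char **argv)
    {
            kdb_printf("hello from kdb\n");
            return 0;
    }

    static kdbtab_t hello_cmd = {
            .name  = "hello",
            .func  = kdb_hello,
            .usage = "hello",
            .help  = "print a greeting",
    };

    kdb_register(&hello_cmd);    /* and kdb_unregister(&hello_cmd) on exit */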
+
+/* Return true when KDB is locked for printing a message on this CPU. */
+static inline
+bool kdb_printf_on_this_cpu(void)
+{
+ /*
+	 * We can use raw_smp_processor_id() here because the task cannot
+	 * get migrated while KDB is locked for printing on this CPU.
+ */
+ return unlikely(READ_ONCE(kdb_printf_cpu) == raw_smp_processor_id());
+}
+
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
-static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen) { return 0; }
-static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen,
- kdb_cmdflags_t flags) { return 0; }
-static inline int kdb_unregister(char *cmd) { return 0; }
+static inline int kdb_register(kdbtab_t *cmd) { return 0; }
+static inline void kdb_unregister(kdbtab_t *cmd) {}
+
+static inline bool kdb_printf_on_this_cpu(void) { return false; }
+
#endif /* CONFIG_KGDB_KDB */
enum {
KDB_NOT_INITIALIZED,
@@ -217,5 +237,6 @@ enum {
extern int kdbgetintenv(const char *, int *);
extern int kdb_set(int, const char **);
+int kdb_lsmod(int argc, const char **argv);
#endif /* !_KDB_H */
diff --git a/include/linux/kdebug.h b/include/linux/kdebug.h
index ed815090b3bc..fd311565fabf 100644
--- a/include/linux/kdebug.h
+++ b/include/linux/kdebug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KDEBUG_H
#define _LINUX_KDEBUG_H
diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
index 8e9e288b08c1..4856706fbfeb 100644
--- a/include/linux/kdev_t.h
+++ b/include/linux/kdev_t.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KDEV_T_H
#define _LINUX_KDEV_T_H
@@ -20,61 +21,61 @@
})
/* acceptable for old filesystems */
-static inline bool old_valid_dev(dev_t dev)
+static __always_inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
-static inline u16 old_encode_dev(dev_t dev)
+static __always_inline u16 old_encode_dev(dev_t dev)
{
return (MAJOR(dev) << 8) | MINOR(dev);
}
-static inline dev_t old_decode_dev(u16 val)
+static __always_inline dev_t old_decode_dev(u16 val)
{
return MKDEV((val >> 8) & 255, val & 255);
}
-static inline u32 new_encode_dev(dev_t dev)
+static __always_inline u32 new_encode_dev(dev_t dev)
{
unsigned major = MAJOR(dev);
unsigned minor = MINOR(dev);
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}
-static inline dev_t new_decode_dev(u32 dev)
+static __always_inline dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return MKDEV(major, minor);
}
-static inline u64 huge_encode_dev(dev_t dev)
+static __always_inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}
-static inline dev_t huge_decode_dev(u64 dev)
+static __always_inline dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}
-static inline int sysv_valid_dev(dev_t dev)
+static __always_inline int sysv_valid_dev(dev_t dev)
{
return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}
-static inline u32 sysv_encode_dev(dev_t dev)
+static __always_inline u32 sysv_encode_dev(dev_t dev)
{
return MINOR(dev) | (MAJOR(dev) << 18);
}
-static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}
-static inline unsigned sysv_minor(u32 dev)
+static __always_inline unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
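A worked round trip through the new-format encoding (editorial; the device
numbers are illustrative):

    dev_t dev = MKDEV(8, 17);             /* major 8, minor 17              */
    u32 enc = new_encode_dev(dev);        /* (17 & 0xff) | (8 << 8) == 0x811 */

    WARN_ON(new_decode_dev(enc) != dev);  /* decode restores both fields    */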
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
index f282d4e87258..bf2389c26ae3 100644
--- a/include/linux/kern_levels.h
+++ b/include/linux/kern_levels.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KERN_LEVELS_H__
#define __KERN_LEVELS_H__
@@ -13,7 +14,7 @@
#define KERN_INFO KERN_SOH "6" /* informational */
#define KERN_DEBUG KERN_SOH "7" /* debug-level messages */
-#define KERN_DEFAULT KERN_SOH "d" /* the default kernel loglevel */
+#define KERN_DEFAULT "" /* the default kernel loglevel */
/*
* Annotation for a "continued" line of log printout (only done after a
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index f65ce09784f1..196778a087c4 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_KERNEL_PAGE_FLAGS_H
#define LINUX_KERNEL_PAGE_FLAGS_H
@@ -9,12 +10,13 @@
*/
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
-#define KPF_MAPPEDTODISK 34
+#define KPF_OWNER_2 34
#define KPF_PRIVATE 35
#define KPF_PRIVATE_2 36
#define KPF_OWNER_PRIVATE 37
#define KPF_ARCH 38
-#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
+#define KPF_ARCH_2 41
+#define KPF_ARCH_3 42
#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6607225d0ea4..5b46924fdff5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -1,261 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NOTE:
+ *
+ * This header has accumulated a lot of mutually unrelated stuff.
+ * The process of splitting its content is in progress while keeping
+ * backward compatibility. That's why it's highly recommended NOT to
+ * include this header inside another header file, especially under
+ * generic or architectural include/ directory.
+ */
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
-
-#include <stdarg.h>
+#include <linux/stdarg.h>
+#include <linux/align.h>
+#include <linux/array_size.h>
+#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/bitops.h>
+#include <linux/hex.h>
+#include <linux/kstrtox.h>
#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/typecheck.h>
+#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
+#include <linux/sprintf.h>
+#include <linux/static_call_types.h>
+#include <linux/instruction_pointer.h>
+#include <linux/util_macros.h>
+#include <linux/wordpart.h>
+
#include <asm/byteorder.h>
-#include <uapi/linux/kernel.h>
-#define USHRT_MAX ((u16)(~0U))
-#define SHRT_MAX ((s16)(USHRT_MAX>>1))
-#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
-#define INT_MAX ((int)(~0U>>1))
-#define INT_MIN (-INT_MAX - 1)
-#define UINT_MAX (~0U)
-#define LONG_MAX ((long)(~0UL>>1))
-#define LONG_MIN (-LONG_MAX - 1)
-#define ULONG_MAX (~0UL)
-#define LLONG_MAX ((long long)(~0ULL>>1))
-#define LLONG_MIN (-LLONG_MAX - 1)
-#define ULLONG_MAX (~0ULL)
-#define SIZE_MAX (~(size_t)0)
-
-#define U8_MAX ((u8)~0U)
-#define S8_MAX ((s8)(U8_MAX>>1))
-#define S8_MIN ((s8)(-S8_MAX - 1))
-#define U16_MAX ((u16)~0U)
-#define S16_MAX ((s16)(U16_MAX>>1))
-#define S16_MIN ((s16)(-S16_MAX - 1))
-#define U32_MAX ((u32)~0U)
-#define S32_MAX ((s32)(U32_MAX>>1))
-#define S32_MIN ((s32)(-S32_MAX - 1))
-#define U64_MAX ((u64)~0ULL)
-#define S64_MAX ((s64)(U64_MAX>>1))
-#define S64_MIN ((s64)(-S64_MAX - 1))
+#include <uapi/linux/kernel.h>
#define STACK_MAGIC 0xdeadbeef
-#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
-
-/* @a is a power of 2 value */
-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
-#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
-#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
-
-/* generic data direction definitions */
-#define READ 0
-#define WRITE 1
-
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+struct completion;
+struct user;
-#define u64_to_user_ptr(x) ( \
-{ \
- typecheck(u64, x); \
- (void __user *)(uintptr_t)x; \
-} \
-)
+#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
+extern int __cond_resched(void);
+# define might_resched() __cond_resched()
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
-#define DIV_ROUND_UP_ULL(ll,d) \
- ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#if BITS_PER_LONG == 32
-# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
-#else
-# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
-#endif
+extern int __cond_resched(void);
-/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
-#define roundup(x, y) ( \
-{ \
- const typeof(y) __y = y; \
- (((x) + (__y - 1)) / __y) * __y; \
-} \
-)
-#define rounddown(x, y) ( \
-{ \
- typeof(x) __x = (x); \
- __x - (__x % (y)); \
-} \
-)
+DECLARE_STATIC_CALL(might_resched, __cond_resched);
-/*
- * Divide positive or negative dividend by positive or negative divisor
- * and round to closest integer. Result is undefined for negative
- * divisors if the dividend variable type is unsigned and for negative
- * dividends if the divisor variable type is unsigned.
- */
-#define DIV_ROUND_CLOSEST(x, divisor)( \
-{ \
- typeof(x) __x = x; \
- typeof(divisor) __d = divisor; \
- (((typeof(x))-1) > 0 || \
- ((typeof(divisor))-1) > 0 || \
- (((__x) > 0) == ((__d) > 0))) ? \
- (((__x) + ((__d) / 2)) / (__d)) : \
- (((__x) - ((__d) / 2)) / (__d)); \
-} \
-)
-/*
- * Same as above but for u64 dividends. divisor must be a 32-bit
- * number.
- */
-#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
-{ \
- typeof(divisor) __d = divisor; \
- unsigned long long _tmp = (x) + (__d) / 2; \
- do_div(_tmp, __d); \
- _tmp; \
-} \
-)
-
-/*
- * Multiplies an integer by a fraction, while avoiding unnecessary
- * overflow or loss of precision.
- */
-#define mult_frac(x, numer, denom)( \
-{ \
- typeof(x) quot = (x) / (denom); \
- typeof(x) rem = (x) % (denom); \
- (quot * (numer)) + ((rem * (numer)) / (denom)); \
-} \
-)
+static __always_inline void might_resched(void)
+{
+ static_call_mod(might_resched)();
+}
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-#define _RET_IP_ (unsigned long)__builtin_return_address(0)
-#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+extern int dynamic_might_resched(void);
+# define might_resched() dynamic_might_resched()
-#ifdef CONFIG_LBDAF
-# include <asm/div64.h>
-# define sector_div(a, b) do_div(a, b)
#else
-# define sector_div(n, b)( \
-{ \
- int _res; \
- _res = (n) % (b); \
- (n) /= (b); \
- _res; \
-} \
-)
-#endif
-
-/**
- * upper_32_bits - return bits 32-63 of a number
- * @n: the number we're accessing
- *
- * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
- * the "right shift count >= width of type" warning when that quantity is
- * 32-bits.
- */
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
-/**
- * lower_32_bits - return bits 0-31 of a number
- * @n: the number we're accessing
- */
-#define lower_32_bits(n) ((u32)(n))
-
-struct completion;
-struct pt_regs;
-struct user;
-
-#ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int _cond_resched(void);
-# define might_resched() _cond_resched()
-#else
# define might_resched() do { } while (0)
-#endif
+
+#endif /* CONFIG_PREEMPT_* */
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- void ___might_sleep(const char *file, int line, int preempt_offset);
- void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_resched(const char *file, int line, unsigned int offsets);
+extern void __might_sleep(const char *file, int line);
+extern void __cant_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_migrate(const char *file, int line);
+
/**
* might_sleep - annotation for functions that can sleep
*
* this macro will print a stack trace if it is executed in an atomic
- * context (spinlock, irq-handler, ...).
+ * context (spinlock, irq-handler, ...). Additional sections where blocking is
+ * not allowed can be annotated with non_block_start() and non_block_end()
+ * pairs.
*
* This is a useful debugging help to be able to catch problems early and not
* be bitten later when the calling function happens to sleep when it is not
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
+/**
+ * cant_sleep - annotation for functions that cannot sleep
+ *
+ * this macro will print a stack trace if it is executed with preemption enabled
+ */
+# define cant_sleep() \
+ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
-#else
- static inline void ___might_sleep(const char *file, int line,
- int preempt_offset) { }
- static inline void __might_sleep(const char *file, int line,
- int preempt_offset) { }
-# define might_sleep() do { might_resched(); } while (0)
-# define sched_annotate_sleep() do { } while (0)
-#endif
-
-#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
/**
- * abs - return absolute value of an argument
- * @x: the value. If it is unsigned type, it is converted to signed type first.
- * char is treated as if it was signed (regardless of whether it really is)
- * but the macro's return type is preserved as char.
+ * cant_migrate - annotation for functions that cannot migrate
*
- * Return: an absolute value of x.
+ * Will print a stack trace if executed in code which is migratable
*/
-#define abs(x) __abs_choose_expr(x, long long, \
- __abs_choose_expr(x, long, \
- __abs_choose_expr(x, int, \
- __abs_choose_expr(x, short, \
- __abs_choose_expr(x, char, \
- __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), char), \
- (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
- ((void)0)))))))
-
-#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), signed type) || \
- __builtin_types_compatible_p(typeof(x), unsigned type), \
- ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
+# define cant_migrate() \
+ do { \
+ if (IS_ENABLED(CONFIG_SMP)) \
+ __cant_migrate(__FILE__, __LINE__); \
+ } while (0)
/**
- * reciprocal_scale - "scale" a value into range [0, ep_ro)
- * @val: value
- * @ep_ro: right open interval endpoint
+ * non_block_start - annotate the start of section where sleeping is prohibited
*
- * Perform a "reciprocal multiplication" in order to "scale" a value into
- * range [0, ep_ro), where the upper interval endpoint is right-open.
- * This is useful, e.g. for accessing an index of an array containing
- * ep_ro elements, for example. Think of it as sort of modulus, only that
- * the result isn't that of modulo. ;) Note that if initial input is a
- * small value, then result will return 0.
+ * This is on behalf of the oom reaper, specifically when it is calling the mmu
+ * notifiers. The problem is that if the notifier were to block on, for example,
+ * mutex_lock() and if the process which holds that mutex were to perform a
+ * sleeping memory allocation, the oom reaper is now blocked on completion of
+ * that memory allocation. Other blocking calls like wait_event() pose similar
+ * issues.
+ */
+# define non_block_start() (current->non_block_count++)
+/**
+ * non_block_end - annotate the end of section where sleeping is prohibited
*
- * Return: a result based on val in interval [0, ep_ro).
+ * Closes a section opened by non_block_start().
*/
-static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
-{
- return (u32)(((u64) val * ep_ro) >> 32);
-}
+# define non_block_end() WARN_ON(current->non_block_count-- == 0)
+#else
+ static inline void __might_resched(const char *file, int line,
+ unsigned int offsets) { }
+static inline void __might_sleep(const char *file, int line) { }
+# define might_sleep() do { might_resched(); } while (0)
+# define cant_sleep() do { } while (0)
+# define cant_migrate() do { } while (0)
+# define sched_annotate_sleep() do { } while (0)
+# define non_block_start() do { } while (0)
+# define non_block_end() do { } while (0)
+#endif
+
+#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
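An editorial sketch of a non-blocking section as described above (the callee
is hypothetical; any might_sleep() reached between the two annotations
triggers the debug splat under CONFIG_DEBUG_ATOMIC_SLEEP):

    int ret;

    non_block_start();
    ret = notifier_that_must_not_block(obj);   /* hypothetical callee */
    non_block_end();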
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
@@ -265,305 +151,46 @@ void __might_fault(const char *file, int line);
static inline void might_fault(void) { }
#endif
-extern struct atomic_notifier_head panic_notifier_list;
-extern long (*panic_blink)(int state);
-__printf(1, 2)
-void panic(const char *fmt, ...) __noreturn __cold;
-void nmi_panic(struct pt_regs *regs, const char *msg);
-extern void oops_enter(void);
-extern void oops_exit(void);
-void print_oops_end_marker(void);
-extern int oops_may_print(void);
void do_exit(long error_code) __noreturn;
-void complete_and_exit(struct completion *, long) __noreturn;
-
-#ifdef CONFIG_ARCH_HAS_REFCOUNT
-void refcount_error_report(struct pt_regs *regs, const char *err);
-#else
-static inline void refcount_error_report(struct pt_regs *regs, const char *err)
-{ }
-#endif
-
-/* Internal, do not use. */
-int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
-int __must_check _kstrtol(const char *s, unsigned int base, long *res);
-
-int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
-
-/**
- * kstrtoul - convert a string to an unsigned long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign, but not a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
-*/
-static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
- */
- if (sizeof(unsigned long) == sizeof(unsigned long long) &&
- __alignof__(unsigned long) == __alignof__(unsigned long long))
- return kstrtoull(s, base, (unsigned long long *)res);
- else
- return _kstrtoul(s, base, res);
-}
-
-/**
- * kstrtol - convert a string to a long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign or a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
- */
-static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(long, long long) = 0.
- */
- if (sizeof(long) == sizeof(long long) &&
- __alignof__(long) == __alignof__(long long))
- return kstrtoll(s, base, (long long *)res);
- else
- return _kstrtol(s, base, res);
-}
-
-int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
-int __must_check kstrtoint(const char *s, unsigned int base, int *res);
-
-static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
-{
- return kstrtoull(s, base, res);
-}
-
-static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
-{
- return kstrtoll(s, base, res);
-}
-
-static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
-{
- return kstrtouint(s, base, res);
-}
-
-static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
-{
- return kstrtoint(s, base, res);
-}
-
-int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
-int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
-int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
-int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
-int __must_check kstrtobool(const char *s, bool *res);
-
-int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
-int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
-int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
-int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
-int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
-int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
-int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
-int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
-int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
-int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
-
-static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
-{
- return kstrtoull_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
-{
- return kstrtoll_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
-{
- return kstrtouint_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
-{
- return kstrtoint_from_user(s, count, base, res);
-}
-
-/* Obsolete, do not use. Use kstrto<foo> instead */
-
-extern unsigned long simple_strtoul(const char *,char **,unsigned int);
-extern long simple_strtol(const char *,char **,unsigned int);
-extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
-extern long long simple_strtoll(const char *,char **,unsigned int);
-
-extern int num_to_str(char *buf, int size, unsigned long long num);
-
-/* lib/printf utilities */
-
-extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
-extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
-extern __printf(3, 4)
-int snprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(3, 4)
-int scnprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(2, 3) __malloc
-char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern __printf(2, 0) __malloc
-char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
-extern __printf(2, 0)
-const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
-
-extern __scanf(2, 3)
-int sscanf(const char *, const char *, ...);
-extern __scanf(2, 0)
-int vsscanf(const char *, const char *, va_list);
-
-extern int get_option(char **str, int *pint);
-extern char *get_options(const char *str, int nints, int *ints);
-extern unsigned long long memparse(const char *ptr, char **retptr);
-extern bool parse_option_str(const char *str, const char *option);
-extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
-extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
-unsigned long int_sqrt(unsigned long);
-
extern void bust_spinlocks(int yes);
-extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
-extern int panic_timeout;
-extern int panic_on_oops;
-extern int panic_on_unrecovered_nmi;
-extern int panic_on_io_nmi;
-extern int panic_on_warn;
-extern int sysctl_panic_on_rcu_stall;
-extern int sysctl_panic_on_stackoverflow;
-
-extern bool crash_kexec_post_notifiers;
-/*
- * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
- * holds a CPU number which is executing panic() currently. A value of
- * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
- */
-extern atomic_t panic_cpu;
-#define PANIC_CPU_INVALID -1
-
-/*
- * Only to be used by arch init code. If the user over-wrote the default
- * CONFIG_PANIC_TIMEOUT, honor it.
- */
-static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
-{
- if (panic_timeout == arch_default_timeout)
- panic_timeout = timeout;
-}
-extern const char *print_tainted(void);
-enum lockdep_ok {
- LOCKDEP_STILL_OK,
- LOCKDEP_NOW_UNRELIABLE
-};
-extern void add_taint(unsigned flag, enum lockdep_ok);
-extern int test_taint(unsigned flag);
-extern unsigned long get_taint(void);
extern int root_mountflags;
extern bool early_boot_irqs_disabled;
-/*
- * Values used for system_state. Ordering of the states must not be changed
+/**
+ * enum system_states - Values used for system_state.
+ *
+ * @SYSTEM_BOOTING: %0, no init needed
+ * @SYSTEM_SCHEDULING: system is ready for scheduling; OK to use RCU
+ * @SYSTEM_FREEING_INITMEM: system is freeing all of initmem; almost running
+ * @SYSTEM_RUNNING: system is up and running
+ * @SYSTEM_HALT: system entered clean system halt state
+ * @SYSTEM_POWER_OFF: system entered shutdown/clean power off state
+ * @SYSTEM_RESTART: system entered emergency power off or normal restart
+ * @SYSTEM_SUSPEND: system entered suspend or hibernate state
+ *
+ * Note:
+ * Ordering of the states must not be changed
* as code checks for <, <=, >, >= STATE.
*/
-extern enum system_states {
+enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
+ SYSTEM_FREEING_INITMEM,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
-} system_state;
-
-#define TAINT_PROPRIETARY_MODULE 0
-#define TAINT_FORCED_MODULE 1
-#define TAINT_CPU_OUT_OF_SPEC 2
-#define TAINT_FORCED_RMMOD 3
-#define TAINT_MACHINE_CHECK 4
-#define TAINT_BAD_PAGE 5
-#define TAINT_USER 6
-#define TAINT_DIE 7
-#define TAINT_OVERRIDDEN_ACPI_TABLE 8
-#define TAINT_WARN 9
-#define TAINT_CRAP 10
-#define TAINT_FIRMWARE_WORKAROUND 11
-#define TAINT_OOT_MODULE 12
-#define TAINT_UNSIGNED_MODULE 13
-#define TAINT_SOFTLOCKUP 14
-#define TAINT_LIVEPATCH 15
-#define TAINT_FLAGS_COUNT 16
-
-struct taint_flag {
- char c_true; /* character printed when tainted */
- char c_false; /* character printed when not tainted */
- bool module; /* also show as a per-module taint flag */
+ SYSTEM_SUSPEND,
};
-
-extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
-
-extern const char hex_asc[];
-#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
-#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
-
-static inline char *hex_byte_pack(char *buf, u8 byte)
-{
- *buf++ = hex_asc_hi(byte);
- *buf++ = hex_asc_lo(byte);
- return buf;
-}
-
-extern const char hex_asc_upper[];
-#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
-#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
-
-static inline char *hex_byte_pack_upper(char *buf, u8 byte)
-{
- *buf++ = hex_asc_upper_hi(byte);
- *buf++ = hex_asc_upper_lo(byte);
- return buf;
-}
-
-extern int hex_to_bin(char ch);
-extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
-extern char *bin2hex(char *dst, const void *src, size_t count);
-
-bool mac_pton(const char *s, u8 *mac);
+extern enum system_states system_state;
/*
* General tracing related utility functions - trace_printk(),
@@ -571,7 +198,7 @@ bool mac_pton(const char *s, u8 *mac);
*
* Use tracing_on/tracing_off when you want to quickly turn on or off
* tracing. It simply enables or disables the recording of the trace events.
- * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
+ * This also corresponds to the user space /sys/kernel/tracing/tracing_on
* file, which gives a means for the kernel and userspace to interact.
* Place a tracing_off() in the kernel where you want tracing to end.
* From user space, examine the trace, and then echo 1 > tracing_on
@@ -589,6 +216,7 @@ enum ftrace_dump_mode {
DUMP_NONE,
DUMP_ALL,
DUMP_ORIG,
+ DUMP_PARAM,
};
#ifdef CONFIG_TRACING
@@ -615,8 +243,8 @@ do { \
* trace_printk - printf formatting in the ftrace buffer
* @fmt: the printf format for printing
*
- * Note: __trace_printk is an internal function for trace_printk and
- * the @ip is passed in via the trace_printk macro.
+ * Note: __trace_printk is an internal function for trace_printk() and
+ * the @ip is passed in via the trace_printk() macro.
*
* This function allows a kernel developer to debug fast path sections
* that printk is not appropriate for. By scattering in various
@@ -626,9 +254,9 @@ do { \
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_printks scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_printk() is used)
+ * allocated when trace_printk() is used.)
*
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
* The trace_puts() will suffice. But how can we take advantage of
* using trace_puts() when trace_printk() has only one argument?
@@ -653,7 +281,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__trace_printk_check_format(fmt, ##args); \
@@ -678,7 +306,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* the @ip is passed in via the trace_puts macro.
*
* This is similar to trace_printk() but is made for those really fast
- * paths that a developer wants the least amount of "Heisenbug" affects,
+ * paths that a developer wants the least amount of "Heisenbug" effects,
* where the processing of the print format is still too much.
*
* This function allows a kernel developer to debug fast path sections
@@ -689,7 +317,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_puts scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_puts() is used)
+ * allocated when trace_puts() is used.)
*
* Returns: 0 if nothing was written, positive # if string was.
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
@@ -697,7 +325,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
#define trace_puts(str) ({ \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(str) ? str : NULL; \
\
if (__builtin_constant_p(str)) \
@@ -719,7 +347,7 @@ extern void trace_dump_stack(int skip);
do { \
if (__builtin_constant_p(fmt)) { \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
@@ -758,120 +386,9 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/*
- * min()/max()/clamp() macros that also do
- * strict type-checking.. See the
- * "unnecessary" pointer comparison.
- */
-#define __min(t1, t2, min1, min2, x, y) ({ \
- t1 min1 = (x); \
- t2 min2 = (y); \
- (void) (&min1 == &min2); \
- min1 < min2 ? min1 : min2; })
-#define min(x, y) \
- __min(typeof(x), typeof(y), \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
-
-#define __max(t1, t2, max1, max2, x, y) ({ \
- t1 max1 = (x); \
- t2 max2 = (y); \
- (void) (&max1 == &max2); \
- max1 > max2 ? max1 : max2; })
-#define max(x, y) \
- __max(typeof(x), typeof(y), \
- __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
- x, y)
-
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
-
-/**
- * min_not_zero - return the minimum that is _not_ zero, unless both are zero
- * @x: value1
- * @y: value2
- */
-#define min_not_zero(x, y) ({ \
- typeof(x) __x = (x); \
- typeof(y) __y = (y); \
- __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
-
-/**
- * clamp - return a value clamped to a given range with strict typechecking
- * @val: current value
- * @lo: lowest allowable value
- * @hi: highest allowable value
- *
- * This macro does strict typechecking of lo/hi to make sure they are of the
- * same type as val. See the unnecessary pointer comparisons.
- */
-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
-
-/*
- * ..and if you can't take the strict
- * types, you can specify one yourself.
- *
- * Or not use min/max/clamp at all, of course.
- */
-#define min_t(type, x, y) \
- __min(type, type, \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
-
-#define max_t(type, x, y) \
- __max(type, type, \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
-
-/**
- * clamp_t - return a value clamped to a given range using a given type
- * @type: the type of variable to use
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of type
- * 'type' to make all the comparisons.
- */
-#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
-
-/**
- * clamp_val - return a value clamped to a given range using val's type
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of whatever
- * type the input argument 'val' is. This is useful when val is an unsigned
- * type and min and max are literals that will otherwise be assigned a signed
- * integer type.
- */
-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
-
-
-/*
- * swap - swap value of @a and @b
- */
-#define swap(a, b) \
- do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
-
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type *)(__mptr - offsetof(type, member))); })
-
-/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
-# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+/* Rebuild everything on CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define REBUILD_DUE_TO_DYNAMIC_FTRACE
#endif
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h
new file mode 100644
index 000000000000..d613a7b4dd35
--- /dev/null
+++ b/include/linux/kernel_read_file.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KERNEL_READ_FILE_H
+#define _LINUX_KERNEL_READ_FILE_H
+
+#include <linux/file.h>
+#include <linux/types.h>
+
+/* This is a list of *what* is being read, not *how* nor *where*. */
+#define __kernel_read_file_id(id) \
+ id(UNKNOWN, unknown) \
+ id(FIRMWARE, firmware) \
+ id(MODULE, kernel-module) \
+ id(KEXEC_IMAGE, kexec-image) \
+ id(KEXEC_INITRAMFS, kexec-initramfs) \
+ id(POLICY, security-policy) \
+ id(X509_CERTIFICATE, x509-certificate) \
+ id(MODULE_COMPRESSED, kernel-module-compressed) \
+ id(MAX_ID, )
+
+#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
+#define __fid_stringify(dummy, str) #str,
+
+enum kernel_read_file_id {
+ __kernel_read_file_id(__fid_enumify)
+};
+
+static const char * const kernel_read_file_str[] = {
+ __kernel_read_file_id(__fid_stringify)
+};
+
+static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
+{
+ if ((unsigned int)id >= READING_MAX_ID)
+ return kernel_read_file_str[READING_UNKNOWN];
+
+ return kernel_read_file_str[id];
+}
+
+ssize_t kernel_read_file(struct file *file, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_fd(int fd, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+
+#endif /* _LINUX_KERNEL_READ_FILE_H */
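An editorial usage sketch for the helpers above (path and size limit are
illustrative; the in-tree implementation vmalloc()s the buffer, so a
successful caller frees it with vfree()):

    void *buf = NULL;
    size_t file_size = 0;
    ssize_t len;

    len = kernel_read_file_from_path("/lib/firmware/example.bin", 0, &buf,
                                     INT_MAX, &file_size, READING_FIRMWARE);
    if (len >= 0) {
            /* ... consume len bytes at buf ... */
            vfree(buf);
    }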
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 66be8b6beceb..b97ce2df376f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -1,10 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
-#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
@@ -27,6 +27,9 @@ enum cpu_usage_stat {
CPUTIME_STEAL,
CPUTIME_GUEST,
CPUTIME_GUEST_NICE,
+#ifdef CONFIG_SCHED_CORE
+ CPUTIME_FORCEIDLE,
+#endif
NR_STATS,
};
@@ -48,6 +51,7 @@ DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
+extern unsigned long long nr_context_switches_cpu(int cpu);
extern unsigned long long nr_context_switches(void);
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
@@ -63,20 +67,56 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
return kstat_cpu(cpu).softirqs[irq];
}
+static inline unsigned int kstat_cpu_softirqs_sum(int cpu)
+{
+ int i;
+ unsigned int sum = 0;
+
+ for (i = 0; i < NR_SOFTIRQS; i++)
+ sum += kstat_softirqs_cpu(i, cpu);
+
+ return sum;
+}
+
+#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
+extern void kstat_snapshot_irqs(void);
+extern unsigned int kstat_get_irq_since_snapshot(unsigned int irq);
+#else
+static inline void kstat_snapshot_irqs(void) { }
+static inline unsigned int kstat_get_irq_since_snapshot(unsigned int irq) { return 0; }
+#endif
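An editorial sketch of the snapshot pair introduced above: record the current
counts, let the workload run, then read the per-IRQ delta (irq is the IRQ
number of interest):

    unsigned int delta;

    kstat_snapshot_irqs();                      /* record current counts */
    /* ... watchdog period elapses ... */
    delta = kstat_get_irq_since_snapshot(irq);  /* increments since then */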
+
/*
* Number of interrupts per specific IRQ source, since bootup
*/
-extern unsigned int kstat_irqs(unsigned int irq);
extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
* Number of interrupts per cpu, since bootup
*/
-static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
{
return kstat_cpu(cpu).irqs_sum;
}
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu);
+extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
+#else
+static inline u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu)
+{
+ return kcpustat->cpustat[usage];
+}
+
+static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
+{
+ *dst = kcpustat_cpu(cpu);
+}
+
+#endif
+
extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
@@ -84,6 +124,7 @@ extern void account_system_index_time(struct task_struct *, u64,
enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
+extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
@@ -96,4 +137,8 @@ extern void account_process_tick(struct task_struct *, int user);
extern void account_idle_ticks(unsigned long ticks);
+#ifdef CONFIG_SCHED_CORE
+extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
+#endif
+
#endif /* _LINUX_KERNEL_STAT_H */
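
The kstat_snapshot_irqs()/kstat_get_irq_since_snapshot() pair added above lets a watcher measure interrupt deltas without mirroring every counter itself. A hedged sketch of the intended call pattern (both wrapper functions below are hypothetical, assuming CONFIG_GENERIC_IRQ_STAT_SNAPSHOT=y):

/* Start of a watch period: record the current interrupt counts. */
static void example_watch_begin(void)
{
	kstat_snapshot_irqs();
}

/* Later: did the IRQ fire at all since the snapshot was taken? */
static bool example_irq_made_progress(unsigned int irq)
{
	return kstat_get_irq_since_snapshot(irq) > 0;
}
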
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index e985ba679c4a..94ba42bf9da1 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* $Id: kernelcapi.h,v 1.8.6.2 2001/02/07 11:31:31 kai Exp $
*
@@ -9,46 +10,12 @@
#ifndef __KERNELCAPI_H__
#define __KERNELCAPI_H__
-
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <uapi/linux/kernelcapi.h>
-struct capi20_appl {
- u16 applid;
- capi_register_params rparam;
- void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb);
- void *private;
-
- /* internal to kernelcapi.o */
- unsigned long nrecvctlpkt;
- unsigned long nrecvdatapkt;
- unsigned long nsentctlpkt;
- unsigned long nsentdatapkt;
- struct mutex recv_mtx;
- struct sk_buff_head recv_queue;
- struct work_struct recv_work;
- int release_in_progress;
-};
-
-u16 capi20_isinstalled(void);
-u16 capi20_register(struct capi20_appl *ap);
-u16 capi20_release(struct capi20_appl *ap);
-u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
-u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
-u16 capi20_get_version(u32 contr, struct capi_version *verp);
-u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
-u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
-int capi20_manufacturer(unsigned long cmd, void __user *data);
-
-#define CAPICTR_UP 0
-#define CAPICTR_DOWN 1
-
-int register_capictr_notifier(struct notifier_block *nb);
-int unregister_capictr_notifier(struct notifier_block *nb);
-
#define CAPI_NOERROR 0x0000
#define CAPI_TOOMANYAPPLS 0x1001
@@ -75,45 +42,4 @@ int unregister_capictr_notifier(struct notifier_block *nb);
#define CAPI_MSGCTRLERNOTSUPPORTEXTEQUIP 0x110a
#define CAPI_MSGCTRLERONLYSUPPORTEXTEQUIP 0x110b
-typedef enum {
- CapiMessageNotSupportedInCurrentState = 0x2001,
- CapiIllContrPlciNcci = 0x2002,
- CapiNoPlciAvailable = 0x2003,
- CapiNoNcciAvailable = 0x2004,
- CapiNoListenResourcesAvailable = 0x2005,
- CapiNoFaxResourcesAvailable = 0x2006,
- CapiIllMessageParmCoding = 0x2007,
-} RESOURCE_CODING_PROBLEM;
-
-typedef enum {
- CapiB1ProtocolNotSupported = 0x3001,
- CapiB2ProtocolNotSupported = 0x3002,
- CapiB3ProtocolNotSupported = 0x3003,
- CapiB1ProtocolParameterNotSupported = 0x3004,
- CapiB2ProtocolParameterNotSupported = 0x3005,
- CapiB3ProtocolParameterNotSupported = 0x3006,
- CapiBProtocolCombinationNotSupported = 0x3007,
- CapiNcpiNotSupported = 0x3008,
- CapiCipValueUnknown = 0x3009,
- CapiFlagsNotSupported = 0x300a,
- CapiFacilityNotSupported = 0x300b,
- CapiDataLengthNotSupportedByCurrentProtocol = 0x300c,
- CapiResetProcedureNotSupportedByCurrentProtocol = 0x300d,
- CapiTeiAssignmentFailed = 0x300e,
-} REQUESTED_SERVICES_PROBLEM;
-
-typedef enum {
- CapiSuccess = 0x0000,
- CapiSupplementaryServiceNotSupported = 0x300e,
- CapiRequestNotAllowedInThisState = 0x3010,
-} SUPPLEMENTARY_SERVICE_INFO;
-
-typedef enum {
- CapiProtocolErrorLayer1 = 0x3301,
- CapiProtocolErrorLayer2 = 0x3302,
- CapiProtocolErrorLayer3 = 0x3303,
- CapiTimeOut = 0x3303, // SuppServiceReason
- CapiCallGivenToOtherApplication = 0x3304,
-} CAPI_REASON;
-
#endif /* __KERNELCAPI_H__ */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index a9b11b8d06f2..b5a5f32fdfd1 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -1,13 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* kernfs.h - pseudo filesystem decoupled from vfs locking
- *
- * This file is released under the GPLv2.
*/
#ifndef __LINUX_KERNFS_H
#define __LINUX_KERNFS_H
-#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -15,27 +13,94 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/uidgid.h>
#include <linux/wait.h>
+#include <linux/rwsem.h>
+#include <linux/cache.h>
struct file;
struct dentry;
struct iattr;
struct seq_file;
struct vm_area_struct;
+struct vm_operations_struct;
struct super_block;
struct file_system_type;
+struct poll_table_struct;
+struct fs_context;
+struct kernfs_fs_context;
struct kernfs_open_node;
struct kernfs_iattrs;
+/*
+ * NR_KERNFS_LOCK_BITS determines the size (NR_KERNFS_LOCKS) of the
+ * hash table of locks.
+ * A small hash table would hurt scalability, since more and more
+ * kernfs_node objects would end up sharing the same lock, while a
+ * very large hash table would waste memory.
+ *
+ * At the moment the size of the hash table of locks is set based on
+ * the number of CPUs as follows:
+ *
+ * NR_CPU       NR_KERNFS_LOCK_BITS     NR_KERNFS_LOCKS
+ * 1            1                       2
+ * 2-3          2                       4
+ * 4-7          4                       16
+ * 8-15         6                       64
+ * 16-31        8                       256
+ * 32 and more  10                      1024
+ *
+ * This relation between NR_CPU and the number of locks is based on
+ * internal experimentation: booting qemu with different -smp values,
+ * performing sysfs operations on all CPUs and observing how increasing
+ * the number of locks affects the completion time of these operations
+ * on each CPU.
+ */
+#ifdef CONFIG_SMP
+#define NR_KERNFS_LOCK_BITS (2 * (ilog2(NR_CPUS < 32 ? NR_CPUS : 32)))
+#else
+#define NR_KERNFS_LOCK_BITS 1
+#endif
+
+#define NR_KERNFS_LOCKS (1 << NR_KERNFS_LOCK_BITS)
+
+/*
+ * There's one kernfs_open_file for each open file and one kernfs_open_node
+ * for each kernfs_node with one or more open files.
+ *
+ * filp->private_data points to seq_file whose ->private points to
+ * kernfs_open_file.
+ *
+ * kernfs_open_files are chained at kernfs_open_node->files, which is
+ * protected by kernfs_global_locks.open_file_mutex[i].
+ *
+ * To reduce the contention in sysfs access that a single lock would
+ * cause, use an array of locks (e.g. open_file_mutex) and hash the
+ * kernfs_node object address to get the index of the lock to use.
+ *
+ * Hashed mutexes are safe to use here because operations using these don't
+ * rely on global exclusion.
+ *
+ * In future we intend to replace other global locks with hashed ones as well.
+ * kernfs_global_locks acts as a holder for all such hash tables.
+ */
+struct kernfs_global_locks {
+ struct mutex open_file_mutex[NR_KERNFS_LOCKS];
+};
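
/*
 * Hedged sketch of the hashed-lock lookup the comment above describes;
 * the helper name is illustrative (the real lookup lives in kernfs
 * internals, not in this header).  hash_ptr() from <linux/hash.h>
 * spreads kernfs_node addresses evenly, so unrelated nodes rarely
 * share a mutex.
 */
static inline struct mutex *
example_open_file_mutex_ptr(struct kernfs_global_locks *locks,
			    struct kernfs_node *kn)
{
	int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

	return &locks->open_file_mutex[idx];
}
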
+
enum kernfs_node_type {
KERNFS_DIR = 0x0001,
KERNFS_FILE = 0x0002,
KERNFS_LINK = 0x0004,
};
-#define KERNFS_TYPE_MASK 0x000f
-#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+#define KERNFS_TYPE_MASK 0x000f
+#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+#define KERNFS_MAX_USER_XATTRS 128
+#define KERNFS_USER_XATTR_SIZE_LIMIT (128 << 10)
enum kernfs_node_flag {
KERNFS_ACTIVATED = 0x0010,
@@ -43,10 +108,12 @@ enum kernfs_node_flag {
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
+ KERNFS_HIDDEN = 0x0200,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
KERNFS_HAS_RELEASE = 0x2000,
+ KERNFS_REMOVING = 0x4000,
};
/* @flags for kernfs_create_root() */
@@ -60,7 +127,7 @@ enum kernfs_root_flag {
KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
/*
- * For regular flies, if the opener has CAP_DAC_OVERRIDE, open(2)
+ * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
* succeeds regardless of the RW permissions. sysfs had an extra
* layer of enforcement where open(2) fails with -EACCES regardless
* of CAP_DAC_OVERRIDE if the permission doesn't have the
@@ -69,6 +136,22 @@ enum kernfs_root_flag {
* following flag enables that behavior.
*/
KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002,
+
+ /*
+ * The filesystem supports exportfs operation, so userspace can use
+ * fhandle to access nodes of the fs.
+ */
+ KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004,
+
+ /*
+ * Support user xattrs to be written to nodes rooted at this root.
+ */
+ KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008,
+
+ /*
+ * Renames must not change the parent node.
+ */
+ KERNFS_ROOT_INVARIANT_PARENT = 0x0010,
};
/* type-specific structures for kernfs_node union members */
@@ -82,6 +165,11 @@ struct kernfs_elem_dir {
* better directly in kernfs_node but is here to save space.
*/
struct kernfs_root *root;
+ /*
+ * Monotonic revision counter, used to identify if a directory
+ * node has changed during negative dentry revalidation.
+ */
+ unsigned long rev;
};
struct kernfs_elem_symlink {
@@ -90,7 +178,7 @@ struct kernfs_elem_symlink {
struct kernfs_elem_attr {
const struct kernfs_ops *ops;
- struct kernfs_open_node *open;
+ struct kernfs_open_node __rcu *open;
loff_t size;
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
@@ -100,7 +188,7 @@ struct kernfs_elem_attr {
* kernfs node is represented by single kernfs_node. Most fields are
* private to kernfs and shouldn't be accessed directly by kernfs users.
*
- * As long as s_count reference is held, the kernfs_node itself is
+ * As long as a count reference is held, the kernfs_node itself is
* accessible. Dereferencing elem or any other outer entity requires
* active reference.
*/
@@ -116,25 +204,32 @@ struct kernfs_node {
* never moved to a different parent, it is safe to access the
* parent directly.
*/
- struct kernfs_node *parent;
- const char *name;
+ struct kernfs_node __rcu *__parent;
+ const char __rcu *name;
struct rb_node rb;
const void *ns; /* namespace tag */
unsigned int hash; /* ns + name hash */
+ unsigned short flags;
+ umode_t mode;
+
union {
struct kernfs_elem_dir dir;
struct kernfs_elem_symlink symlink;
struct kernfs_elem_attr attr;
};
- void *priv;
+ /*
+ * 64bit unique ID. On 64bit ino setups, id is the ino. On 32bit,
+ * the low 32bits are ino and upper generation.
+ */
+ u64 id;
- unsigned short flags;
- umode_t mode;
- unsigned int ino;
+ void *priv;
struct kernfs_iattrs *iattr;
+
+ struct rcu_head rcu;
};
/*
@@ -145,7 +240,6 @@ struct kernfs_node {
* kernfs_node parameter.
*/
struct kernfs_syscall_ops {
- int (*remount_fs)(struct kernfs_root *root, int *flags, char *data);
int (*show_options)(struct seq_file *sf, struct kernfs_root *root);
int (*mkdir)(struct kernfs_node *parent, const char *name,
@@ -157,20 +251,7 @@ struct kernfs_syscall_ops {
struct kernfs_root *root);
};
-struct kernfs_root {
- /* published fields */
- struct kernfs_node *kn;
- unsigned int flags; /* KERNFS_ROOT_* flags */
-
- /* private fields, do not use outside kernfs proper */
- struct ida ino_ida;
- struct kernfs_syscall_ops *syscall_ops;
-
- /* list of kernfs_super_info of this root, protected by kernfs_mutex */
- struct list_head supers;
-
- wait_queue_head_t deactivate_waitq;
-};
+struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root);
struct kernfs_open_file {
/* published fields */
@@ -238,11 +319,23 @@ struct kernfs_ops {
ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
loff_t off);
+ __poll_t (*poll)(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
+
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
+ loff_t (*llseek)(struct kernfs_open_file *of, loff_t offset, int whence);
+};
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lockdep_key;
-#endif
+/*
+ * The kernfs superblock creation/mount parameter context.
+ */
+struct kernfs_fs_context {
+ struct kernfs_root *root; /* Root of the hierarchy being mounted */
+ void *ns_tag; /* Namespace tag of the mount (or NULL) */
+ unsigned long magic; /* File system specific magic number */
+
+ /* The following are set/used by kernfs_get_tree() */
+ bool new_sb_created; /* Set to T if we allocated a new sb */
};
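
/*
 * Hedged sketch, loosely modelled on sysfs: a kernfs-backed filesystem
 * fills in the kernfs_fs_context hanging off fc->fs_private and lets
 * kernfs_get_tree() build or reuse the superblock.  The filesystem
 * name, root and magic number below are illustrative.
 */
static struct kernfs_root *examplefs_root;	/* hypothetical, set at init */

static int examplefs_get_tree(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;
	int ret;

	kfc->root  = examplefs_root;
	kfc->magic = 0x4558414d;	/* hypothetical magic number */
	ret = kernfs_get_tree(fc);
	if (!ret && kfc->new_sb_created)
		pr_debug("examplefs: created a new superblock\n");
	return ret;
}
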
#ifdef CONFIG_KERNFS
@@ -252,6 +345,34 @@ static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
return kn->flags & KERNFS_TYPE_MASK;
}
+static inline ino_t kernfs_id_ino(u64 id)
+{
+ /* id is ino if ino_t is 64bit; otherwise, low 32bits */
+ if (sizeof(ino_t) >= sizeof(u64))
+ return id;
+ else
+ return (u32)id;
+}
+
+static inline u32 kernfs_id_gen(u64 id)
+{
+ /* gen is fixed at 1 if ino_t is 64bit; otherwise, high 32bits */
+ if (sizeof(ino_t) >= sizeof(u64))
+ return 1;
+ else
+ return id >> 32;
+}
+
+static inline ino_t kernfs_ino(struct kernfs_node *kn)
+{
+ return kernfs_id_ino(kn->id);
+}
+
+static inline u32 kernfs_gen(struct kernfs_node *kn)
+{
+ return kernfs_id_gen(kn->id);
+}
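
/*
 * Worked example of the packing the helpers above decode on a kernel
 * with a 32-bit ino_t: the generation lives in the high 32 bits and
 * the inode number in the low 32 bits of the 64-bit id.
 *
 *	u64 id = ((u64)gen << 32) | ino;
 *
 *	kernfs_id_ino(id) == (u32)id         == ino
 *	kernfs_id_gen(id) == (u32)(id >> 32) == gen
 *
 * With a 64-bit ino_t the id is used as the ino directly and the
 * generation is pinned to 1.
 */
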
+
/**
* kernfs_enable_ns - enable namespace under a directory
* @kn: directory of interest, should be empty
@@ -279,7 +400,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
-int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
+int kernfs_path_from_node(struct kernfs_node *kn_to, struct kernfs_node *kn_from,
char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
@@ -300,15 +421,18 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv);
void kernfs_destroy_root(struct kernfs_root *root);
+unsigned int kernfs_root_flags(struct kernfs_node *kn);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
void *priv, const void *ns);
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
- const char *name,
- umode_t mode, loff_t size,
+ const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
+ loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
struct lock_class_key *key);
@@ -316,6 +440,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
struct kernfs_node *target);
void kernfs_activate(struct kernfs_node *kn);
+void kernfs_show(struct kernfs_node *kn, bool show);
void kernfs_remove(struct kernfs_node *kn);
void kernfs_break_active_protection(struct kernfs_node *kn);
void kernfs_unbreak_active_protection(struct kernfs_node *kn);
@@ -325,17 +450,24 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name, const void *new_ns);
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
void kernfs_notify(struct kernfs_node *kn);
+int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
+ void *value, size_t size);
+int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
+ const void *value, size_t size, int flags);
+
const void *kernfs_super_ns(struct super_block *sb);
-struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created, const void *ns);
+int kernfs_get_tree(struct fs_context *fc);
+void kernfs_free_fs_context(struct fs_context *fc);
void kernfs_kill_sb(struct super_block *sb);
-struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
void kernfs_init(void);
+struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+ u64 id);
#else /* CONFIG_KERNFS */
static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
@@ -388,15 +520,19 @@ kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags,
{ return ERR_PTR(-ENOSYS); }
static inline void kernfs_destroy_root(struct kernfs_root *root) { }
+static inline unsigned int kernfs_root_flags(struct kernfs_node *kn)
+{ return 0; }
static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, void *priv, const void *ns)
+ umode_t mode, kuid_t uid, kgid_t gid,
+ void *priv, const void *ns)
{ return ERR_PTR(-ENOSYS); }
static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size, const struct kernfs_ops *ops,
void *priv, const void *ns, struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }
@@ -425,16 +561,27 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
const struct iattr *iattr)
{ return -ENOSYS; }
+static inline __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt)
+{ return -ENOSYS; }
+
static inline void kernfs_notify(struct kernfs_node *kn) { }
+static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
+ void *value, size_t size)
+{ return -ENOSYS; }
+
+static inline int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
+ const void *value, size_t size, int flags)
+{ return -ENOSYS; }
+
static inline const void *kernfs_super_ns(struct super_block *sb)
{ return NULL; }
-static inline struct dentry *
-kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created, const void *ns)
-{ return ERR_PTR(-ENOSYS); }
+static inline int kernfs_get_tree(struct fs_context *fc)
+{ return -ENOSYS; }
+
+static inline void kernfs_free_fs_context(struct fs_context *fc) { }
static inline void kernfs_kill_sb(struct super_block *sb) { }
@@ -448,10 +595,11 @@ static inline void kernfs_init(void) { }
* @buf: buffer to copy @kn's name into
* @buflen: size of @buf
*
- * Builds and returns the full path of @kn in @buf of @buflen bytes. The
- * path is built from the end of @buf so the returned pointer usually
- * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
- * and %NULL is returned.
+ * If @kn is NULL, the result will be "(null)".
+ *
+ * Returns the length of the full path. If the full length is equal to or
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'. On error, -errno is returned.
*/
static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
{
@@ -474,28 +622,9 @@ static inline struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
void *priv)
{
- return kernfs_create_dir_ns(parent, name, mode, priv, NULL);
-}
-
-static inline struct kernfs_node *
-kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
- void *priv, const void *ns)
-{
- struct lock_class_key *key = NULL;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- key = (struct lock_class_key *)&ops->lockdep_key;
-#endif
- return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
- key);
-}
-
-static inline struct kernfs_node *
-kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
- loff_t size, const struct kernfs_ops *ops, void *priv)
-{
- return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL);
+ return kernfs_create_dir_ns(parent, name, mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ priv, NULL);
}
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
@@ -511,13 +640,4 @@ static inline int kernfs_rename(struct kernfs_node *kn,
return kernfs_rename_ns(kn, new_parent, new_name, NULL);
}
-static inline struct dentry *
-kernfs_mount(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created)
-{
- return kernfs_mount_ns(fs_type, flags, root,
- magic, new_sb_created, NULL);
-}
-
#endif /* __LINUX_KERNFS_H */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 2b7590f5483a..ff7e231b0485 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_KEXEC_H
#define LINUX_KEXEC_H
@@ -14,17 +15,28 @@
#if !defined(__ASSEMBLY__)
-#include <linux/crash_core.h>
+#include <linux/vmcore_info.h>
+#include <linux/crash_reserve.h>
#include <asm/io.h>
+#include <linux/range.h>
#include <uapi/linux/kexec.h>
+#include <linux/verification.h>
+
+extern note_buf_t __percpu *crash_notes;
+
+#ifdef CONFIG_CRASH_DUMP
+#include <linux/prandom.h>
+#endif
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/compat.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/highmem.h>
#include <asm/kexec.h>
+#include <linux/crash_core.h>
/* Verify architecture specific macros are defined */
@@ -60,8 +72,6 @@
#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
#endif
-#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME
-
/*
* This structure is used to hold the arguments that are used when loading
* kernel binaries.
@@ -69,6 +79,12 @@
typedef unsigned long kimage_entry_t;
+/*
+ * This is a copy of the UAPI struct kexec_segment and must be identical
+ * to it because it gets copied straight from user space into kernel
+ * memory. Do not modify this structure unless you change the way segments
+ * get ingested from user space.
+ */
struct kexec_segment {
/*
* This pointer can point to user memory if kexec_load() system
@@ -98,21 +114,25 @@ struct compat_kexec_segment {
#ifdef CONFIG_KEXEC_FILE
struct purgatory_info {
- /* Pointer to elf header of read only purgatory */
- Elf_Ehdr *ehdr;
-
- /* Pointer to purgatory sechdrs which are modifiable */
+ /*
+ * Pointer to elf header at the beginning of kexec_purgatory.
+ * Note: kexec_purgatory is read only
+ */
+ const Elf_Ehdr *ehdr;
+ /*
+ * Temporary, modifiable buffer for sechdrs used for relocation.
+ * This memory can be freed post image load.
+ */
Elf_Shdr *sechdrs;
/*
- * Temporary buffer location where purgatory is loaded and relocated
- * This memory can be freed post image load
+ * Temporary, modifiable buffer for stripped purgatory used for
+ * relocation. This memory can be freed post image load.
*/
void *purgatory_buf;
-
- /* Address where purgatory is finally loaded and is executed from */
- unsigned long purgatory_load_addr;
};
+struct kimage;
+
typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
unsigned long kernel_len, char *initrd,
@@ -120,7 +140,7 @@ typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
unsigned long cmdline_len);
typedef int (kexec_cleanup_t)(void *loader_data);
-#ifdef CONFIG_KEXEC_VERIFY_SIG
+#ifdef CONFIG_KEXEC_SIG
typedef int (kexec_verify_sig_t)(const char *kernel_buf,
unsigned long kernel_len);
#endif
@@ -129,11 +149,25 @@ struct kexec_file_ops {
kexec_probe_t *probe;
kexec_load_t *load;
kexec_cleanup_t *cleanup;
-#ifdef CONFIG_KEXEC_VERIFY_SIG
+#ifdef CONFIG_KEXEC_SIG
kexec_verify_sig_t *verify_sig;
#endif
};
+extern const struct kexec_file_ops * const kexec_file_loaders[];
+
+int kexec_image_probe_default(struct kimage *image, void *buf,
+ unsigned long buf_len);
+int kexec_image_post_load_cleanup_default(struct kimage *image);
+
+/*
+ * If kexec_buf.mem is set to this value, kexec_locate_mem_hole()
+ * will try to allocate free memory. Arch may overwrite it.
+ */
+#ifndef KEXEC_BUF_MEM_UNKNOWN
+#define KEXEC_BUF_MEM_UNKNOWN 0
+#endif
+
/**
* struct kexec_buf - parameters for finding a place for a buffer in memory
* @image: kexec image in which memory to search.
@@ -144,7 +178,9 @@ struct kexec_file_ops {
* @buf_align: Minimum alignment needed.
* @buf_min: The buffer can't be placed below this address.
* @buf_max: The buffer can't be placed above this address.
+ * @cma: CMA page if the buffer is backed by CMA.
* @top_down: Allocate from top of memory.
+ * @random: Place the buffer at a random position.
*/
struct kexec_buf {
struct kimage *image;
@@ -155,15 +191,151 @@ struct kexec_buf {
unsigned long buf_align;
unsigned long buf_min;
unsigned long buf_max;
+ struct page *cma;
bool top_down;
+#ifdef CONFIG_CRASH_DUMP
+ bool random;
+#endif
};
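
/*
 * Hedged sketch of typical kexec_buf usage by an image loader (the
 * function is hypothetical; field values are illustrative).  Setting
 * .mem to KEXEC_BUF_MEM_UNKNOWN asks kexec_add_buffer() to find a
 * hole itself; on success the chosen address is left in kbuf.mem.
 */
static int example_load_blob(struct kimage *image, void *blob,
			     unsigned long len)
{
	struct kexec_buf kbuf = {
		.image     = image,
		.buffer    = blob,
		.bufsz     = len,
		.memsz     = len,
		.buf_align = PAGE_SIZE,
		.buf_min   = 0,
		.buf_max   = ULONG_MAX,
		.mem       = KEXEC_BUF_MEM_UNKNOWN,
		.top_down  = true,
	};

	return kexec_add_buffer(&kbuf);
}
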
-int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(u64, u64, void *));
+
+#ifdef CONFIG_CRASH_DUMP
+static inline void kexec_random_range_start(unsigned long start,
+ unsigned long end,
+ struct kexec_buf *kbuf,
+ unsigned long *temp_start)
+{
+ unsigned short i;
+
+ if (kbuf->random) {
+ get_random_bytes(&i, sizeof(unsigned short));
+ *temp_start = start + (end - start) / USHRT_MAX * i;
+ }
+}
+#else
+static inline void kexec_random_range_start(unsigned long start,
+ unsigned long end,
+ struct kexec_buf *kbuf,
+ unsigned long *temp_start)
+{}
+#endif
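
/*
 * Worked example of the randomization above: the 16-bit random value i
 * selects one of the evenly spaced offsets within [start, end), with a
 * step of (end - start) / USHRT_MAX.  With start = 16M and end = 32M:
 *
 *	(end - start) / USHRT_MAX = 16777216 / 65535 = 256 bytes
 *
 * so i == 0 keeps *temp_start at start, and i == USHRT_MAX lands 256
 * bytes short of end (the integer division rounds the step down).
 */
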
+
+int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf);
+int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
+ void *buf, unsigned int size,
+ bool get_value);
+void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+
+#ifndef arch_kexec_kernel_image_probe
+static inline int
+arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len)
+{
+ return kexec_image_probe_default(image, buf, buf_len);
+}
+#endif
+
+#ifndef arch_kimage_file_post_load_cleanup
+static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ return kexec_image_post_load_cleanup_default(image);
+}
+#endif
+
+#ifndef arch_check_excluded_range
+static inline int arch_check_excluded_range(struct kimage *image,
+ unsigned long start,
+ unsigned long end)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_KEXEC_SIG
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
+#endif
+#endif
+
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+
+#ifndef arch_kexec_locate_mem_hole
+/**
+ * arch_kexec_locate_mem_hole - Find free memory to place the segments.
+ * @kbuf: Parameters for the memory search.
+ *
+ * On success, kbuf->mem will have the start address of the memory region found.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+ return kexec_locate_mem_hole(kbuf);
+}
+#endif
+
+#ifndef arch_kexec_apply_relocations_add
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi: Purgatory to be relocated.
+ * @section: Section the relocations apply to.
+ * @relsec: Section containing RELAs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("RELA relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
+
+#ifndef arch_kexec_apply_relocations
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi: Purgatory to be relocated.
+ * @section: Section the relocations apply to.
+ * @relsec: Section containing RELs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("REL relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
#endif /* CONFIG_KEXEC_FILE */
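
/*
 * Hedged sketch of the override pattern used by the arch_* hooks above
 * (the hook name here is hypothetical).  The generic header provides a
 * static inline default guarded by #ifndef; an architecture supplies
 * its own version in <asm/kexec.h>, which is included first, so the
 * #define is seen before the #ifndef and suppresses the fallback:
 */

/* generic header */
#ifndef arch_example_hook
static inline int arch_example_hook(struct kimage *image)
{
	return 0;			/* default: nothing to do */
}
#endif

/* <asm/kexec.h> on an architecture that overrides the hook */
int arch_example_hook(struct kimage *image);
#define arch_example_hook arch_example_hook
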
+#ifdef CONFIG_KEXEC_ELF
+struct kexec_elf_info {
+ /*
+ * Where the ELF binary contents are kept.
+ * Memory managed by the user of the struct.
+ */
+ const char *buffer;
+
+ const struct elfhdr *ehdr;
+ const struct elf_phdr *proghdrs;
+};
+
+int kexec_build_elf_info(const char *buf, size_t len, struct elfhdr *ehdr,
+ struct kexec_elf_info *elf_info);
+
+int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
+ struct kexec_elf_info *elf_info,
+ struct kexec_buf *kbuf,
+ unsigned long *lowest_load_addr);
+
+void kexec_free_elf_info(struct kexec_elf_info *elf_info);
+int kexec_elf_probe(const char *buf, unsigned long len);
+#endif
struct kimage {
kimage_entry_t head;
kimage_entry_t *entry;
@@ -176,6 +348,7 @@ struct kimage {
unsigned long nr_segments;
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
+ struct page *segment_cma[KEXEC_SEGMENT_MAX];
struct list_head control_pages;
struct list_head dest_pages;
@@ -191,6 +364,13 @@ struct kimage {
unsigned int preserve_context : 1;
/* If set, we are using file mode kexec syscall */
unsigned int file_mode:1;
+#ifdef CONFIG_CRASH_HOTPLUG
+ /* If set, it is safe to update kexec segments that are
+ * excluded from SHA calculation.
+ */
+ unsigned int hotplug_support:1;
+#endif
+ unsigned int no_cma:1;
#ifdef ARCH_HAS_KIMAGE_ARCH
struct kimage_arch arch;
@@ -208,45 +388,66 @@ struct kimage {
unsigned long cmdline_buf_len;
/* File operations provided by image loader */
- struct kexec_file_ops *fops;
+ const struct kexec_file_ops *fops;
/* Image loader handling the kernel can store a pointer here */
void *image_loader_data;
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
+
+ /* Force carrying over the DTB from the current boot */
+ bool force_dtb;
+#endif
+
+#ifdef CONFIG_CRASH_HOTPLUG
+ int hp_action;
+ int elfcorehdr_index;
+ bool elfcorehdr_updated;
#endif
+
+#ifdef CONFIG_IMA_KEXEC
+ /* Virtual address of IMA measurement buffer for kexec syscall */
+ void *ima_buffer;
+
+ phys_addr_t ima_buffer_addr;
+ size_t ima_buffer_size;
+
+ unsigned long ima_segment_index;
+ bool is_ima_segment_index_set;
+#endif
+
+ struct {
+ struct kexec_segment *scratch;
+ phys_addr_t fdt;
+ } kho;
+
+ /* Core ELF header buffer */
+ void *elf_headers;
+ unsigned long elf_headers_sz;
+ unsigned long elf_load_addr;
+
+ /* dm crypt keys buffer */
+ unsigned long dm_crypt_keys_addr;
+ unsigned long dm_crypt_keys_sz;
};
/* kexec interface functions */
extern void machine_kexec(struct kimage *image);
extern int machine_kexec_prepare(struct kimage *image);
extern void machine_kexec_cleanup(struct kimage *image);
-extern asmlinkage long sys_kexec_load(unsigned long entry,
- unsigned long nr_segments,
- struct kexec_segment __user *segments,
- unsigned long flags);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
-extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
- unsigned long max, int top_down,
- unsigned long *load_addr);
-extern int kexec_purgatory_get_set_symbol(struct kimage *image,
- const char *name, void *buf,
- unsigned int size, bool get_value);
-extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
- const char *name);
-extern void __crash_kexec(struct pt_regs *);
-extern void crash_kexec(struct pt_regs *);
-int kexec_should_crash(struct task_struct *);
-int kexec_crash_loaded(void);
-void crash_save_cpu(struct pt_regs *regs, int cpu);
-extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
+
+#ifndef machine_kexec_post_load
+static inline int machine_kexec_post_load(struct kimage *image) { return 0; }
+#endif
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
-extern int kexec_load_disabled;
+
+bool kexec_load_permitted(int kexec_image_type);
#ifndef kexec_flush_icache_page
#define kexec_flush_icache_page(page)
@@ -254,41 +455,20 @@ extern int kexec_load_disabled;
/* List of defined/legal kexec flags */
#ifndef CONFIG_KEXEC_JUMP
-#define KEXEC_FLAGS KEXEC_ON_CRASH
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR | KEXEC_CRASH_HOTPLUG_SUPPORT)
#else
-#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR | \
+ KEXEC_CRASH_HOTPLUG_SUPPORT)
#endif
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
- KEXEC_FILE_NO_INITRAMFS)
-
-/* Location of a reserved region to hold the crash kernel.
- */
-extern struct resource crashk_res;
-extern struct resource crashk_low_res;
-extern note_buf_t __percpu *crash_notes;
+ KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG | \
+ KEXEC_FILE_NO_CMA | KEXEC_FILE_FORCE_DTB)
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
-int crash_shrink_memory(unsigned long new_size);
-size_t crash_get_memory_size(void);
-void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-
-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len);
-void * __weak arch_kexec_kernel_image_load(struct kimage *image);
-int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
-int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
- unsigned long buf_len);
-int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
- Elf_Shdr *sechdrs, unsigned int relsec);
-int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
- unsigned int relsec);
-void arch_kexec_protect_crashkres(void);
-void arch_kexec_unprotect_crashkres(void);
-
#ifndef page_to_boot_pfn
static inline unsigned long page_to_boot_pfn(struct page *page)
{
@@ -317,6 +497,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
}
#endif
+#ifndef crash_free_reserved_phys_range
+static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE)
+ free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
+}
+#endif
+
static inline unsigned long virt_to_boot_phys(void *addr)
{
return phys_to_boot_phys(__pa((unsigned long)addr));
@@ -335,16 +525,33 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
#endif
+extern bool kexec_file_dbg_print;
+
+#define kexec_dprintk(fmt, arg...) \
+ do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
+
+extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
+struct kimage;
static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
+#ifdef CONFIG_KEXEC_SIG
+void set_kexec_sig_enforced(void);
+#else
+static inline void set_kexec_sig_enforced(void) {}
+#endif
+
#endif /* !defined(__ASSEMBLY__) */
#endif /* LINUX_KEXEC_H */
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
new file mode 100644
index 000000000000..5f7b9de97e8d
--- /dev/null
+++ b/include/linux/kexec_handover.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_KEXEC_HANDOVER_H
+#define LINUX_KEXEC_HANDOVER_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct kho_scratch {
+ phys_addr_t addr;
+ phys_addr_t size;
+};
+
+struct folio;
+struct page;
+
+#define DECLARE_KHOSER_PTR(name, type) \
+ union { \
+ phys_addr_t phys; \
+ type ptr; \
+ } name
+#define KHOSER_STORE_PTR(dest, val) \
+ ({ \
+ typeof(val) v = val; \
+ typecheck(typeof((dest).ptr), v); \
+ (dest).phys = virt_to_phys(v); \
+ })
+#define KHOSER_LOAD_PTR(src) \
+ ({ \
+ typeof(src) s = src; \
+ (typeof((s).ptr))((s).phys ? phys_to_virt((s).phys) : NULL); \
+ })
+
+struct kho_vmalloc_chunk;
+struct kho_vmalloc {
+ DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *);
+ unsigned int total_pages;
+ unsigned short flags;
+ unsigned short order;
+};
+
+#ifdef CONFIG_KEXEC_HANDOVER
+bool kho_is_enabled(void);
+bool is_kho_boot(void);
+
+int kho_preserve_folio(struct folio *folio);
+void kho_unpreserve_folio(struct folio *folio);
+int kho_preserve_pages(struct page *page, unsigned int nr_pages);
+void kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
+int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
+void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
+void *kho_alloc_preserve(size_t size);
+void kho_unpreserve_free(void *mem);
+void kho_restore_free(void *mem);
+struct folio *kho_restore_folio(phys_addr_t phys);
+struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
+void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
+int kho_add_subtree(const char *name, void *fdt);
+void kho_remove_subtree(void *fdt);
+int kho_retrieve_subtree(const char *name, phys_addr_t *phys);
+
+void kho_memory_init(void);
+
+void kho_populate(phys_addr_t fdt_phys, u64 fdt_len, phys_addr_t scratch_phys,
+ u64 scratch_len);
+#else
+static inline bool kho_is_enabled(void)
+{
+ return false;
+}
+
+static inline bool is_kho_boot(void)
+{
+ return false;
+}
+
+static inline int kho_preserve_folio(struct folio *folio)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_unpreserve_folio(struct folio *folio) { }
+
+static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_unpreserve_pages(struct page *page, unsigned int nr_pages) { }
+
+static inline int kho_preserve_vmalloc(void *ptr,
+ struct kho_vmalloc *preservation)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation) { }
+
+static inline void *kho_alloc_preserve(size_t size)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void kho_unpreserve_free(void *mem) { }
+static inline void kho_restore_free(void *mem) { }
+
+static inline struct folio *kho_restore_folio(phys_addr_t phys)
+{
+ return NULL;
+}
+
+static inline struct page *kho_restore_pages(phys_addr_t phys,
+ unsigned int nr_pages)
+{
+ return NULL;
+}
+
+static inline void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
+{
+ return NULL;
+}
+
+static inline int kho_add_subtree(const char *name, void *fdt)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_remove_subtree(void *fdt) { }
+
+static inline int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_memory_init(void) { }
+
+static inline void kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
+ phys_addr_t scratch_phys, u64 scratch_len)
+{
+}
+#endif /* CONFIG_KEXEC_HANDOVER */
+
+#endif /* LINUX_KEXEC_HANDOVER_H */
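
The KHOSER_* helpers above exist because virtual mappings do not survive kexec: preserved data structures must link to each other through physical addresses and convert back after handover. A hedged usage sketch (the structure and function names are illustrative):

struct example_node {
	DECLARE_KHOSER_PTR(next, struct example_node *);
	int payload;
};

/* Before kexec: record the link as a physical address. */
static void example_link(struct example_node *a, struct example_node *b)
{
	KHOSER_STORE_PTR(a->next, b);		/* stores virt_to_phys(b) */
}

/* After handover: convert back to a usable pointer (or NULL). */
static struct example_node *example_follow(struct example_node *a)
{
	return KHOSER_LOAD_PTR(a->next);
}
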
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 9520fc3c3b9a..bb97bd3e5af4 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Definitions for key type implementations
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_KEY_TYPE_H
@@ -17,14 +13,8 @@
#ifdef CONFIG_KEYS
-/*
- * key under-construction record
- * - passed to the request_key actor if supplied
- */
-struct key_construction {
- struct key *key; /* key being constructed */
- struct key *authkey;/* authorisation for key being constructed */
-};
+struct kernel_pkey_query;
+struct kernel_pkey_params;
/*
* Pre-parsed payload, used by key add, update and instantiate.
@@ -39,16 +29,16 @@ struct key_construction {
* clear the contents.
*/
struct key_preparsed_payload {
+ const char *orig_description; /* Actual or proposed description (maybe NULL) */
char *description; /* Proposed key description (or NULL) */
union key_payload payload; /* Proposed payload */
const void *data; /* Raw data */
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
- time_t expiry; /* Expiry time of key */
+ time64_t expiry; /* Expiry time of key */
} __randomize_layout;
-typedef int (*request_key_actor_t)(struct key_construction *key,
- const char *op, void *aux);
+typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
/*
* Preparsed matching criterion.
@@ -81,6 +71,10 @@ struct key_type {
*/
size_t def_datalen;
+ unsigned int flags;
+#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
+#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */
+
/* vet a description */
int (*vet_description)(const char *description);
@@ -113,11 +107,14 @@ struct key_type {
*/
int (*match_preparse)(struct key_match_data *match_data);
- /* Free preparsed match data (optional). This should be supplied it
- * ->match_preparse() is supplied. */
+ /*
+ * Free preparsed match data (optional). This should be supplied if
+ * ->match_preparse() is supplied.
+ */
void (*match_free)(struct key_match_data *match_data);
- /* clear some of the data from a key on revokation (optional)
+ /*
+ * Clear some of the data from a key on revocation (optional).
* - the key's semaphore will be write-locked by the caller
*/
void (*revoke)(struct key *key);
@@ -135,7 +132,7 @@ struct key_type {
* much is copied into the buffer
* - shouldn't do the copy if the buffer is NULL
*/
- long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+ long (*read)(const struct key *key, char *buffer, size_t buflen);
/* handle request_key() for this type instead of invoking
* /sbin/request-key (optional)
@@ -155,6 +152,14 @@ struct key_type {
*/
struct key_restriction *(*lookup_restriction)(const char *params);
+ /* Asymmetric key accessor functions. */
+ int (*asym_query)(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info);
+ int (*asym_eds_op)(struct kernel_pkey_params *params,
+ const void *in, void *out);
+ int (*asym_verify_signature)(struct kernel_pkey_params *params,
+ const void *in, const void *in2);
+
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */
@@ -170,20 +175,20 @@ extern int key_instantiate_and_link(struct key *key,
const void *data,
size_t datalen,
struct key *keyring,
- struct key *instkey);
+ struct key *authkey);
extern int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
- struct key *instkey);
-extern void complete_request_key(struct key_construction *cons, int error);
+ struct key *authkey);
+extern void complete_request_key(struct key *authkey, int error);
static inline int key_negate_and_link(struct key *key,
unsigned timeout,
struct key *keyring,
- struct key *instkey)
+ struct key *authkey)
{
- return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey);
+ return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
}
extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
diff --git a/include/linux/key.h b/include/linux/key.h
index 044114185120..81b8f05c6898 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -1,14 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Authentication token and access key management
*
* Copyright (C) 2004, 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- *
* See Documentation/security/keys/core.rst for information on keys/keyrings.
*/
@@ -24,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/assoc_array.h>
#include <linux/refcount.h>
+#include <linux/time64.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
@@ -35,6 +31,7 @@ typedef int32_t key_serial_t;
typedef uint32_t key_perm_t;
struct key;
+struct net;
#ifdef CONFIG_KEYS
@@ -74,6 +71,29 @@ struct key;
#define KEY_PERM_UNDEF 0xffffffff
+/*
+ * The permissions required on a key that we're looking up.
+ */
+enum key_need_perm {
+ KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */
+ KEY_NEED_VIEW, /* Require permission to view attributes */
+ KEY_NEED_READ, /* Require permission to read content */
+ KEY_NEED_WRITE, /* Require permission to update / modify */
+ KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */
+ KEY_NEED_LINK, /* Require permission to link */
+ KEY_NEED_SETATTR, /* Require permission to change attributes */
+ KEY_NEED_UNLINK, /* Require permission to unlink key */
+ KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */
+ KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */
+ KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */
+};
+
+enum key_lookup_flag {
+ KEY_LOOKUP_CREATE = 0x01,
+ KEY_LOOKUP_PARTIAL = 0x02,
+ KEY_LOOKUP_ALL = (KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL),
+};
+
struct seq_file;
struct user_struct;
struct signal_struct;
@@ -81,13 +101,34 @@ struct cred;
struct key_type;
struct key_owner;
+struct key_tag;
struct keyring_list;
struct keyring_name;
+struct key_tag {
+ struct rcu_head rcu;
+ refcount_t usage;
+ bool removed; /* T when subject removed */
+};
+
struct keyring_index_key {
+ /* [!] If this structure is altered, the union in struct key must change too! */
+ unsigned long hash; /* Hash value */
+ union {
+ struct {
+#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
+ u16 desc_len;
+ char desc[sizeof(long) - 2]; /* First few chars of description */
+#else
+ char desc[sizeof(long) - 2]; /* First few chars of description */
+ u16 desc_len;
+#endif
+ };
+ unsigned long x;
+ };
struct key_type *type;
+ struct key_tag *domain_tag; /* Domain of operation */
const char *description;
- size_t desc_len;
};
union key_payload {
@@ -138,6 +179,11 @@ struct key_restriction {
struct key_type *keytype;
};
+enum key_state {
+ KEY_IS_UNINSTANTIATED,
+ KEY_IS_POSITIVE, /* Positively instantiated */
+};
+
/*****************************************************************************/
/*
* authentication token / access credential / keyring
@@ -153,14 +199,17 @@ struct key {
struct list_head graveyard_link;
struct rb_node serial_node;
};
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ struct watch_list *watchers; /* Entities watching this key for changes */
+#endif
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
union {
- time_t expiry; /* time at which key expires (or 0) */
- time_t revoked_at; /* time at which key was revoked */
+ time64_t expiry; /* time at which key expires (or 0) */
+ time64_t revoked_at; /* time at which key was revoked */
};
- time_t last_used_at; /* last time used for LRU keyring discard */
+ time64_t last_used_at; /* last time used for LRU keyring discard */
kuid_t uid;
kgid_t gid;
key_perm_t perm; /* access permissions */
@@ -169,6 +218,7 @@ struct key {
* - may not match RCU dereferenced payload
* - payload should contain own length
*/
+ short state; /* Key state (+) or rejection error (-) */
#ifdef KEY_DEBUGGING
unsigned magic;
@@ -176,17 +226,17 @@ struct key {
#endif
unsigned long flags; /* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */
-#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */
-#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
-#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */
-#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
+#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */
+#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
+#define KEY_FLAG_USER_ALIVE 10 /* set if final put has not happened on key yet */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -196,7 +246,10 @@ struct key {
union {
struct keyring_index_key index_key;
struct {
+ unsigned long hash;
+ unsigned long len_desc;
struct key_type *type; /* type of key */
+ struct key_tag *domain_tag; /* Domain of operation */
char *description;
};
};
@@ -212,7 +265,6 @@ struct key {
struct list_head name_link;
struct assoc_array keys;
};
- int reject_error;
};
/* This is set on a keyring to restrict the addition of a link to a key
@@ -243,10 +295,14 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
+#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
+#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
extern void key_put(struct key *key);
+extern bool key_put_tag(struct key_tag *tag);
+extern void key_remove_domain(struct key_tag *domain_tag);
static inline struct key *__key_get(struct key *key)
{
@@ -264,31 +320,81 @@ static inline void key_ref_put(key_ref_t key_ref)
key_put(key_ref_to_ptr(key_ref));
}
-extern struct key *request_key(struct key_type *type,
- const char *description,
- const char *callout_info);
+extern struct key *request_key_tag(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const char *callout_info);
+
+extern struct key *request_key_rcu(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag);
extern struct key *request_key_with_auxdata(struct key_type *type,
const char *description,
+ struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux);
-extern struct key *request_key_async(struct key_type *type,
- const char *description,
- const void *callout_info,
- size_t callout_len);
+/**
+ * request_key - Request a key and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key_tag(), but with the default global domain tag.
+ */
+static inline struct key *request_key(struct key_type *type,
+ const char *description,
+ const char *callout_info)
+{
+ return request_key_tag(type, description, NULL, callout_info);
+}
+
+#ifdef CONFIG_NET
+/**
+ * request_key_net - Request a key for a net namespace and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @net: The network namespace that is the key's domain of operation.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key() except that it does not add the returned key to a
+ * keyring if found, new keys are always allocated in the user's quota, the
+ * callout_info must be a NUL-terminated string and no auxiliary data can be
+ * passed. Only keys that operate in the specified network namespace are used.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
+ */
+#define request_key_net(type, description, net, callout_info) \
+ request_key_tag(type, description, net->key_domain, callout_info)
-extern struct key *request_key_async_with_auxdata(struct key_type *type,
- const char *description,
- const void *callout_info,
- size_t callout_len,
- void *aux);
+/**
+ * request_key_net_rcu - Request a key for a net namespace under RCU conditions
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @net: The network namespace that is the key's domain of operation.
+ *
+ * As for request_key_rcu() except that only keys that operate in the
+ * specified network namespace are used.
+ */
+#define request_key_net_rcu(type, description, net) \
+ request_key_rcu(type, description, net->key_domain)
+#endif /* CONFIG_NET */
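
/*
 * Hedged usage sketch for request_key_net(): look up a key scoped to
 * the caller's network namespace.  key_type_dns_resolver is a real key
 * type (declared in <keys/dns_resolver-type.h>), but the wrapper below
 * is illustrative.
 */
static struct key *example_lookup_dns(struct net *net, const char *name)
{
	return request_key_net(&key_type_dns_resolver, name, net, NULL);
}
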
extern int wait_for_key_construction(struct key *key, bool intr);
extern int key_validate(const struct key *key);
+extern key_ref_t key_create(key_ref_t keyring,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ key_perm_t perm,
+ unsigned long flags);
+
extern key_ref_t key_create_or_update(key_ref_t keyring,
const char *type,
const char *description,
@@ -304,6 +410,11 @@ extern int key_update(key_ref_t key,
extern int key_link(struct key *keyring,
struct key *key);
+extern int key_move(struct key *key,
+ struct key *from_keyring,
+ struct key *to_keyring,
+ unsigned int flags);
+
extern int key_unlink(struct key *keyring,
struct key *key);
@@ -323,10 +434,8 @@ extern int keyring_clear(struct key *keyring);
extern key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
- const char *description);
-
-extern int keyring_add_key(struct key *keyring,
- struct key *key);
+ const char *description,
+ bool recurse);
extern int keyring_restrict(key_ref_t keyring, const char *type,
const char *restriction);
@@ -340,28 +449,31 @@ static inline key_serial_t key_serial(const struct key *key)
extern void key_set_timeout(struct key *, unsigned);
-/*
- * The permissions required on a key that we're looking up.
- */
-#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */
-#define KEY_NEED_READ 0x02 /* Require permission to read content */
-#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */
-#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */
-#define KEY_NEED_LINK 0x10 /* Require permission to link */
-#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
-#define KEY_NEED_ALL 0x3f /* All the above permissions */
+extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
+ enum key_need_perm need_perm);
+extern void key_free_user_ns(struct user_namespace *);
+
+static inline short key_read_state(const struct key *key)
+{
+ /* Barrier versus mark_key_instantiated(). */
+ return smp_load_acquire(&key->state);
+}
/**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
* @key: The key to check.
*
* Return true if the specified key has been positively instantiated, false
* otherwise.
*/
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
{
- return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
- !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ return key_read_state(key) == KEY_IS_POSITIVE;
+}
+
+static inline bool key_is_negative(const struct key *key)
+{
+ return key_read_state(key) < 0;
}
#define dereference_key_rcu(KEY) \
@@ -376,15 +488,12 @@ do { \
rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \
} while (0)
-#ifdef CONFIG_SYSCTL
-extern struct ctl_table key_sysctls[];
-#endif
/*
* the userspace interface
*/
extern int install_thread_keyring_to_cred(struct cred *cred);
-extern void key_fsuid_changed(struct task_struct *tsk);
-extern void key_fsgid_changed(struct task_struct *tsk);
+extern void key_fsuid_changed(struct cred *new_cred);
+extern void key_fsgid_changed(struct cred *new_cred);
extern void key_init(void);
#else /* CONFIG_KEYS */
@@ -399,9 +508,12 @@ extern void key_init(void);
#define make_key_ref(k, p) NULL
#define key_ref_to_ptr(k) NULL
#define is_key_possessed(k) 0
-#define key_fsuid_changed(t) do { } while(0)
-#define key_fsgid_changed(t) do { } while(0)
+#define key_fsuid_changed(c) do { } while(0)
+#define key_fsgid_changed(c) do { } while(0)
#define key_init() do { } while(0)
+#define key_free_user_ns(ns) do { } while(0)
+#define key_remove_domain(d) do { } while(0)
+#define key_lookup(k) NULL
#endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */
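
By way of illustration — not part of this patch — a caller of the new net-namespace helpers and state accessors might look like the sketch below; example_lookup(), the description string, and the use of key_type_user (from <keys/user-type.h>) are assumptions:

static int example_lookup(struct net *net)
{
        struct key *key;

        /* Request a key whose domain of operation is @net. */
        key = request_key_net(&key_type_user, "example:server", net, NULL);
        if (IS_ERR(key))
                return PTR_ERR(key);

        if (!key_is_positive(key)) {    /* negatively instantiated */
                key_put(key);
                return -ENOKEY;
        }

        /* ... read the payload under rcu_read_lock() or key->sem ... */
        key_put(key);
        return 0;
}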
diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h
index 131ed5146521..73d11e4090cf 100644
--- a/include/linux/keyboard.h
+++ b/include/linux/keyboard.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KEYBOARD_H
#define __LINUX_KEYBOARD_H
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
new file mode 100644
index 000000000000..5b79847207ef
--- /dev/null
+++ b/include/linux/keyctl.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* keyctl kernel bits
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef __LINUX_KEYCTL_H
+#define __LINUX_KEYCTL_H
+
+#include <uapi/linux/keyctl.h>
+
+struct kernel_pkey_query {
+ __u32 supported_ops; /* Which ops are supported */
+ __u32 key_size; /* Size of the key in bits */
+ __u16 max_data_size; /* Maximum size of raw data to sign in bytes */
+ __u16 max_sig_size; /* Maximum size of signature in bytes */
+ __u16 max_enc_size; /* Maximum size of encrypted blob in bytes */
+ __u16 max_dec_size; /* Maximum size of decrypted blob in bytes */
+};
+
+enum kernel_pkey_operation {
+ kernel_pkey_encrypt,
+ kernel_pkey_decrypt,
+ kernel_pkey_sign,
+ kernel_pkey_verify,
+};
+
+struct kernel_pkey_params {
+ struct key *key;
+ const char *encoding; /* Encoding (eg. "oaep" or "raw" for none) */
+ const char *hash_algo; /* Digest algorithm used (eg. "sha1") or NULL if N/A */
+ char *info; /* Modified info string to be released later */
+ __u32 in_len; /* Input data size */
+ union {
+ __u32 out_len; /* Output buffer size (enc/dec/sign) */
+ __u32 in2_len; /* 2nd input data size (verify) */
+ };
+ enum kernel_pkey_operation op : 8;
+};
+
+#endif /* __LINUX_KEYCTL_H */
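
A hedged sketch of how these types are meant to be used, assuming the ->asym_query() operation that the companion patches add to struct key_type; all names here are illustrative rather than a stable API:

static int example_pkey_query(struct key *key)
{
        struct kernel_pkey_query info;
        struct kernel_pkey_params params = {
                .key      = key,
                .encoding = "pkcs1",    /* illustrative encoding */
        };
        int ret;

        if (!key->type->asym_query)
                return -ENOPKG;
        ret = key->type->asym_query(&params, &info);
        if (ret < 0)
                return ret;

        /* info.max_sig_size now bounds the signature buffer we must allocate. */
        return info.max_sig_size;
}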
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
new file mode 100644
index 000000000000..0ad1ddbb8b99
--- /dev/null
+++ b/include/linux/kfence.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
+ * handler integration. For more info see Documentation/dev-tools/kfence.rst.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef _LINUX_KFENCE_H
+#define _LINUX_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KFENCE
+
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
+extern unsigned long kfence_sample_interval;
+
+/*
+ * We allocate an even number of pages, as it simplifies calculations to map
+ * address to metadata indices; effectively, the very first page serves as an
+ * extended guard page, but otherwise has no special purpose.
+ */
+#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
+extern char *__kfence_pool;
+
+DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
+extern atomic_t kfence_allocation_gate;
+
+/**
+ * is_kfence_address() - check if an address belongs to KFENCE pool
+ * @addr: address to check
+ *
+ * Return: true or false depending on whether the address is within the KFENCE
+ * object range.
+ *
+ * KFENCE objects live in a separate page range and are not to be intermixed
+ * with regular heap objects (e.g. KFENCE objects must never be added to the
+ * allocator freelists). Failing to do so will result in heap
+ * corruption; therefore is_kfence_address() must be used to check whether
+ * an object requires specific handling.
+ *
+ * Note: This function may be used in fast-paths, and is performance critical.
+ * Future changes should take this into account; for instance, we want to avoid
+ * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
+ * constant (until immediate patching support is added to the kernel).
+ */
+static __always_inline bool is_kfence_address(const void *addr)
+{
+ /*
+ * The __kfence_pool != NULL check is required to deal with the case
+ * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
+ * the slow-path after the range-check!
+ */
+ return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
+}
+
+/**
+ * kfence_alloc_pool_and_metadata() - allocate the KFENCE pool and KFENCE
+ * metadata via memblock
+ */
+void __init kfence_alloc_pool_and_metadata(void);
+
+/**
+ * kfence_init() - perform KFENCE initialization at boot time
+ *
+ * Requires that kfence_alloc_pool_and_metadata() was called before. This sets
+ * up the allocation gate timer, and requires that workqueues are available.
+ */
+void __init kfence_init(void);
+
+/**
+ * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
+ * @s: cache being shut down
+ *
+ * Before shutting down a cache, one must ensure there are no remaining objects
+ * allocated from it. Because KFENCE objects are not referenced from the cache
+ * directly, we need to check them here.
+ *
+ * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does
+ * not return if allocated objects still exist: it prints an error message and
+ * simply aborts destruction of a cache, leaking memory.
+ *
+ * If the only such objects are KFENCE objects, we will not leak the entire
+ * cache, but instead try to provide more useful debug info by making allocated
+ * objects "zombie allocations". Objects may then still be used or freed (which
+ * is handled gracefully), but usage will result in showing KFENCE error reports
+ * which include stack traces to the user of the object, the original allocation
+ * site, and the caller of shutdown_cache().
+ */
+void kfence_shutdown_cache(struct kmem_cache *s);
+
+/*
+ * Allocate a KFENCE object. Allocators must not call this function directly,
+ * use kfence_alloc() instead.
+ */
+void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
+
+/**
+ * kfence_alloc() - allocate a KFENCE object with a low probability
+ * @s: struct kmem_cache with object requirements
+ * @size: exact size of the object to allocate (can be less than @s->size
+ * e.g. for kmalloc caches)
+ * @flags: GFP flags
+ *
+ * Return:
+ * * NULL - must proceed with allocating as usual,
+ * * non-NULL - pointer to a KFENCE object.
+ *
+ * kfence_alloc() should be inserted into the heap allocation fast path,
+ * allowing it to transparently return KFENCE-allocated objects with a low
+ * probability using a static branch (the probability is controlled by the
+ * kfence.sample_interval boot parameter).
+ */
+static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
+{
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+ if (!static_branch_unlikely(&kfence_allocation_key))
+ return NULL;
+#else
+ if (!static_branch_likely(&kfence_allocation_key))
+ return NULL;
+#endif
+ if (likely(atomic_read(&kfence_allocation_gate) > 0))
+ return NULL;
+ return __kfence_alloc(s, size, flags);
+}
+
+/**
+ * kfence_ksize() - get actual amount of memory allocated for a KFENCE object
+ * @addr: pointer to a heap object
+ *
+ * Return:
+ * * 0 - not a KFENCE object, must call __ksize() instead,
+ * * non-0 - this many bytes can be accessed without causing a memory error.
+ *
+ * kfence_ksize() returns the number of bytes requested for a KFENCE object at
+ * allocation time. This number may be less than the object size of the
+ * corresponding struct kmem_cache.
+ */
+size_t kfence_ksize(const void *addr);
+
+/**
+ * kfence_object_start() - find the beginning of a KFENCE object
+ * @addr: address within a KFENCE-allocated object
+ *
+ * Return: address of the beginning of the object.
+ *
+ * SL[AU]B-allocated objects are laid out within a page one by one, so it is
+ * easy to calculate the beginning of an object given a pointer inside it and
+ * the object size. The same is not true for KFENCE, which places a single
+ * object at either end of the page. This helper function is used to find the
+ * beginning of a KFENCE-allocated object.
+ */
+void *kfence_object_start(const void *addr);
+
+/**
+ * __kfence_free() - release a KFENCE heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Requires: is_kfence_address(addr)
+ *
+ * Release a KFENCE object and mark it as freed.
+ */
+void __kfence_free(void *addr);
+
+/**
+ * kfence_free() - try to release an arbitrary heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Return:
+ * * false - object doesn't belong to KFENCE pool and was ignored,
+ * * true - object was released to KFENCE pool.
+ *
+ * Release a KFENCE object and mark it as freed. May be called on any object,
+ * even non-KFENCE objects, to simplify integration of the hooks into the
+ * allocator's free codepath. The allocator must check the return value to
+ * determine if it was a KFENCE object or not.
+ */
+static __always_inline __must_check bool kfence_free(void *addr)
+{
+ if (!is_kfence_address(addr))
+ return false;
+ __kfence_free(addr);
+ return true;
+}
+
+/**
+ * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
+ * @addr: faulting address
+ * @is_write: is access a write
+ * @regs: current struct pt_regs (can be NULL, but shows full stack trace)
+ *
+ * Return:
+ * * false - address outside KFENCE pool,
+ * * true - page fault handled by KFENCE, no additional handling required.
+ *
+ * A page fault inside KFENCE pool indicates a memory error, such as an
+ * out-of-bounds access, a use-after-free or an invalid memory access. In these
+ * cases KFENCE prints an error message and marks the offending page as
+ * present, so that the kernel can proceed.
+ */
+bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
+#else /* CONFIG_KFENCE */
+
+#define kfence_sample_interval (0)
+
+static inline bool is_kfence_address(const void *addr) { return false; }
+static inline void kfence_alloc_pool_and_metadata(void) { }
+static inline void kfence_init(void) { }
+static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
+static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
+static inline size_t kfence_ksize(const void *addr) { return 0; }
+static inline void *kfence_object_start(const void *addr) { return NULL; }
+static inline void __kfence_free(void *addr) { }
+static inline bool __must_check kfence_free(void *addr) { return false; }
+static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
+ struct pt_regs *regs)
+{
+ return false;
+}
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ return false;
+}
+#endif
+
+#endif
+
+#endif /* _LINUX_KFENCE_H */
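
To make the fast-path contract above concrete, here is a minimal allocator-integration sketch; normal_slab_alloc() and normal_slab_free() are hypothetical stand-ins for the host allocator's regular paths:

static void *example_cache_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
        void *obj = kfence_alloc(s, size, flags);

        if (obj)                /* rare: KFENCE served this allocation */
                return obj;
        return normal_slab_alloc(s, size, flags);       /* hypothetical */
}

static void example_cache_free(struct kmem_cache *s, void *obj)
{
        if (kfence_free(obj))   /* true iff obj came from the KFENCE pool */
                return;
        normal_slab_free(s, obj);                       /* hypothetical */
}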
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 41eb6fdf87a8..8b81ac74829c 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* A generic kernel FIFO implementation
*
* Copyright (C) 2013 Stefani Seibold <stefani@seibold.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _LINUX_KFIFO_H
@@ -41,19 +27,24 @@
*/
/*
- * Note about locking : There is no locking required until only * one reader
- * and one writer is using the fifo and no kfifo_reset() will be * called
- * kfifo_reset_out() can be safely used, until it will be only called
+ * Note about locking: No locking is required as long as there is only one
+ * reader and one writer using the fifo and kfifo_reset() is not called.
+ * kfifo_reset_out() can be safely used as long as it is only called
* in the reader thread.
- * For multiple writer and one reader there is only a need to lock the writer.
+ * For multiple writers and one reader there is only a need to lock the writer.
 * And vice versa for only one writer and multiple readers there is only a need
* to lock the reader.
*/
-#include <linux/kernel.h>
+#include <linux/array_size.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
-#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+#include <asm/errno.h>
+
+struct scatterlist;
struct __kfifo {
unsigned int in;
@@ -113,7 +104,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
* array is a part of the structure and the fifo type where the array is
* outside of the fifo structure.
*/
-#define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo))
+#define __is_kfifo_ptr(fifo) \
+ (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type))))
/**
* DECLARE_KFIFO_PTR - macro to declare a fifo pointer object
@@ -260,6 +252,37 @@ __kfifo_int_must_check_helper(int val)
})
/**
+ * kfifo_is_empty_spinlocked - returns true if the fifo is empty using
+ * a spinlock for locking
+ * @fifo: address of the fifo to be used
+ * @lock: spinlock to be used for locking
+ */
+#define kfifo_is_empty_spinlocked(fifo, lock) \
+({ \
+ unsigned long __flags; \
+ bool __ret; \
+ spin_lock_irqsave(lock, __flags); \
+ __ret = kfifo_is_empty(fifo); \
+ spin_unlock_irqrestore(lock, __flags); \
+ __ret; \
+})
+
+/**
+ * kfifo_is_empty_spinlocked_noirqsave - returns true if the fifo is empty
+ * using a spinlock for locking, doesn't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @lock: spinlock to be used for locking
+ */
+#define kfifo_is_empty_spinlocked_noirqsave(fifo, lock) \
+({ \
+ bool __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_is_empty(fifo); \
+ spin_unlock(lock); \
+ __ret; \
+})
+
+/**
* kfifo_is_full - returns true if the fifo is full
* @fifo: address of the fifo to be used
*/
@@ -286,19 +309,25 @@ __kfifo_uint_must_check_helper( \
)
/**
- * kfifo_skip - skip output data
+ * kfifo_skip_count - skip output data
* @fifo: address of the fifo to be used
+ * @count: count of data to skip
*/
-#define kfifo_skip(fifo) \
-(void)({ \
+#define kfifo_skip_count(fifo, count) do { \
typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__recsize) \
__kfifo_skip_r(__kfifo, __recsize); \
else \
- __kfifo->out++; \
-})
+ __kfifo->out += (count); \
+} while (0)
+
+/**
+ * kfifo_skip - skip output data
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_skip(fifo) kfifo_skip_count(fifo, 1)
/**
* kfifo_peek_len - gets the size of the next fifo record
@@ -325,7 +354,7 @@ __kfifo_uint_must_check_helper( \
*
* This macro dynamically allocates a new fifo buffer.
*
- * The numer of elements will be rounded-up to a power of 2.
+ * The number of elements will be rounded-up to a power of 2.
 * The fifo will be released with kfifo_free().
* Return 0 if no error, otherwise an error code.
*/
@@ -341,6 +370,30 @@ __kfifo_int_must_check_helper( \
)
/**
+ * kfifo_alloc_node - dynamically allocates a new fifo buffer on a NUMA node
+ * @fifo: pointer to the fifo
+ * @size: the number of elements in the fifo, this must be a power of 2
+ * @gfp_mask: get_free_pages mask, passed to kmalloc()
+ * @node: NUMA node to allocate memory on
+ *
+ * This macro dynamically allocates a new fifo buffer with NUMA node awareness.
+ *
+ * The number of elements will be rounded-up to a power of 2.
+ * The fifo will be released with kfifo_free().
+ * Return 0 if no error, otherwise an error code.
+ */
+#define kfifo_alloc_node(fifo, size, gfp_mask, node) \
+__kfifo_int_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ __is_kfifo_ptr(__tmp) ? \
+ __kfifo_alloc_node(__kfifo, size, sizeof(*__tmp->type), gfp_mask, node) : \
+ -EINVAL; \
+}) \
+)
+
+/**
* kfifo_free - frees the fifo
* @fifo: the fifo to be freed
*/
@@ -358,9 +411,9 @@ __kfifo_int_must_check_helper( \
* @buffer: the preallocated buffer to be used
 * @size: the size of the internal buffer, this has to be a power of 2
*
- * This macro initialize a fifo using a preallocated buffer.
+ * This macro initializes a fifo using a preallocated buffer.
*
- * The numer of elements will be rounded-up to a power of 2.
+ * The number of elements will be rounded-up to a power of 2.
* Return 0 if no error, otherwise an error code.
*/
#define kfifo_init(fifo, buffer, size) \
@@ -530,6 +583,26 @@ __kfifo_uint_must_check_helper( \
__ret; \
})
+/**
+ * kfifo_in_spinlocked_noirqsave - put data into fifo using a spinlock for
+ * locking, don't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @buf: the data to be added
+ * @n: number of elements to be added
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This is a variant of kfifo_in_spinlocked() but uses spin_lock/unlock()
+ * for locking and doesn't disable interrupts.
+ */
+#define kfifo_in_spinlocked_noirqsave(fifo, buf, n, lock) \
+({ \
+ unsigned int __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_in(fifo, buf, n); \
+ spin_unlock(lock); \
+ __ret; \
+})
+
/* alias for kfifo_in_spinlocked, will be removed in a future release */
#define kfifo_in_locked(fifo, buf, n, lock) \
kfifo_in_spinlocked(fifo, buf, n, lock)
@@ -540,7 +613,7 @@ __kfifo_uint_must_check_helper( \
* @buf: pointer to the storage buffer
* @n: max. number of elements to get
*
- * This macro get some data from the fifo and return the numbers of elements
+ * This macro gets some data from the fifo and returns the number of elements
* copied.
*
* Note that with only one concurrent reader and one concurrent
@@ -567,7 +640,7 @@ __kfifo_uint_must_check_helper( \
* @n: max. number of elements to get
* @lock: pointer to the spinlock to use for locking
*
- * This macro get the data from the fifo and return the numbers of elements
+ * This macro gets the data from the fifo and returns the number of elements
* copied.
*/
#define kfifo_out_spinlocked(fifo, buf, n, lock) \
@@ -582,6 +655,28 @@ __kfifo_uint_must_check_helper( \
}) \
)
+/**
+ * kfifo_out_spinlocked_noirqsave - get data from the fifo using a spinlock
+ * for locking, don't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @buf: pointer to the storage buffer
+ * @n: max. number of elements to get
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This is a variant of kfifo_out_spinlocked() which uses spin_lock/unlock()
+ * for locking and doesn't disable interrupts.
+ */
+#define kfifo_out_spinlocked_noirqsave(fifo, buf, n, lock) \
+__kfifo_uint_must_check_helper( \
+({ \
+ unsigned int __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_out(fifo, buf, n); \
+ spin_unlock(lock); \
+ __ret; \
+}) \
+)
+
/* alias for kfifo_out_spinlocked, will be removed in a future release */
#define kfifo_out_locked(fifo, buf, n, lock) \
kfifo_out_spinlocked(fifo, buf, n, lock)
@@ -628,7 +723,7 @@ __kfifo_uint_must_check_helper( \
 * writer, you don't need extra locking to use this macro.
*/
#define kfifo_to_user(fifo, to, len, copied) \
-__kfifo_uint_must_check_helper( \
+__kfifo_int_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
@@ -643,11 +738,12 @@ __kfifo_uint_must_check_helper( \
)
/**
- * kfifo_dma_in_prepare - setup a scatterlist for DMA input
+ * kfifo_dma_in_prepare_mapped - setup a scatterlist for DMA input
* @fifo: address of the fifo to be used
* @sgl: pointer to the scatterlist array
* @nents: number of entries in the scatterlist array
* @len: number of elements to transfer
+ * @dma: mapped dma address to fill into @sgl
*
* This macro fills a scatterlist for DMA input.
 * It returns the number of entries in the scatterlist array.
@@ -655,7 +751,7 @@ __kfifo_uint_must_check_helper( \
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macros.
*/
-#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
+#define kfifo_dma_in_prepare_mapped(fifo, sgl, nents, len, dma) \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
@@ -664,16 +760,20 @@ __kfifo_uint_must_check_helper( \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
(__recsize) ? \
- __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \
- __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \
+ __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize, \
+ dma) : \
+ __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len, dma); \
})
+#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
+ kfifo_dma_in_prepare_mapped(fifo, sgl, nents, len, DMA_MAPPING_ERROR)
+
/**
* kfifo_dma_in_finish - finish a DMA IN operation
* @fifo: address of the fifo to be used
 * @len: number of bytes to be received
*
- * This macro finish a DMA IN operation. The in counter will be updated by
+ * This macro finishes a DMA IN operation. The in counter will be updated by
* the len parameter. No error checking will be done.
*
* Note that with only one concurrent reader and one concurrent
@@ -692,11 +792,12 @@ __kfifo_uint_must_check_helper( \
})
/**
- * kfifo_dma_out_prepare - setup a scatterlist for DMA output
+ * kfifo_dma_out_prepare_mapped - setup a scatterlist for DMA output
* @fifo: address of the fifo to be used
* @sgl: pointer to the scatterlist array
* @nents: number of entries in the scatterlist array
* @len: number of elements to transfer
+ * @dma: mapped dma address to fill into @sgl
*
 * This macro fills a scatterlist for DMA output of at most @len bytes
* to transfer.
@@ -706,7 +807,7 @@ __kfifo_uint_must_check_helper( \
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macros.
*/
-#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
+#define kfifo_dma_out_prepare_mapped(fifo, sgl, nents, len, dma) \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
@@ -715,32 +816,29 @@ __kfifo_uint_must_check_helper( \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
(__recsize) ? \
- __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \
- __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \
+ __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize, \
+ dma) : \
+ __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len, dma); \
})
+#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
+ kfifo_dma_out_prepare_mapped(fifo, sgl, nents, len, DMA_MAPPING_ERROR)
+
/**
* kfifo_dma_out_finish - finish a DMA OUT operation
* @fifo: address of the fifo to be used
* @len: number of bytes transferred
*
- * This macro finish a DMA OUT operation. The out counter will be updated by
+ * This macro finishes a DMA OUT operation. The out counter will be updated by
* the len parameter. No error checking will be done.
*
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macros.
*/
-#define kfifo_dma_out_finish(fifo, len) \
-(void)({ \
- typeof((fifo) + 1) __tmp = (fifo); \
- unsigned int __len = (len); \
- const size_t __recsize = sizeof(*__tmp->rectype); \
- struct __kfifo *__kfifo = &__tmp->kfifo; \
- if (__recsize) \
- __kfifo_dma_out_finish_r(__kfifo, __recsize); \
- else \
- __kfifo->out += __len / sizeof(*__tmp->type); \
-})
+#define kfifo_dma_out_finish(fifo, len) do { \
+ typeof((fifo) + 1) ___tmp = (fifo); \
+ kfifo_skip_count(___tmp, (len) / sizeof(*___tmp->type)); \
+} while (0)
/**
* kfifo_out_peek - gets some data from the fifo
@@ -748,7 +846,7 @@ __kfifo_uint_must_check_helper( \
* @buf: pointer to the storage buffer
* @n: max. number of elements to get
*
- * This macro get the data from the fifo and return the numbers of elements
+ * This macro gets the data from the fifo and returns the number of elements
* copied. The data is not removed from the fifo.
*
* Note that with only one concurrent reader and one concurrent
@@ -768,8 +866,71 @@ __kfifo_uint_must_check_helper( \
}) \
)
-extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
- size_t esize, gfp_t gfp_mask);
+/**
+ * kfifo_out_linear - gets a tail of/offset to available data
+ * @fifo: address of the fifo to be used
+ * @tail: pointer to an unsigned int to store the value of tail
+ * @n: max. number of elements to point at
+ *
+ * This macro obtains the offset (tail) to the available data in the fifo
+ * buffer and returns the number of elements available. It returns the
+ * available count till the end of the data or till the end of the buffer,
+ * so it can be used for linear data processing (like memcpy() of
+ * (@fifo->data + @tail) with the count returned).
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_out_linear(fifo, tail, n) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ unsigned int *__tail = (tail); \
+ unsigned long __n = (n); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (__recsize) ? \
+ __kfifo_out_linear_r(__kfifo, __tail, __n, __recsize) : \
+ __kfifo_out_linear(__kfifo, __tail, __n); \
+}) \
+)
+
+/**
+ * kfifo_out_linear_ptr - gets a pointer to the available data
+ * @fifo: address of the fifo to be used
+ * @ptr: pointer to data to store the pointer to tail
+ * @n: max. number of elements to point at
+ *
+ * Similarly to kfifo_out_linear(), this macro obtains a pointer to the
+ * available data in the fifo buffer and returns the number of elements
+ * available. It returns the available count till the end of the available
+ * data or till the end of the buffer, so it can be used for linear data
+ * processing (like memcpy() of @ptr with the count returned).
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_out_linear_ptr(fifo, ptr, n) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) ___tmp = (fifo); \
+ unsigned int ___tail; \
+ unsigned int ___n = kfifo_out_linear(___tmp, &___tail, (n)); \
+ *(ptr) = ___tmp->kfifo.data + ___tail * kfifo_esize(___tmp); \
+ ___n; \
+}) \
+)
+
+
+extern int __kfifo_alloc_node(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask, int node);
+
+static inline int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask)
+{
+ return __kfifo_alloc_node(fifo, size, esize, gfp_mask, NUMA_NO_NODE);
+}
extern void __kfifo_free(struct __kfifo *fifo);
@@ -789,14 +950,17 @@ extern int __kfifo_to_user(struct __kfifo *fifo,
void __user *to, unsigned long len, unsigned int *copied);
extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len);
+ struct scatterlist *sgl, int nents, unsigned int len, dma_addr_t dma);
extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len);
+ struct scatterlist *sgl, int nents, unsigned int len, dma_addr_t dma);
extern unsigned int __kfifo_out_peek(struct __kfifo *fifo,
void *buf, unsigned int len);
+extern unsigned int __kfifo_out_linear(struct __kfifo *fifo,
+ unsigned int *tail, unsigned int n);
+
extern unsigned int __kfifo_in_r(struct __kfifo *fifo,
const void *buf, unsigned int len, size_t recsize);
@@ -811,15 +975,15 @@ extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
unsigned long len, unsigned int *copied, size_t recsize);
extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+ dma_addr_t dma);
extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
unsigned int len, size_t recsize);
extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
-
-extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize);
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+ dma_addr_t dma);
extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize);
@@ -828,6 +992,9 @@ extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize);
extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo,
void *buf, unsigned int len, size_t recsize);
+extern unsigned int __kfifo_out_linear_r(struct __kfifo *fifo,
+ unsigned int *tail, unsigned int n, size_t recsize);
+
extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize);
#endif
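
A short sketch tying the new helpers together — one locked producer, one lock-free consumer using the linear accessors; the fifo name, element type and sizes are illustrative:

static DEFINE_KFIFO(example_fifo, unsigned char, 128);
static DEFINE_SPINLOCK(example_lock);

/* Multiple writers: serialize them with the _spinlocked variant. */
static void example_produce(const unsigned char *buf, unsigned int n)
{
        kfifo_in_spinlocked(&example_fifo, buf, n, &example_lock);
}

/* Single reader: process data in place, then consume it. */
static void example_consume(void)
{
        unsigned char *p;
        unsigned int n;

        n = kfifo_out_linear_ptr(&example_fifo, &p, 64);
        /* ... operate on n contiguous bytes at p ... */
        kfifo_skip_count(&example_fifo, n);
}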
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index e465bb15912d..5eebbe7a3545 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -16,6 +16,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/atomic.h>
+#include <linux/kprobes.h>
#ifdef CONFIG_HAVE_ARCH_KGDB
#include <asm/kgdb.h>
#endif
@@ -104,9 +105,9 @@ extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs);
*/
/**
- * kgdb_arch_init - Perform any architecture specific initalization.
+ * kgdb_arch_init - Perform any architecture specific initialization.
*
- * This function will handle the initalization of any architecture
+ * This function will handle the initialization of any architecture
* specific callbacks.
*/
extern int kgdb_arch_init(void);
@@ -177,22 +178,39 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
struct pt_regs *regs);
/**
+ * kgdb_arch_handle_qxfer_pkt - Handle architecture specific GDB XML
+ * packets.
+ * @remcom_in_buffer: The buffer of the packet we have read.
+ * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ */
+
+extern void
+kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+ char *remcom_out_buffer);
+
+/**
+ * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
+ * @ignored: This parameter is only here to match the prototype.
+ *
+ * If you're using the default implementation of kgdb_roundup_cpus()
+ * this function will be called per CPU. If you don't implement
+ * kgdb_call_nmi_hook(), a default will be used.
+ */
+
+extern void kgdb_call_nmi_hook(void *ignored);
+
+/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
* and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
- * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
+ * the NMI approach is not used for rounding up all the CPUs. Normally
+ * those architectures can just not implement this and get the default.
*
* On non-SMP systems, this is not called.
*/
-extern void kgdb_roundup_cpus(unsigned long flags);
+extern void kgdb_roundup_cpus(void);
/**
* kgdb_arch_set_pc - Generic call back to the program counter
@@ -211,9 +229,9 @@ extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
/**
- * kgdb_arch_late - Perform any architecture specific initalization.
+ * kgdb_arch_late - Perform any architecture specific initialization.
*
- * This function will handle the late initalization of any
+ * This function will handle the late initialization of any
* architecture specific callbacks. This is an optional function for
* handling things like late initialization of hw breakpoints. The
* default implementation does nothing.
@@ -239,7 +257,6 @@ extern void kgdb_arch_late(void);
* hardware breakpoints.
* @correct_hw_break: Allow an architecture to specify how to correct the
* hardware debug registers.
- * @enable_nmi: Manage NMI-triggered entry to KGDB
*/
struct kgdb_arch {
unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
@@ -252,8 +269,6 @@ struct kgdb_arch {
void (*disable_hw_break)(struct pt_regs *regs);
void (*remove_all_hw_break)(void);
void (*correct_hw_break)(void);
-
- void (*enable_nmi)(bool on);
};
/**
@@ -263,12 +278,14 @@ struct kgdb_arch {
* @write_char: Pointer to a function that will write one char.
* @flush: Pointer to a function that will flush any pending writes.
* @init: Pointer to a function that will initialize the device.
+ * @deinit: Pointer to a function that will deinit the device. Implies that
+ * this I/O driver is temporary and expects to be replaced. Called when
+ * an I/O driver is replaced or explicitly unregistered.
* @pre_exception: Pointer to a function that will do any prep work for
* the I/O driver.
* @post_exception: Pointer to a function that will do any cleanup work
* for the I/O driver.
- * @is_console: 1 if the end device is a console 0 if the I/O device is
- * not a console
+ * @cons: valid if the I/O device is a console; else NULL.
*/
struct kgdb_io {
const char *name;
@@ -276,25 +293,16 @@ struct kgdb_io {
void (*write_char) (u8);
void (*flush) (void);
int (*init) (void);
+ void (*deinit) (void);
void (*pre_exception) (void);
void (*post_exception) (void);
- int is_console;
+ struct console *cons;
};
-extern struct kgdb_arch arch_kgdb_ops;
+extern const struct kgdb_arch arch_kgdb_ops;
extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
-#ifdef CONFIG_SERIAL_KGDB_NMI
-extern int kgdb_register_nmi_console(void);
-extern int kgdb_unregister_nmi_console(void);
-extern bool kgdb_nmi_poll_knock(void);
-#else
-static inline int kgdb_register_nmi_console(void) { return 0; }
-static inline int kgdb_unregister_nmi_console(void) { return 0; }
-static inline bool kgdb_nmi_poll_knock(void) { return 1; }
-#endif
-
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
extern struct kgdb_io *dbg_io_ops;
@@ -304,7 +312,7 @@ extern char *kgdb_mem2hex(char *mem, char *buf, int count);
extern int kgdb_hex2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
-extern void kgdb_schedule_breakpoint(void);
+extern int kgdb_has_hit_break(unsigned long addr);
extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
@@ -314,14 +322,36 @@ extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
atomic_t *snd_rdy);
extern void gdbstub_exit(int status);
+/*
+ * kgdb and kprobes both use the same (kprobe) blocklist (which makes sense
+ * given they are both typically hooked up to the same trap, meaning on most
+ * architectures one cannot be used to debug the other)
+ *
+ * However on architectures where kprobes is not (yet) implemented we permit
+ * breakpoints everywhere rather than blocking everything by default.
+ */
+static inline bool kgdb_within_blocklist(unsigned long addr)
+{
+#ifdef CONFIG_KGDB_HONOUR_BLOCKLIST
+ return within_kprobe_blacklist(addr);
+#else
+ return false;
+#endif
+}
+
extern int kgdb_single_step;
extern atomic_t kgdb_active;
#define in_dbg_master() \
- (raw_smp_processor_id() == atomic_read(&kgdb_active))
+ (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active)))
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
+extern void kgdb_panic(const char *msg);
+extern void kgdb_free_init_mem(void);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
+static inline void kgdb_panic(const char *msg) {}
+static inline void kgdb_free_init_mem(void) { }
+static inline int kgdb_nmicallback(int cpu, void *regs) { return 1; }
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
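
For illustration, a minimal polled I/O driver against the updated struct kgdb_io might look like the sketch below; the read/write stubs and all names are made up, and @cons is left NULL since this driver is not also a console:

static int example_dbg_read_char(void)
{
        return 'x';                     /* stub: poll the real device */
}

static void example_dbg_write_char(u8 c)
{
        /* stub: emit c on the real device */
}

static struct kgdb_io example_kgdb_io = {
        .name           = "example_kgdbio",
        .read_char      = example_dbg_read_char,
        .write_char     = example_dbg_write_char,
        .cons           = NULL,         /* not a console */
};

static int __init example_kgdb_init(void)
{
        return kgdb_register_io_module(&example_kgdb_io);
}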
diff --git a/include/linux/kho/abi/luo.h b/include/linux/kho/abi/luo.h
new file mode 100644
index 000000000000..bb099c92e469
--- /dev/null
+++ b/include/linux/kho/abi/luo.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+/**
+ * DOC: Live Update Orchestrator ABI
+ *
+ * This header defines the stable Application Binary Interface used by the
+ * Live Update Orchestrator to pass state from a pre-update kernel to a
+ * post-update kernel. The ABI is built upon the Kexec HandOver framework
+ * and uses a Flattened Device Tree to describe the preserved data.
+ *
+ * This interface is a contract. Any modification to the FDT structure, node
+ * properties, compatible strings, or the layout of the `__packed` serialization
+ * structures defined here constitutes a breaking change. Such changes require
+ * incrementing the version number in the relevant `_COMPATIBLE` string to
+ * prevent a new kernel from misinterpreting data from an old kernel.
+ *
+ * Changes are allowed provided the compatibility version is incremented;
+ * however, backward/forward compatibility is only guaranteed for kernels
+ * supporting the same ABI version.
+ *
+ * FDT Structure Overview:
+ * The entire LUO state is encapsulated within a single KHO entry named "LUO".
+ * This entry contains an FDT with the following layout:
+ *
+ * .. code-block:: none
+ *
+ * / {
+ * compatible = "luo-v1";
+ * liveupdate-number = <...>;
+ *
+ * luo-session {
+ * compatible = "luo-session-v1";
+ * luo-session-header = <phys_addr_of_session_header_ser>;
+ * };
+ * };
+ *
+ * Main LUO Node (/):
+ *
+ * - compatible: "luo-v1"
+ * Identifies the overall LUO ABI version.
+ * - liveupdate-number: u64
+ * A counter tracking the number of successful live updates performed.
+ *
+ * Session Node (luo-session):
+ * This node describes all preserved user-space sessions.
+ *
+ * - compatible: "luo-session-v1"
+ * Identifies the session ABI version.
+ * - luo-session-header: u64
+ * The physical address of a `struct luo_session_header_ser`. This structure
+ * is the header for a contiguous block of memory containing an array of
+ * `struct luo_session_ser`, one for each preserved session.
+ *
+ * Serialization Structures:
+ * The FDT properties point to memory regions containing arrays of simple,
+ * `__packed` structures. These structures contain the actual preserved state.
+ *
+ * - struct luo_session_header_ser:
+ * Header for the session array. Contains the total page count of the
+ * preserved memory block and the number of `struct luo_session_ser`
+ * entries that follow.
+ *
+ * - struct luo_session_ser:
+ * Metadata for a single session, including its name and a physical pointer
+ * to another preserved memory block containing an array of
+ * `struct luo_file_ser` for all files in that session.
+ *
+ * - struct luo_file_ser:
+ * Metadata for a single preserved file. Contains the `compatible` string to
+ * find the correct handler in the new kernel, a user-provided `token` for
+ * identification, and an opaque `data` handle for the handler to use.
+ */
+
+#ifndef _LINUX_KHO_ABI_LUO_H
+#define _LINUX_KHO_ABI_LUO_H
+
+#include <uapi/linux/liveupdate.h>
+
+/*
+ * The LUO FDT hooks all LUO state for sessions, fds, etc.
+ * In the root it also carries "liveupdate-number" 64-bit property that
+ * corresponds to the number of live-updates performed on this machine.
+ */
+#define LUO_FDT_SIZE PAGE_SIZE
+#define LUO_FDT_KHO_ENTRY_NAME "LUO"
+#define LUO_FDT_COMPATIBLE "luo-v1"
+#define LUO_FDT_LIVEUPDATE_NUM "liveupdate-number"
+
+#define LIVEUPDATE_HNDL_COMPAT_LENGTH 48
+
+/**
+ * struct luo_file_ser - Represents a serialized preserved file.
+ * @compatible: File handler compatible string.
+ * @data: Private data
+ * @token: User provided token for this file
+ *
+ * If this structure is modified, LUO_FDT_SESSION_COMPATIBLE must be updated.
+ */
+struct luo_file_ser {
+ char compatible[LIVEUPDATE_HNDL_COMPAT_LENGTH];
+ u64 data;
+ u64 token;
+} __packed;
+
+/**
+ * struct luo_file_set_ser - Represents the serialized metadata for a file set
+ * @files: The physical address of a contiguous memory block that holds
+ * the serialized state of files (array of luo_file_ser) in this file
+ * set.
+ * @count: The total number of files that were part of this session during
+ * serialization. Used for iteration and validation during
+ * restoration.
+ */
+struct luo_file_set_ser {
+ u64 files;
+ u64 count;
+} __packed;
+
+/*
+ * LUO FDT session node
+ * LUO_FDT_SESSION_HEADER: is a u64 physical address of struct
+ * luo_session_header_ser
+ */
+#define LUO_FDT_SESSION_NODE_NAME "luo-session"
+#define LUO_FDT_SESSION_COMPATIBLE "luo-session-v2"
+#define LUO_FDT_SESSION_HEADER "luo-session-header"
+
+/**
+ * struct luo_session_header_ser - Header for the serialized session data block.
+ * @count: The number of `struct luo_session_ser` entries that immediately
+ * follow this header in the memory block.
+ *
+ * This structure is located at the beginning of a contiguous block of
+ * physical memory preserved across the kexec. It provides the necessary
+ * metadata to interpret the array of session entries that follow.
+ *
+ * If this structure is modified, `LUO_FDT_SESSION_COMPATIBLE` must be updated.
+ */
+struct luo_session_header_ser {
+ u64 count;
+} __packed;
+
+/**
+ * struct luo_session_ser - Represents the serialized metadata for a LUO session.
+ * @name: The unique name of the session, provided by the userspace at
+ * the time of session creation.
+ * @file_set_ser: Serialized files belonging to this session.
+ *
+ * This structure is used to package session-specific metadata for transfer
+ * between kernels via Kexec Handover. An array of these structures (one per
+ * session) is created and passed to the new kernel, allowing it to reconstruct
+ * the session context.
+ *
+ * If this structure is modified, `LUO_FDT_SESSION_COMPATIBLE` must be updated.
+ */
+struct luo_session_ser {
+ char name[LIVEUPDATE_SESSION_NAME_LENGTH];
+ struct luo_file_set_ser file_set_ser;
+} __packed;
+
+#endif /* _LINUX_KHO_ABI_LUO_H */
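
To show how the serialized layout chains together, here is a hedged sketch of a post-update walk over the session array; the FDT lookup that yields header_phys is omitted, and the phys_to_virt() use and error handling are simplified:

static void example_walk_sessions(u64 header_phys)
{
        struct luo_session_header_ser *hdr = phys_to_virt(header_phys);
        struct luo_session_ser *ses = (struct luo_session_ser *)(hdr + 1);
        u64 i;

        /* luo_session_ser entries immediately follow the header. */
        for (i = 0; i < hdr->count; i++)
                pr_info("LUO session '%s': %llu preserved files\n",
                        ses[i].name, ses[i].file_set_ser.count);
}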
diff --git a/include/linux/kho/abi/memfd.h b/include/linux/kho/abi/memfd.h
new file mode 100644
index 000000000000..da7d063474a1
--- /dev/null
+++ b/include/linux/kho/abi/memfd.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ *
+ * Copyright (C) 2025 Amazon.com Inc. or its affiliates.
+ * Pratyush Yadav <ptyadav@amazon.de>
+ */
+
+#ifndef _LINUX_KHO_ABI_MEMFD_H
+#define _LINUX_KHO_ABI_MEMFD_H
+
+#include <linux/types.h>
+#include <linux/kexec_handover.h>
+
+/**
+ * DOC: memfd Live Update ABI
+ *
+ * This header defines the ABI for preserving the state of a memfd across a
+ * kexec reboot using the LUO.
+ *
+ * The state is serialized into a packed structure `struct memfd_luo_ser`
+ * which is handed over to the next kernel via the KHO mechanism.
+ *
+ * This interface is a contract. Any modification to the structure layout
+ * constitutes a breaking change. Such changes require incrementing the
+ * version number in the MEMFD_LUO_FH_COMPATIBLE string.
+ */
+
+/**
+ * MEMFD_LUO_FOLIO_DIRTY - The folio is dirty.
+ *
+ * This flag indicates the folio contains data from user. A non-dirty folio is
+ * one that was allocated (say using fallocate(2)) but not written to.
+ */
+#define MEMFD_LUO_FOLIO_DIRTY BIT(0)
+
+/**
+ * MEMFD_LUO_FOLIO_UPTODATE - The folio is up-to-date.
+ *
+ * An up-to-date folio has been zeroed out. shmem zeroes out folios on first
+ * use. This flag tracks which folios need zeroing.
+ */
+#define MEMFD_LUO_FOLIO_UPTODATE BIT(1)
+
+/**
+ * struct memfd_luo_folio_ser - Serialized state of a single folio.
+ * @pfn: The page frame number of the folio.
+ * @flags: Flags to describe the state of the folio.
+ * @index: The page offset (pgoff_t) of the folio within the original file.
+ */
+struct memfd_luo_folio_ser {
+ u64 pfn:52;
+ u64 flags:12;
+ u64 index;
+} __packed;
+
+/**
+ * struct memfd_luo_ser - Main serialization structure for a memfd.
+ * @pos: The file's current position (f_pos).
+ * @size: The total size of the file in bytes (i_size).
+ * @nr_folios: Number of folios in the folios array.
+ * @folios: KHO vmalloc descriptor pointing to the array of
+ * struct memfd_luo_folio_ser.
+ */
+struct memfd_luo_ser {
+ u64 pos;
+ u64 size;
+ u64 nr_folios;
+ struct kho_vmalloc folios;
+} __packed;
+
+/* The compatibility string for memfd file handler */
+#define MEMFD_LUO_FH_COMPATIBLE "memfd-v1"
+
+#endif /* _LINUX_KHO_ABI_MEMFD_H */
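
A hedged sketch of filling one serialized folio entry on the pre-update side; the real serializer also has to preserve the folio via KHO and deal with large folios, which is elided here:

static void example_fill_folio_entry(struct memfd_luo_folio_ser *e,
                                     struct folio *folio, pgoff_t index)
{
        e->pfn = folio_pfn(folio);
        e->index = index;
        e->flags = 0;
        if (folio_test_dirty(folio))
                e->flags |= MEMFD_LUO_FOLIO_DIRTY;
        if (folio_test_uptodate(folio))
                e->flags |= MEMFD_LUO_FOLIO_UPTODATE;
}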
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index f0d7335336cd..eb1946a70cff 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -1,77 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H
-#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
-
+#include <linux/mm.h>
+extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
-extern int __khugepaged_enter(struct mm_struct *mm);
+extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags);
-
-#define khugepaged_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define khugepaged_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
-#define khugepaged_req_madv() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
-#define khugepaged_defrag() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
-
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+extern void khugepaged_enter_vma(struct vm_area_struct *vma,
+ vm_flags_t vm_flags);
+extern void khugepaged_min_free_kbytes_update(void);
+extern bool current_is_khugepaged(void);
+extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
+
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
- return __khugepaged_enter(mm);
- return 0;
+ if (mm_flags_test(MMF_VM_HUGEPAGE, oldmm))
+ __khugepaged_enter(mm);
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+ if (mm_flags_test(MMF_VM_HUGEPAGE, mm))
__khugepaged_exit(mm);
}
-
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
- if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
- if ((khugepaged_always() ||
- (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
- !(vm_flags & VM_NOHUGEPAGE) &&
- !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- if (__khugepaged_enter(vma->vm_mm))
- return -ENOMEM;
- return 0;
-}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+ vm_flags_t vm_flags)
{
- return 0;
}
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
return 0;
}
+
+static inline void khugepaged_min_free_kbytes_update(void)
+{
+}
+
+static inline bool current_is_khugepaged(void)
+{
+ return false;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_KHUGEPAGED_H */
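
One consequence of __khugepaged_enter() and khugepaged_fork() now returning void is that the fork path has no error to propagate; a simplified, hypothetical sketch of the caller side (kernel/fork.c is the real one):

static void example_dup_mm_thp(struct mm_struct *mm, struct mm_struct *oldmm)
{
        /* Registration is best-effort now; nothing to unwind on failure. */
        khugepaged_fork(mm, oldmm);
}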
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 953f283f8451..b0f238f20dbb 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -1,12 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* klist.h - Some generic list helpers, extending struct list_head a bit.
*
* Implementations are found in lib/klist.c
*
- *
* Copyright (C) 2005 Patrick Mochel
- *
- * This file is rleased under the GPL v2.
*/
#ifndef _LINUX_KLIST_H
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
deleted file mode 100644
index 39f8453239f7..000000000000
--- a/include/linux/kmemcheck.h
+++ /dev/null
@@ -1,171 +0,0 @@
-#ifndef LINUX_KMEMCHECK_H
-#define LINUX_KMEMCHECK_H
-
-#include <linux/mm_types.h>
-#include <linux/types.h>
-
-#ifdef CONFIG_KMEMCHECK
-extern int kmemcheck_enabled;
-
-/* The slab-related functions. */
-void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
-void kmemcheck_free_shadow(struct page *page, int order);
-void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
- size_t size);
-void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
-
-void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
- gfp_t gfpflags);
-
-void kmemcheck_show_pages(struct page *p, unsigned int n);
-void kmemcheck_hide_pages(struct page *p, unsigned int n);
-
-bool kmemcheck_page_is_tracked(struct page *p);
-
-void kmemcheck_mark_unallocated(void *address, unsigned int n);
-void kmemcheck_mark_uninitialized(void *address, unsigned int n);
-void kmemcheck_mark_initialized(void *address, unsigned int n);
-void kmemcheck_mark_freed(void *address, unsigned int n);
-
-void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
-void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
-void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
-
-int kmemcheck_show_addr(unsigned long address);
-int kmemcheck_hide_addr(unsigned long address);
-
-bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
-
-/*
- * Bitfield annotations
- *
- * How to use: If you have a struct using bitfields, for example
- *
- * struct a {
- * int x:8, y:8;
- * };
- *
- * then this should be rewritten as
- *
- * struct a {
- * kmemcheck_bitfield_begin(flags);
- * int x:8, y:8;
- * kmemcheck_bitfield_end(flags);
- * };
- *
- * Now the "flags_begin" and "flags_end" members may be used to refer to the
- * beginning and end, respectively, of the bitfield (and things like
- * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
- * fields should be annotated:
- *
- * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
- * kmemcheck_annotate_bitfield(a, flags);
- */
-#define kmemcheck_bitfield_begin(name) \
- int name##_begin[0];
-
-#define kmemcheck_bitfield_end(name) \
- int name##_end[0];
-
-#define kmemcheck_annotate_bitfield(ptr, name) \
- do { \
- int _n; \
- \
- if (!ptr) \
- break; \
- \
- _n = (long) &((ptr)->name##_end) \
- - (long) &((ptr)->name##_begin); \
- BUILD_BUG_ON(_n < 0); \
- \
- kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
- } while (0)
-
-#define kmemcheck_annotate_variable(var) \
- do { \
- kmemcheck_mark_initialized(&(var), sizeof(var)); \
- } while (0) \
-
-#else
-#define kmemcheck_enabled 0
-
-static inline void
-kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
-{
-}
-
-static inline void
-kmemcheck_free_shadow(struct page *page, int order)
-{
-}
-
-static inline void
-kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
- size_t size)
-{
-}
-
-static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
- size_t size)
-{
-}
-
-static inline void kmemcheck_pagealloc_alloc(struct page *p,
- unsigned int order, gfp_t gfpflags)
-{
-}
-
-static inline bool kmemcheck_page_is_tracked(struct page *p)
-{
- return false;
-}
-
-static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_freed(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_unallocated_pages(struct page *p,
- unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
- unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_initialized_pages(struct page *p,
- unsigned int n)
-{
-}
-
-static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
-{
- return true;
-}
-
-#define kmemcheck_bitfield_begin(name)
-#define kmemcheck_bitfield_end(name)
-#define kmemcheck_annotate_bitfield(ptr, name) \
- do { \
- } while (0)
-
-#define kmemcheck_annotate_variable(var) \
- do { \
- } while (0)
-
-#endif /* CONFIG_KMEMCHECK */
-
-#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 590343f6c1b1..fbd424b2abb1 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/kmemleak.h
*
* Copyright (C) 2008 ARM Limited
* Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __KMEMLEAK_H
@@ -38,24 +26,25 @@ extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_update_trace(const void *ptr) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
+extern void kmemleak_transient_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_ignore_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
-extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
gfp_t gfp) __ref;
extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
-extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
- int min_count, unsigned long flags,
+ int min_count, slab_flags_t flags,
gfp_t gfp)
{
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_alloc(ptr, size, min_count, gfp);
}
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_free(ptr);
@@ -76,7 +65,7 @@ static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
{
}
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
- int min_count, unsigned long flags,
+ int min_count, slab_flags_t flags,
gfp_t gfp)
{
}
@@ -94,7 +83,7 @@ static inline void kmemleak_free(const void *ptr)
static inline void kmemleak_free_part(const void *ptr, size_t size)
{
}
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
}
static inline void kmemleak_free_percpu(const void __percpu *ptr)
@@ -106,6 +95,12 @@ static inline void kmemleak_update_trace(const void *ptr)
static inline void kmemleak_not_leak(const void *ptr)
{
}
+static inline void kmemleak_transient_leak(const void *ptr)
+{
+}
+static inline void kmemleak_ignore_percpu(const void __percpu *ptr)
+{
+}
static inline void kmemleak_ignore(const void *ptr)
{
}
@@ -119,15 +114,12 @@ static inline void kmemleak_no_scan(const void *ptr)
{
}
static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
- int min_count, gfp_t gfp)
+ gfp_t gfp)
{
}
static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
}
-static inline void kmemleak_not_leak_phys(phys_addr_t phys)
-{
-}
static inline void kmemleak_ignore_phys(phys_addr_t phys)
{
}
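
Illustration (a sketch, not part of the patch; struct my_dev and its ring buffer are hypothetical): kmemleak_not_leak() suppresses reports for an object whose only reference is invisible to the scanner, and the newly added kmemleak_transient_leak() is the variant for references that are hidden only temporarily.

#include <linux/kmemleak.h>
#include <linux/slab.h>

static int ring_init(struct my_dev *dev)
{
	dev->ring = kmalloc(4096, GFP_KERNEL);
	if (!dev->ring)
		return -ENOMEM;
	/* The only pointer to the ring is stashed in device registers,
	 * where the scanner cannot see it: suppress the false positive. */
	kmemleak_not_leak(dev->ring);
	return 0;
}
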
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 655082c88fd9..9a07c3215389 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -1,24 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __LINUX_KMOD_H__
#define __LINUX_KMOD_H__
/*
* include/linux/kmod.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/umh.h>
#include <linux/gfp.h>
#include <linux/stddef.h>
#include <linux/errno.h>
@@ -26,10 +14,7 @@
#include <linux/workqueue.h>
#include <linux/sysctl.h>
-#define KMOD_PATH_LEN 256
-
#ifdef CONFIG_MODULES
-extern char modprobe_path[]; /* for sysctl */
 /* modprobe exit status on success, negative on error. The return value
  * is usually useless though. */
extern __printf(2, 3)
@@ -44,63 +29,4 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
#define try_then_request_module(x, mod...) (x)
#endif
-
-struct cred;
-struct file;
-
-#define UMH_NO_WAIT 0 /* don't wait at all */
-#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */
-#define UMH_WAIT_PROC 2 /* wait for the process to complete */
-#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
-
-struct subprocess_info {
- struct work_struct work;
- struct completion *complete;
- const char *path;
- char **argv;
- char **envp;
- int wait;
- int retval;
- int (*init)(struct subprocess_info *info, struct cred *new);
- void (*cleanup)(struct subprocess_info *info);
- void *data;
-} __randomize_layout;
-
-extern int
-call_usermodehelper(const char *path, char **argv, char **envp, int wait);
-
-extern struct subprocess_info *
-call_usermodehelper_setup(const char *path, char **argv, char **envp,
- gfp_t gfp_mask,
- int (*init)(struct subprocess_info *info, struct cred *new),
- void (*cleanup)(struct subprocess_info *), void *data);
-
-extern int
-call_usermodehelper_exec(struct subprocess_info *info, int wait);
-
-extern struct ctl_table usermodehelper_table[];
-
-enum umh_disable_depth {
- UMH_ENABLED = 0,
- UMH_FREEZING,
- UMH_DISABLED,
-};
-
-extern int __usermodehelper_disable(enum umh_disable_depth depth);
-extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
-
-static inline int usermodehelper_disable(void)
-{
- return __usermodehelper_disable(UMH_DISABLED);
-}
-
-static inline void usermodehelper_enable(void)
-{
- __usermodehelper_set_disable_depth(UMH_ENABLED);
-}
-
-extern int usermodehelper_read_trylock(void);
-extern long usermodehelper_read_lock_wait(long timeout);
-extern void usermodehelper_read_unlock(void);
-
#endif /* __LINUX_KMOD_H__ */
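
Illustration (a sketch, not part of the patch; find_my_proto() and struct my_proto are hypothetical): with the usermodehelper machinery split out into <linux/umh.h>, what remains here is module autoloading, typically used in the lookup-then-load pattern.

static const struct my_proto *get_proto(const char *name)
{
	/* Evaluate the lookup; if it fails, ask modprobe to load
	 * "proto-<name>" and evaluate the lookup once more. */
	return try_then_request_module(find_my_proto(name), "proto-%s", name);
}
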
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
new file mode 100644
index 000000000000..e1082dc40abc
--- /dev/null
+++ b/include/linux/kmsan-checks.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN checks to be used for one-off annotations in subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#ifndef _LINUX_KMSAN_CHECKS_H
+#define _LINUX_KMSAN_CHECKS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_poison_memory() - Mark the memory range as uninitialized.
+ * @address: address to start with.
+ * @size: size of buffer to poison.
+ * @flags: GFP flags for allocations done by this function.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * uninitialized. Error reports for this memory will reference the call site of
+ * kmsan_poison_memory() as origin.
+ */
+void kmsan_poison_memory(const void *address, size_t size, gfp_t flags);
+
+/**
+ * kmsan_unpoison_memory() - Mark the memory range as initialized.
+ * @address: address to start with.
+ * @size: size of buffer to unpoison.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * initialized.
+ */
+void kmsan_unpoison_memory(const void *address, size_t size);
+
+/**
+ * kmsan_check_memory() - Check the memory range for being initialized.
+ * @address: address to start with.
+ * @size: size of buffer to check.
+ *
+ * If any piece of the given range is marked as uninitialized, KMSAN will report
+ * an error.
+ */
+void kmsan_check_memory(const void *address, size_t size);
+
+/**
+ * kmsan_copy_to_user() - Notify KMSAN about a data transfer to userspace.
+ * @to: destination address in the userspace.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ * @left: number of bytes not copied.
+ *
+ * If this is a real userspace data transfer, KMSAN checks the bytes that were
+ * actually copied to ensure there was no information leak. If @to belongs to
+ * the kernel space (which is possible for compat syscalls), KMSAN just copies
+ * the metadata.
+ */
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+ size_t left);
+
+/**
+ * kmsan_memmove() - Notify KMSAN about a data copy within kernel.
+ * @to: destination address in the kernel.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ *
+ * Invoked after non-instrumented version (e.g. implemented using assembly
+ * code) of memmove()/memcpy() is called, in order to copy KMSAN's metadata.
+ */
+void kmsan_memmove(void *to, const void *from, size_t to_copy);
+
+#else
+
+static inline void kmsan_poison_memory(const void *address, size_t size,
+ gfp_t flags)
+{
+}
+static inline void kmsan_unpoison_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_check_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_copy_to_user(void __user *to, const void *from,
+ size_t to_copy, size_t left)
+{
+}
+
+static inline void kmsan_memmove(void *to, const void *from, size_t to_copy)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_CHECKS_H */
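
Illustration (a sketch, not part of the patch; the rx/tx helpers are hypothetical): data written by non-instrumented code, such as a DMA engine, must be unpoisoned explicitly, while buffers headed to hardware can be checked at the boundary.

static void rx_complete(void *buf, size_t len)
{
	/* The device filled @buf behind KMSAN's back; mark it initialized
	 * so later reads do not produce false positives. */
	kmsan_unpoison_memory(buf, len);
}

static void tx_submit(const void *buf, size_t len)
{
	/* Report uninitialized bytes now, at the kernel/device boundary. */
	kmsan_check_memory(buf, len);
	/* ... hand @buf to the hardware ... */
}
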
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
new file mode 100644
index 000000000000..7da9fd506b39
--- /dev/null
+++ b/include/linux/kmsan.h
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN API for subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+struct page;
+struct kmem_cache;
+struct task_struct;
+struct scatterlist;
+struct urb;
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_task_create() - Initialize KMSAN state for the task.
+ * @task: task to initialize.
+ */
+void kmsan_task_create(struct task_struct *task);
+
+/**
+ * kmsan_task_exit() - Notify KMSAN that a task has exited.
+ * @task: task about to finish.
+ */
+void kmsan_task_exit(struct task_struct *task);
+
+/**
+ * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
+ *
+ * Allocate and initialize KMSAN metadata for early allocations.
+ */
+void __init kmsan_init_shadow(void);
+
+/**
+ * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
+ */
+void __init kmsan_init_runtime(void);
+
+/**
+ * kmsan_memblock_free_pages() - handle freeing of memblock pages.
+ * @page: struct page to free.
+ * @order: order of @page.
+ *
+ * Freed pages are either returned to the buddy allocator or held back to
+ * be used as metadata pages.
+ */
+bool __init __must_check kmsan_memblock_free_pages(struct page *page,
+ unsigned int order);
+
+/**
+ * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
+ * @page: struct page pointer returned by alloc_pages().
+ * @order: order of allocated struct page.
+ * @flags: GFP flags used by alloc_pages()
+ *
+ * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
+ * @flags contains __GFP_ZERO.
+ */
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
+
+/**
+ * kmsan_free_page() - Notify KMSAN about a free_pages() call.
+ * @page: struct page pointer passed to free_pages().
+ * @order: order of deallocated struct page.
+ *
+ * KMSAN marks freed memory as uninitialized.
+ */
+void kmsan_free_page(struct page *page, unsigned int order);
+
+/**
+ * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
+ * @dst: destination page.
+ * @src: source page.
+ *
+ * KMSAN copies the contents of metadata pages for @src into the metadata pages
+ * for @dst. If @dst has no associated metadata pages, nothing happens.
+ * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
+ */
+void kmsan_copy_page_meta(struct page *dst, struct page *src);
+
+/**
+ * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
+ * newly created object, marking it as initialized or uninitialized.
+ */
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+
+/**
+ * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ *
+ * KMSAN marks the freed object as uninitialized.
+ */
+void kmsan_slab_free(struct kmem_cache *s, void *object);
+
+/**
+ * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
+ * @ptr: object pointer.
+ * @size: object size.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Similar to kmsan_slab_alloc(), but for large allocations.
+ */
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+
+/**
+ * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
+ * @ptr: object pointer.
+ *
+ * Similar to kmsan_slab_free(), but for large allocations.
+ */
+void kmsan_kfree_large(const void *ptr);
+
+/**
+ * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap.
+ * @start: start of vmapped range.
+ * @end: end of vmapped range.
+ * @prot: page protection flags used for vmap.
+ * @pages: array of pages.
+ * @page_shift: page_shift passed to vmap_range_noflush().
+ * @gfp_mask: gfp_mask to use internally.
+ *
+ * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+ * the vmalloc metadata address range. Returns 0 on success; callers must
+ * check for a non-zero return value.
+ */
+int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
+ unsigned long end,
+ pgprot_t prot,
+ struct page **pages,
+ unsigned int page_shift,
+ gfp_t gfp_mask);
+
+/**
+ * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
+ * @start: start of vunmapped range.
+ * @end: end of vunmapped range.
+ *
+ * KMSAN unmaps the contiguous metadata ranges created by
+ * kmsan_vmap_pages_range_noflush().
+ */
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_ioremap_page_range() - Notify KMSAN about an ioremap_page_range() call.
+ * @addr: range start.
+ * @end: range end.
+ * @phys_addr: physical range start.
+ * @prot: page protection flags used for ioremap_page_range().
+ * @page_shift: page_shift argument passed to vmap_range_noflush().
+ *
+ * KMSAN creates new metadata pages for the physical pages mapped into
+ * virtual memory. Returns 0 on success; callers must check for a non-zero
+ * return value.
+ */
+int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift);
+
+/**
+ * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
+ * @start: range start.
+ * @end: range end.
+ *
+ * KMSAN unmaps the metadata pages for the given range and, unlike for
+ * vunmap_page_range(), also deallocates them.
+ */
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_handle_dma() - Handle a DMA data transfer.
+ * @phys: physical address of the buffer.
+ * @size: buffer size.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffer, if it is copied to device;
+ * * initializes the buffer, if it is copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
+ * @sg: scatterlist holding DMA buffers.
+ * @nents: number of scatterlist entries.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffers in the scatterlist, if they are copied to device;
+ * * initializes the buffers, if they are copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_urb() - Handle a USB data transfer.
+ * @urb: struct urb pointer.
+ * @is_out: data transfer direction (true means output to hardware).
+ *
+ * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
+ * KMSAN initializes the transfer buffer.
+ */
+void kmsan_handle_urb(const struct urb *urb, bool is_out);
+
+/**
+ * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
+ * @regs: struct pt_regs pointer received from assembly code.
+ *
+ * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
+ * false positive reports. Unlike kmsan_unpoison_memory(),
+ * kmsan_unpoison_entry_regs() can be called from the regions where
+ * kmsan_in_runtime() returns true, which is the case in early entry code.
+ */
+void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
+
+/**
+ * kmsan_get_metadata() - Return a pointer to KMSAN shadow or origins.
+ * @addr: kernel address.
+ * @is_origin: whether to return origins or shadow.
+ *
+ * Return: NULL if metadata cannot be found.
+ */
+void *kmsan_get_metadata(void *addr, bool is_origin);
+
+/**
+ * kmsan_enable_current(): Enable KMSAN for the current task.
+ *
+ * Each kmsan_enable_current() call must be preceded by a
+ * kmsan_disable_current() call. These call pairs may be nested.
+ */
+void kmsan_enable_current(void);
+
+/**
+ * kmsan_disable_current(): Disable KMSAN for the current task.
+ *
+ * Each kmsan_disable_current() call must be followed by a
+ * kmsan_enable_current() call. These call pairs may be nested.
+ */
+void kmsan_disable_current(void);
+
+/**
+ * memset_no_sanitize_memory(): Fill memory without KMSAN instrumentation.
+ * @s: address of kernel memory to fill.
+ * @c: constant byte to fill the memory with.
+ * @n: number of bytes to fill.
+ *
+ * This is like memset(), but without KMSAN instrumentation.
+ */
+static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
+{
+ return __memset(s, c, n);
+}
+
+extern bool kmsan_enabled;
+extern int panic_on_kmsan;
+
+/*
+ * KMSAN performs a lot of consistency checks that are currently enabled by
+ * default. BUG_ON is normally discouraged in the kernel, unless used for
+ * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
+ * recover if something goes wrong.
+ */
+#define KMSAN_WARN_ON(cond) \
+ ({ \
+ const bool __cond = WARN_ON(cond); \
+ if (unlikely(__cond)) { \
+ WRITE_ONCE(kmsan_enabled, false); \
+ if (panic_on_kmsan) { \
+ /* Can't call panic() here because */ \
+ /* of uaccess checks. */ \
+ BUG(); \
+ } \
+ } \
+ __cond; \
+ })
+
+#else
+
+static inline void kmsan_init_shadow(void)
+{
+}
+
+static inline void kmsan_init_runtime(void)
+{
+}
+
+static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
+ unsigned int order)
+{
+ return true;
+}
+
+static inline void kmsan_task_create(struct task_struct *task)
+{
+}
+
+static inline void kmsan_task_exit(struct task_struct *task)
+{
+}
+
+static inline void kmsan_alloc_page(struct page *page, unsigned int order,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_free_page(struct page *page, unsigned int order)
+{
+}
+
+static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+}
+
+static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+}
+
+static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_kfree_large(const void *ptr)
+{
+}
+
+static inline int __must_check kmsan_vmap_pages_range_noflush(
+ unsigned long start, unsigned long end, pgprot_t prot,
+ struct page **pages, unsigned int page_shift, gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline int __must_check kmsan_ioremap_page_range(unsigned long start,
+ unsigned long end,
+ phys_addr_t phys_addr,
+ pgprot_t prot,
+ unsigned int page_shift)
+{
+ return 0;
+}
+
+static inline void kmsan_iounmap_page_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
+{
+}
+
+static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
+{
+}
+
+static inline void kmsan_enable_current(void)
+{
+}
+
+static inline void kmsan_disable_current(void)
+{
+}
+
+static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
+{
+ return memset(s, c, n);
+}
+
+#define KMSAN_WARN_ON WARN_ON
+
+#endif
+
+#endif /* _LINUX_KMSAN_H */
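
Illustration (a sketch, not part of the patch; struct obj and hash_object() are hypothetical): the nestable kmsan_disable_current()/kmsan_enable_current() pair brackets a region that deliberately reads uninitialized bytes, such as hashing a structure including its padding.

static u32 hash_with_padding(const struct obj *o)
{
	u32 h;

	kmsan_disable_current();
	h = hash_object(o, sizeof(*o));	/* may read uninitialized padding */
	kmsan_enable_current();

	return h;
}
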
diff --git a/include/linux/kmsan_string.h b/include/linux/kmsan_string.h
new file mode 100644
index 000000000000..7287da6f52ef
--- /dev/null
+++ b/include/linux/kmsan_string.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN string functions API used in other headers.
+ *
+ * Copyright (C) 2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_STRING_H
+#define _LINUX_KMSAN_STRING_H
+
+/*
+ * KMSAN overrides the default memcpy/memset/memmove implementations in the
+ * kernel, which requires having __msan_XXX function prototypes in several other
+ * headers. Keep them in one place instead of open-coding.
+ */
+void *__msan_memcpy(void *dst, const void *src, size_t size);
+void *__msan_memset(void *s, int c, size_t n);
+void *__msan_memmove(void *dest, const void *src, size_t len);
+
+#endif /* _LINUX_KMSAN_STRING_H */
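
Illustration (a sketch, not part of the patch; my_memcpy() is hypothetical): these prototypes exist so that other headers can route string operations through KMSAN without pulling in the full <linux/kmsan.h>, roughly like this.

#ifdef CONFIG_KMSAN
#include <linux/kmsan_string.h>
#define my_memcpy(dst, src, len)	__msan_memcpy(dst, src, len)
#else
#define my_memcpy(dst, src, len)	memcpy(dst, src, len)
#endif
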
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
new file mode 100644
index 000000000000..dfc59918b3c0
--- /dev/null
+++ b/include/linux/kmsan_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A minimal header declaring types added by KMSAN to existing kernel structs.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_TYPES_H
+#define _LINUX_KMSAN_TYPES_H
+
+#include <linux/types.h>
+
+/* These constants are defined in the MSan LLVM instrumentation pass. */
+#define KMSAN_RETVAL_SIZE 800
+#define KMSAN_PARAM_SIZE 800
+
+struct kmsan_context_state {
+ char param_tls[KMSAN_PARAM_SIZE];
+ char retval_tls[KMSAN_RETVAL_SIZE];
+ char va_arg_tls[KMSAN_PARAM_SIZE];
+ char va_arg_origin_tls[KMSAN_PARAM_SIZE];
+ u64 va_arg_overflow_size_tls;
+ char param_origin_tls[KMSAN_PARAM_SIZE];
+ u32 retval_origin_tls;
+};
+
+#undef KMSAN_PARAM_SIZE
+#undef KMSAN_RETVAL_SIZE
+
+struct kmsan_ctx {
+ struct kmsan_context_state cstate;
+ int kmsan_in_runtime;
+ unsigned int depth;
+};
+
+#endif /* _LINUX_KMSAN_TYPES_H */
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 2e7a1e032c71..6055fc969877 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -25,9 +25,29 @@ enum kmsg_dump_reason {
KMSG_DUMP_PANIC,
KMSG_DUMP_OOPS,
KMSG_DUMP_EMERG,
- KMSG_DUMP_RESTART,
- KMSG_DUMP_HALT,
- KMSG_DUMP_POWEROFF,
+ KMSG_DUMP_SHUTDOWN,
+ KMSG_DUMP_MAX
+};
+
+/**
+ * struct kmsg_dump_iter - iterator for retrieving kernel messages
+ * @cur_seq: Points to the oldest message to dump
+ * @next_seq: Points after the newest message to dump
+ */
+struct kmsg_dump_iter {
+ u64 cur_seq;
+ u64 next_seq;
+};
+
+/**
+ * struct kmsg_dump_detail - kernel crash detail
+ * @reason: reason for the crash, see kmsg_dump_reason.
+ * @description: optional short string, to provide additional information.
+ */
+
+struct kmsg_dump_detail {
+ enum kmsg_dump_reason reason;
+ const char *description;
};
/**
@@ -40,66 +60,45 @@ enum kmsg_dump_reason {
*/
struct kmsg_dumper {
struct list_head list;
- void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+ void (*dump)(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail);
enum kmsg_dump_reason max_reason;
- bool active;
bool registered;
-
- /* private state of the kmsg iterator */
- u32 cur_idx;
- u32 next_idx;
- u64 cur_seq;
- u64 next_seq;
};
#ifdef CONFIG_PRINTK
-void kmsg_dump(enum kmsg_dump_reason reason);
-
-bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
- char *line, size_t size, size_t *len);
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc);
-bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
-bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
- char *buf, size_t size, size_t *len);
+bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
+ char *buf, size_t size, size_t *len_out);
-void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
-
-void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+void kmsg_dump_rewind(struct kmsg_dump_iter *iter);
int kmsg_dump_register(struct kmsg_dumper *dumper);
int kmsg_dump_unregister(struct kmsg_dumper *dumper);
-#else
-static inline void kmsg_dump(enum kmsg_dump_reason reason)
-{
-}
-static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
- bool syslog, const char *line,
- size_t size, size_t *len)
+const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason);
+#else
+static inline void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
- return false;
}
-static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
const char *line, size_t size, size_t *len)
{
return false;
}
-static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
char *buf, size_t size, size_t *len)
{
return false;
}
-static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
-{
-}
-
-static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+static inline void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
}
@@ -112,6 +111,16 @@ static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
return -EINVAL;
}
+
+static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
+{
+ return "Disabled";
+}
#endif
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+ kmsg_dump_desc(reason, NULL);
+}
+
#endif /* _LINUX_KMSG_DUMP_H */
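
Illustration (a sketch, not part of the patch; pstore_write_line() is a hypothetical backend): a dumper written against the reworked API keeps its iterator on the stack and receives the reason via struct kmsg_dump_detail.

static void my_dump(struct kmsg_dumper *dumper,
		    struct kmsg_dump_detail *detail)
{
	struct kmsg_dump_iter iter;
	char line[256];
	size_t len;

	if (detail->reason != KMSG_DUMP_PANIC)
		return;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
		pstore_write_line(line, len);
}

static struct kmsg_dumper my_dumper = {
	.dump	    = my_dump,
	.max_reason = KMSG_DUMP_PANIC,
};

/* kmsg_dump_register(&my_dumper) at init time. */
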
diff --git a/include/linux/kobj_map.h b/include/linux/kobj_map.h
index 18ca75ffcc5a..c9919f8b2293 100644
--- a/include/linux/kobj_map.h
+++ b/include/linux/kobj_map.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* kobj_map.h
*/
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e0a6205caa71..c8219505a79f 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kobject.h - generic kernel object infrastructure.
*
@@ -6,9 +7,7 @@
* Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2008 Novell Inc.
*
- * This file is released under the GPLv2.
- *
- * Please read Documentation/kobject.txt before using the kobject
+ * Please read Documentation/core-api/kobject.rst before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
*/
@@ -20,16 +19,17 @@
#include <linux/list.h>
#include <linux/sysfs.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/kobject_ns.h>
-#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
+#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
-#define UEVENT_NUM_ENVP 32 /* number of env pointers */
+#define UEVENT_NUM_ENVP 64 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
#ifdef CONFIG_UEVENT_HELPER
@@ -38,7 +38,7 @@ extern char uevent_helper[];
#endif
/* counter to tag the uevent, read only except for the kobject core */
-extern u64 uevent_seqnum;
+extern atomic64_t uevent_seqnum;
/*
* The actions here must match the index to the string array
@@ -59,7 +59,6 @@ enum kobject_action {
KOBJ_OFFLINE,
KOBJ_BIND,
KOBJ_UNBIND,
- KOBJ_MAX
};
struct kobject {
@@ -67,62 +66,60 @@ struct kobject {
struct list_head entry;
struct kobject *parent;
struct kset *kset;
- struct kobj_type *ktype;
+ const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
-#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- struct delayed_work release;
-#endif
+
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
unsigned int uevent_suppress:1;
+
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ struct delayed_work release;
+#endif
};
-extern __printf(2, 3)
-int kobject_set_name(struct kobject *kobj, const char *name, ...);
-extern __printf(2, 0)
-int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
- va_list vargs);
+__printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...);
+__printf(2, 0) int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs);
static inline const char *kobject_name(const struct kobject *kobj)
{
return kobj->name;
}
-extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
-extern __printf(3, 4) __must_check
-int kobject_add(struct kobject *kobj, struct kobject *parent,
- const char *fmt, ...);
-extern __printf(4, 5) __must_check
-int kobject_init_and_add(struct kobject *kobj,
- struct kobj_type *ktype, struct kobject *parent,
- const char *fmt, ...);
+void kobject_init(struct kobject *kobj, const struct kobj_type *ktype);
+__printf(3, 4) __must_check int kobject_add(struct kobject *kobj,
+ struct kobject *parent,
+ const char *fmt, ...);
+__printf(4, 5) __must_check int kobject_init_and_add(struct kobject *kobj,
+ const struct kobj_type *ktype,
+ struct kobject *parent,
+ const char *fmt, ...);
-extern void kobject_del(struct kobject *kobj);
+void kobject_del(struct kobject *kobj);
-extern struct kobject * __must_check kobject_create(void);
-extern struct kobject * __must_check kobject_create_and_add(const char *name,
- struct kobject *parent);
+struct kobject * __must_check kobject_create_and_add(const char *name, struct kobject *parent);
-extern int __must_check kobject_rename(struct kobject *, const char *new_name);
-extern int __must_check kobject_move(struct kobject *, struct kobject *);
+int __must_check kobject_rename(struct kobject *, const char *new_name);
+int __must_check kobject_move(struct kobject *, struct kobject *);
-extern struct kobject *kobject_get(struct kobject *kobj);
-extern struct kobject * __must_check kobject_get_unless_zero(
- struct kobject *kobj);
-extern void kobject_put(struct kobject *kobj);
+struct kobject *kobject_get(struct kobject *kobj);
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj);
+void kobject_put(struct kobject *kobj);
-extern const void *kobject_namespace(struct kobject *kobj);
-extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
+const void *kobject_namespace(const struct kobject *kobj);
+void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
+char *kobject_get_path(const struct kobject *kobj, gfp_t flag);
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs;
- const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
- const void *(*namespace)(struct kobject *kobj);
+ const struct attribute_group **default_groups;
+ const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj);
+ const void *(*namespace)(const struct kobject *kobj);
+ void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};
struct kobj_uevent_env {
@@ -134,10 +131,9 @@ struct kobj_uevent_env {
};
struct kset_uevent_ops {
- int (* const filter)(struct kset *kset, struct kobject *kobj);
- const char *(* const name)(struct kset *kset, struct kobject *kobj);
- int (* const uevent)(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env);
+ int (* const filter)(const struct kobject *kobj);
+ const char *(* const name)(const struct kobject *kobj);
+ int (* const uevent)(const struct kobject *kobj, struct kobj_uevent_env *env);
};
struct kobj_attribute {
@@ -176,12 +172,11 @@ struct kset {
const struct kset_uevent_ops *uevent_ops;
} __randomize_layout;
-extern void kset_init(struct kset *kset);
-extern int __must_check kset_register(struct kset *kset);
-extern void kset_unregister(struct kset *kset);
-extern struct kset * __must_check kset_create_and_add(const char *name,
- const struct kset_uevent_ops *u,
- struct kobject *parent_kobj);
+void kset_init(struct kset *kset);
+int __must_check kset_register(struct kset *kset);
+void kset_unregister(struct kset *kset);
+struct kset * __must_check kset_create_and_add(const char *name, const struct kset_uevent_ops *u,
+ struct kobject *parent_kobj);
static inline struct kset *to_kset(struct kobject *kobj)
{
@@ -198,12 +193,12 @@ static inline void kset_put(struct kset *k)
kobject_put(&k->kobj);
}
-static inline struct kobj_type *get_ktype(struct kobject *kobj)
+static inline const struct kobj_type *get_ktype(const struct kobject *kobj)
{
return kobj->ktype;
}
-extern struct kobject *kset_find_obj(struct kset *, const char *);
+struct kobject *kset_find_obj(struct kset *, const char *);
/* The global /sys/kernel/ kobject for people to chain off of */
extern struct kobject *kernel_kobj;
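
Illustration (a sketch, not part of the patch; struct foo_obj and foo_groups are hypothetical): under the constified API above, a ktype can now be const and carries attribute groups rather than a flat attribute list.

static void foo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct foo_obj, kobj));
}

static const struct kobj_type foo_ktype = {
	.release	= foo_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= foo_groups,
};

/* err = kobject_init_and_add(&foo->kobj, &foo_ktype, parent, "foo%d", id);
 * on failure the caller must still call kobject_put(&foo->kobj). */
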
diff --git a/include/linux/kobject_api.h b/include/linux/kobject_api.h
new file mode 100644
index 000000000000..6e36a054c2d6
--- /dev/null
+++ b/include/linux/kobject_api.h
@@ -0,0 +1 @@
+#include <linux/kobject.h>
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index df32d2508290..150fe2ae1b6b 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Kernel object name space definitions
*
* Copyright (c) 2002-2003 Patrick Mochel
@@ -7,9 +8,7 @@
*
* Split from kobject.h by David Howells (dhowells@redhat.com)
*
- * This file is released under the GPLv2.
- *
- * Please read Documentation/kobject.txt before using the kobject
+ * Please read Documentation/core-api/kobject.rst before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
*/
@@ -48,13 +47,11 @@ struct kobj_ns_type_operations {
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
-const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
-const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj);
bool kobj_ns_current_may_mount(enum kobj_ns_type type);
void *kobj_ns_grab_current(enum kobj_ns_type type);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
-const void *kobj_ns_initial(enum kobj_ns_type type);
void kobj_ns_drop(enum kobj_ns_type type, void *ns);
#endif /* _LINUX_KOBJECT_NS_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index bd2684700b74..8c4f3bb24429 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_KPROBES_H
#define _LINUX_KPROBES_H
/*
* Kernel Probes (KProbes)
- * include/linux/kprobes.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2002, 2004
*
@@ -40,6 +26,8 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
+#include <linux/objpool.h>
+#include <linux/rethook.h>
#include <asm/kprobes.h>
#ifdef CONFIG_KPROBES
@@ -50,7 +38,7 @@
#define KPROBE_REENTER 0x00000004
#define KPROBE_HIT_SSDONE 0x00000008
-#else /* CONFIG_KPROBES */
+#else /* !CONFIG_KPROBES */
#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
@@ -63,11 +51,8 @@ struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
-typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
-typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
- int trapnr);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
struct pt_regs *);
@@ -95,18 +80,6 @@ struct kprobe {
/* Called after addr is executed, unless... */
kprobe_post_handler_t post_handler;
- /*
- * ... called if executing addr causes a fault (eg. page fault).
- * Return 1 if it handled fault, otherwise kernel will see it.
- */
- kprobe_fault_handler_t fault_handler;
-
- /*
- * ... called if breakpoint trap occurs in probe handler.
- * Return 1 if it handled break, otherwise kernel will see it.
- */
- kprobe_break_handler_t break_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
@@ -129,50 +102,33 @@ struct kprobe {
* this flag is only for optimized_kprobe.
*/
#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
+#define KPROBE_FLAG_ON_FUNC_ENTRY 16 /* probe is on the function entry */
/* Has this kprobe gone ? */
-static inline int kprobe_gone(struct kprobe *p)
+static inline bool kprobe_gone(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_GONE;
}
/* Is this kprobe disabled ? */
-static inline int kprobe_disabled(struct kprobe *p)
+static inline bool kprobe_disabled(struct kprobe *p)
{
return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}
 /* Is this kprobe really running the optimized path ? */
-static inline int kprobe_optimized(struct kprobe *p)
+static inline bool kprobe_optimized(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_OPTIMIZED;
}
 /* Does this kprobe use ftrace ? */
-static inline int kprobe_ftrace(struct kprobe *p)
+static inline bool kprobe_ftrace(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_FTRACE;
}
/*
- * Special probe type that uses setjmp-longjmp type tricks to resume
- * execution at a specified entry with a matching prototype corresponding
- * to the probed function - a trick to enable arguments to become
- * accessible seamlessly by probe handling logic.
- * Note:
- * Because of the way compilers allocate stack space for local variables
- * etc upfront, regardless of sub-scopes within a function, this mirroring
- * principle currently works only for probes placed on function entry points.
- */
-struct jprobe {
- struct kprobe kp;
- void *entry; /* probe handling code to jump to */
-};
-
-/* For backward compatibility with old code using JPROBE_ENTRY() */
-#define JPROBE_ENTRY(handler) (handler)
-
-/*
* Function-return probe -
* Note:
* User needs to provide a handler function, and initialize maxactive.
@@ -182,6 +138,11 @@ struct jprobe {
* ignored, due to maxactive being too low.
*
*/
+struct kretprobe_holder {
+ struct kretprobe __rcu *rp;
+ struct objpool_head pool;
+};
+
struct kretprobe {
struct kprobe kp;
kretprobe_handler_t handler;
@@ -189,16 +150,26 @@ struct kretprobe {
int maxactive;
int nmissed;
size_t data_size;
- struct hlist_head free_instances;
- raw_spinlock_t lock;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook *rh;
+#else
+ struct kretprobe_holder *rph;
+#endif
};
+#define KRETPROBE_MAX_DATA_SIZE 4096
+
struct kretprobe_instance {
- struct hlist_node hlist;
- struct kretprobe *rp;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook_node node;
+#else
+ struct rcu_head rcu;
+ struct llist_node llist;
+ struct kretprobe_holder *rph;
kprobe_opcode_t *ret_addr;
- struct task_struct *task;
- char data[0];
+ void *fp;
+#endif
+ char data[];
};
struct kretprobe_blackpoint {
@@ -216,66 +187,106 @@ struct kprobe_blacklist_entry {
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-/*
- * For #ifdef avoidance:
- */
-static inline int kprobes_built_in(void)
-{
- return 1;
-}
+extern void kprobe_busy_begin(void);
+extern void kprobe_busy_end(void);
#ifdef CONFIG_KRETPROBES
+/* Check whether @p is used for implementing a trampoline. */
+extern int arch_trampoline_kprobe(struct kprobe *p);
+
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+	/* rethook::data never changes, so it can be accessed freely. */
+ return (struct kretprobe *)ri->node.rethook->data;
+}
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+ return ri->node.ret_addr;
+}
+#else
extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs);
-extern int arch_trampoline_kprobe(struct kprobe *p);
-#else /* CONFIG_KRETPROBES */
-static inline void arch_prepare_kretprobe(struct kretprobe *rp,
- struct pt_regs *regs)
+void arch_kretprobe_fixup_return(struct pt_regs *regs,
+ kprobe_opcode_t *correct_ret_addr);
+
+void __kretprobe_trampoline(void);
+/*
+ * Since some architectures use function descriptors rather than plain
+ * function pointers, use dereference_kernel_function_descriptor() to get
+ * the real function address.
+ */
+static nokprobe_inline void *kretprobe_trampoline_addr(void)
{
+ return dereference_kernel_function_descriptor(__kretprobe_trampoline);
}
-static inline int arch_trampoline_kprobe(struct kprobe *p)
+
+/* If the trampoline handler is called from a kprobe, use this version */
+unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
+ void *frame_pointer);
+
+static nokprobe_inline
+unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
+ void *frame_pointer)
{
- return 0;
+ unsigned long ret;
+ /*
+ * Set a dummy kprobe for avoiding kretprobe recursion.
+ * Since a kretprobe handler never runs inside a kprobe handler, no
+ * kprobe should be running at this point.
+ */
+ kprobe_busy_begin();
+ ret = __kretprobe_trampoline_handler(regs, frame_pointer);
+ kprobe_busy_end();
+
+ return ret;
}
-#endif /* CONFIG_KRETPROBES */
-extern struct kretprobe_blackpoint kretprobe_blacklist[];
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+ return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
+}
-static inline void kretprobe_assert(struct kretprobe_instance *ri,
- unsigned long orig_ret_address, unsigned long trampoline_address)
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
{
- if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
- printk("kretprobe BUG!: Processing kretprobe %p @ %p\n",
- ri->rp, ri->rp->kp.addr);
- BUG();
- }
+ return (unsigned long)ri->ret_addr;
}
+#endif /* CONFIG_KRETPROBE_ON_RETHOOK */
-#ifdef CONFIG_KPROBES_SANITY_TEST
-extern int init_test_probes(void);
-#else
-static inline int init_test_probes(void)
+#else /* !CONFIG_KRETPROBES */
+static inline void arch_prepare_kretprobe(struct kretprobe *rp,
+ struct pt_regs *regs)
+{
+}
+static inline int arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
}
-#endif /* CONFIG_KPROBES_SANITY_TEST */
+#endif /* CONFIG_KRETPROBES */
+
+/* Markers of '_kprobe_blacklist' section */
+extern unsigned long __start_kprobe_blacklist[];
+extern unsigned long __stop_kprobe_blacklist[];
+
+extern struct kretprobe_blackpoint kretprobe_blacklist[];
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
-extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
-extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int arch_populate_kprobe_blacklist(void);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
+extern int kprobe_add_ksym_blacklist(unsigned long entry);
+extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);
struct kprobe_insn_cache {
struct mutex mutex;
void *(*alloc)(void); /* allocate insn page */
void (*free)(void *); /* free insn page */
+ const char *sym; /* symbol for insn pages */
struct list_head pages; /* list of kprobe_insn_page */
size_t insn_size; /* size of instruction slot */
int nr_garbage;
@@ -306,7 +317,11 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \
}
-#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
+#define KPROBE_INSN_PAGE_SYM "kprobe_insn_page"
+#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page"
+int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
+ unsigned long *value, char *type, char *sym);
+#else /* !__ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name) \
static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
@@ -337,41 +352,41 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr);
+ kprobe_opcode_t *addr);
extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
DEFINE_INSN_CACHE_OPS(optinsn);
-#ifdef CONFIG_SYSCTL
-extern int sysctl_kprobes_optimization;
-extern int proc_kprobes_optimization_handler(struct ctl_table *table,
- int write, void __user *buffer,
- size_t *length, loff_t *ppos);
-#endif
extern void wait_for_kprobe_optimizer(void);
-#else
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
+bool kprobe_disarmed(struct kprobe *p);
+#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
+
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs);
+ struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
-#endif
-
-int arch_check_ftrace_location(struct kprobe *p);
+/* Set when ftrace has been killed: kprobes on ftrace must be disabled for safety */
+extern bool kprobe_ftrace_disabled __read_mostly;
+extern void kprobe_ftrace_kill(void);
+#else
+static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ return -EINVAL;
+}
+static inline void kprobe_ftrace_kill(void) {}
+#endif /* CONFIG_KPROBES_ON_FTRACE */
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
-void kretprobe_hash_lock(struct task_struct *tsk,
- struct hlist_head **head, unsigned long *flags);
-void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags);
-struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
- return (__this_cpu_read(current_kprobe));
+ return __this_cpu_read(current_kprobe);
}
static inline void reset_current_kprobe(void)
@@ -385,38 +400,47 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
}
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry);
+
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
-int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-int register_jprobe(struct jprobe *p);
-void unregister_jprobe(struct jprobe *p);
-int register_jprobes(struct jprobe **jps, int num);
-void unregister_jprobes(struct jprobe **jps, int num);
-void jprobe_return(void);
-unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);
+#if defined(CONFIG_KRETPROBE_ON_RETHOOK) || !defined(CONFIG_KRETPROBES)
+#define kprobe_flush_task(tk) do {} while (0)
+#else
void kprobe_flush_task(struct task_struct *tk);
-void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
+#endif
+
+void kprobe_free_init_mem(void);
int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
+void *alloc_insn_page(void);
+
+void *alloc_optinsn_page(void);
+void free_optinsn_page(void *page);
+
+int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *sym);
+
+int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
+ char *type, char *sym);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
#else /* !CONFIG_KPROBES: */
-static inline int kprobes_built_in(void)
-{
- return 0;
-}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
return 0;
@@ -429,13 +453,16 @@ static inline struct kprobe *kprobe_running(void)
{
return NULL;
}
+#define kprobe_busy_begin() do {} while (0)
+#define kprobe_busy_end() do {} while (0)
+
static inline int register_kprobe(struct kprobe *p)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kprobe(struct kprobe *p)
{
@@ -443,49 +470,49 @@ static inline void unregister_kprobe(struct kprobe *p)
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
-static inline int register_jprobe(struct jprobe *p)
-{
- return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
+static inline int register_kretprobe(struct kretprobe *rp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
-static inline void unregister_jprobe(struct jprobe *p)
+static inline int register_kretprobes(struct kretprobe **rps, int num)
{
+ return -EOPNOTSUPP;
}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
+static inline void unregister_kretprobe(struct kretprobe *rp)
{
}
-static inline void jprobe_return(void)
+static inline void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
-static inline int register_kretprobe(struct kretprobe *rp)
+static inline void kprobe_flush_task(struct task_struct *tk)
{
- return -ENOSYS;
}
-static inline int register_kretprobes(struct kretprobe **rps, int num)
+static inline void kprobe_free_init_mem(void)
{
- return -ENOSYS;
}
-static inline void unregister_kretprobe(struct kretprobe *rp)
+static inline void kprobe_ftrace_kill(void)
{
}
-static inline void unregister_kretprobes(struct kretprobe **rps, int num)
+static inline int disable_kprobe(struct kprobe *kp)
{
+ return -EOPNOTSUPP;
}
-static inline void kprobe_flush_task(struct task_struct *tk)
+static inline int enable_kprobe(struct kprobe *kp)
{
+ return -EOPNOTSUPP;
}
-static inline int disable_kprobe(struct kprobe *kp)
+
+static inline bool within_kprobe_blacklist(unsigned long addr)
{
- return -ENOSYS;
+ return true;
}
-static inline int enable_kprobe(struct kprobe *kp)
+static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *sym)
{
- return -ENOSYS;
+ return -ERANGE;
}
#endif /* CONFIG_KPROBES */
+
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -494,26 +521,74 @@ static inline int enable_kretprobe(struct kretprobe *rp)
{
return enable_kprobe(&rp->kp);
}
-static inline int disable_jprobe(struct jprobe *jp)
-{
- return disable_kprobe(&jp->kp);
-}
-static inline int enable_jprobe(struct jprobe *jp)
-{
- return enable_kprobe(&jp->kp);
-}
#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
{
return false;
}
-#endif
+#endif /* !CONFIG_KPROBES */
+
#ifndef CONFIG_OPTPROBES
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
return false;
}
+#endif /* !CONFIG_OPTPROBES */
+
+#ifdef CONFIG_KRETPROBES
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return is_rethook_trampoline(addr);
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return rethook_find_ret_addr(tsk, (unsigned long)fp, cur);
+}
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return (void *)addr == kretprobe_trampoline_addr();
+}
+
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur);
+#endif
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return false;
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return 0;
+}
#endif
+/* Returns true if kprobes handled the fault */
+static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
+ unsigned int trap)
+{
+ if (!IS_ENABLED(CONFIG_KPROBES))
+ return false;
+ if (user_mode(regs))
+ return false;
+ /*
+ * To be potentially processing a kprobe fault and to be allowed
+ * to call kprobe_running(), we have to be non-preemptible.
+ */
+ if (preemptible())
+ return false;
+ if (!kprobe_running())
+ return false;
+ return kprobe_fault_handler(regs, trap);
+}
+
#endif /* _LINUX_KPROBES_H */
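
Illustration (a sketch, not part of the patch): a kretprobe written against the reworked API; the handler works whether the backend is rethook or the legacy instance lists, and registration now fails with -EOPNOTSUPP rather than -ENOSYS when kprobes are compiled out.

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("kernel_clone returned to %lx\n", get_kretprobe_retaddr(ri));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "kernel_clone",
	.handler	= ret_handler,
	.maxactive	= 20,
};

/* register_kretprobe(&my_kretprobe) in module init,
 * unregister_kretprobe(&my_kretprobe) in module exit. */
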
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 29220724bf1c..88e82ab1367c 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* kref.h - library routines for handling generic reference counted objects
*
@@ -7,9 +8,6 @@
* based on kobject.h which was:
* Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (C) 2002-2003 Open Source Development Labs
- *
- * This file is released under the GPLv2.
- *
*/
#ifndef _KREF_H_
@@ -48,21 +46,18 @@ static inline void kref_get(struct kref *kref)
}
/**
- * kref_put - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
+ * kref_put - Decrement refcount for object
+ * @kref: Object
+ * @release: Pointer to the function that will clean up the object when the
* last reference to the object is released.
- * This pointer is required, and it is not acceptable to pass kfree
- * in as this function. If the caller does pass kfree to this
- * function, you will be publicly mocked mercilessly by the kref
- * maintainer, and anyone else who happens to notice it. You have
- * been warned.
*
- * Decrement the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0. Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory. Only use the return value if you want to see if the kref is now
- * gone, not present.
+ * Decrement the refcount, and if 0, call @release. The caller may not
+ * pass NULL or kfree() as the release function.
+ *
+ * Return: 1 if this call removed the object, otherwise return 0. Beware,
+ * if this function returns 0, another caller may have removed the object
+ * by the time this function returns. Use the return value only to tell
+ * whether this call was the one that released the object, not whether
+ * the object is still present.
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
@@ -73,17 +68,37 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return 0;
}
+/**
+ * kref_put_mutex - Decrement refcount for object
+ * @kref: Object
+ * @release: Pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * @mutex: Mutex which protects the release function.
+ *
+ * This variant of kref_put() calls the @release function with the @mutex
+ * held. The @release function will release the mutex.
+ */
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
- struct mutex *lock)
+ struct mutex *mutex)
{
- if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
+ if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) {
release(kref);
return 1;
}
return 0;
}
+/**
+ * kref_put_lock - Decrement refcount for object
+ * @kref: Object
+ * @release: Pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * @lock: Spinlock which protects the release function.
+ *
+ * This variant of kref_put() calls the @release function with the @lock
+ * held. The @release function will release the lock.
+ */
static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
@@ -99,8 +114,6 @@ static inline int kref_put_lock(struct kref *kref,
* kref_get_unless_zero - Increment refcount for object unless it is zero.
* @kref: object.
*
- * Return non-zero if the increment succeeded. Otherwise return 0.
- *
* This function is intended to simplify locking around refcounting for
* objects that can be looked up from a lookup structure, and which are
* removed from that lookup structure in the object destructor.
@@ -110,6 +123,8 @@ static inline int kref_put_lock(struct kref *kref,
* With a lookup followed by a kref_get_unless_zero *with return value check*
* locking in the kref_put path can be deferred to the actual removal from
* the lookup structure and RCU lookups become trivial.
+ *
+ * Return: non-zero if the increment succeeded. Otherwise return 0.
*/
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
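The lookup pattern described above, sketched against the hypothetical struct foo from the earlier example (the list and its spinlock are likewise invented):

static struct foo *foo_lookup(struct list_head *head, int id)
{
        struct foo *foo;

        spin_lock(&foo_list_lock);
        list_for_each_entry(foo, head, node) {
                /* hand out the object only if it was not already dying */
                if (foo->id == id && kref_get_unless_zero(&foo->refcount)) {
                        spin_unlock(&foo_list_lock);
                        return foo;
                }
        }
        spin_unlock(&foo_list_lock);
        return NULL;
}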
diff --git a/include/linux/kref_api.h b/include/linux/kref_api.h
new file mode 100644
index 000000000000..d67e554721d2
--- /dev/null
+++ b/include/linux/kref_api.h
@@ -0,0 +1 @@
+#include <linux/kref.h>
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h
index cb311798e0bc..1a37a664f915 100644
--- a/include/linux/ks0108.h
+++ b/include/linux/ks0108.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: ks0108.h
* Version: 0.1.0
* Description: ks0108 LCD Controller driver header
- * License: GPLv2
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _KS0108_H_
diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h
index 14ba4452296e..96ffdf3cbe21 100644
--- a/include/linux/ks8842.h
+++ b/include/linux/ks8842.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ks8842.h KS8842 platform data struct definition
* Copyright (c) 2010 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_KS8842_H
diff --git a/include/linux/ks8851_mll.h b/include/linux/ks8851_mll.h
index e9ccfb59ed30..57c0a39ed796 100644
--- a/include/linux/ks8851_mll.h
+++ b/include/linux/ks8851_mll.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ks8861_mll platform data struct definition
* Copyright (c) 2012 BTicino S.p.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_KS8851_MLL_H
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 78b44a024eaa..c982694c987b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
@@ -12,39 +13,71 @@
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
-#include <linux/sched/coredump.h>
-
-struct stable_node;
-struct mem_cgroup;
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags);
+ unsigned long end, int advice, vm_flags_t *vm_flags);
+vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
+ vm_flags_t vm_flags);
+int ksm_enable_merge_any(struct mm_struct *mm);
+int ksm_disable_merge_any(struct mm_struct *mm);
+int ksm_disable(struct mm_struct *mm);
+
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
+/*
+ * To identify zeropages that were mapped by KSM, we reuse the dirty bit
+ * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
+ * deduplicating memory.
+ */
+#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
+
+extern atomic_long_t ksm_zero_pages;
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void ksm_map_zero_page(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
- return __ksm_enter(mm);
- return 0;
+ atomic_long_inc(&ksm_zero_pages);
+ atomic_long_inc(&mm->ksm_zero_pages);
}
-static inline void ksm_exit(struct mm_struct *mm)
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
- if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
- __ksm_exit(mm);
+ if (is_ksm_zero_pte(pte)) {
+ atomic_long_dec(&ksm_zero_pages);
+ atomic_long_dec(&mm->ksm_zero_pages);
+ }
}
-static inline struct stable_node *page_stable_node(struct page *page)
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
- return PageKsm(page) ? page_rmapping(page) : NULL;
+ return atomic_long_read(&mm->ksm_zero_pages);
}
-static inline void set_page_stable_node(struct page *page,
- struct stable_node *stable_node)
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ /* Adding mm to ksm is best effort on fork. */
+ if (mm_flags_test(MMF_VM_MERGEABLE, oldmm)) {
+ long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
+
+ mm->ksm_merging_pages = 0;
+ mm->ksm_rmap_items = 0;
+ atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
+ __ksm_enter(mm);
+ }
+}
+
+static inline int ksm_execve(struct mm_struct *mm)
+{
+ if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
+ return __ksm_enter(mm);
+
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
{
- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+ if (mm_flags_test(MMF_VM_MERGEABLE, mm))
+ __ksm_exit(mm);
}
/*
@@ -58,48 +91,71 @@ static inline void set_page_stable_node(struct page *page,
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr);
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
-void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
+ struct list_head *to_kill, int force_early);
+long ksm_process_profit(struct mm_struct *);
+bool ksm_process_mergeable(struct mm_struct *mm);
#else /* !CONFIG_KSM */
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline vm_flags_t ksm_vma_flags(struct mm_struct *mm,
+ const struct file *file, vm_flags_t vm_flags)
+{
+ return vm_flags;
+}
+
+static inline int ksm_disable(struct mm_struct *mm)
{
return 0;
}
-static inline void ksm_exit(struct mm_struct *mm)
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}
-#ifdef CONFIG_MMU
-static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags)
+static inline int ksm_execve(struct mm_struct *mm)
{
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+static inline void ksm_exit(struct mm_struct *mm)
{
- return page;
}
-static inline int page_referenced_ksm(struct page *page,
- struct mem_cgroup *memcg, unsigned long *vm_flags)
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+}
+
+static inline void collect_procs_ksm(const struct folio *folio,
+ const struct page *page, struct list_head *to_kill,
+ int force_early)
+{
+}
+
+#ifdef CONFIG_MMU
+static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, int advice, vm_flags_t *vm_flags)
{
return 0;
}
-static inline void rmap_walk_ksm(struct page *page,
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ return folio;
+}
+
+static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}
-static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
diff --git a/include/linux/kstack_erase.h b/include/linux/kstack_erase.h
new file mode 100644
index 000000000000..bf3bf1905557
--- /dev/null
+++ b/include/linux/kstack_erase.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KSTACK_ERASE_H
+#define _LINUX_KSTACK_ERASE_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+/*
+ * Check that the poison value points to the unused hole in the
+ * virtual memory map for your platform.
+ */
+#define KSTACK_ERASE_POISON -0xBEEF
+#define KSTACK_ERASE_SEARCH_DEPTH 128
+
+#ifdef CONFIG_KSTACK_ERASE
+#include <asm/stacktrace.h>
+#include <linux/linkage.h>
+
+/*
+ * The lowest address on tsk's stack which we can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_low_bound(const struct task_struct *tsk)
+{
+ /*
+ * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
+ * which we must not corrupt.
+ */
+ return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
+}
+
+/*
+ * The address immediately after the highest address on tsk's stack which we
+ * can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_high_bound(const struct task_struct *tsk)
+{
+ /*
+ * The task's pt_regs lives at the top of the task stack and will be
+ * overwritten by exception entry, so there's no need to erase them.
+ */
+ return (unsigned long)task_pt_regs(tsk);
+}
+
+/*
+ * Find the address immediately above the poisoned region of the stack, where
+ * that region falls between 'low' (inclusive) and 'high' (exclusive).
+ */
+static __always_inline unsigned long
+stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
+{
+ const unsigned int depth = KSTACK_ERASE_SEARCH_DEPTH / sizeof(unsigned long);
+ unsigned int poison_count = 0;
+ unsigned long poison_high = high;
+ unsigned long sp = high;
+
+ while (sp > low && poison_count < depth) {
+ sp -= sizeof(unsigned long);
+
+ if (*(unsigned long *)sp == KSTACK_ERASE_POISON) {
+ poison_count++;
+ } else {
+ poison_count = 0;
+ poison_high = sp;
+ }
+ }
+
+ return poison_high;
+}
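Putting the three helpers together, an erase pass looks roughly like this. This is a simplified sketch: the real stackleak_erase() additionally tracks current->lowest_stack and picks the upper bound based on which stack it is running on:

static void example_erase_task_stack(struct task_struct *tsk)
{
        unsigned long low = stackleak_task_low_bound(tsk);
        unsigned long high = stackleak_task_high_bound(tsk);
        /* everything below this address is still covered by old poison */
        unsigned long erase = stackleak_find_top_of_poison(low, high);

        /* re-poison the range that may hold stale stack data */
        while (erase < high) {
                *(unsigned long *)erase = KSTACK_ERASE_POISON;
                erase += sizeof(unsigned long);
        }
}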
+
+static inline void stackleak_task_init(struct task_struct *t)
+{
+ t->lowest_stack = stackleak_task_low_bound(t);
+# ifdef CONFIG_KSTACK_ERASE_METRICS
+ t->prev_lowest_stack = t->lowest_stack;
+# endif
+}
+
+asmlinkage void noinstr stackleak_erase(void);
+asmlinkage void noinstr stackleak_erase_on_task_stack(void);
+asmlinkage void noinstr stackleak_erase_off_task_stack(void);
+void __no_caller_saved_registers noinstr __sanitizer_cov_stack_depth(void);
+
+#else /* !CONFIG_KSTACK_ERASE */
+static inline void stackleak_task_init(struct task_struct *t) { }
+#endif
+
+#endif
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
new file mode 100644
index 000000000000..6ea897222af1
--- /dev/null
+++ b/include/linux/kstrtox.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KSTRTOX_H
+#define _LINUX_KSTRTOX_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* Internal, do not use. */
+int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
+int __must_check _kstrtol(const char *s, unsigned int base, long *res);
+
+int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
+
+/**
+ * kstrtoul - convert a string to an unsigned long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtoul(). Return code must be checked.
+*/
+static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
+ */
+ if (sizeof(unsigned long) == sizeof(unsigned long long) &&
+ __alignof__(unsigned long) == __alignof__(unsigned long long))
+ return kstrtoull(s, base, (unsigned long long *)res);
+ else
+ return _kstrtoul(s, base, res);
+}
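A minimal sketch of the documented semantics (the wrapper function is illustrative):

static int example_parse(const char *buf)
{
        unsigned long val;
        int ret;

        /* base 0: "0x1f" parses as 31, "017" as 15, "17" as 17 */
        ret = kstrtoul(buf, 0, &val);
        if (ret)        /* -EINVAL on bad input, -ERANGE on overflow */
                return ret;

        pr_info("parsed %lu\n", val);
        return 0;
}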
+
+/**
+ * kstrtol - convert a string to a long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtol(). Return code must be checked.
+ */
+static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(long, long long) = 0.
+ */
+ if (sizeof(long) == sizeof(long long) &&
+ __alignof__(long) == __alignof__(long long))
+ return kstrtoll(s, base, (long long *)res);
+ else
+ return _kstrtol(s, base, res);
+}
+
+int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
+int __must_check kstrtoint(const char *s, unsigned int base, int *res);
+
+static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
+{
+ return kstrtoull(s, base, res);
+}
+
+static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
+{
+ return kstrtoll(s, base, res);
+}
+
+static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
+{
+ return kstrtouint(s, base, res);
+}
+
+static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
+{
+ return kstrtoint(s, base, res);
+}
+
+int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
+int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
+int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
+int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
+int __must_check kstrtobool(const char *s, bool *res);
+
+int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
+int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
+int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
+int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
+int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
+int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
+int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
+int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
+int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
+int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
+
+static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
+{
+ return kstrtoull_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
+{
+ return kstrtoll_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
+{
+ return kstrtouint_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
+{
+ return kstrtoint_from_user(s, count, base, res);
+}
+
+/*
+ * Use kstrto<foo> instead.
+ *
+ * NOTE: simple_strto<foo> does not check for the range overflow and,
+ * depending on the input, may give interesting results.
+ *
+ * Use these functions if and only if you cannot use kstrto<foo>, because
+ * the conversion ends on the first non-digit character, which may be far
+ * beyond the supported range. This can be useful for parsing strings like
+ * 10x50 or 12:21 without altering the original string or a temporary buffer.
+ * Keep the above caveat in mind.
+ */
+
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern unsigned long simple_strntoul(const char *,char **,unsigned int,size_t);
+extern long simple_strtol(const char *,char **,unsigned int);
+extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
+extern long long simple_strtoll(const char *,char **,unsigned int);
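The "10x50" case mentioned above is the one situation where these functions are the right tool; a hypothetical sketch:

static int example_parse_geometry(const char *s, unsigned long *w,
                                  unsigned long *h)
{
        char *end;

        /* conversion stops at 'x', which kstrtoul() would reject */
        *w = simple_strtoul(s, &end, 10);
        if (*end != 'x')
                return -EINVAL;
        *h = simple_strtoul(end + 1, &end, 10);
        if (*end != '\0')
                return -EINVAL;
        /* note: no overflow checking, per the caveat above */
        return 0;
}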
+
+#endif /* _LINUX_KSTRTOX_H */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 82e197eeac91..8d27403888ce 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -1,9 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KTHREAD_H
#define _LINUX_KTHREAD_H
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>
+struct mm_struct;
+
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data,
@@ -15,7 +18,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
- * @arg...: arguments for @namefmt.
+ * @arg: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
@@ -30,6 +33,12 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);
+void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
+bool set_kthread_struct(struct task_struct *p);
+
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
/**
* kthread_run - create and wake a thread.
* @threadfn: the function to run until signal_pending(current).
@@ -48,18 +57,49 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
__k; \
})
+/**
+ * kthread_run_on_cpu - create and wake a cpu bound thread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: The cpu to which the thread should be bound.
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_on_cpu()
+ * followed by wake_up_process(). Returns the kthread or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct task_struct *
+kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
+ unsigned int cpu, const char *namefmt)
+{
+ struct task_struct *p;
+
+ p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
+ if (!IS_ERR(p))
+ wake_up_process(p);
+
+ return p;
+}
+
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
+int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
+int kthread_stop_put(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
+bool kthread_should_stop_or_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);
+void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
+void kthread_exit(long result) __noreturn;
+void kthread_complete_and_exit(struct completion *, long) __noreturn;
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
@@ -75,7 +115,7 @@ extern int tsk_fork_get_node(struct task_struct *tsk);
*/
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
-void kthread_delayed_work_timer_fn(unsigned long __data);
+void kthread_delayed_work_timer_fn(struct timer_list *t);
enum {
KTW_FREEZABLE = 1 << 0, /* freeze during suspend */
@@ -83,7 +123,7 @@ enum {
struct kthread_worker {
unsigned int flags;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct list_head work_list;
struct list_head delayed_work_list;
struct task_struct *task;
@@ -103,12 +143,6 @@ struct kthread_delayed_work {
struct timer_list timer;
};
-#define KTHREAD_WORKER_INIT(worker) { \
- .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
- .work_list = LIST_HEAD_INIT((worker).work_list), \
- .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
- }
-
#define KTHREAD_WORK_INIT(work, fn) { \
.node = LIST_HEAD_INIT((work).node), \
.func = (fn), \
@@ -116,14 +150,10 @@ struct kthread_delayed_work {
#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \
.work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
- .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn, \
- 0, (unsigned long)&(dwork), \
+ .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\
TIMER_IRQSAFE), \
}
-#define DEFINE_KTHREAD_WORKER(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
-
#define DEFINE_KTHREAD_WORK(work, fn) \
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
@@ -131,19 +161,6 @@ struct kthread_delayed_work {
struct kthread_delayed_work dwork = \
KTHREAD_DELAYED_WORK_INIT(dwork, fn)
-/*
- * kthread_worker.lock needs its own lockdep class key when defined on
- * stack with lockdep enabled. Use the following macros in such cases.
- */
-#ifdef CONFIG_LOCKDEP
-# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
- ({ kthread_init_worker(&worker); worker; })
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
-#else
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
-#endif
-
extern void __kthread_init_worker(struct kthread_worker *worker,
const char *name, struct lock_class_key *key);
@@ -163,21 +180,65 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
#define kthread_init_delayed_work(dwork, fn) \
do { \
kthread_init_work(&(dwork)->work, (fn)); \
- __setup_timer(&(dwork)->timer, \
- kthread_delayed_work_timer_fn, \
- (unsigned long)(dwork), \
- TIMER_IRQSAFE); \
+ timer_setup(&(dwork)->timer, \
+ kthread_delayed_work_timer_fn, \
+ TIMER_IRQSAFE); \
} while (0)
int kthread_worker_fn(void *worker_ptr);
-__printf(2, 3)
-struct kthread_worker *
-kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+__printf(3, 4)
+struct kthread_worker *kthread_create_worker_on_node(unsigned int flags,
+ int node,
+ const char namefmt[], ...);
+
+#define kthread_create_worker(flags, namefmt, ...) \
+ kthread_create_worker_on_node(flags, NUMA_NO_NODE, namefmt, ## __VA_ARGS__)
+
+/**
+ * kthread_run_worker - create and wake a kthread worker.
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: Convenient wrapper for kthread_create_worker() followed by
+ * wake_up_process(). Returns the kthread_worker or ERR_PTR(-ENOMEM).
+ */
+#define kthread_run_worker(flags, namefmt, ...) \
+({ \
+ struct kthread_worker *__kw \
+ = kthread_create_worker(flags, namefmt, ## __VA_ARGS__); \
+ if (!IS_ERR(__kw)) \
+ wake_up_process(__kw->task); \
+ __kw; \
+})
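A usage sketch tying the worker API together (all example_* names are invented):

static void example_work_fn(struct kthread_work *work)
{
        /* runs in the worker thread's context */
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static int example_start(void)
{
        struct kthread_worker *worker;

        worker = kthread_run_worker(0, "example_worker");
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        kthread_queue_work(worker, &example_work);
        kthread_flush_work(&example_work);
        kthread_destroy_worker(worker);
        return 0;
}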
-__printf(3, 4) struct kthread_worker *
+struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
- const char namefmt[], ...);
+ const char namefmt[]);
+
+/**
+ * kthread_run_worker_on_cpu - create and wake a cpu bound kthread worker.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_worker_on_cpu()
+ * followed by wake_up_process(). Returns the kthread_worker or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct kthread_worker *
+kthread_run_worker_on_cpu(int cpu, unsigned int flags,
+ const char namefmt[])
+{
+ struct kthread_worker *kw;
+
+ kw = kthread_create_worker_on_cpu(cpu, flags, namefmt);
+ if (!IS_ERR(kw))
+ wake_up_process(kw->task);
+
+ return kw;
+}
bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work);
@@ -198,4 +259,15 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
void kthread_destroy_worker(struct kthread_worker *worker);
+void kthread_use_mm(struct mm_struct *mm);
+void kthread_unuse_mm(struct mm_struct *mm);
+
+struct cgroup_subsys_state;
+
+#ifdef CONFIG_BLK_CGROUP
+void kthread_associate_blkcg(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *kthread_blkcg(void);
+#else
+static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
+#endif
#endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 0c8bd45c8206..383ed9985802 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -21,11 +21,10 @@
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H
-#include <linux/time.h>
+#include <asm/bug.h>
#include <linux/jiffies.h>
-
-/* Nanosecond scalar representation for kernel time values */
-typedef s64 ktime_t;
+#include <linux/time.h>
+#include <linux/types.h>
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
@@ -66,35 +65,20 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
*/
#define ktime_sub_ns(kt, nsval) ((kt) - (nsval))
-/* convert a timespec to ktime_t format: */
-static inline ktime_t timespec_to_ktime(struct timespec ts)
-{
- return ktime_set(ts.tv_sec, ts.tv_nsec);
-}
-
/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
-/* convert a timeval to ktime_t format: */
-static inline ktime_t timeval_to_ktime(struct timeval tv)
-{
- return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
-}
-
-/* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec(kt) ns_to_timespec((kt))
-
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec64(kt) ns_to_timespec64((kt))
-/* Map the ktime_t to timeval conversion to ns_to_timeval function */
-#define ktime_to_timeval(kt) ns_to_timeval((kt))
-
-/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt) (kt)
+/* Convert ktime_t to nanoseconds */
+static inline s64 ktime_to_ns(const ktime_t kt)
+{
+ return kt;
+}
/**
* ktime_compare - Compares two ktime_t variables for less, greater or equal
@@ -213,25 +197,6 @@ static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
/**
- * ktime_to_timespec_cond - convert a ktime_t variable to timespec
- * format only if the variable contains data
- * @kt: the ktime_t variable to convert
- * @ts: the timespec variable to store the result in
- *
- * Return: %true if there was a successful conversion, %false if kt was 0.
- */
-static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
- struct timespec *ts)
-{
- if (kt) {
- *ts = ktime_to_timespec(kt);
- return true;
- } else {
- return false;
- }
-}
-
-/**
* ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
* format only if the variable contains data
* @kt: the ktime_t variable to convert
@@ -250,20 +215,18 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
}
}
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-#define LOW_RES_NSEC TICK_NSEC
-#define KTIME_LOW_RES (LOW_RES_NSEC)
+#include <vdso/ktime.h>
static inline ktime_t ns_to_ktime(u64 ns)
{
return ns;
}
+static inline ktime_t us_to_ktime(u64 us)
+{
+ return us * NSEC_PER_USEC;
+}
+
static inline ktime_t ms_to_ktime(u64 ms)
{
return ms * NSEC_PER_MSEC;
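For instance, a 1.5 ms deadline built with the new helper (illustrative; ktime_get() supplies the current monotonic time):

static inline ktime_t example_poll_deadline(void)
{
        return ktime_add(ktime_get(), us_to_ktime(1500));
}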
diff --git a/include/linux/ktime_api.h b/include/linux/ktime_api.h
new file mode 100644
index 000000000000..f697d493960f
--- /dev/null
+++ b/include/linux/ktime_api.h
@@ -0,0 +1 @@
+#include <linux/ktime.h>
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
new file mode 100644
index 000000000000..eb10d87adf7d
--- /dev/null
+++ b/include/linux/kvm_dirty_ring.h
@@ -0,0 +1,94 @@
+#ifndef KVM_DIRTY_RING_H
+#define KVM_DIRTY_RING_H
+
+#include <linux/kvm.h>
+
+/**
+ * kvm_dirty_ring: KVM internal dirty ring structure
+ *
+ * @dirty_index: free running counter that points to the next slot in
+ * dirty_ring->dirty_gfns, where a new dirty page should go
+ * @reset_index: free running counter that points to the next dirty page
+ * in dirty_ring->dirty_gfns for which dirty trap needs to
+ * be reenabled
+ * @size: size of the compact list, dirty_ring->dirty_gfns
+ * @soft_limit: when the number of dirty pages in the list reaches this
+ * limit, the vcpu that owns this ring should exit to userspace
+ * to allow userspace to harvest all the dirty pages
+ * @dirty_gfns: the array to keep the dirty gfns
+ * @index: index of this dirty ring
+ */
+struct kvm_dirty_ring {
+ u32 dirty_index;
+ u32 reset_index;
+ u32 size;
+ u32 soft_limit;
+ struct kvm_dirty_gfn *dirty_gfns;
+ int index;
+};
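Two illustrative helpers (not part of this patch) make the counter and soft-limit semantics concrete; unsigned wraparound keeps the subtraction correct even after the free-running counters overflow:

/* number of entries userspace has not yet harvested */
static inline u32 example_dirty_ring_used(struct kvm_dirty_ring *ring)
{
        return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

/* when true, the owning vcpu should exit to userspace */
static inline bool example_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
        return example_dirty_ring_used(ring) >= ring->soft_limit;
}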
+
+#ifndef CONFIG_HAVE_KVM_DIRTY_RING
+/*
+ * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o is not
+ * built either, so define these nop functions for the arch.
+ */
+static inline u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
+{
+ return true;
+}
+
+static inline int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int index, u32 size)
+{
+ return 0;
+}
+
+static inline int kvm_dirty_ring_reset(struct kvm *kvm,
+ struct kvm_dirty_ring *ring,
+ int *nr_entries_reset)
+{
+ return -ENOENT;
+}
+
+static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
+ u32 slot, u64 offset)
+{
+}
+
+static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
+ u32 offset)
+{
+ return NULL;
+}
+
+static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
+{
+}
+
+#else /* CONFIG_HAVE_KVM_DIRTY_RING */
+
+int kvm_cpu_dirty_log_size(struct kvm *kvm);
+bool kvm_use_dirty_bitmap(struct kvm *kvm);
+bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
+u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm);
+int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int index, u32 size);
+int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int *nr_entries_reset);
+void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
+
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
+
+/* for use in vm_operations_struct */
+struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
+
+void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
+
+#endif /* CONFIG_HAVE_KVM_DIRTY_RING */
+
+#endif /* KVM_DIRTY_RING_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 21a6fd6c44af..d93f75b05ae2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H
-/*
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
-
+#include <linux/entry-virt.h>
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
@@ -13,20 +10,31 @@
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/sched/stat.h>
#include <linux/bug.h>
+#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
-#include <linux/swait.h>
+#include <linux/rcuwait.h>
#include <linux/refcount.h>
+#include <linux/nospec.h>
+#include <linux/notifier.h>
+#include <linux/ftrace.h>
+#include <linux/hashtable.h>
+#include <linux/instrumentation.h>
+#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
+#include <linux/xarray.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -35,23 +43,46 @@
#include <linux/kvm_types.h>
#include <asm/kvm_host.h>
+#include <linux/kvm_dirty_ring.h>
-#ifndef KVM_MAX_VCPU_ID
-#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#ifndef KVM_MAX_VCPU_IDS
+#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif
/*
- * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
- * in kvm, other bits are visible for userspace which are defined in
- * include/linux/kvm_h.
+ * Bits 16 ~ 31 of kvm_userspace_memory_region::flags are used internally
+ * by kvm; the remaining bits are visible to userspace and are defined in
+ * include/uapi/linux/kvm.h.
+ */
+#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_GMEM_ONLY (1UL << 17)
+
+/*
+ * Bit 63 of the memslot generation number is an "update in-progress flag",
+ * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
+ * This flag effectively creates a unique generation number that is used to
+ * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
+ * i.e. may (or may not) have come from the previous memslots generation.
+ *
+ * This is necessary because the actual memslots update is not atomic with
+ * respect to the generation number update. Updating the generation number
+ * first would allow a vCPU to cache a spte from the old memslots using the
+ * new generation number, and updating the generation number after switching
+ * to the new memslots would allow cache hits using the old generation number
+ * to reference the defunct memslots.
+ *
+ * This mechanism is used to prevent getting hits in KVM's caches while a
+ * memslot update is in-progress, and to prevent cache hits *after* updating
+ * the actual generation number against accesses that were inserted into the
+ * cache *before* the memslots were updated.
*/
-#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)
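Illustratively, a consumer that caches memslot-derived data snapshots the generation and treats the cache as stale whenever the snapshot was taken mid-update or no longer matches (hypothetical helper):

static bool example_cache_is_valid(struct kvm_memslots *slots, u64 cached_gen)
{
        /* a generation with bit 63 set was taken mid-update: always stale */
        if (cached_gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)
                return false;

        return cached_gen == slots->generation;
}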
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
-#ifndef KVM_ADDRESS_SPACE_NUM
-#define KVM_ADDRESS_SPACE_NUM 1
+#ifndef KVM_MAX_NR_ADDRESS_SPACES
+#define KVM_MAX_NR_ADDRESS_SPACES 1
#endif
/*
@@ -66,6 +97,8 @@
#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+#define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3)
+#define KVM_PFN_ERR_NEEDS_IO (KVM_PFN_ERR_MASK + 4)
/*
 * error pfns indicate that the gfn is in slot but failed to
@@ -77,6 +110,15 @@ static inline bool is_error_pfn(kvm_pfn_t pfn)
}
/*
+ * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
+ * by a pending signal. Note, the signal may or may not be fatal.
+ */
+static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
+{
+ return pfn == KVM_PFN_ERR_SIGPENDING;
+}
+
+/*
* error_noslot pfns indicate that the gfn can not be
* translated to pfn - it is not in slot or failed to
* translate it to pfn.
@@ -108,38 +150,50 @@ static inline bool kvm_is_error_hva(unsigned long addr)
#endif
-#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
-
-static inline bool is_error_page(struct page *page)
+static inline bool kvm_is_error_gpa(gpa_t gpa)
{
- return IS_ERR(page);
+ return gpa == INVALID_GPA;
}
#define KVM_REQUEST_MASK GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT BIT(9)
+#define KVM_REQUEST_NO_ACTION BIT(10)
/*
* Architecture-independent vcpu->requests bit members
- * Bits 4-7 are reserved for more arch-independent bits.
+ * Bits 3-7 are reserved for more arch-independent bits.
*/
-#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER 2
-#define KVM_REQ_UNHALT 3
-#define KVM_REQUEST_ARCH_BASE 8
+#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_UNBLOCK 2
+#define KVM_REQ_DIRTY_RING_SOFT_FULL 3
+#define KVM_REQUEST_ARCH_BASE 8
+
+/*
+ * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
+ * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
+ * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
+ * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
+ * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
+ * guarantee the vCPU received an IPI and has actually exited guest mode.
+ */
+#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
- BUILD_BUG_ON((unsigned)(nr) >= 32 - KVM_REQUEST_ARCH_BASE); \
+ BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
+bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap);
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
+#define KVM_PIT_IRQ_SOURCE_ID 2
-extern struct kmem_cache *kvm_vcpu_cache;
-
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
extern struct list_head vm_list;
struct kvm_io_range {
@@ -153,6 +207,7 @@ struct kvm_io_range {
struct kvm_io_bus {
int dev_count;
int ioeventfd_count;
+ struct rcu_head rcu;
struct kvm_io_range range[];
};
@@ -161,6 +216,7 @@ enum kvm_bus {
KVM_PIO_BUS,
KVM_VIRTIO_CCW_NOTIFY_BUS,
KVM_FAST_MMIO_BUS,
+ KVM_IOCSR_BUS,
KVM_NR_BUSES
};
@@ -172,8 +228,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);
@@ -183,20 +239,44 @@ struct kvm_async_pf {
struct list_head link;
struct list_head queue;
struct kvm_vcpu *vcpu;
- struct mm_struct *mm;
- gva_t gva;
+ gpa_t cr2_or_gpa;
unsigned long addr;
struct kvm_arch_async_pf arch;
bool wakeup_all;
+ bool notpresent_injected;
};
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
- struct kvm_arch_async_pf *arch);
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
+union kvm_mmu_notifier_arg {
+ unsigned long attributes;
+};
+
+enum kvm_gfn_range_filter {
+ KVM_FILTER_SHARED = BIT(0),
+ KVM_FILTER_PRIVATE = BIT(1),
+};
+
+struct kvm_gfn_range {
+ struct kvm_memory_slot *slot;
+ gfn_t start;
+ gfn_t end;
+ union kvm_mmu_notifier_arg arg;
+ enum kvm_gfn_range_filter attr_filter;
+ bool may_block;
+ bool lockless;
+};
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+#endif
+
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
@@ -204,6 +284,35 @@ enum {
READING_SHADOW_PAGE_TABLES,
};
+struct kvm_host_map {
+ /*
+ * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
+ * a 'struct page' for it). When using the mem= kernel parameter, some
+ * memory can be used as guest memory without being managed by the host
+ * kernel.
+ */
+ struct page *pinned_page;
+ struct page *page;
+ void *hva;
+ kvm_pfn_t pfn;
+ kvm_pfn_t gfn;
+ bool writable;
+};
+
+/*
+ * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
+ * directly to check for that.
+ */
+static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
+{
+ return !!map->hva;
+}
+
+static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
+{
+ return single_task_running() && !need_resched() && ktime_before(cur, stop);
+}
+
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -220,24 +329,26 @@ struct kvm_vcpu {
struct preempt_notifier preempt_notifier;
#endif
int cpu;
- int vcpu_id;
- int srcu_idx;
+ int vcpu_id; /* id given by userspace at creation */
+ int vcpu_idx; /* index into kvm->vcpu_array */
+ int ____srcu_idx; /* Don't use this directly. You've been warned. */
+#ifdef CONFIG_PROVE_RCU
+ int srcu_depth;
+#endif
int mode;
- unsigned long requests;
+ u64 requests;
unsigned long guest_debug;
- int pre_pcpu;
- struct list_head blocked_vcpu_list;
-
struct mutex mutex;
struct kvm_run *run;
- int guest_fpu_loaded, guest_xcr0_loaded;
- struct swait_queue_head wq;
- struct pid __rcu *pid;
+#ifndef __KVM_HAVE_ARCH_WQP
+ struct rcuwait wait;
+#endif
+ struct pid *pid;
+ rwlock_t pid_lock;
int sigset_active;
sigset_t sigset;
- struct kvm_vcpu_stat stat;
unsigned int halt_poll_ns;
bool valid_wakeup;
@@ -271,11 +382,182 @@ struct kvm_vcpu {
bool dy_eligible;
} spin_loop;
#endif
+ bool wants_to_run;
bool preempted;
+ bool ready;
+ bool scheduled_out;
struct kvm_vcpu_arch arch;
- struct dentry *debugfs_dentry;
+ struct kvm_vcpu_stat stat;
+ char stats_id[KVM_STATS_NAME_SIZE];
+ struct kvm_dirty_ring dirty_ring;
+
+ /*
+ * The most recently used memslot by this vCPU and the slots generation
+ * for which it is valid.
+ * No wraparound protection is needed since generations won't overflow in
+ * thousands of years, even assuming 1M memslot operations per second.
+ */
+ struct kvm_memory_slot *last_used_slot;
+ u64 last_used_slot_gen;
};
+/*
+ * Start accounting time towards a guest.
+ * Must be called before entering guest context.
+ */
+static __always_inline void guest_timing_enter_irqoff(void)
+{
+ /*
+ * This is running in ioctl context, so it's safe to assume that it's the
+ * stime pending cputime to flush.
+ */
+ instrumentation_begin();
+ vtime_account_guest_enter();
+ instrumentation_end();
+}
+
+/*
+ * Enter guest context and enter an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_enter_irqoff(void)
+{
+ /*
+ * KVM does not hold any references to RCU-protected data when it
+ * switches the CPU into guest mode. In fact, switching to guest mode
+ * is very similar to exiting to userspace from an RCU point of view. In
+ * addition, the CPU may stay in guest mode for quite a long time (up to
+ * one time slice). Let's treat guest mode as a quiescent state, just like
+ * we do with user-mode execution.
+ */
+ if (!context_tracking_guest_enter()) {
+ instrumentation_begin();
+ rcu_virt_note_context_switch();
+ instrumentation_end();
+ }
+}
+
+/*
+ * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
+ * guest_state_enter_irqoff().
+ */
+static __always_inline void guest_enter_irqoff(void)
+{
+ guest_timing_enter_irqoff();
+ guest_context_enter_irqoff();
+}
+
+/**
+ * guest_state_enter_irqoff - Fixup state when entering a guest
+ *
+ * Entry to a guest will enable interrupts, but the kernel state is interrupts
+ * disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code before entering a guest.
+ * Must be called with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_enter_irqoff() before this.
+ *
+ * Note: this is analogous to exit_to_user_mode().
+ */
+static __always_inline void guest_state_enter_irqoff(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ guest_context_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/*
+ * Exit guest context and exit an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_exit_irqoff(void)
+{
+ /*
+ * Guest mode is treated as a quiescent state, see
+ * guest_context_enter_irqoff() for more details.
+ */
+ if (!context_tracking_guest_exit()) {
+ instrumentation_begin();
+ rcu_virt_note_context_switch();
+ instrumentation_end();
+ }
+}
+
+/*
+ * Stop accounting time towards a guest.
+ * Must be called after exiting guest context.
+ */
+static __always_inline void guest_timing_exit_irqoff(void)
+{
+ instrumentation_begin();
+ /* Flush the guest cputime we spent on the guest */
+ vtime_account_guest_exit();
+ instrumentation_end();
+}
+
+/*
+ * Deprecated. Architectures should move to guest_state_exit_irqoff() and
+ * guest_timing_exit_irqoff().
+ */
+static __always_inline void guest_exit_irqoff(void)
+{
+ guest_context_exit_irqoff();
+ guest_timing_exit_irqoff();
+}
+
+static inline void guest_exit(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_exit_irqoff();
+ local_irq_restore(flags);
+}
+
+/**
+ * guest_state_exit_irqoff - Establish state when returning from guest mode
+ *
+ * Entry from a guest disables interrupts, but guest mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific code after exiting a guest.
+ * Must be invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_exit_irqoff() after this.
+ *
+ * Note: this is analogous to enter_from_user_mode().
+ */
+static __always_inline void guest_state_exit_irqoff(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_context_exit_irqoff();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
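The intended pairing of these helpers in an arch run loop, per the kerneldoc above (a minimal sketch; example_enter_guest_hw() stands in for the real low-level world switch):

static int example_vcpu_run(struct kvm_vcpu *vcpu)
{
        int exit_reason;

        local_irq_disable();

        guest_timing_enter_irqoff();    /* start guest time accounting */
        guest_state_enter_irqoff();     /* IRQ tracing, RCU, lockdep */

        exit_reason = example_enter_guest_hw(vcpu);

        guest_state_exit_irqoff();      /* mirror of the enter path */
        guest_timing_exit_irqoff();     /* flush guest cputime */

        local_irq_enable();
        return exit_reason;
}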
+
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
@@ -293,7 +575,26 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
*/
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+/*
+ * Since at idle each memslot belongs to two memslot sets it has to contain
+ * two embedded nodes for each data structure that it forms a part of.
+ *
+ * Two memslot sets (one active and one inactive) are necessary so the VM
+ * continues to run on one memslot set while the other is being modified.
+ *
+ * These two memslot sets normally point to the same set of memslots.
+ * They can, however, be desynchronized when performing a memslot management
+ * operation by replacing the memslot to be modified by its copy.
+ * After the operation is complete, both memslot sets once again point to
+ * the same, common set of memslot data.
+ *
+ * The memslots themselves are independent of each other so they can be
+ * individually added or deleted.
+ */
struct kvm_memory_slot {
+ struct hlist_node id_node[2];
+ struct interval_tree_node hva_node[2];
+ struct rb_node gfn_node[2];
gfn_t base_gfn;
unsigned long npages;
unsigned long *dirty_bitmap;
@@ -301,13 +602,47 @@ struct kvm_memory_slot {
unsigned long userspace_addr;
u32 flags;
short id;
+ u16 as_id;
+
+#ifdef CONFIG_KVM_GUEST_MEMFD
+ struct {
+ /*
+ * Writes protected by kvm->slots_lock. Acquiring a
+ * reference via kvm_gmem_get_file() is protected by
+ * either kvm->slots_lock or kvm->srcu.
+ */
+ struct file *file;
+ pgoff_t pgoff;
+ } gmem;
+#endif
};
+static inline bool kvm_slot_has_gmem(const struct kvm_memory_slot *slot)
+{
+ return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
+}
+
+static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
+{
+ return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
+}
+
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
+static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+ unsigned long len = kvm_dirty_bitmap_bytes(memslot);
+
+ return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
+}
+
+#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
+#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
+#endif
+
struct kvm_s390_adapter_int {
u64 ind_addr;
u64 summary_addr;
@@ -321,6 +656,13 @@ struct kvm_hv_sint {
u32 sint;
};
+struct kvm_xen_evtchn {
+ u32 port;
+ u32 vcpu_id;
+ int vcpu_idx;
+ u32 priority;
+};
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
@@ -341,6 +683,7 @@ struct kvm_kernel_irq_routing_entry {
} msi;
struct kvm_s390_adapter_int adapter;
struct kvm_hv_sint hv_sint;
+ struct kvm_xen_evtchn xen_evtchn;
};
struct hlist_node link;
};
@@ -353,45 +696,114 @@ struct kvm_irq_routing_table {
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
- struct hlist_head map[0];
+ struct hlist_head map[] __counted_by(nr_rt_entries);
};
#endif
-#ifndef KVM_PRIVATE_MEM_SLOTS
-#define KVM_PRIVATE_MEM_SLOTS 0
-#endif
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
-#ifndef KVM_MEM_SLOTS_NUM
-#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#ifndef KVM_INTERNAL_MEM_SLOTS
+#define KVM_INTERNAL_MEM_SLOTS 0
#endif
-#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+#define KVM_MEM_SLOTS_NUM SHRT_MAX
+#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
+
+#if KVM_MAX_NR_ADDRESS_SPACES == 1
+static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
+{
+ return KVM_MAX_NR_ADDRESS_SPACES;
+}
+
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
return 0;
}
#endif
-/*
- * Note:
- * memslots are not sorted by id anymore, please use id_to_memslot()
- * to get the memslot by its id.
- */
+#ifndef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
+#ifdef CONFIG_KVM_GUEST_MEMFD
+bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm);
+
+static inline u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
+{
+ u64 flags = GUEST_MEMFD_FLAG_MMAP;
+
+ if (!kvm || kvm_arch_supports_gmem_init_shared(kvm))
+ flags |= GUEST_MEMFD_FLAG_INIT_SHARED;
+
+ return flags;
+}
+#endif
+
+#ifndef kvm_arch_has_readonly_mem
+static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
+{
+ return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
+}
+#endif
+
struct kvm_memslots {
u64 generation;
- struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
- /* The mapping table from slot id to the index in memslots[]. */
- short id_to_index[KVM_MEM_SLOTS_NUM];
- atomic_t lru_slot;
- int used_slots;
+ atomic_long_t last_used_slot;
+ struct rb_root_cached hva_tree;
+ struct rb_root gfn_tree;
+ /*
+ * The mapping table from slot id to memslot.
+ *
+ * 7-bit bucket count matches the size of the old id to index array for
+ * 512 slots, while giving good performance with this slot count.
+ * Higher bucket counts bring only small performance improvements but
+ * always result in higher memory usage (even for lower memslot counts).
+ */
+ DECLARE_HASHTABLE(id_hash, 7);
+ int node_idx;
};
struct kvm {
+#ifdef KVM_HAVE_MMU_RWLOCK
+ rwlock_t mmu_lock;
+#else
spinlock_t mmu_lock;
+#endif /* KVM_HAVE_MMU_RWLOCK */
+
struct mutex slots_lock;
+
+ /*
+ * Protects the arch-specific fields of struct kvm_memory_slots in
+ * use by the VM. To be used under the slots_lock (above) or in a
+ * kvm->srcu critical section where acquiring the slots_lock would
+ * lead to deadlock with the synchronize_srcu in
+ * kvm_swap_active_memslots().
+ */
+ struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
- struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ unsigned long nr_memslot_pages;
+ /* The two memslot sets - active and inactive (per address space) */
+ struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
+ /* The current active memslot set for each address space */
+ struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
+ struct xarray vcpu_array;
+ /*
+ * Protected by slots_lock, but can be read outside if an
+ * incorrect answer is acceptable.
+ */
+ atomic_t nr_memslots_dirty_logging;
+
+ /* Used to wait for completion of MMU notifiers. */
+ spinlock_t mn_invalidate_lock;
+ unsigned long mn_active_invalidate_count;
+ struct rcuwait mn_memslots_update_rcuwait;
+
+ /* For management / invalidation of gfn_to_pfn_caches */
+ spinlock_t gpc_lock;
+ struct list_head gpc_list;
/*
* created_vcpus is protected by kvm->lock, and is incremented
@@ -400,20 +812,22 @@ struct kvm {
* and is accessed atomically.
*/
atomic_t online_vcpus;
+ int max_vcpus;
int created_vcpus;
int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
-#ifdef CONFIG_HAVE_KVM_EVENTFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
struct {
spinlock_t lock;
struct list_head items;
+ /* resampler_list update side is protected by resampler_lock. */
struct list_head resampler_list;
struct mutex resampler_lock;
} irqfds;
- struct list_head ioeventfds;
#endif
+ struct list_head ioeventfds;
struct kvm_vm_stat stat;
struct kvm_arch arch;
refcount_t users_count;
@@ -429,23 +843,39 @@ struct kvm {
* Update side is protected by irq_lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
-#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+
struct hlist_head irq_ack_notifier_list;
#endif
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
struct mmu_notifier mmu_notifier;
- unsigned long mmu_notifier_seq;
- long mmu_notifier_count;
+ unsigned long mmu_invalidate_seq;
+ long mmu_invalidate_in_progress;
+ gfn_t mmu_invalidate_range_start;
+ gfn_t mmu_invalidate_range_end;
#endif
- long tlbs_dirty;
struct list_head devices;
+ u64 manual_dirty_log_protect;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
pid_t userspace_pid;
+ bool override_halt_poll_ns;
+ unsigned int max_halt_poll_ns;
+ u32 dirty_ring_size;
+ bool dirty_ring_with_bitmap;
+ bool vm_bugged;
+ bool vm_dead;
+
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+ struct notifier_block pm_notifier;
+#endif
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ /* Protected by slots_lock (for writes) and RCU (for reads) */
+ struct xarray mem_attr_array;
+#endif
+ char stats_id[KVM_STATS_NAME_SIZE];
};
#define kvm_err(fmt, ...) \
@@ -474,33 +904,119 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline void kvm_vm_dead(struct kvm *kvm)
+{
+ kvm->vm_dead = true;
+ kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
+}
+
+static inline void kvm_vm_bugged(struct kvm *kvm)
+{
+ kvm->vm_bugged = true;
+ kvm_vm_dead(kvm);
+}
+
+#define KVM_BUG(cond, kvm, fmt...) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
+#define KVM_BUG_ON(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
+/*
+ * Note, "data corruption" refers to corruption of host kernel data structures,
+ * not guest data.  Guest data corruption, suspected or confirmed, that is tied
+ * to and contained within a single VM should *never* BUG() and potentially
+ * panic the host, i.e. use this variant of KVM_BUG() if and only if a KVM data
+ * structure is corrupted and that corruption can have a cascading effect to
+ * other parts of the host and/or to other VMs.
+ */
+#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
+ BUG_ON(__ret); \
+ else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
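
/*
 * Illustrative editor's sketch, not part of this patch: how a caller might
 * use KVM_BUG_ON() to take the whole VM out of service on an impossible
 * internal state instead of BUG()ing the host.  "example_handle_exit" and
 * its "exit_reason" parameter are hypothetical names.
 */
static int example_handle_exit(struct kvm_vcpu *vcpu, int exit_reason)
{
	/* A negative reason here would mean KVM's own bookkeeping is broken. */
	if (KVM_BUG_ON(exit_reason < 0, vcpu->kvm))
		return -EIO;

	return 0;
}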
+
+static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(vcpu->srcu_depth++,
+ "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
+#endif
+ vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+}
+
+static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
+{
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
+
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(--vcpu->srcu_depth,
+ "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
+#endif
+}
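
/*
 * Illustrative editor's sketch, not part of this patch: guest-memory
 * accesses that look up memslots must run inside a vCPU SRCU read-side
 * section so a concurrent memslot update cannot free the slots underneath
 * the reader.  "example_read_guest_u32" is a hypothetical helper.
 */
static int example_read_guest_u32(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *val)
{
	int r;

	kvm_vcpu_srcu_read_lock(vcpu);
	r = kvm_read_guest(vcpu->kvm, gpa, val, sizeof(*val));
	kvm_vcpu_srcu_read_unlock(vcpu);

	return r;
}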
+
+static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
+{
+ return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
+}
+
+/*
+ * Get a bus reference under the update-side lock. No long-term SRCU reader
+ * references are permitted, to avoid stale reads vs concurrent IO
+ * registrations.
+ */
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
- return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock) ||
- !refcount_read(&kvm->users_count));
+ return rcu_dereference_protected(kvm->buses[idx],
+ lockdep_is_held(&kvm->slots_lock));
}
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
- /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
- * the caller has read kvm->online_vcpus before (as is the case
- * for kvm_for_each_vcpu, for example).
+ int num_vcpus = atomic_read(&kvm->online_vcpus);
+
+ /*
+ * Explicitly verify the target vCPU is online, as the anti-speculation
+ * logic only limits the CPU's ability to speculate, e.g. given a "bad"
+ * index, clamping the index to 0 would return vCPU0, not NULL.
*/
+ if (i >= num_vcpus)
+ return NULL;
+
+ i = array_index_nospec(i, num_vcpus);
+
+ /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb();
- return kvm->vcpus[i];
+ return xa_load(&kvm->vcpu_array, i);
}
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
- for (idx = 0; \
- idx < atomic_read(&kvm->online_vcpus) && \
- (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
- idx++)
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ if (atomic_read(&kvm->online_vcpus)) \
+ xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
+ (atomic_read(&kvm->online_vcpus) - 1))
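
/*
 * Illustrative editor's sketch, not part of this patch: kvm_for_each_vcpu()
 * now walks the vcpu_array xarray, so the index variable must be an
 * unsigned long (see the kvm_get_vcpu_by_id() change below).
 * "example_kick_all_vcpus" is a hypothetical helper.
 */
static void example_kick_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}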
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
if (id < 0)
return NULL;
@@ -514,41 +1030,24 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
- struct kvm_vcpu *tmp;
- int idx;
-
- kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
- if (tmp == vcpu)
- return idx;
- BUG();
-}
-
-#define kvm_for_each_memslot(memslot, slots) \
- for (memslot = &slots->memslots[0]; \
- memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
- memslot++)
+void kvm_destroy_vcpus(struct kvm *kvm);
-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+int kvm_trylock_all_vcpus(struct kvm *kvm);
+int kvm_lock_all_vcpus(struct kvm *kvm);
+void kvm_unlock_all_vcpus(struct kvm *kvm);
-int __must_check vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
-#ifdef __KVM_HAVE_IOAPIC
+#ifdef CONFIG_KVM_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
-void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
-static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
-{
-}
#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
@@ -561,15 +1060,18 @@ static inline void kvm_irqfd_exit(void)
{
}
#endif
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- struct module *module);
+int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
+bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
+bool file_is_kvm(struct file *file);
+void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
+ as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock) ||
!refcount_read(&kvm->users_count));
@@ -587,18 +1089,130 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
return __kvm_memslots(vcpu->kvm, as_id);
}
-static inline struct kvm_memory_slot *
-id_to_memslot(struct kvm_memslots *slots, int id)
+static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
{
- int index = slots->id_to_index[id];
+ return RB_EMPTY_ROOT(&slots->gfn_tree);
+}
+
+bool kvm_are_all_memslots_empty(struct kvm *kvm);
+
+#define kvm_for_each_memslot(memslot, bkt, slots) \
+ hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
+ if (WARN_ON_ONCE(!memslot->npages)) { \
+ } else
+
+static inline
+struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
+{
+ struct kvm_memory_slot *slot;
+ int idx = slots->node_idx;
+
+ hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
+ if (slot->id == id)
+ return slot;
+ }
+
+ return NULL;
+}
+
+/* Iterator used for walking memslots that overlap a gfn range. */
+struct kvm_memslot_iter {
+ struct kvm_memslots *slots;
+ struct rb_node *node;
+ struct kvm_memory_slot *slot;
+};
+
+static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
+{
+ iter->node = rb_next(iter->node);
+ if (!iter->node)
+ return;
+
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
+}
+
+static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
+ struct kvm_memslots *slots,
+ gfn_t start)
+{
+ int idx = slots->node_idx;
+ struct rb_node *tmp;
struct kvm_memory_slot *slot;
- slot = &slots->memslots[index];
+ iter->slots = slots;
+
+ /*
+ * Find the so-called "upper bound" of a key: the first node that has
+ * its key strictly greater than the searched one (the start gfn in our case).
+ */
+ iter->node = NULL;
+ for (tmp = slots->gfn_tree.rb_node; tmp; ) {
+ slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
+ if (start < slot->base_gfn) {
+ iter->node = tmp;
+ tmp = tmp->rb_left;
+ } else {
+ tmp = tmp->rb_right;
+ }
+ }
+
+ /*
+ * Find the slot with the lowest gfn that can possibly intersect with
+ * the range, so we'll ideally have slot start <= range start.
+ */
+ if (iter->node) {
+ /*
+ * A NULL previous node means that the very first slot
+ * already has a higher start gfn.
+ * In this case slot start > range start.
+ */
+ tmp = rb_prev(iter->node);
+ if (tmp)
+ iter->node = tmp;
+ } else {
+ /* a NULL node below means no slots */
+ iter->node = rb_last(&slots->gfn_tree);
+ }
+
+ if (iter->node) {
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
+
+ /*
+ * It is possible in the slot start < range start case that the
+ * found slot ends before or at range start (slot end <= range start)
+ * and so it does not overlap the requested range.
+ *
+ * In such a non-overlapping case the next slot (if it exists) will
+ * already have slot start > range start, otherwise the logic above
+ * would have found it instead of the current slot.
+ */
+ if (iter->slot->base_gfn + iter->slot->npages <= start)
+ kvm_memslot_iter_next(iter);
+ }
+}
+
+static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
+{
+ if (!iter->node)
+ return false;
- WARN_ON(slot->id != id);
- return slot;
+ /*
+ * If this slot starts beyond or at the end of the range, so does
+ * every following one.
+ */
+ return iter->slot->base_gfn < end;
}
+/* Iterate over each memslot that at least partially intersects the [start, end) range */
+#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
+ for (kvm_memslot_iter_start(iter, slots, start); \
+ kvm_memslot_iter_is_valid(iter, end); \
+ kvm_memslot_iter_next(iter))
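
/*
 * Illustrative editor's sketch, not part of this patch: counting the pages
 * a gfn range covers across all intersecting memslots, using the iterator
 * above.  Must be called with the memslots protected (slots_lock or
 * kvm->srcu).  The helper name is hypothetical.
 */
static unsigned long example_count_range_pages(struct kvm_memslots *slots,
					       gfn_t start, gfn_t end)
{
	struct kvm_memslot_iter iter;
	unsigned long npages = 0;

	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
		struct kvm_memory_slot *slot = iter.slot;
		gfn_t first = max(start, slot->base_gfn);
		gfn_t last = min(end, slot->base_gfn + slot->npages);

		npages += last - first;
	}

	return npages;
}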
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
+
/*
* KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
* - create a new memory slot
@@ -608,7 +1222,7 @@ id_to_memslot(struct kvm_memslots *slots, int id)
* -- just change its flags
*
* Since flags can be changed by some of these operations, the following
- * differentiation is the best we can do for __kvm_set_memory_region():
+ * differentiation is the best we can do for kvm_set_memory_region():
*/
enum kvm_mr_change {
KVM_MR_CREATE,
@@ -617,67 +1231,96 @@ enum kvm_mr_change {
KVM_MR_FLAGS_ONLY,
};
-int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
-int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages);
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
+int kvm_set_internal_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region2 *mem);
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
-bool kvm_largepages_enabled(void);
-void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
-int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
- struct page **pages, int nr_pages);
+int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct page **pages, int nr_pages);
+
+struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write);
+static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+ return __gfn_to_page(kvm, gfn, true);
+}
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
bool *writable);
+
+static inline void kvm_release_page_unused(struct page *page)
+{
+ if (!page)
+ return;
+
+ put_page(page);
+}
+
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
-void kvm_set_page_accessed(struct page *page);
-
-kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
- bool *writable);
-kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
- bool atomic, bool *async, bool write_fault,
- bool *writable);
-
-void kvm_release_pfn_clean(kvm_pfn_t pfn);
-void kvm_set_pfn_dirty(kvm_pfn_t pfn);
-void kvm_set_pfn_accessed(kvm_pfn_t pfn);
-void kvm_get_pfn(kvm_pfn_t pfn);
+
+static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
+ bool unused, bool dirty)
+{
+ lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused);
+
+ if (!page)
+ return;
+
+ /*
+ * If the page that KVM got from the *primary MMU* is writable, and KVM
+ * installed or reused a SPTE, mark the page/folio dirty. Note, this
+ * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if
+ * the GFN is write-protected. Folios can't be safely marked dirty
+ * outside of mmu_lock as doing so could race with writeback on the
+ * folio. As a result, KVM can't mark folios dirty in the fast page
+ * fault handler, and so KVM must (somewhat) speculatively mark the
+ * folio dirty if KVM could locklessly make the SPTE writable.
+ */
+ if (unused)
+ kvm_release_page_unused(page);
+ else if (dirty)
+ kvm_release_page_dirty(page);
+ else
+ kvm_release_page_clean(page);
+}
+
+kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
+ unsigned int foll, bool *writable,
+ struct page **refcounted_page);
+
+static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
+ bool write, bool *writable,
+ struct page **refcounted_page)
+{
+ return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
+ write ? FOLL_WRITE : 0, writable, refcounted_page);
+}
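
/*
 * Illustrative editor's sketch, not part of this patch: the expected
 * pairing of kvm_faultin_pfn() with kvm_release_faultin_page().  A real
 * fault handler interleaves this with mmu_lock and the mmu_invalidate_seq
 * retry logic (see mmu_invalidate_retry_gfn() further down); names here
 * are hypothetical and the SPTE installation is elided.
 */
static int example_fault_in_page(struct kvm_vcpu *vcpu, gfn_t gfn, bool write)
{
	struct page *page;
	bool writable;
	kvm_pfn_t pfn;

	pfn = kvm_faultin_pfn(vcpu, gfn, write, &writable, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	/*
	 * ... acquire mmu_lock and install the SPTE here; per the comment in
	 * kvm_release_faultin_page(), the release below must happen under
	 * mmu_lock when the page was actually used ...
	 */
	kvm_release_faultin_page(vcpu->kvm, page, false, writable);

	return 0;
}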
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
- unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
+int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned int offset,
+ unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
@@ -685,21 +1328,76 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len);
+ void *data, unsigned int offset,
+ unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
-int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+
+#define __kvm_get_guest(kvm, gfn, offset, v) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = get_user(v, __uaddr); \
+ __ret; \
+})
+
+#define kvm_get_guest(kvm, gpa, v) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ \
+ __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), v); \
+})
+
+#define __kvm_put_guest(kvm, gfn, offset, v) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = put_user(v, __uaddr); \
+ if (!__ret) \
+ mark_page_dirty(kvm, gfn); \
+ __ret; \
+})
+
+#define kvm_put_guest(kvm, gpa, v) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ \
+ __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), v); \
+})
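
/*
 * Illustrative editor's sketch, not part of this patch: the access size of
 * kvm_get_guest()/kvm_put_guest() is inferred from the type of @v, so a
 * u64 read-modify-write round trip looks like this.  The helper name is
 * hypothetical.
 */
static int example_bump_guest_counter(struct kvm *kvm, gpa_t gpa)
{
	u64 val;
	int r;

	r = kvm_get_guest(kvm, gpa, val);
	if (r)
		return r;

	val++;
	return kvm_put_guest(kvm, gpa, val);	/* also marks the page dirty */
}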
+
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
+bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
-struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
-struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
-kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
-kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
+ bool writable);
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map);
+
+static inline int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa,
+ struct kvm_host_map *map)
+{
+ return __kvm_vcpu_map(vcpu, gpa, map, true);
+}
+
+static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa,
+ struct kvm_host_map *map)
+{
+ return __kvm_vcpu_map(vcpu, gpa, map, false);
+}
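
/*
 * Illustrative editor's sketch, not part of this patch: a short-lived
 * kernel mapping of a guest page via kvm_vcpu_map()/kvm_vcpu_unmap().
 * "example_poke_guest_byte" is a hypothetical helper.
 */
static int example_poke_guest_byte(struct kvm_vcpu *vcpu, gpa_t gpa, u8 byte)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa, &map))
		return -EFAULT;

	/* map.hva points at the start of the page containing @gpa. */
	*((u8 *)map.hva + offset_in_page(gpa)) = byte;
	kvm_vcpu_unmap(vcpu, &map);

	return 0;
}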
+
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -714,46 +1412,176 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+/**
+ * kvm_gpc_init - initialize gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @kvm: pointer to kvm instance.
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
+ * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
+ * the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ * physical address.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @gpa: guest physical address to map.
+ * @len: sanity check; the range being accessed must fit within a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
+ * invalidations to be processed. Callers are required to use kvm_gpc_check()
+ * to ensure that the cache is valid before accessing the target page.
+ */
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
+
+/**
+ * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @hva: userspace virtual address to map.
+ * @len: sanity check; the range being accessed must fit within a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * The semantics of this function are the same as those of kvm_gpc_activate(). It
+ * merely bypasses a layer of address translation.
+ */
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
+
+/**
+ * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @len: sanity check; the range being accessed must fit within a single page.
+ *
+ * @return: %true if the cache is still valid and the address matches.
+ * %false if the cache is not valid.
+ *
+ * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
+ * while calling this function, and then continue to hold the lock until the
+ * access is complete.
+ *
+ * Callers in IN_GUEST_MODE may do so without locking, although they should
+ * still hold a read lock on kvm->srcu for the memslot checks.
+ */
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
+
+/**
+ * kvm_gpc_refresh - update a previously initialized cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @len: sanity check; the range being accessed must fit within a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
+ * return from this function does not mean the page can be immediately
+ * accessed because it may have raced with an invalidation. Callers must
+ * still lock and check the cache status, as this function does not return
+ * with the lock still held to permit access.
+ */
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
+
+/**
+ * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This removes a cache from the VM's list to be processed on MMU notifier
+ * invocation.
+ */
+void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
+
+static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
+{
+ return gpc->active && !kvm_is_error_gpa(gpc->gpa);
+}
+
+static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
+{
+ return gpc->active && kvm_is_error_gpa(gpc->gpa);
+}
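
/*
 * Illustrative editor's sketch, not part of this patch: the canonical
 * check/refresh loop around a gfn_to_pfn_cache access, per the
 * kvm_gpc_check() documentation above.  Assumes the cache was set up with
 * kvm_gpc_init() + kvm_gpc_activate(); gpc->lock and gpc->khva come from
 * the gfn_to_pfn_cache definition in kvm_types.h.
 */
static int example_gpc_read_u64(struct gfn_to_pfn_cache *gpc, u64 *val)
{
	read_lock(&gpc->lock);
	while (!kvm_gpc_check(gpc, sizeof(*val))) {
		read_unlock(&gpc->lock);

		if (kvm_gpc_refresh(gpc, sizeof(*val)))
			return -EFAULT;

		read_lock(&gpc->lock);
	}
	*val = *(u64 *)gpc->khva;
	read_unlock(&gpc->lock);

	return 0;
}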
+
+void kvm_sigset_activate(struct kvm_vcpu *vcpu);
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
+
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+
+#ifndef CONFIG_S390
+void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait);
+
+static inline void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ __kvm_vcpu_kick(vcpu, false);
+}
+#endif
+
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
-void kvm_reload_remote_mmus(struct kvm *kvm);
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
+
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
+int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
+int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
+void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
+#endif
+
+void kvm_mmu_invalidate_begin(struct kvm *kvm);
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
+void kvm_mmu_invalidate_end(struct kvm *kvm);
+bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
+long kvm_arch_vcpu_unlocked_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
-int kvm_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log, int *is_dirty);
-
-int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *is_dirty);
-
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
unsigned long mask);
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log);
+#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
+int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+ int *is_dirty, struct kvm_memory_slot **memslot);
+#endif
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
-long kvm_arch_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg);
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap);
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
@@ -773,45 +1601,93 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
-
-int kvm_arch_init(void *opaque);
-void kvm_arch_exit(void);
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
-
-void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
-
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
-bool kvm_arch_has_vcpu_debugfs(void);
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
+#endif
-int kvm_arch_hardware_enable(void);
-void kvm_arch_hardware_disable(void);
-int kvm_arch_hardware_setup(void);
-void kvm_arch_hardware_unsetup(void);
-void kvm_arch_check_processor_compat(void *rtn);
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
+#else
+static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
+#endif
+
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+/*
+ * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
+ * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
+ * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
+ * sequence, and at the end of the generic hardware disabling sequence.
+ */
+void kvm_arch_enable_virtualization(void);
+void kvm_arch_disable_virtualization(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
+ * do the actual twiddling of hardware bits. The hooks are called on all
+ * online CPUs when KVM enables/disables virtualization, and on a single CPU
+ * when that CPU is onlined/offlined (including for Resume/Suspend).
+ */
+int kvm_arch_enable_virtualization_cpu(void);
+void kvm_arch_disable_virtualization_cpu(void);
+#endif
+bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
+void kvm_arch_pre_destroy_vm(struct kvm *kvm);
+void kvm_arch_create_vm_debugfs(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+/*
+ * All architectures that want to use vzalloc currently also
+ * need their own kvm_arch_alloc_vm implementation.
+ */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
- return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
}
+#endif
+static inline void __kvm_arch_free_vm(struct kvm *kvm)
+{
+ kvfree(kvm);
+}
+
+#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
- kfree(kvm);
+ __kvm_arch_free_vm(kvm);
+}
+#endif
+
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
+{
+ return -ENOTSUPP;
}
+#else
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
+#endif
+
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+ gfn_t gfn, u64 nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+#else
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
#endif
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
@@ -832,32 +1708,28 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
return false;
}
#endif
-#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
-void kvm_arch_start_assignment(struct kvm *kvm);
-void kvm_arch_end_assignment(struct kvm *kvm);
-bool kvm_arch_has_assigned_device(struct kvm *kvm);
-#else
-static inline void kvm_arch_start_assignment(struct kvm *kvm)
-{
-}
-static inline void kvm_arch_end_assignment(struct kvm *kvm)
+static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
+#ifdef __KVM_HAVE_ARCH_WQP
+ return vcpu->arch.waitp;
+#else
+ return &vcpu->wait;
+#endif
}
-static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+/*
+ * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
+ * true if the vCPU was blocking and was awakened, false otherwise.
+ */
+static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
- return false;
+ return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
}
-#endif
-static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
{
-#ifdef __KVM_HAVE_ARCH_WQP
- return vcpu->arch.wqp;
-#else
- return &vcpu->wq;
-#endif
+ return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
}
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -874,14 +1746,20 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
}
#endif
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void kvm_unregister_perf_callbacks(void);
+#else
+static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif /* CONFIG_GUEST_PERF_EVENTS */
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
-void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
-
-bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -907,54 +1785,94 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
-int kvm_request_irq_source_id(struct kvm *kvm);
-void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
/*
- * search_memslots() and __gfn_to_memslot() are here because they are
- * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
- * gfn_to_memslot() itself isn't here as an inline because that would
- * bloat other code too much.
+ * Returns a pointer to the memslot if it contains gfn.
+ * Otherwise returns NULL.
*/
static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
- int start = 0, end = slots->used_slots;
- int slot = atomic_read(&slots->lru_slot);
- struct kvm_memory_slot *memslots = slots->memslots;
-
- if (gfn >= memslots[slot].base_gfn &&
- gfn < memslots[slot].base_gfn + memslots[slot].npages)
- return &memslots[slot];
+ if (!slot)
+ return NULL;
- while (start < end) {
- slot = start + (end - start) / 2;
+ if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
+ return slot;
+ else
+ return NULL;
+}
- if (gfn >= memslots[slot].base_gfn)
- end = slot;
- else
- start = slot + 1;
+/*
+ * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
+ *
+ * With "approx" set returns the memslot also when the address falls
+ * in a hole. In that case one of the memslots bordering the hole is
+ * returned.
+ */
+static inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
+ struct rb_node *node;
+ int idx = slots->node_idx;
+
+ slot = NULL;
+ for (node = slots->gfn_tree.rb_node; node; ) {
+ slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
+ if (gfn >= slot->base_gfn) {
+ if (gfn < slot->base_gfn + slot->npages)
+ return slot;
+ node = node->rb_right;
+ } else
+ node = node->rb_left;
}
- if (gfn >= memslots[start].base_gfn &&
- gfn < memslots[start].base_gfn + memslots[start].npages) {
- atomic_set(&slots->lru_slot, start);
- return &memslots[start];
+ return approx ? slot : NULL;
+}
+
+static inline struct kvm_memory_slot *
+____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
+
+ slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
+ slot = try_get_memslot(slot, gfn);
+ if (slot)
+ return slot;
+
+ slot = search_memslots(slots, gfn, approx);
+ if (slot) {
+ atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
+ return slot;
}
return NULL;
}
+/*
+ * __gfn_to_memslot() and its descendants are here to allow arch code to inline
+ * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
+ * because that would bloat other code too much.
+ */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
- return search_memslots(slots, gfn);
+ return ____gfn_to_memslot(slots, gfn, false);
}
static inline unsigned long
-__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
- return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+ /*
+ * The index was checked originally in search_memslots.  To prevent a
+ * malicious guest from building a Spectre gadget out of e.g. page
+ * table walks, do not let the processor speculate loads outside
+ * the guest's registered memslots.
+ */
+ unsigned long offset = gfn - slot->base_gfn;
+ offset = array_index_nospec(offset, slot->npages);
+ return slot->userspace_addr + offset * PAGE_SIZE;
}
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
@@ -985,11 +1903,21 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
return (hpa_t)pfn << PAGE_SHIFT;
}
-static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
{
unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
- return kvm_is_error_hva(hva);
+ return !kvm_is_error_hva(hva);
+}
+
+static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
+{
+ lockdep_assert_held(&gpc->lock);
+
+ if (!gpc->memslot)
+ return;
+
+ mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
}
enum kvm_stat_kind {
@@ -998,55 +1926,272 @@ enum kvm_stat_kind {
};
struct kvm_stat_data {
- int offset;
struct kvm *kvm;
+ const struct _kvm_stats_desc *desc;
+ enum kvm_stat_kind kind;
};
-struct kvm_stats_debugfs_item {
- const char *name;
- int offset;
- enum kvm_stat_kind kind;
+struct _kvm_stats_desc {
+ struct kvm_stats_desc desc;
+ char name[KVM_STATS_NAME_SIZE];
};
-extern struct kvm_stats_debugfs_item debugfs_entries[];
-extern struct dentry *kvm_debugfs_dir;
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
+ .flags = type | unit | base | \
+ BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
+ BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
+ BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
+ .exponent = exp, \
+ .size = sz, \
+ .bucket_size = bsz
+
+#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
+#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
+ SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
+
+#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
+ unit, base, exponent, sz, bsz)
+#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
+ unit, base, exponent, sz, 0)
+
+/* Cumulative counter, read/write */
+#define STATS_DESC_COUNTER(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Instantaneous counter, read only */
+#define STATS_DESC_ICOUNTER(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak counter, read/write */
+#define STATS_DESC_PCOUNTER(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Instantaneous boolean value, read only */
+#define STATS_DESC_IBOOLEAN(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak (sticky) boolean value, read/write */
+#define STATS_DESC_PBOOLEAN(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Cumulative time in nanoseconds */
+#define STATS_DESC_TIME_NSEC(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9)
+/* Linear histogram for time in nanoseconds */
+#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
+ STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz, bsz)
+/* Logarithmic histogram for time in nanoseconds */
+#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
+ STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz)
+
+#define KVM_GENERIC_VM_STATS() \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
+
+#define KVM_GENERIC_VCPU_STATS() \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
+
+ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
+ const struct _kvm_stats_desc *desc,
+ void *stats, size_t size_stats,
+ char __user *user_buffer, size_t size, loff_t *offset);
+
+/**
+ * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the linear histogram's bucket
+ * @bucket_size: the size (width) of a bucket
+ */
+static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
+ u64 value, size_t bucket_size)
+{
+ size_t index = div64_u64(value, bucket_size);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+/**
+ * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the logarithmic histogram's bucket
+ */
+static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
{
- if (unlikely(kvm->mmu_notifier_count))
+ size_t index = fls64(value);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
+ kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
+#define KVM_STATS_LOG_HIST_UPDATE(array, value) \
+ kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
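
/*
 * Illustrative editor's sketch, not part of this patch: recording a
 * halt-poll duration in one of the generic logarithmic histograms tracked
 * by KVM_GENERIC_VCPU_STATS() above; this mirrors what kvm_main.c does.
 * The helper name is hypothetical.
 */
static void example_record_halt_poll(struct kvm_vcpu *vcpu, u64 poll_ns,
				     bool success)
{
	if (success)
		KVM_STATS_LOG_HIST_UPDATE(
			vcpu->stat.generic.halt_poll_success_hist, poll_ns);
	else
		KVM_STATS_LOG_HIST_UPDATE(
			vcpu->stat.generic.halt_poll_fail_hist, poll_ns);
}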
+
+
+extern const struct kvm_stats_header kvm_vm_stats_header;
+extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
+extern const struct kvm_stats_header kvm_vcpu_stats_header;
+extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
+
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
+static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
+{
+ if (unlikely(kvm->mmu_invalidate_in_progress))
return 1;
/*
- * Ensure the read of mmu_notifier_count happens before the read
- * of mmu_notifier_seq. This interacts with the smp_wmb() in
- * mmu_notifier_invalidate_range_end to make sure that the caller
- * either sees the old (non-zero) value of mmu_notifier_count or
- * the new (incremented) value of mmu_notifier_seq.
- * PowerPC Book3s HV KVM calls this under a per-page lock
- * rather than under kvm->mmu_lock, for scalability, so
- * can't rely on kvm->mmu_lock to keep things ordered.
+ * Ensure the read of mmu_invalidate_in_progress happens before
+ * the read of mmu_invalidate_seq. This interacts with the
+ * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+ * that the caller either sees the old (non-zero) value of
+ * mmu_invalidate_in_progress or the new (incremented) value of
+ * mmu_invalidate_seq.
+ *
+ * PowerPC Book3s HV KVM calls this under a per-page lock rather
+ * than under kvm->mmu_lock, for scalability, so can't rely on
+ * kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
+
+static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
+ unsigned long mmu_seq,
+ gfn_t gfn)
+{
+ lockdep_assert_held(&kvm->mmu_lock);
+ /*
+ * If mmu_invalidate_in_progress is non-zero, then the range maintained
+ * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+ * that might be being invalidated. Note that it may include some false
+ * positives, due to shortcuts when handling concurrent invalidations.
+ */
+ if (unlikely(kvm->mmu_invalidate_in_progress)) {
+ /*
+ * Dropping mmu_lock after bumping mmu_invalidate_in_progress
+ * but before updating the range is a KVM bug.
+ */
+ if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
+ kvm->mmu_invalidate_range_end == INVALID_GPA))
+ return 1;
+
+ if (gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return 1;
+ }
+
+ if (kvm->mmu_invalidate_seq != mmu_seq)
+ return 1;
+ return 0;
+}
+
+/*
+ * This lockless version of the range-based retry check *must* be paired with a
+ * call to the locked version after acquiring mmu_lock, i.e. this is safe to
+ * use only as a pre-check to avoid contending mmu_lock. This version *will*
+ * get false negatives and false positives.
+ */
+static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
+ unsigned long mmu_seq,
+ gfn_t gfn)
+{
+ /*
+ * Use READ_ONCE() to ensure the in-progress flag and sequence counter
+ * are always read from memory, e.g. so that checking for retry in a
+ * loop won't result in an infinite retry loop. Don't force loads for
+ * start+end, as the key to avoiding infinite retry loops is observing
+ * the 1=>0 transition of in-progress, i.e. getting false negatives
+ * due to stale start+end values is acceptable.
+ */
+ if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
+ gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return true;
+
+ return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
+}
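
/*
 * Illustrative editor's sketch, not part of this patch: the
 * mmu_invalidate_seq retry pattern a page-fault handler follows.  The
 * sequence count is snapshotted before the hva->pfn translation and
 * re-checked under mmu_lock before installing the mapping.  Assumes a
 * rwlock mmu_lock (KVM_HAVE_MMU_RWLOCK); helper names are hypothetical
 * and the mapping installation is elided.
 */
static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	struct page *page;
	bool writable;
	kvm_pfn_t pfn;

	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();	/* pairs with the invalidate side, see above */

	pfn = kvm_faultin_pfn(vcpu, gfn, true, &writable, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		write_unlock(&kvm->mmu_lock);
		kvm_release_faultin_page(kvm, page, true, false);
		return -EAGAIN;		/* caller retries the fault */
	}

	/* ... install the mapping for @pfn here ... */
	kvm_release_faultin_page(kvm, page, false, writable);
	write_unlock(&kvm->mmu_lock);

	return 0;
}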
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
-#ifdef CONFIG_S390
-#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
-#elif defined(CONFIG_ARM64)
-#define KVM_MAX_IRQ_ROUTES 4096
-#else
-#define KVM_MAX_IRQ_ROUTES 1024
-#endif
+#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
unsigned flags);
+int kvm_init_irq_routing(struct kvm *kvm);
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue);
@@ -1056,18 +2201,24 @@ void kvm_free_irq_routing(struct kvm *kvm);
static inline void kvm_free_irq_routing(struct kvm *kvm) {}
+static inline int kvm_init_irq_routing(struct kvm *kvm)
+{
+ return 0;
+}
+
#endif
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-#ifdef CONFIG_HAVE_KVM_EVENTFD
-
void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
+bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
@@ -1076,43 +2227,48 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
}
static inline void kvm_irqfd_release(struct kvm *kvm) {}
-#endif
-#else
-
-static inline void kvm_eventfd_init(struct kvm *kvm) {}
-
-static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin)
{
- return -EINVAL;
+ return false;
}
+#endif /* CONFIG_HAVE_KVM_IRQCHIP */
-static inline void kvm_irqfd_release(struct kvm *kvm) {}
-
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
-static inline void kvm_irq_routing_update(struct kvm *kvm)
-{
-}
-#endif
void kvm_arch_irq_routing_update(struct kvm *kvm);
-static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_HAVE_KVM_EVENTFD */
-
-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
* Ensure the rest of the request is published to kvm_check_request's
* caller. Paired with the smp_mb__after_atomic in kvm_check_request.
*/
smp_wmb();
- set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+ set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
+}
+
+static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+ /*
+ * Requests that don't require vCPU action should never be logged in
+ * vcpu->requests. The vCPU won't clear the request, so it will stay
+ * logged indefinitely and prevent the vCPU from entering the guest.
+ */
+ BUILD_BUG_ON(!__builtin_constant_p(req) ||
+ (req & KVM_REQUEST_NO_ACTION));
+
+ __kvm_make_request(req, vcpu);
}
+#ifndef CONFIG_S390
+static inline void kvm_make_request_and_kick(int req, struct kvm_vcpu *vcpu)
+{
+ kvm_make_request(req, vcpu);
+ __kvm_vcpu_kick(vcpu, req & KVM_REQUEST_WAIT);
+}
+#endif
+
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
return READ_ONCE(vcpu->requests);
@@ -1120,12 +2276,12 @@ static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
- return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+ return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
- clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+ clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
@@ -1144,14 +2300,18 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
}
}
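
/*
 * Illustrative editor's sketch, not part of this patch: the
 * producer/consumer pairing of the request API.  KVM_REQ_UNBLOCK is a
 * real generic request; the handler body and helper names are
 * hypothetical.
 */
static void example_post_request(struct kvm_vcpu *vcpu)
{
	/* Publishes any payload before the bit; see __kvm_make_request(). */
	kvm_make_request_and_kick(KVM_REQ_UNBLOCK, vcpu);
}

static void example_service_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return;

	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) {
		/* ... react before re-entering the guest ... */
	}
}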
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+extern bool enable_virt_at_load;
extern bool kvm_rebooting;
+#endif
extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
+extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;
struct kvm_device {
- struct kvm_device_ops *ops;
+ const struct kvm_device_ops *ops;
struct kvm *kvm;
void *private;
struct list_head vm_node;
@@ -1184,17 +2344,25 @@ struct kvm_device_ops {
*/
void (*destroy)(struct kvm_device *dev);
+ /*
+ * Release is an alternative method to free the device. It is
+ * called when the device file descriptor is closed. Once
+ * release is called, the destroy method will not be called
+ * anymore as the device is removed from the device list of
+ * the VM. kvm->lock is held.
+ */
+ void (*release)(struct kvm_device *dev);
+
int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
unsigned long arg);
+ int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
-void kvm_device_get(struct kvm_device *dev);
-void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
-int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
+int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
@@ -1223,7 +2391,18 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
+{
+ return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
+ !(memslot->flags & KVM_MEMSLOT_INVALID));
+}
+
+struct kvm_vcpu *kvm_get_running_vcpu(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+struct kvm_kernel_irqfd;
+
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
@@ -1231,8 +2410,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
-int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set);
+void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
@@ -1249,4 +2429,179 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+#ifdef CONFIG_HAVE_KVM_NO_POLL
+/* Callback that tells if we must not poll */
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
+#else
+static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif /* CONFIG_HAVE_KVM_NO_POLL */
+
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+
+#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
+
+#ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
+static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
+{
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
+ vcpu->stat.signal_exits++;
+}
+
+static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
+{
+ int r = xfer_to_guest_mode_handle_work();
+
+ if (r) {
+ WARN_ON_ONCE(r != -EINTR);
+ kvm_handle_signal_exit(vcpu);
+ }
+ return r;
+}
+#endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
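A sketch of how an architecture's run loop might consume this helper, assuming a hypothetical enter_guest() world switch; on -EINTR the exit reason has already been set to KVM_EXIT_INTR by kvm_handle_signal_exit().

static int demo_vcpu_run(struct kvm_vcpu *vcpu)
{
	for (;;) {
		/* Handle pending signals/resched before reentering the guest. */
		int r = kvm_xfer_to_guest_mode_handle_work(vcpu);

		if (r)
			return r;	/* -EINTR; run->exit_reason is KVM_EXIT_INTR */

		/* enter_guest(vcpu); -- architecture-specific world switch */
	}
}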
+
+/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+ mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
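Illustratively, the charge and uncharge must use the same base address and page count; a sketch for an order-2 (four page) block:

static void demo_pgtable_block(void)
{
	/* One allocation, so @virt identifies the whole block. */
	void *virt = (void *)__get_free_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 2);

	if (!virt)
		return;
	kvm_account_pgtable_pages(virt, 4);	/* charge */

	/* ... use as secondary-MMU page tables ... */

	kvm_account_pgtable_pages(virt, -4);	/* uncharge before freeing */
	free_pages((unsigned long)virt, 2);
}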
+
+/*
+ * This defines how many reserved entries we want to keep before we
+ * kick the vcpu to the userspace to avoid dirty ring full. This
+ * value can be tuned to higher if e.g. PML is enabled on the host.
+ */
+#define KVM_DIRTY_RING_RSVD_ENTRIES 64
+
+/* Max number of entries allowed for each kvm dirty ring */
+#define KVM_DIRTY_RING_MAX_ENTRIES 65536
+
+static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
+ gpa_t gpa, gpa_t size,
+ bool is_write, bool is_exec,
+ bool is_private)
+{
+ vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
+ vcpu->run->memory_fault.gpa = gpa;
+ vcpu->run->memory_fault.size = size;
+
+ /* RWX flags are not (yet) defined or communicated to userspace. */
+ vcpu->run->memory_fault.flags = 0;
+ if (is_private)
+ vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
+}
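On the VMM side, a memory-fault exit filled in by this helper might be consumed as sketched below; the convert_to_*() helpers are hypothetical placeholders for the VMM's shared/private conversion logic.

static void demo_handle_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_MEMORY_FAULT)
		return;

	if (run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE)
		convert_to_private(run->memory_fault.gpa, run->memory_fault.size);
	else
		convert_to_shared(run->memory_fault.gpa, run->memory_fault.size);
}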
+
+static inline bool kvm_memslot_is_gmem_only(const struct kvm_memory_slot *slot)
+{
+ if (!IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
+ return false;
+
+ return slot->flags & KVM_MEMSLOT_GMEM_ONLY;
+}
+
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
+{
+ return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
+}
+
+bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ unsigned long mask, unsigned long attrs);
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
+}
+#else
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
+#ifdef CONFIG_KVM_GUEST_MEMFD
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+ int *max_order);
+#else
+static inline int kvm_gmem_get_pfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ kvm_pfn_t *pfn, struct page **page,
+ int *max_order)
+{
+ KVM_BUG_ON(1, kvm);
+ return -EIO;
+}
+#endif /* CONFIG_KVM_GUEST_MEMFD */
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
+int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
+#endif
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
+/**
+ * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
+ *
+ * @kvm: KVM instance
+ * @gfn: starting GFN to be populated
+ * @src: userspace-provided buffer containing data to copy into GFN range
+ * (passed to @post_populate, and incremented on each iteration
+ * if not NULL)
+ * @npages: number of pages to copy from userspace-buffer
+ * @post_populate: callback to issue for each gmem page that backs the GPA
+ * range
+ * @opaque: opaque data to pass to @post_populate callback
+ *
+ * This is primarily intended for cases where a gmem-backed GPA range needs
+ * to be initialized with userspace-provided data prior to being mapped into
+ * the guest as a private page. This should be called with the slots->lock
+ * held so that caller-enforced invariants regarding the expected memory
+ * attributes of the GPA range do not race with KVM_SET_MEMORY_ATTRIBUTES.
+ *
+ * Returns the number of pages that were populated.
+ */
+typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+ void __user *src, int order, void *opaque);
+
+long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
+ kvm_gmem_populate_cb post_populate, void *opaque);
+#endif
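A sketch of a conforming post-populate callback; demo_encrypt_page() stands in for an architecture's measured-copy primitive and is not a real API.

static int demo_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
			      void __user *src, int order, void *opaque)
{
	/* @src may be NULL if the pages need no userspace data. */
	return demo_encrypt_page(kvm, gfn, pfn, src, order);
}

/* Typically invoked with kvm->slots_lock held:
 *	kvm_gmem_populate(kvm, start_gfn, uaddr, npages, demo_post_populate, NULL);
 */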
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
+void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
+#endif
+
+#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
+long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+ struct kvm_pre_fault_memory *range);
+#endif
+
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+int kvm_enable_virtualization(void);
+void kvm_disable_virtualization(void);
+#else
+static inline int kvm_enable_virtualization(void) { return 0; }
+static inline void kvm_disable_virtualization(void) { }
+#endif
+
#endif
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index 76c2fbc59f35..ef8c134ded8a 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -1,12 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* irqfd: Allows an fd to be used to inject an interrupt to the guest
* Credit goes to Avi Kivity for the original idea.
@@ -38,7 +31,7 @@ struct kvm_kernel_irqfd_resampler {
/*
* Entry in list of kvm->irqfd.resampler_list. Use for sharing
* resamplers among irqfds on the same gsi.
- * Accessed and modified under kvm->irqfds.resampler_lock
+ * RCU list modified under kvm->irqfds.resampler_lock
*/
struct list_head link;
};
@@ -49,7 +42,7 @@ struct kvm_kernel_irqfd {
wait_queue_entry_t wait;
/* Update side is protected by irqfds.lock */
struct kvm_kernel_irq_routing_entry irq_entry;
- seqcount_t irq_entry_sc;
+ seqcount_spinlock_t irq_entry_sc;
/* Used for level IRQ fast-path */
int gsi;
struct work_struct inject;
@@ -62,10 +55,13 @@ struct kvm_kernel_irqfd {
/* Used for setup/shutdown */
struct eventfd_ctx *eventfd;
struct list_head list;
- poll_table pt;
struct work_struct shutdown;
struct irq_bypass_consumer consumer;
struct irq_bypass_producer *producer;
+
+ struct kvm_vcpu *irq_bypass_vcpu;
+ struct list_head vcpu_list;
+ void *irq_bypass_data;
};
#endif /* __LINUX_KVM_IRQFD_H */
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 35e568f04b1e..f23b90b02898 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KVM_PARA_H
#define __LINUX_KVM_PARA_H
@@ -8,4 +9,9 @@ static inline bool kvm_para_has_feature(unsigned int feature)
{
return !!(kvm_arch_para_features() & (1UL << feature));
}
+
+static inline bool kvm_para_has_hint(unsigned int feature)
+{
+ return !!(kvm_arch_para_hints() & (1UL << feature));
+}
#endif /* __LINUX_KVM_PARA_H */
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 8bf259dae9f6..a568d8e6f4e8 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -1,25 +1,43 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__
+#include <linux/bits.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <asm/kvm_types.h>
+
+#ifdef KVM_SUB_MODULES
+#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) \
+ EXPORT_SYMBOL_FOR_MODULES(symbol, __stringify(KVM_SUB_MODULES))
+#define EXPORT_SYMBOL_FOR_KVM(symbol) \
+ EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm," __stringify(KVM_SUB_MODULES))
+#else
+#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol)
+/*
+ * Allow architectures to provide a custom EXPORT_SYMBOL_FOR_KVM, but only
+ * if there are no submodules, e.g. to allow suppressing exports if KVM=m, but
+ * kvm.ko won't actually be built (due to lack of at least one submodule).
+ */
+#ifndef EXPORT_SYMBOL_FOR_KVM
+#if IS_MODULE(CONFIG_KVM)
+#define EXPORT_SYMBOL_FOR_KVM(symbol) EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm")
+#else
+#define EXPORT_SYMBOL_FOR_KVM(symbol)
+#endif /* IS_MODULE(CONFIG_KVM) */
+#endif /* EXPORT_SYMBOL_FOR_KVM */
+#endif
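Usage is a drop-in replacement for EXPORT_SYMBOL(); a minimal sketch with hypothetical symbols:

void kvm_demo_helper(void)
{
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_demo_helper);	/* submodules only */

void kvm_demo_api(void)
{
}
EXPORT_SYMBOL_FOR_KVM(kvm_demo_api);			/* kvm.ko plus submodules */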
+
+#ifndef __ASSEMBLER__
+
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
+struct kvm_gfn_range;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
@@ -32,8 +50,6 @@ struct kvm_memslots;
enum kvm_mr_change;
-#include <asm/types.h>
-
/*
* Address types:
*
@@ -49,6 +65,8 @@ typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;
+#define INVALID_GPA (~(gpa_t)0)
+
typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef u64 hfn_t;
@@ -63,4 +81,65 @@ struct gfn_to_hva_cache {
struct kvm_memory_slot *memslot;
};
+struct gfn_to_pfn_cache {
+ u64 generation;
+ gpa_t gpa;
+ unsigned long uhva;
+ struct kvm_memory_slot *memslot;
+ struct kvm *kvm;
+ struct list_head list;
+ rwlock_t lock;
+ struct mutex refresh_lock;
+ void *khva;
+ kvm_pfn_t pfn;
+ bool active;
+ bool valid;
+};
+
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+/*
+ * Memory caches are used to preallocate memory ahead of various MMU flows,
+ * e.g. page fault handlers. Gracefully handling allocation failures deep in
+ * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
+ * holding MMU locks. Note, these caches act more like prefetch buffers than
+ * classical caches, i.e. objects are not returned to the cache on being freed.
+ *
+ * The @capacity field and @objects array are lazily initialized when the cache
+ * is topped up (__kvm_mmu_topup_memory_cache()).
+ */
+struct kvm_mmu_memory_cache {
+ gfp_t gfp_zero;
+ gfp_t gfp_custom;
+ u64 init_value;
+ struct kmem_cache *kmem_cache;
+ int capacity;
+ int nobjs;
+ void **objects;
+};
+#endif
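The intended pattern, sketched below, is to top up the cache in sleepable context and consume from it where allocation must not fail; kvm_mmu_topup_memory_cache() and kvm_mmu_memory_cache_alloc() are the accessors declared elsewhere in kvm_host.h.

static void *demo_alloc_pt(struct kvm_mmu_memory_cache *mc)
{
	/* Outside the MMU lock: may sleep, reclaim, do I/O. */
	if (kvm_mmu_topup_memory_cache(mc, 4))
		return NULL;

	/* Later, under the MMU lock: guaranteed to succeed, never sleeps. */
	return kvm_mmu_memory_cache_alloc(mc);
}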
+
+#define HALT_POLL_HIST_COUNT 32
+
+struct kvm_vm_stat_generic {
+ u64 remote_tlb_flush;
+ u64 remote_tlb_flush_requests;
+};
+
+struct kvm_vcpu_stat_generic {
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 halt_poll_success_ns;
+ u64 halt_poll_fail_ns;
+ u64 halt_wait_ns;
+ u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
+ u64 blocking;
+};
+
+#define KVM_STATS_NAME_SIZE 48
+#endif /* !__ASSEMBLER__ */
+
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h
index bffdb962f1a6..0402eda1a94e 100644
--- a/include/linux/l2tp.h
+++ b/include/linux/l2tp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* L2TP-over-IP socket for L2TPv3.
*
diff --git a/include/linux/lantiq.h b/include/linux/lantiq.h
new file mode 100644
index 000000000000..67921169d84d
--- /dev/null
+++ b/include/linux/lantiq.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_LANTIQ_H
+#define __LINUX_LANTIQ_H
+
+#ifdef CONFIG_LANTIQ
+#include <lantiq_soc.h>
+#else
+
+#ifndef LTQ_EARLY_ASC
+#define LTQ_EARLY_ASC 0
+#endif
+
+#ifndef CPHYSADDR
+#define CPHYSADDR(a) 0
+#endif
+
+static inline struct clk *clk_get_fpi(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_LANTIQ */
+#endif /* __LINUX_LANTIQ_H */
diff --git a/include/linux/lapb.h b/include/linux/lapb.h
index 873c1eb635e4..b5333f9413dc 100644
--- a/include/linux/lapb.h
+++ b/include/linux/lapb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* These are the public elements of the Linux LAPB module.
*/
@@ -5,6 +6,11 @@
#ifndef LAPB_KERNEL_H
#define LAPB_KERNEL_H
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct net_device;
+
#define LAPB_OK 0
#define LAPB_BADTOKEN 1
#define LAPB_INVALUE 2
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index 59ccab297ae0..84f1053cf2a8 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* latencytop.h: Infrastructure for displaying latency
*
@@ -35,10 +36,7 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
__account_scheduler_latency(task, usecs, inter);
}
-void clear_all_latency_tracing(struct task_struct *p);
-
-extern int sysctl_latencytop(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+void clear_tsk_latency_tracing(struct task_struct *p);
#else
@@ -47,7 +45,7 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
{
}
-static inline void clear_all_latency_tracing(struct task_struct *p)
+static inline void clear_tsk_latency_tracing(struct task_struct *p)
{
}
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 504f6246f38f..d4fa03722b72 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* LCD Lowlevel Control Abstraction
*
@@ -10,8 +11,11 @@
#include <linux/device.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/fb.h>
+
+#define LCD_POWER_ON (0)
+#define LCD_POWER_REDUCED (1) // deprecated; don't use in new code
+#define LCD_POWER_REDUCED_VSYNC_SUSPEND (2) // deprecated; don't use in new code
+#define LCD_POWER_OFF (4)
/* Notes on locking:
*
@@ -29,7 +33,6 @@
*/
struct lcd_device;
-struct fb_info;
struct lcd_properties {
/* The maximum value for contrast (read-only) */
@@ -40,27 +43,29 @@ struct lcd_ops {
/* Get the LCD panel power status (0: full on, 1..3: controller
power on, flat panel power off, 4: full off), see FB_BLANK_XXX */
int (*get_power)(struct lcd_device *);
- /*
- * Enable or disable power to the LCD(0: on; 4: off, see FB_BLANK_XXX)
- * and this callback would be called proir to fb driver's callback.
- *
- * P.S. note that if early_set_power is not NULL then early fb notifier
- * would be registered.
- */
- int (*early_set_power)(struct lcd_device *, int power);
- /* revert the effects of the early blank event. */
- int (*r_early_set_power)(struct lcd_device *, int power);
/* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
int (*set_power)(struct lcd_device *, int power);
/* Get the current contrast setting (0-max_contrast) */
int (*get_contrast)(struct lcd_device *);
/* Set LCD panel contrast */
int (*set_contrast)(struct lcd_device *, int contrast);
- /* Set LCD panel mode (resolutions ...) */
- int (*set_mode)(struct lcd_device *, struct fb_videomode *);
- /* Check if given framebuffer device is the one LCD is bound to;
- return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */
- int (*check_fb)(struct lcd_device *, struct fb_info *);
+
+ /*
+ * Set LCD panel mode (resolutions ...)
+ */
+ int (*set_mode)(struct lcd_device *lcd, u32 xres, u32 yres);
+
+ /*
+ * Check if the LCD controls the given display device. This
+ * operation is optional and if not implemented it is assumed that
+ * the display is always the one controlled by the LCD.
+ *
+ * RETURNS:
+ *
+ * If display_dev is NULL or display_dev matches the device controlled by
+ * the LCD, return true. Otherwise return false.
+ */
+ bool (*controls_device)(struct lcd_device *lcd, struct device *display_device);
};
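A sketch of a conforming controls_device implementation; struct demo_panel and its display_dev member are hypothetical driver data recorded at probe time.

static bool demo_lcd_controls_device(struct lcd_device *lcd,
				     struct device *display_device)
{
	struct demo_panel *panel = lcd_get_data(lcd);

	if (!display_device)	/* NULL matches, per the contract above */
		return true;

	return display_device == panel->display_dev;
}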
struct lcd_device {
@@ -70,11 +75,14 @@ struct lcd_device {
points to something in the body of that driver, it is also invalid. */
struct mutex ops_lock;
/* If this is NULL, the backing module is unloaded */
- struct lcd_ops *ops;
+ const struct lcd_ops *ops;
/* Serialise access to set_power method */
struct mutex update_lock;
- /* The framebuffer notifier block */
- struct notifier_block fb_notif;
+
+ /**
+ * @entry: List entry of all registered lcd devices
+ */
+ struct list_head entry;
struct device dev;
};
@@ -111,14 +119,27 @@ static inline void lcd_set_power(struct lcd_device *ld, int power)
}
extern struct lcd_device *lcd_device_register(const char *name,
- struct device *parent, void *devdata, struct lcd_ops *ops);
+ struct device *parent, void *devdata, const struct lcd_ops *ops);
extern struct lcd_device *devm_lcd_device_register(struct device *dev,
const char *name, struct device *parent,
- void *devdata, struct lcd_ops *ops);
+ void *devdata, const struct lcd_ops *ops);
extern void lcd_device_unregister(struct lcd_device *ld);
extern void devm_lcd_device_unregister(struct device *dev,
struct lcd_device *ld);
+#if IS_REACHABLE(CONFIG_LCD_CLASS_DEVICE)
+void lcd_notify_blank_all(struct device *display_dev, int power);
+void lcd_notify_mode_change_all(struct device *display_dev,
+ unsigned int width, unsigned int height);
+#else
+static inline void lcd_notify_blank_all(struct device *display_dev, int power)
+{}
+
+static inline void lcd_notify_mode_change_all(struct device *display_dev,
+ unsigned int width, unsigned int height)
+{}
+#endif
+
#define to_lcd_device(obj) container_of(obj, struct lcd_device, dev)
static inline void * lcd_get_data(struct lcd_device *ld_dev)
diff --git a/include/linux/lcm.h b/include/linux/lcm.h
index 1ce79a7f1daa..0db3efd56efb 100644
--- a/include/linux/lcm.h
+++ b/include/linux/lcm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LCM_H
#define _LCM_H
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
new file mode 100644
index 000000000000..cfafe7a5e7b1
--- /dev/null
+++ b/include/linux/leafops.h
@@ -0,0 +1,619 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Describes operations that can be performed on software-defined page table
+ * leaf entries. These are abstracted from the hardware page table entries
+ * themselves by the softleaf_t type, see mm_types.h.
+ */
+#ifndef _LINUX_LEAFOPS_H
+#define _LINUX_LEAFOPS_H
+
+#include <linux/mm_types.h>
+#include <linux/swapops.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_MMU
+
+/* Temporary until swp_entry_t eliminated. */
+#define LEAF_TYPE_SHIFT SWP_TYPE_SHIFT
+
+enum softleaf_type {
+ /* Fundamental types. */
+ SOFTLEAF_NONE,
+ SOFTLEAF_SWAP,
+ /* Migration types. */
+ SOFTLEAF_MIGRATION_READ,
+ SOFTLEAF_MIGRATION_READ_EXCLUSIVE,
+ SOFTLEAF_MIGRATION_WRITE,
+ /* Device types. */
+ SOFTLEAF_DEVICE_PRIVATE_READ,
+ SOFTLEAF_DEVICE_PRIVATE_WRITE,
+ SOFTLEAF_DEVICE_EXCLUSIVE,
+ /* H/W poison types. */
+ SOFTLEAF_HWPOISON,
+ /* Marker types. */
+ SOFTLEAF_MARKER,
+};
+
+/**
+ * softleaf_mk_none() - Create an empty ('none') leaf entry.
+ * Returns: empty leaf entry.
+ */
+static inline softleaf_t softleaf_mk_none(void)
+{
+ return ((softleaf_t) { 0 });
+}
+
+/**
+ * softleaf_from_pte() - Obtain a leaf entry from a PTE entry.
+ * @pte: PTE entry.
+ *
+ * If @pte is present (therefore not a leaf entry) the function returns an empty
+ * leaf entry. Otherwise, it returns a leaf entry.
+ *
+ * Returns: Leaf entry.
+ */
+static inline softleaf_t softleaf_from_pte(pte_t pte)
+{
+ softleaf_t arch_entry;
+
+ if (pte_present(pte) || pte_none(pte))
+ return softleaf_mk_none();
+
+ pte = pte_swp_clear_flags(pte);
+ arch_entry = __pte_to_swp_entry(pte);
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+/**
+ * softleaf_to_pte() - Obtain a PTE entry from a leaf entry.
+ * @entry: Leaf entry.
+ *
+ * This generates an architecture-specific PTE entry that can be utilised to
+ * encode the metadata the leaf entry encodes.
+ *
+ * Returns: Architecture-specific PTE entry encoding leaf entry.
+ */
+static inline pte_t softleaf_to_pte(softleaf_t entry)
+{
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry_to_pte(entry);
+}
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+/**
+ * softleaf_from_pmd() - Obtain a leaf entry from a PMD entry.
+ * @pmd: PMD entry.
+ *
+ * If @pmd is present (therefore not a leaf entry) the function returns an empty
+ * leaf entry. Otherwise, it returns a leaf entry.
+ *
+ * Returns: Leaf entry.
+ */
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+ softleaf_t arch_entry;
+
+ if (pmd_present(pmd) || pmd_none(pmd))
+ return softleaf_mk_none();
+
+ if (pmd_swp_soft_dirty(pmd))
+ pmd = pmd_swp_clear_soft_dirty(pmd);
+ if (pmd_swp_uffd_wp(pmd))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
+ arch_entry = __pmd_to_swp_entry(pmd);
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+#else
+
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+ return softleaf_mk_none();
+}
+
+#endif
+
+/**
+ * softleaf_is_none() - Is the leaf entry empty?
+ * @entry: Leaf entry.
+ *
+ * Empty entries are typically the result of a 'none' page table leaf entry
+ * being converted to a leaf entry.
+ *
+ * Returns: true if the entry is empty, false otherwise.
+ */
+static inline bool softleaf_is_none(softleaf_t entry)
+{
+ return entry.val == 0;
+}
+
+/**
+ * softleaf_type() - Identify the type of leaf entry.
+ * @entry: Leaf entry.
+ *
+ * Returns: the leaf entry type associated with @entry.
+ */
+static inline enum softleaf_type softleaf_type(softleaf_t entry)
+{
+ unsigned int type_num;
+
+ if (softleaf_is_none(entry))
+ return SOFTLEAF_NONE;
+
+ type_num = entry.val >> LEAF_TYPE_SHIFT;
+
+ if (type_num < MAX_SWAPFILES)
+ return SOFTLEAF_SWAP;
+
+ switch (type_num) {
+#ifdef CONFIG_MIGRATION
+ case SWP_MIGRATION_READ:
+ return SOFTLEAF_MIGRATION_READ;
+ case SWP_MIGRATION_READ_EXCLUSIVE:
+ return SOFTLEAF_MIGRATION_READ_EXCLUSIVE;
+ case SWP_MIGRATION_WRITE:
+ return SOFTLEAF_MIGRATION_WRITE;
+#endif
+#ifdef CONFIG_DEVICE_PRIVATE
+ case SWP_DEVICE_WRITE:
+ return SOFTLEAF_DEVICE_PRIVATE_WRITE;
+ case SWP_DEVICE_READ:
+ return SOFTLEAF_DEVICE_PRIVATE_READ;
+ case SWP_DEVICE_EXCLUSIVE:
+ return SOFTLEAF_DEVICE_EXCLUSIVE;
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ case SWP_HWPOISON:
+ return SOFTLEAF_HWPOISON;
+#endif
+ case SWP_PTE_MARKER:
+ return SOFTLEAF_MARKER;
+ }
+
+ /* Unknown entry type. */
+ VM_WARN_ON_ONCE(1);
+ return SOFTLEAF_NONE;
+}
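A sketch of the intended dispatch pattern when walking page tables and meeting a non-present entry:

static void demo_handle_nonpresent(pte_t pte)
{
	const softleaf_t entry = softleaf_from_pte(pte);

	switch (softleaf_type(entry)) {
	case SOFTLEAF_SWAP:
		/* Swapped-out page: fault it in. */
		break;
	case SOFTLEAF_MIGRATION_READ:
	case SOFTLEAF_MIGRATION_READ_EXCLUSIVE:
	case SOFTLEAF_MIGRATION_WRITE:
		/* Wait for migration to complete. */
		break;
	case SOFTLEAF_MARKER:
		/* Inspect softleaf_to_marker(entry). */
		break;
	default:
		break;
	}
}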
+
+/**
+ * softleaf_is_swap() - Is this leaf entry a swap entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a swap entry, otherwise false.
+ */
+static inline bool softleaf_is_swap(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_SWAP;
+}
+
+/**
+ * softleaf_is_migration_write() - Is this leaf entry a writable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a writable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_write(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_WRITE;
+}
+
+/**
+ * softleaf_is_migration_read() - Is this leaf entry a readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a readable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_read(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ;
+}
+
+/**
+ * softleaf_is_migration_read_exclusive() - Is this leaf entry an exclusive
+ * readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is an exclusive readable migration entry,
+ * otherwise false.
+ */
+static inline bool softleaf_is_migration_read_exclusive(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ_EXCLUSIVE;
+}
+
+/**
+ * softleaf_is_migration() - Is this leaf entry a migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a migration entry, otherwise false.
+ */
+static inline bool softleaf_is_migration(softleaf_t entry)
+{
+ switch (softleaf_type(entry)) {
+ case SOFTLEAF_MIGRATION_READ:
+ case SOFTLEAF_MIGRATION_READ_EXCLUSIVE:
+ case SOFTLEAF_MIGRATION_WRITE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * softleaf_is_device_private_write() - Is this leaf entry a device private
+ * writable entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device private writable entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_device_private_write(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_DEVICE_PRIVATE_WRITE;
+}
+
+/**
+ * softleaf_is_device_private() - Is this leaf entry a device private entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device private entry, otherwise false.
+ */
+static inline bool softleaf_is_device_private(softleaf_t entry)
+{
+ switch (softleaf_type(entry)) {
+ case SOFTLEAF_DEVICE_PRIVATE_WRITE:
+ case SOFTLEAF_DEVICE_PRIVATE_READ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * softleaf_is_device_exclusive() - Is this leaf entry a device-exclusive entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device-exclusive entry, otherwise false.
+ */
+static inline bool softleaf_is_device_exclusive(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_DEVICE_EXCLUSIVE;
+}
+
+/**
+ * softleaf_is_hwpoison() - Is this leaf entry a hardware poison entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a hardware poison entry, otherwise false.
+ */
+static inline bool softleaf_is_hwpoison(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_HWPOISON;
+}
+
+/**
+ * softleaf_is_marker() - Is this leaf entry a marker?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a marker entry, otherwise false.
+ */
+static inline bool softleaf_is_marker(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MARKER;
+}
+
+/**
+ * softleaf_to_marker() - Obtain marker associated with leaf entry.
+ * @entry: Leaf entry, softleaf_is_marker(@entry) must return true.
+ *
+ * Returns: Marker associated with the leaf entry.
+ */
+static inline pte_marker softleaf_to_marker(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_marker(entry));
+
+ return swp_offset(entry) & PTE_MARKER_MASK;
+}
+
+/**
+ * softleaf_has_pfn() - Does this leaf entry encode a valid PFN number?
+ * @entry: Leaf entry.
+ *
+ * A pfn swap entry is a special type of swap entry that always has a pfn stored
+ * in the swap offset. Such entries can be used to represent unaddressable device
+ * memory, to restrict access to a page undergoing migration, or to represent a
+ * pfn which has been hwpoisoned and unmapped.
+ *
+ * Returns: true if the leaf entry encodes a PFN, otherwise false.
+ */
+static inline bool softleaf_has_pfn(softleaf_t entry)
+{
+ /* Make sure the swp offset can always store the needed fields. */
+ BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+
+ if (softleaf_is_migration(entry))
+ return true;
+ if (softleaf_is_device_private(entry))
+ return true;
+ if (softleaf_is_device_exclusive(entry))
+ return true;
+ if (softleaf_is_hwpoison(entry))
+ return true;
+
+ return false;
+}
+
+/**
+ * softleaf_to_pfn() - Obtain PFN encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: The PFN associated with the leaf entry.
+ */
+static inline unsigned long softleaf_to_pfn(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_offset(entry) & SWP_PFN_MASK;
+}
+
+/**
+ * softleaf_to_page() - Obtains struct page for PFN encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: Pointer to the struct page associated with the leaf entry's PFN.
+ */
+static inline struct page *softleaf_to_page(softleaf_t entry)
+{
+ struct page *page = pfn_to_page(softleaf_to_pfn(entry));
+
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ VM_WARN_ON_ONCE(softleaf_is_migration(entry) && !PageLocked(page));
+
+ return page;
+}
+
+/**
+ * softleaf_to_folio() - Obtains struct folio for PFN encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: Pointer to the struct folio associated with the leaf entry's PFN.
+ */
+static inline struct folio *softleaf_to_folio(softleaf_t entry)
+{
+ struct folio *folio = pfn_folio(softleaf_to_pfn(entry));
+
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding folio is locked.
+ */
+ VM_WARN_ON_ONCE(softleaf_is_migration(entry) &&
+ !folio_test_locked(folio));
+
+ return folio;
+}
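Only PFN-bearing entries may be resolved to a page or folio; a defensive sketch:

static struct folio *demo_leaf_folio(softleaf_t entry)
{
	if (!softleaf_has_pfn(entry))
		return NULL;	/* swap and marker entries carry no PFN */

	return softleaf_to_folio(entry);
}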
+
+/**
+ * softleaf_is_poison_marker() - Is this leaf entry a poison marker?
+ * @entry: Leaf entry.
+ *
+ * The poison marker is set via UFFDIO_POISON. Userfaultfd-specific.
+ *
+ * Returns: true if the leaf entry is a poison marker, otherwise false.
+ */
+static inline bool softleaf_is_poison_marker(softleaf_t entry)
+{
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ return softleaf_to_marker(entry) & PTE_MARKER_POISONED;
+}
+
+/**
+ * softleaf_is_guard_marker() - Is this leaf entry a guard region marker?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a guard marker, otherwise false.
+ */
+static inline bool softleaf_is_guard_marker(softleaf_t entry)
+{
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ return softleaf_to_marker(entry) & PTE_MARKER_GUARD;
+}
+
+/**
+ * softleaf_is_uffd_wp_marker() - Is this leaf entry a userfaultfd write protect
+ * marker?
+ * @entry: Leaf entry.
+ *
+ * Userfaultfd-specific.
+ *
+ * Returns: true if the leaf entry is a UFFD WP marker, otherwise false.
+ */
+static inline bool softleaf_is_uffd_wp_marker(softleaf_t entry)
+{
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ return softleaf_to_marker(entry) & PTE_MARKER_UFFD_WP;
+}
+
+#ifdef CONFIG_MIGRATION
+
+/**
+ * softleaf_is_migration_young() - Does this migration entry contain an accessed
+ * bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the accessed (or 'young') bit was set on the migrated page
+ * table entry.
+ *
+ * Returns: true if the entry contains an accessed bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_YOUNG;
+ /* Keep the old behavior of aging page after migration */
+ return false;
+}
+
+/**
+ * softleaf_is_migration_dirty() - Does this migration entry contain a dirty bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the dirty bit was set on the migrated page table entry.
+ *
+ * Returns: true if the entry contains a dirty bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_DIRTY;
+ /* Keep the old behavior of clean page after migration */
+ return false;
+}
+
+#else /* CONFIG_MIGRATION */
+
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+ return false;
+}
+
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+ return false;
+}
+#endif /* CONFIG_MIGRATION */
+
+/**
+ * pte_is_marker() - Does the PTE entry encode a marker leaf entry?
+ * @pte: PTE entry.
+ *
+ * Returns: true if this PTE is a marker leaf entry, otherwise false.
+ */
+static inline bool pte_is_marker(pte_t pte)
+{
+ return softleaf_is_marker(softleaf_from_pte(pte));
+}
+
+/**
+ * pte_is_uffd_wp_marker() - Does this PTE entry encode a userfaultfd write
+ * protect marker leaf entry?
+ * @pte: PTE entry.
+ *
+ * Returns: true if this PTE is a UFFD WP marker leaf entry, otherwise false.
+ */
+static inline bool pte_is_uffd_wp_marker(pte_t pte)
+{
+ const softleaf_t entry = softleaf_from_pte(pte);
+
+ return softleaf_is_uffd_wp_marker(entry);
+}
+
+/**
+ * pte_is_uffd_marker() - Does this PTE entry encode a userfaultfd-specific
+ * marker leaf entry?
+ * @pte: PTE entry.
+ *
+ * It's useful to be able to determine which leaf entries encode UFFD-specific
+ * markers so we can handle these correctly.
+ *
+ * Returns: true if this PTE entry is a UFFD-specific marker, otherwise false.
+ */
+static inline bool pte_is_uffd_marker(pte_t pte)
+{
+ const softleaf_t entry = softleaf_from_pte(pte);
+
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ /* UFFD WP, poisoned swap entries are UFFD-handled. */
+ if (softleaf_is_uffd_wp_marker(entry))
+ return true;
+ if (softleaf_is_poison_marker(entry))
+ return true;
+
+ return false;
+}
+
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
+
+/**
+ * pmd_is_device_private_entry() - Check if PMD contains a device private swap
+ * entry.
+ * @pmd: The PMD to check.
+ *
+ * Returns true if the PMD contains a swap entry that represents a device private
+ * page mapping. This is used for zone device private pages that have been
+ * swapped out but still need special handling during various memory management
+ * operations.
+ *
+ * Return: true if PMD contains device private entry, false otherwise
+ */
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+ return softleaf_is_device_private(softleaf_from_pmd(pmd));
+}
+
+#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+ return false;
+}
+
+#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+/**
+ * pmd_is_migration_entry() - Does this PMD entry encode a migration entry?
+ * @pmd: PMD entry.
+ *
+ * Returns: true if the PMD encodes a migration entry, otherwise false.
+ */
+static inline bool pmd_is_migration_entry(pmd_t pmd)
+{
+ return softleaf_is_migration(softleaf_from_pmd(pmd));
+}
+
+/**
+ * pmd_is_valid_softleaf() - Is this PMD entry a valid leaf entry?
+ * @pmd: PMD entry.
+ *
+ * PMD leaf entries are valid only if they are device private or migration
+ * entries. This function asserts that a PMD leaf entry is valid in this
+ * respect.
+ *
+ * Returns: true if the PMD entry is a valid leaf entry, otherwise false.
+ */
+static inline bool pmd_is_valid_softleaf(pmd_t pmd)
+{
+ const softleaf_t entry = softleaf_from_pmd(pmd);
+
+ /* Only device private, migration entries valid for PMD. */
+ return softleaf_is_device_private(entry) ||
+ softleaf_is_migration(entry);
+}
+
+#endif /* CONFIG_MMU */
+#endif /* _LINUX_LEAFOPS_H */
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index e97966d1fb8d..775a96217518 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* LED Flash class interface
*
* Copyright (C) 2015 Samsung Electronics Co., Ltd.
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_FLASH_LEDS_H_INCLUDED
#define __LINUX_FLASH_LEDS_H_INCLUDED
@@ -49,6 +45,8 @@ struct led_flash_ops {
int (*timeout_set)(struct led_classdev_flash *fled_cdev, u32 timeout);
/* get the flash LED fault */
int (*fault_get)(struct led_classdev_flash *fled_cdev, u32 *fault);
+ /* set flash duration */
+ int (*duration_set)(struct led_classdev_flash *fled_cdev, u32 duration);
};
/*
@@ -79,6 +77,9 @@ struct led_classdev_flash {
/* flash timeout value in microseconds along with its constraints */
struct led_flash_setting timeout;
+ /* flash duration value in microseconds along with its constraints */
+ struct led_flash_setting duration;
+
/* LED Flash class sysfs groups */
const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE];
};
@@ -90,15 +91,17 @@ static inline struct led_classdev_flash *lcdev_to_flcdev(
}
/**
- * led_classdev_flash_register - register a new object of led_classdev class
- * with support for flash LEDs
- * @parent: the flash LED to register
+ * led_classdev_flash_register_ext - register a new object of LED class with
+ * init data and with support for flash LEDs
+ * @parent: LED flash controller device this flash LED is driven by
* @fled_cdev: the led_classdev_flash structure for this device
+ * @init_data: the LED class flash device initialization data
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_classdev_flash_register(struct device *parent,
- struct led_classdev_flash *fled_cdev);
+int led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data);
/**
* led_classdev_flash_unregister - unregisters an object of led_classdev class
@@ -107,7 +110,27 @@ extern int led_classdev_flash_register(struct device *parent,
*
* Unregister a previously registered via led_classdev_flash_register object
*/
-extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+
+int devm_led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data);
+
+
+void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
+
+static inline int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
+
+static inline int devm_led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
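A registration sketch using the extended form; demo_flash_ops is a hypothetical led_flash_ops table, and dev_fwnode() comes from linux/property.h.

static int demo_flash_probe(struct device *dev, struct led_classdev_flash *fled)
{
	struct led_init_data init_data = {
		.fwnode = dev_fwnode(dev),	/* name derived from firmware node */
	};

	fled->ops = &demo_flash_ops;
	return devm_led_classdev_flash_register_ext(dev, fled, &init_data);
}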
/**
* led_set_flash_strobe - setup flash strobe
@@ -121,6 +144,8 @@ extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
bool state)
{
+ if (!fled_cdev)
+ return -EINVAL;
return fled_cdev->ops->strobe_set(fled_cdev, state);
}
@@ -136,6 +161,8 @@ static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
bool *state)
{
+ if (!fled_cdev)
+ return -EINVAL;
if (fled_cdev->ops->strobe_get)
return fled_cdev->ops->strobe_get(fled_cdev, state);
@@ -151,8 +178,8 @@ static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
- u32 brightness);
+int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
/**
* led_update_flash_brightness - update flash LED brightness
@@ -163,19 +190,18 @@ extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
+int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
/**
* led_set_flash_timeout - set flash LED timeout
* @fled_cdev: the flash LED to set
* @timeout: the flash timeout to set it to
*
- * Set the flash strobe duration.
+ * Set the flash strobe timeout.
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
- u32 timeout);
+int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout);
/**
* led_get_flash_fault - get the flash LED fault
@@ -186,7 +212,17 @@ extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev,
- u32 *fault);
+int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault);
+
+/**
+ * led_set_flash_duration - set flash LED duration
+ * @fled_cdev: the flash LED to set
+ * @duration: the flash duration to set it to
+ *
+ * Set the flash strobe duration.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_set_flash_duration(struct led_classdev_flash *fled_cdev, u32 duration);
#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h
new file mode 100644
index 000000000000..db9f34c6736e
--- /dev/null
+++ b/include/linux/led-class-multicolor.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* LED Multicolor class interface
+ * Copyright (C) 2019-20 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef _LINUX_MULTICOLOR_LEDS_H_INCLUDED
+#define _LINUX_MULTICOLOR_LEDS_H_INCLUDED
+
+#include <linux/leds.h>
+#include <dt-bindings/leds/common.h>
+
+struct mc_subled {
+ unsigned int color_index;
+ unsigned int brightness;
+ unsigned int intensity;
+ unsigned int channel;
+};
+
+struct led_classdev_mc {
+ /* led class device */
+ struct led_classdev led_cdev;
+ unsigned int num_colors;
+
+ struct mc_subled *subled_info;
+};
+
+static inline struct led_classdev_mc *lcdev_to_mccdev(
+ struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct led_classdev_mc, led_cdev);
+}
+
+/**
+ * led_classdev_multicolor_register_ext - register a new object of led_classdev
+ * class with support for multicolor LEDs
+ * @parent: the multicolor LED to register
+ * @mcled_cdev: the led_classdev_mc structure for this device
+ * @init_data: the LED class multicolor device initialization data
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data);
+
+/**
+ * led_classdev_multicolor_unregister - unregisters an object of led_classdev
+ * class with support for multicolor LEDs
+ * @mcled_cdev: the multicolor LED to unregister
+ *
+ * Unregister a previously registered via led_classdev_multicolor_register
+ * object
+ */
+void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev);
+
+/* Calculate brightness for the monochrome LED cluster */
+int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
+ enum led_brightness brightness);
+
+int devm_led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data);
+
+void devm_led_classdev_multicolor_unregister(struct device *parent,
+ struct led_classdev_mc *mcled_cdev);
+
+static inline int led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
+}
+
+static inline int devm_led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev,
+ NULL);
+}
+
+#endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */
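A sketch of the usual consumption pattern: scale the per-channel brightness values from the stored intensities, then program the hardware; demo_write_channel() is a hypothetical stand-in for the driver's register write.

static void demo_mc_brightness_set(struct led_classdev *led_cdev,
				   enum led_brightness brightness)
{
	struct led_classdev_mc *mc = lcdev_to_mccdev(led_cdev);
	int i;

	/* Fills subled_info[i].brightness from intensity and @brightness. */
	if (led_mc_calc_color_components(mc, brightness))
		return;

	for (i = 0; i < mc->num_colors; i++)
		demo_write_channel(mc->subled_info[i].channel,
				   mc->subled_info[i].brightness);
}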
diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h
index 4b133479d6ea..811f7ce4e218 100644
--- a/include/linux/led-lm3530.h
+++ b/include/linux/led-lm3530.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 ST-Ericsson SA.
* Copyright (C) 2009 Motorola, Inc.
*
- * License Terms: GNU General Public License v2
- *
* Simple driver for National Semiconductor LM3530 Backlight driver chip
*
* Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
index 42f854a1a199..ec577f5f8707 100644
--- a/include/linux/leds-bd2802.h
+++ b/include/linux/leds-bd2802.h
@@ -1,21 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* leds-bd2802.h - RGB LED Driver
*
* Copyright (C) 2009 Samsung Electronics
* Kim Kyuwon <q1.kim@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf
- *
*/
#ifndef _LEDS_BD2802_H_
#define _LEDS_BD2802_H_
struct bd2802_led_platform_data{
- int reset_gpio;
u8 rgb_time;
};
diff --git a/include/linux/leds-expresswire.h b/include/linux/leds-expresswire.h
new file mode 100644
index 000000000000..a422921f4159
--- /dev/null
+++ b/include/linux/leds-expresswire.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Shared library for Kinetic's ExpressWire protocol.
+ * This protocol works by pulsing the ExpressWire IC's control GPIO.
+ * ktd2692 and ktd2801 are known to use this protocol.
+ */
+
+#ifndef _LEDS_EXPRESSWIRE_H
+#define _LEDS_EXPRESSWIRE_H
+
+#include <linux/types.h>
+
+struct gpio_desc;
+
+struct expresswire_timing {
+ unsigned long poweroff_us;
+ unsigned long detect_delay_us;
+ unsigned long detect_us;
+ unsigned long data_start_us;
+ unsigned long end_of_data_low_us;
+ unsigned long end_of_data_high_us;
+ unsigned long short_bitset_us;
+ unsigned long long_bitset_us;
+};
+
+struct expresswire_common_props {
+ struct gpio_desc *ctrl_gpio;
+ struct expresswire_timing timing;
+};
+
+void expresswire_power_off(struct expresswire_common_props *props);
+void expresswire_enable(struct expresswire_common_props *props);
+void expresswire_start(struct expresswire_common_props *props);
+void expresswire_end(struct expresswire_common_props *props);
+void expresswire_set_bit(struct expresswire_common_props *props, bool bit);
+void expresswire_write_u8(struct expresswire_common_props *props, u8 val);
+
+#endif /* _LEDS_EXPRESSWIRE_H */
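A bit-level sketch of one framed byte, assuming MSB-first ordering; this mirrors what expresswire_write_u8() is expected to do internally. BIT() comes from linux/bits.h.

static void demo_expresswire_byte(struct expresswire_common_props *props, u8 val)
{
	int i;

	expresswire_start(props);		/* data-start condition */
	for (i = 7; i >= 0; i--)
		expresswire_set_bit(props, val & BIT(i));
	expresswire_end(props);			/* end-of-data condition */
}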
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h
index 2618aa9063bc..f681fefff281 100644
--- a/include/linux/leds-lp3944.h
+++ b/include/linux/leds-lp3944.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* leds-lp3944.h - platform data structure for lp3944 led controller
*
* Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_LEDS_LP3944_H
diff --git a/include/linux/leds-lp3952.h b/include/linux/leds-lp3952.h
index 49b37ed8d456..937ae5f2eac9 100644
--- a/include/linux/leds-lp3952.h
+++ b/include/linux/leds-lp3952.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* LED driver for TI lp3952 controller
*
* Copyright (C) 2016, DAQRI, LLC.
* Author: Tony Makkiel <tony.makkiel@daqri.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef LEDS_LP3952_H_
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index 5e240b2b4d58..f4796d333974 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* pca9532.h - platform data structure for pca9532 led controller
*
* Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
* Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf
- *
*/
#ifndef __LINUX_PCA9532_H
diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h
index e2337a8c90b0..899f816073a1 100644
--- a/include/linux/leds-regulator.h
+++ b/include/linux/leds-regulator.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* leds-regulator.h - platform data structure for regulator driven LEDs.
*
* Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_LEDS_REGULATOR_H
diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h
deleted file mode 100644
index dcabf4fa2aef..000000000000
--- a/include/linux/leds-tca6507.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * TCA6507 LED chip driver.
- *
- * Copyright (C) 2011 Neil Brown <neil@brown.name>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef __LINUX_TCA6507_H
-#define __LINUX_TCA6507_H
-#include <linux/leds.h>
-
-struct tca6507_platform_data {
- struct led_platform_data leds;
-#ifdef CONFIG_GPIOLIB
- int gpio_base;
- void (*setup)(unsigned gpio_base, unsigned ngpio);
-#endif
-};
-
-#define TCA6507_MAKE_GPIO 1
-#endif /* __LINUX_TCA6507_H*/
diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h
new file mode 100644
index 000000000000..420b61e5a213
--- /dev/null
+++ b/include/linux/leds-ti-lmu-common.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// TI LMU Common Core
+// Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
+
+#ifndef _TI_LMU_COMMON_H_
+#define _TI_LMU_COMMON_H_
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
+
+#define LMU_11BIT_LSB_MASK (BIT(0) | BIT(1) | BIT(2))
+#define LMU_11BIT_MSB_SHIFT 3
+
+#define MAX_BRIGHTNESS_8BIT 255
+#define MAX_BRIGHTNESS_11BIT 2047
+
+struct ti_lmu_bank {
+ struct regmap *regmap;
+
+ int max_brightness;
+
+ u8 lsb_brightness_reg;
+ u8 msb_brightness_reg;
+
+ u8 runtime_ramp_reg;
+ u32 ramp_up_usec;
+ u32 ramp_down_usec;
+};
+
+int ti_lmu_common_set_brightness(struct ti_lmu_bank *lmu_bank, int brightness);
+
+int ti_lmu_common_set_ramp(struct ti_lmu_bank *lmu_bank);
+
+int ti_lmu_common_get_ramp_params(struct device *dev,
+ struct fwnode_handle *child,
+ struct ti_lmu_bank *lmu_data);
+
+int ti_lmu_common_get_brt_res(struct device *dev, struct fwnode_handle *child,
+ struct ti_lmu_bank *lmu_data);
+
+#endif /* _TI_LMU_COMMON_H_ */
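The mask/shift pair splits an 11-bit brightness value as sketched below: the low three bits go to the LSB register, the high eight to the MSB register. A minimal sketch using the regmap held by the bank:

static int demo_write_11bit(struct ti_lmu_bank *bank, int brightness)
{
	int ret;

	ret = regmap_update_bits(bank->regmap, bank->lsb_brightness_reg,
				 LMU_11BIT_LSB_MASK,
				 brightness & LMU_11BIT_LSB_MASK);
	if (ret)
		return ret;

	return regmap_write(bank->regmap, bank->msb_brightness_reg,
			    brightness >> LMU_11BIT_MSB_SHIFT);
}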
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 64c56d454f7d..b16b803cc1ac 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -1,31 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Driver model for leds and led triggers
*
* Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
* Copyright (C) 2005 Richard Purdie <rpurdie@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_LEDS_H_INCLUDED
#define __LINUX_LEDS_H_INCLUDED
+#include <dt-bindings/leds/common.h>
#include <linux/device.h>
-#include <linux/kernfs.h>
-#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
-struct device;
+struct attribute_group;
+struct device_node;
+struct fwnode_handle;
+struct gpio_desc;
+struct kernfs_node;
+struct led_pattern;
+struct platform_device;
+
/*
* LED Core
*/
+/* This is obsolete/useless. We now support variable maximum brightness. */
enum led_brightness {
LED_OFF = 0,
LED_ON = 1,
@@ -33,22 +37,78 @@ enum led_brightness {
LED_FULL = 255,
};
+enum led_default_state {
+ LEDS_DEFSTATE_OFF = 0,
+ LEDS_DEFSTATE_ON = 1,
+ LEDS_DEFSTATE_KEEP = 2,
+};
+
+/**
+ * struct led_lookup_data - represents a single LED lookup entry
+ *
+ * @list: internal list of all LED lookup entries
+ * @provider: name of led_classdev providing the LED
+ * @dev_id: name of the device associated with this LED
+ * @con_id: name of the LED from the device's point of view
+ */
+struct led_lookup_data {
+ struct list_head list;
+ const char *provider;
+ const char *dev_id;
+ const char *con_id;
+};
+
+struct led_init_data {
+ /* device fwnode handle */
+ struct fwnode_handle *fwnode;
+ /*
+ * default <color:function> tuple, for backward compatibility
+ * with in-driver hard-coded LED names used as a fallback when
+ * DT "label" property is absent; it should be set to NULL
+ * in new LED class drivers.
+ */
+ const char *default_label;
+ /*
+ * string to be used for devicename section of LED class device
+ * either for label based LED name composition path or for fwnode
+ * based when devname_mandatory is true
+ */
+ const char *devicename;
+ /*
+ * indicates if LED name should always comprise devicename section;
+ * only LEDs exposed by drivers of hot-pluggable devices should
+ * set it to true
+ */
+ bool devname_mandatory;
+};
+
+enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode);
+
+struct led_hw_trigger_type {
+ int dummy;
+};
+
struct led_classdev {
const char *name;
- enum led_brightness brightness;
- enum led_brightness max_brightness;
+ unsigned int brightness;
+ unsigned int max_brightness;
+ unsigned int color;
int flags;
/* Lower 16 bits reflect status */
-#define LED_SUSPENDED (1 << 0)
-#define LED_UNREGISTERING (1 << 1)
+#define LED_SUSPENDED BIT(0)
+#define LED_UNREGISTERING BIT(1)
/* Upper 16 bits reflect control information */
-#define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_SYSFS_DISABLE (1 << 17)
-#define LED_DEV_CAP_FLASH (1 << 18)
-#define LED_HW_PLUGGABLE (1 << 19)
-#define LED_PANIC_INDICATOR (1 << 20)
-#define LED_BRIGHT_HW_CHANGED (1 << 21)
+#define LED_CORE_SUSPENDRESUME BIT(16)
+#define LED_SYSFS_DISABLE BIT(17)
+#define LED_DEV_CAP_FLASH BIT(18)
+#define LED_HW_PLUGGABLE BIT(19)
+#define LED_PANIC_INDICATOR BIT(20)
+#define LED_BRIGHT_HW_CHANGED BIT(21)
+#define LED_RETAIN_AT_SHUTDOWN BIT(22)
+#define LED_INIT_DEFAULT_TRIGGER BIT(23)
+#define LED_REJECT_NAME_CONFLICT BIT(24)
+#define LED_MULTI_COLOR BIT(25)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -59,6 +119,10 @@ struct led_classdev {
#define LED_BLINK_INVERT 3
#define LED_BLINK_BRIGHTNESS_CHANGE 4
#define LED_BLINK_DISABLE 5
+ /* Brightness off also disables hw-blinking so it is a separate action */
+#define LED_SET_BRIGHTNESS_OFF 6
+#define LED_SET_BRIGHTNESS 7
+#define LED_SET_BLINK 8
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
@@ -82,11 +146,19 @@ struct led_classdev {
* match the values specified exactly.
* Deactivate blinking again when the brightness is set to LED_OFF
* via the brightness_set() callback.
+ * For led_blink_set_nosleep() the LED core assumes that blink_set
+ * implementations of drivers which do not use brightness_set_blocking
+ * will not sleep. Therefore, if brightness_set_blocking is not set,
+ * this function must not sleep!
*/
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off);
+ int (*pattern_set)(struct led_classdev *led_cdev,
+ struct led_pattern *pattern, u32 len, int repeat);
+ int (*pattern_clear)(struct led_classdev *led_cdev);
+
struct device *dev;
const struct attribute_group **groups;
@@ -99,8 +171,11 @@ struct led_classdev {
int new_blink_brightness;
void (*flash_resume)(struct led_classdev *led_cdev);
+ struct workqueue_struct *wq; /* LED workqueue */
struct work_struct set_brightness_work;
int delayed_set_value;
+ unsigned long delayed_delay_on;
+ unsigned long delayed_delay_off;
#ifdef CONFIG_LEDS_TRIGGERS
/* Protects the trigger data below */
@@ -111,6 +186,52 @@ struct led_classdev {
void *trigger_data;
/* true if activated - deactivate routine uses it to do cleanup */
bool activated;
+
+ /* LEDs that have private triggers have this set */
+ struct led_hw_trigger_type *trigger_type;
+
+ /* Unique trigger name supported by LED set in hw control mode */
+ const char *hw_control_trigger;
+ /*
+ * Check if the LED driver supports the requested mode provided by the
+ * defined supported trigger, to set up the LED in hw control mode.
+ *
+ * Return 0 on success. Return -EOPNOTSUPP when the passed flags are not
+ * supported and software fallback needs to be used.
+ * Return any other negative error number when the check itself fails,
+ * e.g. because the device is not ready or timed out.
+ */
+ int (*hw_control_is_supported)(struct led_classdev *led_cdev,
+ unsigned long flags);
+ /*
+ * Activate hardware control: the LED driver will use the provided flags
+ * from the supported trigger and set up the LED to be driven by hardware,
+ * following the mode requested by the trigger flags.
+ * Deactivate hardware blink control by setting the brightness to LED_OFF
+ * via the brightness_set() callback.
+ *
+ * Return 0 on success, a negative error number if applying the flags fails.
+ */
+ int (*hw_control_set)(struct led_classdev *led_cdev,
+ unsigned long flags);
+ /*
+ * Get the current hardware control mode of the LED from the driver and
+ * put it in flags.
+ * A trigger can use this to read the initial state of a LED that is
+ * already under hardware blink control.
+ *
+ * Return 0 on success, a negative error number if parsing the initial
+ * mode fails. An error from this function is NOT FATAL, as the device
+ * may be in an initial state not supported by the attached LED trigger.
+ */
+ int (*hw_control_get)(struct led_classdev *led_cdev,
+ unsigned long *flags);
+ /*
+ * Get the device this LED blinks in response to.
+ * e.g. for a PHY LED, it is the network device. If the LED is
+ * not yet associated with a device, return NULL.
+ */
+ struct device *(*hw_control_get_device)(struct led_classdev *led_cdev);
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
@@ -118,25 +239,66 @@ struct led_classdev {
struct kernfs_node *brightness_hw_changed_kn;
#endif
- /* Ensures consistent access to the LED Flash Class device */
+ /* Ensures consistent access to the LED class device */
struct mutex led_access;
};
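A hedged sketch of how a driver might wire the hw_control hooks above (all foo_* names are hypothetical; the flag bits are the BIT(TRIGGER_NETDEV_*) values defined later in this header):

static int foo_hw_control_is_supported(struct led_classdev *led_cdev,
				       unsigned long flags)
{
	/* Offload only link and TX/RX activity; any other requested mode
	 * makes the trigger fall back to software blinking. */
	if (flags & ~(BIT(TRIGGER_NETDEV_LINK) |
		      BIT(TRIGGER_NETDEV_TX) |
		      BIT(TRIGGER_NETDEV_RX)))
		return -EOPNOTSUPP;

	return 0;
}

/* Wired up at registration time, alongside the other foo_hw_control_*
 * callbacks (not shown): */
led_cdev->hw_control_trigger = "netdev";
led_cdev->hw_control_is_supported = foo_hw_control_is_supported;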
-extern int of_led_classdev_register(struct device *parent,
- struct device_node *np,
- struct led_classdev *led_cdev);
-#define led_classdev_register(parent, led_cdev) \
- of_led_classdev_register(parent, NULL, led_cdev)
-extern int devm_of_led_classdev_register(struct device *parent,
- struct device_node *np,
- struct led_classdev *led_cdev);
-#define devm_led_classdev_register(parent, led_cdev) \
- devm_of_led_classdev_register(parent, NULL, led_cdev)
-extern void led_classdev_unregister(struct led_classdev *led_cdev);
-extern void devm_led_classdev_unregister(struct device *parent,
- struct led_classdev *led_cdev);
-extern void led_classdev_suspend(struct led_classdev *led_cdev);
-extern void led_classdev_resume(struct led_classdev *led_cdev);
+/**
+ * led_classdev_register_ext - register a new object of LED class with
+ * init data
+ * @parent: LED controller device this LED is driven by
+ * @led_cdev: the led_classdev structure for this device
+ * @init_data: the LED class device initialization data
+ *
+ * Register a new object of LED class, with name derived from init_data.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_classdev_register_ext(struct device *parent,
+ struct led_classdev *led_cdev,
+ struct led_init_data *init_data);
+
+/**
+ * led_classdev_register - register a new object of LED class
+ * @parent: LED controller device this LED is driven by
+ * @led_cdev: the led_classdev structure for this device
+ *
+ * Register a new object of LED class, with name derived from the name property
+ * of the passed led_cdev argument.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+static inline int led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev)
+{
+ return led_classdev_register_ext(parent, led_cdev, NULL);
+}
+
+int devm_led_classdev_register_ext(struct device *parent,
+ struct led_classdev *led_cdev,
+ struct led_init_data *init_data);
+static inline int devm_led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev)
+{
+ return devm_led_classdev_register_ext(parent, led_cdev, NULL);
+}
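A minimal probe-time sketch of the *_register_ext() path (the foo_* driver names are illustrative only; the fwnode is all that is needed for automatic name composition):

struct foo_led {
	struct led_classdev cdev;
};

static void foo_led_brightness_set(struct led_classdev *cdev,
				   enum led_brightness value)
{
	/* program the hardware here (elided) */
}

static int foo_led_probe(struct platform_device *pdev)
{
	struct led_init_data init_data = { .fwnode = dev_fwnode(&pdev->dev) };
	struct foo_led *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->cdev.max_brightness = 255;
	priv->cdev.brightness_set = foo_led_brightness_set;

	/* The class device name is composed from fwnode properties. */
	return devm_led_classdev_register_ext(&pdev->dev, &priv->cdev,
					      &init_data);
}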
+void led_classdev_unregister(struct led_classdev *led_cdev);
+void devm_led_classdev_unregister(struct device *parent,
+ struct led_classdev *led_cdev);
+void led_classdev_suspend(struct led_classdev *led_cdev);
+void led_classdev_resume(struct led_classdev *led_cdev);
+
+void led_add_lookup(struct led_lookup_data *led_lookup);
+void led_remove_lookup(struct led_lookup_data *led_lookup);
+
+struct led_classdev *__must_check led_get(struct device *dev, char *con_id);
+struct led_classdev *__must_check devm_led_get(struct device *dev, char *con_id);
+
+extern void led_put(struct led_classdev *led_cdev);
+struct led_classdev *__must_check devm_of_led_get(struct device *dev,
+ int index);
+struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
+ int index);
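A sketch of the lookup-based consumer path (provider/consumer strings are made up for illustration): board code registers a mapping, and the consumer driver then resolves the LED by con_id:

static struct led_lookup_data foo_mute_led_lookup = {
	.provider = "platform::mute",	/* LED class device name */
	.dev_id = "foo-audio",		/* consumer device name */
	.con_id = "mute",		/* connection id used by led_get() */
};

led_add_lookup(&foo_mute_led_lookup);

/* In the foo-audio driver: */
struct led_classdev *led = devm_led_get(dev, "mute");

if (IS_ERR(led))
	return PTR_ERR(led);
led_set_brightness(led, led->max_brightness);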
/**
* led_blink_set - set blinking with software fallback
@@ -149,13 +311,27 @@ extern void led_classdev_resume(struct led_classdev *led_cdev);
* software blinking if there is no hardware blinking or if
* the LED refuses the passed values.
*
+ * This function may sleep!
+ *
* Note that if software blinking is active, simply calling
* led_cdev->brightness_set() will not stop the blinking,
- * use led_classdev_brightness_set() instead.
+ * use led_set_brightness() instead.
*/
-extern void led_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off);
+void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+ unsigned long *delay_off);
+
+/**
+ * led_blink_set_nosleep - set blinking, guaranteed to not sleep
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ *
+ * This function makes the LED blink and is guaranteed not to sleep. Otherwise
+ * this is the same as led_blink_set(); see led_blink_set() for details.
+ */
+void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on,
+ unsigned long delay_off);
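A short usage sketch contrasting the two variants (assuming a valid led_cdev): led_blink_set() takes pointers because the driver may round the delays, while the nosleep variant takes plain values:

unsigned long delay_on = 500, delay_off = 500;	/* milliseconds */

/* Process context, may sleep; delays may be adjusted by the driver. */
led_blink_set(led_cdev, &delay_on, &delay_off);

/* Atomic-safe variant, guaranteed not to sleep. */
led_blink_set_nosleep(led_cdev, 500, 500);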
+
/**
* led_blink_set_oneshot - do a oneshot software blink
* @led_cdev: the LED to start blinking
@@ -169,11 +345,12 @@ extern void led_blink_set(struct led_classdev *led_cdev,
*
* If invert is set, led blinks for delay_off first, then for
* delay_on and leave the led on after the on-off cycle.
+ *
+ * This function is guaranteed not to sleep.
*/
-extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off,
- int invert);
+void led_blink_set_oneshot(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off,
+ int invert);
/**
* led_set_brightness - set LED brightness
* @led_cdev: the LED to set
@@ -183,13 +360,12 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
* software blink timer that implements blinking when the
* hardware doesn't. This function is guaranteed not to sleep.
*/
-extern void led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness);
+void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness);
/**
* led_set_brightness_sync - set LED brightness synchronously
* @led_cdev: the LED to set
- * @brightness: the brightness to set it to
+ * @value: the brightness to set it to
*
* Set an LED's brightness immediately. This function will block
* the caller for the time required for accessing device registers,
@@ -197,8 +373,26 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_brightness_sync(struct led_classdev *led_cdev,
- enum led_brightness value);
+int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value);
+
+/**
+ * led_mc_set_brightness - set mc LED color intensity values and brightness
+ * @led_cdev: the LED to set
+ * @intensity_value: array of per color intensity values to set
+ * @num_colors: number of entries in the intensity_value array
+ * @brightness: the brightness to set the LED to
+ *
+ * Set a multi-color LED's per color intensity values and brightness.
+ * If necessary, this cancels the software blink timer. This function is
+ * guaranteed not to sleep.
+ *
+ * Calling this function on a non-multi-color led_classdev or with the wrong
+ * num_colors value is an error. In this case an error will be logged once
+ * and the call will do nothing.
+ */
+void led_mc_set_brightness(struct led_classdev *led_cdev,
+ unsigned int *intensity_value, unsigned int num_colors,
+ unsigned int brightness);
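For example, a hedged sketch for a hypothetical RGB LED registered with three colors (the intensity order must match the order used at registration):

unsigned int intensity[3] = { 255, 64, 0 };	/* e.g. red, green, blue */

led_mc_set_brightness(led_cdev, intensity, ARRAY_SIZE(intensity), LED_FULL);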
/**
* led_update_brightness - update LED brightness
@@ -209,7 +403,19 @@ extern int led_set_brightness_sync(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_update_brightness(struct led_classdev *led_cdev);
+int led_update_brightness(struct led_classdev *led_cdev);
+
+/**
+ * led_get_default_pattern - return default pattern
+ *
+ * @led_cdev: the LED to get default pattern for
+ * @size: pointer for storing the number of elements in the returned array;
+ * modified only if the return value is not NULL
+ *
+ * Return: Allocated array of integers with default pattern from device tree
+ * or NULL. Caller is responsible for kfree().
+ */
+u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size);
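A minimal consumer sketch, since ownership of the returned buffer passes to the caller:

unsigned int size;
u32 *pattern;

pattern = led_get_default_pattern(led_cdev, &size);
if (pattern) {
	/* ... use the size-element pattern ... */
	kfree(pattern);
}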
/**
* led_sysfs_disable - disable LED sysfs interface
@@ -217,7 +423,7 @@ extern int led_update_brightness(struct led_classdev *led_cdev);
*
* Disable the led_cdev's sysfs interface.
*/
-extern void led_sysfs_disable(struct led_classdev *led_cdev);
+void led_sysfs_disable(struct led_classdev *led_cdev);
/**
* led_sysfs_enable - enable LED sysfs interface
@@ -225,7 +431,32 @@ extern void led_sysfs_disable(struct led_classdev *led_cdev);
*
* Enable the led_cdev's sysfs interface.
*/
-extern void led_sysfs_enable(struct led_classdev *led_cdev);
+void led_sysfs_enable(struct led_classdev *led_cdev);
+
+/**
+ * led_compose_name - compose LED class device name
+ * @dev: LED controller device object
+ * @init_data: the LED class device initialization data
+ * @led_classdev_name: composed LED class device name
+ *
+ * Create the LED class device name based on the provided init_data argument.
+ * The name can have <devicename:color:function> or <color:function>
+ * form, depending on the init_data configuration.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_compose_name(struct device *dev, struct led_init_data *init_data,
+ char *led_classdev_name);
+
+/**
+ * led_get_color_name - get string representation of color ID
+ * @color_id: The LED_COLOR_ID_* constant
+ *
+ * Get the string name of a LED_COLOR_ID_* constant.
+ *
+ * Returns: A string constant or NULL on an invalid ID.
+ */
+const char *led_get_color_name(u8 color_id);
/**
* led_sysfs_is_disabled - check if LED sysfs interface is disabled
@@ -252,67 +483,77 @@ static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
struct led_trigger {
/* Trigger Properties */
const char *name;
- void (*activate)(struct led_classdev *led_cdev);
+ int (*activate)(struct led_classdev *led_cdev);
void (*deactivate)(struct led_classdev *led_cdev);
+ /* Brightness set by led_trigger_event */
+ enum led_brightness brightness;
+
+ /* LED-private triggers have this set */
+ struct led_hw_trigger_type *trigger_type;
+
/* LEDs under control by this trigger (for simple triggers) */
- rwlock_t leddev_list_lock;
+ spinlock_t leddev_list_lock;
struct list_head led_cdevs;
/* Link to next registered trigger */
struct list_head next_trig;
+
+ const struct attribute_group **groups;
};
-ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count);
-ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
- char *buf);
+/*
+ * Currently the attributes in struct led_trigger::groups are added directly to
+ * the LED device. As this might change in the future, the following
+ * macros abstract getting the LED device and its trigger_data from the dev
+ * parameter passed to the attribute accessor functions.
+ */
+#define led_trigger_get_led(dev) ((struct led_classdev *)dev_get_drvdata((dev)))
+#define led_trigger_get_drvdata(dev) (led_get_trigger_data(led_trigger_get_led(dev)))
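A sketch of an attribute accessor built on these macros (struct foo_trig_data and its interval field are hypothetical trigger_data):

struct foo_trig_data {
	unsigned int interval;
};

static ssize_t interval_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct foo_trig_data *data = led_trigger_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", data->interval);
}
static DEVICE_ATTR_RO(interval);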
/* Registration functions for complex triggers */
-extern int led_trigger_register(struct led_trigger *trigger);
-extern void led_trigger_unregister(struct led_trigger *trigger);
-extern int devm_led_trigger_register(struct device *dev,
+int led_trigger_register(struct led_trigger *trigger);
+void led_trigger_unregister(struct led_trigger *trigger);
+int devm_led_trigger_register(struct device *dev,
struct led_trigger *trigger);
-extern void led_trigger_register_simple(const char *name,
+void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
-extern void led_trigger_unregister_simple(struct led_trigger *trigger);
-extern void led_trigger_event(struct led_trigger *trigger,
- enum led_brightness event);
-extern void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off);
-extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
- int invert);
-extern void led_trigger_set_default(struct led_classdev *led_cdev);
-extern void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger);
-extern void led_trigger_remove(struct led_classdev *led_cdev);
+void led_trigger_unregister_simple(struct led_trigger *trigger);
+void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
+void led_mc_trigger_event(struct led_trigger *trig,
+ unsigned int *intensity_value, unsigned int num_colors,
+ enum led_brightness brightness);
+void led_trigger_blink(struct led_trigger *trigger, unsigned long delay_on,
+ unsigned long delay_off);
+void led_trigger_blink_oneshot(struct led_trigger *trigger,
+ unsigned long delay_on,
+ unsigned long delay_off,
+ int invert);
+void led_trigger_set_default(struct led_classdev *led_cdev);
+int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger);
+void led_trigger_remove(struct led_classdev *led_cdev);
+
+static inline void led_set_trigger_data(struct led_classdev *led_cdev,
+ void *trigger_data)
+{
+ led_cdev->trigger_data = trigger_data;
+}
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
return led_cdev->trigger_data;
}
-/**
- * led_trigger_rename_static - rename a trigger
- * @name: the new trigger name
- * @trig: the LED trigger to rename
- *
- * Change a LED trigger name by copying the string passed in
- * name into current trigger name, which MUST be large
- * enough for the new string.
- *
- * Note that name must NOT point to the same string used
- * during LED registration, as that could lead to races.
- *
- * This is meant to be used on triggers with statically
- * allocated name.
- */
-extern void led_trigger_rename_static(const char *name,
- struct led_trigger *trig);
+static inline enum led_brightness
+led_trigger_get_brightness(const struct led_trigger *trigger)
+{
+ return trigger ? trigger->brightness : LED_OFF;
+}
+
+#define module_led_trigger(__led_trigger) \
+ module_driver(__led_trigger, led_trigger_register, \
+ led_trigger_unregister)
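Putting the pieces together, a minimal hypothetical trigger module might look like this (note that activate() now returns int, so setup failures can be reported):

static int foo_trig_activate(struct led_classdev *led_cdev)
{
	led_set_brightness(led_cdev, led_cdev->max_brightness);
	return 0;
}

static void foo_trig_deactivate(struct led_classdev *led_cdev)
{
	led_set_brightness(led_cdev, LED_OFF);
}

static struct led_trigger foo_led_trigger = {
	.name = "foo",
	.activate = foo_trig_activate,
	.deactivate = foo_trig_deactivate,
};
module_led_trigger(foo_led_trigger);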
#else
@@ -325,45 +566,85 @@ static inline void led_trigger_register_simple(const char *name,
static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
+static inline void led_mc_trigger_event(struct led_trigger *trig,
+ unsigned int *intensity_value, unsigned int num_colors,
+ enum led_brightness brightness) {}
static inline void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off) {}
+ unsigned long delay_on,
+ unsigned long delay_off) {}
static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
-static inline void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger) {}
+static inline int led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger)
+{
+ return 0;
+}
+
static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
+static inline void led_set_trigger_data(struct led_classdev *led_cdev,
+					 void *trigger_data) {}
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
return NULL;
}
+static inline enum led_brightness
+led_trigger_get_brightness(const struct led_trigger *trigger)
+{
+ return LED_OFF;
+}
+
#endif /* CONFIG_LEDS_TRIGGERS */
+/* Trigger specific enum */
+enum led_trigger_netdev_modes {
+ TRIGGER_NETDEV_LINK = 0,
+ TRIGGER_NETDEV_LINK_10,
+ TRIGGER_NETDEV_LINK_100,
+ TRIGGER_NETDEV_LINK_1000,
+ TRIGGER_NETDEV_LINK_2500,
+ TRIGGER_NETDEV_LINK_5000,
+ TRIGGER_NETDEV_LINK_10000,
+ TRIGGER_NETDEV_HALF_DUPLEX,
+ TRIGGER_NETDEV_FULL_DUPLEX,
+ TRIGGER_NETDEV_TX,
+ TRIGGER_NETDEV_RX,
+ TRIGGER_NETDEV_TX_ERR,
+ TRIGGER_NETDEV_RX_ERR,
+
+ /* Keep last */
+ __TRIGGER_NETDEV_MAX,
+};
+
/* Trigger specific functions */
#ifdef CONFIG_LEDS_TRIGGER_DISK
-extern void ledtrig_disk_activity(void);
+void ledtrig_disk_activity(bool write);
#else
-static inline void ledtrig_disk_activity(void) {}
+static inline void ledtrig_disk_activity(bool write) {}
#endif
#ifdef CONFIG_LEDS_TRIGGER_MTD
-extern void ledtrig_mtd_activity(void);
+void ledtrig_mtd_activity(void);
#else
static inline void ledtrig_mtd_activity(void) {}
#endif
#if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE)
-extern void ledtrig_flash_ctrl(bool on);
-extern void ledtrig_torch_ctrl(bool on);
+void ledtrig_flash_ctrl(bool on);
+void ledtrig_torch_ctrl(bool on);
#else
static inline void ledtrig_flash_ctrl(bool on) {}
static inline void ledtrig_torch_ctrl(bool on) {}
#endif
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_BACKLIGHT)
+void ledtrig_backlight_blank(bool blank);
+#else
+static inline void ledtrig_backlight_blank(bool blank) {}
+#endif
+
/*
* Generic LED platform data for describing LED names and default triggers.
*/
@@ -378,7 +659,15 @@ struct led_platform_data {
struct led_info *leds;
};
-struct gpio_desc;
+struct led_properties {
+ u32 color;
+ bool color_present;
+ const char *function;
+ u32 func_enum;
+ bool func_enum_present;
+ const char *label;
+};
+
typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state,
unsigned long *delay_on,
unsigned long *delay_off);
@@ -392,12 +681,13 @@ struct gpio_led {
unsigned retain_state_suspended : 1;
unsigned panic_indicator : 1;
unsigned default_state : 2;
+ unsigned retain_state_shutdown : 1;
/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
struct gpio_desc *gpiod;
};
-#define LEDS_GPIO_DEFSTATE_OFF 0
-#define LEDS_GPIO_DEFSTATE_ON 1
-#define LEDS_GPIO_DEFSTATE_KEEP 2
+#define LEDS_GPIO_DEFSTATE_OFF LEDS_DEFSTATE_OFF
+#define LEDS_GPIO_DEFSTATE_ON LEDS_DEFSTATE_ON
+#define LEDS_GPIO_DEFSTATE_KEEP LEDS_DEFSTATE_KEEP
struct gpio_led_platform_data {
int num_leds;
@@ -409,7 +699,7 @@ struct gpio_led_platform_data {
gpio_blink_set_t gpio_blink_set;
};
-#ifdef CONFIG_NEW_LEDS
+#ifdef CONFIG_LEDS_GPIO_REGISTER
struct platform_device *gpio_led_register_device(
int id, const struct gpio_led_platform_data *pdata);
#else
@@ -428,7 +718,7 @@ enum cpu_led_event {
CPU_LED_HALTED, /* Machine shutdown */
};
#ifdef CONFIG_LEDS_TRIGGER_CPU
-extern void ledtrig_cpu(enum cpu_led_event evt);
+void ledtrig_cpu(enum cpu_led_event evt);
#else
static inline void ledtrig_cpu(enum cpu_led_event evt)
{
@@ -437,11 +727,27 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
-extern void led_classdev_notify_brightness_hw_changed(
- struct led_classdev *led_cdev, enum led_brightness brightness);
+void led_classdev_notify_brightness_hw_changed(
+ struct led_classdev *led_cdev, unsigned int brightness);
#else
static inline void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness) { }
#endif
+/**
+ * struct led_pattern - pattern interval settings
+ * @delta_t: pattern interval delay, in milliseconds
+ * @brightness: pattern interval brightness
+ */
+struct led_pattern {
+ u32 delta_t;
+ int brightness;
+};
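For instance, a hypothetical two-step pattern suitable for a pattern_set() implementation, 500 ms fully on followed by 500 ms off:

static struct led_pattern foo_blink_pattern[] = {
	{ .delta_t = 500, .brightness = LED_FULL },
	{ .delta_t = 500, .brightness = LED_OFF },
};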
+
+enum led_audio {
+ LED_AUDIO_MUTE, /* master mute LED */
+ LED_AUDIO_MICMUTE, /* mic mute LED */
+ NUM_AUDIO_LEDS
+};
+
#endif /* __LINUX_LEDS_H_INCLUDED */
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
deleted file mode 100644
index a65e9646e4b1..000000000000
--- a/include/linux/leds_pwm.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * PWM LED driver data - see drivers/leds/leds-pwm.c
- */
-#ifndef __LINUX_LEDS_PWM_H
-#define __LINUX_LEDS_PWM_H
-
-struct led_pwm {
- const char *name;
- const char *default_trigger;
- unsigned pwm_id __deprecated;
- u8 active_low;
- unsigned max_brightness;
- unsigned pwm_period_ns;
-};
-
-struct led_pwm_platform_data {
- int num_leds;
- struct led_pwm *leds;
-};
-
-#endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 931c32f1f18d..39534fafa36a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1,26 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2003-2005 Red Hat, Inc. All rights reserved.
* Copyright 2003-2005 Jeff Garzik
*
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
- *
*/
#ifndef __LINUX_LIBATA_H__
@@ -38,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/cdrom.h>
#include <linux/sched.h>
+#include <linux/async.h>
/*
* Define if arch has non-standard setup. This is a _PCI_ standard
@@ -54,80 +39,99 @@
* compile-time options: to be removed as soon as all the drivers are
* converted to the new debugging mechanism
*/
-#undef ATA_DEBUG /* debugging output */
-#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
-#undef ATA_NDEBUG /* define to disable quick runtime checks */
-
-/* note: prints function name for you */
-#ifdef ATA_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef ATA_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* ATA_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* ATA_DEBUG */
-
-#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-
-#define ata_print_version_once(dev, version) \
-({ \
- static bool __print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- ata_print_version(dev, version); \
- } \
-})
+/* defines only for the constants which don't work well as enums */
+#define ATA_TAG_POISON 0xfafbfcfdU
-/* NEW: debug levels */
-#define HAVE_LIBATA_MSG 1
+/*
+ * Quirk flags bits.
+ * ata_device->quirks is an unsigned int, so __ATA_QUIRK_MAX must not exceed 32.
+ */
+enum ata_quirks {
+ __ATA_QUIRK_DIAGNOSTIC, /* Failed boot diag */
+ __ATA_QUIRK_NODMA, /* DMA problems */
+ __ATA_QUIRK_NONCQ, /* Don't use NCQ */
+ __ATA_QUIRK_MAX_SEC_128, /* Limit max sects to 128 */
+ __ATA_QUIRK_BROKEN_HPA, /* Broken HPA */
+ __ATA_QUIRK_DISABLE, /* Disable it */
+ __ATA_QUIRK_HPA_SIZE, /* Native size off by one */
+ __ATA_QUIRK_IVB, /* cbl det validity bit bugs */
+ __ATA_QUIRK_STUCK_ERR, /* Stuck ERR on next PACKET */
+ __ATA_QUIRK_BRIDGE_OK, /* No bridge limits */
+ __ATA_QUIRK_ATAPI_MOD16_DMA, /* Use ATAPI DMA for commands that */
+ /* are not a multiple of 16 bytes */
+ __ATA_QUIRK_FIRMWARE_WARN, /* Firmware update warning */
+ __ATA_QUIRK_1_5_GBPS, /* Force 1.5 Gbps */
+ __ATA_QUIRK_NOSETXFER, /* Skip SETXFER, SATA only */
+ __ATA_QUIRK_BROKEN_FPDMA_AA, /* Skip AA */
+ __ATA_QUIRK_DUMP_ID, /* Dump IDENTIFY data */
+ __ATA_QUIRK_MAX_SEC_LBA48, /* Set max sects to 65535 */
+ __ATA_QUIRK_ATAPI_DMADIR, /* Device requires dmadir */
+ __ATA_QUIRK_NO_NCQ_TRIM, /* Do not use queued TRIM */
+ __ATA_QUIRK_NOLPM, /* Do not use LPM */
+ __ATA_QUIRK_WD_BROKEN_LPM, /* Some WDs have broken LPM */
+ __ATA_QUIRK_ZERO_AFTER_TRIM, /* Guarantees zero after trim */
+ __ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */
+ __ATA_QUIRK_NOTRIM, /* Do not use TRIM */
+ __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */
+ __ATA_QUIRK_MAX_SEC_8191, /* Limit max sects to 8191 */
+ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */
+ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */
+ __ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */
+ __ATA_QUIRK_NO_ID_DEV_LOG, /* Identify device log missing */
+ __ATA_QUIRK_NO_LOG_DIR, /* Do not read log directory */
+ __ATA_QUIRK_NO_FUA, /* Do not use FUA */
+
+ __ATA_QUIRK_MAX,
+};
+/*
+ * Quirk flags: may be set by libata or controller drivers on drives.
+ * Some quirks may be drive/controller pair dependent.
+ */
enum {
- ATA_MSG_DRV = 0x0001,
- ATA_MSG_INFO = 0x0002,
- ATA_MSG_PROBE = 0x0004,
- ATA_MSG_WARN = 0x0008,
- ATA_MSG_MALLOC = 0x0010,
- ATA_MSG_CTL = 0x0020,
- ATA_MSG_INTR = 0x0040,
- ATA_MSG_ERR = 0x0080,
+ ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
+ ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
+ ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
+ ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
+ ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
+ ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
+ ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
+ ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
+ ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
+ ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
+ ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
+ ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
+ ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
+ ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
+ ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
+ ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
+ ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
+ ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
+ ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
+ ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
+ ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
+ ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
+ ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
+ ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
+ ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
+ ATA_QUIRK_MAX_SEC_8191 = (1U << __ATA_QUIRK_MAX_SEC_8191),
+ ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
+ ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
+ ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI),
+ ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
+ ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
+ ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
};
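As a hedged illustration of how the per-device quirk bits are consumed (the helper name is made up; ata_id_has_ncq() comes from <linux/ata.h>):

static bool foo_dev_may_use_ncq(struct ata_device *dev)
{
	/* NCQ requires device support and the absence of the quirk. */
	return ata_id_has_ncq(dev->id) && !(dev->quirks & ATA_QUIRK_NONCQ);
}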
-#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
-#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
-#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
-#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
-#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
-#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
-#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
-#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
-
-static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
-{
- if (dval < 0 || dval >= (sizeof(u32) * 8))
- return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
- if (!dval)
- return 0;
- return (1 << dval) - 1;
-}
-
-/* defines only for the constants which don't work well as enums */
-#define ATA_TAG_POISON 0xfafbfcfdU
-
enum {
/* various global constants */
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */
ATA_DEF_QUEUE = 1,
- /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
ATA_MAX_QUEUE = 32,
- ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
+ ATA_TAG_INTERNAL = ATA_MAX_QUEUE,
ATA_SHORT_PAUSE = 16,
ATAPI_MAX_DRAIN = 16 << 10,
@@ -136,7 +140,6 @@ enum {
ATA_SHT_EMULATED = 1,
ATA_SHT_THIS_ID = -1,
- ATA_SHT_USE_CLUSTERING = 1,
/* struct ata_taskfile flags */
ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
@@ -157,28 +160,35 @@ enum {
ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
ATA_DFLAG_AN = (1 << 7), /* AN configured */
ATA_DFLAG_TRUSTED = (1 << 8), /* device supports trusted send/recv */
+ ATA_DFLAG_FUA = (1 << 9), /* device supports FUA */
ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
- ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
-
- ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
- ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
- ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
- ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
- ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
- ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
- ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
- ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */
- ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */
- ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
-
+ ATA_DFLAG_NCQ_SEND_RECV = (1 << 11), /* device supports NCQ SEND and RECV */
+ ATA_DFLAG_NCQ_PRIO = (1 << 12), /* device supports NCQ priority */
+ ATA_DFLAG_CDL = (1 << 13), /* supports cmd duration limits */
+ ATA_DFLAG_CFG_MASK = (1 << 14) - 1,
+
+ ATA_DFLAG_PIO = (1 << 14), /* device limited to PIO mode */
+ ATA_DFLAG_NCQ_OFF = (1 << 15), /* device limited to non-NCQ mode */
+ ATA_DFLAG_SLEEPING = (1 << 16), /* device is sleeping */
+ ATA_DFLAG_DUBIOUS_XFER = (1 << 17), /* data transfer not verified */
+ ATA_DFLAG_NO_UNLOAD = (1 << 18), /* device doesn't support unload */
+ ATA_DFLAG_UNLOCK_HPA = (1 << 19), /* unlock HPA */
+ ATA_DFLAG_INIT_MASK = (1 << 20) - 1,
+
+ ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 20), /* Priority cmds sent to dev */
+ ATA_DFLAG_CDL_ENABLED = (1 << 21), /* cmd duration limits is enabled */
+ ATA_DFLAG_RESUMING = (1 << 22), /* Device is resuming */
ATA_DFLAG_DETACH = (1 << 24),
ATA_DFLAG_DETACHED = (1 << 25),
-
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */
- ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */
+
+ ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
+ ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
+ ATA_DFLAG_NCQ_PRIO | ATA_DFLAG_FUA | \
+ ATA_DFLAG_CDL),
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
@@ -194,6 +204,7 @@ enum {
ATA_DEV_NONE = 11, /* no device */
/* struct ata_link flags */
+ /* NOTE: struct ata_force_param currently stores lflags in u16 */
ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */
ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */
@@ -205,12 +216,13 @@ enum {
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
- ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */
+ ATA_LFLAG_NO_DEBOUNCE_DELAY = (1 << 11), /* no debounce delay on link resume */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
@@ -250,6 +262,7 @@ enum {
ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
+ ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
@@ -261,15 +274,18 @@ enum {
/* struct ata_queued_cmd flags */
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
+ ATA_QCFLAG_RTF_FILLED = (1 << 2), /* result TF has been filled */
ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */
+ ATA_QCFLAG_HAS_CDL = (1 << 8), /* qc has a CDL descriptor set */
- ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
+ ATA_QCFLAG_EH = (1 << 16), /* cmd aborted and owned by EH */
ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
+ ATA_QCFLAG_EH_SUCCESS_CMD = (1 << 19), /* EH should fetch sense for this successful cmd */
/* host set flags */
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
@@ -277,11 +293,13 @@ enum {
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */
- /* various lengths of time */
- ATA_TMOUT_BOOT = 30000, /* heuristic */
- ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
+ /* Various lengths of time */
ATA_TMOUT_INTERNAL_QUICK = 5000,
ATA_TMOUT_MAX_PARK = 30000,
@@ -310,7 +328,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that
@@ -336,7 +354,7 @@ enum {
PORT_DISABLED = 2,
/* encoding various smaller bitmaps into a single
- * unsigned long bitmap
+ * unsigned int bitmap
*/
ATA_NR_PIO_MODES = 7,
ATA_NR_MWDMA_MODES = 5,
@@ -368,8 +386,11 @@ enum {
ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
+ ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */
+ ATA_EH_SET_ACTIVE = (1 << 7), /* Set a device to active power mode */
- ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
+ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK |
+ ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE,
ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
ATA_EH_ENABLE_LINK,
@@ -384,6 +405,7 @@ enum {
ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */
ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */
ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
+ ATA_EHI_DID_PRINT_QUIRKS = (1 << 21), /* already printed quirks info */
ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
@@ -397,7 +419,6 @@ enum {
ATA_LINK_RESUME_TRIES = 5,
/* how hard are we gonna try to probe/recover devices */
- ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
ATA_EH_PMP_TRIES = 5,
ATA_EH_PMP_LINK_TRIES = 3,
@@ -407,40 +428,9 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
-
- /* Horkage types. May be set by libata or controller on drives
- (some horkage may be drive/controller pair dependent */
-
- ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
- ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
- ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
- ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
- ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
- ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */
- ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
- ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
- ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
- ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
- ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
- not multiple of 16 bytes */
- ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
- ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
- ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
- ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
- ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
- ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
- ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
- ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
- ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
- ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
- ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
- ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
- ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
- ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
-
- /* DMA mask for user DMA control: User visible values; DO NOT
- renumber */
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
+
+ /* User visible DMA mask for DMA control. DO NOT renumber. */
ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
@@ -483,12 +473,9 @@ enum {
};
enum ata_xfer_mask {
- ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1)
- << ATA_SHIFT_PIO,
- ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1)
- << ATA_SHIFT_MWDMA,
- ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1)
- << ATA_SHIFT_UDMA,
+ ATA_MASK_PIO = ((1U << ATA_NR_PIO_MODES) - 1) << ATA_SHIFT_PIO,
+ ATA_MASK_MWDMA = ((1U << ATA_NR_MWDMA_MODES) - 1) << ATA_SHIFT_MWDMA,
+ ATA_MASK_UDMA = ((1U << ATA_NR_UDMA_MODES) - 1) << ATA_SHIFT_UDMA,
};
enum hsm_task_states {
@@ -501,6 +488,7 @@ enum hsm_task_states {
};
enum ata_completion_errors {
+ AC_ERR_OK = 0, /* no error */
AC_ERR_DEV = (1 << 0), /* device reported error */
AC_ERR_HSM = (1 << 1), /* host state machine violation */
AC_ERR_TIMEOUT = (1 << 2), /* timeout */
@@ -515,13 +503,27 @@ enum ata_completion_errors {
};
/*
- * Link power management policy: If you alter this, you also need to
- * alter libata-scsi.c (for the ascii descriptions)
+ * Link Power Management (LPM) policies.
+ *
+ * The default LPM policy to use for a device link is defined using these values
+ * with the CONFIG_SATA_MOBILE_LPM_POLICY config option and applied through the
+ * target_lpm_policy field of struct ata_port.
+ *
+ * If you alter this, you also need to alter the policy names used with the
+ * sysfs attribute link_power_management_policy defined in libata-sata.c.
*/
enum ata_lpm_policy {
+ /* Keep firmware settings */
ATA_LPM_UNKNOWN,
+ /* No power savings (maximum performance) */
ATA_LPM_MAX_POWER,
+ /* HIPM (Partial) */
ATA_LPM_MED_POWER,
+ /* HIPM (Partial) and DIPM (Partial and Slumber) */
+ ATA_LPM_MED_POWER_WITH_DIPM,
+ /* HIPM (Partial and DevSleep) and DIPM (Partial and Slumber) */
+ ATA_LPM_MIN_POWER_WITH_PARTIAL,
+ /* HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber) */
ATA_LPM_MIN_POWER,
};
@@ -545,12 +547,16 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
unsigned long deadline);
typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
-extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_unload_heads;
+#ifdef CONFIG_SATA_HOST
+extern struct device_attribute dev_attr_link_power_management_supported;
+extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_ncq_prio_supported;
extern struct device_attribute dev_attr_ncq_prio_enable;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
+#endif
enum sw_activity {
OFF,
@@ -570,7 +576,10 @@ struct ata_taskfile {
u8 hob_lbam;
u8 hob_lbah;
- u8 feature;
+ union {
+ u8 error;
+ u8 feature;
+ };
u8 nsect;
u8 lbal;
u8 lbam;
@@ -578,7 +587,10 @@ struct ata_taskfile {
u8 device;
- u8 command; /* IO operation */
+ union {
+ u8 status;
+ u8 command;
+ };
u32 auxiliary; /* auxiliary field */
/* from SATA 3.1 and */
@@ -616,12 +628,13 @@ struct ata_host {
void *private_data;
struct ata_port_operations *ops;
unsigned long flags;
+ struct kref kref;
struct mutex eh_mutex;
struct task_struct *eh_owner;
struct ata_port *simplex_claimed; /* channel owning the DMA */
- struct ata_port *ports[0];
+ struct ata_port *ports[];
};
struct ata_queued_cmd {
@@ -635,7 +648,8 @@ struct ata_queued_cmd {
u8 cdb[ATAPI_CDB_LEN];
unsigned long flags; /* ATA_QCFLAG_xxx */
- unsigned int tag;
+ unsigned int tag; /* libata core tag */
+ unsigned int hw_tag; /* driver tag */
unsigned int n_elem;
unsigned int orig_n_elem;
@@ -679,10 +693,37 @@ struct ata_ering {
struct ata_ering_entry ring[ATA_ERING_SIZE];
};
+struct ata_cpr {
+ u8 num;
+ u8 num_storage_elements;
+ u64 start_lba;
+ u64 num_lbas;
+};
+
+struct ata_cpr_log {
+ u8 nr_cpr;
+ struct ata_cpr cpr[] __counted_by(nr_cpr);
+};
+
+struct ata_cdl {
+ /*
+ * Buffer to cache the CDL log page 18h (command duration descriptors)
+ * for SCSI-ATA translation.
+ */
+ u8 desc_log_buf[ATA_LOG_CDL_SIZE];
+
+ /*
+ * Buffer to handle reading the sense data for successful NCQ Commands
+ * log page for commands using a CDL with one of the limits policy set
+ * to 0xD (successful completion with sense data available bit set).
+ */
+ u8 ncq_sense_log_buf[ATA_LOG_SENSE_NCQ_SIZE];
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
- unsigned int horkage; /* List of broken features */
+ unsigned int quirks; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
void *private_data;
@@ -711,9 +752,9 @@ struct ata_device {
unsigned int cdb_len;
/* per-dev xfer mask */
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
/* for CHS addressing */
u16 cylinders; /* Number of cylinders */
@@ -725,6 +766,9 @@ struct ata_device {
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
} ____cacheline_aligned;
+ /* General Purpose Log Directory log page */
+ u8 gp_log_dir[ATA_SECT_SIZE] ____cacheline_aligned;
+
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
@@ -738,10 +782,19 @@ struct ata_device {
u32 zac_zones_optimal_nonseq;
u32 zac_zones_max_open;
+ /* Concurrent positioning ranges */
+ struct ata_cpr_log *cpr_log;
+
+ /* Command Duration Limits support */
+ struct ata_cdl *cdl;
+
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
struct ata_ering ering;
+
+ /* For EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
@@ -827,7 +880,6 @@ struct ata_port {
/* Flags that change dynamically, protected by ap->lock */
unsigned int pflags; /* ATA_PFLAG_xxx */
unsigned int print_id; /* user visible unique port ID */
- unsigned int local_port_no; /* host local port num */
unsigned int port_no; /* 0 based port no. inside the host */
#ifdef CONFIG_ATA_SFF
@@ -847,11 +899,9 @@ struct ata_port {
unsigned int udma_mask;
unsigned int cbl; /* cable type; ATA_CBL_xxx */
- struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
- unsigned long sas_tag_allocated; /* for sas tag allocation only */
- unsigned int qc_active;
+ struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1];
+ u64 qc_active;
int nr_active_links; /* #links with active qcs */
- unsigned int sas_last_tag; /* track next tag hw expects */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -867,11 +917,10 @@ struct ata_port {
struct mutex scsi_scan_mutex;
struct delayed_work hotplug_task;
- struct work_struct scsi_rescan_task;
+ struct delayed_work scsi_rescan_task;
unsigned int hsm_task_state;
- u32 msg_enable;
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
int eh_tries;
@@ -881,7 +930,9 @@ struct ata_port {
enum ata_lpm_policy target_lpm_policy;
struct timer_list fastdrain_timer;
- unsigned long fastdrain_cnt;
+ unsigned int fastdrain_cnt;
+
+ async_cookie_t cookie;
int em_message_type;
void *private_data;
@@ -889,8 +940,6 @@ struct ata_port {
#ifdef CONFIG_ATA_ACPI
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
#endif
- /* owned by EH */
- u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* The following initializer overrides a method to NULL whether one of
@@ -900,38 +949,41 @@ struct ata_port {
*/
#define ATA_OP_NULL (void *)(unsigned long)(-ENOENT)
+struct ata_reset_operations {
+ ata_prereset_fn_t prereset;
+ ata_reset_fn_t softreset;
+ ata_reset_fn_t hardreset;
+ ata_postreset_fn_t postreset;
+};
+
struct ata_port_operations {
/*
* Command execution
*/
- int (*qc_defer)(struct ata_queued_cmd *qc);
- int (*check_atapi_dma)(struct ata_queued_cmd *qc);
- void (*qc_prep)(struct ata_queued_cmd *qc);
+ int (*qc_defer)(struct ata_queued_cmd *qc);
+ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
- bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_ncq_fill_rtf)(struct ata_port *ap, u64 done_mask);
/*
* Configuration and exception handling
*/
int (*cable_detect)(struct ata_port *ap);
- unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
+ unsigned int (*mode_filter)(struct ata_device *dev, unsigned int xfer_mask);
void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
- unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
+ unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf,
+ __le16 *id);
void (*dev_config)(struct ata_device *dev);
void (*freeze)(struct ata_port *ap);
void (*thaw)(struct ata_port *ap);
- ata_prereset_fn_t prereset;
- ata_reset_fn_t softreset;
- ata_reset_fn_t hardreset;
- ata_postreset_fn_t postreset;
- ata_prereset_fn_t pmp_prereset;
- ata_reset_fn_t pmp_softreset;
- ata_reset_fn_t pmp_hardreset;
- ata_postreset_fn_t pmp_postreset;
+ struct ata_reset_operations reset;
+ struct ata_reset_operations pmp_reset;
void (*error_handler)(struct ata_port *ap);
void (*lost_interrupt)(struct ata_port *ap);
void (*post_internal_cmd)(struct ata_queued_cmd *qc);
@@ -994,12 +1046,6 @@ struct ata_port_operations {
ssize_t size);
/*
- * Obsolete
- */
- void (*phy_reset)(struct ata_port *ap);
- void (*eng_timeout)(struct ata_port *ap);
-
- /*
* ->inherits must be the last field and all the preceding
* fields must be pointers.
*/
@@ -1009,9 +1055,9 @@ struct ata_port_operations {
struct ata_port_info {
unsigned long flags;
unsigned long link_flags;
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
struct ata_port_operations *port_ops;
void *private_data;
};
@@ -1032,10 +1078,6 @@ struct ata_timing {
/*
* Core layer - drivers/ata/libata-core.c
*/
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
-
extern struct ata_port_operations ata_dummy_port_ops;
extern const struct ata_port_info ata_dummy_port_info;
@@ -1073,73 +1115,53 @@ static inline int is_multi_taskfile(struct ata_taskfile *tf)
(tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
}
-static inline const unsigned long *
-sata_ehc_deb_timing(struct ata_eh_context *ehc)
+static inline int ata_port_is_dummy(struct ata_port *ap)
{
- if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
- return sata_deb_timing_hotplug;
- else
- return sata_deb_timing_normal;
+ return ap->ops == &ata_dummy_port_ops;
}
-static inline int ata_port_is_dummy(struct ata_port *ap)
+static inline bool ata_port_is_frozen(const struct ata_port *ap)
{
- return ap->ops == &ata_dummy_port_ops;
+ return ap->pflags & ATA_PFLAG_FROZEN;
}
-extern int sata_set_spd(struct ata_link *link);
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
-extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
- unsigned long deadline);
-extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- bool spm_wakeup);
-extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
- bool *online, int (*check_ready)(struct ata_link *));
-extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
-extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
+extern struct ata_host *ata_host_alloc(struct device *dev, int n_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
-extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_host_get(struct ata_host *host);
+extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern int ata_host_activate(struct ata_host *host, int irq,
irq_handler_t irq_handler, unsigned long irq_flags,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern void ata_host_detach(struct ata_host *host);
extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
-extern int ata_scsi_detect(struct scsi_host_template *sht);
-extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
+#ifdef CONFIG_COMPAT
+#define ATA_SCSI_COMPAT_IOCTL .compat_ioctl = ata_scsi_ioctl,
+#else
+#define ATA_SCSI_COMPAT_IOCTL /* empty */
+#endif
extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+#if IS_REACHABLE(CONFIG_ATA)
+bool ata_scsi_dma_need_drain(struct request *rq);
+#else
+#define ata_scsi_dma_need_drain NULL
+#endif
extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
- int cmd, void __user *arg);
-extern void ata_sas_port_destroy(struct ata_port *);
-extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
- struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
-extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
-extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
-extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
-extern int sata_scr_valid(struct ata_link *link);
-extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
-extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
-extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
+ unsigned int cmd, void __user *arg);
extern bool ata_link_online(struct ata_link *link);
extern bool ata_link_offline(struct ata_link *link);
#ifdef CONFIG_PM
-extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
+extern void ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
extern void ata_sas_port_suspend(struct ata_port *ap);
extern void ata_sas_port_resume(struct ata_port *ap);
@@ -1154,51 +1176,150 @@ static inline void ata_sas_port_resume(struct ata_port *ap)
extern int ata_ratelimit(void);
extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
- u32 val, unsigned long interval, unsigned long timeout);
+ u32 val, unsigned int interval, unsigned int timeout);
extern int atapi_cmd_type(u8 opcode);
-extern void ata_tf_to_fis(const struct ata_taskfile *tf,
- u8 pmp, int is_cmd, u8 *fis);
-extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
-extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
- unsigned long mwdma_mask, unsigned long udma_mask);
-extern void ata_unpack_xfermask(unsigned long xfer_mask,
- unsigned long *pio_mask, unsigned long *mwdma_mask,
- unsigned long *udma_mask);
-extern u8 ata_xfer_mask2mode(unsigned long xfer_mask);
-extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
-extern int ata_xfer_mode2shift(unsigned long xfer_mode);
-extern const char *ata_mode_string(unsigned long xfer_mask);
-extern unsigned long ata_id_xfermask(const u16 *id);
+extern unsigned int ata_pack_xfermask(unsigned int pio_mask,
+ unsigned int mwdma_mask,
+ unsigned int udma_mask);
+extern void ata_unpack_xfermask(unsigned int xfer_mask,
+ unsigned int *pio_mask,
+ unsigned int *mwdma_mask,
+ unsigned int *udma_mask);
+extern u8 ata_xfer_mask2mode(unsigned int xfer_mask);
+extern unsigned int ata_xfer_mode2mask(u8 xfer_mode);
+extern int ata_xfer_mode2shift(u8 xfer_mode);
+extern const char *ata_mode_string(unsigned int xfer_mask);
+extern unsigned int ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern unsigned int ata_port_classify(struct ata_port *ap,
+ const struct ata_taskfile *tf);
extern void ata_dev_disable(struct ata_device *adev);
extern void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id);
+ struct ata_taskfile *tf, __le16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
-extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
+extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
- struct block_device *bdev,
+ struct gendisk *unused,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
-extern int ata_scsi_slave_config(struct scsi_device *sdev);
-extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
+extern int ata_scsi_sdev_init(struct scsi_device *sdev);
+int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim);
+extern void ata_scsi_sdev_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+ int queue_depth);
+extern int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev,
+ bool *supported);
+extern int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
+ bool *enabled);
+extern int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
+ bool enable);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
-extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+
+/*
+ * SATA specific code - drivers/ata/libata-sata.c
+ */
+#ifdef CONFIG_SATA_HOST
+extern const unsigned int sata_deb_timing_normal[];
+extern const unsigned int sata_deb_timing_hotplug[];
+extern const unsigned int sata_deb_timing_long[];
+
+static inline const unsigned int *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
+ return sata_deb_timing_hotplug;
+ else
+ return sata_deb_timing_normal;
+}
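/*
 * Illustrative sketch, not from this patch: an error handler would pick
 * the debounce table via sata_ehc_deb_timing() and hand it to the reset
 * helpers, roughly as below ("foo_hardreset" is hypothetical; a real
 * implementation would also classify the attached device).
 */
static int foo_hardreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;

	return sata_link_hardreset(link, timing, deadline, &online, NULL);
}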
+
+extern int sata_scr_valid(struct ata_link *link);
+extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
+extern int sata_set_spd(struct ata_link *link);
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+extern int sata_link_hardreset(struct ata_link *link,
+ const unsigned int *timing, unsigned long deadline,
+ bool *online, int (*check_ready)(struct ata_link *));
+extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
+ unsigned long deadline);
+extern void ata_eh_analyze_ncq_error(struct ata_link *link);
+#else
+static inline const unsigned int *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ return NULL;
+}
+static inline int sata_scr_valid(struct ata_link *link) { return 0; }
+static inline int sata_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_scr_write(struct ata_link *link, int reg, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_link_hardreset(struct ata_link *link,
+ const unsigned int *timing,
+ unsigned long deadline,
+ bool *online,
+ int (*check_ready)(struct ata_link *))
+{
+ if (online)
+ *online = false;
+ return -EOPNOTSUPP;
+}
+static inline int sata_link_resume(struct ata_link *link,
+ const unsigned int *params,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
+#endif
+extern int sata_link_debounce(struct ata_link *link,
+ const unsigned int *params, unsigned long deadline);
+extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ bool spm_wakeup);
+extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_port_probe(struct ata_port *ap);
+extern struct ata_port *ata_port_alloc(struct ata_host *host);
+extern void ata_port_free(struct ata_port *ap);
+extern int ata_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_tport_delete(struct ata_port *ap);
+int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap);
+extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
+extern void ata_tf_to_fis(const struct ata_taskfile *tf,
+ u8 pmp, int is_cmd, u8 *fis);
+extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
+extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
+extern int sata_async_notification(struct ata_port *ap);
extern int ata_cable_40wire(struct ata_port *ap);
extern int ata_cable_80wire(struct ata_port *ap);
@@ -1208,12 +1329,6 @@ extern int ata_cable_unknown(struct ata_port *ap);
/* Timing helpers */
extern unsigned int ata_pio_need_iordy(const struct ata_device *);
-extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
-extern int ata_timing_compute(struct ata_device *, unsigned short,
- struct ata_timing *, int, int);
-extern void ata_timing_merge(const struct ata_timing *,
- const struct ata_timing *, struct ata_timing *,
- unsigned int);
extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);
/* PCI */
@@ -1228,6 +1343,7 @@ struct pci_bits {
};
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
+extern void ata_pci_shutdown_one(struct pci_dev *pdev);
extern void ata_pci_remove_one(struct pci_dev *pdev);
#ifdef CONFIG_PM
@@ -1240,7 +1356,7 @@ extern int ata_pci_device_resume(struct pci_dev *pdev);
struct platform_device;
-extern int ata_platform_remove_one(struct platform_device *pdev);
+extern void ata_platform_remove_one(struct platform_device *pdev);
/*
* ACPI - drivers/ata/libata-acpi.c
@@ -1254,9 +1370,9 @@ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
}
int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
-unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
- const struct ata_acpi_gtm *gtm);
-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
+unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm);
+int ata_acpi_cbl_pata_type(struct ata_port *ap);
#else
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
{
@@ -1281,10 +1397,9 @@ static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
return 0;
}
-static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
- const struct ata_acpi_gtm *gtm)
+static inline int ata_acpi_cbl_pata_type(struct ata_port *ap)
{
- return 0;
+ return ATA_CBL_PATA40;
}
#endif
@@ -1296,18 +1411,13 @@ extern void ata_port_wait_eh(struct ata_port *ap);
extern int ata_link_abort(struct ata_link *link);
extern int ata_port_abort(struct ata_port *ap);
extern int ata_port_freeze(struct ata_port *ap);
-extern int sata_async_notification(struct ata_port *ap);
extern void ata_eh_freeze_port(struct ata_port *ap);
extern void ata_eh_thaw_port(struct ata_port *ap);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
-extern void ata_eh_analyze_ncq_error(struct ata_link *link);
-extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset);
extern void ata_std_error_handler(struct ata_port *ap);
extern void ata_std_sched_eh(struct ata_port *ap);
extern void ata_std_end_eh(struct ata_port *ap);
@@ -1336,7 +1446,7 @@ extern int ata_link_nr_enabled(struct ata_link *link);
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
-extern struct device_attribute *ata_common_sdev_attrs[];
+extern const struct attribute_group *ata_common_sdev_groups[];
/*
* All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
@@ -1344,28 +1454,52 @@ extern struct device_attribute *ata_common_sdev_attrs[];
* edge driver's module reference, otherwise the driver can be unloaded
* even if the scsi_device is being accessed.
*/
-#define ATA_BASE_SHT(drv_name) \
+#define __ATA_BASE_SHT(drv_name) \
.module = THIS_MODULE, \
.name = drv_name, \
.ioctl = ata_scsi_ioctl, \
+ ATA_SCSI_COMPAT_IOCTL \
.queuecommand = ata_scsi_queuecmd, \
- .can_queue = ATA_DEF_QUEUE, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .dma_need_drain = ata_scsi_dma_need_drain, \
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
- .use_clustering = ATA_SHT_USE_CLUSTERING, \
.proc_name = drv_name, \
- .slave_configure = ata_scsi_slave_config, \
- .slave_destroy = ata_scsi_slave_destroy, \
- .eh_timed_out = ata_scsi_timed_out, \
+ .sdev_init = ata_scsi_sdev_init, \
+ .sdev_destroy = ata_scsi_sdev_destroy, \
.bios_param = ata_std_bios_param, \
- .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
- .sdev_attrs = ata_common_sdev_attrs
+ .unlock_native_capacity = ata_scsi_unlock_native_capacity,\
+ .max_sectors = ATA_MAX_SECTORS_LBA48
+
+#define ATA_SUBBASE_SHT(drv_name) \
+ __ATA_BASE_SHT(drv_name), \
+ .can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
+
+#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
+ __ATA_BASE_SHT(drv_name), \
+ .can_queue = drv_qd, \
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
+
+#define ATA_BASE_SHT(drv_name) \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_common_sdev_groups
+
+#ifdef CONFIG_SATA_HOST
+extern const struct attribute_group *ata_ncq_sdev_groups[];
#define ATA_NCQ_SHT(drv_name) \
- ATA_BASE_SHT(drv_name), \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_ncq_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth
+#define ATA_NCQ_SHT_QD(drv_name, drv_qd) \
+ ATA_SUBBASE_SHT_QD(drv_name, drv_qd), \
+ .sdev_groups = ata_ncq_sdev_groups, \
+ .change_queue_depth = ata_scsi_change_queue_depth
+#endif
+
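/*
 * Illustrative sketch, not from this patch: an NCQ-capable driver (here
 * the hypothetical "foo") instantiates its host template with the new
 * macros; note ATA_NCQ_SHT() is only available under CONFIG_SATA_HOST.
 */
#include <linux/libata.h>
#include <scsi/scsi_host.h>

static const struct scsi_host_template foo_sht = {
	ATA_NCQ_SHT("foo"),
};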
/*
* PMP helpers
*/
@@ -1397,7 +1531,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
static inline bool ata_is_host_link(const struct ata_link *link)
{
- return 1;
+ return true;
}
#endif /* CONFIG_SATA_PMP */
@@ -1408,53 +1542,73 @@ static inline int sata_srst_pmp(struct ata_link *link)
return link->pmp;
}
-/*
- * printk helpers
- */
-__printf(3, 4)
-void ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...);
+#define ata_port_printk(level, ap, fmt, ...) \
+ pr_ ## level ("ata%u: " fmt, (ap)->print_id, ##__VA_ARGS__)
#define ata_port_err(ap, fmt, ...) \
- ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_port_printk(err, ap, fmt, ##__VA_ARGS__)
#define ata_port_warn(ap, fmt, ...) \
- ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_port_printk(warn, ap, fmt, ##__VA_ARGS__)
#define ata_port_notice(ap, fmt, ...) \
- ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_port_printk(notice, ap, fmt, ##__VA_ARGS__)
#define ata_port_info(ap, fmt, ...) \
- ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_port_printk(info, ap, fmt, ##__VA_ARGS__)
#define ata_port_dbg(ap, fmt, ...) \
- ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_port_printk(debug, ap, fmt, ##__VA_ARGS__)
+
+#define ata_link_printk(level, link, fmt, ...) \
+do { \
+ if (sata_pmp_attached((link)->ap) || \
+ (link)->ap->slave_link) \
+ pr_ ## level ("ata%u.%02u: " fmt, \
+ (link)->ap->print_id, \
+ (link)->pmp, \
+ ##__VA_ARGS__); \
+ else \
+ pr_ ## level ("ata%u: " fmt, \
+ (link)->ap->print_id, \
+ ##__VA_ARGS__); \
+} while (0)
#define ata_link_err(link, fmt, ...) \
- ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_link_printk(err, link, fmt, ##__VA_ARGS__)
#define ata_link_warn(link, fmt, ...) \
- ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_link_printk(warn, link, fmt, ##__VA_ARGS__)
#define ata_link_notice(link, fmt, ...) \
- ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_link_printk(notice, link, fmt, ##__VA_ARGS__)
#define ata_link_info(link, fmt, ...) \
- ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_link_printk(info, link, fmt, ##__VA_ARGS__)
#define ata_link_dbg(link, fmt, ...) \
- ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_link_printk(debug, link, fmt, ##__VA_ARGS__)
+
+#define ata_dev_printk(level, dev, fmt, ...) \
+ pr_ ## level("ata%u.%02u: " fmt, \
+ (dev)->link->ap->print_id, \
+ (dev)->link->pmp + (dev)->devno, \
+ ##__VA_ARGS__)
#define ata_dev_err(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_dev_printk(err, dev, fmt, ##__VA_ARGS__)
#define ata_dev_warn(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_dev_printk(warn, dev, fmt, ##__VA_ARGS__)
#define ata_dev_notice(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_dev_printk(notice, dev, fmt, ##__VA_ARGS__)
#define ata_dev_info(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_dev_printk(info, dev, fmt, ##__VA_ARGS__)
#define ata_dev_dbg(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_dev_printk(debug, dev, fmt, ##__VA_ARGS__)
-void ata_print_version(const struct device *dev, const char *version);
+#define ata_dev_warn_once(dev, fmt, ...) \
+ pr_warn_once("ata%u.%02u: " fmt, \
+ (dev)->link->ap->print_id, \
+ (dev)->link->pmp + (dev)->devno, \
+ ##__VA_ARGS__)
+
+static inline void ata_print_version_once(const struct device *dev,
+ const char *version)
+{
+ dev_dbg_once(dev, "version %s\n", version);
+}
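/*
 * Illustrative sketch, not from this patch: call sites are unchanged by
 * the conversion to pr_<level>() based macros, except that no KERN_*
 * constant is passed any more ("foo_report_timeout" is a hypothetical
 * caller).
 */
static void foo_report_timeout(struct ata_device *dev, unsigned int msecs)
{
	/* expands to pr_warn("ata%u.%02u: ...") with port/device ids */
	ata_dev_warn(dev, "command timed out after %u msecs\n", msecs);
}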
/*
* ata_eh_info helpers
@@ -1482,17 +1636,47 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...);
extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
const char *name);
#endif
-
-static inline unsigned int ata_tag_valid(unsigned int tag)
+static inline void ata_port_desc_misc(struct ata_port *ap, int irq)
{
- return (tag < ATA_MAX_QUEUE) ? 1 : 0;
+ ata_port_desc(ap, "irq %d", irq);
+ ata_port_desc(ap, "lpm-pol %d", ap->target_lpm_policy);
+ if (ap->pflags & ATA_PFLAG_EXTERNAL)
+ ata_port_desc(ap, "ext");
}
-static inline unsigned int ata_tag_internal(unsigned int tag)
+static inline bool ata_tag_internal(unsigned int tag)
{
return tag == ATA_TAG_INTERNAL;
}
+static inline bool ata_tag_valid(unsigned int tag)
+{
+ return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
+}
+
+#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \
+ for ((tag) = 0; (tag) < (max_tag) && \
+ ({ qc = fn((ap), (tag)); 1; }); (tag)++) \
+
+/*
+ * Internal use only, iterate commands ignoring error handling and
+ * status of 'qc'.
+ */
+#define ata_qc_for_each_raw(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)
+
+/*
+ * Iterate all potential commands that can be queued
+ */
+#define ata_qc_for_each(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)
+
+/*
+ * Like ata_qc_for_each, but with the internal tag included
+ */
+#define ata_qc_for_each_with_internal(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
+
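/*
 * Illustrative sketch, not from this patch: walking the active commands
 * of a port with the new iterators. ata_qc_from_tag() yields NULL for
 * inactive slots, so each entry must be checked.
 */
static unsigned int foo_count_active_qcs(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int tag, count = 0;

	ata_qc_for_each(ap, qc, tag)
		if (qc)
			count++;
	return count;
}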
/*
* device helpers
*/
@@ -1603,19 +1787,35 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev,
(dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
/**
+ * ata_ncq_supported - Test whether NCQ is supported
+ * @dev: ATA device to test
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * true if @dev supports NCQ, false otherwise.
+ */
+static inline bool ata_ncq_supported(struct ata_device *dev)
+{
+ if (!IS_ENABLED(CONFIG_SATA_HOST))
+ return false;
+ return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+}
+
+/**
* ata_ncq_enabled - Test whether NCQ is enabled
- * @dev: ATA device to test for
+ * @dev: ATA device to test
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
- * 1 if NCQ is enabled for @dev, 0 otherwise.
+ * true if NCQ is enabled for @dev, false otherwise.
*/
-static inline int ata_ncq_enabled(struct ata_device *dev)
+static inline bool ata_ncq_enabled(struct ata_device *dev)
{
- return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
- ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+ return ata_ncq_supported(dev) && !(dev->flags & ATA_DFLAG_NCQ_OFF);
}
static inline bool ata_fpdma_dsm_supported(struct ata_device *dev)
@@ -1653,7 +1853,7 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
unsigned int tag)
{
- if (likely(ata_tag_valid(tag)))
+ if (ata_tag_valid(tag))
return &ap->qcmd[tag];
return NULL;
}
@@ -1663,11 +1863,11 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
{
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
- if (unlikely(!qc) || !ap->ops->error_handler)
+ if (unlikely(!qc))
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
- ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
+ ATA_QCFLAG_EH)) == ATA_QCFLAG_ACTIVE)
return qc;
return NULL;
@@ -1754,7 +1954,7 @@ static inline int ata_check_ready(u8 status)
}
static inline unsigned long ata_deadline(unsigned long from_jiffies,
- unsigned long timeout_msecs)
+ unsigned int timeout_msecs)
{
return from_jiffies + msecs_to_jiffies(timeout_msecs);
}
@@ -1763,26 +1963,34 @@ static inline unsigned long ata_deadline(unsigned long from_jiffies,
change in future hardware and specs, secondly 0xFF means 'no DMA' but is
 > UDMA_0. Here be dragons */
-static inline int ata_using_mwdma(struct ata_device *adev)
+static inline bool ata_using_mwdma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_MW_DMA_0 &&
+ adev->dma_mode <= XFER_MW_DMA_4;
}
-static inline int ata_using_udma(struct ata_device *adev)
+static inline bool ata_using_udma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_UDMA_0 &&
+ adev->dma_mode <= XFER_UDMA_7;
}
-static inline int ata_dma_enabled(struct ata_device *adev)
+static inline bool ata_dma_enabled(struct ata_device *adev)
{
- return (adev->dma_mode == 0xFF ? 0 : 1);
+ return adev->dma_mode != 0xFF;
}
/**************************************************************************
+ * PATA timings - drivers/ata/libata-pata-timings.c
+ */
+extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
+extern int ata_timing_compute(struct ata_device *, unsigned short,
+ struct ata_timing *, int, int);
+extern void ata_timing_merge(const struct ata_timing *,
+ const struct ata_timing *, struct ata_timing *,
+ unsigned int);
+
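/*
 * Illustrative sketch, not from this patch: a PATA controller driver
 * computes the timings for a device's current mode, e.g. with a 30ns
 * base clock period T and no separate UDMA period UT ("foo_set_piomode"
 * is hypothetical).
 */
static int foo_set_piomode(struct ata_device *adev, struct ata_timing *t)
{
	return ata_timing_compute(adev, adev->pio_mode, t, 30, 0);
}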
+/**************************************************************************
* PMP - drivers/ata/libata-pmp.c
*/
#ifdef CONFIG_SATA_PMP
@@ -1819,8 +2027,6 @@ extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
extern u8 ata_sff_check_status(struct ata_port *ap);
extern void ata_sff_pause(struct ata_port *ap);
extern void ata_sff_dma_pause(struct ata_port *ap);
-extern int ata_sff_busy_sleep(struct ata_port *ap,
- unsigned long timeout_pat, unsigned long timeout);
extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
@@ -1830,10 +2036,7 @@ extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
-extern unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc,
- unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
-extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern void ata_sff_queue_work(struct work_struct *work);
@@ -1841,7 +2044,7 @@ extern void ata_sff_queue_delayed_work(struct delayed_work *dwork,
unsigned long delay);
extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
-extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
+extern void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
@@ -1868,10 +2071,10 @@ extern int ata_pci_sff_prepare_host(struct pci_dev *pdev,
struct ata_host **r_host);
extern int ata_pci_sff_activate_host(struct ata_host *host,
irq_handler_t irq_handler,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern int ata_pci_sff_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
- struct scsi_host_template *sht, void *host_priv, int hflags);
+ const struct scsi_host_template *sht, void *host_priv, int hflags);
#endif /* CONFIG_PCI */
#ifdef CONFIG_ATA_BMDMA
@@ -1883,9 +2086,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
.sg_tablesize = LIBATA_MAX_PRD, \
.dma_boundary = ATA_DMA_BOUNDARY
-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
@@ -1907,7 +2110,7 @@ extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
struct ata_host **r_host);
extern int ata_pci_bmdma_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
- struct scsi_host_template *sht,
+ const struct scsi_host_template *sht,
void *host_priv, int hflags);
#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */
@@ -1953,14 +2156,17 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
{
u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
-#ifdef ATA_DEBUG
if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
- ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
- status);
-#endif
+ ata_port_dbg(ap, "abnormal Status 0x%X\n", status);
return status;
}
+#else /* CONFIG_ATA_SFF */
+static inline int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_ATA_SFF */
#endif /* __LINUX_LIBATA_H__ */
diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h
index 4c0306c69b4e..90ed4ebfa692 100644
--- a/include/linux/libfdt.h
+++ b/include/linux/libfdt.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INCLUDE_LIBFDT_H_
#define _INCLUDE_LIBFDT_H_
#include <linux/libfdt_env.h>
-#include "../../scripts/dtc/libfdt/fdt.h"
#include "../../scripts/dtc/libfdt/libfdt.h"
#endif /* _INCLUDE_LIBFDT_H_ */
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 2a663c6bb428..cea8574a29b1 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -1,10 +1,15 @@
-#ifndef _LIBFDT_ENV_H
-#define _LIBFDT_ENV_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LIBFDT_ENV_H
+#define LIBFDT_ENV_H
+#include <linux/limits.h> /* For INT_MAX */
#include <linux/string.h>
#include <asm/byteorder.h>
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
+
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;
@@ -14,4 +19,4 @@ typedef __be64 fdt64_t;
#define fdt64_to_cpu(x) be64_to_cpu(x)
#define cpu_to_fdt64(x) cpu_to_be64(x)
-#endif /* _LIBFDT_ENV_H */
+#endif /* LIBFDT_ENV_H */
diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h
new file mode 100644
index 000000000000..0d68f9d6a6a7
--- /dev/null
+++ b/include/linux/libgcc.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/linux/libgcc.h
+ */
+
+#ifndef __LIB_LIBGCC_H
+#define __LIB_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+ int high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+ int low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+ struct DWstruct s;
+ long long ll;
+} DWunion;
+
+long long notrace __ashldi3(long long u, word_type b);
+long long notrace __ashrdi3(long long u, word_type b);
+word_type notrace __cmpdi2(long long a, long long b);
+long long notrace __lshrdi3(long long u, word_type b);
+long long notrace __muldi3(long long u, long long v);
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b);
+
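/*
 * Illustrative sketch, not from this patch: the helpers in lib/ operate
 * on the DWunion halves. A logical 64-bit right shift on a machine with
 * 32-bit words can be written along these lines:
 */
long long notrace __lshrdi3(long long u, word_type b)
{
	DWunion uu = { .ll = u }, w;
	const word_type bm = 32 - b;

	if (b == 0)
		return u;

	if (bm <= 0) {
		w.s.high = 0;
		w.s.low = (unsigned int)uu.s.high >> -bm;
	} else {
		w.s.high = (unsigned int)uu.s.high >> b;
		w.s.low = ((unsigned int)uu.s.low >> b) |
			  ((unsigned int)uu.s.high << bm);
	}
	return w.ll;
}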
+#ifdef CONFIG_HAVE_ARCH_LIBGCC_H
+#include <asm/libgcc.h>
+#endif
+
+#endif /* __LIB_LIBGCC_H */
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index f3d3e6af8838..28f086c4a187 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -1,31 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* libnvdimm - Non-volatile-memory Devices Subsystem
*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef __LIBNVDIMM_H__
#define __LIBNVDIMM_H__
-#include <linux/kernel.h>
+
+#include <linux/io.h>
#include <linux/sizes.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uuid.h>
+struct badrange_entry {
+ u64 start;
+ u64 length;
+ struct list_head list;
+};
+
+struct badrange {
+ struct list_head list;
+ spinlock_t lock;
+};
+
enum {
- /* when a dimm supports both PMEM and BLK access a label is required */
- NDD_ALIASING = 0,
/* unarmed memory devices may not persist writes */
NDD_UNARMED = 1,
/* locked memory devices should not be accessed */
NDD_LOCKED = 2,
+ /* memory under security wipes should not be accessed */
+ NDD_SECURITY_OVERWRITE = 3,
+ /* tracking whether or not there is a pending device reference */
+ NDD_WORK_PENDING = 4,
+ /* dimm supports namespace labels */
+ NDD_LABELING = 6,
+ /*
+ * dimm contents have changed requiring invalidation of CPU caches prior
+ * to activation of a region that includes this device
+ */
+ NDD_INCOHERENT = 7,
+
+ /* dimm provider wants synchronous registration by __nvdimm_create() */
+ NDD_REGISTER_SYNC = 8,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -35,34 +52,50 @@ enum {
/* region flag indicating to direct-map persistent memory by default */
ND_REGION_PAGEMAP = 0,
+ /*
+ * Platform ensures entire CPU store data path is flushed to pmem on
+ * system power loss.
+ */
+ ND_REGION_PERSIST_CACHE = 1,
+ /*
+ * Platform provides mechanisms to automatically flush outstanding
+ * write data from memory controller to pmem on system power loss.
+ * (ADR)
+ */
+ ND_REGION_PERSIST_MEMCTRL = 2,
+
+ /* Platform provides asynchronous flush mechanism */
+ ND_REGION_ASYNC = 3,
+
+ /* Region was created by CXL subsystem */
+ ND_REGION_CXL = 4,
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
-extern struct attribute_group nvdimm_bus_attribute_group;
-extern struct attribute_group nvdimm_attribute_group;
-extern struct attribute_group nd_device_attribute_group;
-extern struct attribute_group nd_numa_attribute_group;
-extern struct attribute_group nd_region_attribute_group;
-extern struct attribute_group nd_mapping_attribute_group;
-
struct nvdimm;
struct nvdimm_bus_descriptor;
typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc);
+struct attribute_group;
+struct device_node;
+struct module;
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
- unsigned long bus_dsm_mask;
unsigned long cmd_mask;
+ unsigned long dimm_family_mask;
+ unsigned long bus_family_mask;
struct module *module;
char *provider_name;
+ struct device_node *of_node;
ndctl_fn ndctl;
int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
- struct nvdimm *nvdimm, unsigned int cmd);
+ struct nvdimm *nvdimm, unsigned int cmd, void *data);
+ const struct nvdimm_bus_fw_ops *fw_ops;
};
struct nd_cmd_desc {
@@ -87,8 +120,12 @@ struct nd_mapping_desc {
struct nvdimm *nvdimm;
u64 start;
u64 size;
+ int position;
};
+struct bio;
+struct resource;
+struct nd_region;
struct nd_region_desc {
struct resource *res;
struct nd_mapping_desc *mapping;
@@ -98,7 +135,11 @@ struct nd_region_desc {
void *provider_data;
int num_lanes;
int numa_node;
+ int target_node;
unsigned long flags;
+ int memregion;
+ struct device_node *of_node;
+ int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
struct device;
@@ -110,44 +151,135 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0);
}
-struct nvdimm_bus;
-struct module;
-struct device;
-struct nd_blk_region;
-struct nd_blk_region_desc {
- int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
- int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
- void *iobuf, u64 len, int rw);
- struct nd_region_desc ndr_desc;
+/*
+ * Note that separate bits for locked + unlocked are defined so that
+ * 'flags == 0' corresponds to an error / not-supported state.
+ */
+enum nvdimm_security_bits {
+ NVDIMM_SECURITY_DISABLED,
+ NVDIMM_SECURITY_UNLOCKED,
+ NVDIMM_SECURITY_LOCKED,
+ NVDIMM_SECURITY_FROZEN,
+ NVDIMM_SECURITY_OVERWRITE,
};
-static inline struct nd_blk_region_desc *to_blk_region_desc(
- struct nd_region_desc *ndr_desc)
-{
- return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
-}
+#define NVDIMM_PASSPHRASE_LEN 32
+#define NVDIMM_KEY_DESC_LEN 22
+
+struct nvdimm_key_data {
+ u8 data[NVDIMM_PASSPHRASE_LEN];
+};
+
+enum nvdimm_passphrase_type {
+ NVDIMM_USER,
+ NVDIMM_MASTER,
+};
+
+struct nvdimm_security_ops {
+ unsigned long (*get_flags)(struct nvdimm *nvdimm,
+ enum nvdimm_passphrase_type pass_type);
+ int (*freeze)(struct nvdimm *nvdimm);
+ int (*change_key)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *old_data,
+ const struct nvdimm_key_data *new_data,
+ enum nvdimm_passphrase_type pass_type);
+ int (*unlock)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+ int (*disable)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+ int (*erase)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data,
+ enum nvdimm_passphrase_type pass_type);
+ int (*overwrite)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+ int (*query_overwrite)(struct nvdimm *nvdimm);
+ int (*disable_master)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+};
+
+enum nvdimm_fwa_state {
+ NVDIMM_FWA_INVALID,
+ NVDIMM_FWA_IDLE,
+ NVDIMM_FWA_ARMED,
+ NVDIMM_FWA_BUSY,
+ NVDIMM_FWA_ARM_OVERFLOW,
+};
+
+enum nvdimm_fwa_trigger {
+ NVDIMM_FWA_ARM,
+ NVDIMM_FWA_DISARM,
+};
+
+enum nvdimm_fwa_capability {
+ NVDIMM_FWA_CAP_INVALID,
+ NVDIMM_FWA_CAP_NONE,
+ NVDIMM_FWA_CAP_QUIESCE,
+ NVDIMM_FWA_CAP_LIVE,
+};
+
+enum nvdimm_fwa_result {
+ NVDIMM_FWA_RESULT_INVALID,
+ NVDIMM_FWA_RESULT_NONE,
+ NVDIMM_FWA_RESULT_SUCCESS,
+ NVDIMM_FWA_RESULT_NOTSTAGED,
+ NVDIMM_FWA_RESULT_NEEDRESET,
+ NVDIMM_FWA_RESULT_FAIL,
+};
+
+struct nvdimm_bus_fw_ops {
+ enum nvdimm_fwa_state (*activate_state)
+ (struct nvdimm_bus_descriptor *nd_desc);
+ enum nvdimm_fwa_capability (*capability)
+ (struct nvdimm_bus_descriptor *nd_desc);
+ int (*activate)(struct nvdimm_bus_descriptor *nd_desc);
+};
+
+struct nvdimm_fw_ops {
+ enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm);
+ enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm);
+ int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
+};
+
-int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
-void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus,
- phys_addr_t start, unsigned int len);
+struct kobject;
+struct nvdimm_bus;
+void badrange_init(struct badrange *badrange);
+int badrange_add(struct badrange *badrange, u64 addr, u64 length);
+void badrange_forget(struct badrange *badrange, phys_addr_t start,
+ unsigned int len);
+int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr,
+ u64 length);
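/*
 * Illustrative sketch, not from this patch: a bus provider embeds a
 * struct badrange, initializes it once, and appends ranges as media
 * errors are discovered ("foo_" names are hypothetical).
 */
static int foo_badrange_setup(struct badrange *br, u64 addr, u64 len)
{
	badrange_init(br);
	return badrange_add(br, addr, len);
}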
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nfit_desc);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
+struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
-struct nd_blk_region *to_nd_blk_region(struct device *dev);
+struct device *nd_region_dev(struct nd_region *nd_region);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
struct kobject *nvdimm_kobj(struct nvdimm *nvdimm);
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
void *nvdimm_provider_data(struct nvdimm *nvdimm);
-struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
- const struct attribute_group **groups, unsigned long flags,
- unsigned long cmd_mask, int num_flush,
- struct resource *flush_wpq);
+struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
+ void *provider_data, const struct attribute_group **groups,
+ unsigned long flags, unsigned long cmd_mask, int num_flush,
+ struct resource *flush_wpq, const char *dimm_id,
+ const struct nvdimm_security_ops *sec_ops,
+ const struct nvdimm_fw_ops *fw_ops);
+static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
+ void *provider_data, const struct attribute_group **groups,
+ unsigned long flags, unsigned long cmd_mask, int num_flush,
+ struct resource *flush_wpq)
+{
+ return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
+ cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
+}
+void nvdimm_delete(struct nvdimm *nvdimm);
+void nvdimm_region_delete(struct nd_region *nd_region);
+
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
@@ -163,14 +295,37 @@ struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc);
void *nd_region_provider_data(struct nd_region *nd_region);
-void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
-void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
-struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
-unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
-void nvdimm_flush(struct nd_region *nd_region);
+int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
+int generic_nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
+int nvdimm_in_overwrite(struct nvdimm *nvdimm);
+bool is_nvdimm_sync(struct nd_region *nd_region);
+
+static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
+{
+ struct nvdimm_bus *nvdimm_bus = nvdimm_to_bus(nvdimm);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+
+ return nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, cmd_rc);
+}
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+void arch_wb_cache_pmem(void *addr, size_t size);
+void arch_invalidate_pmem(void *addr, size_t size);
+#else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
+{
+}
+static inline void arch_invalidate_pmem(void *addr, size_t size)
+{
+}
+#endif
+
#endif /* __LIBNVDIMM_H__ */
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index 4ad06e824f76..9ca9ce4e6e64 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -1,56 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LIBPS2_H
#define _LIBPS2_H
/*
* Copyright (C) 1999-2002 Vojtech Pavlik
* Copyright (C) 2004 Dmitry Torokhov
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
-#define PS2_CMD_GETID 0x02f2
-#define PS2_CMD_RESET_BAT 0x02ff
+struct ps2dev;
-#define PS2_RET_BAT 0xaa
-#define PS2_RET_ID 0x00
-#define PS2_RET_ACK 0xfa
-#define PS2_RET_NAK 0xfe
-#define PS2_RET_ERR 0xfc
+/**
+ * enum ps2_disposition - indicates how received byte should be handled
+ * @PS2_PROCESS: pass to the main protocol handler, process normally
+ * @PS2_IGNORE: skip the byte
+ * @PS2_ERROR: do not process the byte, abort command in progress
+ */
+enum ps2_disposition {
+ PS2_PROCESS,
+ PS2_IGNORE,
+ PS2_ERROR,
+};
-#define PS2_FLAG_ACK 1 /* Waiting for ACK/NAK */
-#define PS2_FLAG_CMD 2 /* Waiting for command to finish */
-#define PS2_FLAG_CMD1 4 /* Waiting for the first byte of command response */
-#define PS2_FLAG_WAITID 8 /* Command execiting is GET ID */
-#define PS2_FLAG_NAK 16 /* Last transmission was NAKed */
+typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8,
+ unsigned int);
+typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8);
+/**
+ * struct ps2dev - represents a device using PS/2 protocol
+ * @serio: a serio port used by the PS/2 device
+ * @cmd_mutex: a mutex ensuring that only one command is executing at a time
+ * @wait: a waitqueue used to signal completion from the serio interrupt handler
+ * @flags: various internal flags indicating stages of PS/2 command execution
+ * @cmdbuf: buffer holding command response
+ * @cmdcnt: outstanding number of bytes of the command response
+ * @nak: a byte transmitted by the device when it refuses a command
+ * @pre_receive_handler: checks communication errors and returns disposition
+ * (&enum ps2_disposition) of the received data byte
+ * @receive_handler: main handler of particular PS/2 protocol, such as keyboard
+ * or mouse protocol
+ */
struct ps2dev {
struct serio *serio;
-
- /* Ensures that only one command is executing at a time */
struct mutex cmd_mutex;
-
- /* Used to signal completion from interrupt handler */
wait_queue_head_t wait;
-
unsigned long flags;
- unsigned char cmdbuf[8];
- unsigned char cmdcnt;
- unsigned char nak;
+ u8 cmdbuf[8];
+ u8 cmdcnt;
+ u8 nak;
+
+ ps2_pre_receive_handler_t pre_receive_handler;
+ ps2_receive_handler_t receive_handler;
};
-void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
-int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout);
-void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout);
+void ps2_init(struct ps2dev *ps2dev, struct serio *serio,
+ ps2_pre_receive_handler_t pre_receive_handler,
+ ps2_receive_handler_t receive_handler);
+int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout);
+void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout);
void ps2_begin_command(struct ps2dev *ps2dev);
void ps2_end_command(struct ps2dev *ps2dev);
-int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
-int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
-int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data);
-int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data);
-void ps2_cmd_aborted(struct ps2dev *ps2dev);
-int ps2_is_keyboard_id(char id);
+int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
+int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
+int ps2_sliced_command(struct ps2dev *ps2dev, u8 command);
+bool ps2_is_keyboard_id(u8 id);
+
+irqreturn_t ps2_interrupt(struct serio *serio, u8 data, unsigned int flags);
#endif /* _LIBPS2_H */
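/*
 * Illustrative sketch, not from this patch: a protocol driver now
 * registers both handlers at init time; the pre-receive hook vets each
 * byte and returns a disposition ("foo_" names are hypothetical).
 */
static enum ps2_disposition foo_pre_receive(struct ps2dev *ps2dev, u8 data,
					    unsigned int flags)
{
	if (flags & (SERIO_PARITY | SERIO_TIMEOUT))
		return PS2_ERROR;	/* abort any command in progress */
	return PS2_PROCESS;
}

static void foo_receive(struct ps2dev *ps2dev, u8 data)
{
	/* main protocol handling of a vetted byte */
}

/* at probe: ps2_init(&priv->ps2dev, serio, foo_pre_receive, foo_receive); */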
diff --git a/include/linux/license.h b/include/linux/license.h
index decdbf43cb5c..7cce390f120b 100644
--- a/include/linux/license.h
+++ b/include/linux/license.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __LICENSE_H
#define __LICENSE_H
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
deleted file mode 100644
index 7dfa56ebbc6d..000000000000
--- a/include/linux/lightnvm.h
+++ /dev/null
@@ -1,510 +0,0 @@
-#ifndef NVM_H
-#define NVM_H
-
-#include <linux/blkdev.h>
-#include <linux/types.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
- NVM_IO_OK = 0,
- NVM_IO_REQUEUE = 1,
- NVM_IO_DONE = 2,
- NVM_IO_ERR = 3,
-
- NVM_IOTYPE_NONE = 0,
- NVM_IOTYPE_GC = 1,
-};
-
-#define NVM_BLK_BITS (16)
-#define NVM_PG_BITS (16)
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (8)
-#define NVM_LUN_BITS (8)
-#define NVM_CH_BITS (7)
-
-struct ppa_addr {
- /* Generic structure for all addresses */
- union {
- struct {
- u64 blk : NVM_BLK_BITS;
- u64 pg : NVM_PG_BITS;
- u64 sec : NVM_SEC_BITS;
- u64 pl : NVM_PL_BITS;
- u64 lun : NVM_LUN_BITS;
- u64 ch : NVM_CH_BITS;
- u64 reserved : 1;
- } g;
-
- struct {
- u64 line : 63;
- u64 is_cached : 1;
- } c;
-
- u64 ppa;
- };
-};
-
-struct nvm_rq;
-struct nvm_id;
-struct nvm_dev;
-struct nvm_tgt_dev;
-
-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
- nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
-typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
- dma_addr_t *);
-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
-
-struct nvm_dev_ops {
- nvm_id_fn *identity;
- nvm_get_l2p_tbl_fn *get_l2p_tbl;
- nvm_op_bb_tbl_fn *get_bb_tbl;
- nvm_op_set_bb_fn *set_bb_tbl;
-
- nvm_submit_io_fn *submit_io;
-
- nvm_create_dma_pool_fn *create_dma_pool;
- nvm_destroy_dma_pool_fn *destroy_dma_pool;
- nvm_dev_dma_alloc_fn *dev_dma_alloc;
- nvm_dev_dma_free_fn *dev_dma_free;
-
- unsigned int max_phys_sect;
-};
-
-#ifdef CONFIG_NVM
-
-#include <linux/blkdev.h>
-#include <linux/file.h>
-#include <linux/dmapool.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
- /* HW Responsibilities */
- NVM_RSP_L2P = 1 << 0,
- NVM_RSP_ECC = 1 << 1,
-
- /* Physical Adressing Mode */
- NVM_ADDRMODE_LINEAR = 0,
- NVM_ADDRMODE_CHANNEL = 1,
-
- /* Plane programming mode for LUN */
- NVM_PLANE_SINGLE = 1,
- NVM_PLANE_DOUBLE = 2,
- NVM_PLANE_QUAD = 4,
-
- /* Status codes */
- NVM_RSP_SUCCESS = 0x0,
- NVM_RSP_NOT_CHANGEABLE = 0x1,
- NVM_RSP_ERR_FAILWRITE = 0x40ff,
- NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
- NVM_RSP_ERR_FAILECC = 0x4281,
- NVM_RSP_ERR_FAILCRC = 0x4004,
- NVM_RSP_WARN_HIGHECC = 0x4700,
-
- /* Device opcodes */
- NVM_OP_HBREAD = 0x02,
- NVM_OP_HBWRITE = 0x81,
- NVM_OP_PWRITE = 0x91,
- NVM_OP_PREAD = 0x92,
- NVM_OP_ERASE = 0x90,
-
- /* PPA Command Flags */
- NVM_IO_SNGL_ACCESS = 0x0,
- NVM_IO_DUAL_ACCESS = 0x1,
- NVM_IO_QUAD_ACCESS = 0x2,
-
- /* NAND Access Modes */
- NVM_IO_SUSPEND = 0x80,
- NVM_IO_SLC_MODE = 0x100,
- NVM_IO_SCRAMBLE_ENABLE = 0x200,
-
- /* Block Types */
- NVM_BLK_T_FREE = 0x0,
- NVM_BLK_T_BAD = 0x1,
- NVM_BLK_T_GRWN_BAD = 0x2,
- NVM_BLK_T_DEV = 0x4,
- NVM_BLK_T_HOST = 0x8,
-
- /* Memory capabilities */
- NVM_ID_CAP_SLC = 0x1,
- NVM_ID_CAP_CMD_SUSPEND = 0x2,
- NVM_ID_CAP_SCRAMBLE = 0x4,
- NVM_ID_CAP_ENCRYPT = 0x8,
-
- /* Memory types */
- NVM_ID_FMTYPE_SLC = 0,
- NVM_ID_FMTYPE_MLC = 1,
-
- /* Device capabilities */
- NVM_ID_DCAP_BBLKMGMT = 0x1,
- NVM_UD_DCAP_ECC = 0x2,
-};
-
-struct nvm_id_lp_mlc {
- u16 num_pairs;
- u8 pairs[886];
-};
-
-struct nvm_id_lp_tbl {
- __u8 id[8];
- struct nvm_id_lp_mlc mlc;
-};
-
-struct nvm_id_group {
- u8 mtype;
- u8 fmtype;
- u8 num_ch;
- u8 num_lun;
- u8 num_pln;
- u16 num_blk;
- u16 num_pg;
- u16 fpg_sz;
- u16 csecs;
- u16 sos;
- u32 trdt;
- u32 trdm;
- u32 tprt;
- u32 tprm;
- u32 tbet;
- u32 tbem;
- u32 mpos;
- u32 mccap;
- u16 cpar;
-
- struct nvm_id_lp_tbl lptbl;
-};
-
-struct nvm_addr_format {
- u8 ch_offset;
- u8 ch_len;
- u8 lun_offset;
- u8 lun_len;
- u8 pln_offset;
- u8 pln_len;
- u8 blk_offset;
- u8 blk_len;
- u8 pg_offset;
- u8 pg_len;
- u8 sect_offset;
- u8 sect_len;
-};
-
-struct nvm_id {
- u8 ver_id;
- u8 vmnt;
- u32 cap;
- u32 dom;
- struct nvm_addr_format ppaf;
- struct nvm_id_group grp;
-} __packed;
-
-struct nvm_target {
- struct list_head list;
- struct nvm_tgt_dev *dev;
- struct nvm_tgt_type *type;
- struct gendisk *disk;
-};
-
-#define ADDR_EMPTY (~0ULL)
-
-#define NVM_VERSION_MAJOR 1
-#define NVM_VERSION_MINOR 0
-#define NVM_VERSION_PATCH 0
-
-struct nvm_rq;
-typedef void (nvm_end_io_fn)(struct nvm_rq *);
-
-struct nvm_rq {
- struct nvm_tgt_dev *dev;
-
- struct bio *bio;
-
- union {
- struct ppa_addr ppa_addr;
- dma_addr_t dma_ppa_list;
- };
-
- struct ppa_addr *ppa_list;
-
- void *meta_list;
- dma_addr_t dma_meta_list;
-
- struct completion *wait;
- nvm_end_io_fn *end_io;
-
- uint8_t opcode;
- uint16_t nr_ppas;
- uint16_t flags;
-
- u64 ppa_status; /* ppa media status */
- int error;
-
- void *private;
-};
-
-static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
-{
- return pdu - sizeof(struct nvm_rq);
-}
-
-static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
-{
- return rqdata + 1;
-}
-
-enum {
- NVM_BLK_ST_FREE = 0x1, /* Free block */
- NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
- NVM_BLK_ST_BAD = 0x8, /* Bad block */
-};
-
-/* Device generic information */
-struct nvm_geo {
- int nr_chnls;
- int nr_luns;
- int luns_per_chnl; /* -1 if channels are not symmetric */
- int nr_planes;
- int sec_per_pg; /* only sectors for a single page */
- int pgs_per_blk;
- int blks_per_lun;
- int fpg_size;
- int pfpg_size; /* size of buffer if all pages are to be read */
- int sec_size;
- int oob_size;
- int mccap;
- struct nvm_addr_format ppaf;
-
- /* Calculated/Cached values. These do not reflect the actual usable
- * blocks at run-time.
- */
- int max_rq_size;
- int plane_mode; /* drive device in single, double or quad mode */
-
- int sec_per_pl; /* all sectors across planes */
- int sec_per_blk;
- int sec_per_lun;
-};
-
-/* sub-device structure */
-struct nvm_tgt_dev {
- /* Device information */
- struct nvm_geo geo;
-
- /* Base ppas for target LUNs */
- struct ppa_addr *luns;
-
- sector_t total_secs;
-
- struct nvm_id identity;
- struct request_queue *q;
-
- struct nvm_dev *parent;
- void *map;
-};
-
-struct nvm_dev {
- struct nvm_dev_ops *ops;
-
- struct list_head devices;
-
- /* Device information */
- struct nvm_geo geo;
-
- /* lower page table */
- int lps_per_blk;
- int *lptbl;
-
- unsigned long total_secs;
-
- unsigned long *lun_map;
- void *dma_pool;
-
- struct nvm_id identity;
-
- /* Backend device */
- struct request_queue *q;
- char name[DISK_NAME_LEN];
- void *private_data;
-
- void *rmap;
-
- struct mutex mlock;
- spinlock_t lock;
-
- /* target management */
- struct list_head area_list;
- struct list_head targets;
-};
-
-static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
- u64 pba)
-{
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = pba;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, geo->pgs_per_blk);
- div_u64_rem(ppa, geo->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, geo->blks_per_lun);
- div_u64_rem(ppa, geo->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, geo->luns_per_chnl);
- l.g.ch = ppa;
-
- return l;
-}
-
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct ppa_addr l;
-
- l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
- l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset;
- l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset;
- l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset;
- l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset;
- l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset;
-
- return l;
-}
-
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct ppa_addr l;
-
- l.ppa = 0;
- /*
- * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
- */
- l.g.blk = (r.ppa >> geo->ppaf.blk_offset) &
- (((1 << geo->ppaf.blk_len) - 1));
- l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) &
- (((1 << geo->ppaf.pg_len) - 1));
- l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) &
- (((1 << geo->ppaf.sect_len) - 1));
- l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) &
- (((1 << geo->ppaf.pln_len) - 1));
- l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) &
- (((1 << geo->ppaf.lun_len) - 1));
- l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) &
- (((1 << geo->ppaf.ch_len) - 1));
-
- return l;
-}
-
-static inline int ppa_empty(struct ppa_addr ppa_addr)
-{
- return (ppa_addr.ppa == ADDR_EMPTY);
-}
-
-static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
-{
- ppa_addr->ppa = ADDR_EMPTY;
-}
-
-static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
-{
- if (ppa_empty(ppa1) || ppa_empty(ppa2))
- return 0;
-
- return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
- (ppa1.g.blk == ppa2.g.blk));
-}
-
-typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
-typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
- int flags);
-typedef void (nvm_tgt_exit_fn)(void *);
-typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
-typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
-
-struct nvm_tgt_type {
- const char *name;
- unsigned int version[3];
-
- /* target entry points */
- nvm_tgt_make_rq_fn *make_rq;
- nvm_tgt_capacity_fn *capacity;
-
- /* module-specific init/teardown */
- nvm_tgt_init_fn *init;
- nvm_tgt_exit_fn *exit;
-
- /* sysfs */
- nvm_tgt_sysfs_init_fn *sysfs_init;
- nvm_tgt_sysfs_exit_fn *sysfs_exit;
-
- /* For internal use */
- struct list_head list;
-};
-
-extern struct nvm_tgt_type *nvm_find_target_type(const char *, int);
-
-extern int nvm_register_tgt_type(struct nvm_tgt_type *);
-extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
-
-extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
-extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
-
-extern struct nvm_dev *nvm_alloc_dev(int);
-extern int nvm_register(struct nvm_dev *);
-extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
- int, int);
-extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
-extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
-extern int nvm_set_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *,
- const struct ppa_addr *, int, int);
-extern void nvm_free_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
- void *);
-extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
-extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
-extern void nvm_end_io(struct nvm_rq *);
-extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
-extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
-
-extern int nvm_dev_factory(struct nvm_dev *, int flags);
-
-extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
-
-#else /* CONFIG_NVM */
-struct nvm_dev_ops;
-
-static inline struct nvm_dev *nvm_alloc_dev(int node)
-{
- return ERR_PTR(-EINVAL);
-}
-static inline int nvm_register(struct nvm_dev *dev)
-{
- return -EINVAL;
-}
-static inline void nvm_unregister(struct nvm_dev *dev) {}
-#endif /* CONFIG_NVM */
-#endif /* LIGHTNVM.H */
diff --git a/include/linux/limits.h b/include/linux/limits.h
new file mode 100644
index 000000000000..38eb7f6f7e88
--- /dev/null
+++ b/include/linux/limits.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LIMITS_H
+#define _LINUX_LIMITS_H
+
+#include <uapi/linux/limits.h>
+#include <linux/types.h>
+#include <vdso/limits.h>
+
+#define SIZE_MAX (~(size_t)0)
+#define SSIZE_MAX ((ssize_t)(SIZE_MAX >> 1))
+#define PHYS_ADDR_MAX (~(phys_addr_t)0)
+
+#define RESOURCE_SIZE_MAX ((resource_size_t)~0)
+
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX >> 1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX >> 1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define U32_MIN ((u32)0)
+#define S32_MAX ((s32)(U32_MAX >> 1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX >> 1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
+
+#endif /* _LINUX_LIMITS_H */
diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h
new file mode 100644
index 000000000000..2e4f4c3539c0
--- /dev/null
+++ b/include/linux/linear_range.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 ROHM Semiconductors */
+
+#ifndef LINEAR_RANGE_H
+#define LINEAR_RANGE_H
+
+#include <linux/types.h>
+
+/**
+ * struct linear_range - table of selector - value pairs
+ *
+ * Define a lookup table for a range of values. Intended to help when looking
+ * for a register value matching a certain physical measure (like voltage).
+ * Usable when an increment of one in the register always results in a constant
+ * increment of the physical measure (like voltage).
+ *
+ * @min: Lowest value in range
+ * @min_sel: Lowest selector for range
+ * @max_sel: Highest selector for range
+ * @step: Value step size
+ */
+struct linear_range {
+ unsigned int min;
+ unsigned int min_sel;
+ unsigned int max_sel;
+ unsigned int step;
+};
+
+#define LINEAR_RANGE(_min, _min_sel, _max_sel, _step) \
+ { \
+ .min = _min, \
+ .min_sel = _min_sel, \
+ .max_sel = _max_sel, \
+ .step = _step, \
+ }
+
+#define LINEAR_RANGE_IDX(_idx, _min, _min_sel, _max_sel, _step) \
+ [_idx] = LINEAR_RANGE(_min, _min_sel, _max_sel, _step)
+
+unsigned int linear_range_values_in_range(const struct linear_range *r);
+unsigned int linear_range_values_in_range_array(const struct linear_range *r,
+ int ranges);
+unsigned int linear_range_get_max_value(const struct linear_range *r);
+
+int linear_range_get_value(const struct linear_range *r, unsigned int selector,
+ unsigned int *val);
+int linear_range_get_value_array(const struct linear_range *r, int ranges,
+ unsigned int selector, unsigned int *val);
+int linear_range_get_selector_low(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found);
+int linear_range_get_selector_high(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found);
+void linear_range_get_selector_within(const struct linear_range *r,
+ unsigned int val, unsigned int *selector);
+int linear_range_get_selector_low_array(const struct linear_range *r,
+ int ranges, unsigned int val,
+ unsigned int *selector, bool *found);
+
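/*
 * Illustrative sketch, not from this patch: a regulator whose selectors
 * 0..15 map linearly to 800000..1550000 uV in 50000 uV steps (values are
 * made up) can be described and queried as follows:
 */
static const struct linear_range foo_buck_range =
	LINEAR_RANGE(800000, 0, 15, 50000);

static int foo_selector_to_uv(unsigned int sel, unsigned int *uv)
{
	/* e.g. sel == 3 yields 800000 + 3 * 50000 = 950000 uV */
	return linear_range_get_value(&foo_buck_range, sel, uv);
}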
+#endif
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index a6a42dd02466..b11660b706c5 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LINKAGE_H
#define _LINUX_LINKAGE_H
-#include <linux/compiler.h>
+#include <linux/compiler_types.h>
#include <linux/stringify.h>
#include <linux/export.h>
#include <asm/linkage.h>
@@ -23,20 +24,20 @@
#ifndef cond_syscall
#define cond_syscall(x) asm( \
- ".weak " VMLINUX_SYMBOL_STR(x) "\n\t" \
- ".set " VMLINUX_SYMBOL_STR(x) "," \
- VMLINUX_SYMBOL_STR(sys_ni_syscall))
+ ".weak " __stringify(x) "\n\t" \
+ ".set " __stringify(x) "," \
+ __stringify(sys_ni_syscall))
#endif
#ifndef SYSCALL_ALIAS
#define SYSCALL_ALIAS(alias, name) asm( \
- ".globl " VMLINUX_SYMBOL_STR(alias) "\n\t" \
- ".set " VMLINUX_SYMBOL_STR(alias) "," \
- VMLINUX_SYMBOL_STR(name))
+ ".globl " __stringify(alias) "\n\t" \
+ ".set " __stringify(alias) "," \
+ __stringify(name))
#endif
-#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
-#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_data __section(".data..page_aligned") __aligned(PAGE_SIZE)
+#define __page_aligned_bss __section(".bss..page_aligned") __aligned(PAGE_SIZE)
/*
* For assembly routines.
@@ -68,45 +69,288 @@
#endif
#ifndef __ALIGN
-#define __ALIGN .align 4,0x90
-#define __ALIGN_STR ".align 4,0x90"
+#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT
+#define __ALIGN_STR __stringify(__ALIGN)
#endif
#ifdef __ASSEMBLY__
+/* SYM_T_FUNC -- type used by assembler to mark functions */
+#ifndef SYM_T_FUNC
+#define SYM_T_FUNC STT_FUNC
+#endif
+
+/* SYM_T_OBJECT -- type used by assembler to mark data */
+#ifndef SYM_T_OBJECT
+#define SYM_T_OBJECT STT_OBJECT
+#endif
+
+/* SYM_T_NONE -- type used by assembler to mark entries of unknown type */
+#ifndef SYM_T_NONE
+#define SYM_T_NONE STT_NOTYPE
+#endif
+
+/* SYM_A_* -- align the symbol? */
+#define SYM_A_ALIGN ALIGN
+#define SYM_A_NONE /* nothing */
+
+/* SYM_L_* -- linkage of symbols */
+#define SYM_L_GLOBAL(name) .globl name
+#define SYM_L_WEAK(name) .weak name
+#define SYM_L_LOCAL(name) /* nothing */
+
#ifndef LINKER_SCRIPT
#define ALIGN __ALIGN
#define ALIGN_STR __ALIGN_STR
-#ifndef ENTRY
-#define ENTRY(name) \
+/* === DEPRECATED annotations === */
+
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
+#ifndef GLOBAL
+/* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */
+#define GLOBAL(name) \
.globl name ASM_NL \
- ALIGN ASM_NL \
name:
#endif
+
+#ifndef ENTRY
+/* deprecated, use SYM_FUNC_START */
+#define ENTRY(name) \
+ SYM_FUNC_START(name)
+#endif
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
#endif /* LINKER_SCRIPT */
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
#ifndef WEAK
+/* deprecated, use SYM_FUNC_START_WEAK* */
#define WEAK(name) \
- .weak name ASM_NL \
- name:
+ SYM_FUNC_START_WEAK(name)
#endif
#ifndef END
+/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */
#define END(name) \
.size name, .-name
#endif
-/* If symbol 'name' is treated as a subroutine (gets called, and returns)
- * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of
- * static analysis tools such as stack depth analyzer.
- */
#ifndef ENDPROC
+/* deprecated, use SYM_FUNC_END */
#define ENDPROC(name) \
- .type name, @function ASM_NL \
- END(name)
+ SYM_FUNC_END(name)
+#endif
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
+
+/* === generic annotations === */
+
+/* SYM_ENTRY -- use only if you have to for non-paired symbols */
+#ifndef SYM_ENTRY
+#define SYM_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ name:
+#endif
+
+/* SYM_START -- use only if you have to */
+#ifndef SYM_START
+#define SYM_START(name, linkage, align...) \
+ SYM_ENTRY(name, linkage, align)
#endif
+/* SYM_END -- use only if you have to */
+#ifndef SYM_END
+#define SYM_END(name, sym_type) \
+ .type name sym_type ASM_NL \
+ .set .L__sym_size_##name, .-name ASM_NL \
+ .size name, .L__sym_size_##name
#endif
+/* SYM_ALIAS -- use only if you have to */
+#ifndef SYM_ALIAS
+#define SYM_ALIAS(alias, name, linkage) \
+ linkage(alias) ASM_NL \
+ .set alias, name ASM_NL
#endif
+
+/* === code annotations === */
+
+/*
+ * FUNC -- C-like functions (proper stack frame etc.)
+ * CODE -- non-C code (e.g. irq handlers with different, special stack etc.)
+ *
+ * Objtool validates stack for FUNC, but not for CODE.
+ * Objtool generates debug info for both FUNC & CODE, but needs special
+ * annotations for each CODE's start (to describe the actual stack frame).
+ *
+ * Objtool requires that all code must be contained in an ELF symbol. Symbol
+ * names that have a .L prefix do not emit symbol table entries. .L
+ * prefixed symbols can be used within a code region, but should be avoided for
+ * denoting a range of code via ``SYM_*_START/END`` annotations.
+ *
+ * ALIAS -- does not generate debug info -- the aliased function will
+ */
+
+/* SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL_ALIGN
+#define SYM_INNER_LABEL_ALIGN(name, linkage) \
+ .type name SYM_T_NONE ASM_NL \
+ SYM_ENTRY(name, linkage, SYM_A_ALIGN)
+#endif
+
+/* SYM_INNER_LABEL -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL
+#define SYM_INNER_LABEL(name, linkage) \
+ .type name SYM_T_NONE ASM_NL \
+ SYM_ENTRY(name, linkage, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START -- use for global functions */
+#ifndef SYM_FUNC_START
+#define SYM_FUNC_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
+#ifndef SYM_FUNC_START_NOALIGN
+#define SYM_FUNC_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START_LOCAL -- use for local functions */
+#ifndef SYM_FUNC_START_LOCAL
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
+#ifndef SYM_FUNC_START_LOCAL_NOALIGN
+#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START_WEAK -- use for weak functions */
+#ifndef SYM_FUNC_START_WEAK
+#define SYM_FUNC_START_WEAK(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
+#ifndef SYM_FUNC_START_WEAK_NOALIGN
+#define SYM_FUNC_START_WEAK_NOALIGN(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
+#endif
+
+/*
+ * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
+ * SYM_FUNC_START_WEAK, ...
+ */
+#ifndef SYM_FUNC_END
+#define SYM_FUNC_END(name) \
+ SYM_END(name, SYM_T_FUNC)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS -- define a global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS
+#define SYM_FUNC_ALIAS(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_GLOBAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_LOCAL -- define a local alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_LOCAL
+#define SYM_FUNC_ALIAS_LOCAL(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_LOCAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_WEAK -- define a weak global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_WEAK
+#define SYM_FUNC_ALIAS_WEAK(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_WEAK)
+#endif
+
+/* SYM_CODE_START -- use for non-C (special) functions */
+#ifndef SYM_CODE_START
+#define SYM_CODE_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o alignment */
+#ifndef SYM_CODE_START_NOALIGN
+#define SYM_CODE_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_START_LOCAL -- use for local non-C (special) functions */
+#ifndef SYM_CODE_START_LOCAL
+#define SYM_CODE_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special) functions,
+ * w/o alignment
+ */
+#ifndef SYM_CODE_START_LOCAL_NOALIGN
+#define SYM_CODE_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_END -- the end of SYM_CODE_START_LOCAL, SYM_CODE_START, ... */
+#ifndef SYM_CODE_END
+#define SYM_CODE_END(name) \
+ SYM_END(name, SYM_T_NONE)
+#endif
+
+/* === data annotations === */
+
+/* SYM_DATA_START -- global data symbol */
+#ifndef SYM_DATA_START
+#define SYM_DATA_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_START_LOCAL -- local data symbol */
+#ifndef SYM_DATA_START_LOCAL
+#define SYM_DATA_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_END -- the end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END
+#define SYM_DATA_END(name) \
+ SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END_LABEL
+#define SYM_DATA_END_LABEL(name, linkage, label) \
+ linkage(label) ASM_NL \
+ .type label SYM_T_OBJECT ASM_NL \
+ label: \
+ SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA -- start+end wrapper around simple global data */
+#ifndef SYM_DATA
+#define SYM_DATA(name, data...) \
+ SYM_DATA_START(name) ASM_NL \
+ data ASM_NL \
+ SYM_DATA_END(name)
+#endif
+
+/* SYM_DATA_LOCAL -- start+end wrapper around simple local data */
+#ifndef SYM_DATA_LOCAL
+#define SYM_DATA_LOCAL(name, data...) \
+ SYM_DATA_START_LOCAL(name) ASM_NL \
+ data ASM_NL \
+ SYM_DATA_END(name)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_LINKAGE_H */
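Editorial note: the C-visible side of the syscall macros is unchanged by the __stringify() conversion. A sketch of the long-standing kernel/sys_ni.c pattern (the syscall chosen is illustrative):

#include <linux/linkage.h>

/* If the real implementation is configured out (e.g. CONFIG_SWAP=n for
 * sys_swapon), the weak alias makes the syscall-table entry resolve to
 * sys_ni_syscall(), which returns -ENOSYS. */
cond_syscall(sys_swapon);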
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
new file mode 100644
index 000000000000..3b9de09871f6
--- /dev/null
+++ b/include/linux/linkmode.h
@@ -0,0 +1,85 @@
+#ifndef __LINKMODE_H
+#define __LINKMODE_H
+
+#include <linux/bitmap.h>
+#include <linux/ethtool.h>
+#include <uapi/linux/ethtool.h>
+
+static inline void linkmode_zero(unsigned long *dst)
+{
+ bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_fill(unsigned long *dst)
+{
+ bitmap_fill(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
+{
+ bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_or(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline bool linkmode_empty(const unsigned long *src)
+{
+ return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline bool linkmode_andnot(unsigned long *dst,
+ const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+#define linkmode_test_bit test_bit
+#define linkmode_set_bit __set_bit
+#define linkmode_clear_bit __clear_bit
+#define linkmode_mod_bit __assign_bit
+
+static inline void linkmode_set_bit_array(const int *array, int array_size,
+ unsigned long *addr)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++)
+ linkmode_set_bit(array[i], addr);
+}
+
+static inline int linkmode_equal(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline int linkmode_intersects(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline int linkmode_subset(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_subset(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+void linkmode_resolve_pause(const unsigned long *local_adv,
+ const unsigned long *partner_adv,
+ bool *tx_pause, bool *rx_pause);
+
+void linkmode_set_pause(unsigned long *advertisement, bool tx, bool rx);
+
+#endif /* __LINKMODE_H */
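A short sketch of how the helpers compose (editorial; the function name is invented). __ETHTOOL_DECLARE_LINK_MODE_MASK() from <linux/ethtool.h> declares a bitmap of __ETHTOOL_LINK_MODE_MASK_NBITS bits:

#include <linux/linkmode.h>

/* Do the local and partner advertisements share any link mode? */
static bool linkmodes_overlap(const unsigned long *local,
			      const unsigned long *partner)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);

	linkmode_and(common, local, partner);
	return !linkmode_empty(common);
}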
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index ca5bd91d12e1..e37699b7e839 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LINUX_LOGO_H
#define _LINUX_LINUX_LOGO_H
@@ -9,9 +10,6 @@
* Copyright (C) 2001 Greg Banks <gnb@alphalink.com.au>
* Copyright (C) 2001 Jan-Benedict Glaw <jbglaw@lug-owl.de>
* Copyright (C) 2003 Geert Uytterhoeven <geert@linux-m68k.org>
- *
- * Serial_console ascii image can be any size,
- * but should contain %s to display the version
*/
#include <linux/init.h>
@@ -35,8 +33,6 @@ struct linux_logo {
extern const struct linux_logo logo_linux_mono;
extern const struct linux_logo logo_linux_vga16;
extern const struct linux_logo logo_linux_clut224;
-extern const struct linux_logo logo_blackfin_vga16;
-extern const struct linux_logo logo_blackfin_clut224;
extern const struct linux_logo logo_dec_clut224;
extern const struct linux_logo logo_mac_clut224;
extern const struct linux_logo logo_parisc_clut224;
@@ -45,7 +41,6 @@ extern const struct linux_logo logo_sun_clut224;
extern const struct linux_logo logo_superh_mono;
extern const struct linux_logo logo_superh_vga16;
extern const struct linux_logo logo_superh_clut224;
-extern const struct linux_logo logo_m32r_clut224;
extern const struct linux_logo logo_spe_clut224;
extern const struct linux_logo *fb_find_logo(int depth);
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index f1664c636af0..b72b8cdba765 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIS3LV02D_H_
#define __LIS3LV02D_H_
diff --git a/include/linux/list.h b/include/linux/list.h
index ae537fa46216..00ea8e5fb88b 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -1,14 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
+#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/const.h>
-#include <linux/kernel.h>
+
+#include <asm/barrier.h>
/*
- * Simple doubly linked list implementation.
+ * Circular doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
@@ -17,22 +20,118 @@
* using the generic single-entry routines.
*/
+/**
+ * LIST_HEAD_INIT - initialize a &struct list_head's links to point to itself
+ * @name: name of the list_head
+ */
#define LIST_HEAD_INIT(name) { &(name), &(name) }
+/**
+ * LIST_HEAD - definition of a &struct list_head with initialization values
+ * @name: name of the list_head
+ */
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
+/**
+ * INIT_LIST_HEAD - Initialize a list_head structure
+ * @list: list_head structure to be initialized.
+ *
+ * Initializes the list_head to point to itself. If it is a list header,
+ * the result is an empty list.
+ */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
- list->prev = list;
+ WRITE_ONCE(list->prev, list);
}
+#ifdef CONFIG_LIST_HARDENED
+
#ifdef CONFIG_DEBUG_LIST
-extern bool __list_add_valid(struct list_head *new,
- struct list_head *prev,
- struct list_head *next);
-extern bool __list_del_entry_valid(struct list_head *entry);
+# define __list_valid_slowpath
+#else
+# define __list_valid_slowpath __cold __preserve_most
+#endif
+
+/*
+ * Performs the full set of list corruption checks before __list_add().
+ * On list corruption, it reports a warning and returns false.
+ */
+bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+
+/*
+ * Performs list corruption checks before __list_add(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_add_valid_or_report().
+ */
+static __always_inline bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, since the immediate dereference of them below would
+ * result in a fault if NULL.
+ *
+ * With the reduced set of checks, we can afford to inline the
+ * checks, which also gives the compiler a chance to elide some
+ * of them completely if they can be proven at compile-time. If
+ * one of the pre-conditions does not hold, the slow-path will
+ * show a report which pre-condition failed.
+ */
+ if (likely(next->prev == prev && prev->next == next && new != prev && new != next))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_add_valid_or_report(new, prev, next);
+ return ret;
+}
+
+/*
+ * Performs the full set of list corruption checks before __list_del_entry().
+ * On list corruption, it reports a warning and returns false.
+ */
+bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry);
+
+/*
+ * Performs list corruption checks before __list_del_entry(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_del_entry_valid_or_report().
+ */
+static __always_inline bool __list_del_entry_valid(struct list_head *entry)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ struct list_head *prev = entry->prev;
+ struct list_head *next = entry->next;
+
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, LIST_POISON1 or LIST_POISON2, since the immediate
+ * dereference of them below would result in a fault.
+ */
+ if (likely(prev->next == entry && next->prev == entry))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_del_entry_valid_or_report(entry);
+ return ret;
+}
#else
static inline bool __list_add_valid(struct list_head *new,
struct list_head *prev,
@@ -105,12 +204,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
WRITE_ONCE(prev->next, next);
}
-/**
- * list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: list_empty() on entry does not return true after this, the entry is
- * in an undefined state.
+/*
+ * Delete a list entry and clear the 'prev' pointer.
+ *
+ * This is a special-purpose list clearing method used in the networking code
+ * for lists allocated as per-cpu, where we don't want to incur the extra
+ * WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this
+ * needs to check the node 'prev' pointer instead of calling list_empty().
*/
+static inline void __list_del_clearprev(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->prev = NULL;
+}
+
static inline void __list_del_entry(struct list_head *entry)
{
if (!__list_del_entry_valid(entry))
@@ -119,6 +226,12 @@ static inline void __list_del_entry(struct list_head *entry)
__list_del(entry->prev, entry->next);
}
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
static inline void list_del(struct list_head *entry)
{
__list_del_entry(entry);
@@ -142,14 +255,38 @@ static inline void list_replace(struct list_head *old,
new->prev->next = new;
}
+/**
+ * list_replace_init - replace old entry by new one and initialize the old one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * If @old was empty, it will be overwritten.
+ */
static inline void list_replace_init(struct list_head *old,
- struct list_head *new)
+ struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
}
/**
+ * list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position
+ * @entry1: the location to place entry2
+ * @entry2: the location to place entry1
+ */
+static inline void list_swap(struct list_head *entry1,
+ struct list_head *entry2)
+{
+ struct list_head *pos = entry2->prev;
+
+ list_del(entry2);
+ list_replace(entry1, entry2);
+ if (pos == entry1)
+ pos = entry2;
+ list_add(entry1, pos);
+}
+
+/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
@@ -183,17 +320,59 @@ static inline void list_move_tail(struct list_head *list,
}
/**
+ * list_bulk_move_tail - move a subsection of a list to its tail
+ * @head: the head that will follow our entry
+ * @first: first entry to move
+ * @last: last entry to move, can be the same as first
+ *
+ * Move all entries between @first and including @last before @head.
+ * All three entries must belong to the same linked list.
+ */
+static inline void list_bulk_move_tail(struct list_head *head,
+ struct list_head *first,
+ struct list_head *last)
+{
+ first->prev->next = last->next;
+ last->next->prev = first->prev;
+
+ head->prev->next = first;
+ first->prev = head->prev;
+
+ last->next = head;
+ head->prev = last;
+}
+
+/**
+ * list_is_first - tests whether @list is the first entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_first(const struct list_head *list, const struct list_head *head)
+{
+ return list->prev == head;
+}
+
+/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_last(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_last(const struct list_head *list, const struct list_head *head)
{
return list->next == head;
}
/**
+ * list_is_head - tests whether @list is the list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_head(const struct list_head *list, const struct list_head *head)
+{
+ return list == head;
+}
+
+/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
@@ -203,6 +382,24 @@ static inline int list_empty(const struct list_head *head)
}
/**
+ * list_del_init_careful - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ WRITE_ONCE(entry->prev, entry);
+ smp_store_release(&entry->next, entry);
+}
+
+/**
* list_empty_careful - tests whether a list is empty and not being modified
* @head: the list to test
*
@@ -217,8 +414,8 @@ static inline int list_empty(const struct list_head *head)
*/
static inline int list_empty_careful(const struct list_head *head)
{
- struct list_head *next = head->next;
- return (next == head) && (next == head->prev);
+ struct list_head *next = smp_load_acquire(&head->next);
+ return list_is_head(next, head) && (next == READ_ONCE(head->prev));
}
/**
@@ -236,6 +433,24 @@ static inline void list_rotate_left(struct list_head *head)
}
/**
+ * list_rotate_to_front() - Rotate list to specific item.
+ * @list: The desired new front of the list.
+ * @head: The head of the list.
+ *
+ * Rotates list so that @list becomes the new front of the list.
+ */
+static inline void list_rotate_to_front(struct list_head *list,
+ struct list_head *head)
+{
+ /*
+ * Deletes the list head from the list denoted by @head and
+ * places it as the tail of @list, this effectively rotates the
+ * list so that @list is at the front.
+ */
+ list_move_tail(head, list);
+}
+
+/**
* list_is_singular - tests whether a list has just one entry.
* @head: the list to test.
*/
@@ -275,15 +490,44 @@ static inline void list_cut_position(struct list_head *list,
{
if (list_empty(head))
return;
- if (list_is_singular(head) &&
- (head->next != entry && head != entry))
+ if (list_is_singular(head) && !list_is_head(entry, head) && (entry != head->next))
return;
- if (entry == head)
+ if (list_is_head(entry, head))
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
}
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list. You should pass
+ * in @entry an element you know is on @head. @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+ struct list_head *head,
+ struct list_head *entry)
+{
+ if (head->next == entry) {
+ INIT_LIST_HEAD(list);
+ return;
+ }
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry->prev;
+ list->prev->next = list;
+ head->next = entry;
+ entry->prev = head;
+}
+
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
@@ -401,6 +645,20 @@ static inline void list_splice_tail_init(struct list_head *list,
})
/**
+ * list_last_entry_or_null - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_last_entry_or_null(ptr, type, member) ({ \
+ struct list_head *head__ = (ptr); \
+ struct list_head *pos__ = READ_ONCE(head__->prev); \
+ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
+})
+
+/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -409,6 +667,19 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
+ * list_next_entry_circular - get the next element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around to the first element if @pos is the last one.
+ * Note that the list is expected to be non-empty.
+ */
+#define list_next_entry_circular(pos, head, member) \
+ (list_is_last(&(pos)->member, head) ? \
+ list_first_entry(head, typeof(*(pos)), member) : list_next_entry(pos, member))
+
+/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -417,12 +688,35 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.prev, typeof(*(pos)), member)
/**
+ * list_prev_entry_circular - get the prev element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around to the last element if @pos is the first one.
+ * Note that the list is expected to be non-empty.
+ */
+#define list_prev_entry_circular(pos, head, member) \
+ (list_is_first(&(pos)->member, head) ? \
+ list_last_entry(head, typeof(*(pos)), member) : list_prev_entry(pos, member))
+
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
+ for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
+
+/**
+ * list_for_each_continue - continue iteration over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * Continue to iterate over a list, continuing after the current position.
+ */
+#define list_for_each_continue(pos, head) \
+ for (pos = pos->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
@@ -430,7 +724,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; pos != (head); pos = pos->prev)
+ for (pos = (head)->prev; !list_is_head(pos, (head)); pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
@@ -439,8 +733,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
+ for (pos = (head)->next, n = pos->next; \
+ !list_is_head(pos, (head)); \
+ pos = n, n = pos->next)
/**
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
@@ -450,10 +745,34 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
- pos != (head); \
+ !list_is_head(pos, (head)); \
pos = n, n = pos->prev)
/**
+ * list_count_nodes - count nodes in the list
+ * @head: the head for your list.
+ */
+static inline size_t list_count_nodes(struct list_head *head)
+{
+ struct list_head *pos;
+ size_t count = 0;
+
+ list_for_each(pos, head)
+ count++;
+
+ return count;
+}
+
+/**
+ * list_entry_is_head - test if the entry points to the head of the list
+ * @pos: the type * to cursor
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_entry_is_head(pos, head, member) \
+ list_is_head(&pos->member, (head))
+
+/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
@@ -461,7 +780,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -472,7 +791,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -497,7 +816,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -511,7 +830,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_prev_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -523,7 +842,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate over list of given type, continuing from current position.
*/
#define list_for_each_entry_from(pos, head, member) \
- for (; &pos->member != (head); \
+ for (; !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -536,7 +855,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate backwards over list of given type, continuing from current position.
*/
#define list_for_each_entry_from_reverse(pos, head, member) \
- for (; &pos->member != (head); \
+ for (; !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -549,7 +868,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member), \
n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -565,7 +884,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_next_entry(pos, member), \
n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -580,7 +899,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -596,7 +915,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member), \
n = list_prev_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_prev_entry(n, member))
/**
@@ -630,11 +949,36 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h)
h->pprev = NULL;
}
+/**
+ * hlist_unhashed - Has node been removed from list and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed
+ * state. For example, hlist_nulls_del_init_rcu() does leave the
+ * node in unhashed state, but hlist_nulls_del() does not.
+ */
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
+/**
+ * hlist_unhashed_lockless - Version of hlist_unhashed for lockless use
+ * @h: Node to be checked
+ *
+ * This variant of hlist_unhashed() must be used in lockless contexts
+ * to avoid potential load-tearing. The READ_ONCE() is paired with the
+ * various WRITE_ONCE() in hlist helpers that are defined below.
+ */
+static inline int hlist_unhashed_lockless(const struct hlist_node *h)
+{
+ return !READ_ONCE(h->pprev);
+}
+
+/**
+ * hlist_empty - Is the specified hlist_head structure an empty hlist?
+ * @h: Structure to check.
+ */
static inline int hlist_empty(const struct hlist_head *h)
{
return !READ_ONCE(h->first);
@@ -647,9 +991,16 @@ static inline void __hlist_del(struct hlist_node *n)
WRITE_ONCE(*pprev, next);
if (next)
- next->pprev = pprev;
+ WRITE_ONCE(next->pprev, pprev);
}
+/**
+ * hlist_del - Delete the specified hlist_node from its list
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in hashed state. Use
+ * hlist_del_init() or similar instead to unhash @n.
+ */
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
@@ -657,6 +1008,12 @@ static inline void hlist_del(struct hlist_node *n)
n->pprev = LIST_POISON2;
}
+/**
+ * hlist_del_init - Delete the specified hlist_node from its list and initialize
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in unhashed state.
+ */
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
@@ -665,51 +1022,83 @@ static inline void hlist_del_init(struct hlist_node *n)
}
}
+/**
+ * hlist_add_head - add a new entry at the beginning of the hlist
+ * @n: new entry to be added
+ * @h: hlist head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
- n->next = first;
+ WRITE_ONCE(n->next, first);
if (first)
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
WRITE_ONCE(h->first, n);
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
}
-/* next must be != NULL */
+/**
+ * hlist_add_before - add a new entry before the one specified
+ * @n: new entry to be added
+ * @next: hlist node to add it before, which must be non-NULL
+ */
static inline void hlist_add_before(struct hlist_node *n,
- struct hlist_node *next)
+ struct hlist_node *next)
{
- n->pprev = next->pprev;
- n->next = next;
- next->pprev = &n->next;
+ WRITE_ONCE(n->pprev, next->pprev);
+ WRITE_ONCE(n->next, next);
+ WRITE_ONCE(next->pprev, &n->next);
WRITE_ONCE(*(n->pprev), n);
}
+/**
+ * hlist_add_behind - add a new entry after the one specified
+ * @n: new entry to be added
+ * @prev: hlist node to add it after, which must be non-NULL
+ */
static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
- n->next = prev->next;
+ WRITE_ONCE(n->next, prev->next);
WRITE_ONCE(prev->next, n);
- n->pprev = &prev->next;
+ WRITE_ONCE(n->pprev, &prev->next);
if (n->next)
- n->next->pprev = &n->next;
+ WRITE_ONCE(n->next->pprev, &n->next);
}
-/* after that we'll appear to be on some hlist and hlist_del will work */
+/**
+ * hlist_add_fake - create a fake hlist consisting of a single headless node
+ * @n: Node to make a fake list out of
+ *
+ * This makes @n appear to be its own predecessor on a headless hlist.
+ * The point of this is to allow things like hlist_del() to work correctly
+ * in cases where there is no list.
+ */
static inline void hlist_add_fake(struct hlist_node *n)
{
n->pprev = &n->next;
}
+/**
+ * hlist_fake: Is this node a fake hlist?
+ * @h: Node to check for being a self-referential fake hlist.
+ */
static inline bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
-/*
+/**
+ * hlist_is_singular_node - is node the only element of the specified hlist?
+ * @n: Node to check for singularity.
+ * @h: Header for potentially singular list.
+ *
* Check whether the node is the only node of the head without
- * accessing head:
+ * accessing head, thus avoiding unnecessary cache misses.
*/
static inline bool
hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
@@ -717,7 +1106,11 @@ hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
return !n->next && n->pprev == &h->first;
}
-/*
+/**
+ * hlist_move_list - Move an hlist
+ * @old: hlist_head for old list.
+ * @new: hlist_head for new list.
+ *
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/
@@ -730,6 +1123,26 @@ static inline void hlist_move_list(struct hlist_head *old,
old->first = NULL;
}
+/**
+ * hlist_splice_init() - move all entries from one list to another
+ * @from: hlist_head from which entries will be moved
+ * @last: last entry on the @from list
+ * @to: hlist_head to which entries will be moved
+ *
+ * @to can be empty, @from must contain at least @last.
+ */
+static inline void hlist_splice_init(struct hlist_head *from,
+ struct hlist_node *last,
+ struct hlist_head *to)
+{
+ if (to->first)
+ to->first->pprev = &last->next;
+ last->next = to->first;
+ to->first = from->first;
+ from->first->pprev = &to->first;
+ from->first = NULL;
+}
+
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
@@ -777,7 +1190,7 @@ static inline void hlist_move_list(struct hlist_head *old,
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
- * @n: another &struct hlist_node to use as temporary storage
+ * @n: a &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
@@ -786,4 +1199,19 @@ static inline void hlist_move_list(struct hlist_head *old,
pos && ({ n = pos->member.next; 1; }); \
pos = hlist_entry_safe(n, typeof(*pos), member))
+/**
+ * hlist_count_nodes - count nodes in the hlist
+ * @head: the head for your hlist.
+ */
+static inline size_t hlist_count_nodes(struct hlist_head *head)
+{
+ struct hlist_node *pos;
+ size_t count = 0;
+
+ hlist_for_each(pos, head)
+ count++;
+
+ return count;
+}
+
#endif
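An editorial sketch of the idiom the *_safe iterators exist for, deleting while walking ('struct item' is invented):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int val;
	struct list_head node;
};

/* Free every element; the 'tmp' cursor keeps the walk valid after
 * list_del() unlinks 'it' from the chain. */
static void drain_items(struct list_head *head)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);
		kfree(it);
	}
}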
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index cb483305e1f5..ae1b541446c9 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_BL_H
#define _LINUX_LIST_BL_H
@@ -85,6 +86,32 @@ static inline void hlist_bl_add_head(struct hlist_bl_node *n,
hlist_bl_set_first(h, n);
}
+static inline void hlist_bl_add_before(struct hlist_bl_node *n,
+ struct hlist_bl_node *next)
+{
+ struct hlist_bl_node **pprev = next->pprev;
+
+ n->pprev = pprev;
+ n->next = next;
+ next->pprev = &n->next;
+
+ /* pprev may be `first`, so be careful not to lose the lock bit */
+ WRITE_ONCE(*pprev,
+ (struct hlist_bl_node *)
+ ((uintptr_t)n | ((uintptr_t)*pprev & LIST_BL_LOCKMASK)));
+}
+
+static inline void hlist_bl_add_behind(struct hlist_bl_node *n,
+ struct hlist_bl_node *prev)
+{
+ n->next = prev->next;
+ n->pprev = &prev->next;
+ prev->next = n;
+
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
struct hlist_bl_node *next = n->next;
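Editorial sketch: hlist_bl heads encode a spinlock in bit 0 of ->first, which is why hlist_bl_add_before() above masks LIST_BL_LOCKMASK back into the pointer it rewrites. Callers serialize with hlist_bl_lock()/hlist_bl_unlock():

#include <linux/list_bl.h>

/* Insert @n at the head of a bit-locked hlist. */
static void bl_insert(struct hlist_bl_head *head, struct hlist_bl_node *n)
{
	hlist_bl_lock(head);
	hlist_bl_add_head(n, head);
	hlist_bl_unlock(head);
}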
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index fa7fd03cb5f9..fe739d35a864 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
* Authors: David Chinner and Glauber Costa
@@ -10,6 +11,7 @@
#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>
+#include <linux/xarray.h>
struct mem_cgroup;
@@ -22,79 +24,149 @@ enum lru_status {
LRU_SKIP, /* item cannot be locked, skip */
LRU_RETRY, /* item not freeable. May drop the lock
internally, but has to return locked. */
+ LRU_STOP, /* stop lru list walking. May drop the lock
+ internally, but has to return locked. */
};
struct list_lru_one {
struct list_head list;
/* may become negative during memcg reparenting */
long nr_items;
+ /* protects all fields above */
+ spinlock_t lock;
};
struct list_lru_memcg {
- /* array of per cgroup lists, indexed by memcg_cache_id */
- struct list_lru_one *lru[0];
+ struct rcu_head rcu;
+ /* array of per cgroup per node lists, indexed by node id */
+ struct list_lru_one node[];
};
struct list_lru_node {
- /* protects all lists on the node, including per cgroup */
- spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru;
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
- /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
- struct list_lru_memcg *memcg_lrus;
-#endif
- long nr_items;
+ atomic_long_t nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
struct list_lru_node *node;
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG
struct list_head list;
+ int shrinker_id;
+ bool memcg_aware;
+ struct xarray xa;
+#endif
+#ifdef CONFIG_LOCKDEP
+ struct lock_class_key *key;
#endif
};
void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
- struct lock_class_key *key);
+ struct shrinker *shrinker);
-#define list_lru_init(lru) __list_lru_init((lru), false, NULL)
-#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key))
-#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL)
+#define list_lru_init(lru) \
+ __list_lru_init((lru), false, NULL)
+#define list_lru_init_memcg(lru, shrinker) \
+ __list_lru_init((lru), true, shrinker)
-int memcg_update_all_list_lrus(int num_memcgs);
-void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
+static inline int list_lru_init_memcg_key(struct list_lru *lru, struct shrinker *shrinker,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_LOCKDEP
+ lru->key = key;
+#endif
+ return list_lru_init_memcg(lru, shrinker);
+}
+
+int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
+ gfp_t gfp);
+void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
/**
* list_lru_add: add an element to the lru list's tail
- * @list_lru: the lru pointer
+ * @lru: the lru pointer
* @item: the item to be added.
+ * @nid: the node id of the sublist to add the item to.
+ * @memcg: the cgroup of the sublist to add the item to.
*
* If the element is already part of a list, this function returns doing
- * nothing. Therefore the caller does not need to keep state about whether or
- * not the element already belongs in the list and is allowed to lazy update
- * it. Note however that this is valid for *a* list, not *this* list. If
- * the caller organize itself in a way that elements can be in more than
- * one type of list, it is up to the caller to fully remove the item from
- * the previous list (with list_lru_del() for instance) before moving it
- * to @list_lru
- *
- * Return value: true if the list was updated, false otherwise
+ * nothing. This means that it is not necessary to keep state about whether or
+ * not the element already belongs in the list. That said, this logic only
+ * works if the item is in *this* list. If the item might be in some other
+ * list, then you cannot rely on this check and you must remove it from the
+ * other list before trying to insert it.
+ *
+ * The lru list consists of many sublists internally; the @nid and @memcg
+ * parameters are used to determine which sublist to insert the item into.
+ * It's important to use the right value of @nid and @memcg when deleting the
+ * item, since it might otherwise get deleted from the wrong sublist.
+ *
+ * This also applies when attempting to insert the item multiple times - if
+ * the item is currently in one sublist and you call list_lru_add() again, you
+ * must pass the right @nid and @memcg parameters so that the same sublist is
+ * used.
+ *
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt).
+ *
+ * Return: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_add_obj: add an element to the lru list's tail
+ * @lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * This function is similar to list_lru_add(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This
+ * assumption is valid for slab-object LRUs such as dentries, inodes, etc.
+ *
+ * Return: true if the list was updated, false otherwise
*/
-bool list_lru_add(struct list_lru *lru, struct list_head *item);
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
/**
- * list_lru_del: delete an element to the lru list
- * @list_lru: the lru pointer
+ * list_lru_del: delete an element from the lru list
+ * @lru: the lru pointer
* @item: the item to be deleted.
+ * @nid: the node id of the sublist to delete the item from.
+ * @memcg: the cgroup of the sublist to delete the item from.
+ *
+ * This function works analogously as list_lru_add() in terms of list
+ * manipulation.
+ *
+ * The comments in list_lru_add() about an element already being in a list are
+ * also valid for list_lru_del(), that is, you can delete an item that has
+ * already been removed or never been added. However, if the item is in a
+ * list, it must be in *this* list, and you must pass the right value of @nid
+ * and @memcg so that the right sublist is used.
*
- * This function works analogously as list_lru_add in terms of list
- * manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del.
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt). When a memcg is deleted, list_lru entries
+ * are automatically moved to the parent memcg. This is done in a race-free
+ * way, so during deletion of a memcg both the old and new memcg will resolve
+ * to the same sublist internally.
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise
*/
-bool list_lru_del(struct list_lru *lru, struct list_head *item);
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_del_obj: delete an element from the lru list
+ * @lru: the lru pointer
+ * @item: the item to be deleted.
+ *
+ * This function is similar to list_lru_del(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This
+ * assumption is valid for slab-object LRUs such as dentries, inodes, etc.
+ *
+ * Return: true if the list was updated, false otherwise.
+ */
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
/**
* list_lru_count_one: return the number of objects currently held by @lru
@@ -102,9 +174,11 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* @nid: the node id to count from.
* @memcg: the cgroup to count from.
*
- * Always return a non-negative number, 0 for empty lists. There is no
- * guarantee that the list is not updated while the count is being computed.
- * Callers that want such a guarantee need to provide an outer lock.
+ * There is no guarantee that the list is not updated while the count is being
+ * computed. Callers that want such a guarantee need to provide an outer lock.
+ *
+ * Return: 0 for empty lists, otherwise the number of objects
+ * currently held by @lru.
*/
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg);
@@ -132,34 +206,51 @@ void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
- struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+ struct list_lru_one *list, void *cb_arg);
/**
- * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * This function will scan all elements in a particular list_lru, calling the
+ * This function will scan all elements in a particular @lru, calling the
* @isolate callback for each of those items, along with the current list
* spinlock and a caller-provided opaque. The @isolate callback can choose to
* drop the lock internally, but *must* return with the lock held. The callback
- * will return an enum lru_status telling the list_lru infrastructure what to
+ * will return an enum lru_status telling the @lru infrastructure what to
* do with the object being scanned.
*
- * Please note that nr_to_walk does not mean how many objects will be freed,
+ * Please note that @nr_to_walk does not mean how many objects will be freed,
* just how many objects will be scanned.
*
- * Return value: the number of objects effectively removed from the LRU.
+ * Return: the number of objects effectively removed from the LRU.
*/
unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
+/**
+ * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items.
+ * @lru: the lru pointer.
+ * @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
+ * @isolate: callback function that is responsible for deciding what to do with
+ * the item currently being scanned
+ * @cb_arg: opaque type that will be passed to @isolate
+ * @nr_to_walk: how many items to scan.
+ *
+ * Same as list_lru_walk_one() except that the spinlock is acquired with
+ * spin_lock_irq().
+ */
+unsigned long list_lru_walk_one_irq(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
@@ -173,6 +264,14 @@ list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
}
static inline unsigned long
+list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
+ list_lru_walk_cb isolate, void *cb_arg)
+{
+ return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
+ &sc->nr_to_scan);
+}
+
+static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
void *cb_arg, unsigned long nr_to_walk)
{
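Editorial sketch of the reworked API, with invented names: the shrinker is now handed over at init time so the LRU can record its shrinker_id, and the *_obj variants derive the node and memcg from the object itself.

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru my_lru;

static int my_lru_setup(struct shrinker *shrinker)
{
	return list_lru_init_memcg(&my_lru, shrinker);
}

static void my_object_touched(struct list_head *item)
{
	/* nid and memcg are resolved from @item's backing object */
	list_lru_add_obj(&my_lru, item);
}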
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 87ff4f58a2f0..248db9b77ee2 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_NULLS_H
#define _LINUX_LIST_NULLS_H
@@ -27,6 +28,7 @@ struct hlist_nulls_node {
#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
+#define HLIST_NULLS_HEAD_INIT(nulls) {.first = (struct hlist_nulls_node *)NULLS_MARKER(nulls)}
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
@@ -55,11 +57,33 @@ static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
return ((unsigned long)ptr) >> 1;
}
+/**
+ * hlist_nulls_unhashed - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not.
+ */
static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
{
return !h->pprev;
}
+/**
+ * hlist_nulls_unhashed_lockless - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not. Unlike hlist_nulls_unhashed(), this
+ * function may be used locklessly.
+ */
+static inline int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h)
+{
+ return !READ_ONCE(h->pprev);
+}
+
static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
{
return is_a_nulls(READ_ONCE(h->first));
@@ -71,10 +95,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
h->first = n;
if (!is_a_nulls(first))
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
@@ -84,13 +108,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
WRITE_ONCE(*pprev, next);
if (!is_a_nulls(next))
- next->pprev = pprev;
+ WRITE_ONCE(next->pprev, pprev);
}
static inline void hlist_nulls_del(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
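Editorial sketch of the lockless lookup pattern nulls lists enable (cf. Documentation/RCU/rculist_nulls.rst); the object type and hash-slot bookkeeping are invented, and the caller holds rcu_read_lock():

#include <linux/rculist_nulls.h>

struct bucket_obj {
	int key;
	struct hlist_nulls_node node;
};

static struct bucket_obj *bucket_lookup(struct hlist_nulls_head *head,
					int key, unsigned long slot)
{
	struct bucket_obj *obj;
	struct hlist_nulls_node *n;

begin:
	hlist_nulls_for_each_entry_rcu(obj, n, head, node)
		if (obj->key == key)
			return obj;
	/* The walk ended on a nulls marker from a different chain: the
	 * object was recycled under us (SLAB_TYPESAFE_BY_RCU), restart. */
	if (get_nulls_value(n) != slot)
		goto begin;
	return NULL;
}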
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h
index 1a2df2efb771..453105f74e05 100644
--- a/include/linux/list_sort.h
+++ b/include/linux/list_sort.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_SORT_H
#define _LINUX_LIST_SORT_H
@@ -5,7 +6,9 @@
struct list_head;
-void list_sort(void *priv, struct list_head *head,
- int (*cmp)(void *priv, struct list_head *a,
- struct list_head *b));
+typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *,
+ const struct list_head *, const struct list_head *);
+
+__attribute__((nonnull(2,3)))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp);
#endif
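Editorial sketch of a comparison callback matching the new list_cmp_func_t signature ('struct item' is invented):

#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
	int val;
	struct list_head node;
};

/* Ascending order: negative if a < b, zero if equal, positive if a > b. */
static int cmp_items(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct item *ia = list_entry(a, struct item, node);
	const struct item *ib = list_entry(b, struct item, node);

	if (ia->val != ib->val)
		return ia->val < ib->val ? -1 : 1;
	return 0;
}

/* usage: list_sort(NULL, &my_list, cmp_items); */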
diff --git a/include/linux/litex.h b/include/linux/litex.h
new file mode 100644
index 000000000000..f2edb86d5f44
--- /dev/null
+++ b/include/linux/litex.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common LiteX header providing
+ * helper functions for accessing CSRs.
+ *
+ * Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
+ */
+
+#ifndef _LINUX_LITEX_H
+#define _LINUX_LITEX_H
+
+#include <linux/io.h>
+
+static inline void _write_litex_subregister(u32 val, void __iomem *addr)
+{
+ writel((u32 __force)cpu_to_le32(val), addr);
+}
+
+static inline u32 _read_litex_subregister(void __iomem *addr)
+{
+ return le32_to_cpu((__le32 __force)readl(addr));
+}
+
+/*
+ * LiteX SoC Generator, depending on the configuration, can split a single
+ * logical CSR (Control&Status Register) into a series of consecutive physical
+ * registers.
+ *
+ * For example, in the configuration with 8-bit CSR Bus, a 32-bit aligned,
+ * 32-bit wide logical CSR will be laid out as four 32-bit physical
+ * subregisters, each one containing one byte of meaningful data.
+ *
+ * For Linux support, upstream LiteX enforces a 32-bit wide CSR bus, which
+ * means that only larger-than-32-bit CSRs will be split across multiple
+ * subregisters (e.g., a 64-bit CSR will be spread across two consecutive
+ * 32-bit subregisters).
+ *
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
+ */
+
+static inline void litex_write8(void __iomem *reg, u8 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write16(void __iomem *reg, u16 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write32(void __iomem *reg, u32 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write64(void __iomem *reg, u64 val)
+{
+ _write_litex_subregister(val >> 32, reg);
+ _write_litex_subregister(val, reg + 4);
+}
+
+static inline u8 litex_read8(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u16 litex_read16(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u32 litex_read32(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u64 litex_read64(void __iomem *reg)
+{
+ return ((u64)_read_litex_subregister(reg) << 32) |
+ _read_litex_subregister(reg + 4);
+}
+
+#endif /* _LINUX_LITEX_H */
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 194991ef9347..772919e8096a 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* livepatch.h - Kernel Live Patching Core
*
* Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
* Copyright (C) 2014 SUSE
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _LINUX_LIVEPATCH_H_
@@ -24,15 +12,16 @@
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/livepatch_external.h>
+#include <linux/livepatch_sched.h>
#if IS_ENABLED(CONFIG_LIVEPATCH)
-#include <asm/livepatch.h>
-
/* task patch states */
-#define KLP_UNDEFINED -1
-#define KLP_UNPATCHED 0
-#define KLP_PATCHED 1
+#define KLP_TRANSITION_IDLE -1
+#define KLP_TRANSITION_UNPATCHED 0
+#define KLP_TRANSITION_PATCHED 1
/**
* struct klp_func - function structure for live patching
@@ -40,12 +29,13 @@
* @new_func: pointer to the patched function code
* @old_sympos: a hint indicating which symbol position the old function
* can be found (optional)
- * @immediate: patch the func immediately, bypassing safety mechanisms
- * @old_addr: the address of the function being patched
+ * @old_func: pointer to the function being patched
* @kobj: kobject for sysfs resources
+ * @node: list node for klp_object func_list
* @stack_node: list node for klp_ops func_stack list
* @old_size: size of the old function
* @new_size: size of the new function
+ * @nop: temporary patch to use the original code again; dynamically allocated
* @patched: the func has been added to the klp_ops list
* @transition: the func is currently being applied or reverted
*
@@ -76,13 +66,14 @@ struct klp_func {
* in kallsyms for the given object is used.
*/
unsigned long old_sympos;
- bool immediate;
/* internal */
- unsigned long old_addr;
+ void *old_func;
struct kobject kobj;
+ struct list_head node;
struct list_head stack_node;
unsigned long old_size, new_size;
+ bool nop;
bool patched;
bool transition;
};
@@ -91,60 +82,94 @@ struct klp_func {
* struct klp_object - kernel object structure for live patching
* @name: module name (or NULL for vmlinux)
* @funcs: function entries for functions to be patched in the object
+ * @callbacks: functions to be executed pre/post (un)patching
* @kobj: kobject for sysfs resources
+ * @func_list: dynamic list of the function entries
+ * @node: list node for klp_patch obj_list
* @mod: kernel module associated with the patched object
* (NULL for vmlinux)
+ * @dynamic: temporary object for nop functions; dynamically allocated
* @patched: the object's funcs have been added to the klp_ops list
*/
struct klp_object {
/* external */
const char *name;
struct klp_func *funcs;
+ struct klp_callbacks callbacks;
/* internal */
struct kobject kobj;
+ struct list_head func_list;
+ struct list_head node;
struct module *mod;
+ bool dynamic;
bool patched;
};
/**
+ * struct klp_state - state of the system modified by the livepatch
+ * @id: system state identifier (non-zero)
+ * @version: version of the change
+ * @data: custom data
+ */
+struct klp_state {
+ unsigned long id;
+ unsigned int version;
+ void *data;
+};
+
+/**
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
- * @immediate: patch all funcs immediately, bypassing safety mechanisms
- * @list: list node for global list of registered patches
+ * @states: system states that can get modified
+ * @replace: replace all actively used patches
+ * @list: list node for global list of actively used patches
* @kobj: kobject for sysfs resources
+ * @obj_list: dynamic list of the object entries
* @enabled: the patch is enabled (but operation may be incomplete)
+ * @forced: was involved in a forced transition
+ * @free_work: patch cleanup from workqueue-context
* @finish: for waiting till it is safe to remove the patch module
*/
struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
- bool immediate;
+ struct klp_state *states;
+ bool replace;
/* internal */
struct list_head list;
struct kobject kobj;
+ struct list_head obj_list;
bool enabled;
+ bool forced;
+ struct work_struct free_work;
struct completion finish;
};
-#define klp_for_each_object(patch, obj) \
+#define klp_for_each_object_static(patch, obj) \
for (obj = patch->objs; obj->funcs || obj->name; obj++)
-#define klp_for_each_func(obj, func) \
+#define klp_for_each_object_safe(patch, obj, tmp_obj) \
+ list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)
+
+#define klp_for_each_object(patch, obj) \
+ list_for_each_entry(obj, &patch->obj_list, node)
+
+#define klp_for_each_func_static(obj, func) \
for (func = obj->funcs; \
func->old_name || func->new_func || func->old_sympos; \
func++)
-int klp_register_patch(struct klp_patch *);
-int klp_unregister_patch(struct klp_patch *);
-int klp_enable_patch(struct klp_patch *);
-int klp_disable_patch(struct klp_patch *);
+#define klp_for_each_func_safe(obj, func, tmp_func) \
+ list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)
-void arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj);
+#define klp_for_each_func(obj, func) \
+ list_for_each_entry(func, &obj->func_list, node)
+
+int klp_enable_patch(struct klp_patch *);
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
@@ -164,6 +189,29 @@ static inline bool klp_have_reliable_stack(void)
IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}
+typedef int (*klp_shadow_ctor_t)(void *obj,
+ void *shadow_data,
+ void *ctor_data);
+typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);
+
+void *klp_shadow_get(void *obj, unsigned long id);
+void *klp_shadow_alloc(void *obj, unsigned long id,
+ size_t size, gfp_t gfp_flags,
+ klp_shadow_ctor_t ctor, void *ctor_data);
+void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
+ size_t size, gfp_t gfp_flags,
+ klp_shadow_ctor_t ctor, void *ctor_data);
+void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
+void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
+
+struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
+struct klp_state *klp_get_prev_state(unsigned long id);
+
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname);
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
@@ -172,6 +220,15 @@ static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}
+static inline
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname)
+{
+ return 0;
+}
+
#endif /* CONFIG_LIVEPATCH */
#endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/include/linux/livepatch_external.h b/include/linux/livepatch_external.h
new file mode 100644
index 000000000000..138af19b0f5c
--- /dev/null
+++ b/include/linux/livepatch_external.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * External livepatch interfaces for patch creation tooling
+ */
+
+#ifndef _LINUX_LIVEPATCH_EXTERNAL_H_
+#define _LINUX_LIVEPATCH_EXTERNAL_H_
+
+#include <linux/types.h>
+
+#define KLP_RELOC_SEC_PREFIX ".klp.rela."
+#define KLP_SYM_PREFIX ".klp.sym."
+
+#define __KLP_PRE_PATCH_PREFIX __klp_pre_patch_callback_
+#define __KLP_POST_PATCH_PREFIX __klp_post_patch_callback_
+#define __KLP_PRE_UNPATCH_PREFIX __klp_pre_unpatch_callback_
+#define __KLP_POST_UNPATCH_PREFIX __klp_post_unpatch_callback_
+
+#define KLP_PRE_PATCH_PREFIX __stringify(__KLP_PRE_PATCH_PREFIX)
+#define KLP_POST_PATCH_PREFIX __stringify(__KLP_POST_PATCH_PREFIX)
+#define KLP_PRE_UNPATCH_PREFIX __stringify(__KLP_PRE_UNPATCH_PREFIX)
+#define KLP_POST_UNPATCH_PREFIX __stringify(__KLP_POST_UNPATCH_PREFIX)
+
+struct klp_object;
+
+typedef int (*klp_pre_patch_t)(struct klp_object *obj);
+typedef void (*klp_post_patch_t)(struct klp_object *obj);
+typedef void (*klp_pre_unpatch_t)(struct klp_object *obj);
+typedef void (*klp_post_unpatch_t)(struct klp_object *obj);
+
+/**
+ * struct klp_callbacks - pre/post live-(un)patch callback structure
+ * @pre_patch: executed before code patching
+ * @post_patch: executed after code patching
+ * @pre_unpatch: executed before code unpatching
+ * @post_unpatch: executed after code unpatching
+ * @post_unpatch_enabled: flag indicating if post-unpatch callback
+ * should run
+ *
+ * All callbacks are optional. Only the pre-patch callback, if provided,
+ * will be unconditionally executed. If the parent klp_object fails to
+ * patch for any reason, including a non-zero error status returned from
+ * the pre-patch callback, no further callbacks will be executed.
+ */
+struct klp_callbacks {
+ klp_pre_patch_t pre_patch;
+ klp_post_patch_t post_patch;
+ klp_pre_unpatch_t pre_unpatch;
+ klp_post_unpatch_t post_unpatch;
+ bool post_unpatch_enabled;
+};
+
+/*
+ * 'struct klp_{func,object}_ext' are compact "external" representations of
+ * 'struct klp_{func,object}'. They are used by objtool for livepatch
+ * generation. The structs are then read by the livepatch module and converted
+ * to the real structs before calling klp_enable_patch().
+ *
+ * TODO make these the official API for klp_enable_patch(). That should
+ * simplify livepatch's interface as well as its data structure lifetime
+ * management.
+ */
+struct klp_func_ext {
+ const char *old_name;
+ void *new_func;
+ unsigned long sympos;
+};
+
+struct klp_object_ext {
+ const char *name;
+ struct klp_func_ext *funcs;
+ struct klp_callbacks callbacks;
+ unsigned int nr_funcs;
+};
+
+#endif /* _LINUX_LIVEPATCH_EXTERNAL_H_ */
diff --git a/include/linux/livepatch_helpers.h b/include/linux/livepatch_helpers.h
new file mode 100644
index 000000000000..99d68d0773fa
--- /dev/null
+++ b/include/linux/livepatch_helpers.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LIVEPATCH_HELPERS_H
+#define _LINUX_LIVEPATCH_HELPERS_H
+
+/*
+ * Interfaces for use by livepatch patches
+ */
+
+#include <linux/syscalls.h>
+#include <linux/livepatch.h>
+
+#ifdef MODULE
+#define KLP_OBJNAME __KBUILD_MODNAME
+#else
+#define KLP_OBJNAME vmlinux
+#endif
+
+/* Livepatch callback registration */
+
+#define KLP_CALLBACK_PTRS ".discard.klp_callback_ptrs"
+
+#define KLP_PRE_PATCH_CALLBACK(func) \
+ klp_pre_patch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_PRE_PATCH_PREFIX, KLP_OBJNAME) = func
+
+#define KLP_POST_PATCH_CALLBACK(func) \
+ klp_post_patch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_POST_PATCH_PREFIX, KLP_OBJNAME) = func
+
+#define KLP_PRE_UNPATCH_CALLBACK(func) \
+ klp_pre_unpatch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_PRE_UNPATCH_PREFIX, KLP_OBJNAME) = func
+
+#define KLP_POST_UNPATCH_CALLBACK(func) \
+ klp_post_unpatch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_POST_UNPATCH_PREFIX, KLP_OBJNAME) = func
+
+/*
+ * Replace static_call() usage with this macro when create-diff-object
+ * recommends it due to the original static call key living in a module.
+ *
+ * This converts the static call to a regular indirect call.
+ */
+#define KLP_STATIC_CALL(name) \
+ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
+
+/* Syscall patching */
+
+#define KLP_SYSCALL_DEFINE1(name, ...) KLP_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE2(name, ...) KLP_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE3(name, ...) KLP_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE4(name, ...) KLP_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE5(name, ...) KLP_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE6(name, ...) KLP_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
+
+#define KLP_SYSCALL_DEFINEx(x, sname, ...) \
+ __KLP_SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
+
+#ifdef CONFIG_X86_64
+// TODO move this to arch/x86/include/asm/syscall_wrapper.h and share code
+#define __KLP_SYSCALL_DEFINEx(x, name, ...) \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __klp_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ __X64_SYS_STUBx(x, name, __VA_ARGS__) \
+ __IA32_SYS_STUBx(x, name, __VA_ARGS__) \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __klp_do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __klp_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#endif
+
+#endif /* _LINUX_LIVEPATCH_HELPERS_H */
diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h
new file mode 100644
index 000000000000..065c185f2763
--- /dev/null
+++ b/include/linux/livepatch_sched.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_LIVEPATCH_SCHED_H_
+#define _LINUX_LIVEPATCH_SCHED_H_
+
+#include <linux/jump_label.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_LIVEPATCH
+
+void __klp_sched_try_switch(void);
+
+DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
+
+static __always_inline void klp_sched_try_switch(struct task_struct *curr)
+{
+ if (static_branch_unlikely(&klp_sched_try_switch_key) &&
+ READ_ONCE(curr->__state) & TASK_FREEZABLE)
+ __klp_sched_try_switch();
+}
+
+#else /* !CONFIG_LIVEPATCH */
+static inline void klp_sched_try_switch(struct task_struct *curr) {}
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
diff --git a/include/linux/liveupdate.h b/include/linux/liveupdate.h
new file mode 100644
index 000000000000..a7f6ee5b6771
--- /dev/null
+++ b/include/linux/liveupdate.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef _LINUX_LIVEUPDATE_H
+#define _LINUX_LIVEUPDATE_H
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/kho/abi/luo.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <uapi/linux/liveupdate.h>
+
+struct liveupdate_file_handler;
+struct file;
+
+/**
+ * struct liveupdate_file_op_args - Arguments for file operation callbacks.
+ * @handler: The file handler being called.
+ * @retrieved: The retrieve status for the 'can_finish / finish'
+ * operation.
+ * @file: The file object. For retrieve: [OUT] The callback sets
+ * this to the new file. For other ops: [IN] The caller sets
+ * this to the file being operated on.
+ * @serialized_data: The opaque u64 handle; the preserve/prepare/freeze
+ *                  operations may update this field.
+ * @private_data: Private data for the file used to hold runtime state that
+ * is not preserved. Set by the handler's .preserve()
+ * callback, and must be freed in the handler's
+ * .unpreserve() callback.
+ *
+ * This structure bundles all parameters for the file operation callbacks.
+ * The 'serialized_data' and 'file' fields are used for both input and output.
+ */
+struct liveupdate_file_op_args {
+ struct liveupdate_file_handler *handler;
+ bool retrieved;
+ struct file *file;
+ u64 serialized_data;
+ void *private_data;
+};
+
+/**
+ * struct liveupdate_file_ops - Callbacks for live-updatable files.
+ * @can_preserve: Required. Lightweight check to see if this handler is
+ * compatible with the given file.
+ * @preserve: Required. Performs state-saving for the file.
+ * @unpreserve: Required. Cleans up any resources allocated by @preserve.
+ * @freeze: Optional. Final actions just before kernel transition.
+ * @unfreeze: Optional. Undo freeze operations.
+ * @retrieve: Required. Restores the file in the new kernel.
+ * @can_finish:     Optional. Checks whether this FD can finish, i.e. whether
+ *                  all restoration prerequisites for this FD are satisfied.
+ *                  Called before finish so that finish can succeed for all
+ *                  resources in the session.
+ * @finish: Required. Final cleanup in the new kernel.
+ * @owner: Module reference
+ *
+ * All operations (except can_preserve) receive a pointer to a
+ * 'struct liveupdate_file_op_args' containing the necessary context.
+ */
+struct liveupdate_file_ops {
+ bool (*can_preserve)(struct liveupdate_file_handler *handler,
+ struct file *file);
+ int (*preserve)(struct liveupdate_file_op_args *args);
+ void (*unpreserve)(struct liveupdate_file_op_args *args);
+ int (*freeze)(struct liveupdate_file_op_args *args);
+ void (*unfreeze)(struct liveupdate_file_op_args *args);
+ int (*retrieve)(struct liveupdate_file_op_args *args);
+ bool (*can_finish)(struct liveupdate_file_op_args *args);
+ void (*finish)(struct liveupdate_file_op_args *args);
+ struct module *owner;
+};
+
+/**
+ * struct liveupdate_file_handler - Represents a handler for a live-updatable file type.
+ * @ops: Callback functions
+ * @compatible: The compatibility string (e.g., "memfd-v1", "vfiofd-v1")
+ * that uniquely identifies the file type this handler
+ * supports. This is matched against the compatible string
+ * associated with individual &struct file instances.
+ *
+ * Modules that want to support live update for specific file types should
+ * register an instance of this structure. LUO uses this registration to
+ * determine if a given file can be preserved and to find the appropriate
+ * operations to manage its state across the update.
+ */
+struct liveupdate_file_handler {
+ const struct liveupdate_file_ops *ops;
+ const char compatible[LIVEUPDATE_HNDL_COMPAT_LENGTH];
+
+ /* private: */
+
+ /*
+ * Used for linking this handler instance into a global list of
+ * registered file handlers.
+ */
+ struct list_head __private list;
+};
+
+#ifdef CONFIG_LIVEUPDATE
+
+/* Return true if live update orchestrator is enabled */
+bool liveupdate_enabled(void);
+
+/* Called during kexec to tell LUO that the system is entering reboot */
+int liveupdate_reboot(void);
+
+int liveupdate_register_file_handler(struct liveupdate_file_handler *fh);
+int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh);
+
+#else /* CONFIG_LIVEUPDATE */
+
+static inline bool liveupdate_enabled(void)
+{
+ return false;
+}
+
+static inline int liveupdate_reboot(void)
+{
+ return 0;
+}
+
+static inline int liveupdate_register_file_handler(struct liveupdate_file_handler *fh)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_LIVEUPDATE */
+#endif /* _LINUX_LIVEUPDATE_H */
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 1957635e6d5f..607b2360c938 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef LLIST_H
#define LLIST_H
/*
@@ -45,23 +46,12 @@
*
* Copyright 2010,2011 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/atomic.h>
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
struct llist_head {
struct llist_node *first;
@@ -84,6 +74,33 @@ static inline void init_llist_head(struct llist_head *list)
}
/**
+ * init_llist_node - initialize lock-less list node
+ * @node: the node to be initialised
+ *
+ * In cases where there is a need to test if a node is on
+ * a list or not, this initialises the node to clearly
+ * not be on any list.
+ */
+static inline void init_llist_node(struct llist_node *node)
+{
+ WRITE_ONCE(node->next, node);
+}
+
+/**
+ * llist_on_list - test if a lock-less list node is on a list
+ * @node: the node to test
+ *
+ * When a node is on a list the ->next pointer will be NULL or
+ * some other node. It can never point to itself. We use that
+ * in init_llist_node() to record that a node is not on any list,
+ * and here to test whether it is on any list.
+ */
+static inline bool llist_on_list(const struct llist_node *node)
+{
+ return READ_ONCE(node->next) != node;
+}
+
+/**
* llist_entry - get the struct of this entry
* @ptr: the &struct llist_node pointer.
* @type: the type of the struct this is embedded in.
@@ -198,17 +215,44 @@ static inline void init_llist_head(struct llist_head *list)
*/
static inline bool llist_empty(const struct llist_head *head)
{
- return ACCESS_ONCE(head->first) == NULL;
+ return READ_ONCE(head->first) == NULL;
}
static inline struct llist_node *llist_next(struct llist_node *node)
{
- return node->next;
+ return READ_ONCE(node->next);
+}
+
+/**
+ * llist_add_batch - add several linked entries in batch
+ * @new_first: first entry in batch to be added
+ * @new_last: last entry in batch to be added
+ * @head: the head for your lock-less list
+ *
+ * Return whether list is empty before adding.
+ */
+static inline bool llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head)
+{
+ struct llist_node *first = READ_ONCE(head->first);
+
+ do {
+ new_last->next = first;
+ } while (!try_cmpxchg(&head->first, &first, new_first));
+
+ return !first;
+}
+
+static inline bool __llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head)
+{
+ new_last->next = head->first;
+ head->first = new_first;
+ return new_last->next == NULL;
}
-extern bool llist_add_batch(struct llist_node *new_first,
- struct llist_node *new_last,
- struct llist_head *head);
/**
* llist_add - add a new entry
* @new: new entry to be added
@@ -221,6 +265,11 @@ static inline bool llist_add(struct llist_node *new, struct llist_head *head)
return llist_add_batch(new, new, head);
}
+static inline bool __llist_add(struct llist_node *new, struct llist_head *head)
+{
+ return __llist_add_batch(new, new, head);
+}
+
/**
* llist_del_all - delete all entries from lock-less list
* @head: the head of lock-less list to delete all entries
@@ -234,8 +283,35 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
return xchg(&head->first, NULL);
}
+static inline struct llist_node *__llist_del_all(struct llist_head *head)
+{
+ struct llist_node *first = head->first;
+
+ head->first = NULL;
+ return first;
+}
+
extern struct llist_node *llist_del_first(struct llist_head *head);
+/**
+ * llist_del_first_init - delete first entry from lock-less list and mark it as being off-list
+ * @head: the head of lock-less list to delete from.
+ *
+ * This behaves the same as llist_del_first() except that init_llist_node() is called
+ * on the returned node so that llist_on_list() will report false for the node.
+ */
+static inline struct llist_node *llist_del_first_init(struct llist_head *head)
+{
+ struct llist_node *n = llist_del_first(head);
+
+ if (n)
+ init_llist_node(n);
+ return n;
+}
+
+extern bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this);
+
struct llist_node *llist_reverse_order(struct llist_node *head);
#endif /* LLIST_H */
diff --git a/include/linux/llist_api.h b/include/linux/llist_api.h
new file mode 100644
index 000000000000..625bec0393a1
--- /dev/null
+++ b/include/linux/llist_api.h
@@ -0,0 +1 @@
+#include <linux/llist.h>
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
new file mode 100644
index 000000000000..b0e6ab329b00
--- /dev/null
+++ b/include/linux/local_lock.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LOCAL_LOCK_H
+#define _LINUX_LOCAL_LOCK_H
+
+#include <linux/local_lock_internal.h>
+
+/**
+ * local_lock_init - Runtime initialize a lock instance
+ * @lock: The lock variable
+ */
+#define local_lock_init(lock) __local_lock_init(lock)
+
+/**
+ * local_lock - Acquire a per CPU local lock
+ * @lock: The lock variable
+ */
+#define local_lock(lock) __local_lock(this_cpu_ptr(lock))
+
+/**
+ * local_lock_irq - Acquire a per CPU local lock and disable interrupts
+ * @lock: The lock variable
+ */
+#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock))
+
+/**
+ * local_lock_irqsave - Acquire a per CPU local lock, save and disable
+ * interrupts
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ */
+#define local_lock_irqsave(lock, flags) \
+ __local_lock_irqsave(this_cpu_ptr(lock), flags)
+
+/**
+ * local_unlock - Release a per CPU local lock
+ * @lock: The lock variable
+ */
+#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock))
+
+/**
+ * local_unlock_irq - Release a per CPU local lock and enable interrupts
+ * @lock: The lock variable
+ */
+#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock))
+
+/**
+ * local_unlock_irqrestore - Release a per CPU local lock and restore
+ * interrupt flags
+ * @lock: The lock variable
+ * @flags: Interrupt flags to restore
+ */
+#define local_unlock_irqrestore(lock, flags) \
+ __local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+
+/**
+ * local_trylock_init - Runtime initialize a lock instance
+ * @lock: The lock variable
+ */
+#define local_trylock_init(lock) __local_trylock_init(lock)
+
+/**
+ * local_trylock - Try to acquire a per CPU local lock
+ * @lock: The lock variable
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
+
+#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
+
+/**
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ * interrupts if acquired
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define local_trylock_irqsave(lock, flags) \
+ __local_trylock_irqsave(this_cpu_ptr(lock), flags)
+
+DEFINE_GUARD(local_lock, local_lock_t __percpu*,
+ local_lock(_T),
+ local_unlock(_T))
+DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
+ local_lock_irq(_T),
+ local_unlock_irq(_T))
+DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
+ local_lock_irqsave(_T->lock, _T->flags),
+ local_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+#define local_lock_nested_bh(_lock) \
+ __local_lock_nested_bh(this_cpu_ptr(_lock))
+
+#define local_unlock_nested_bh(_lock) \
+ __local_unlock_nested_bh(this_cpu_ptr(_lock))
+
+DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
+ local_lock_nested_bh(_T),
+ local_unlock_nested_bh(_T))
+
+#endif
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
new file mode 100644
index 000000000000..8f82b4eb542f
--- /dev/null
+++ b/include/linux/local_lock_internal.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LOCAL_LOCK_H
+# error "Do not include directly, include linux/local_lock.h"
+#endif
+
+#include <linux/percpu-defs.h>
+#include <linux/lockdep.h>
+
+#ifndef CONFIG_PREEMPT_RT
+
+typedef struct {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+#endif
+} local_lock_t;
+
+/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
+typedef struct {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+#endif
+ u8 acquired;
+} local_trylock_t;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCAL_LOCK_DEBUG_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ .lock_type = LD_LOCK_PERCPU, \
+ }, \
+ .owner = NULL,
+
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
+ LOCAL_LOCK_DEBUG_INIT(lockname)
+
+static inline void local_lock_acquire(local_lock_t *l)
+{
+ lock_map_acquire(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
+static inline void local_trylock_acquire(local_lock_t *l)
+{
+ lock_map_acquire_try(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
+static inline void local_lock_release(local_lock_t *l)
+{
+ DEBUG_LOCKS_WARN_ON(l->owner != current);
+ l->owner = NULL;
+ lock_map_release(&l->dep_map);
+}
+
+static inline void local_lock_debug_init(local_lock_t *l)
+{
+ l->owner = NULL;
+}
+#else /* CONFIG_DEBUG_LOCK_ALLOC */
+# define LOCAL_LOCK_DEBUG_INIT(lockname)
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
+static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_trylock_acquire(local_lock_t *l) { }
+static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+
+#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+#define INIT_LOCAL_TRYLOCK(lockname) { LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
+
+#define __local_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_PERCPU); \
+ local_lock_debug_init(lock); \
+} while (0)
+
+#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+
+#define __spinlock_nested_bh_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_NORMAL); \
+ local_lock_debug_init(lock); \
+} while (0)
+
+#define __local_lock_acquire(lock) \
+ do { \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
+ \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(__tl->acquired == 0); \
+ WRITE_ONCE(__tl->acquired, 1); \
+ }), \
+ local_lock_t *: (void)0); \
+ local_lock_acquire(__l); \
+ } while (0)
+
+#define __local_lock(lock) \
+ do { \
+ preempt_disable(); \
+ __local_lock_acquire(lock); \
+ } while (0)
+
+#define __local_lock_irq(lock) \
+ do { \
+ local_irq_disable(); \
+ __local_lock_acquire(lock); \
+ } while (0)
+
+#define __local_lock_irqsave(lock, flags) \
+ do { \
+ local_irq_save(flags); \
+ __local_lock_acquire(lock); \
+ } while (0)
+
+#define __local_trylock(lock) \
+ ({ \
+ local_trylock_t *__tl; \
+ \
+ preempt_disable(); \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
+ preempt_enable(); \
+ __tl = NULL; \
+ } else { \
+ WRITE_ONCE(__tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)__tl); \
+ } \
+ !!__tl; \
+ })
+
+#define __local_trylock_irqsave(lock, flags) \
+ ({ \
+ local_trylock_t *__tl; \
+ \
+ local_irq_save(flags); \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
+ local_irq_restore(flags); \
+ __tl = NULL; \
+ } else { \
+ WRITE_ONCE(__tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)__tl); \
+ } \
+ !!__tl; \
+ })
+
+/* preemption or migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
+
+#define __local_lock_release(lock) \
+ do { \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
+ \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
+ local_lock_release(__l); \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(__tl->acquired == 1); \
+ WRITE_ONCE(__tl->acquired, 0); \
+ }), \
+ local_lock_t *: (void)0); \
+ } while (0)
+
+#define __local_unlock(lock) \
+ do { \
+ __local_lock_release(lock); \
+ preempt_enable(); \
+ } while (0)
+
+#define __local_unlock_irq(lock) \
+ do { \
+ __local_lock_release(lock); \
+ local_irq_enable(); \
+ } while (0)
+
+#define __local_unlock_irqrestore(lock, flags) \
+ do { \
+ __local_lock_release(lock); \
+ local_irq_restore(flags); \
+ } while (0)
+
+#define __local_lock_nested_bh(lock) \
+ do { \
+ lockdep_assert_in_softirq(); \
+ local_lock_acquire((lock)); \
+ } while (0)
+
+#define __local_unlock_nested_bh(lock) \
+ local_lock_release((lock))
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
+ * critical section while staying preemptible.
+ */
+typedef spinlock_t local_lock_t;
+typedef spinlock_t local_trylock_t;
+
+#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+
+#define __local_lock_init(__l) \
+ do { \
+ local_spin_lock_init((__l)); \
+ } while (0)
+
+#define __local_trylock_init(__l) __local_lock_init(__l)
+
+#define __local_lock(__lock) \
+ do { \
+ migrate_disable(); \
+ spin_lock((__lock)); \
+ } while (0)
+
+#define __local_lock_irq(lock) __local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __local_lock(lock); \
+ } while (0)
+
+#define __local_unlock(__lock) \
+ do { \
+ spin_unlock((__lock)); \
+ migrate_enable(); \
+ } while (0)
+
+#define __local_unlock_irq(lock) __local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+
+#define __local_lock_nested_bh(lock) \
+do { \
+ lockdep_assert_in_softirq_func(); \
+ spin_lock((lock)); \
+} while (0)
+
+#define __local_unlock_nested_bh(lock) \
+do { \
+ spin_unlock((lock)); \
+} while (0)
+
+#define __local_trylock(lock) \
+ ({ \
+ int __locked; \
+ \
+ if (in_nmi() | in_hardirq()) { \
+ __locked = 0; \
+ } else { \
+ migrate_disable(); \
+ __locked = spin_trylock((lock)); \
+ if (!__locked) \
+ migrate_enable(); \
+ } \
+ __locked; \
+ })
+
+#define __local_trylock_irqsave(lock, flags) \
+ ({ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __local_trylock(lock); \
+ })
+
+/* migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(__lock) \
+ (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
+
+#endif /* CONFIG_PREEMPT_RT */
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 05728396a1a1..c53c81242e72 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/bind.h
*
@@ -19,6 +20,7 @@
/* Dummy declarations */
struct svc_rqst;
struct rpc_task;
+struct rpc_clnt;
/*
* This is the set of functions for lockd->nfsd communication
@@ -26,7 +28,8 @@ struct rpc_task;
struct nlmsvc_binding {
__be32 (*fopen)(struct svc_rqst *,
struct nfs_fh *,
- struct file **);
+ struct file **,
+ int mode);
void (*fclose)(struct file *);
};
@@ -45,6 +48,7 @@ struct nlmclnt_initdata {
int noresvport;
struct net *net;
const struct nlmclnt_operations *nlmclnt_ops;
+ const struct cred *cred;
};
/*
@@ -53,6 +57,7 @@ struct nlmclnt_initdata {
extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
extern void nlmclnt_done(struct nlm_host *host);
+extern struct rpc_clnt *nlmclnt_rpc_clnt(struct nlm_host *host);
/*
* NLM client operations provide a means to modify RPC processing of NLM
@@ -74,7 +79,7 @@ struct nlmclnt_operations {
};
extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data);
-extern int lockd_up(struct net *net);
+extern int lockd_up(struct net *net, const struct cred *cred);
extern void lockd_down(struct net *net);
#endif /* LINUX_LOCKD_BIND_H */
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 0ca8109934e4..eede2ab5246f 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/debug.h
*
@@ -9,8 +10,6 @@
#ifndef LINUX_LOCKD_DEBUG_H
#define LINUX_LOCKD_DEBUG_H
-#ifdef __KERNEL__
-
#include <linux/sunrpc/debug.h>
/*
@@ -24,8 +23,6 @@
# define ifdebug(flag) if (0)
#endif
-#endif /* __KERNEL__ */
-
/*
* Debug flags
*/
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 3eca67728366..330e38776bb2 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/lockd.h
*
@@ -9,13 +10,15 @@
#ifndef LINUX_LOCKD_LOCKD_H
#define LINUX_LOCKD_LOCKD_H
-#ifdef __KERNEL__
+/* XXX: a lot of this should really be under fs/lockd. */
+#include <linux/exportfs.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>
#include <linux/fs.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/utsname.h>
#include <linux/lockd/bind.h>
#include <linux/lockd/xdr.h>
@@ -57,7 +60,7 @@ struct nlm_host {
u32 h_state; /* pseudo-state counter */
u32 h_nsmstate; /* true remote NSM state */
u32 h_pidcount; /* Pseudopids */
- atomic_t h_count; /* reference count */
+ refcount_t h_count; /* reference count */
struct mutex h_mutex; /* mutex for pmap binding */
unsigned long h_nextrebind; /* next portmap call */
unsigned long h_expires; /* eligible for GC */
@@ -68,6 +71,7 @@ struct nlm_host {
struct nsm_handle *h_nsmhandle; /* NSM status handle */
char *h_addrbuf; /* address eyecatcher */
struct net *net; /* host net */
+ const struct cred *h_cred;
char nodename[UNX_MAXNODENAME + 1];
const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */
};
@@ -82,7 +86,7 @@ struct nlm_host {
struct nsm_handle {
struct list_head sm_link;
- atomic_t sm_count;
+ refcount_t sm_count;
char *sm_mon_name;
char *sm_name;
struct sockaddr_storage sm_addr;
@@ -96,21 +100,11 @@ struct nsm_handle {
/*
* Rigorous type checking on sockaddr type conversions
*/
-static inline struct sockaddr_in *nlm_addr_in(const struct nlm_host *host)
-{
- return (struct sockaddr_in *)&host->h_addr;
-}
-
static inline struct sockaddr *nlm_addr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_addr;
}
-static inline struct sockaddr_in *nlm_srcaddr_in(const struct nlm_host *host)
-{
- return (struct sockaddr_in *)&host->h_srcaddr;
-}
-
static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_srcaddr;
@@ -121,21 +115,30 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
*/
struct nlm_lockowner {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct nlm_host *host;
fl_owner_t owner;
uint32_t pid;
};
-struct nlm_wait;
+/*
+ * This is the representation of a blocked client lock.
+ */
+struct nlm_wait {
+ struct list_head b_list; /* linked list */
+ wait_queue_head_t b_wait; /* where to wait on */
+ struct nlm_host *b_host;
+ struct file_lock *b_lock; /* local file lock */
+ __be32 b_status; /* grant callback status */
+};
/*
* Memory chunk for NLM client RPC request.
*/
#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u)
struct nlm_rqst {
- atomic_t a_count;
+ refcount_t a_count;
unsigned int a_flags; /* initial RPC task flags */
struct nlm_host * a_host; /* host handle */
struct nlm_args a_args; /* arguments */
@@ -153,7 +156,8 @@ struct nlm_rqst {
struct nlm_file {
struct hlist_node f_list; /* linked list */
struct nfs_fh f_handle; /* NFS file handle */
- struct file * f_file; /* VFS file pointer */
+ struct file * f_file[2]; /* VFS file pointers,
+ indexed by O_ flags */
struct nlm_share * f_shares; /* DOS shares */
struct list_head f_blocks; /* blocked locks */
unsigned int f_locks; /* guesstimate # of locks */
@@ -192,15 +196,17 @@ struct nlm_block {
* Global variables
*/
extern const struct rpc_program nlm_program;
-extern const struct svc_procedure nlmsvc_procedures[];
+extern const struct svc_procedure nlmsvc_procedures[24];
#ifdef CONFIG_LOCKD_V4
-extern const struct svc_procedure nlmsvc_procedures4[];
+extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
-extern unsigned long nlmsvc_timeout;
+extern unsigned long nlm_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
+extern struct timer_list nlmsvc_retry;
+
/*
* Lockd client functions
*/
@@ -208,9 +214,11 @@ struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
void nlmclnt_release_call(struct nlm_rqst *);
-struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
-void nlmclnt_finish_block(struct nlm_wait *block);
-int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
+void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host,
+ struct file_lock *fl);
+void nlmclnt_queue_block(struct nlm_wait *block);
+__be32 nlmclnt_dequeue_block(struct nlm_wait *block);
+int nlmclnt_wait(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
__be32 nlmclnt_grant(const struct sockaddr *addr,
const struct nlm_lock *lock);
void nlmclnt_recovery(struct nlm_host *);
@@ -227,7 +235,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
const u32 version,
const char *hostname,
int noresvport,
- struct net *net);
+ struct net *net,
+ const struct cred *cred);
void nlmclnt_release_host(struct nlm_host *);
struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
const char *hostname,
@@ -265,26 +274,30 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
/*
* Server-side lock handling
*/
+int lock_to_openmode(struct file_lock *);
__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *, int,
struct nlm_cookie *, int);
__be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *);
-__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
- struct nlm_host *, struct nlm_lock *,
- struct nlm_lock *, struct nlm_cookie *);
+__be32 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
+ struct nlm_host *host, struct nlm_lock *lock,
+ struct nlm_lock *conflock);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
-unsigned long nlmsvc_retry_blocked(void);
+void nlmsvc_retry_blocked(struct svc_rqst *rqstp);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
void nlmsvc_release_call(struct nlm_rqst *);
+void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t);
/*
* File handling for the server personality
*/
__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
- struct nfs_fh *);
+ struct nlm_lock *);
void nlm_release_file(struct nlm_file *);
+void nlmsvc_put_lockowner(struct nlm_lockowner *);
+void nlmsvc_release_lockowner(struct nlm_lock *);
void nlmsvc_mark_resources(struct net *);
void nlmsvc_free_host_resources(struct nlm_host *);
void nlmsvc_invalidate_all(void);
@@ -295,9 +308,21 @@ void nlmsvc_invalidate_all(void);
int nlmsvc_unlock_all_by_sb(struct super_block *sb);
int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+static inline struct file *nlmsvc_file_file(const struct nlm_file *file)
+{
+ return file->f_file[O_RDONLY] ?
+ file->f_file[O_RDONLY] : file->f_file[O_WRONLY];
+}
+
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
- return file_inode(file->f_file);
+ return file_inode(nlmsvc_file_file(file));
+}
+
+static inline bool
+nlmsvc_file_cannot_lock(const struct nlm_file *file)
+{
+ return exportfs_cannot_lock(nlmsvc_file_file(file)->f_path.dentry->d_sb->s_export_op);
}
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
@@ -357,16 +382,14 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
static inline int nlm_compare_locks(const struct file_lock *fl1,
const struct file_lock *fl2)
{
- return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
- && fl1->fl_pid == fl2->fl_pid
- && fl1->fl_owner == fl2->fl_owner
+ return file_inode(fl1->c.flc_file) == file_inode(fl2->c.flc_file)
+ && fl1->c.flc_pid == fl2->c.flc_pid
+ && fl1->c.flc_owner == fl2->c.flc_owner
&& fl1->fl_start == fl2->fl_start
&& fl1->fl_end == fl2->fl_end
- &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK);
+ &&(fl1->c.flc_type == fl2->c.flc_type || fl2->c.flc_type == F_UNLCK);
}
extern const struct lock_manager_operations nlmsvc_lock_operations;
-#endif /* __KERNEL__ */
-
#endif /* LINUX_LOCKD_LOCKD_H */
diff --git a/include/linux/lockd/nlm.h b/include/linux/lockd/nlm.h
index d9d46e442538..6e343ef760dc 100644
--- a/include/linux/lockd/nlm.h
+++ b/include/linux/lockd/nlm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/nlm.h
*
diff --git a/include/linux/lockd/share.h b/include/linux/lockd/share.h
index 630c5bf69b07..1f18a9faf645 100644
--- a/include/linux/lockd/share.h
+++ b/include/linux/lockd/share.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/share.h
*
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 7acbecc21a40..17d53165d9f2 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/xdr.h
*
@@ -10,6 +11,7 @@
#define LOCKD_XDR_H
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/nfs.h>
#include <linux/sunrpc/xdr.h>
@@ -40,6 +42,8 @@ struct nlm_lock {
struct nfs_fh fh;
struct xdr_netobj oh;
u32 svid;
+ u64 lock_start;
+ u64 lock_len;
struct file_lock fl;
};
@@ -48,7 +52,7 @@ struct nlm_lock {
* FreeBSD uses 16, Apple Mac OS X 10.3 uses 20. Therefore we set it to
* 32 bytes.
*/
-
+
struct nlm_cookie
{
unsigned char data[NLM_MAXCOOKIELEN];
@@ -69,8 +73,6 @@ struct nlm_args {
u32 fsm_mode;
};
-typedef struct nlm_args nlm_args;
-
/*
* Generic lockd result
*/
@@ -95,24 +97,19 @@ struct nlm_reboot {
*/
#define NLMSVC_XDRSIZE sizeof(struct nlm_args)
-int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_testres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_notify(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
+bool nlmsvc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+
+bool nlmsvc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
#endif /* LOCKD_XDR_H */
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index bf1645609225..72831e35dca3 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/xdr4.h
*
@@ -21,27 +22,22 @@
#define nlm4_fbig cpu_to_be32(NLM_FBIG)
#define nlm4_failed cpu_to_be32(NLM_FAILED)
+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len);
+bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
-int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_testres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_notify(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
extern const struct rpc_version nlm_version4;
#endif /* LOCKD_XDR4_H */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index bfa8e0b0d6f1..dd634103b014 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -1,24 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Runtime locking correctness validator
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
- * see Documentation/locking/lockdep-design.txt for more details.
+ * see Documentation/locking/lockdep-design.rst for more details.
*/
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
-struct task_struct;
-struct lockdep_map;
-
-/* for sysctl */
-extern int prove_locking;
-extern int lock_stat;
+#include <linux/lockdep_types.h>
+#include <linux/smp.h>
+#include <asm/percpu.h>
-#define MAX_LOCKDEP_SUBCLASSES 8UL
-
-#include <linux/types.h>
+struct task_struct;
#ifdef CONFIG_LOCKDEP
@@ -27,144 +23,6 @@ extern int lock_stat;
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
-/*
- * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
- * the total number of states... :-(
- */
-#define XXX_LOCK_USAGE_STATES (1+2*4)
-
-/*
- * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
- * cached in the instance of lockdep_map
- *
- * Currently main class (subclass == 0) and signle depth subclass
- * are cached in lockdep_map. This optimization is mainly targeting
- * on rq->lock. double_rq_lock() acquires this highly competitive with
- * single depth.
- */
-#define NR_LOCKDEP_CACHING_CLASSES 2
-
-/*
- * Lock-classes are keyed via unique addresses, by embedding the
- * lockclass-key into the kernel (or module) .data section. (For
- * static locks we use the lock address itself as the key.)
- */
-struct lockdep_subclass_key {
- char __one_byte;
-} __attribute__ ((__packed__));
-
-struct lock_class_key {
- struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
-};
-
-extern struct lock_class_key __lockdep_no_validate__;
-
-#define LOCKSTAT_POINTS 4
-
-/*
- * The lock-class itself:
- */
-struct lock_class {
- /*
- * class-hash:
- */
- struct hlist_node hash_entry;
-
- /*
- * global list of all lock-classes:
- */
- struct list_head lock_entry;
-
- struct lockdep_subclass_key *key;
- unsigned int subclass;
- unsigned int dep_gen_id;
-
- /*
- * IRQ/softirq usage tracking bits:
- */
- unsigned long usage_mask;
- struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
-
- /*
- * These fields represent a directed graph of lock dependencies,
- * to every node we attach a list of "forward" and a list of
- * "backward" graph nodes.
- */
- struct list_head locks_after, locks_before;
-
- /*
- * Generation counter, when doing certain classes of graph walking,
- * to ensure that we check one node only once:
- */
- unsigned int version;
-
- /*
- * Statistics counter:
- */
- unsigned long ops;
-
- const char *name;
- int name_version;
-
-#ifdef CONFIG_LOCK_STAT
- unsigned long contention_point[LOCKSTAT_POINTS];
- unsigned long contending_point[LOCKSTAT_POINTS];
-#endif
-};
-
-#ifdef CONFIG_LOCK_STAT
-struct lock_time {
- s64 min;
- s64 max;
- s64 total;
- unsigned long nr;
-};
-
-enum bounce_type {
- bounce_acquired_write,
- bounce_acquired_read,
- bounce_contended_write,
- bounce_contended_read,
- nr_bounce_types,
-
- bounce_acquired = bounce_acquired_write,
- bounce_contended = bounce_contended_write,
-};
-
-struct lock_class_stats {
- unsigned long contention_point[LOCKSTAT_POINTS];
- unsigned long contending_point[LOCKSTAT_POINTS];
- struct lock_time read_waittime;
- struct lock_time write_waittime;
- struct lock_time read_holdtime;
- struct lock_time write_holdtime;
- unsigned long bounces[nr_bounce_types];
-};
-
-struct lock_class_stats lock_stats(struct lock_class *class);
-void clear_lock_stats(struct lock_class *class);
-#endif
-
-/*
- * Map the lock object (the lock instance) to the lock-class object.
- * This is embedded into specific lock instances:
- */
-struct lockdep_map {
- struct lock_class_key *key;
- struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
- const char *name;
-#ifdef CONFIG_LOCK_STAT
- int cpu;
- unsigned long ip;
-#endif
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
- /*
- * Whether it's a crosslock.
- */
- int cross;
-#endif
-};
-
static inline void lockdep_copy_map(struct lockdep_map *to,
struct lockdep_map *from)
{
@@ -190,8 +48,13 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
struct lock_list {
struct list_head entry;
struct lock_class *class;
- struct stack_trace trace;
- int distance;
+ struct lock_class *links_to;
+ const struct lock_trace *trace;
+ u16 distance;
+ /* bitmap of different dependencies from head to this */
+ u8 dep;
+ /* used by BFS to record whether "prev -> this" only has -(*R)-> */
+ u8 only_xr;
/*
* The parent field is used to implement breadth-first search, and the
@@ -200,11 +63,17 @@ struct lock_list {
struct lock_list *parent;
};
-/*
- * We record lock dependency chains, so that we can cache them:
+/**
+ * struct lock_chain - lock dependency chain record
+ *
+ * @irq_context: the same as irq_context in held_lock below
+ * @depth: the number of held locks in this chain
+ * @base: the index in chain_hlocks for this chain
+ * @entry: the collided lock chains in lock_chain hash list
+ * @chain_key: the hash key of this lock_chain
*/
struct lock_chain {
- /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+ /* see BUILD_BUG_ON()s in add_chain_cache() */
unsigned int irq_context : 2,
depth : 6,
base : 24;
@@ -213,159 +82,42 @@ struct lock_chain {
u64 chain_key;
};
-#define MAX_LOCKDEP_KEYS_BITS 13
/*
- * Subtract one because we offset hlock->class_idx by 1 in order
- * to make 0 mean no class. This avoids overflowing the class_idx
- * bitfield and hitting the BUG in hlock_class().
+ * Initialization, self-test and debugging-output methods:
*/
-#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
-
-struct held_lock {
- /*
- * One-way hash of the dependency chain up to this point. We
- * hash the hashes step by step as the dependency chain grows.
- *
- * We use it for dependency-caching and we skip detection
- * passes and dependency-updates if there is a cache-hit, so
- * it is absolutely critical for 100% coverage of the validator
- * to have a unique key value for every unique dependency path
- * that can occur in the system, to make a unique hash value
- * as likely as possible - hence the 64-bit width.
- *
- * The task struct holds the current hash value (initialized
- * with zero), here we store the previous hash value:
- */
- u64 prev_chain_key;
- unsigned long acquire_ip;
- struct lockdep_map *instance;
- struct lockdep_map *nest_lock;
-#ifdef CONFIG_LOCK_STAT
- u64 waittime_stamp;
- u64 holdtime_stamp;
-#endif
- unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
- /*
- * The lock-stack is unified in that the lock chains of interrupt
- * contexts nest ontop of process context chains, but we 'separate'
- * the hashes by starting with 0 if we cross into an interrupt
- * context, and we also keep do not add cross-context lock
- * dependencies - the lock usage graph walking covers that area
- * anyway, and we'd just unnecessarily increase the number of
- * dependencies otherwise. [Note: hardirq and softirq contexts
- * are separated from each other too.]
- *
- * The following field is used to detect when we cross into an
- * interrupt context:
- */
- unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
- unsigned int trylock:1; /* 16 bits */
-
- unsigned int read:2; /* see lock_acquire() comment */
- unsigned int check:1; /* see lock_acquire() comment */
- unsigned int hardirqs_off:1;
- unsigned int references:12; /* 32 bits */
- unsigned int pin_count;
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
- /*
- * Generation id.
- *
- * A value of cross_gen_id will be stored when holding this,
- * which is globally increased whenever each crosslock is held.
- */
- unsigned int gen_id;
-#endif
-};
+extern void lockdep_init(void);
+extern void lockdep_reset(void);
+extern void lockdep_reset_lock(struct lockdep_map *lock);
+extern void lockdep_free_key_range(void *start, unsigned long size);
+extern asmlinkage void lockdep_sys_exit(void);
+extern void lockdep_set_selftest_task(struct task_struct *task);
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCK_TRACE_ENTRIES 5
+extern void lockdep_init_task(struct task_struct *task);
/*
- * This is for keeping locks waiting for commit so that true dependencies
- * can be added at commit step.
+ * Split the recursion counter in two to readily detect 'off' vs recursion.
*/
-struct hist_lock {
- /*
- * Id for each entry in the ring buffer. This is used to
- * decide whether the ring buffer was overwritten or not.
- *
- * For example,
- *
- * |<----------- hist_lock ring buffer size ------->|
- * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
- * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
- *
- * where 'p' represents an acquisition in process
- * context, 'i' represents an acquisition in irq
- * context.
- *
- * In this example, the ring buffer was overwritten by
- * acquisitions in irq context, that should be detected on
- * rollback or commit.
- */
- unsigned int hist_id;
-
- /*
- * Seperate stack_trace data. This will be used at commit step.
- */
- struct stack_trace trace;
- unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
-
- /*
- * Seperate hlock instance. This will be used at commit step.
- *
- * TODO: Use a smaller data structure containing only necessary
- * data. However, we should make lockdep code able to handle the
- * smaller one first.
- */
- struct held_lock hlock;
-};
+#define LOCKDEP_RECURSION_BITS 16
+#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS)
+#define LOCKDEP_RECURSION_MASK (LOCKDEP_OFF - 1)
/*
- * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
- * be called instead of lockdep_init_map().
+ * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
+ * to header dependencies.
*/
-struct cross_lock {
- /*
- * When more than one acquisition of crosslocks are overlapped,
- * we have to perform commit for them based on cross_gen_id of
- * the first acquisition, which allows us to add more true
- * dependencies.
- *
- * Moreover, when no acquisition of a crosslock is in progress,
- * we should not perform commit because the lock might not exist
- * any more, which might cause incorrect memory access. So we
- * have to track the number of acquisitions of a crosslock.
- */
- int nr_acquire;
-
- /*
- * Seperate hlock instance. This will be used at commit step.
- *
- * TODO: Use a smaller data structure containing only necessary
- * data. However, we should make lockdep code able to handle the
- * smaller one first.
- */
- struct held_lock hlock;
-};
-struct lockdep_map_cross {
- struct lockdep_map map;
- struct cross_lock xlock;
-};
-#endif
+#define lockdep_off() \
+do { \
+ current->lockdep_recursion += LOCKDEP_OFF; \
+} while (0)
-/*
- * Initialization, self-test and debugging-output methods:
- */
-extern void lockdep_info(void);
-extern void lockdep_reset(void);
-extern void lockdep_reset_lock(struct lockdep_map *lock);
-extern void lockdep_free_key_range(void *start, unsigned long size);
-extern asmlinkage void lockdep_sys_exit(void);
+#define lockdep_on() \
+do { \
+ current->lockdep_recursion -= LOCKDEP_OFF; \
+} while (0)
-extern void lockdep_off(void);
-extern void lockdep_on(void);
+extern void lockdep_register_key(struct lock_class_key *key);
+extern void lockdep_unregister_key(struct lock_class_key *key);
/*
* These methods are used by specific locking variants (spinlocks,
@@ -373,8 +125,28 @@ extern void lockdep_on(void);
* to lockdep:
*/
-extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
+
+static inline void
+lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer)
+{
+ lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
+}
+
+static inline void
+lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner)
+{
+ lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
+}
+
+static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass)
+{
+ lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
+}
/*
* Reinitialize a lock key - for cases where there is special locking or
@@ -382,18 +154,51 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
* of dependencies wrong: they are either too broad (they need a class-split)
* or they are too narrow (they suffer from a false class-split):
*/
-#define lockdep_set_class(lock, key) \
- lockdep_init_map(&(lock)->dep_map, #key, key, 0)
-#define lockdep_set_class_and_name(lock, key, name) \
- lockdep_init_map(&(lock)->dep_map, name, key, 0)
-#define lockdep_set_class_and_subclass(lock, key, sub) \
- lockdep_init_map(&(lock)->dep_map, #key, key, sub)
-#define lockdep_set_subclass(lock, sub) \
- lockdep_init_map(&(lock)->dep_map, #lock, \
- (lock)->dep_map.key, sub)
-
+#define lockdep_set_class(lock, key) \
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+#define lockdep_set_class_and_name(lock, key, name) \
+ lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+#define lockdep_set_subclass(lock, sub) \
+ lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+/**
+ * lockdep_set_novalidate_class: disable checking of lock ordering on a given
+ * lock
+ * @lock: Lock to mark
+ *
+ * Lockdep will still record that this lock has been taken, and print held
+ * instances when dumping locks.
+ */
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
+
+/**
+ * lockdep_set_notrack_class: disable lockdep tracking of a given lock entirely
+ * @lock: Lock to mark
+ *
+ * Bigger hammer than lockdep_set_novalidate_class: so far just for bcachefs,
+ * which takes more locks than lockdep is able to track (48).
+ */
+#define lockdep_set_notrack_class(lock) \
+ lockdep_set_class_and_name(lock, &__lockdep_no_track__, #lock)
+
/*
* Compare locking classes
*/
@@ -423,15 +228,23 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip);
-extern void lock_release(struct lockdep_map *lock, int nested,
- unsigned long ip);
+extern void lock_release(struct lockdep_map *lock, unsigned long ip);
+
+extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
+ int read, int check, struct lockdep_map *nest_lock,
+ unsigned long ip);
+
+/* lock_is_held_type() returns */
+#define LOCK_STATE_UNKNOWN -1
+#define LOCK_STATE_NOT_HELD 0
+#define LOCK_STATE_HELD 1
/*
* Same "read" as for lock_acquire(), except -1 means any.
*/
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
{
return lock_is_held_type(lock, -1);
}
@@ -443,6 +256,9 @@ extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
+#define lock_set_novalidate_class(l, n, i) \
+ lock_set_class(l, n, &__lockdep_no_validate__, 0, i)
+
static inline void lock_set_subclass(struct lockdep_map *lock,
unsigned int subclass, unsigned long ip)
{
@@ -451,33 +267,37 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
-struct pin_cookie { unsigned int val; };
-
#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
-# define INIT_LOCKDEP .lockdep_recursion = 0,
-
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
-#define lockdep_assert_held(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held(l)); \
- } while (0)
+#define lockdep_assert(cond) \
+ do { WARN_ON(debug_locks && !(cond)); } while (0)
+
+#define lockdep_assert_once(cond) \
+ do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)
-#define lockdep_assert_held_exclusive(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \
- } while (0)
+#define lockdep_assert_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
-#define lockdep_assert_held_read(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \
- } while (0)
+#define lockdep_assert_not_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
-#define lockdep_assert_held_once(l) do { \
- WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
- } while (0)
+#define lockdep_assert_held_write(l) \
+ lockdep_assert(lockdep_is_held_type(l, 0))
+
+#define lockdep_assert_held_read(l) \
+ lockdep_assert(lockdep_is_held_type(l, 1))
+
+#define lockdep_assert_held_once(l) \
+ lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+
+#define lockdep_assert_none_held_once() \
+ lockdep_assert_once(!current->lockdep_depth)
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
@@ -485,8 +305,22 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
+/*
+ * Must use lock_map_acquire_try() with override maps to avoid
+ * lockdep thinking they participate in the block chain.
+ */
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
+ struct lockdep_map _name = { \
+ .name = #_name "-wait-type-override", \
+ .wait_type_inner = _wait_type, \
+ .lock_type = LD_LOCK_WAIT_OVERRIDE, }
+
#else /* !CONFIG_LOCKDEP */
+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
static inline void lockdep_off(void)
{
}
@@ -495,12 +329,23 @@ static inline void lockdep_on(void)
{
}
+static inline void lockdep_set_selftest_task(struct task_struct *task)
+{
+}
+
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
-# define lock_release(l, n, i) do { } while (0)
+# define lock_release(l, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
-# define lock_set_class(l, n, k, s, i) do { } while (0)
+# define lock_set_class(l, n, key, s, i) do { (void)(key); } while (0)
+# define lock_set_novalidate_class(l, n, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
-# define lockdep_info() do { } while (0)
+# define lockdep_init() do { } while (0)
+# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
+ do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
+ do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_wait(lock, name, key, sub, inner) \
+ do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
@@ -511,6 +356,7 @@ static inline void lockdep_on(void)
#define lockdep_set_subclass(lock, sub) do { } while (0)
#define lockdep_set_novalidate_class(lock) do { } while (0)
+#define lockdep_set_notrack_class(lock) do { } while (0)
/*
* We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
@@ -518,74 +364,65 @@ static inline void lockdep_on(void)
* #ifdef the call himself.
*/
-# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
-/*
- * The class key takes no space if lockdep is disabled:
- */
-struct lock_class_key { };
+
+static inline void lockdep_register_key(struct lock_class_key *key)
+{
+}
+
+static inline void lockdep_unregister_key(struct lock_class_key *key)
+{
+}
#define lockdep_depth(tsk) (0)
+/*
+ * Dummy forward declarations, allow users to write less ifdef-y code
+ * and depend on dead code elimination.
+ */
+extern int lock_is_held(const void *);
+extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r) (1)
+#define lockdep_assert(c) do { } while (0)
+#define lockdep_assert_once(c) do { } while (0)
+
#define lockdep_assert_held(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0)
+#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
+#define lockdep_assert_none_held_once() do { } while (0)
#define lockdep_recursing(tsk) (0)
-struct pin_cookie { };
-
#define NIL_COOKIE (struct pin_cookie){ }
-#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; })
+#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
+ struct lockdep_map __maybe_unused _name = {}
+
#endif /* !LOCKDEP */
+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);
+
+#define lock_set_cmp_fn(lock, ...) lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
+#else
+#define lock_set_cmp_fn(lock, ...) do { } while (0)
+#endif
+
enum xhlock_context_t {
XHLOCK_HARD,
XHLOCK_SOFT,
XHLOCK_CTX_NR,
};
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
- const char *name,
- struct lock_class_key *key,
- int subclass);
-extern void lock_commit_crosslock(struct lockdep_map *lock);
-
-/*
- * What we essencially have to initialize is 'nr_acquire'. Other members
- * will be initialized in add_xlock().
- */
-#define STATIC_CROSS_LOCK_INIT() \
- { .nr_acquire = 0,}
-
-#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
- { .map.name = (_name), .map.key = (void *)(_key), \
- .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
-
-/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
- { .name = (_name), .key = (void *)(_key), .cross = 0, }
-
-extern void crossrelease_hist_start(enum xhlock_context_t c);
-extern void crossrelease_hist_end(enum xhlock_context_t c);
-extern void lockdep_invariant_state(bool force);
-extern void lockdep_init_task(struct task_struct *task);
-extern void lockdep_free_task(struct task_struct *task);
-#else /* !CROSSRELEASE */
-#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
* To initialize a lockdep_map statically use this macro.
* Note that _name must not be NULL.
@@ -593,12 +430,8 @@ extern void lockdep_free_task(struct task_struct *task);
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), }
-static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
-static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_invariant_state(bool force) {}
-static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
-#endif /* CROSSRELEASE */
#ifdef CONFIG_LOCK_STAT
@@ -639,24 +472,7 @@ do { \
#endif /* CONFIG_LOCK_STAT */
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
@@ -664,6 +480,20 @@ static inline void print_irqtrace_events(struct task_struct *curr)
}
#endif
+/* Variable used to make lockdep treat read_lock() as recursive in selftests */
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+extern unsigned int force_read_lock_recursive;
+#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+#define force_read_lock_recursive 0
+#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+
+#ifdef CONFIG_LOCKDEP
+extern bool read_lock_is_recursive(void);
+#else /* CONFIG_LOCKDEP */
+/* If !LOCKDEP, the value is meaningless */
+#define read_lock_is_recursive() 0
+#endif
+
/*
* For trivial one-depth nesting of a lock-class, the following
* global define can be used. (Subsystems with multiple levels
@@ -682,46 +512,144 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define spin_release(l, n, i) lock_release(l, n, i)
+#define spin_release(l, i) lock_release(l, i)
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
-#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define rwlock_release(l, n, i) lock_release(l, n, i)
+#define rwlock_acquire_read(l, s, t, i) \
+do { \
+ if (read_lock_is_recursive()) \
+ lock_acquire_shared_recursive(l, s, t, NULL, i); \
+ else \
+ lock_acquire_shared(l, s, t, NULL, i); \
+} while (0)
+
+#define rwlock_release(l, i) lock_release(l, i)
#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define seqcount_release(l, n, i) lock_release(l, n, i)
+#define seqcount_release(l, i) lock_release(l, i)
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define mutex_release(l, n, i) lock_release(l, n, i)
+#define mutex_release(l, i) lock_release(l, i)
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
-#define rwsem_release(l, n, i) lock_release(l, n, i)
+#define rwsem_release(l, i) lock_release(l, i)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_try(l) lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
-#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l) lock_release(l, _THIS_IP_)
+#define lock_map_sync(l) lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
-# define might_lock(lock) \
+# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
- lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-# define might_lock_read(lock) \
+# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
- lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
+} while (0)
+# define might_lock_nested(lock, subclass) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
+ _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
+
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
+DECLARE_PER_CPU(unsigned int, lockdep_recursion);
+
+#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
+
+#define lockdep_assert_irqs_enabled() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_irqs_disabled() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_in_irq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
+} while (0)
+
+#define lockdep_assert_no_hardirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
+ !this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+#define lockdep_assert_preemption_enabled() \
+do { \
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
+ __lockdep_enabled && \
+ (preempt_count() != 0 || \
+ !this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+#define lockdep_assert_preemption_disabled() \
+do { \
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
+ __lockdep_enabled && \
+ (preempt_count() == 0 && \
+ this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+/*
+ * Acceptable for protecting per-CPU resources accessed from BH.
+ * Much like in_softirq() - semantics are ambiguous, use carefully.
+ */
+#define lockdep_assert_in_softirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (!in_softirq() || in_hardirq() || in_nmi())); \
+} while (0)
+
+extern void lockdep_assert_in_softirq_func(void);
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
+# define might_lock_nested(lock, subclass) do { } while (0)
+
+# define lockdep_assert_irqs_enabled() do { } while (0)
+# define lockdep_assert_irqs_disabled() do { } while (0)
+# define lockdep_assert_in_irq() do { } while (0)
+# define lockdep_assert_no_hardirq() do { } while (0)
+
+# define lockdep_assert_preemption_enabled() do { } while (0)
+# define lockdep_assert_preemption_disabled() do { } while (0)
+# define lockdep_assert_in_softirq() do { } while (0)
+# define lockdep_assert_in_softirq_func() do { } while (0)
+#endif
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+
+# define lockdep_assert_RT_in_threaded_ctx() do { \
+ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
+ lockdep_hardirq_context() && \
+ !(current->hardirq_threaded || current->irq_config), \
+ "Not in threaded context on PREEMPT_RT as expected\n"); \
+} while (0)
+
+#else
+
+# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)
+
#endif
#ifdef CONFIG_LOCKDEP
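The annotation side of the API above is easiest to see from a consumer's point
of view. Below is a minimal, hedged sketch of how a driver might key its locks
and use the held-lock assertions; all demo_* names are hypothetical, only the
lockdep calls themselves come from the header:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* A static key gives every bucket lock the same, named lockdep class. */
static struct lock_class_key demo_bucket_key;

struct demo_bucket {
	spinlock_t lock;
};

static void demo_bucket_init(struct demo_bucket *b)
{
	spin_lock_init(&b->lock);
	/* re-key the lock so all buckets share one class, "demo_bucket" */
	lockdep_set_class_and_name(&b->lock, &demo_bucket_key, "demo_bucket");
}

static void demo_bucket_update(struct demo_bucket *b)
{
	lockdep_assert_held(&b->lock);	/* WARNs if the caller dropped the lock */
	/* ... modify bucket state ... */
}

With !CONFIG_LOCKDEP, all of these calls compile away to the empty stubs shown
in the hunks above.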
diff --git a/include/linux/lockdep_api.h b/include/linux/lockdep_api.h
new file mode 100644
index 000000000000..907e66979ab2
--- /dev/null
+++ b/include/linux/lockdep_api.h
@@ -0,0 +1 @@
+#include <linux/lockdep.h>
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
new file mode 100644
index 000000000000..eae115a26488
--- /dev/null
+++ b/include/linux/lockdep_types.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime locking correctness validator
+ *
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * see Documentation/locking/lockdep-design.rst for more details.
+ */
+#ifndef __LINUX_LOCKDEP_TYPES_H
+#define __LINUX_LOCKDEP_TYPES_H
+
+#include <linux/types.h>
+
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+enum lockdep_wait_type {
+ LD_WAIT_INV = 0, /* not checked, catch all */
+
+ LD_WAIT_FREE, /* wait free, rcu etc.. */
+ LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+ LD_WAIT_CONFIG, /* preemptible in PREEMPT_RT, spinlock_t etc.. */
+#else
+ LD_WAIT_CONFIG = LD_WAIT_SPIN,
+#endif
+ LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */
+
+ LD_WAIT_MAX, /* must be last */
+};
+
+enum lockdep_lock_type {
+ LD_LOCK_NORMAL = 0, /* normal, catch all */
+ LD_LOCK_PERCPU, /* percpu */
+ LD_LOCK_WAIT_OVERRIDE, /* annotation */
+ LD_LOCK_MAX,
+};
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
+ *
+ * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h, for each
+ * of those we generate 4 states. Additionally we report on USED and USED_READ.
+ */
+#define XXX_LOCK_USAGE_STATES 2
+#define LOCK_TRACE_STATES (XXX_LOCK_USAGE_STATES*4 + 2)
+
+/*
+ * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+ * cached in the instance of lockdep_map
+ *
+ * Currently the main class (subclass == 0) and the single depth
+ * subclass are cached in lockdep_map. This optimization mainly
+ * targets rq->lock: double_rq_lock() acquires it at single depth,
+ * and that lock is highly contended.
+ */
+#define NR_LOCKDEP_CACHING_CLASSES 2
+
+/*
+ * A lockdep key is associated with each lock object. For static locks we use
+ * the lock address itself as the key. Dynamically allocated lock objects can
+ * have a statically or dynamically allocated key. Dynamically allocated lock
+ * keys must be registered before being used and must be unregistered before
+ * the key memory is freed.
+ */
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+/* hash_entry is used to keep track of dynamically allocated keys. */
+struct lock_class_key {
+ union {
+ struct hlist_node hash_entry;
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+ };
+};
+
+extern struct lock_class_key __lockdep_no_validate__;
+extern struct lock_class_key __lockdep_no_track__;
+
+struct lock_trace;
+
+#define LOCKSTAT_POINTS 4
+
+struct lockdep_map;
+typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
+ const struct lockdep_map *b);
+typedef void (*lock_print_fn)(const struct lockdep_map *map);
+
+/*
+ * The lock-class itself. The order of the structure members matters.
+ * reinit_class() zeroes the key member and all subsequent members.
+ */
+struct lock_class {
+ /*
+ * class-hash:
+ */
+ struct hlist_node hash_entry;
+
+ /*
+ * Entry in all_lock_classes when in use. Entry in free_lock_classes
+ * when not in use. Instances that are being freed are on one of the
+ * zapped_classes lists.
+ */
+ struct list_head lock_entry;
+
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
+ const struct lockdep_subclass_key *key;
+ lock_cmp_fn cmp_fn;
+ lock_print_fn print_fn;
+
+ unsigned int subclass;
+ unsigned int dep_gen_id;
+
+ /*
+ * IRQ/softirq usage tracking bits:
+ */
+ unsigned long usage_mask;
+ const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
+
+ const char *name;
+ /*
+ * Generation counter, when doing certain classes of graph walking,
+ * to ensure that we check one node only once:
+ */
+ int name_version;
+
+ u8 wait_type_inner;
+ u8 wait_type_outer;
+ u8 lock_type;
+ /* u8 hole; */
+
+#ifdef CONFIG_LOCK_STAT
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+#endif
+} __no_randomize_layout;
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+ s64 min;
+ s64 max;
+ s64 total;
+ unsigned long nr;
+};
+
+enum bounce_type {
+ bounce_acquired_write,
+ bounce_acquired_read,
+ bounce_contended_write,
+ bounce_contended_read,
+ nr_bounce_types,
+
+ bounce_acquired = bounce_acquired_write,
+ bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+ struct lock_time read_waittime;
+ struct lock_time write_waittime;
+ struct lock_time read_holdtime;
+ struct lock_time write_holdtime;
+ unsigned long bounces[nr_bounce_types];
+};
+
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
+ const char *name;
+ u8 wait_type_outer; /* can be taken in this context */
+ u8 wait_type_inner; /* presents this context */
+ u8 lock_type;
+ /* u8 hole; */
+#ifdef CONFIG_LOCK_STAT
+ int cpu;
+ unsigned long ip;
+#endif
+};
+
+struct pin_cookie { unsigned int val; };
+
+#define MAX_LOCKDEP_KEYS_BITS 13
+#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY -1
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero), here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+ struct lockdep_map *nest_lock;
+#ifdef CONFIG_LOCK_STAT
+ u64 waittime_stamp;
+ u64 holdtime_stamp;
+#endif
+ /*
+ * class_idx is zero-indexed; it points to the element in
+ * lock_classes this held lock instance belongs to. class_idx is in
+ * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+ */
+ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest ontop of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also keep do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+ unsigned int trylock:1; /* 16 bits */
+
+ unsigned int read:2; /* see lock_acquire() comment */
+ unsigned int check:1; /* see lock_acquire() comment */
+ unsigned int hardirqs_off:1;
+ unsigned int sync:1;
+ unsigned int references:11; /* 32 bits */
+ unsigned int pin_count;
+};
+
+#else /* !CONFIG_LOCKDEP */
+
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+
+/*
+ * The lockdep_map takes no space if lockdep is disabled:
+ */
+struct lockdep_map { };
+
+struct pin_cookie { };
+
+#endif /* !LOCKDEP */
+
+#endif /* __LINUX_LOCKDEP_TYPES_H */
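The new lock_cmp_fn/lock_print_fn hooks let a subsystem teach lockdep an
intra-class ordering instead of silencing it with subclasses. A hedged sketch
(the address-order policy and all demo_* names are illustrative, not from the
patch; only lock_set_cmp_fn() and the typedefs are real):

#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_node {
	spinlock_t lock;
};

static int demo_node_cmp(const struct lockdep_map *a,
			 const struct lockdep_map *b)
{
	/* hypothetical policy: nested instances must be taken in address order */
	return a < b ? -1 : a > b ? 1 : 0;
}

static void demo_node_print(const struct lockdep_map *map)
{
	pr_err("demo node lock: %s\n", map->name);	/* shown on violation */
}

static void demo_node_init(struct demo_node *n)
{
	spin_lock_init(&n->lock);
	/* no-op unless CONFIG_PROVE_LOCKING, per the #ifdef above */
	lock_set_cmp_fn(&n->lock, demo_node_cmp, demo_node_print);
}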
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index b10b122dd099..815d871fadfc 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H
@@ -33,17 +34,30 @@ struct lockref {
};
};
-extern void lockref_get(struct lockref *);
-extern int lockref_put_return(struct lockref *);
-extern int lockref_get_not_zero(struct lockref *);
-extern int lockref_get_or_lock(struct lockref *);
-extern int lockref_put_or_lock(struct lockref *);
+/**
+ * lockref_init - Initialize a lockref
+ * @lockref: pointer to lockref structure
+ *
+ * Initializes @lockref->count to 1.
+ */
+static inline void lockref_init(struct lockref *lockref)
+{
+ spin_lock_init(&lockref->lock);
+ lockref->count = 1;
+}
+
+void lockref_get(struct lockref *lockref);
+int lockref_put_return(struct lockref *lockref);
+bool lockref_get_not_zero(struct lockref *lockref);
+bool lockref_put_or_lock(struct lockref *lockref);
+#define lockref_put_or_lock(_lockref) \
+ (!__cond_lock((_lockref)->lock, !lockref_put_or_lock(_lockref)))
-extern void lockref_mark_dead(struct lockref *);
-extern int lockref_get_not_dead(struct lockref *);
+void lockref_mark_dead(struct lockref *lockref);
+bool lockref_get_not_dead(struct lockref *lockref);
/* Must be called under spinlock for reliable results */
-static inline int __lockref_is_dead(const struct lockref *l)
+static inline bool __lockref_is_dead(const struct lockref *l)
{
return ((int)l->count < 0);
}
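The lockref pattern above (lockless cmpxchg fast path, spinlock slow path) is
typically consumed as follows; a minimal sketch with hypothetical demo_* names,
mirroring the dcache-style object lifecycle:

#include <linux/lockref.h>
#include <linux/slab.h>

struct demo_node {
	struct lockref ref;
	/* ... payload ... */
};

static void demo_node_setup(struct demo_node *n)
{
	lockref_init(&n->ref);			/* count = 1, lock initialised */
}

static bool demo_node_tryget(struct demo_node *n)
{
	/* cmpxchg fast path; fails once lockref_mark_dead() has run */
	return lockref_get_not_dead(&n->ref);
}

static void demo_node_put(struct demo_node *n)
{
	if (lockref_put_or_lock(&n->ref))
		return;			/* count dropped locklessly */

	/* last reference: we now hold n->ref.lock with count == 1 */
	lockref_mark_dead(&n->ref);
	spin_unlock(&n->ref.lock);
	kfree(n);
}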
diff --git a/include/linux/log2.h b/include/linux/log2.h
index c373295f359f..2eac3fc9303d 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Integer base 2 logarithm calculation
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_LOG2_H
@@ -22,7 +18,7 @@
* - the arch is not required to handle n==0 if implementing the fallback
*/
#ifndef CONFIG_ARCH_HAS_ILOG2_U32
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
@@ -30,26 +26,30 @@ int __ilog2_u32(u32 n)
#endif
#ifndef CONFIG_ARCH_HAS_ILOG2_U64
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
}
#endif
-/*
- * Determine whether some value is a power of two, where zero is
+/**
+ * is_power_of_2() - check if a value is a power of two
+ * @n: the value to check
+ *
+ * Determine whether some value is a power of two, where zero is
* *not* considered a power of two.
+ * Return: true if @n is a power of 2, otherwise false.
*/
-
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
-/*
- * round up to nearest power of two
+/**
+ * __roundup_pow_of_two() - round up to nearest power of two
+ * @n: value to round up
*/
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
@@ -57,8 +57,9 @@ unsigned long __roundup_pow_of_two(unsigned long n)
return 1UL << fls_long(n - 1);
}
-/*
- * round down to nearest power of two
+/**
+ * __rounddown_pow_of_two() - round down to nearest power of two
+ * @n: value to round down
*/
static inline __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
@@ -67,16 +68,13 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
}
/**
- * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
- * @n - parameter
- *
- * constant-capable log of base 2 calculation
- * - this can be used to initialise global variables from constant data, hence
- * the massive ternary operator construction
+ * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value
+ * @n: parameter
*
- * selects the appropriately-sized optimised version depending on sizeof(n)
+ * Use this where sparse expects a true constant expression, e.g. for array
+ * indices.
*/
-#define ilog2(n) \
+#define const_ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 2 ? 0 : \
@@ -142,15 +140,32 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- 1 ) : \
- (sizeof(n) <= 4) ? \
- __ilog2_u32(n) : \
- __ilog2_u64(n) \
+ 1) : \
+ -1)
+
+/**
+ * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * @n: parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ __builtin_constant_p(n) ? \
+ ((n) < 2 ? 0 : \
+ 63 - __builtin_clzll(n)) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
)
/**
* roundup_pow_of_two - round the given value up to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value up to the nearest power of two
* - the result is undefined when n == 0
@@ -159,7 +174,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 1 : \
+ ((n) == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
@@ -167,7 +182,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
/**
* rounddown_pow_of_two - round the given value down to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value down to the nearest power of two
* - the result is undefined when n == 0
@@ -180,6 +195,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
__rounddown_pow_of_two(n) \
)
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+ return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
+
/**
* order_base_2 - calculate the (rounded up) base 2 order of the argument
* @n: parameter
@@ -193,13 +214,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ob2(5) = 3
* ... and so on.
*/
-
-static inline __attribute_const__
-int __order_base_2(unsigned long n)
-{
- return n > 1 ? ilog2(n - 1) + 1 : 0;
-}
-
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
@@ -207,4 +221,52 @@ int __order_base_2(unsigned long n)
ilog2((n) - 1) + 1) : \
__order_base_2(n) \
)
+
+static inline __attribute__((const))
+int __bits_per(unsigned long n)
+{
+ if (n < 2)
+ return 1;
+ if (is_power_of_2(n))
+ return order_base_2(n) + 1;
+ return order_base_2(n);
+}
+
+/**
+ * bits_per - calculate the number of bits required for the argument
+ * @n: parameter
+ *
+ * This is constant-capable and can be used for compile time
+ * initializations, e.g bitfields.
+ *
+ * The first few values calculated by this routine:
+ * bf(0) = 1
+ * bf(1) = 1
+ * bf(2) = 2
+ * bf(3) = 2
+ * bf(4) = 3
+ * ... and so on.
+ */
+#define bits_per(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ ((n) == 0 || (n) == 1) \
+ ? 1 : ilog2(n) + 1 \
+ ) : \
+ __bits_per(n) \
+)
+
+/**
+ * max_pow_of_two_factor - return highest power-of-2 factor
+ * @n: parameter
+ *
+ * find highest power-of-2 which is evenly divisible into n.
+ * 0 is returned only for n == 0; n == 1 yields 1 (2^0).
+ */
+static inline __attribute__((const))
+unsigned int max_pow_of_two_factor(unsigned int n)
+{
+ return n & -n;
+}
+
#endif /* _LINUX_LOG2_H */
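Worked values for the helpers above, written as a hedged self-test sketch
(demo_log2_selftest() is hypothetical; the assertions follow directly from the
definitions in the hunks):

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/log2.h>

static int __init demo_log2_selftest(void)
{
	BUILD_BUG_ON(const_ilog2(32) != 5);	/* strict constant expression */
	WARN_ON(ilog2(5) != 2);
	WARN_ON(roundup_pow_of_two(5) != 8);
	WARN_ON(rounddown_pow_of_two(5) != 4);
	WARN_ON(order_base_2(5) != 3);		/* 2^3 is the first >= 5 */
	WARN_ON(bits_per(4) != 3);		/* 0b100 needs three bits */
	WARN_ON(max_pow_of_two_factor(24) != 8);/* 24 = 8 * 3 */
	return 0;
}
late_initcall(demo_log2_selftest);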
diff --git a/include/linux/logic_iomem.h b/include/linux/logic_iomem.h
new file mode 100644
index 000000000000..3fa65c964379
--- /dev/null
+++ b/include/linux/logic_iomem.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef __LOGIC_IOMEM_H
+#define __LOGIC_IOMEM_H
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+/**
+ * struct logic_iomem_ops - emulated IO memory ops
+ * @read: read an 8, 16, 32 or 64 bit quantity from the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @write: write an 8, 16, 32 or 64 bit quantity to the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @set: optional, for memset_io()
+ * @copy_from: optional, for memcpy_fromio()
+ * @copy_to: optional, for memcpy_toio()
+ * @unmap: optional, this region is getting unmapped
+ */
+struct logic_iomem_ops {
+ unsigned long (*read)(void *priv, unsigned int offset, int size);
+ void (*write)(void *priv, unsigned int offset, int size,
+ unsigned long val);
+
+ void (*set)(void *priv, unsigned int offset, u8 value, int size);
+ void (*copy_from)(void *priv, void *buffer, unsigned int offset,
+ int size);
+ void (*copy_to)(void *priv, unsigned int offset, const void *buffer,
+ int size);
+
+ void (*unmap)(void *priv);
+};
+
+/**
+ * struct logic_iomem_region_ops - ops for an IO memory handler
+ * @map: map a range in the registered IO memory region, must
+ * fill *ops with the ops and may fill *priv to be passed
+ * to the ops. The offset is given as the offset into the
+ * registered resource region.
+ * The return value is negative for errors, or >= 0 for
+ * success. On success, the return value is added to the
+ * offset for later ops, to allow for partial mappings.
+ */
+struct logic_iomem_region_ops {
+ long (*map)(unsigned long offset, size_t size,
+ const struct logic_iomem_ops **ops,
+ void **priv);
+};
+
+/**
+ * logic_iomem_add_region - register an IO memory region
+ * @resource: the resource description for this region
+ * @ops: the IO memory mapping ops for this resource
+ */
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops);
+
+#endif /* __LOGIC_IOMEM_H */
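As a concrete (hedged) illustration, an emulated region can be backed by plain
memory. Everything named demo_* below is hypothetical; only the two structs
and logic_iomem_add_region() come from the header:

#include <linux/errno.h>
#include <linux/logic_iomem.h>
#include <linux/sizes.h>
#include <linux/string.h>

static u8 demo_backing[SZ_4K];

static unsigned long demo_read(void *priv, unsigned int offset, int size)
{
	unsigned long val = 0;

	memcpy(&val, priv + offset, size);	/* size is 1, 2, 4 (or 8) */
	return val;
}

static void demo_write(void *priv, unsigned int offset, int size,
		       unsigned long val)
{
	memcpy(priv + offset, &val, size);
}

static const struct logic_iomem_ops demo_ops = {
	.read	= demo_read,
	.write	= demo_write,
	/* .set/.copy_from/.copy_to are optional, see the kernel-doc above */
};

static long demo_map(unsigned long offset, size_t size,
		     const struct logic_iomem_ops **ops, void **priv)
{
	if (offset + size > sizeof(demo_backing))
		return -EINVAL;

	*ops = &demo_ops;
	*priv = demo_backing;
	return 0;	/* no extra offset added for later ops */
}

static const struct logic_iomem_region_ops demo_region_ops = {
	.map = demo_map,
};

The region itself would then be registered once via
logic_iomem_add_region(&demo_res, &demo_region_ops), with demo_res a struct
resource covering the emulated window.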
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
new file mode 100644
index 000000000000..8f1a9408302f
--- /dev/null
+++ b/include/linux/logic_pio.h
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
+ * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ */
+
+#ifndef __LINUX_LOGIC_PIO_H
+#define __LINUX_LOGIC_PIO_H
+
+#include <linux/fwnode.h>
+
+enum {
+ LOGIC_PIO_INDIRECT, /* Indirect IO flag */
+ LOGIC_PIO_CPU_MMIO, /* Memory-mapped IO flag */
+};
+
+struct logic_pio_hwaddr {
+ struct list_head list;
+ const struct fwnode_handle *fwnode;
+ resource_size_t hw_start;
+ resource_size_t io_start;
+ resource_size_t size; /* range size populated */
+ unsigned long flags;
+
+ void *hostdata;
+ const struct logic_pio_host_ops *ops;
+};
+
+struct logic_pio_host_ops {
+ u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth);
+ void (*out)(void *hostdata, unsigned long addr, u32 val,
+ size_t dwidth);
+ u32 (*ins)(void *hostdata, unsigned long addr, void *buffer,
+ size_t dwidth, unsigned int count);
+ void (*outs)(void *hostdata, unsigned long addr, const void *buffer,
+ size_t dwidth, unsigned int count);
+};
+
+#ifdef CONFIG_INDIRECT_PIO
+u8 logic_inb(unsigned long addr);
+u16 logic_inw(unsigned long addr);
+u32 logic_inl(unsigned long addr);
+void logic_outb(u8 value, unsigned long addr);
+void logic_outw(u16 value, unsigned long addr);
+void logic_outl(u32 value, unsigned long addr);
+void logic_insb(unsigned long addr, void *buffer, unsigned int count);
+void logic_insl(unsigned long addr, void *buffer, unsigned int count);
+void logic_insw(unsigned long addr, void *buffer, unsigned int count);
+void logic_outsb(unsigned long addr, const void *buffer, unsigned int count);
+void logic_outsw(unsigned long addr, const void *buffer, unsigned int count);
+void logic_outsl(unsigned long addr, const void *buffer, unsigned int count);
+
+#ifndef inb
+#define inb logic_inb
+#endif
+
+#ifndef inw
+#define inw logic_inw
+#endif
+
+#ifndef inl
+#define inl logic_inl
+#endif
+
+#ifndef outb
+#define outb logic_outb
+#endif
+
+#ifndef outw
+#define outw logic_outw
+#endif
+
+#ifndef outl
+#define outl logic_outl
+#endif
+
+#ifndef insb
+#define insb logic_insb
+#endif
+
+#ifndef insw
+#define insw logic_insw
+#endif
+
+#ifndef insl
+#define insl logic_insl
+#endif
+
+#ifndef outsb
+#define outsb logic_outsb
+#endif
+
+#ifndef outsw
+#define outsw logic_outsw
+#endif
+
+#ifndef outsl
+#define outsl logic_outsl
+#endif
+
+/*
+ * We reserve 0x4000 bytes for Indirect IO; so far this library is only
+ * used by the HiSilicon LPC Host. If needed, we can reserve a wider IO
+ * area by redefining the macro below.
+ */
+#define PIO_INDIRECT_SIZE 0x4000
+#else
+#define PIO_INDIRECT_SIZE 0
+#endif /* CONFIG_INDIRECT_PIO */
+#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE)
+
+struct logic_pio_hwaddr *find_io_range_by_fwnode(const struct fwnode_handle *fwnode);
+unsigned long logic_pio_trans_hwaddr(const struct fwnode_handle *fwnode,
+ resource_size_t hw_addr, resource_size_t size);
+int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
+resource_size_t logic_pio_to_hwaddr(unsigned long pio);
+unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
+
+#endif /* __LINUX_LOGIC_PIO_H */
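Registration from a host driver's perspective (a hedged sketch modeled on the
HiSilicon LPC use case; demo_register_lpc_range() and its parameters are
hypothetical):

#include <linux/errno.h>
#include <linux/logic_pio.h>
#include <linux/slab.h>

static int demo_register_lpc_range(const struct fwnode_handle *fwnode,
				   const struct logic_pio_host_ops *ops,
				   void *hostdata)
{
	struct logic_pio_hwaddr *range;
	int ret;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;

	range->fwnode	= fwnode;
	range->flags	= LOGIC_PIO_INDIRECT;	/* host accessors, not CPU MMIO */
	range->size	= PIO_INDIRECT_SIZE;
	range->hostdata	= hostdata;
	range->ops	= ops;

	ret = logic_pio_register_range(range);
	if (ret) {
		kfree(range);
		return ret;
	}

	/* range->io_start now holds the allocated logical PIO base */
	return 0;
}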
diff --git a/include/linux/lp.h b/include/linux/lp.h
index 0dd276af9e4e..be8a07eb2083 100644
--- a/include/linux/lp.h
+++ b/include/linux/lp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* usr/include/linux/lp.h c.1991-1992 James Wiegand
* many modifications copyright (C) 1992 Michael K. Johnson
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 04fc6e6c7ff0..ff82ef85a084 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
lru_cache.c
@@ -7,19 +8,6 @@
Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
- drbd is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- drbd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with drbd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
@@ -44,7 +32,7 @@ This header file (and its .c file; kernel-doc of functions see there)
Because of this later property, it is called "lru_cache".
As it actually Tracks Objects in an Active SeT, we could also call it
toast (incidentally that is what may happen to the data on the
- backend storage uppon next resync, if we don't get it right).
+ backend storage upon next resync, if we don't get it right).
What for?
@@ -131,7 +119,7 @@ write intent log information, three of which are mentioned here.
*/
/* this defines an element in a tracked set
- * .colision is for hash table lookup.
+ * .collision is for hash table lookup.
* When we process a new IO request, we know its sector, thus can deduce the
* region number (label) easily. To do the label -> object lookup without a
* full list walk, we use a simple hash table.
@@ -157,14 +145,14 @@ write intent log information, three of which are mentioned here.
* But it avoids high order page allocations in kmalloc.
*/
struct lc_element {
- struct hlist_node colision;
+ struct hlist_node collision;
struct list_head list; /* LRU list or free list */
unsigned refcnt;
/* back "pointer" into lc_cache->element[index],
* for paranoia, and for "lc_element_to_index" */
unsigned lc_index;
/* if we want to track a larger set of objects,
- * it needs to become arch independend u64 */
+ * it needs to become an architecture independent u64 */
unsigned lc_number;
/* special label when on free list */
#define LC_FREE (~0U)
@@ -211,7 +199,6 @@ struct lru_cache {
unsigned long flags;
- void *lc_private;
const char *name;
/* nr_elements there */
@@ -253,7 +240,6 @@ extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
unsigned e_count, size_t e_size, size_t e_off);
extern void lc_reset(struct lru_cache *lc);
extern void lc_destroy(struct lru_cache *lc);
-extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
extern void lc_del(struct lru_cache *lc, struct lc_element *element);
extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
@@ -275,7 +261,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
*
* Allows (expects) the set to be "dirty". Note that the reference counts and
* order on the active and lru lists may still change. Used to serialize
- * changing transactions. Returns true if we aquired the lock.
+ * changing transactions. Returns true if we acquired the lock.
*/
static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{
@@ -287,7 +273,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
* @lc: the lru cache to operate on
*
* Note that the reference counts and order on the active and lru lists may
- * still change. Only works on a "clean" set. Returns true if we aquired the
+ * still change. Only works on a "clean" set. Returns true if we acquired the
* lock, which means there are no pending changes, and any further attempt to
* change the set will not succeed until the next lc_unlock().
*/
@@ -309,6 +295,5 @@ extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
container_of(ptr, type, member)
extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i);
-extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e);
#endif
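Putting the embedded lc_element pattern together (a hedged sketch: demo_*
names are hypothetical, and the lc_create() argument order is assumed from
the declaration above together with its max_pending_changes parameter;
lc_find() is the label lookup declared elsewhere in this header):

#include <linux/errno.h>
#include <linux/lru_cache.h>
#include <linux/slab.h>

struct demo_extent {
	struct lc_element lce;		/* embedded tracking element */
	unsigned long flags;
};

static struct kmem_cache *demo_cache;
static struct lru_cache *demo_lc;

static int demo_lc_setup(void)
{
	demo_cache = KMEM_CACHE(demo_extent, 0);
	if (!demo_cache)
		return -ENOMEM;

	/* 64 in-use elements, at most one pending transaction */
	demo_lc = lc_create("demo", demo_cache, 1, 64,
			    sizeof(struct demo_extent),
			    offsetof(struct demo_extent, lce));
	if (!demo_lc) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}
	return 0;
}

/* label -> object lookup through the .collision hash, no list walk */
static struct demo_extent *demo_find(unsigned int region_nr)
{
	struct lc_element *e = lc_find(demo_lc, region_nr);

	return e ? container_of(e, struct demo_extent, lce) : NULL;
}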
diff --git a/include/linux/lsm/apparmor.h b/include/linux/lsm/apparmor.h
new file mode 100644
index 000000000000..612cbfacb072
--- /dev/null
+++ b/include/linux/lsm/apparmor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * AppArmor presents a single pointer to an aa_label structure.
+ */
+#ifndef __LINUX_LSM_APPARMOR_H
+#define __LINUX_LSM_APPARMOR_H
+
+struct aa_label;
+
+struct lsm_prop_apparmor {
+#ifdef CONFIG_SECURITY_APPARMOR
+ struct aa_label *label;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_APPARMOR_H */
diff --git a/include/linux/lsm/bpf.h b/include/linux/lsm/bpf.h
new file mode 100644
index 000000000000..8106e206fcef
--- /dev/null
+++ b/include/linux/lsm/bpf.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * BPF may present a single u32 value.
+ */
+#ifndef __LINUX_LSM_BPF_H
+#define __LINUX_LSM_BPF_H
+#include <linux/types.h>
+
+struct lsm_prop_bpf {
+#ifdef CONFIG_BPF_LSM
+ u32 secid;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_BPF_H */
diff --git a/include/linux/lsm/selinux.h b/include/linux/lsm/selinux.h
new file mode 100644
index 000000000000..9455a6b5b910
--- /dev/null
+++ b/include/linux/lsm/selinux.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * SELinux presents a single u32 value which is known as a secid.
+ */
+#ifndef __LINUX_LSM_SELINUX_H
+#define __LINUX_LSM_SELINUX_H
+#include <linux/types.h>
+
+struct lsm_prop_selinux {
+#ifdef CONFIG_SECURITY_SELINUX
+ u32 secid;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_SELINUX_H */
diff --git a/include/linux/lsm/smack.h b/include/linux/lsm/smack.h
new file mode 100644
index 000000000000..ff730dd7a734
--- /dev/null
+++ b/include/linux/lsm/smack.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * Smack presents a pointer into the global Smack label list.
+ */
+#ifndef __LINUX_LSM_SMACK_H
+#define __LINUX_LSM_SMACK_H
+
+struct smack_known;
+
+struct lsm_prop_smack {
+#ifdef CONFIG_SECURITY_SMACK
+ struct smack_known *skp;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_SMACK_H */
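These four headers are designed to compose: each struct collapses to zero
size when its LSM is configured out. A hedged sketch of the aggregate that
consumers carry around (the real field set lives in security.h and may
differ):

#include <linux/lsm/apparmor.h>
#include <linux/lsm/bpf.h>
#include <linux/lsm/selinux.h>
#include <linux/lsm/smack.h>

struct lsm_prop {
	struct lsm_prop_selinux	 selinux;
	struct lsm_prop_smack	 smack;
	struct lsm_prop_apparmor apparmor;
	struct lsm_prop_bpf	 bpf;
};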
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 22b5d4e687ce..382c56a97bba 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -1,10 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common LSM logging functions
* Heavily borrowed from selinux/avc.h
*
* Author : Etienne BASSET <etienne.basset@ensta.org>
*
- * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil>
+ * All credits to : Stephen Smalley
* All BUGS to : Etienne BASSET <etienne.basset@ensta.org>
*/
#ifndef _LSM_COMMON_LOGGING_
@@ -25,7 +26,7 @@
struct lsm_network_audit {
int netif;
- struct sock *sk;
+ const struct sock *sk;
u16 family;
__be16 dport;
__be16 sport;
@@ -47,13 +48,13 @@ struct lsm_ioctlop_audit {
};
struct lsm_ibpkey_audit {
- u64 subnet_prefix;
- u16 pkey;
+ u64 subnet_prefix;
+ u16 pkey;
};
struct lsm_ibendport_audit {
- char dev_name[IB_DEVICE_NAME_MAX];
- u8 port;
+ const char *dev_name;
+ u8 port;
};
/* Auxiliary data to use in generating the audit record. */
@@ -73,6 +74,10 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_FILE 12
#define LSM_AUDIT_DATA_IBPKEY 13
#define LSM_AUDIT_DATA_IBENDPORT 14
+#define LSM_AUDIT_DATA_LOCKDOWN 15
+#define LSM_AUDIT_DATA_NOTIFICATION 16
+#define LSM_AUDIT_DATA_ANONINODE 17
+#define LSM_AUDIT_DATA_NLMSGTYPE 18
union {
struct path path;
struct dentry *dentry;
@@ -92,6 +97,9 @@ struct common_audit_data {
struct file *file;
struct lsm_ibpkey_audit *ibpkey;
struct lsm_ibendport_audit *ibendport;
+ int reason;
+ const char *anonclass;
+ u16 nlmsg_type;
} u;
/* this union contains LSM specific data */
union {
@@ -110,14 +118,36 @@ struct common_audit_data {
#define v4info fam.v4
#define v6info fam.v6
+#ifdef CONFIG_AUDIT
+
int ipv4_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto);
+#if IS_ENABLED(CONFIG_IPV6)
int ipv6_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
void common_lsm_audit(struct common_audit_data *a,
void (*pre_audit)(struct audit_buffer *, void *),
void (*post_audit)(struct audit_buffer *, void *));
+void audit_log_lsm_data(struct audit_buffer *ab,
+ const struct common_audit_data *a);
+
+#else /* CONFIG_AUDIT */
+
+static inline void common_lsm_audit(struct common_audit_data *a,
+ void (*pre_audit)(struct audit_buffer *, void *),
+ void (*post_audit)(struct audit_buffer *, void *))
+{
+}
+
+static inline void audit_log_lsm_data(struct audit_buffer *ab,
+ const struct common_audit_data *a)
+{
+}
+
+#endif /* CONFIG_AUDIT */
+
#endif
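
With the CONFIG_AUDIT stubs added above, callers can invoke the helpers unconditionally. A minimal sketch of the calling convention (the LSM name and callbacks are hypothetical; common_lsm_audit(), struct common_audit_data and LSM_AUDIT_DATA_FILE are from this header):

        #include <linux/lsm_audit.h>

        /* Hypothetical denial logger for some LSM. */
        static void example_pre(struct audit_buffer *ab, void *a)
        {
                audit_log_format(ab, "examplelsm: operation denied");
        }

        static void example_post(struct audit_buffer *ab, void *a)
        {
                /* nothing to append after the common fields */
        }

        static void example_log_file_denial(struct file *file)
        {
                struct common_audit_data ad;

                ad.type = LSM_AUDIT_DATA_FILE;
                ad.u.file = file;
                common_lsm_audit(&ad, example_pre, example_post);
        }

When CONFIG_AUDIT is off, common_lsm_audit() is the empty inline above and the call does nothing.
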
diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h
new file mode 100644
index 000000000000..16eb49761b25
--- /dev/null
+++ b/include/linux/lsm_count.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2023 Google LLC.
+ */
+
+#ifndef __LINUX_LSM_COUNT_H
+#define __LINUX_LSM_COUNT_H
+
+#include <linux/args.h>
+
+#ifdef CONFIG_SECURITY
+
+/*
+ * Macros to count the number of LSMs enabled in the kernel at compile time.
+ */
+
+/*
+ * Capabilities is enabled when CONFIG_SECURITY is enabled.
+ */
+#if IS_ENABLED(CONFIG_SECURITY)
+#define CAPABILITIES_ENABLED 1,
+#else
+#define CAPABILITIES_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SELINUX)
+#define SELINUX_ENABLED 1,
+#else
+#define SELINUX_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SMACK)
+#define SMACK_ENABLED 1,
+#else
+#define SMACK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_APPARMOR)
+#define APPARMOR_ENABLED 1,
+#else
+#define APPARMOR_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_TOMOYO)
+#define TOMOYO_ENABLED 1,
+#else
+#define TOMOYO_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_YAMA)
+#define YAMA_ENABLED 1,
+#else
+#define YAMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN)
+#define LOADPIN_ENABLED 1,
+#else
+#define LOADPIN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM)
+#define LOCKDOWN_ENABLED 1,
+#else
+#define LOCKDOWN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SAFESETID)
+#define SAFESETID_ENABLED 1,
+#else
+#define SAFESETID_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_BPF_LSM)
+#define BPF_LSM_ENABLED 1,
+#else
+#define BPF_LSM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK)
+#define LANDLOCK_ENABLED 1,
+#else
+#define LANDLOCK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_IMA)
+#define IMA_ENABLED 1,
+#else
+#define IMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_EVM)
+#define EVM_ENABLED 1,
+#else
+#define EVM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_IPE)
+#define IPE_ENABLED 1,
+#else
+#define IPE_ENABLED
+#endif
+
+/*
+ * There is a trailing comma that needs to be accounted for. This is done by
+ * using a skipped argument in __COUNT_LSMS.
+ */
+#define __COUNT_LSMS(skipped_arg, args...) COUNT_ARGS(args...)
+#define COUNT_LSMS(args...) __COUNT_LSMS(args)
+
+#define MAX_LSM_COUNT \
+ COUNT_LSMS( \
+ CAPABILITIES_ENABLED \
+ SELINUX_ENABLED \
+ SMACK_ENABLED \
+ APPARMOR_ENABLED \
+ TOMOYO_ENABLED \
+ YAMA_ENABLED \
+ LOADPIN_ENABLED \
+ LOCKDOWN_ENABLED \
+ SAFESETID_ENABLED \
+ BPF_LSM_ENABLED \
+ LANDLOCK_ENABLED \
+ IMA_ENABLED \
+ EVM_ENABLED \
+ IPE_ENABLED)
+
+#else
+
+#define MAX_LSM_COUNT 0
+
+#endif /* CONFIG_SECURITY */
+
+#endif /* __LINUX_LSM_COUNT_H */
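
To see why __COUNT_LSMS skips its first argument, take a config where only CONFIG_SECURITY, CONFIG_SECURITY_SELINUX and CONFIG_BPF_LSM are enabled, so every other *_ENABLED macro expands to nothing:

        MAX_LSM_COUNT
          -> COUNT_LSMS(1, 1, 1,)            /* trailing comma = one extra empty argument */
          -> __COUNT_LSMS(1, 1, 1, <empty>)
          -> COUNT_ARGS(1, 1, <empty>)       /* leading "1" consumed as skipped_arg */
          -> 3

Dropping the first argument exactly cancels the empty argument created by the trailing comma, so the result is the number of enabled LSMs (three here: capabilities, SELinux, BPF).
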
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
new file mode 100644
index 000000000000..8c42b4bde09c
--- /dev/null
+++ b/include/linux/lsm_hook_defs.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Linux Security Module Hook declarations.
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
+ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
+ * Copyright (C) 2015 Intel Corporation.
+ * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2016 Mellanox Technologies
+ * Copyright (C) 2020 Google LLC.
+ */
+
+/*
+ * The macro LSM_HOOK is used to define the data structures required by
+ * the LSM framework using the pattern:
+ *
+ * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...)
+ *
+ * struct security_hook_heads {
+ * #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
+ * #include <linux/lsm_hook_defs.h>
+ * #undef LSM_HOOK
+ * };
+ */
+LSM_HOOK(int, 0, binder_set_context_mgr, const struct cred *mgr)
+LSM_HOOK(int, 0, binder_transaction, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from,
+ const struct cred *to, const struct file *file)
+LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child,
+ unsigned int mode)
+LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent)
+LSM_HOOK(int, 0, capget, const struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted)
+LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective, const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted)
+LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns,
+ int cap, unsigned int opts)
+LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, const struct super_block *sb)
+LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
+LSM_HOOK(int, 0, syslog, int type)
+LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
+ const struct timezone *tz)
+LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
+LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, const struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, const struct linux_binprm *bprm)
+LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference)
+LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
+ struct fs_context *src_sc)
+LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
+ struct fs_parameter *param)
+LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_delete, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts)
+LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts)
+LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts)
+LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts)
+LSM_HOOK(int, 0, sb_kern_mount, const struct super_block *sb)
+LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb)
+LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry)
+LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
+LSM_HOOK(int, 0, sb_umount, struct vfsmount *mnt, int flags)
+LSM_HOOK(int, 0, sb_pivotroot, const struct path *old_path,
+ const struct path *new_path)
+LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts,
+ unsigned long kern_flags, unsigned long *set_kern_flags)
+LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb,
+ struct super_block *newsb, unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+LSM_HOOK(int, 0, move_mount, const struct path *from_path,
+ const struct path *to_path)
+LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry,
+ int mode, const struct qstr *name, const char **xattr_name,
+ struct lsm_context *cp)
+LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
+ const struct qstr *name, const struct cred *old, struct cred *new)
+
+#ifdef CONFIG_SECURITY_PATH
+LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+LSM_HOOK(void, LSM_RET_VOID, path_post_mknod, struct mnt_idmap *idmap,
+ struct dentry *dentry)
+LSM_HOOK(int, 0, path_truncate, const struct path *path)
+LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry,
+ const char *old_name)
+LSM_HOOK(int, 0, path_link, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry)
+LSM_HOOK(int, 0, path_rename, const struct path *old_dir,
+ struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
+LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode)
+LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid)
+LSM_HOOK(int, 0, path_chroot, const struct path *path)
+#endif /* CONFIG_SECURITY_PATH */
+
+/* Needed for inode based security check */
+LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
+ unsigned int obj_type)
+LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
+LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
+LSM_HOOK(void, LSM_RET_VOID, inode_free_security_rcu, void *inode_security)
+LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode,
+ struct inode *dir, const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count)
+LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode,
+ const struct qstr *name, const struct inode *context_inode)
+LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_create_tmpfile, struct mnt_idmap *idmap,
+ struct inode *inode)
+LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry)
+LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_symlink, struct inode *dir, struct dentry *dentry,
+ const char *old_name)
+LSM_HOOK(int, 0, inode_mkdir, struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, inode_rmdir, struct inode *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_mknod, struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t dev)
+LSM_HOOK(int, 0, inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode,
+ bool rcu)
+LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask)
+LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, int ia_valid)
+LSM_HOOK(int, 0, inode_getattr, const struct path *path)
+LSM_HOOK(int, 0, inode_xattr_skipcap, const char *name)
+LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry,
+ const char *name, const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_removexattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_removexattr, struct dentry *dentry,
+ const char *name)
+LSM_HOOK(int, 0, inode_file_setattr, struct dentry *dentry, struct file_kattr *fa)
+LSM_HOOK(int, 0, inode_file_getattr, struct dentry *dentry, struct file_kattr *fa)
+LSM_HOOK(int, 0, inode_set_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name, struct posix_acl *kacl)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_set_acl, struct dentry *dentry,
+ const char *acl_name, struct posix_acl *kacl)
+LSM_HOOK(int, 0, inode_get_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(int, 0, inode_remove_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_remove_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_killpriv, struct mnt_idmap *idmap,
+ struct dentry *dentry)
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct mnt_idmap *idmap,
+ struct inode *inode, const char *name, void **buffer, bool alloc)
+LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
+ size_t buffer_size)
+LSM_HOOK(void, LSM_RET_VOID, inode_getlsmprop, struct inode *inode,
+ struct lsm_prop *prop)
+LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
+LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src,
+ const char *name)
+LSM_HOOK(int, 0, inode_setintegrity, const struct inode *inode,
+ enum lsm_integrity_type type, const void *value, size_t size)
+LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
+LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+LSM_HOOK(void, LSM_RET_VOID, file_release, struct file *file)
+LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
+LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
+ unsigned long arg)
+LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
+ unsigned long arg)
+LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
+LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot)
+LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd)
+LSM_HOOK(int, 0, file_fcntl, struct file *file, unsigned int cmd,
+ unsigned long arg)
+LSM_HOOK(void, LSM_RET_VOID, file_set_fowner, struct file *file)
+LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk,
+ struct fown_struct *fown, int sig)
+LSM_HOOK(int, 0, file_receive, struct file *file)
+LSM_HOOK(int, 0, file_open, struct file *file)
+LSM_HOOK(int, 0, file_post_open, struct file *file, int mask)
+LSM_HOOK(int, 0, file_truncate, struct file *file)
+LSM_HOOK(int, 0, task_alloc, struct task_struct *task,
+ u64 clone_flags)
+LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)
+LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp)
+LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred)
+LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old,
+ gfp_t gfp)
+LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new,
+ const struct cred *old)
+LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, cred_getlsmprop, const struct cred *c,
+ struct lsm_prop *prop)
+LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid)
+LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode)
+LSM_HOOK(int, 0, kernel_module_request, char *kmod_name)
+LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id, bool contents)
+LSM_HOOK(int, 0, kernel_post_load_data, char *buf, loff_t size,
+ enum kernel_load_data_id id, char *description)
+LSM_HOOK(int, 0, kernel_read_file, struct file *file,
+ enum kernel_read_file_id id, bool contents)
+LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf,
+ loff_t size, enum kernel_read_file_id id)
+LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old,
+ int flags)
+LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred *old,
+ int flags)
+LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred *old)
+LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
+LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
+LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
+LSM_HOOK(void, LSM_RET_VOID, current_getlsmprop_subj, struct lsm_prop *prop)
+LSM_HOOK(void, LSM_RET_VOID, task_getlsmprop_obj,
+ struct task_struct *p, struct lsm_prop *prop)
+LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice)
+LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio)
+LSM_HOOK(int, 0, task_getioprio, struct task_struct *p)
+LSM_HOOK(int, 0, task_prlimit, const struct cred *cred,
+ const struct cred *tcred, unsigned int flags)
+LSM_HOOK(int, 0, task_setrlimit, struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
+LSM_HOOK(int, 0, task_setscheduler, struct task_struct *p)
+LSM_HOOK(int, 0, task_getscheduler, struct task_struct *p)
+LSM_HOOK(int, 0, task_movememory, struct task_struct *p)
+LSM_HOOK(int, 0, task_kill, struct task_struct *p, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p,
+ struct inode *inode)
+LSM_HOOK(int, 0, userns_create, const struct cred *cred)
+LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag)
+LSM_HOOK(void, LSM_RET_VOID, ipc_getlsmprop, struct kern_ipc_perm *ipcp,
+ struct lsm_prop *prop)
+LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg)
+LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg)
+LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, msg_queue_free_security,
+ struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, msg_queue_associate, struct kern_ipc_perm *perm, int msqflg)
+LSM_HOOK(int, 0, msg_queue_msgctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, msg_queue_msgsnd, struct kern_ipc_perm *perm,
+ struct msg_msg *msg, int msqflg)
+LSM_HOOK(int, 0, msg_queue_msgrcv, struct kern_ipc_perm *perm,
+ struct msg_msg *msg, struct task_struct *target, long type, int mode)
+LSM_HOOK(int, 0, shm_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, shm_free_security, struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, shm_associate, struct kern_ipc_perm *perm, int shmflg)
+LSM_HOOK(int, 0, shm_shmctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, shm_shmat, struct kern_ipc_perm *perm, char __user *shmaddr,
+ int shmflg)
+LSM_HOOK(int, 0, sem_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, sem_free_security, struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, sem_associate, struct kern_ipc_perm *perm, int semflg)
+LSM_HOOK(int, 0, sem_semctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
+ unsigned nsops, int alter)
+LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
+LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
+ struct inode *inode)
+LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr,
+ struct lsm_ctx __user *ctx, u32 *size, u32 flags)
+LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr,
+ struct lsm_ctx *ctx, u32 size, u32 flags)
+LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
+ char **value)
+LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
+LSM_HOOK(int, 0, ismaclabel, const char *name)
+LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, struct lsm_context *cp)
+LSM_HOOK(int, -EOPNOTSUPP, lsmprop_to_secctx, struct lsm_prop *prop,
+ struct lsm_context *cp)
+LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, release_secctx, struct lsm_context *cp)
+LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
+LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
+LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode,
+ struct lsm_context *cp)
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+LSM_HOOK(int, 0, post_notification, const struct cred *w_cred,
+ const struct cred *cred, struct watch_notification *n)
+#endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
+LSM_HOOK(int, 0, watch_key, struct key *key)
+#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */
+
+#ifdef CONFIG_SECURITY_NETWORK
+LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other,
+ struct sock *newsk)
+LSM_HOOK(int, 0, unix_may_send, struct socket *sock, struct socket *other)
+LSM_HOOK(int, 0, socket_create, int family, int type, int protocol, int kern)
+LSM_HOOK(int, 0, socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+LSM_HOOK(int, 0, socket_socketpair, struct socket *socka, struct socket *sockb)
+LSM_HOOK(int, 0, socket_bind, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+LSM_HOOK(int, 0, socket_connect, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+LSM_HOOK(int, 0, socket_listen, struct socket *sock, int backlog)
+LSM_HOOK(int, 0, socket_accept, struct socket *sock, struct socket *newsock)
+LSM_HOOK(int, 0, socket_sendmsg, struct socket *sock, struct msghdr *msg,
+ int size)
+LSM_HOOK(int, 0, socket_recvmsg, struct socket *sock, struct msghdr *msg,
+ int size, int flags)
+LSM_HOOK(int, 0, socket_getsockname, struct socket *sock)
+LSM_HOOK(int, 0, socket_getpeername, struct socket *sock)
+LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
+LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
+LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
+LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock,
+ sockptr_t optval, sockptr_t optlen, unsigned int len)
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
+LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
+LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk,
+ struct sock *newsk)
+LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, const struct sock *sk, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent)
+LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk,
+ const struct request_sock *req)
+LSM_HOOK(void, LSM_RET_VOID, inet_conn_established, struct sock *sk,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid)
+LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
+LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
+LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
+ struct flowi_common *flic)
+LSM_HOOK(int, 0, tun_dev_alloc_security, void *security)
+LSM_HOOK(int, 0, tun_dev_create, void)
+LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
+LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
+LSM_HOOK(int, 0, tun_dev_open, void *security)
+LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen)
+LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc,
+ struct sock *sk, struct sock *newsk)
+LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk)
+#endif /* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey)
+LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name,
+ u8 port_num)
+LSM_HOOK(int, 0, ib_alloc_security, void *sec)
+#endif /* CONFIG_SECURITY_INFINIBAND */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+LSM_HOOK(int, 0, xfrm_policy_alloc_security, struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp)
+LSM_HOOK(int, 0, xfrm_policy_clone_security, struct xfrm_sec_ctx *old_ctx,
+ struct xfrm_sec_ctx **new_ctx)
+LSM_HOOK(void, LSM_RET_VOID, xfrm_policy_free_security,
+ struct xfrm_sec_ctx *ctx)
+LSM_HOOK(int, 0, xfrm_policy_delete_security, struct xfrm_sec_ctx *ctx)
+LSM_HOOK(int, 0, xfrm_state_alloc, struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
+LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x)
+LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x)
+LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid)
+LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x,
+ struct xfrm_policy *xp, const struct flowi_common *flic)
+LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
+ int ckall)
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+
+/* key management security hooks */
+#ifdef CONFIG_KEYS
+LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
+ unsigned long flags)
+LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm)
+LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer)
+LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring,
+ struct key *key, const void *payload, size_t payload_len,
+ unsigned long flags, bool create)
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
+ void **lsmrule, gfp_t gfp)
+LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
+LSM_HOOK(int, 0, audit_rule_match, struct lsm_prop *prop, u32 field, u32 op,
+ void *lsmrule)
+LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
+#endif /* CONFIG_AUDIT */
+
+#ifdef CONFIG_BPF_SYSCALL
+LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
+LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
+LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token, bool kernel)
+LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
+LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token, bool kernel)
+LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
+LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
+ const struct path *path)
+LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
+LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
+LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
+#endif /* CONFIG_BPF_SYSCALL */
+
+LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
+
+#ifdef CONFIG_PERF_EVENTS
+LSM_HOOK(int, 0, perf_event_open, int type)
+LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event)
+LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
+LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
+#endif /* CONFIG_PERF_EVENTS */
+
+#ifdef CONFIG_IO_URING
+LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
+LSM_HOOK(int, 0, uring_sqpoll, void)
+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
+LSM_HOOK(int, 0, uring_allowed, void)
+#endif /* CONFIG_IO_URING */
+
+LSM_HOOK(void, LSM_RET_VOID, initramfs_populated, void)
+
+LSM_HOOK(int, 0, bdev_alloc_security, struct block_device *bdev)
+LSM_HOOK(void, LSM_RET_VOID, bdev_free_security, struct block_device *bdev)
+LSM_HOOK(int, 0, bdev_setintegrity, struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value, size_t size)
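
The block comment at the top of this file shows how security_hook_heads is generated from these entries. The same trick yields the table of hook implementations; a sketch of that second consumer follows (this is the pattern lsm_hooks.h uses, though the in-tree union may carry extra members):

        /* Expand every LSM_HOOK(RET, DEFAULT, NAME, ...) line into a
         * function pointer named after the hook. */
        union security_list_options {
        #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
        #include <linux/lsm_hook_defs.h>
        #undef LSM_HOOK
        };

The second macro argument (0, LSM_RET_VOID, -EOPNOTSUPP, -ENOPARAM, ...) is the default the framework falls back to when no security module implements a given hook.
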
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 3a90febadbe2..b92008641242 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -25,1962 +25,194 @@
#ifndef __LINUX_LSM_HOOKS_H
#define __LINUX_LSM_HOOKS_H
+#include <uapi/linux/lsm.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/rculist.h>
+#include <linux/xattr.h>
+#include <linux/static_call.h>
+#include <linux/unroll.h>
+#include <linux/jump_label.h>
+#include <linux/lsm_count.h>
-/**
- * union security_list_options - Linux Security Module hook function list
- *
- * Security hooks for program execution operations.
- *
- * @bprm_set_creds:
- * Save security information in the bprm->security field, typically based
- * on information about the bprm->file, for later use by the apply_creds
- * hook. This hook may also optionally check permissions (e.g. for
- * transitions between security domains).
- * This hook may be called multiple times during a single execve, e.g. for
- * interpreters. The hook can tell whether it has already been called by
- * checking to see if @bprm->security is non-NULL. If so, then the hook
- * may decide either to retain the security information saved earlier or
- * to replace it.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_check_security:
- * This hook mediates the point when a search for a binary handler will
- * begin. It allows a check of the @bprm->security value which is set in the
- * preceding set_creds call. The primary difference from set_creds is
- * that the argv list and envp list are reliably available in @bprm. This
- * hook may be called multiple times during a single execve; and in each
- * pass set_creds is called first.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_committing_creds:
- * Prepare to install the new security attributes of a process being
- * transformed by an execve operation, based on the old credentials
- * pointed to by @current->cred and the information set in @bprm->cred by
- * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
- * This hook is a good place to perform state changes on the process such
- * as closing open file descriptors to which access will no longer be
- * granted when the attributes are changed. This is called immediately
- * before commit_creds().
- * @bprm_committed_creds:
- * Tidy up after the installation of the new security attributes of a
- * process being transformed by an execve operation. The new credentials
- * have, by this point, been set to @current->cred. @bprm points to the
- * linux_binprm structure. This hook is a good place to perform state
- * changes on the process such as clearing out non-inheritable signal
- * state. This is called immediately after commit_creds().
- * @bprm_secureexec:
- * Return a boolean value (0 or 1) indicating whether a "secure exec"
- * is required. The flag is passed in the auxiliary table
- * on the initial stack to the ELF interpreter to indicate whether libc
- * should enable secure mode.
- * @bprm contains the linux_binprm structure.
- *
- * Security hooks for filesystem operations.
- *
- * @sb_alloc_security:
- * Allocate and attach a security structure to the sb->s_security field.
- * The s_security field is initialized to NULL when the structure is
- * allocated.
- * @sb contains the super_block structure to be modified.
- * Return 0 if operation was successful.
- * @sb_free_security:
- * Deallocate and clear the sb->s_security field.
- * @sb contains the super_block structure to be modified.
- * @sb_statfs:
- * Check permission before obtaining filesystem statistics for the @mnt
- * mountpoint.
- * @dentry is a handle on the superblock for the filesystem.
- * Return 0 if permission is granted.
- * @sb_mount:
- * Check permission before an object specified by @dev_name is mounted on
- * the mount point named by @nd. For an ordinary mount, @dev_name
- * identifies a device if the file system type requires a device. For a
- * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
- * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
- * pathname of the object being mounted.
- * @dev_name contains the name for object being mounted.
- * @path contains the path for mount point object.
- * @type contains the filesystem type.
- * @flags contains the mount flags.
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_copy_data:
- * Allow mount option data to be copied prior to parsing by the filesystem,
- * so that the security module can extract security-specific mount
- * options cleanly (a filesystem may modify the data e.g. with strsep()).
- * This also allows the original mount data to be stripped of security-
- * specific options to avoid having to make filesystems aware of them.
- * @type the type of filesystem being mounted.
- * @orig the original mount data copied from userspace.
- * @copy copied data which will be passed to the security module.
- * Returns 0 if the copy was successful.
- * @sb_remount:
- * Extracts security system specific mount options and verifies no changes
- * are being made to those options.
- * @sb superblock being remounted
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_umount:
- * Check permission before the @mnt file system is unmounted.
- * @mnt contains the mounted file system.
- * @flags contains the unmount flags, e.g. MNT_FORCE.
- * Return 0 if permission is granted.
- * @sb_pivotroot:
- * Check permission before pivoting the root filesystem.
- * @old_path contains the path for the new location of the
- * current root (put_old).
- * @new_path contains the path for the new root (new_root).
- * Return 0 if permission is granted.
- * @sb_set_mnt_opts:
- * Set the security relevant mount options used for a superblock
- * @sb the superblock to set security mount options for
- * @opts binary data structure containing all lsm mount data
- * @sb_clone_mnt_opts:
- * Copy all security options from a given superblock to another
- * @oldsb old superblock which contain information to clone
- * @newsb new superblock which needs filled in
- * @sb_parse_opts_str:
- * Parse a string of security data filling in the opts structure
- * @options string containing all mount options known by the LSM
- * @opts binary data structure usable by the LSM
- * @dentry_init_security:
- * Compute a context for a dentry as the inode is not yet available
- * since NFSv4 has no label backed by an EA anyway.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @ctx pointer to place the pointer to the resulting context in.
- * @ctxlen points to the place to put the length of the resulting context.
- * @dentry_create_files_as:
- * Compute a context for a dentry as the inode is not yet available
- * and set that context in passed in creds so that new files are
- * created using that context. Context is calculated using the
- * passed in creds and not the creds of the caller.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @old creds which should be used for context calculation
- * @new creds to modify
- *
- *
- * Security hooks for inode operations.
- *
- * @inode_alloc_security:
- * Allocate and attach a security structure to @inode->i_security. The
- * i_security field is initialized to NULL when the inode structure is
- * allocated.
- * @inode contains the inode structure.
- * Return 0 if operation was successful.
- * @inode_free_security:
- * @inode contains the inode structure.
- * Deallocate the inode security structure and set @inode->i_security to
- * NULL.
- * @inode_init_security:
- * Obtain the security attribute name suffix and value to set on a newly
- * created inode and set up the incore security field for the new inode.
- * This hook is called by the fs code as part of the inode creation
- * transaction and provides for atomic labeling of the inode, unlike
- * the post_create/mkdir/... hooks called by the VFS. The hook function
- * is expected to allocate the name and value via kmalloc, with the caller
- * being responsible for calling kfree after using them.
- * If the security module does not use security attributes or does
- * not wish to put a security attribute on this particular inode,
- * then it should return -EOPNOTSUPP to skip this processing.
- * @inode contains the inode structure of the newly created inode.
- * @dir contains the inode structure of the parent directory.
- * @qstr contains the last path component of the new object
- * @name will be set to the allocated name suffix (e.g. selinux).
- * @value will be set to the allocated attribute value.
- * @len will be set to the length of the value.
- * Returns 0 if @name and @value have been successfully set,
- * -EOPNOTSUPP if no security attribute is needed, or
- * -ENOMEM on memory allocation failure.
- * @inode_create:
- * Check permission to create a regular file.
- * @dir contains inode structure of the parent of the new file.
- * @dentry contains the dentry structure for the file to be created.
- * @mode contains the file mode of the file to be created.
- * Return 0 if permission is granted.
- * @inode_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing
- * link to the file.
- * @dir contains the inode structure of the parent directory
- * of the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @path_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing link
- * to the file.
- * @new_dir contains the path structure of the parent directory of
- * the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @inode_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the inode structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @path_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the path structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @inode_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the inode structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @path_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the path structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @inode_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with inode structure @dir.
- * @dir contains the inode structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @path_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with path structure @path.
- * @dir contains the path structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @inode_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the inode structure of parent of the directory
- * to be removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @path_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the path structure of parent of the directory to be
- * removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @inode_mknod:
- * Check permissions when creating a special file (or a socket or a fifo
- * file created via the mknod system call). Note that if the mknod operation
- * is being done for a regular file, then the create hook will be called
- * and not this hook.
- * @dir contains the inode structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the device number.
- * Return 0 if permission is granted.
- * @path_mknod:
- * Check permissions when creating a file. Note that this hook is called
- * even if the mknod operation is being done for a regular file.
- * @dir contains the path structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the undecoded device number. Use new_decode_dev() to get
- * the decoded device number.
- * Return 0 if permission is granted.
- * @inode_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the inode structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the inode structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the path structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the path structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_chmod:
- * Check for permission to change DAC's permission of a file or directory.
- * @dentry contains the dentry structure.
- * @mnt contains the vfsmnt structure.
- * @mode contains DAC's mode.
- * Return 0 if permission is granted.
- * @path_chown:
- * Check for permission to change owner/group of a file or directory.
- * @path contains the path structure.
- * @uid contains new owner's ID.
- * @gid contains new group's ID.
- * Return 0 if permission is granted.
- * @path_chroot:
- * Check for permission to change root directory.
- * @path contains the path structure.
- * Return 0 if permission is granted.
- * @inode_readlink:
- * Check the permission to read the symbolic link.
- * @dentry contains the dentry structure for the file link.
- * Return 0 if permission is granted.
- * @inode_follow_link:
- * Check permission to follow a symbolic link when looking up a pathname.
- * @dentry contains the dentry structure for the link.
- * @inode contains the inode, which itself is not stable in RCU-walk
- * @rcu indicates whether we are in RCU-walk mode.
- * Return 0 if permission is granted.
- * @inode_permission:
- * Check permission before accessing an inode. This hook is called by the
- * existing Linux permission function, so a security module can use it to
- * provide additional checking for existing Linux permission checks.
- * Notice that this hook is called when a file is opened (as well as many
- * other operations), whereas the file_security_ops permission hook is
- * called when the actual read/write operations are performed.
- * @inode contains the inode structure to check.
- * @mask contains the permission mask.
- * Return 0 if permission is granted.
- * @inode_setattr:
- * Check permission before setting file attributes. Note that the kernel
- * call to notify_change is performed from several locations, whenever
- * file attributes change (such as when a file is truncated, chown/chmod
- * operations, transferring disk quotas, etc).
- * @dentry contains the dentry structure for the file.
- * @attr is the iattr structure containing the new file attributes.
- * Return 0 if permission is granted.
- * @path_truncate:
- * Check permission before truncating a file.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_getattr:
- * Check permission before obtaining file attributes.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_setxattr:
- * Check permission before setting the extended attributes
- * @value identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_post_setxattr:
- * Update inode security field after successful setxattr operation.
- * @value identified by @name for @dentry.
- * @inode_getxattr:
- * Check permission before obtaining the extended attributes
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_listxattr:
- * Check permission before obtaining the list of extended attribute
- * names for @dentry.
- * Return 0 if permission is granted.
- * @inode_removexattr:
- * Check permission before removing the extended attribute
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_getsecurity:
- * Retrieve a copy of the extended attribute representation of the
- * security label associated with @name for @inode via @buffer. Note that
- * @name is the remainder of the attribute name after the security prefix
- * has been removed. @alloc is used to specify whether the call should return
- * a value via the buffer or just the value length. Return size of buffer on
- * success.
- * @inode_setsecurity:
- * Set the security label associated with @name for @inode from the
- * extended attribute value @value. @size indicates the size of the
- * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
- * Note that @name is the remainder of the attribute name after the
- * security. prefix has been removed.
- * Return 0 on success.
- * @inode_listsecurity:
- * Copy the extended attribute names for the security labels
- * associated with @inode into @buffer. The maximum size of @buffer
- * is specified by @buffer_size. @buffer may be NULL to request
- * the size of the buffer required.
- * Returns number of bytes used/required on success.
- * @inode_need_killpriv:
- * Called when an inode has been changed.
- * @dentry is the dentry being changed.
- * Return <0 on error to abort the inode change operation.
- * Return 0 if inode_killpriv does not need to be called.
- * Return >0 if inode_killpriv does need to be called.
- * @inode_killpriv:
- * The setuid bit is being removed. Remove similar security labels.
- * Called with the dentry->d_inode->i_mutex held.
- * @dentry is the dentry being changed.
- * Return 0 on success. If error is returned, then the operation
- * causing setuid bit removal is failed.
- * @inode_getsecid:
- * Get the secid associated with the node.
- * @inode contains a pointer to the inode.
- * @secid contains a pointer to the location where the result will be saved.
- * In case of failure, @secid will be set to zero.
- * @inode_copy_up:
- * A file is about to be copied up from lower layer to upper layer of
- * overlay filesystem. Security module can prepare a set of new creds
- * and modify as need be and return new creds. Caller will switch to
- * new creds temporarily to create new file and release newly allocated
- * creds.
- * @src indicates the union dentry of file that is being copied up.
- * @new pointer to pointer to return newly allocated creds.
- * Returns 0 on success or a negative error code on error.
- * @inode_copy_up_xattr:
- * Filter the xattrs being copied up when a unioned file is copied
- * up from a lower layer to the union/overlay layer.
- * @name indicates the name of the xattr.
- * Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP if
- * security module does not know about attribute or a negative error code
- * to abort the copy up. Note that the caller is responsible for reading
- * and writing the xattrs as this hook is merely a filter.
- *
- * Security hooks for file operations
- *
- * @file_permission:
- * Check file permissions before accessing an open file. This hook is
- * called by various operations that read or write files. A security
- * module can use this hook to perform additional checking on these
- * operations, e.g. to revalidate permissions on use to support privilege
- * bracketing or policy changes. Notice that this hook is used when the
- * actual read/write operations are performed, whereas the
- * inode_security_ops hook is called when a file is opened (as well as
- * many other operations).
- * Caveat: Although this hook can be used to revalidate permissions for
- * various system call operations that read or write files, it does not
- * address the revalidation of permissions for memory-mapped files.
- * Security modules must handle this separately if they need such
- * revalidation.
- * @file contains the file structure being accessed.
- * @mask contains the requested permissions.
- * Return 0 if permission is granted.
- * @file_alloc_security:
- * Allocate and attach a security structure to the file->f_security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @file contains the file structure to secure.
- * Return 0 if the hook is successful and permission is granted.
- * @file_free_security:
- * Deallocate and free any security structures stored in file->f_security.
- * @file contains the file structure being modified.
- * @file_ioctl:
- * @file contains the file structure.
- * @cmd contains the operation to perform.
- * @arg contains the operational arguments.
- * Check permission for an ioctl operation on @file. Note that @arg
- * sometimes represents a user space pointer; in other cases, it may be a
- * simple integer value. When @arg represents a user space pointer, it
- * should never be used by the security module.
- * Return 0 if permission is granted.
- * @mmap_addr:
- * Check permissions for a mmap operation at @addr.
- * @addr contains virtual address that will be used for the operation.
- * Return 0 if permission is granted.
- * @mmap_file:
- * Check permissions for a mmap operation. The @file may be NULL, e.g.
- * if mapping anonymous memory.
- * @file contains the file structure for file to map (may be NULL).
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @file_mprotect:
- * Check permissions before changing memory access permissions.
- * @vma contains the memory region to modify.
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * Return 0 if permission is granted.
- * @file_lock:
- * Check permission before performing file locking operations.
- * Note: this hook mediates both flock and fcntl style locks.
- * @file contains the file structure.
- * @cmd contains the posix-translated lock operation to perform
- * (e.g. F_RDLCK, F_WRLCK).
- * Return 0 if permission is granted.
- * @file_fcntl:
- * Check permission before allowing the file operation specified by @cmd
- * from being performed on the file @file. Note that @arg sometimes
- * represents a user space pointer; in other cases, it may be a simple
- * integer value. When @arg represents a user space pointer, it should
- * never be used by the security module.
- * @file contains the file structure.
- * @cmd contains the operation to be performed.
- * @arg contains the operational arguments.
- * Return 0 if permission is granted.
- * @file_set_fowner:
- * Save owner security information (typically from current->security) in
- * file->f_security for later use by the send_sigiotask hook.
- * @file contains the file structure to update.
- * Return 0 on success.
- * @file_send_sigiotask:
- * Check permission for the file owner @fown to send SIGIO or SIGURG to the
- * process @tsk. Note that this hook is sometimes called from interrupt.
- * Note that the fown_struct, @fown, is never outside the context of a
- * struct file, so the file structure (and associated security information)
- * can always be obtained: container_of(fown, struct file, f_owner)
- * @tsk contains the structure of task receiving signal.
- * @fown contains the file owner information.
- * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
- * Return 0 if permission is granted.
- * @file_receive:
- * This hook allows security modules to control the ability of a process
- * to receive an open file descriptor via socket IPC.
- * @file contains the file structure being received.
- * Return 0 if permission is granted.
- * @file_open:
- * Save open-time permission checking state for later use upon
- * file_permission, and recheck access if anything has changed
- * since inode_permission.
- *
- * Security hooks for task operations.
- *
- * @task_create:
- * Check permission before creating a child process. See the clone(2)
- * manual page for definitions of the @clone_flags.
- * @clone_flags contains the flags indicating what should be shared.
- * Return 0 if permission is granted.
- * @task_alloc:
- * @task task being allocated.
- * @clone_flags contains the flags indicating what should be shared.
- * Handle allocation of task-related resources.
- * Returns a zero on success, negative values on failure.
- * @task_free:
- * @task task about to be freed.
- * Handle release of task-related resources. (Note that this can be called
- * from interrupt context.)
- * @cred_alloc_blank:
- * @cred points to the credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Only allocate sufficient memory and attach to @cred such that
- * cred_transfer() will not get ENOMEM.
- * @cred_free:
- * @cred points to the credentials.
- * Deallocate and clear the cred->security field in a set of credentials.
- * @cred_prepare:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Prepare a new set of credentials by copying the data from the old set.
- * @cred_transfer:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * Transfer data from original creds to new creds
- * @kernel_act_as:
- * Set the credentials for a kernel service to act as (subjective context).
- * @new points to the credentials to be modified.
- * @secid specifies the security ID to be set
- * The current task must be the one that nominated @secid.
- * Return 0 if successful.
- * @kernel_create_files_as:
- * Set the file creation context in a set of credentials to be the same as
- * the objective context of the specified inode.
- * @new points to the credentials to be modified.
- * @inode points to the inode to use as a reference.
- * The current task must be the one that nominated @inode.
- * Return 0 if successful.
- * @kernel_module_request:
- * Ability to trigger the kernel to automatically upcall to userspace for
- * userspace to load a kernel module with the given name.
- * @kmod_name name of the module requested by the kernel
- * Return 0 if successful.
- * @kernel_read_file:
- * Read a file specified by userspace.
- * @file contains the file structure pointing to the file being read
- * by the kernel.
- * @id kernel read file identifier
- * Return 0 if permission is granted.
- * @kernel_post_read_file:
- * Read a file specified by userspace.
- * @file contains the file structure pointing to the file being read
- * by the kernel.
- * @buf pointer to buffer containing the file contents.
- * @size length of the file contents.
- * @id kernel read file identifier
- * Return 0 if permission is granted.
- * @task_fix_setuid:
- * Update the module's state after setting one or more of the user
- * identity attributes of the current process. The @flags parameter
- * indicates which of the set*uid system calls invoked this hook.
- * @new is the set of credentials that will be installed. Modifications
- * should be made to this rather than to @current->cred.
- * @old is the set of credentials that are being replaced.
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 on success.
- * @task_setpgid:
- * Check permission before setting the process group identifier of the
- * process @p to @pgid.
- * @p contains the task_struct for process being modified.
- * @pgid contains the new pgid.
- * Return 0 if permission is granted.
- * @task_getpgid:
- * Check permission before getting the process group identifier of the
- * process @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsid:
- * Check permission before getting the session identifier of the process
- * @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsecid:
- * Retrieve the security identifier of the process @p.
- * @p contains the task_struct for the process; the result is
- * placed into @secid.
- * In case of failure, @secid will be set to zero.
- *
- * @task_setnice:
- * Check permission before setting the nice value of @p to @nice.
- * @p contains the task_struct of process.
- * @nice contains the new nice value.
- * Return 0 if permission is granted.
- * @task_setioprio:
- * Check permission before setting the ioprio value of @p to @ioprio.
- * @p contains the task_struct of process.
- * @ioprio contains the new ioprio value
- * Return 0 if permission is granted.
- * @task_getioprio:
- * Check permission before getting the ioprio value of @p.
- * @p contains the task_struct of process.
- * Return 0 if permission is granted.
- * @task_prlimit:
- * Check permission before getting and/or setting the resource limits of
- * another task.
- * @cred points to the cred structure for the current task.
- * @tcred points to the cred structure for the target task.
- * @flags contains the LSM_PRLIMIT_* flag bits indicating whether the
- * resource limits are being read, modified, or both.
- * Return 0 if permission is granted.
- * @task_setrlimit:
- * Check permission before setting the resource limits of process @p
- * for @resource to @new_rlim. The old resource limit values can
- * be examined by dereferencing (p->signal->rlim + resource).
- * @p points to the task_struct for the target task's group leader.
- * @resource contains the resource whose limit is being set.
- * @new_rlim contains the new limits for @resource.
- * Return 0 if permission is granted.
- * @task_setscheduler:
- * Check permission before setting scheduling policy and/or parameters of
- * process @p based on @policy and @lp.
- * @p contains the task_struct for process.
- * @policy contains the scheduling policy.
- * @lp contains the scheduling parameters.
- * Return 0 if permission is granted.
- * @task_getscheduler:
- * Check permission before obtaining scheduling information for process
- * @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_movememory:
- * Check permission before moving memory owned by process @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_kill:
- * Check permission before sending signal @sig to @p. @info can be NULL,
- * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
- * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
- * from the kernel and should typically be permitted.
- * SIGIO signals are handled separately by the send_sigiotask hook in
- * file_security_ops.
- * @p contains the task_struct for process.
- * @info contains the signal information.
- * @sig contains the signal value.
- * @secid contains the sid of the process where the signal originated
- * Return 0 if permission is granted.
- * @task_prctl:
- * Check permission before performing a process control operation on the
- * current process.
- * @option contains the operation.
- * @arg2 contains an argument.
- * @arg3 contains an argument.
- * @arg4 contains an argument.
- * @arg5 contains an argument.
- * Return -ENOSYS if no-one wanted to handle this op, any other value to
- * cause prctl() to return immediately with that value.
- * @task_to_inode:
- * Set the security attributes for an inode based on an associated task's
- * security attributes, e.g. for /proc/pid inodes.
- * @p contains the task_struct for the task.
- * @inode contains the inode structure for the inode.
- *
- * Security hooks for Netlink messaging.
- *
- * @netlink_send:
- * Save security information for a netlink message so that permission
- * checking can be performed when the message is processed. The security
- * information can be saved using the eff_cap field of the
- * netlink_skb_parms structure. Also may be used to provide fine
- * grained control over message transmission.
- * @sk associated sock of task sending the message.
- * @skb contains the sk_buff structure for the netlink message.
- * Return 0 if the information was successfully saved and message
- * is allowed to be transmitted.
- *
- * Security hooks for Unix domain networking.
- *
- * @unix_stream_connect:
- * Check permissions before establishing a Unix domain stream connection
- * between @sock and @other.
- * @sock contains the sock structure.
- * @other contains the peer sock structure.
- * @newsk contains the new sock structure.
- * Return 0 if permission is granted.
- * @unix_may_send:
- * Check permissions before connecting or sending datagrams from @sock to
- * @other.
- * @sock contains the socket structure.
- * @other contains the peer socket structure.
- * Return 0 if permission is granted.
- *
- * The @unix_stream_connect and @unix_may_send hooks were necessary because
- * Linux provides an alternative to the conventional file name space for Unix
- * domain sockets. Whereas binding and connecting to sockets in the file name
- * space is mediated by the typical file permissions (and caught by the mknod
- * and permission hooks in inode_security_ops), binding and connecting to
- * sockets in the abstract name space is completely unmediated. Sufficient
- * control of Unix domain sockets in the abstract name space isn't possible
- * using only the socket layer hooks, since we need to know the actual target
- * socket, which is not looked up until we are inside the af_unix code.
- *
- * Security hooks for socket operations.
- *
- * @socket_create:
- * Check permissions prior to creating a new socket.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * Return 0 if permission is granted.
- * @socket_post_create:
- * This hook allows a module to update or allocate a per-socket security
- * structure. Note that the security field was not added directly to the
- * socket structure, but rather, the socket security information is stored
- * in the associated inode. Typically, the inode alloc_security hook will
- * allocate and attach security information to
- * sock->inode->i_security. This hook may be used to update the
- * sock->inode->i_security field with additional information that wasn't
- * available when the inode was allocated.
- * @sock contains the newly created socket structure.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * @socket_bind:
- * Check permission before socket protocol layer bind operation is
- * performed and the socket @sock is bound to the address specified in the
- * @address parameter.
- * @sock contains the socket structure.
- * @address contains the address to bind to.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_connect:
- * Check permission before socket protocol layer connect operation
- * attempts to connect socket @sock to a remote address, @address.
- * @sock contains the socket structure.
- * @address contains the address of remote endpoint.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_listen:
- * Check permission before socket protocol layer listen operation.
- * @sock contains the socket structure.
- * @backlog contains the maximum length for the pending connection queue.
- * Return 0 if permission is granted.
- * @socket_accept:
- * Check permission before accepting a new connection. Note that the new
- * socket, @newsock, has been created and some information copied to it,
- * but the accept operation has not actually been performed.
- * @sock contains the listening socket structure.
- * @newsock contains the newly created server socket for connection.
- * Return 0 if permission is granted.
- * @socket_sendmsg:
- * Check permission before transmitting a message to another socket.
- * @sock contains the socket structure.
- * @msg contains the message to be transmitted.
- * @size contains the size of message.
- * Return 0 if permission is granted.
- * @socket_recvmsg:
- * Check permission before receiving a message from a socket.
- * @sock contains the socket structure.
- * @msg contains the message structure.
- * @size contains the size of message structure.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @socket_getsockname:
- * Check permission before the local address (name) of the socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getpeername:
- * Check permission before the remote address (name) of a socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getsockopt:
- * Check permissions before retrieving the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to retrieve option from.
- * @optname contains the name of option to retrieve.
- * Return 0 if permission is granted.
- * @socket_setsockopt:
- * Check permissions before setting the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to set options for.
- * @optname contains the name of the option to set.
- * Return 0 if permission is granted.
- * @socket_shutdown:
- * Checks permission before all or part of a connection on the socket
- * @sock is shut down.
- * @sock contains the socket structure.
- * @how contains the flag indicating how future sends and receives
- * are handled.
- * Return 0 if permission is granted.
- * @socket_sock_rcv_skb:
- * Check permissions on incoming network packets. This hook is distinct
- * from Netfilter's IP input hooks since it is the first time that the
- * incoming sk_buff @skb has been associated with a particular socket, @sk.
- * Must not sleep inside this hook because some callers hold spinlocks.
- * @sk contains the sock (not socket) associated with the incoming sk_buff.
- * @skb contains the incoming network data.
- * @socket_getpeersec_stream:
- * This hook allows the security module to provide peer socket security
- * state for unix or connected tcp sockets to userspace via getsockopt
- * SO_GETPEERSEC. For tcp sockets this can be meaningful if the
- * socket is associated with an ipsec SA.
- * @sock is the local socket.
- * @optval userspace memory where the security state is to be copied.
- * @optlen userspace int where the module should copy the actual length
- * of the security state.
- * @len as input is the maximum length to copy to userspace provided
- * by the caller.
- * Return 0 if all is well, otherwise, typical getsockopt return
- * values.
- * @socket_getpeersec_dgram:
- * This hook allows the security module to provide peer socket security
- * state for udp sockets on a per-packet basis to userspace via
- * getsockopt SO_GETPEERSEC. The application must first have indicated
- * the IP_PASSSEC option via setsockopt. It can then retrieve the
- * security state returned by this hook for a packet via the SCM_SECURITY
- * ancillary message type.
- * @skb is the skbuff for the packet being queried
- * @secdata is a pointer to a buffer in which to copy the security data
- * @seclen is the maximum length for @secdata
- * Return 0 on success, error on failure.
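[Editorial aside: a hedged userspace sketch of the retrieval protocol described in the @socket_getpeersec_dgram entry above — enable IP_PASSSEC with setsockopt(), then pull the per-packet context out of the SCM_SECURITY ancillary message. glibc may not expose SCM_SECURITY, hence the fallback define taken from include/linux/socket.h.]

        #include <stdio.h>
        #include <sys/socket.h>
        #include <netinet/in.h>

        #ifndef SCM_SECURITY
        #define SCM_SECURITY 0x03       /* from include/linux/socket.h */
        #endif

        /* Receive one datagram on @fd and print the sender's security
         * context, if the kernel attached one. Sketch only; error
         * handling is minimal. */
        static void print_peer_context(int fd)
        {
                char data[512], cbuf[CMSG_SPACE(256)];
                struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
                struct msghdr msg = {
                        .msg_iov = &iov, .msg_iovlen = 1,
                        .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
                };
                struct cmsghdr *cmsg;
                int one = 1;

                setsockopt(fd, SOL_IP, IP_PASSSEC, &one, sizeof(one));
                if (recvmsg(fd, &msg, 0) < 0)
                        return;

                for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
                        if (cmsg->cmsg_level == SOL_IP && cmsg->cmsg_type == SCM_SECURITY)
                                printf("peer context: %.*s\n",
                                       (int)(cmsg->cmsg_len - CMSG_LEN(0)),
                                       (char *)CMSG_DATA(cmsg));
        }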
- * @sk_alloc_security:
- * Allocate and attach a security structure to the sk->sk_security field,
- * which is used to copy security attributes between local stream sockets.
- * @sk_free_security:
- * Deallocate security structure.
- * @sk_clone_security:
- * Clone/copy security structure.
- * @sk_getsecid:
- * Retrieve the LSM-specific secid for the sock to enable caching
- * of network authorizations.
- * @sock_graft:
- * Sets the socket's isec sid to the sock's sid.
- * @inet_conn_request:
- * Sets the openreq's sid to socket's sid with MLS portion taken
- * from peer sid.
- * @inet_csk_clone:
- * Sets the new child socket's sid to the openreq sid.
- * @inet_conn_established:
- * Sets the connection's peersid to the secmark on skb.
- * @secmark_relabel_packet:
- * Check if the process should be allowed to relabel packets to
- * the given secid.
- * @secmark_refcount_inc:
- * Tell the LSM to increment the number of secmark labeling rules loaded.
- * @secmark_refcount_dec:
- * Tell the LSM to decrement the number of secmark labeling rules loaded.
- * @req_classify_flow:
- * Sets the flow's sid to the openreq sid.
- * @tun_dev_alloc_security:
- * This hook allows a module to allocate a security structure for a TUN
- * device.
- * @security pointer to a security structure pointer.
- * Returns a zero on success, negative values on failure.
- * @tun_dev_free_security:
- * This hook allows a module to free the security structure for a TUN
- * device.
- * @security pointer to the TUN device's security structure
- * @tun_dev_create:
- * Check permissions prior to creating a new TUN device.
- * @tun_dev_attach_queue:
- * Check permissions prior to attaching to a TUN device queue.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_attach:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's sock structure.
- * @sk contains the existing sock structure.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_open:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's security structure.
- * @security pointer to the TUN device's security structure.
- *
- * Security hooks for Infiniband
- *
- * @ib_pkey_access:
- * Check permission to access a pkey when modifying a QP.
- * @subnet_prefix the subnet prefix of the port being used.
- * @pkey the pkey to be accessed.
- * @sec pointer to a security structure.
- * @ib_endport_manage_subnet:
- * Check permissions to send and receive SMPs on an end port.
- * @dev_name the IB device name (e.g. mlx4_0).
- * @port_num the port number.
- * @sec pointer to a security structure.
- * @ib_alloc_security:
- * Allocate a security structure for Infiniband objects.
- * @sec pointer to a security structure pointer.
- * Returns 0 on success, non-zero on failure
- * @ib_free_security:
- * Deallocate an Infiniband security structure.
- * @sec contains the security structure to be freed.
- *
- * Security hooks for XFRM operations.
- *
- * @xfrm_policy_alloc_security:
- * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
- * Database used by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level policy update program (e.g., setkey).
- * Allocate a security structure to the xp->security field; the security
- * field is initialized to NULL when the xfrm_policy is allocated.
- * @gfp specifies the GFP context for the allocation.
- * Return 0 if operation was successful (memory to allocate, legal context).
- * @xfrm_policy_clone_security:
- * @old_ctx contains an existing xfrm_sec_ctx.
- * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
- * Allocate a security structure in new_ctxp that contains the
- * information from the old_ctx structure.
- * Return 0 if operation was successful (memory to allocate).
- * @xfrm_policy_free_security:
- * @ctx contains the xfrm_sec_ctx
- * Deallocate xp->security.
- * @xfrm_policy_delete_security:
- * @ctx contains the xfrm_sec_ctx.
- * Authorize deletion of xp->security.
- * @xfrm_state_alloc:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level SA generation program (e.g., setkey or racoon).
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to sec_ctx. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_alloc_acquire:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @polsec contains the policy's security context.
- * @secid contains the secid from which to take the mls portion of the
- * context.
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to secid. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_free_security:
- * @x contains the xfrm_state.
- * Deallocate x->security.
- * @xfrm_state_delete_security:
- * @x contains the xfrm_state.
- * Authorize deletion of x->security.
- * @xfrm_policy_lookup:
- * @ctx contains the xfrm_sec_ctx for which the access control is being
- * checked.
- * @fl_secid contains the flow security label that is used to authorize
- * access to the policy xp.
- * @dir contains the direction of the flow (input or output).
- * Check permission when a flow selects a xfrm_policy for processing
- * XFRMs on a packet. The hook is called when selecting either a
- * per-socket policy or a generic xfrm policy.
- * Return 0 if permission is granted, -ESRCH otherwise, or -errno
- * on other errors.
- * @xfrm_state_pol_flow_match:
- * @x contains the state to match.
- * @xp contains the policy to check for a match.
- * @fl contains the flow to check for a match.
- * Return 1 if there is a match.
- * @xfrm_decode_session:
- * @skb points to skb to decode.
- * @secid points to the flow key secid to set.
- * @ckall says whether all xfrms used should be checked for the same secid.
- * Return 0 if ckall is zero or all xfrms used have the same secid.
- *
- * Security hooks affecting all Key Management operations
- *
- * @key_alloc:
- * Permit allocation of a key and assign security data. Note that the key
- * does not have a serial number assigned at this point.
- * @key points to the key.
- * @flags is the allocation flags
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_free:
- * Notification of destruction; free security data.
- * @key points to the key.
- * No return value.
- * @key_permission:
- * See whether a specific operational right is granted to a process on a
- * key.
- * @key_ref refers to the key (key pointer + possession attribute bit).
- * @cred points to the credentials to provide the context against which to
- * evaluate the security data on the key.
- * @perm describes the combination of permissions required of this key.
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_getsecurity:
- * Get a textual representation of the security context attached to a key
- * for the purposes of honouring KEYCTL_GETSECURITY. This function
- * allocates the storage for the NUL-terminated string and the caller
- * should free it.
- * @key points to the key to be queried.
- * @_buffer points to a pointer that should be set to point to the
- * resulting string (unless there is no label or an error occurs).
- * Return the length of the string (including terminating NUL) or -ve if
- * an error.
- * May also return 0 (and a NULL buffer pointer) if there is no label.
- *
- * Security hooks affecting all System V IPC operations.
- *
- * @ipc_permission:
- * Check permissions for access to IPC
- * @ipcp contains the kernel IPC permission structure
- * @flag contains the desired (requested) permission set
- * Return 0 if permission is granted.
- * @ipc_getsecid:
- * Get the secid associated with the ipc object.
- * @ipcp contains the kernel IPC permission structure.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- *
- * Security hooks for individual messages held in System V IPC message queues
- * @msg_msg_alloc_security:
- * Allocate and attach a security structure to the msg->security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @msg contains the message structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_msg_free_security:
- * Deallocate the security structure for this message.
- * @msg contains the message structure to be modified.
- *
- * Security hooks for System V IPC Message Queues
- *
- * @msg_queue_alloc_security:
- * Allocate and attach a security structure to the
- * msq->q_perm.security field. The security field is initialized to
- * NULL when the structure is first created.
- * @msq contains the message queue structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_queue_free_security:
- * Deallocate security structure for this message queue.
- * @msq contains the message queue structure to be modified.
- * @msg_queue_associate:
- * Check permission when a message queue is requested through the
- * msgget system call. This hook is only called when returning the
- * message queue identifier for an existing message queue, not when a
- * new message queue is created.
- * @msq contains the message queue to act upon.
- * @msqflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgctl:
- * Check permission when a message control operation specified by @cmd
- * is to be performed on the message queue @msq.
- * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
- * @msq contains the message queue to act upon. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @msg_queue_msgsnd:
- * Check permission before a message, @msg, is enqueued on the message
- * queue, @msq.
- * @msq contains the message queue to send message to.
- * @msg contains the message to be enqueued.
- * @msqflg contains operational flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgrcv:
- * Check permission before a message, @msg, is removed from the message
- * queue, @msq. The @target task structure contains a pointer to the
- * process that will be receiving the message (not equal to the current
- * process when inline receives are being performed).
- * @msq contains the message queue to retrieve message from.
- * @msg contains the message destination.
- * @target contains the task structure for recipient process.
- * @type contains the type of message requested.
- * @mode contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Shared Memory Segments
- *
- * @shm_alloc_security:
- * Allocate and attach a security structure to the shp->shm_perm.security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @shp contains the shared memory structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @shm_free_security:
- * Deallocate the security struct for this memory segment.
- * @shp contains the shared memory structure to be modified.
- * @shm_associate:
- * Check permission when a shared memory region is requested through the
- * shmget system call. This hook is only called when returning the shared
- * memory region identifier for an existing region, not when a new shared
- * memory region is created.
- * @shp contains the shared memory structure to be modified.
- * @shmflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @shm_shmctl:
- * Check permission when a shared memory control operation specified by
- * @cmd is to be performed on the shared memory region @shp.
- * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
- * @shp contains shared memory structure to be modified.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @shm_shmat:
- * Check permissions prior to allowing the shmat system call to attach the
- * shared memory segment @shp to the data segment of the calling process.
- * The attaching address is specified by @shmaddr.
- * @shp contains the shared memory structure to be modified.
- * @shmaddr contains the address to attach memory region to.
- * @shmflg contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Semaphores
- *
- * @sem_alloc_security:
- * Allocate and attach a security structure to the sma->sem_perm.security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @sma contains the semaphore structure
- * Return 0 if operation was successful and permission is granted.
- * @sem_free_security:
- * Deallocate security struct for this semaphore.
- * @sma contains the semaphore structure.
- * @sem_associate:
- * Check permission when a semaphore is requested through the semget
- * system call. This hook is only called when returning the semaphore
- * identifier for an existing semaphore, not when a new one must be
- * created.
- * @sma contains the semaphore structure.
- * @semflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @sem_semctl:
- * Check permission when a semaphore operation specified by @cmd is to be
- * performed on the semaphore @sma. The @sma may be NULL, e.g. for
- * IPC_INFO or SEM_INFO.
- * @sma contains the semaphore structure. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @sem_semop:
- * Check permissions before performing operations on members of the
- * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
- * may be modified.
- * @sma contains the semaphore structure.
- * @sops contains the operations to perform.
- * @nsops contains the number of operations to perform.
- * @alter contains the flag indicating whether changes are to be made.
- * Return 0 if permission is granted.
- *
- * @binder_set_context_mgr:
- * Check whether @mgr is allowed to be the binder context manager.
- * @mgr contains the task_struct for the task being registered.
- * Return 0 if permission is granted.
- * @binder_transaction:
- * Check whether @from is allowed to invoke a binder transaction call
- * to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_binder:
- * Check whether @from is allowed to transfer a binder reference to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_file:
- * Check whether @from is allowed to transfer @file to @to.
- * @from contains the task_struct for the sending task.
- * @file contains the struct file being transferred.
- * @to contains the task_struct for the receiving task.
- *
- * @ptrace_access_check:
- * Check permission before allowing the current process to trace the
- * @child process.
- * Security modules may also want to perform a process tracing check
- * during an execve in the bprm_set_creds hook of binprm_security_ops
- * if the process is being traced and its security attributes would be
- * changed by the execve.
- * @child contains the task_struct structure for the target process.
- * @mode contains the PTRACE_MODE flags indicating the form of access.
- * Return 0 if permission is granted.
- * @ptrace_traceme:
- * Check that the @parent process has sufficient permission to trace the
- * current process before allowing the current process to present itself
- * to the @parent process for tracing.
- * @parent contains the task_struct structure for debugger process.
- * Return 0 if permission is granted.
- * @capget:
- * Get the @effective, @inheritable, and @permitted capability sets for
- * the @target process. The hook may also perform permission checking to
- * determine if the current process is allowed to see the capability sets
- * of the @target process.
- * @target contains the task_struct structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 if the capability sets were successfully obtained.
- * @capset:
- * Set the @effective, @inheritable, and @permitted capability sets for
- * the current process.
- * @new contains the new credentials structure for target process.
- * @old contains the current credentials structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 and update @new if permission is granted.
- * @capable:
- * Check whether the given process has the @cap capability in the
- * indicated credentials.
- * @cred contains the credentials to use.
- * @ns contains the user namespace we want the capability in.
- * @cap contains the capability <include/linux/capability.h>.
- * @audit contains whether to write an audit message or not.
- * Return 0 if the capability is granted for @cred.
- * @syslog:
- * Check permission before accessing the kernel message ring or changing
- * logging to the console.
- * See the syslog(2) manual page for an explanation of the @type values.
- * @type contains the type of action.
- * @from_file indicates the context of action (if it came from /proc).
- * Return 0 if permission is granted.
- * @settime:
- * Check permission to change the system time.
- * struct timespec64 is defined in include/linux/time64.h and timezone
- * is defined in include/linux/time.h
- * @ts contains new time
- * @tz contains new timezone
- * Return 0 if permission is granted.
- * @vm_enough_memory:
- * Check permissions for allocating a new virtual mapping.
- * @mm contains the mm struct it is being added to.
- * @pages contains the number of pages.
- * Return 0 if permission is granted.
- *
- * @ismaclabel:
- * Check if the extended attribute specified by @name
- * represents a MAC label. Returns 1 if @name is a MAC
- * attribute, otherwise returns 0.
- * @name full extended attribute name to check against
- * LSM as a MAC label.
- *
- * @secid_to_secctx:
- * Convert secid to security context. If secdata is NULL the length of
- * the result will be returned in seclen, but no secdata will be returned.
- * This does mean that the length could change between calls to check the
- * length and the next call which actually allocates and returns the
- * secdata.
- * @secid contains the security ID.
- * @secdata contains the pointer that stores the converted security
- * context.
- * @seclen pointer which contains the length of the data
- * @secctx_to_secid:
- * Convert security context to secid.
- * @secid contains the pointer to the generated security ID.
- * @secdata contains the security context.
- *
- * @release_secctx:
- * Release the security context.
- * @secdata contains the security context.
- * @seclen contains the length of the security context.
- *
- * Security hooks for Audit
- *
- * @audit_rule_init:
- * Allocate and initialize an LSM audit rule structure.
- * @field contains the required Audit action.
- * Field flags are defined in include/linux/audit.h.
- * @op contains the operator the rule uses.
- * @rulestr contains the context where the rule will be applied to.
- * @lsmrule contains a pointer to receive the result.
- * Return 0 if @lsmrule has been successfully set,
- * -EINVAL in case of an invalid rule.
- *
- * @audit_rule_known:
- * Specifies whether given @rule contains any fields related to
- * current LSM.
- * @rule contains the audit rule of interest.
- * Return 1 in case of relation found, 0 otherwise.
- *
- * @audit_rule_match:
- * Determine if given @secid matches a rule previously approved
- * by @audit_rule_known.
- * @secid contains the security id in question.
- * @field contains the field which relates to current LSM.
- * @op contains the operator that will be used for matching.
- * @rule points to the audit rule that will be checked against.
- * @actx points to the audit context associated with the check.
- * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
- *
- * @audit_rule_free:
- * Deallocate the LSM audit rule structure previously allocated by
- * audit_rule_init.
- * @rule contains the allocated rule
- *
- * @inode_invalidate_secctx:
- * Notify the security module that it must revalidate the security context
- * of an inode.
- *
- * @inode_notifysecctx:
- * Notify the security module of what the security context of an inode
- * should be. Initializes the incore security context managed by the
- * security module for this inode. Example usage: NFS client invokes
- * this hook to initialize the security context in its incore inode to the
- * value provided by the server for the file when the server returned the
- * file's attributes to the client.
- *
- * Must be called with inode->i_mutex locked.
- *
- * @inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_setsecctx:
- * Change the security context of an inode. Updates the
- * incore security context managed by the security module and invokes the
- * fs code as needed (via __vfs_setxattr_noperm) to update any backing
- * xattrs that represent the context. Example usage: NFS server invokes
- * this hook to change the security context in its incore inode and on the
- * backing filesystem to a value provided by the client on a SETATTR
- * operation.
- *
- * Must be called with inode->i_mutex locked.
- *
- * @dentry contains the inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_getsecctx:
- * On success, returns 0 and fills out @ctx and @ctxlen with the security
- * context for the given @inode.
- *
- * @inode we wish to get the security context of.
- * @ctx is a pointer in which to place the allocated security context.
- * @ctxlen points to the place to put the length of @ctx.
- */
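[Editorial aside: each LSM supplies its implementations of the hooks documented above as a table of security_hook_list entries. A minimal, hedged sketch follows — the example_* names are hypothetical, while LSM_HOOK_INIT() and security_add_hooks() are the real interfaces, whose registration signature has varied across kernel versions.]

        /* Hypothetical LSM: grant everything, but hook two operations. */
        static int example_file_permission(struct file *file, int mask)
        {
                return 0;       /* 0 == permission granted */
        }

        static int example_task_setnice(struct task_struct *p, int nice)
        {
                return 0;
        }

        static struct security_hook_list example_hooks[] __ro_after_init = {
                LSM_HOOK_INIT(file_permission, example_file_permission),
                LSM_HOOK_INIT(task_setnice, example_task_setnice),
        };

        static int __init example_init(void)
        {
                /* Newer kernels pass a struct lsm_id here instead of a name. */
                security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
                                   "example");
                return 0;
        }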
union security_list_options {
- int (*binder_set_context_mgr)(struct task_struct *mgr);
- int (*binder_transaction)(struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_binder)(struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_file)(struct task_struct *from,
- struct task_struct *to,
- struct file *file);
-
- int (*ptrace_access_check)(struct task_struct *child,
- unsigned int mode);
- int (*ptrace_traceme)(struct task_struct *parent);
- int (*capget)(struct task_struct *target, kernel_cap_t *effective,
- kernel_cap_t *inheritable, kernel_cap_t *permitted);
- int (*capset)(struct cred *new, const struct cred *old,
- const kernel_cap_t *effective,
- const kernel_cap_t *inheritable,
- const kernel_cap_t *permitted);
- int (*capable)(const struct cred *cred, struct user_namespace *ns,
- int cap, int audit);
- int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
- int (*quota_on)(struct dentry *dentry);
- int (*syslog)(int type);
- int (*settime)(const struct timespec64 *ts, const struct timezone *tz);
- int (*vm_enough_memory)(struct mm_struct *mm, long pages);
-
- int (*bprm_set_creds)(struct linux_binprm *bprm);
- int (*bprm_check_security)(struct linux_binprm *bprm);
- int (*bprm_secureexec)(struct linux_binprm *bprm);
- void (*bprm_committing_creds)(struct linux_binprm *bprm);
- void (*bprm_committed_creds)(struct linux_binprm *bprm);
-
- int (*sb_alloc_security)(struct super_block *sb);
- void (*sb_free_security)(struct super_block *sb);
- int (*sb_copy_data)(char *orig, char *copy);
- int (*sb_remount)(struct super_block *sb, void *data);
- int (*sb_kern_mount)(struct super_block *sb, int flags, void *data);
- int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
- int (*sb_statfs)(struct dentry *dentry);
- int (*sb_mount)(const char *dev_name, const struct path *path,
- const char *type, unsigned long flags, void *data);
- int (*sb_umount)(struct vfsmount *mnt, int flags);
- int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path);
- int (*sb_set_mnt_opts)(struct super_block *sb,
- struct security_mnt_opts *opts,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_clone_mnt_opts)(const struct super_block *oldsb,
- struct super_block *newsb,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts);
- int (*dentry_init_security)(struct dentry *dentry, int mode,
- const struct qstr *name, void **ctx,
- u32 *ctxlen);
- int (*dentry_create_files_as)(struct dentry *dentry, int mode,
- struct qstr *name,
- const struct cred *old,
- struct cred *new);
-
-
-#ifdef CONFIG_SECURITY_PATH
- int (*path_unlink)(const struct path *dir, struct dentry *dentry);
- int (*path_mkdir)(const struct path *dir, struct dentry *dentry,
- umode_t mode);
- int (*path_rmdir)(const struct path *dir, struct dentry *dentry);
- int (*path_mknod)(const struct path *dir, struct dentry *dentry,
- umode_t mode, unsigned int dev);
- int (*path_truncate)(const struct path *path);
- int (*path_symlink)(const struct path *dir, struct dentry *dentry,
- const char *old_name);
- int (*path_link)(struct dentry *old_dentry, const struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry,
- const struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_chmod)(const struct path *path, umode_t mode);
- int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid);
- int (*path_chroot)(const struct path *path);
-#endif
-
- int (*inode_alloc_security)(struct inode *inode);
- void (*inode_free_security)(struct inode *inode);
- int (*inode_init_security)(struct inode *inode, struct inode *dir,
- const struct qstr *qstr,
- const char **name, void **value,
- size_t *len);
- int (*inode_create)(struct inode *dir, struct dentry *dentry,
- umode_t mode);
- int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry);
- int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
- int (*inode_symlink)(struct inode *dir, struct dentry *dentry,
- const char *old_name);
- int (*inode_mkdir)(struct inode *dir, struct dentry *dentry,
- umode_t mode);
- int (*inode_rmdir)(struct inode *dir, struct dentry *dentry);
- int (*inode_mknod)(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t dev);
- int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir,
- struct dentry *new_dentry);
- int (*inode_readlink)(struct dentry *dentry);
- int (*inode_follow_link)(struct dentry *dentry, struct inode *inode,
- bool rcu);
- int (*inode_permission)(struct inode *inode, int mask);
- int (*inode_setattr)(struct dentry *dentry, struct iattr *attr);
- int (*inode_getattr)(const struct path *path);
- int (*inode_setxattr)(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
- void (*inode_post_setxattr)(struct dentry *dentry, const char *name,
- const void *value, size_t size,
- int flags);
- int (*inode_getxattr)(struct dentry *dentry, const char *name);
- int (*inode_listxattr)(struct dentry *dentry);
- int (*inode_removexattr)(struct dentry *dentry, const char *name);
- int (*inode_need_killpriv)(struct dentry *dentry);
- int (*inode_killpriv)(struct dentry *dentry);
- int (*inode_getsecurity)(struct inode *inode, const char *name,
- void **buffer, bool alloc);
- int (*inode_setsecurity)(struct inode *inode, const char *name,
- const void *value, size_t size,
- int flags);
- int (*inode_listsecurity)(struct inode *inode, char *buffer,
- size_t buffer_size);
- void (*inode_getsecid)(struct inode *inode, u32 *secid);
- int (*inode_copy_up)(struct dentry *src, struct cred **new);
- int (*inode_copy_up_xattr)(const char *name);
-
- int (*file_permission)(struct file *file, int mask);
- int (*file_alloc_security)(struct file *file);
- void (*file_free_security)(struct file *file);
- int (*file_ioctl)(struct file *file, unsigned int cmd,
- unsigned long arg);
- int (*mmap_addr)(unsigned long addr);
- int (*mmap_file)(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags);
- int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot,
- unsigned long prot);
- int (*file_lock)(struct file *file, unsigned int cmd);
- int (*file_fcntl)(struct file *file, unsigned int cmd,
- unsigned long arg);
- void (*file_set_fowner)(struct file *file);
- int (*file_send_sigiotask)(struct task_struct *tsk,
- struct fown_struct *fown, int sig);
- int (*file_receive)(struct file *file);
- int (*file_open)(struct file *file, const struct cred *cred);
-
- int (*task_create)(unsigned long clone_flags);
- int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
- void (*task_free)(struct task_struct *task);
- int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
- void (*cred_free)(struct cred *cred);
- int (*cred_prepare)(struct cred *new, const struct cred *old,
- gfp_t gfp);
- void (*cred_transfer)(struct cred *new, const struct cred *old);
- int (*kernel_act_as)(struct cred *new, u32 secid);
- int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
- int (*kernel_module_request)(char *kmod_name);
- int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
- int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
- enum kernel_read_file_id id);
- int (*task_fix_setuid)(struct cred *new, const struct cred *old,
- int flags);
- int (*task_setpgid)(struct task_struct *p, pid_t pgid);
- int (*task_getpgid)(struct task_struct *p);
- int (*task_getsid)(struct task_struct *p);
- void (*task_getsecid)(struct task_struct *p, u32 *secid);
- int (*task_setnice)(struct task_struct *p, int nice);
- int (*task_setioprio)(struct task_struct *p, int ioprio);
- int (*task_getioprio)(struct task_struct *p);
- int (*task_prlimit)(const struct cred *cred, const struct cred *tcred,
- unsigned int flags);
- int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
- struct rlimit *new_rlim);
- int (*task_setscheduler)(struct task_struct *p);
- int (*task_getscheduler)(struct task_struct *p);
- int (*task_movememory)(struct task_struct *p);
- int (*task_kill)(struct task_struct *p, struct siginfo *info,
- int sig, u32 secid);
- int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5);
- void (*task_to_inode)(struct task_struct *p, struct inode *inode);
-
- int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag);
- void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid);
-
- int (*msg_msg_alloc_security)(struct msg_msg *msg);
- void (*msg_msg_free_security)(struct msg_msg *msg);
-
- int (*msg_queue_alloc_security)(struct msg_queue *msq);
- void (*msg_queue_free_security)(struct msg_queue *msq);
- int (*msg_queue_associate)(struct msg_queue *msq, int msqflg);
- int (*msg_queue_msgctl)(struct msg_queue *msq, int cmd);
- int (*msg_queue_msgsnd)(struct msg_queue *msq, struct msg_msg *msg,
- int msqflg);
- int (*msg_queue_msgrcv)(struct msg_queue *msq, struct msg_msg *msg,
- struct task_struct *target, long type,
- int mode);
-
- int (*shm_alloc_security)(struct shmid_kernel *shp);
- void (*shm_free_security)(struct shmid_kernel *shp);
- int (*shm_associate)(struct shmid_kernel *shp, int shmflg);
- int (*shm_shmctl)(struct shmid_kernel *shp, int cmd);
- int (*shm_shmat)(struct shmid_kernel *shp, char __user *shmaddr,
- int shmflg);
-
- int (*sem_alloc_security)(struct sem_array *sma);
- void (*sem_free_security)(struct sem_array *sma);
- int (*sem_associate)(struct sem_array *sma, int semflg);
- int (*sem_semctl)(struct sem_array *sma, int cmd);
- int (*sem_semop)(struct sem_array *sma, struct sembuf *sops,
- unsigned nsops, int alter);
-
- int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
-
- void (*d_instantiate)(struct dentry *dentry, struct inode *inode);
-
- int (*getprocattr)(struct task_struct *p, char *name, char **value);
- int (*setprocattr)(const char *name, void *value, size_t size);
- int (*ismaclabel)(const char *name);
- int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
- int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
- void (*release_secctx)(char *secdata, u32 seclen);
-
- void (*inode_invalidate_secctx)(struct inode *inode);
- int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
- int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
- int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
-
-#ifdef CONFIG_SECURITY_NETWORK
- int (*unix_stream_connect)(struct sock *sock, struct sock *other,
- struct sock *newsk);
- int (*unix_may_send)(struct socket *sock, struct socket *other);
-
- int (*socket_create)(int family, int type, int protocol, int kern);
- int (*socket_post_create)(struct socket *sock, int family, int type,
- int protocol, int kern);
- int (*socket_bind)(struct socket *sock, struct sockaddr *address,
- int addrlen);
- int (*socket_connect)(struct socket *sock, struct sockaddr *address,
- int addrlen);
- int (*socket_listen)(struct socket *sock, int backlog);
- int (*socket_accept)(struct socket *sock, struct socket *newsock);
- int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg,
- int size);
- int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg,
- int size, int flags);
- int (*socket_getsockname)(struct socket *sock);
- int (*socket_getpeername)(struct socket *sock);
- int (*socket_getsockopt)(struct socket *sock, int level, int optname);
- int (*socket_setsockopt)(struct socket *sock, int level, int optname);
- int (*socket_shutdown)(struct socket *sock, int how);
- int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb);
- int (*socket_getpeersec_stream)(struct socket *sock,
- char __user *optval,
- int __user *optlen, unsigned len);
- int (*socket_getpeersec_dgram)(struct socket *sock,
- struct sk_buff *skb, u32 *secid);
- int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority);
- void (*sk_free_security)(struct sock *sk);
- void (*sk_clone_security)(const struct sock *sk, struct sock *newsk);
- void (*sk_getsecid)(struct sock *sk, u32 *secid);
- void (*sock_graft)(struct sock *sk, struct socket *parent);
- int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req);
- void (*inet_csk_clone)(struct sock *newsk,
- const struct request_sock *req);
- void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
- int (*secmark_relabel_packet)(u32 secid);
- void (*secmark_refcount_inc)(void);
- void (*secmark_refcount_dec)(void);
- void (*req_classify_flow)(const struct request_sock *req,
- struct flowi *fl);
- int (*tun_dev_alloc_security)(void **security);
- void (*tun_dev_free_security)(void *security);
- int (*tun_dev_create)(void);
- int (*tun_dev_attach_queue)(void *security);
- int (*tun_dev_attach)(struct sock *sk, void *security);
- int (*tun_dev_open)(void *security);
-#endif /* CONFIG_SECURITY_NETWORK */
-
-#ifdef CONFIG_SECURITY_INFINIBAND
- int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey);
- int (*ib_endport_manage_subnet)(void *sec, const char *dev_name,
- u8 port_num);
- int (*ib_alloc_security)(void **sec);
- void (*ib_free_security)(void *sec);
-#endif /* CONFIG_SECURITY_INFINIBAND */
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
+ #include "lsm_hook_defs.h"
+ #undef LSM_HOOK
+ void *lsm_func_addr;
+};
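[Editorial aside: to see what this expansion yields, take one real entry from lsm_hook_defs.h; each LSM_HOOK() line turns into one function-pointer member of the union.]

        /* In lsm_hook_defs.h: */
        LSM_HOOK(int, 0, file_permission, struct file *file, int mask)

        /* After the #define above, the union member this generates is: */
        int (*file_permission)(struct file *file, int mask);

This keeps the union in lockstep with the single list of hooks in lsm_hook_defs.h, rather than maintaining hundreds of hand-written members as before.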
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx,
- gfp_t gfp);
- int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx,
- struct xfrm_sec_ctx **new_ctx);
- void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx);
- int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx);
- int (*xfrm_state_alloc)(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx);
- int (*xfrm_state_alloc_acquire)(struct xfrm_state *x,
- struct xfrm_sec_ctx *polsec,
- u32 secid);
- void (*xfrm_state_free_security)(struct xfrm_state *x);
- int (*xfrm_state_delete_security)(struct xfrm_state *x);
- int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid,
- u8 dir);
- int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
- struct xfrm_policy *xp,
- const struct flowi *fl);
- int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+/*
+ * @key: static call key as defined by STATIC_CALL_KEY
+ * @trampoline: static call trampoline as defined by STATIC_CALL_TRAMP
+ * @hl: The security_hook_list as initialized by the owning LSM.
+ * @active: Enabled when the static call has an LSM hook associated.
+ */
+struct lsm_static_call {
+ struct static_call_key *key;
+ void *trampoline;
+ struct security_hook_list *hl;
+	/* the type (static_key_false) must match the key's default state */
+ struct static_key_false *active;
+} __randomize_layout;
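[Editorial aside: a hedged sketch of how a call site consults one such slot. static_branch_unlikely() and static_call() are the standard kernel primitives; the per-hook, per-slot symbol names below are invented for readability, since the real ones are generated by macros in security.c.]

        /* Illustrative fragment: consult slot 0 of the file_permission
         * hook, falling back to the hook's default return value when no
         * LSM has claimed the slot. */
        static int example_call_file_permission(struct file *file, int mask)
        {
                int rc = 0;     /* the hook's LSM_RET_DEFAULT() value */

                if (static_branch_unlikely(&lsm_key_file_permission_0))
                        rc = static_call(lsm_static_call_file_permission_0)(file, mask);
                return rc;
        }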
- /* key management security hooks */
-#ifdef CONFIG_KEYS
- int (*key_alloc)(struct key *key, const struct cred *cred,
- unsigned long flags);
- void (*key_free)(struct key *key);
- int (*key_permission)(key_ref_t key_ref, const struct cred *cred,
- unsigned perm);
- int (*key_getsecurity)(struct key *key, char **_buffer);
-#endif /* CONFIG_KEYS */
+/*
+ * Table of the static calls for each LSM hook.
+ * Once the LSMs are initialized, their callbacks will be copied to these
+ * tables such that the calls are filled backwards (from last to first).
+ * This way, we can jump directly to the first used static call and execute
+ * all of the ones that follow it. This essentially makes the entry point
+ * dynamic, adapting the number of static calls to the number of callbacks.
+ */
+struct lsm_static_calls_table {
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ struct lsm_static_call NAME[MAX_LSM_COUNT];
+ #include <linux/lsm_hook_defs.h>
+ #undef LSM_HOOK
+} __packed __randomize_layout;
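[Editorial aside: a hedged sketch of how a callback lands in one of these slots at init time. __static_call_update() and static_branch_enable() are real kernel primitives; example_bind_slot() is an illustrative stand-in for the logic in security.c, which additionally handles running out of slots.]

        /* Bind an LSM's callback to the first free slot for a hook and
         * enable the slot's static branch so callers start jumping
         * through it. */
        static void __init example_bind_slot(struct lsm_static_call *scalls,
                                             struct security_hook_list *hl)
        {
                int i;

                for (i = 0; i < MAX_LSM_COUNT; i++) {
                        if (!scalls[i].hl) {
                                __static_call_update(scalls[i].key,
                                                     scalls[i].trampoline,
                                                     hl->hook.lsm_func_addr);
                                scalls[i].hl = hl;
                                static_branch_enable(scalls[i].active);
                                return;
                        }
                }
        }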
-#ifdef CONFIG_AUDIT
- int (*audit_rule_init)(u32 field, u32 op, char *rulestr,
- void **lsmrule);
- int (*audit_rule_known)(struct audit_krule *krule);
- int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule,
- struct audit_context *actx);
- void (*audit_rule_free)(void *lsmrule);
-#endif /* CONFIG_AUDIT */
+/**
+ * struct lsm_id - Identify a Linux Security Module.
+ * @name: name of the LSM, must be approved by the LSM maintainers
+ * @id: LSM ID number from uapi/linux/lsm.h
+ *
+ * Contains the information that identifies the LSM.
+ */
+struct lsm_id {
+ const char *name;
+ u64 id;
};
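[Editorial aside: a hedged example of filling one of these in. LSM_ID_UNDEF comes from uapi/linux/lsm.h; a real module would use its assigned LSM_ID_* constant rather than this placeholder.]

        #include <uapi/linux/lsm.h>

        static const struct lsm_id example_lsmid = {
                .name   = "example",    /* must be approved by LSM maintainers */
                .id     = LSM_ID_UNDEF, /* placeholder for a real LSM_ID_* value */
        };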
-struct security_hook_heads {
- struct list_head binder_set_context_mgr;
- struct list_head binder_transaction;
- struct list_head binder_transfer_binder;
- struct list_head binder_transfer_file;
- struct list_head ptrace_access_check;
- struct list_head ptrace_traceme;
- struct list_head capget;
- struct list_head capset;
- struct list_head capable;
- struct list_head quotactl;
- struct list_head quota_on;
- struct list_head syslog;
- struct list_head settime;
- struct list_head vm_enough_memory;
- struct list_head bprm_set_creds;
- struct list_head bprm_check_security;
- struct list_head bprm_secureexec;
- struct list_head bprm_committing_creds;
- struct list_head bprm_committed_creds;
- struct list_head sb_alloc_security;
- struct list_head sb_free_security;
- struct list_head sb_copy_data;
- struct list_head sb_remount;
- struct list_head sb_kern_mount;
- struct list_head sb_show_options;
- struct list_head sb_statfs;
- struct list_head sb_mount;
- struct list_head sb_umount;
- struct list_head sb_pivotroot;
- struct list_head sb_set_mnt_opts;
- struct list_head sb_clone_mnt_opts;
- struct list_head sb_parse_opts_str;
- struct list_head dentry_init_security;
- struct list_head dentry_create_files_as;
-#ifdef CONFIG_SECURITY_PATH
- struct list_head path_unlink;
- struct list_head path_mkdir;
- struct list_head path_rmdir;
- struct list_head path_mknod;
- struct list_head path_truncate;
- struct list_head path_symlink;
- struct list_head path_link;
- struct list_head path_rename;
- struct list_head path_chmod;
- struct list_head path_chown;
- struct list_head path_chroot;
-#endif
- struct list_head inode_alloc_security;
- struct list_head inode_free_security;
- struct list_head inode_init_security;
- struct list_head inode_create;
- struct list_head inode_link;
- struct list_head inode_unlink;
- struct list_head inode_symlink;
- struct list_head inode_mkdir;
- struct list_head inode_rmdir;
- struct list_head inode_mknod;
- struct list_head inode_rename;
- struct list_head inode_readlink;
- struct list_head inode_follow_link;
- struct list_head inode_permission;
- struct list_head inode_setattr;
- struct list_head inode_getattr;
- struct list_head inode_setxattr;
- struct list_head inode_post_setxattr;
- struct list_head inode_getxattr;
- struct list_head inode_listxattr;
- struct list_head inode_removexattr;
- struct list_head inode_need_killpriv;
- struct list_head inode_killpriv;
- struct list_head inode_getsecurity;
- struct list_head inode_setsecurity;
- struct list_head inode_listsecurity;
- struct list_head inode_getsecid;
- struct list_head inode_copy_up;
- struct list_head inode_copy_up_xattr;
- struct list_head file_permission;
- struct list_head file_alloc_security;
- struct list_head file_free_security;
- struct list_head file_ioctl;
- struct list_head mmap_addr;
- struct list_head mmap_file;
- struct list_head file_mprotect;
- struct list_head file_lock;
- struct list_head file_fcntl;
- struct list_head file_set_fowner;
- struct list_head file_send_sigiotask;
- struct list_head file_receive;
- struct list_head file_open;
- struct list_head task_create;
- struct list_head task_alloc;
- struct list_head task_free;
- struct list_head cred_alloc_blank;
- struct list_head cred_free;
- struct list_head cred_prepare;
- struct list_head cred_transfer;
- struct list_head kernel_act_as;
- struct list_head kernel_create_files_as;
- struct list_head kernel_read_file;
- struct list_head kernel_post_read_file;
- struct list_head kernel_module_request;
- struct list_head task_fix_setuid;
- struct list_head task_setpgid;
- struct list_head task_getpgid;
- struct list_head task_getsid;
- struct list_head task_getsecid;
- struct list_head task_setnice;
- struct list_head task_setioprio;
- struct list_head task_getioprio;
- struct list_head task_prlimit;
- struct list_head task_setrlimit;
- struct list_head task_setscheduler;
- struct list_head task_getscheduler;
- struct list_head task_movememory;
- struct list_head task_kill;
- struct list_head task_prctl;
- struct list_head task_to_inode;
- struct list_head ipc_permission;
- struct list_head ipc_getsecid;
- struct list_head msg_msg_alloc_security;
- struct list_head msg_msg_free_security;
- struct list_head msg_queue_alloc_security;
- struct list_head msg_queue_free_security;
- struct list_head msg_queue_associate;
- struct list_head msg_queue_msgctl;
- struct list_head msg_queue_msgsnd;
- struct list_head msg_queue_msgrcv;
- struct list_head shm_alloc_security;
- struct list_head shm_free_security;
- struct list_head shm_associate;
- struct list_head shm_shmctl;
- struct list_head shm_shmat;
- struct list_head sem_alloc_security;
- struct list_head sem_free_security;
- struct list_head sem_associate;
- struct list_head sem_semctl;
- struct list_head sem_semop;
- struct list_head netlink_send;
- struct list_head d_instantiate;
- struct list_head getprocattr;
- struct list_head setprocattr;
- struct list_head ismaclabel;
- struct list_head secid_to_secctx;
- struct list_head secctx_to_secid;
- struct list_head release_secctx;
- struct list_head inode_invalidate_secctx;
- struct list_head inode_notifysecctx;
- struct list_head inode_setsecctx;
- struct list_head inode_getsecctx;
-#ifdef CONFIG_SECURITY_NETWORK
- struct list_head unix_stream_connect;
- struct list_head unix_may_send;
- struct list_head socket_create;
- struct list_head socket_post_create;
- struct list_head socket_bind;
- struct list_head socket_connect;
- struct list_head socket_listen;
- struct list_head socket_accept;
- struct list_head socket_sendmsg;
- struct list_head socket_recvmsg;
- struct list_head socket_getsockname;
- struct list_head socket_getpeername;
- struct list_head socket_getsockopt;
- struct list_head socket_setsockopt;
- struct list_head socket_shutdown;
- struct list_head socket_sock_rcv_skb;
- struct list_head socket_getpeersec_stream;
- struct list_head socket_getpeersec_dgram;
- struct list_head sk_alloc_security;
- struct list_head sk_free_security;
- struct list_head sk_clone_security;
- struct list_head sk_getsecid;
- struct list_head sock_graft;
- struct list_head inet_conn_request;
- struct list_head inet_csk_clone;
- struct list_head inet_conn_established;
- struct list_head secmark_relabel_packet;
- struct list_head secmark_refcount_inc;
- struct list_head secmark_refcount_dec;
- struct list_head req_classify_flow;
- struct list_head tun_dev_alloc_security;
- struct list_head tun_dev_free_security;
- struct list_head tun_dev_create;
- struct list_head tun_dev_attach_queue;
- struct list_head tun_dev_attach;
- struct list_head tun_dev_open;
-#endif /* CONFIG_SECURITY_NETWORK */
-#ifdef CONFIG_SECURITY_INFINIBAND
- struct list_head ib_pkey_access;
- struct list_head ib_endport_manage_subnet;
- struct list_head ib_alloc_security;
- struct list_head ib_free_security;
-#endif /* CONFIG_SECURITY_INFINIBAND */
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- struct list_head xfrm_policy_alloc_security;
- struct list_head xfrm_policy_clone_security;
- struct list_head xfrm_policy_free_security;
- struct list_head xfrm_policy_delete_security;
- struct list_head xfrm_state_alloc;
- struct list_head xfrm_state_alloc_acquire;
- struct list_head xfrm_state_free_security;
- struct list_head xfrm_state_delete_security;
- struct list_head xfrm_policy_lookup;
- struct list_head xfrm_state_pol_flow_match;
- struct list_head xfrm_decode_session;
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
-#ifdef CONFIG_KEYS
- struct list_head key_alloc;
- struct list_head key_free;
- struct list_head key_permission;
- struct list_head key_getsecurity;
-#endif /* CONFIG_KEYS */
-#ifdef CONFIG_AUDIT
- struct list_head audit_rule_init;
- struct list_head audit_rule_known;
- struct list_head audit_rule_match;
- struct list_head audit_rule_free;
-#endif /* CONFIG_AUDIT */
-} __randomize_layout;
-
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
+ *
+ * struct security_hook_list - Contents of a cacheable, mappable object.
+ * @scalls: The beginning of the array of static calls assigned to this hook.
+ * @hook: The callback for the hook.
+ * @lsmid: The identity of the LSM that owns this hook.
*/
struct security_hook_list {
- struct list_head list;
- struct list_head *head;
- union security_list_options hook;
- char *lsm;
+ struct lsm_static_call *scalls;
+ union security_list_options hook;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
+ * Security blob size or offset data.
+ */
+struct lsm_blob_sizes {
+ unsigned int lbs_cred;
+ unsigned int lbs_file;
+ unsigned int lbs_ib;
+ unsigned int lbs_inode;
+ unsigned int lbs_sock;
+ unsigned int lbs_superblock;
+ unsigned int lbs_ipc;
+ unsigned int lbs_key;
+ unsigned int lbs_msg_msg;
+ unsigned int lbs_perf_event;
+ unsigned int lbs_task;
+ unsigned int lbs_xattr_count; /* num xattr slots in new_xattrs array */
+ unsigned int lbs_tun_dev;
+ unsigned int lbs_bdev;
+ unsigned int lbs_bpf_map;
+ unsigned int lbs_bpf_prog;
+ unsigned int lbs_bpf_token;
+};
+
+/*
+ * LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
+ * LSM hooks (in include/linux/lsm_hook_defs.h).
+ */
+#define LSM_RET_VOID ((void) 0)
+
+/*
* Initializing a security_hook_list structure takes
* up a lot of space in a source file. This macro takes
* care of the common case and reduces the amount of
* text involved.
*/
-#define LSM_HOOK_INIT(HEAD, HOOK) \
- { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
-
-extern struct security_hook_heads security_hook_heads;
-extern char *lsm_names;
+#define LSM_HOOK_INIT(NAME, HOOK) \
+ { \
+ .scalls = static_calls_table.NAME, \
+ .hook = { .NAME = HOOK } \
+ }
extern void security_add_hooks(struct security_hook_list *hooks, int count,
- char *lsm);
+ const struct lsm_id *lsmid);
-#ifdef CONFIG_SECURITY_SELINUX_DISABLE
-/*
- * Assuring the safety of deleting a security module is up to
- * the security module involved. This may entail ordering the
- * module's hook list in a particular way, refusing to disable
- * the module once a policy is loaded or any number of other
- * actions better imagined than described.
- *
- * The name of the configuration option reflects the only module
- * that currently uses the mechanism. Any developer who thinks
- * disabling their module is a good idea needs to be at least as
- * careful as the SELinux team.
+#define LSM_FLAG_LEGACY_MAJOR BIT(0)
+#define LSM_FLAG_EXCLUSIVE BIT(1)
+
+enum lsm_order {
+ LSM_ORDER_FIRST = -1, /* This is only for capabilities. */
+ LSM_ORDER_MUTABLE = 0,
+ LSM_ORDER_LAST = 1, /* This is only for integrity. */
+};
+
+/**
+ * struct lsm_info - Define an individual LSM for the LSM framework.
+ * @id: LSM name/ID info
+ * @order: ordering with respect to other LSMs, optional
+ * @flags: descriptive flags, optional
+ * @blobs: LSM blob sharing, optional
+ * @enabled: controlled by CONFIG_LSM, optional
+ * @init: LSM specific initialization routine
+ * @initcall_pure: LSM callback for initcall_pure() setup, optional
+ * @initcall_early: LSM callback for early_initcall() setup, optional
+ * @initcall_core: LSM callback for core_initcall() setup, optional
+ * @initcall_subsys: LSM callback for subsys_initcall() setup, optional
+ * @initcall_fs: LSM callback for fs_initcall() setup, optional
+ * @initcall_device: LSM callback for device_initcall() setup, optional
+ * @initcall_late: LSM callback for late_initcall() setup, optional
*/
-static inline void security_delete_hooks(struct security_hook_list *hooks,
- int count)
-{
- int i;
+struct lsm_info {
+ const struct lsm_id *id;
+ enum lsm_order order;
+ unsigned long flags;
+ struct lsm_blob_sizes *blobs;
+ int *enabled;
+ int (*init)(void);
+ int (*initcall_pure)(void);
+ int (*initcall_early)(void);
+ int (*initcall_core)(void);
+ int (*initcall_subsys)(void);
+ int (*initcall_fs)(void);
+ int (*initcall_device)(void);
+ int (*initcall_late)(void);
+};
- for (i = 0; i < count; i++)
- list_del_rcu(&hooks[i].list);
-}
-#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
+#define DEFINE_LSM(lsm) \
+ static struct lsm_info __lsm_##lsm \
+ __used __section(".lsm_info.init") \
+ __aligned(sizeof(unsigned long))
+
+#define DEFINE_EARLY_LSM(lsm) \
+ static struct lsm_info __early_lsm_##lsm \
+ __used __section(".early_lsm_info.init") \
+ __aligned(sizeof(unsigned long))
-/* Currently required to handle SELinux runtime hook disable. */
-#ifdef CONFIG_SECURITY_WRITABLE_HOOKS
-#define __lsm_ro_after_init
-#else
-#define __lsm_ro_after_init __ro_after_init
-#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */
-extern int __init security_module_enable(const char *module);
-extern void __init capability_add_hooks(void);
-#ifdef CONFIG_SECURITY_YAMA
-extern void __init yama_add_hooks(void);
-#else
-static inline void __init yama_add_hooks(void) { }
-#endif
-#ifdef CONFIG_SECURITY_LOADPIN
-void __init loadpin_add_hooks(void);
-#else
-static inline void loadpin_add_hooks(void) { };
-#endif
+/* DO NOT tamper with these variables outside of the LSM framework */
+extern struct lsm_static_calls_table static_calls_table __ro_after_init;
+
+/**
+ * lsm_get_xattr_slot - Return the next available slot and increment the index
+ * @xattrs: array storing LSM-provided xattrs
+ * @xattr_count: number of already stored xattrs (updated)
+ *
+ * Retrieve the first available slot in the @xattrs array to fill with an xattr,
+ * and increment @xattr_count.
+ *
+ * Return: The slot to fill in @xattrs if @xattrs is non-NULL, NULL otherwise.
+ */
+static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
+ int *xattr_count)
+{
+ if (unlikely(!xattrs))
+ return NULL;
+ return &xattrs[(*xattr_count)++];
+}
#endif /* ! __LINUX_LSM_HOOKS_H */
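
Taken together, the registration pieces above are used roughly as follows. This is a hypothetical minimal LSM sketch, not code from this patch: the names "example", example_file_open() and example_init() are invented, and LSM_ID_UNDEF stands in for a real ID from uapi/linux/lsm.h.

/* Hypothetical minimal LSM, for illustration only. */
#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>

static int example_file_open(struct file *file)
{
	return 0;			/* allow everything */
}

static const struct lsm_id example_lsmid = {
	.name = "example",
	.id = LSM_ID_UNDEF,		/* a real LSM uses its own uapi ID */
};

static struct security_hook_list example_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_open, example_file_open),
};

static int __init example_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
			   &example_lsmid);
	return 0;
}

DEFINE_LSM(example) = {
	.id = &example_lsmid,
	.init = example_init,
};
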
diff --git a/include/linux/lwq.h b/include/linux/lwq.h
new file mode 100644
index 000000000000..d081d5cf8e33
--- /dev/null
+++ b/include/linux/lwq.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LWQ_H
+#define LWQ_H
+/*
+ * Light-weight single-linked queue built from llist
+ *
+ * Entries can be enqueued from any context with no locking.
+ * Entries can be dequeued from process context with integrated locking.
+ *
+ * This is particularly suitable when work items are queued in
+ * BH or IRQ context, and where work items are handled one at a time
+ * by dedicated threads.
+ */
+#include <linux/container_of.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+
+struct lwq_node {
+ struct llist_node node;
+};
+
+struct lwq {
+ spinlock_t lock;
+ struct llist_node *ready; /* entries to be dequeued */
+ struct llist_head new; /* entries being enqueued */
+};
+
+/**
+ * lwq_init - initialise a lwq
+ * @q: the lwq object
+ */
+static inline void lwq_init(struct lwq *q)
+{
+ spin_lock_init(&q->lock);
+ q->ready = NULL;
+ init_llist_head(&q->new);
+}
+
+/**
+ * lwq_empty - test if lwq contains any entry
+ * @q: the lwq object
+ *
+ * This empty test contains an acquire barrier so that if a wakeup
+ * is sent when lwq_enqueue() returns %true, it is safe to go to sleep
+ * after a test on lwq_empty().
+ */
+static inline bool lwq_empty(struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_enqueue() */
+ return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
+}
+
+struct llist_node *__lwq_dequeue(struct lwq *q);
+/**
+ * lwq_dequeue - dequeue first (oldest) entry from lwq
+ * @q: the queue to dequeue from
+ * @type: the type of object to return
+ * @member: the member in the returned object which is an lwq_node.
+ *
+ * Remove a single object from the lwq and return it. This will take
+ * a spinlock and so must always be called in the same context, typically
+ * process context.
+ */
+#define lwq_dequeue(q, type, member) \
+ ({ struct llist_node *_n = __lwq_dequeue(q); \
+ _n ? container_of(_n, type, member.node) : NULL; })
+
+struct llist_node *lwq_dequeue_all(struct lwq *q);
+
+/**
+ * lwq_for_each_safe - iterate over detached queue allowing deletion
+ * @_n: iterator variable
+ * @_t1: temporary struct llist_node **
+ * @_t2: temporary struct llist_node *
+ * @_l: address of llist_node pointer from lwq_dequeue_all()
+ * @_member: member in _n where lwq_node is found.
+ *
+ * Iterate over members in a dequeued list. If the iterator variable
+ * is set to NULL, the iterator removes that entry from the queue.
+ */
+#define lwq_for_each_safe(_n, _t1, _t2, _l, _member) \
+ for (_t1 = (_l); \
+ *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
+ _t2 = ((*_t1)->next), \
+ true) \
+ : false; \
+ (_n) ? (_t1 = &(_n)->_member.node.next, 0) \
+ : ((*(_t1) = (_t2)), 0))
+
+/**
+ * lwq_enqueue - add a new item to the end of the queue
+ * @n: the lwq_node embedded in the item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may have previously been empty.
+ */
+static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
+{
+	/* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add(&n->node, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+
+/**
+ * lwq_enqueue_batch - add a list of new items to the end of the queue
+ * @n: the lwq_node embedded in the first item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may have previously been empty.
+ */
+static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
+{
+ struct llist_node *e = n;
+
+	/* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+#endif /* LWQ_H */
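
A usage sketch under the stated contract: enqueue from any context, dequeue from process context. struct my_work, producer() and consumer() are invented names, and the consumer wakeup is left as a comment.

#include <linux/lwq.h>
#include <linux/slab.h>

struct my_work {
	int payload;
	struct lwq_node node;
};

static struct lwq my_queue;	/* lwq_init(&my_queue) at setup time */

static void producer(int value)	/* may run in BH or IRQ context */
{
	struct my_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;
	w->payload = value;
	if (lwq_enqueue(&w->node, &my_queue)) {
		/* queue may have been empty: wake the consumer here */
	}
}

static void consumer(void)	/* process context: dequeue takes the lock */
{
	struct my_work *w;

	while ((w = lwq_dequeue(&my_queue, struct my_work, node)) != NULL) {
		/* handle w->payload ... */
		kfree(w);
	}
}
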
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index 394e3d9213b8..ad6042a718b5 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -278,7 +278,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
- * Decompresses data fom 'source' into 'dest'.
+ * Decompresses data from 'source' into 'dest'.
* If the source stream is detected malformed, the function will
* stop decoding and return a negative result.
* This function is protected against buffer overflow exploits,
@@ -522,7 +522,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize);
/**
- * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
+ * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
* @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
* @source: source address of the compressed data
* @dest: output buffer address of the uncompressed data
@@ -530,7 +530,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
- * These decoding function allows decompression of multiple blocks
+ * This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@@ -569,7 +569,7 @@ int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* which must be already allocated with 'originalSize' bytes
* @originalSize: is the original and therefore uncompressed size
*
- * These decoding function allows decompression of multiple blocks
+ * This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@@ -610,10 +610,10 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
- * These decoding function works the same as
+ * This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
* LZ4_decompress_safe_continue()
- * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
@@ -633,10 +633,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
- * These decoding function works the same as
+ * This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
- * LZ4_decompress_safe_continue()
- * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
+ * LZ4_decompress_fast_continue()
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
@@ -645,4 +645,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest,
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
int originalSize, const char *dictStart, int dictSize);
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
#endif
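
The new LZ4_DECOMPRESS_INPLACE_MARGIN() appears to follow the upstream LZ4 convention for in-place decompression: size the buffer as the decompressed size plus the margin, and park the compressed block at the tail. A hedged sketch, where decompress_in_place() is an invented helper:

#include <linux/lz4.h>
#include <linux/slab.h>
#include <linux/string.h>

static int decompress_in_place(const void *src, int comp_size, int orig_size)
{
	size_t margin = LZ4_DECOMPRESS_INPLACE_MARGIN(comp_size);
	char *buf = kmalloc(orig_size + margin, GFP_KERNEL);
	char *cmp;
	int ret;

	if (!buf)
		return -ENOMEM;
	cmp = buf + orig_size + margin - comp_size;
	memcpy(cmp, src, comp_size);	/* park compressed data at the tail */
	/* decompression overtakes the compressed data without clobbering it */
	ret = LZ4_decompress_safe(cmp, buf, comp_size, orig_size);
	/* on success, buf[0 .. ret) holds the decompressed data */
	kfree(buf);
	return ret < 0 ? -EINVAL : 0;
}
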
diff --git a/include/linux/lzo.h b/include/linux/lzo.h
index a0848d9377e5..4d30e3624acd 100644
--- a/include/linux/lzo.h
+++ b/include/linux/lzo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LZO_H__
#define __LZO_H__
/*
@@ -17,12 +18,24 @@
#define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short))
#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS
-#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
+#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2)
/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
int lzo1x_1_compress(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len, void *wrkmem);
+/* Same as above but does not write more than dst_len to dst. */
+int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
+/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
+int lzorle1x_1_compress(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
+/* Same as above but does not write more than dst_len to dst. */
+int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
/* safe decompression with overrun testing */
int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len);
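
A sketch of the new bounds-checked compressor; compress_buf() is an invented helper, and the same pattern applies to lzorle1x_1_compress_safe():

#include <linux/lzo.h>
#include <linux/slab.h>

static int compress_buf(const u8 *src, size_t src_len)
{
	size_t dst_len = lzo1x_worst_compress(src_len);
	void *wrkmem = kvmalloc(LZO1X_1_MEM_COMPRESS, GFP_KERNEL);
	u8 *dst = kvmalloc(dst_len, GFP_KERNEL);
	int ret = -ENOMEM;

	if (wrkmem && dst)
		/* *dst_len is in/out: buffer size in, bytes written out */
		ret = lzo1x_1_compress_safe(src, src_len, dst, &dst_len,
					    wrkmem);
	kvfree(dst);
	kvfree(wrkmem);
	return ret;	/* LZO_E_OK (0) on success */
}
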
diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h
index 41d1eeb9b3bd..00758f45fddc 100644
--- a/include/linux/mISDNdsp.h
+++ b/include/linux/mISDNdsp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __mISDNdsp_H__
#define __mISDNdsp_H__
diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h
index 9d96d5d4dfed..ef4f8eb02eac 100644
--- a/include/linux/mISDNhw.h
+++ b/include/linux/mISDNhw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Author Karsten Keil <kkeil@novell.com>
@@ -5,16 +6,6 @@
* Basic declarations for the mISDN HW channels
*
* Copyright 2008 by Karsten Keil <kkeil@novell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef MISDNHW_H
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index a7330eb3ec64..7aab4a769736 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -18,7 +18,6 @@
#ifndef mISDNIF_H
#define mISDNIF_H
-#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/socket.h>
@@ -587,7 +586,7 @@ extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *,
void *);
extern void mISDN_unregister_clock(struct mISDNclock *);
-static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
+static inline struct mISDNdevice *dev_to_mISDN(const struct device *dev)
{
if (dev)
return dev_get_drvdata(dev);
diff --git a/include/linux/mailbox/arm_mhuv2_message.h b/include/linux/mailbox/arm_mhuv2_message.h
new file mode 100644
index 000000000000..821b9d96daa4
--- /dev/null
+++ b/include/linux/mailbox/arm_mhuv2_message.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM MHUv2 Mailbox Message
+ *
+ * Copyright (C) 2020 Arm Ltd.
+ * Copyright (C) 2020 Linaro Ltd.
+ */
+
+#ifndef _LINUX_ARM_MHUV2_MESSAGE_H_
+#define _LINUX_ARM_MHUV2_MESSAGE_H_
+
+#include <linux/types.h>
+
+/* Data structure for data-transfer protocol */
+struct arm_mhuv2_mbox_msg {
+ void *data;
+ size_t len;
+};
+
+#endif /* _LINUX_ARM_MHUV2_MESSAGE_H_ */
diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h
index c20b4843fc2d..18da82115476 100644
--- a/include/linux/mailbox/brcm-message.h
+++ b/include/linux/mailbox/brcm-message.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Broadcom
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Common header for Broadcom mailbox messages which is shared across
* Broadcom SoCs and Broadcom mailbox client drivers.
*/
diff --git a/include/linux/mailbox/exynos-message.h b/include/linux/mailbox/exynos-message.h
new file mode 100644
index 000000000000..5a9ed5ce2046
--- /dev/null
+++ b/include/linux/mailbox/exynos-message.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Exynos mailbox message.
+ *
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#ifndef _LINUX_EXYNOS_MESSAGE_H_
+#define _LINUX_EXYNOS_MESSAGE_H_
+
+#define EXYNOS_MBOX_CHAN_TYPE_DOORBELL 0
+#define EXYNOS_MBOX_CHAN_TYPE_DATA 1
+
+struct exynos_mbox_msg {
+ unsigned int chan_id;
+ unsigned int chan_type;
+};
+
+#endif /* _LINUX_EXYNOS_MESSAGE_H_ */
diff --git a/include/linux/mailbox/mchp-ipc.h b/include/linux/mailbox/mchp-ipc.h
new file mode 100644
index 000000000000..f084ac9e291b
--- /dev/null
+++ b/include/linux/mailbox/mchp-ipc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Microchip Technology Inc. All rights reserved.
+ */
+
+#ifndef _LINUX_MCHP_IPC_H_
+#define _LINUX_MCHP_IPC_H_
+
+#include <linux/mailbox_controller.h>
+#include <linux/types.h>
+
+struct mchp_ipc_msg {
+ u32 *buf;
+ u16 size;
+};
+
+struct mchp_ipc_sbi_chan {
+ void *buf_base_tx;
+ void *buf_base_rx;
+ void *msg_buf_tx;
+ void *msg_buf_rx;
+ phys_addr_t buf_base_tx_addr;
+ phys_addr_t buf_base_rx_addr;
+ phys_addr_t msg_buf_tx_addr;
+ phys_addr_t msg_buf_rx_addr;
+ int chan_aggregated_irq;
+ int mp_irq;
+ int mc_irq;
+ u32 id;
+ u32 max_msg_size;
+};
+
+#endif /* _LINUX_MCHP_IPC_H_ */
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
new file mode 100644
index 000000000000..e1555e06e7e5
--- /dev/null
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ */
+
+#ifndef __MTK_CMDQ_MAILBOX_H__
+#define __MTK_CMDQ_MAILBOX_H__
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define CMDQ_INST_SIZE 8 /* instruction is 64-bit */
+#define CMDQ_SUBSYS_SHIFT 16
+#define CMDQ_OP_CODE_SHIFT 24
+#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
+
+#define CMDQ_WFE_UPDATE BIT(31)
+#define CMDQ_WFE_UPDATE_VALUE BIT(16)
+#define CMDQ_WFE_WAIT BIT(15)
+#define CMDQ_WFE_WAIT_VALUE 0x1
+
+/*
+ * WFE arg_b
+ * bit 0-11: wait value
+ * bit 15: 1 - wait, 0 - no wait
+ * bit 16-27: update value
+ * bit 31: 1 - update, 0 - no update
+ */
+#define CMDQ_WFE_OPTION (CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE)
+
+/* cmdq event maximum */
+#define CMDQ_MAX_EVENT 0x3ff
+
+/*
+ * CMDQ_CODE_MASK:
+ * set write mask
+ * format: op mask
+ * CMDQ_CODE_WRITE:
+ * write value into target register
+ * format: op subsys address value
+ * CMDQ_CODE_JUMP:
+ * jump by offset
+ * format: op offset
+ * CMDQ_CODE_WFE:
+ * wait for event and clear
+ * it is just clear if no wait
+ * format: [wait] op event update:1 to_wait:1 wait:1
+ * [clear] op event update:1 to_wait:0 wait:0
+ * CMDQ_CODE_EOC:
+ * end of command
+ * format: op irq_flag
+ */
+enum cmdq_code {
+ CMDQ_CODE_MASK = 0x02,
+ CMDQ_CODE_WRITE = 0x04,
+ CMDQ_CODE_POLL = 0x08,
+ CMDQ_CODE_JUMP = 0x10,
+ CMDQ_CODE_WFE = 0x20,
+ CMDQ_CODE_EOC = 0x40,
+ CMDQ_CODE_READ_S = 0x80,
+ CMDQ_CODE_WRITE_S = 0x90,
+ CMDQ_CODE_WRITE_S_MASK = 0x91,
+ CMDQ_CODE_LOGIC = 0xa0,
+};
+
+struct cmdq_cb_data {
+ int sta;
+ struct cmdq_pkt *pkt;
+};
+
+struct cmdq_pkt {
+ void *va_base;
+ dma_addr_t pa_base;
+ size_t cmd_buf_size; /* command occupied size */
+ size_t buf_size; /* real buffer size */
+};
+
+/**
+ * cmdq_get_shift_pa() - get the shift bits of physical address
+ * @chan: mailbox channel
+ *
+ * GCE can only fetch the command buffer address from a 32-bit register.
+ * Some SoCs support command buffer addresses wider than 32 bits for GCE,
+ * which requires shifting the address by some bits so that it fits into
+ * the 32-bit register.
+ *
+ * Return: the shift bits of physical address
+ */
+u8 cmdq_get_shift_pa(struct mbox_chan *chan);
+
+#endif /* __MTK_CMDQ_MAILBOX_H__ */
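
A sketch of the assumed usage; cmdq_pkt_base_reg() is an invented helper, and chan and pkt are assumed to come from the mailbox client:

static u32 cmdq_pkt_base_reg(struct mbox_chan *chan, struct cmdq_pkt *pkt)
{
	/* Fit a possibly >32-bit GCE buffer address into 32 bits. */
	return (u32)(pkt->pa_base >> cmdq_get_shift_pa(chan));
}
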
diff --git a/include/linux/mailbox/riscv-rpmi-message.h b/include/linux/mailbox/riscv-rpmi-message.h
new file mode 100644
index 000000000000..e135c6564d0c
--- /dev/null
+++ b/include/linux/mailbox/riscv-rpmi-message.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Ventana Micro Systems Inc. */
+
+#ifndef _LINUX_RISCV_RPMI_MESSAGE_H_
+#define _LINUX_RISCV_RPMI_MESSAGE_H_
+
+#include <linux/errno.h>
+#include <linux/mailbox_client.h>
+#include <linux/types.h>
+#include <linux/wordpart.h>
+
+/* RPMI version encode/decode macros */
+#define RPMI_VER_MAJOR(__ver) upper_16_bits(__ver)
+#define RPMI_VER_MINOR(__ver) lower_16_bits(__ver)
+#define RPMI_MKVER(__maj, __min) (((u32)(__maj) << 16) | (u16)(__min))
+
+/* RPMI message header */
+struct rpmi_message_header {
+ __le16 servicegroup_id;
+ u8 service_id;
+ u8 flags;
+ __le16 datalen;
+ __le16 token;
+};
+
+/* RPMI message */
+struct rpmi_message {
+ struct rpmi_message_header header;
+ u8 data[];
+};
+
+/* RPMI notification event */
+struct rpmi_notification_event {
+ __le16 event_datalen;
+ u8 event_id;
+ u8 reserved;
+ u8 event_data[];
+};
+
+/* RPMI error codes */
+enum rpmi_error_codes {
+ RPMI_SUCCESS = 0,
+ RPMI_ERR_FAILED = -1,
+ RPMI_ERR_NOTSUPP = -2,
+ RPMI_ERR_INVALID_PARAM = -3,
+ RPMI_ERR_DENIED = -4,
+ RPMI_ERR_INVALID_ADDR = -5,
+ RPMI_ERR_ALREADY = -6,
+ RPMI_ERR_EXTENSION = -7,
+ RPMI_ERR_HW_FAULT = -8,
+ RPMI_ERR_BUSY = -9,
+ RPMI_ERR_INVALID_STATE = -10,
+ RPMI_ERR_BAD_RANGE = -11,
+ RPMI_ERR_TIMEOUT = -12,
+ RPMI_ERR_IO = -13,
+ RPMI_ERR_NO_DATA = -14,
+ RPMI_ERR_RESERVED_START = -15,
+ RPMI_ERR_RESERVED_END = -127,
+ RPMI_ERR_VENDOR_START = -128,
+};
+
+static inline int rpmi_to_linux_error(int rpmi_error)
+{
+ switch (rpmi_error) {
+ case RPMI_SUCCESS:
+ return 0;
+ case RPMI_ERR_INVALID_PARAM:
+ case RPMI_ERR_BAD_RANGE:
+ case RPMI_ERR_INVALID_STATE:
+ return -EINVAL;
+ case RPMI_ERR_DENIED:
+ return -EPERM;
+ case RPMI_ERR_INVALID_ADDR:
+ case RPMI_ERR_HW_FAULT:
+ return -EFAULT;
+ case RPMI_ERR_ALREADY:
+ return -EALREADY;
+ case RPMI_ERR_BUSY:
+ return -EBUSY;
+ case RPMI_ERR_TIMEOUT:
+ return -ETIMEDOUT;
+ case RPMI_ERR_IO:
+ return -ECOMM;
+ case RPMI_ERR_FAILED:
+ case RPMI_ERR_NOTSUPP:
+ case RPMI_ERR_NO_DATA:
+ case RPMI_ERR_EXTENSION:
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* RPMI service group IDs */
+#define RPMI_SRVGRP_SYSTEM_MSI 0x00002
+#define RPMI_SRVGRP_CLOCK 0x00008
+
+/* RPMI clock service IDs */
+enum rpmi_clock_service_id {
+ RPMI_CLK_SRV_ENABLE_NOTIFICATION = 0x01,
+ RPMI_CLK_SRV_GET_NUM_CLOCKS = 0x02,
+ RPMI_CLK_SRV_GET_ATTRIBUTES = 0x03,
+ RPMI_CLK_SRV_GET_SUPPORTED_RATES = 0x04,
+ RPMI_CLK_SRV_SET_CONFIG = 0x05,
+ RPMI_CLK_SRV_GET_CONFIG = 0x06,
+ RPMI_CLK_SRV_SET_RATE = 0x07,
+ RPMI_CLK_SRV_GET_RATE = 0x08,
+ RPMI_CLK_SRV_ID_MAX_COUNT
+};
+
+/* RPMI system MSI service IDs */
+enum rpmi_sysmsi_service_id {
+ RPMI_SYSMSI_SRV_ENABLE_NOTIFICATION = 0x01,
+ RPMI_SYSMSI_SRV_GET_ATTRIBUTES = 0x02,
+ RPMI_SYSMSI_SRV_GET_MSI_ATTRIBUTES = 0x03,
+ RPMI_SYSMSI_SRV_SET_MSI_STATE = 0x04,
+ RPMI_SYSMSI_SRV_GET_MSI_STATE = 0x05,
+ RPMI_SYSMSI_SRV_SET_MSI_TARGET = 0x06,
+ RPMI_SYSMSI_SRV_GET_MSI_TARGET = 0x07,
+ RPMI_SYSMSI_SRV_ID_MAX_COUNT
+};
+
+/* RPMI Linux mailbox attribute IDs */
+enum rpmi_mbox_attribute_id {
+ RPMI_MBOX_ATTR_SPEC_VERSION,
+ RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE,
+ RPMI_MBOX_ATTR_SERVICEGROUP_ID,
+ RPMI_MBOX_ATTR_SERVICEGROUP_VERSION,
+ RPMI_MBOX_ATTR_IMPL_ID,
+ RPMI_MBOX_ATTR_IMPL_VERSION,
+ RPMI_MBOX_ATTR_MAX_ID
+};
+
+/* RPMI Linux mailbox message types */
+enum rpmi_mbox_message_type {
+ RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE,
+ RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE,
+ RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE,
+ RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE,
+ RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT,
+ RPMI_MBOX_MSG_MAX_TYPE
+};
+
+/* RPMI Linux mailbox message instance */
+struct rpmi_mbox_message {
+ enum rpmi_mbox_message_type type;
+ union {
+ struct {
+ enum rpmi_mbox_attribute_id id;
+ u32 value;
+ } attr;
+
+ struct {
+ u32 service_id;
+ void *request;
+ unsigned long request_len;
+ void *response;
+ unsigned long max_response_len;
+ unsigned long out_response_len;
+ } data;
+
+ struct {
+ u16 event_datalen;
+ u8 event_id;
+ u8 *event_data;
+ } notif;
+ };
+ int error;
+};
+
+/* RPMI Linux mailbox message helper routines */
+static inline void rpmi_mbox_init_get_attribute(struct rpmi_mbox_message *msg,
+ enum rpmi_mbox_attribute_id id)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE;
+ msg->attr.id = id;
+ msg->attr.value = 0;
+ msg->error = 0;
+}
+
+static inline void rpmi_mbox_init_set_attribute(struct rpmi_mbox_message *msg,
+ enum rpmi_mbox_attribute_id id,
+ u32 value)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE;
+ msg->attr.id = id;
+ msg->attr.value = value;
+ msg->error = 0;
+}
+
+static inline void rpmi_mbox_init_send_with_response(struct rpmi_mbox_message *msg,
+ u32 service_id,
+ void *request,
+ unsigned long request_len,
+ void *response,
+ unsigned long max_response_len)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE;
+ msg->data.service_id = service_id;
+ msg->data.request = request;
+ msg->data.request_len = request_len;
+ msg->data.response = response;
+ msg->data.max_response_len = max_response_len;
+ msg->data.out_response_len = 0;
+ msg->error = 0;
+}
+
+static inline void rpmi_mbox_init_send_without_response(struct rpmi_mbox_message *msg,
+ u32 service_id,
+ void *request,
+ unsigned long request_len)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE;
+ msg->data.service_id = service_id;
+ msg->data.request = request;
+ msg->data.request_len = request_len;
+ msg->data.response = NULL;
+ msg->data.max_response_len = 0;
+ msg->data.out_response_len = 0;
+ msg->error = 0;
+}
+
+static inline void *rpmi_mbox_get_msg_response(struct rpmi_mbox_message *msg)
+{
+ return msg ? msg->data.response : NULL;
+}
+
+static inline int rpmi_mbox_send_message(struct mbox_chan *chan,
+ struct rpmi_mbox_message *msg)
+{
+ int ret;
+
+ /* Send message for the underlying mailbox channel */
+ ret = mbox_send_message(chan, msg);
+ if (ret < 0)
+ return ret;
+
+ /* Explicitly signal txdone for mailbox channel */
+ ret = msg->error;
+ mbox_client_txdone(chan, ret);
+ return ret;
+}
+
+#endif /* _LINUX_RISCV_RPMI_MESSAGE_H_ */
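
A hedged sketch of a request/response exchange built from these helpers. The request and response layouts (example_rate_req/example_rate_rsp) are invented for illustration and are not defined by this header:

#include <linux/mailbox/riscv-rpmi-message.h>

struct example_rate_req { __le32 clock_id; };			/* invented */
struct example_rate_rsp { __le32 status, rate_lo, rate_hi; };	/* invented */

static int example_clk_get_rate(struct mbox_chan *chan, u32 id, u64 *rate)
{
	struct example_rate_req req = { .clock_id = cpu_to_le32(id) };
	struct example_rate_rsp rsp;
	struct rpmi_mbox_message msg;
	int ret;

	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
					  &req, sizeof(req),
					  &rsp, sizeof(rsp));
	ret = rpmi_mbox_send_message(chan, &msg);
	if (ret)
		return ret;
	ret = rpmi_to_linux_error((s32)le32_to_cpu(rsp.status));
	if (ret)
		return ret;
	*rate = ((u64)le32_to_cpu(rsp.rate_hi) << 32) |
		le32_to_cpu(rsp.rate_lo);
	return 0;
}
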
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
new file mode 100644
index 000000000000..31d8046d945e
--- /dev/null
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ZYNQMP_IPI_MESSAGE_H_
+#define _LINUX_ZYNQMP_IPI_MESSAGE_H_
+
+/**
+ * struct zynqmp_ipi_message - ZynqMP IPI message structure
+ * @len: Length of message
+ * @data: message payload
+ *
+ * This is the structure for data used in mbox_send_message();
+ * the maximum length of the data buffer is fixed at 32 bytes.
+ * Clients are expected to be aware of this.
+ */
+struct zynqmp_ipi_message {
+ size_t len;
+ u8 data[];
+};
+
+#endif /* _LINUX_ZYNQMP_IPI_MESSAGE_H_ */
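
A minimal sending sketch, assuming a channel already bound via the mailbox client API; example_ipi_send() is an invented helper, and len must respect the 32-byte cap noted above:

#include <linux/mailbox/zynqmp-ipi-message.h>
#include <linux/mailbox_client.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_ipi_send(struct mbox_chan *chan, const void *payload,
			    size_t len)
{
	struct zynqmp_ipi_message *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);	/* flexible array */
	if (!msg)
		return -ENOMEM;
	msg->len = len;
	memcpy(msg->data, payload, len);
	ret = mbox_send_message(chan, msg);
	kfree(msg);
	return ret < 0 ? ret : 0;
}
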
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 44348710953f..c6eea9afb943 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -1,17 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013-2014 Linaro Ltd.
* Author: Jassi Brar <jassisinghbrar@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MAILBOX_CLIENT_H
#define __MAILBOX_CLIENT_H
-#include <linux/of.h>
#include <linux/device.h>
+#include <linux/of.h>
struct mbox_chan;
@@ -40,10 +37,12 @@ struct mbox_client {
void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
};
+int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name);
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
int mbox_send_message(struct mbox_chan *chan, void *mssg);
+int mbox_flush(struct mbox_chan *chan, unsigned long timeout);
void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
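
The client-side flow implied by this API, as a hypothetical sketch: example_rx() and example_send() are invented names, and a real driver would keep the mbox_client in its device state rather than on the stack.

#include <linux/mailbox_client.h>

static void example_rx(struct mbox_client *cl, void *mssg)
{
	/* runs in atomic context when the controller receives data */
}

static int example_send(struct device *dev, void *data)
{
	static struct mbox_client cl;	/* normally part of driver state */
	struct mbox_chan *chan;
	int ret;

	cl.dev = dev;
	cl.rx_callback = example_rx;
	cl.tx_block = true;		/* mbox_send_message() may sleep */
	cl.tx_tout = 500;		/* ms */

	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, data);
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}
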
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 74deadb42d76..80a427c7ca29 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -1,17 +1,13 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __MAILBOX_CONTROLLER_H
#define __MAILBOX_CONTROLLER_H
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hrtimer.h>
#include <linux/of.h>
#include <linux/types.h>
-#include <linux/hrtimer.h>
-#include <linux/device.h>
-#include <linux/completion.h>
struct mbox_chan;
@@ -24,6 +20,9 @@ struct mbox_chan;
* transmission of data is reported by the controller via
* mbox_chan_txdone (if it has some TX ACK irq). It must not
* sleep.
+ * @flush: Called when a client requests transmissions to be blocking but
+ * the context doesn't allow sleeping. Typically the controller
+ * will implement a busy loop waiting for the data to flush out.
* @startup: Called when a client requests the chan. The controller
* could ask clients for additional parameters of communication
* to be provided via client's chan_data. This call may
@@ -46,6 +45,7 @@ struct mbox_chan;
*/
struct mbox_chan_ops {
int (*send_data)(struct mbox_chan *chan, void *data);
+ int (*flush)(struct mbox_chan *chan, unsigned long timeout);
int (*startup)(struct mbox_chan *chan);
void (*shutdown)(struct mbox_chan *chan);
bool (*last_tx_done)(struct mbox_chan *chan);
@@ -66,6 +66,7 @@ struct mbox_chan_ops {
* no interrupt rises. Ignored if 'txdone_irq' is set.
* @txpoll_period: If 'txdone_poll' is in effect, the API polls for
* last TX's status after these many millisecs
+ * @fw_xlate: Controller driver specific mapping of channel via fwnode
* @of_xlate: Controller driver specific mapping of channel via DT
* @poll_hrt: API private. hrtimer used to poll for TXDONE on all
* channels.
@@ -79,10 +80,13 @@ struct mbox_controller {
bool txdone_irq;
bool txdone_poll;
unsigned txpoll_period;
+ struct mbox_chan *(*fw_xlate)(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp);
struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
const struct of_phandle_args *sp);
/* Internal to API */
struct hrtimer poll_hrt;
+ spinlock_t poll_hrt_lock;
struct list_head node;
};
@@ -131,4 +135,6 @@ void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
+int devm_mbox_controller_register(struct device *dev,
+ struct mbox_controller *mbox);
#endif /* __MAILBOX_CONTROLLER_H */
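
A sketch of controller registration with the new devm_ helper; example_chan_ops and example_chans are assumed to be defined elsewhere by the driver:

#include <linux/mailbox_controller.h>
#include <linux/platform_device.h>

static int example_mbox_probe(struct platform_device *pdev)
{
	struct mbox_controller *mbox;

	mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	mbox->dev = &pdev->dev;
	mbox->ops = &example_chan_ops;	/* send_data, and optionally flush */
	mbox->chans = example_chans;
	mbox->num_chans = ARRAY_SIZE(example_chans);
	mbox->txdone_irq = true;	/* driver calls mbox_chan_txdone() */

	/* devm_: unregistered automatically when the driver detaches */
	return devm_mbox_controller_register(&pdev->dev, mbox);
}
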
diff --git a/include/linux/maple.h b/include/linux/maple.h
index c37288b23e0c..3be4e567473c 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -1,10 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MAPLE_H
#define __LINUX_MAPLE_H
#include <mach/maple.h>
struct device;
-extern struct bus_type maple_bus_type;
/* Maple Bus command and response codes */
enum maple_code {
@@ -97,7 +97,7 @@ int maple_add_packet(struct maple_device *mdev, u32 function,
void maple_clear_dev(struct maple_device *mdev);
#define to_maple_dev(n) container_of(n, struct maple_device, dev)
-#define to_maple_driver(n) container_of(n, struct maple_driver, drv)
+#define to_maple_driver(n) container_of_const(n, struct maple_driver, drv)
#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev)
#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
new file mode 100644
index 000000000000..66f98a3da8d8
--- /dev/null
+++ b/include/linux/maple_tree.h
@@ -0,0 +1,903 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_MAPLE_TREE_H
+#define _LINUX_MAPLE_TREE_H
+/*
+ * Maple Tree - An RCU-safe adaptive tree for storing ranges
+ * Copyright (c) 2018-2022 Oracle
+ * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+/* #define CONFIG_MAPLE_RCU_DISABLED */
+
+/*
+ * Allocated nodes are mutable until they have been inserted into the tree,
+ * at which time they cannot change their type until they have been removed
+ * from the tree and an RCU grace period has passed.
+ *
+ * Removed nodes have their ->parent set to point to themselves. RCU readers
+ * check ->parent before relying on the value that they loaded from the
+ * slots array. This lets us reuse the slots array for the RCU head.
+ *
+ * Nodes in the tree point to their parent unless bit 0 is set.
+ */
+#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+/* 64bit sizes */
+#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
+#else
+/* 32bit sizes */
+#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
+#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
+
+#define MAPLE_NODE_MASK 255UL
+
+/*
+ * The node->parent of the root node has bit 0 set and the rest of the pointer
+ * is a pointer to the tree itself. No more bits are available in this pointer
+ * (on m68k, the data structure may only be 2-byte aligned).
+ *
+ * Internal non-root nodes can only have maple_range_* nodes as parents. The
+ * parent pointer is 256B aligned like all other tree nodes. When storing a 32
+ * or 64 bit values, the offset can fit into 4 bits. The 16 bit values need an
+ * extra bit to store the offset. This extra bit comes from a reuse of the last
+ * bit in the node type. This is possible by using bit 1 to indicate if bit 2
+ * is part of the type or the slot.
+ *
+ * Once the type is decided, the decision of an allocation range type or a
+ * range type is done by examining the immutable tree flag for the
+ * MT_FLAGS_ALLOC_RANGE flag.
+ *
+ * Node types:
+ * 0b??1 = Root
+ * 0b?00 = 16 bit nodes
+ * 0b010 = 32 bit nodes
+ * 0b110 = 64 bit nodes
+ *
+ * Slot size and location in the parent pointer:
+ * type : slot location
+ * 0b??1 : Root
+ * 0b?00 : 16 bit values, type in 0-1, slot in 2-6
+ * 0b010 : 32 bit values, type in 0-2, slot in 3-6
+ * 0b110 : 64 bit values, type in 0-2, slot in 3-6
+ */
+
+/*
+ * This metadata is used to optimize the gap updating code and in reverse
+ * searching for gaps or any other code that needs to find the end of the data.
+ */
+struct maple_metadata {
+ unsigned char end; /* end of data */
+ unsigned char gap; /* offset of largest gap */
+};
+
+/*
+ * Leaf nodes do not store pointers to nodes, they store user data. Users may
+ * store almost any bit pattern. As noted above, the optimisation of storing an
+ * entry at 0 in the root pointer cannot be done for data which have the bottom
+ * two bits set to '10'. We also reserve values with the bottom two bits set to
+ * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use. Some APIs
+ * return errnos as a negative errno shifted left by two bits and the bottom
+ * two bits set to '10', and while choosing to store these values in the array
+ * is not an error, it may lead to confusion if you're testing for an error with
+ * mas_is_err().
+ *
+ * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
+ * 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges; pivots may appear in the
+ * subtree with an entry attached to the value whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ */
+
+struct maple_range_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
+ union {
+ void __rcu *slot[MAPLE_RANGE64_SLOTS];
+ struct {
+ void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+ struct maple_metadata meta;
+ };
+ };
+};
+
+/*
+ * At tree creation time, the user can specify that they're willing to trade off
+ * storing fewer entries in a tree in return for storing more information in
+ * each node.
+ *
+ * The maple tree supports recording the largest range of NULL entries available
+ * in this node, also called gaps. This optimises the tree for allocating a
+ * range.
+ */
+struct maple_arange_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
+ void __rcu *slot[MAPLE_ARANGE64_SLOTS];
+ unsigned long gap[MAPLE_ARANGE64_SLOTS];
+ struct maple_metadata meta;
+};
+
+struct maple_alloc {
+ unsigned long total;
+ unsigned char node_count;
+ unsigned int request_count;
+ struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
+};
+
+struct maple_topiary {
+ struct maple_pnode *parent;
+ struct maple_enode *next; /* Overlaps the pivot */
+};
+
+enum maple_type {
+ maple_dense,
+ maple_leaf_64,
+ maple_range_64,
+ maple_arange_64,
+};
+
+enum store_type {
+ wr_invalid,
+ wr_new_root,
+ wr_store_root,
+ wr_exact_fit,
+ wr_spanning_store,
+ wr_split_store,
+ wr_rebalance,
+ wr_append,
+ wr_node_store,
+ wr_slot_store,
+};
+
+/**
+ * DOC: Maple tree flags
+ *
+ * * MT_FLAGS_ALLOC_RANGE - Track gaps in this tree
+ * * MT_FLAGS_USE_RCU - Operate in RCU mode
+ * * MT_FLAGS_HEIGHT_OFFSET - The position of the tree height in the flags
+ * * MT_FLAGS_HEIGHT_MASK - The mask for the maple tree height value
+ * * MT_FLAGS_LOCK_MASK - How the mt_lock is used
+ * * MT_FLAGS_LOCK_IRQ - Acquired irq-safe
+ * * MT_FLAGS_LOCK_BH - Acquired bh-safe
+ * * MT_FLAGS_LOCK_EXTERN - mt_lock is not used
+ *
+ * * MAPLE_HEIGHT_MAX - The largest height that can be stored
+ */
+#define MT_FLAGS_ALLOC_RANGE 0x01
+#define MT_FLAGS_USE_RCU 0x02
+#define MT_FLAGS_HEIGHT_OFFSET 0x02
+#define MT_FLAGS_HEIGHT_MASK 0x7C
+#define MT_FLAGS_LOCK_MASK 0x300
+#define MT_FLAGS_LOCK_IRQ 0x100
+#define MT_FLAGS_LOCK_BH 0x200
+#define MT_FLAGS_LOCK_EXTERN 0x300
+#define MT_FLAGS_ALLOC_WRAPPED 0x0800
+
+#define MAPLE_HEIGHT_MAX 31
+
+
+#define MAPLE_NODE_TYPE_MASK 0x0F
+#define MAPLE_NODE_TYPE_SHIFT 0x03
+
+#define MAPLE_RESERVED_RANGE 4096
+
+#ifdef CONFIG_LOCKDEP
+#define mt_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))
+
+#define mt_write_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || \
+ lock_is_held_type((mt)->ma_external_lock, 0))
+
+#define mt_set_external_lock(mt, lock) \
+ (mt)->ma_external_lock = &(lock)->dep_map
+
+#define mt_on_stack(mt) (mt).ma_external_lock = NULL
+#else
+#define mt_lock_is_held(mt) 1
+#define mt_write_lock_is_held(mt) 1
+#define mt_set_external_lock(mt, lock) do { } while (0)
+#define mt_on_stack(mt) do { } while (0)
+#endif
+
+/*
+ * If the tree contains a single entry at index 0, it is usually stored in
+ * tree->ma_root. To optimise for the page cache, an entry which ends in '00',
+ * '01' or '11' is stored in the root, but an entry which ends in '10' will be
+ * stored in a node. Bits 3-6 are used to store enum maple_type.
+ *
+ * The flags are used both to store some immutable information about this tree
+ * (set at tree creation time) and dynamic information set under the spinlock.
+ *
+ * Another use of flags are to indicate global states of the tree. This is the
+ * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently in
+ * RCU mode. This mode was added to allow the tree to reuse nodes instead of
+ * re-allocating and RCU freeing nodes when there is a single user.
+ */
+struct maple_tree {
+ union {
+ spinlock_t ma_lock;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map *ma_external_lock;
+#endif
+ };
+ unsigned int ma_flags;
+ void __rcu *ma_root;
+};
+
+/**
+ * MTREE_INIT() - Initialize a maple tree
+ * @name: The maple tree name
+ * @__flags: The maple tree flags
+ *
+ */
+#define MTREE_INIT(name, __flags) { \
+ .ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock), \
+ .ma_flags = __flags, \
+ .ma_root = NULL, \
+}
+
+/**
+ * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
+ * @name: The tree name
+ * @__flags: The maple tree flags
+ * @__lock: The external lock
+ */
+#ifdef CONFIG_LOCKDEP
+#define MTREE_INIT_EXT(name, __flags, __lock) { \
+ .ma_external_lock = &(__lock).dep_map, \
+ .ma_flags = (__flags), \
+ .ma_root = NULL, \
+}
+#else
+#define MTREE_INIT_EXT(name, __flags, __lock) MTREE_INIT(name, __flags)
+#endif
+
+#define DEFINE_MTREE(name) \
+ struct maple_tree name = MTREE_INIT(name, 0)
+
+#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_lock_nested(mt, subclass) \
+	spin_lock_nested((&(mt)->ma_lock), subclass)
+#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
+
+/*
+ * The Maple Tree squeezes various bits in at various points which aren't
+ * necessarily obvious. Usually, this is done by observing that pointers are
+ * N-byte aligned and thus the bottom log_2(N) bits are available for use. We
+ * don't use the high bits of pointers to store additional information because
+ * we don't know what bits are unused on any given architecture.
+ *
+ * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
+ * low bits for our own purposes. Nodes are currently of 4 types:
+ * 1. Single pointer (Range is 0-0)
+ * 2. Non-leaf Allocation Range nodes
+ * 3. Non-leaf Range nodes
+ * 4. Leaf Range nodes All nodes consist of a number of node slots,
+ * pivots, and a parent pointer.
+ */
+
+struct maple_node {
+ union {
+ struct {
+ struct maple_pnode *parent;
+ void __rcu *slot[MAPLE_NODE_SLOTS];
+ };
+ struct {
+ void *pad;
+ struct rcu_head rcu;
+ struct maple_enode *piv_parent;
+ unsigned char parent_slot;
+ enum maple_type type;
+ unsigned char slot_len;
+ unsigned int ma_flags;
+ };
+ struct maple_range_64 mr64;
+ struct maple_arange_64 ma64;
+ struct maple_alloc alloc;
+ };
+};
+
+/*
+ * More complicated stores can cause two nodes to become one or three and
+ * potentially alter the height of the tree. Either half of the tree may need
+ * to be rebalanced against the other. The ma_topiary struct is used to track
+ * which nodes have been 'cut' from the tree so that the change can be done
+ * safely at a later date. This is done to support RCU.
+ */
+struct ma_topiary {
+ struct maple_enode *head;
+ struct maple_enode *tail;
+ struct maple_tree *mtree;
+};
+
+void *mtree_load(struct maple_tree *mt, unsigned long index);
+
+int mtree_insert(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long range_lo, unsigned long range_hi,
+ unsigned long *next, gfp_t gfp);
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+
+int mtree_store_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_store(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+void *mtree_erase(struct maple_tree *mt, unsigned long index);
+
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+
+void mtree_destroy(struct maple_tree *mt);
+void __mt_destroy(struct maple_tree *mt);
+
+/**
+ * mtree_empty() - Determine if a tree has any present entries.
+ * @mt: Maple Tree.
+ *
+ * Context: Any context.
+ * Return: %true if the tree contains only NULL pointers.
+ */
+static inline bool mtree_empty(const struct maple_tree *mt)
+{
+ return mt->ma_root == NULL;
+}
+
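A sketch of the basic API above, which handles locking internally; example(), example_mt and example_obj are invented names:

#include <linux/maple_tree.h>

static DEFINE_MTREE(example_mt);
static int example_obj;

static int example(void)
{
	int ret;

	/* Ranges are inclusive: this occupies indices 40 through 60. */
	ret = mtree_store_range(&example_mt, 40, 60, &example_obj,
				GFP_KERNEL);
	if (ret)
		return ret;

	if (mtree_load(&example_mt, 50) != &example_obj)	/* any index in range hits */
		return -EINVAL;
	mtree_erase(&example_mt, 50);	/* erases the whole [40, 60] range */
	mtree_destroy(&example_mt);
	return 0;
}
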
+/* Advanced API */
+
+/*
+ * Maple State Status
+ * ma_active means the maple state is pointing to a node and offset and can
+ * continue operating on the tree.
+ * ma_start means we have not searched the tree.
+ * ma_root means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * ma_none means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * ma_pause means the data within the maple state may be stale; restart the
+ * operation.
+ * ma_overflow means the search has reached the upper search limit.
+ * ma_underflow means the search has reached the lower search limit.
+ * ma_error means there was an error, check the node for the error number.
+ */
+enum maple_status {
+ ma_active,
+ ma_start,
+ ma_root,
+ ma_none,
+ ma_pause,
+ ma_overflow,
+ ma_underflow,
+ ma_error,
+};
+
+/*
+ * The maple state is defined in the struct ma_state and is used to keep track
+ * of information during operations, and even between operations when using the
+ * advanced API.
+ *
+ * If state->node has bit 0 set then it references a tree location which is not
+ * a node (eg the root). If bit 1 is set, the rest of the bits are a negative
+ * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
+ * node type.
+ *
+ * state->alloc either has a requested number of nodes or an allocated node. If
+ * state->alloc has a requested number of nodes, the first bit will be set (0x1)
+ * and the remaining bits are the value. If state->alloc is a node, then the
+ * node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1 for
+ * storing more allocated nodes, a total number of nodes allocated, and the
+ * node_count in this node. node_count is the number of allocated nodes in this
+ * node. The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
+ * nodes into state->alloc->slot[0]'s node. Nodes are taken from state->alloc
+ * by removing a node from the state->alloc node until state->alloc->node_count
+ * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
+ * to state->alloc. Nodes are pushed onto state->alloc by putting the current
+ * state->alloc into the pushed node's slot[0].
+ *
+ * The state also contains the implied min/max of the state->node, the depth of
+ * this search, and the offset. The implied min/max are either from the parent
+ * node or are 0-oo for the root node. The depth is incremented or decremented
+ * every time a node is walked down or up. The offset is the slot/pivot of
+ * interest in the node - either for reading or writing.
+ *
+ * When returning a value the maple state index and last respectively contain
+ * the start and end of the range for the entry. Ranges are inclusive in the
+ * Maple Tree.
+ *
+ * The status of the state is used to determine how the next action should treat
+ * the state. For instance, if the status is ma_start then the next action
+ * should start at the root of the tree and walk down. If the status is
+ * ma_pause then the node may be stale data and should be discarded. If the
+ * status is ma_overflow, then the last action hit the upper limit.
+ *
+ */
+struct ma_state {
+ struct maple_tree *tree; /* The tree we're operating in */
+ unsigned long index; /* The index we're operating on - range start */
+ unsigned long last; /* The last index we're operating on - range end */
+ struct maple_enode *node; /* The node containing this entry */
+ unsigned long min; /* The minimum index of this node - implied pivot min */
+ unsigned long max; /* The maximum index of this node - implied pivot max */
+ struct slab_sheaf *sheaf; /* Allocated nodes for this operation */
+ struct maple_node *alloc; /* A single allocated node for fast path writes */
+ unsigned long node_request; /* The number of nodes to allocate for this operation */
+ enum maple_status status; /* The status of the state (active, start, none, etc) */
+ unsigned char depth; /* depth of tree descent during write */
+ unsigned char offset;
+ unsigned char mas_flags;
+ unsigned char end; /* The end of the node */
+ enum store_type store_type; /* The type of store needed for this operation */
+};
+
+struct ma_wr_state {
+ struct ma_state *mas;
+ struct maple_node *node; /* Decoded mas->node */
+ unsigned long r_min; /* range min */
+ unsigned long r_max; /* range max */
+ enum maple_type type; /* mas->node type */
+ unsigned char offset_end; /* The offset where the write ends */
+ unsigned long *pivots; /* mas->node->pivots pointer */
+ unsigned long end_piv; /* The pivot at the offset end */
+ void __rcu **slots; /* mas->node->slots pointer */
+ void *entry; /* The entry to write */
+ void *content; /* The existing entry that is being overwritten */
+ unsigned char vacant_height; /* Height of lowest node with free space */
+ unsigned char sufficient_height;/* Height of lowest node with min sufficiency + 1 nodes */
+};
+
+#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_lock_nested(mas, subclass) \
+ spin_lock_nested(&((mas)->tree->ma_lock), subclass)
+#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
+
+/*
+ * Special values for ma_state.node.
+ * MA_ERROR represents an errno. After dropping the lock and attempting
+ * to resolve the error, the walk would have to be restarted from the
+ * top of the tree as the tree may have been modified.
+ */
+#define MA_ERROR(err) \
+ ((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+
+/*
+ * When changing MA_STATE, remember to also change rust/kernel/maple_tree.rs
+ */
+#define MA_STATE(name, mt, first, end) \
+ struct ma_state name = { \
+ .tree = mt, \
+ .index = first, \
+ .last = end, \
+ .node = NULL, \
+ .status = ma_start, \
+ .min = 0, \
+ .max = ULONG_MAX, \
+ .sheaf = NULL, \
+ .alloc = NULL, \
+ .node_request = 0, \
+ .mas_flags = 0, \
+ .store_type = wr_invalid, \
+ }
+
+#define MA_WR_STATE(name, ma_state, wr_entry) \
+ struct ma_wr_state name = { \
+ .mas = ma_state, \
+ .content = NULL, \
+ .entry = wr_entry, \
+ .vacant_height = 0, \
+ .sufficient_height = 0 \
+ }
+
+#define MA_TOPIARY(name, tree) \
+ struct ma_topiary name = { \
+ .head = NULL, \
+ .tail = NULL, \
+ .mtree = tree, \
+ }
+
+void *mas_walk(struct ma_state *mas);
+void *mas_store(struct ma_state *mas, void *entry);
+void *mas_erase(struct ma_state *mas);
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
+void mas_store_prealloc(struct ma_state *mas, void *entry);
+void *mas_find(struct ma_state *mas, unsigned long max);
+void *mas_find_range(struct ma_state *mas, unsigned long max);
+void *mas_find_rev(struct ma_state *mas, unsigned long min);
+void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
+int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
+ void *entry, unsigned long range_lo, unsigned long range_hi,
+ unsigned long *next, gfp_t gfp);
+
+bool mas_nomem(struct ma_state *mas, gfp_t gfp);
+void mas_pause(struct ma_state *mas);
+void maple_tree_init(void);
+void mas_destroy(struct ma_state *mas);
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
+
+void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_prev_range(struct ma_state *mas, unsigned long max);
+void *mas_next(struct ma_state *mas, unsigned long max);
+void *mas_next_range(struct ma_state *mas, unsigned long max);
+
+int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
+ unsigned long size);
+/*
+ * This finds an empty area from the highest address to the lowest,
+ * i.e. the "topdown" version.
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size);
+
+static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
+ unsigned long addr)
+{
+ memset(mas, 0, sizeof(struct ma_state));
+ mas->tree = tree;
+ mas->index = mas->last = addr;
+ mas->max = ULONG_MAX;
+ mas->status = ma_start;
+ mas->node = NULL;
+}
+
+static inline bool mas_is_active(struct ma_state *mas)
+{
+ return mas->status == ma_active;
+}
+
+static inline bool mas_is_err(struct ma_state *mas)
+{
+ return mas->status == ma_error;
+}
+
+/**
+ * mas_reset() - Reset a Maple Tree operation state.
+ * @mas: Maple Tree operation state.
+ *
+ * Resets the error or walk state of the @mas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * lock and want to reuse the ma_state.
+ *
+ * Context: Any context.
+ */
+static __always_inline void mas_reset(struct ma_state *mas)
+{
+ mas->status = ma_start;
+ mas->node = NULL;
+}
+
+/**
+ * mas_for_each() - Iterate over a range of the maple tree.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__max: maximum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ */
+#define mas_for_each(__mas, __entry, __max) \
+ while (((__entry) = mas_find((__mas), (__max))) != NULL)
+
+/**
+ * mas_for_each_rev() - Iterate over a range of the maple tree in reverse order.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__min: minimum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ */
+#define mas_for_each_rev(__mas, __entry, __min) \
+ while (((__entry) = mas_find_rev((__mas), (__min))) != NULL)
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+enum mt_dump_format {
+ mt_dump_dec,
+ mt_dump_hex,
+};
+
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
+void mas_dump(const struct ma_state *mas);
+void mas_wr_dump(const struct ma_wr_state *wr_mas);
+void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_BUG_ON(__mas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_WR_BUG_ON(__wrmas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MT_WARN_ON(__tree, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WARN_ON(__mas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
+#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
+#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
+/**
+ * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
+ * current location.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Set the internal maple state values to a sub-range.
+ * Please use mas_set_range() if you do not know where you are in the tree.
+ */
+static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
+ unsigned long last)
+{
+ /* Ensure the range starts within the current slot */
+ MAS_WARN_ON(mas, mas_is_active(mas) &&
+ (mas->index > start || mas->last < start));
+ mas->index = start;
+ mas->last = last;
+}
+
+/**
+ * mas_set_range() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Move the operation state to refer to a different range. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline
+void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
+{
+ mas_reset(mas);
+ __mas_set_range(mas, start, last);
+}
+
+/**
+ * mas_set() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @index: New index into the Maple Tree.
+ *
+ * Move the operation state to refer to a different index. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline void mas_set(struct ma_state *mas, unsigned long index)
+{
+ mas_set_range(mas, index, index);
+}
+
+static inline bool mt_external_lock(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
+}
+
+/**
+ * mt_init_flags() - Initialise an empty maple tree with flags.
+ * @mt: Maple Tree
+ * @flags: maple tree flags.
+ *
+ * If you need to initialise a Maple Tree with special flags (eg, an
+ * allocation tree), use this function.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
+{
+ mt->ma_flags = flags;
+ if (!mt_external_lock(mt))
+ spin_lock_init(&mt->ma_lock);
+ rcu_assign_pointer(mt->ma_root, NULL);
+}
+
+/**
+ * mt_init() - Initialise an empty maple tree.
+ * @mt: Maple Tree
+ *
+ * Initialises @mt as an empty Maple Tree with the default flags.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init(struct maple_tree *mt)
+{
+ mt_init_flags(mt, 0);
+}
+
+static inline bool mt_in_rcu(struct maple_tree *mt)
+{
+#ifdef CONFIG_MAPLE_RCU_DISABLED
+ return false;
+#endif
+ return mt->ma_flags & MT_FLAGS_USE_RCU;
+}
+
+/**
+ * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_clear_in_rcu(struct maple_tree *mt)
+{
+ if (!mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ WARN_ON(!mt_lock_is_held(mt));
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+/**
+ * mt_set_in_rcu() - Switch the tree to RCU safe mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_set_in_rcu(struct maple_tree *mt)
+{
+ if (mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ WARN_ON(!mt_lock_is_held(mt));
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+static inline unsigned int mt_height(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max);
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
+
+/**
+ * mt_for_each - Iterate over each entry starting at index until max.
+ * @__tree: The Maple Tree
+ * @__entry: The current entry
+ * @__index: The index to start the search from. Subsequently used as iterator.
+ * @__max: The maximum limit for @index
+ *
+ * This iterator skips all entries that resolve to a NULL pointer,
+ * e.g. entries that have been reserved with XA_ZERO_ENTRY.
+ */
+#define mt_for_each(__tree, __entry, __index, __max) \
+ for (__entry = mt_find(__tree, &(__index), __max); \
+ __entry; __entry = mt_find_after(__tree, &(__index), __max))
+
+#endif /*_LINUX_MAPLE_TREE_H */
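As a quick orientation for the API above, a minimal, illustrative sketch of declaring a cursor with MA_STATE(), taking the internal lock and iterating; the tree name and the ULONG_MAX bound are example choices, not requirements:

    #include <linux/maple_tree.h>

    static DEFINE_MTREE(example_tree);	/* tree using the default internal spinlock */

    static void example_walk(void)
    {
    	MA_STATE(mas, &example_tree, 0, 0);	/* cursor starting at index 0 */
    	void *entry;

    	mas_lock(&mas);
    	mas_for_each(&mas, entry, ULONG_MAX) {
    		/* mas.index and mas.last now hold the entry's full range */
    	}
    	mas_unlock(&mas);
    }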
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 4055cf8cc978..b1fbe4118414 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MARVELL_PHY_H
#define _MARVELL_PHY_H
@@ -6,6 +7,7 @@
/* Known PHY IDs */
#define MARVELL_PHY_ID_88E1101 0x01410c60
+#define MARVELL_PHY_ID_88E3082 0x01410c80
#define MARVELL_PHY_ID_88E1112 0x01410c90
#define MARVELL_PHY_ID_88E1111 0x01410cc0
#define MARVELL_PHY_ID_88E1118 0x01410e10
@@ -14,20 +16,37 @@
#define MARVELL_PHY_ID_88E1149R 0x01410e50
#define MARVELL_PHY_ID_88E1240 0x01410e30
#define MARVELL_PHY_ID_88E1318S 0x01410e90
+#define MARVELL_PHY_ID_88E1340S 0x01410dc0
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
#define MARVELL_PHY_ID_88E1540 0x01410eb0
#define MARVELL_PHY_ID_88E1545 0x01410ea0
+#define MARVELL_PHY_ID_88E1548P 0x01410ec0
#define MARVELL_PHY_ID_88E3016 0x01410e60
+#define MARVELL_PHY_ID_88X3310 0x002b09a0
+#define MARVELL_PHY_ID_88E2110 0x002b09b0
+#define MARVELL_PHY_ID_88X2222 0x01410f10
+#define MARVELL_PHY_ID_88Q2110 0x002b0980
+#define MARVELL_PHY_ID_88Q2220 0x002b0b20
-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+/* Marvell 88E1111 in Finisar SFP module with modified PHY ID */
+#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
+
+/* ID from 88E6020, assumed to be the same for the whole 6250 family */
+#define MARVELL_PHY_ID_88E6250_FAMILY 0x01410db0
+/* These Ethernet switch families contain embedded PHYs, but they do
* not have a model ID. So the switch driver traps reads to the ID2
* register and returns the switch family ID
*/
-#define MARVELL_PHY_ID_88E6390 0x01410f90
+#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41
+#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90
+#define MARVELL_PHY_ID_88E6393_FAMILY 0x002b0b9b
+
+#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
+#define MARVELL_PHY_LED0_LINK_LED1_ACTIVE 0x00000004
#endif /* _MARVELL_PHY_H */
diff --git a/include/linux/math.h b/include/linux/math.h
new file mode 100644
index 000000000000..6dc1d1d32fbc
--- /dev/null
+++ b/include/linux/math.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MATH_H
+#define _LINUX_MATH_H
+
+#include <linux/types.h>
+#include <asm/div64.h>
+#include <uapi/linux/kernel.h>
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+
+/**
+ * round_up - round up to the next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round up to (must be a power of 2)
+ *
+ * Rounds @x up to the next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding up, use roundup() below.
+ */
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+
+/**
+ * round_down - round down to the next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round down to (must be a power of 2)
+ *
+ * Rounds @x down to the next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding down, use rounddown() below.
+ */
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+/**
+ * DIV_ROUND_UP_POW2 - divide and round up
+ * @n: numerator
+ * @d: denominator (must be a power of 2)
+ *
+ * Divides @n by @d, rounding the quotient up (@d must be a power
+ * of 2). Avoids integer overflows that may occur with __KERNEL_DIV_ROUND_UP().
+ * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP().
+ */
+#define DIV_ROUND_UP_POW2(n, d) \
+ ((n) / (d) + !!((n) & ((d) - 1)))
+
+#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
+
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+
+#define DIV_ROUND_UP_ULL(ll, d) \
+ DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
+
+#if BITS_PER_LONG == 32
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
+#else
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
+#endif
+
+/**
+ * roundup - round up to the next specified multiple
+ * @x: the value to round up
+ * @y: multiple to round up to
+ *
+ * Rounds @x up to the next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_up().
+ */
+#define roundup(x, y) ( \
+{ \
+ typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+/**
+ * rounddown - round down to the next specified multiple
+ * @x: the value to round
+ * @y: multiple to round down to
+ *
+ * Rounds @x down to the next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_down().
+ */
+#define rounddown(x, y) ( \
+{ \
+ typeof(x) __x = (x); \
+ __x - (__x % (y)); \
+} \
+)
+
+/*
+ * Divide positive or negative dividend by positive or negative divisor
+ * and round to closest integer. Result is undefined for negative
+ * divisors if the dividend variable type is unsigned and for negative
+ * dividends if the divisor variable type is unsigned.
+ */
+#define DIV_ROUND_CLOSEST(x, divisor)( \
+{ \
+ typeof(x) __x = x; \
+ typeof(divisor) __d = divisor; \
+ (((typeof(x))-1) > 0 || \
+ ((typeof(divisor))-1) > 0 || \
+ (((__x) > 0) == ((__d) > 0))) ? \
+ (((__x) + ((__d) / 2)) / (__d)) : \
+ (((__x) - ((__d) / 2)) / (__d)); \
+} \
+)
+/*
+ * Same as above but for u64 dividends. divisor must be a 32-bit
+ * number.
+ */
+#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
+{ \
+ typeof(divisor) __d = divisor; \
+ unsigned long long _tmp = (x) + (__d) / 2; \
+ do_div(_tmp, __d); \
+ _tmp; \
+} \
+)
+
+#define __STRUCT_FRACT(type) \
+struct type##_fract { \
+ __##type numerator; \
+ __##type denominator; \
+};
+__STRUCT_FRACT(s8)
+__STRUCT_FRACT(u8)
+__STRUCT_FRACT(s16)
+__STRUCT_FRACT(u16)
+__STRUCT_FRACT(s32)
+__STRUCT_FRACT(u32)
+#undef __STRUCT_FRACT
+
+/* Calculate "x * n / d" without unnecessary overflow or loss of precision. */
+#define mult_frac(x, n, d) \
+({ \
+ typeof(x) x_ = (x); \
+ typeof(n) n_ = (n); \
+ typeof(d) d_ = (d); \
+ \
+ typeof(x_) q = x_ / d_; \
+ typeof(x_) r = x_ % d_; \
+ q * n_ + r * n_ / d_; \
+})
+
+#define sector_div(a, b) do_div(a, b)
+
+/**
+ * abs - return absolute value of an argument
+ * @x: the value.
+ *
+ * If @x is of an unsigned type, it is converted to a signed type first.
+ * char is treated as if it were signed (regardless of whether it really is)
+ * but the macro's return type is preserved as char.
+ *
+ * NOTE: for a signed type, if @x is the minimum value the result is undefined,
+ * as there are not enough bits to represent it as a positive number.
+ *
+ * Return: an absolute value of @x.
+ */
+#define abs(x) __abs_choose_expr(x, long long, \
+ __abs_choose_expr(x, long, \
+ __abs_choose_expr(x, int, \
+ __abs_choose_expr(x, short, \
+ __abs_choose_expr(x, char, \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), char), \
+ (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
+ ((void)0)))))))
+
+#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), signed type) || \
+ __builtin_types_compatible_p(typeof(x), unsigned type), \
+ ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
+
+/**
+ * abs_diff - return absolute value of the difference between the arguments
+ * @a: the first argument
+ * @b: the second argument
+ *
+ * @a and @b have to be of the same type. With this restriction we compare
+ * signed to signed and unsigned to unsigned. The result is the subtraction
+ * of the smaller of the two from the bigger, hence the result is always a
+ * positive value.
+ *
+ * Return: an absolute value of the difference between the @a and @b.
+ */
+#define abs_diff(a, b) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ (void)(&__a == &__b); \
+ __a > __b ? (__a - __b) : (__b - __a); \
+})
+
+/**
+ * reciprocal_scale - "scale" a value into range [0, ep_ro)
+ * @val: value
+ * @ep_ro: right open interval endpoint
+ *
+ * Perform a "reciprocal multiplication" in order to "scale" a value into
+ * range [0, @ep_ro), where the upper interval endpoint is right-open.
+ * This is useful, e.g., for accessing an index of an array containing
+ * @ep_ro elements. Think of it as a sort of modulus, only that the
+ * result isn't that of modulo. ;) Note that if the initial input is a
+ * small value, the result will be 0.
+ *
+ * Return: a result based on @val in interval [0, @ep_ro).
+ */
+static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
+{
+ return (u32)(((u64) val * ep_ro) >> 32);
+}
+
+u64 int_pow(u64 base, unsigned int exp);
+unsigned long int_sqrt(unsigned long);
+
+#if BITS_PER_LONG < 64
+u32 int_sqrt64(u64 x);
+#else
+static inline u32 int_sqrt64(u64 x)
+{
+ return (u32)int_sqrt(x);
+}
+#endif
+
+#endif /* _LINUX_MATH_H */
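To make the split between the mask-based and divide-based helpers above concrete, a small sketch with the expected results in comments; the values are worked examples, nothing more:

    #include <linux/math.h>

    static void math_examples(void)
    {
    	unsigned long v;

    	v = round_up(1000, 256);	/* 1024: mask arithmetic, 256 is 2^8 */
    	v = round_down(1000, 256);	/* 768 */
    	v = roundup(1000, 300);		/* 1200: arbitrary multiple, real divide */
    	v = rounddown(1000, 300);	/* 900 */
    	v = DIV_ROUND_UP(10, 3);	/* 4 */
    	v = DIV_ROUND_CLOSEST(10, 3);	/* 3 */
    	v = abs_diff(3U, 10U);		/* 7: operands ordered internally, no underflow */
    	(void)v;
    }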
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 80690c96c734..cc305206d89f 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -1,8 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H
#include <linux/types.h>
+#include <linux/math.h>
#include <asm/div64.h>
+#include <vdso/math64.h>
#if BITS_PER_LONG == 64
@@ -11,6 +14,11 @@
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ * @remainder: pointer to unsigned 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*
* This is commonly provided by 32bit archs to provide an optimized 64bit
* divide.
@@ -23,6 +31,11 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
/**
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ * @remainder: pointer to signed 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
@@ -32,6 +45,11 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
/**
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ * @remainder: pointer to unsigned 64bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
@@ -41,6 +59,10 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
@@ -49,6 +71,10 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
/**
* div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
@@ -88,10 +114,14 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
/**
* div_u64 - unsigned 64bit divide with 32bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
*
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
* divide.
+ *
+ * Return: dividend / divisor
*/
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
@@ -103,6 +133,10 @@ static inline u64 div_u64(u64 dividend, u32 divisor)
/**
* div_s64 - signed 64bit divide with 32bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ *
+ * Return: dividend / divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
@@ -114,25 +148,6 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
-static __always_inline u32
-__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
-{
- u32 ret = 0;
-
- while (dividend >= divisor) {
- /* The following asm() prevents the compiler from
- optimising this loop into a modulo operation. */
- asm("" : "+rm"(dividend));
-
- dividend -= divisor;
- ret++;
- }
-
- *remainder = dividend;
-
- return ret;
-}
-
#ifndef mul_u32_u32
/*
* Many a GCC version messes this up and generates a 64x64 mult :-(
@@ -143,17 +158,28 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
}
#endif
+#ifndef add_u64_u32
+/*
+ * Many a GCC version also messes this up, zero-extending b and then
+ * spilling everything to the stack.
+ */
+static inline u64 add_u64_u32(u64 a, u32 b)
+{
+ return a + b;
+}
+#endif
+
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
#ifndef mul_u64_u64_shr
-static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
@@ -162,18 +188,14 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
#else
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
- u32 ah, al;
+ u32 ah = a >> 32, al = a;
u64 ret;
- al = a;
- ah = a >> 32;
-
ret = mul_u32_u32(al, mul) >> shift;
if (ah)
ret += mul_u32_u32(ah, mul) << (32 - shift);
-
return ret;
}
#endif /* mul_u64_u32_shr */
@@ -224,6 +246,24 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
#endif
+#ifndef mul_s64_u64_shr
+static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
+{
+ u64 ret;
+
+ /*
+ * Extract the sign before the multiplication and put it back
+ * afterwards if needed.
+ */
+ ret = mul_u64_u64_shr(abs(a), b, shift);
+
+ if (a < 0)
+ ret = -((s64) ret);
+
+ return ret;
+}
+#endif /* mul_s64_u64_shr */
+
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
@@ -253,4 +293,138 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
}
#endif /* mul_u64_u32_div */
+/**
+ * mul_u64_add_u64_div_u64 - unsigned 64bit multiply, add, and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @c: unsigned 64bit addend
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product,
+ * add a third value and then divide by a fourth.
+ * The generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture-specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @c) / @d
+ */
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d);
+
+/**
+ * mul_u64_u64_div_u64 - unsigned 64bit multiply and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide by a third value.
+ * The generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture-specific code may trap on zero or overflow.
+ *
+ * Return: @a * @b / @d
+ */
+#define mul_u64_u64_div_u64(a, b, d) mul_u64_add_u64_div_u64(a, b, 0, d)
+
+/**
+ * mul_u64_u64_div_u64_roundup - unsigned 64bit multiply and divide rounded up
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide and round up.
+ * The generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture-specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @d - 1) / @d
+ */
+#define mul_u64_u64_div_u64_roundup(a, b, d) \
+ ({ u64 _tmp = (d); mul_u64_add_u64_div_u64(a, b, _tmp - 1, _tmp); })
+
+
+/**
+ * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 64bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
+#define DIV64_U64_ROUND_UP(ll, d) \
+ ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
+
+/**
+ * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
+#define DIV_U64_ROUND_UP(ll, d) \
+ ({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); })
+
+/**
+ * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
+ ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
+
+/**
+ * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_U64_ROUND_CLOSEST(dividend, divisor) \
+ ({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })
+
+/**
+ * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ *
+ * Divide signed 64bit dividend by signed 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_S64_ROUND_CLOSEST(dividend, divisor)( \
+{ \
+ s64 __x = (dividend); \
+ s32 __d = (divisor); \
+ ((__x > 0) == (__d > 0)) ? \
+ div_s64((__x + (__d / 2)), __d) : \
+ div_s64((__x - (__d / 2)), __d); \
+} \
+)
+
+/**
+ * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple
+ * @x: the value to round up
+ * @y: 32bit multiple to round up to
+ *
+ * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup and
+ * the faster round_up() for powers of 2.
+ *
+ * Return: rounded up value.
+ */
+static inline u64 roundup_u64(u64 x, u32 y)
+{
+ return DIV_U64_ROUND_UP(x, y) * y;
+}
#endif /* _LINUX_MATH64_H */
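As a hedged illustration of why these wrappers exist: on 32bit builds a plain 64/64 division would pull in a libgcc helper, and a naive (a * b) / d overflows at 64 bits. A minimal sketch; the function names are invented for the example:

    #include <linux/math64.h>

    /* Scale a 64bit count by rate/ref via a 128bit intermediate product. */
    static u64 scale_example(u64 count, u64 rate, u64 ref)
    {
    	return mul_u64_u64_div_u64(count, rate, ref);
    }

    /* The common cheap case: 64bit dividend, 32bit divisor. */
    static u64 avg_example(u64 sum, u32 n)
    {
    	return n ? div_u64(sum, n) : 0;
    }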
diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h
deleted file mode 100644
index ad97b06cf930..000000000000
--- a/include/linux/max17040_battery.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (C) 2009 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MAX17040_BATTERY_H_
-#define __MAX17040_BATTERY_H_
-
-struct max17040_platform_data {
- int (*battery_online)(void);
- int (*charger_online)(void);
- int (*charger_enable)(void);
-};
-
-#endif
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index e1bc73414983..97e64184767d 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MBCACHE_H
#define _LINUX_MBCACHE_H
@@ -9,16 +10,29 @@
struct mb_cache;
+/* Cache entry flags */
+enum {
+ MBE_REFERENCED_B = 0,
+ MBE_REUSABLE_B
+};
+
struct mb_cache_entry {
/* List of entries in cache - protected by cache->c_list_lock */
struct list_head e_list;
- /* Hash table list - protected by hash chain bitlock */
+ /*
+ * Hash table list - protected by hash chain bitlock. The entry is
+ * guaranteed to be hashed while e_refcnt > 0.
+ */
struct hlist_bl_node e_hash_list;
+ /*
+ * Entry refcount. Once it reaches zero, entry is unhashed and freed.
+ * While refcount > 0, the entry is guaranteed to stay in the hash and
+ * e.g. mb_cache_entry_try_delete() will fail.
+ */
atomic_t e_refcnt;
/* Key in hash - stable during lifetime of the entry */
u32 e_key;
- u32 e_referenced:1;
- u32 e_reusable:1;
+ unsigned long e_flags;
/* User provided value - stable during lifetime of the entry */
u64 e_value;
};
@@ -28,17 +42,24 @@ void mb_cache_destroy(struct mb_cache *cache);
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
u64 value, bool reusable);
-void __mb_cache_entry_free(struct mb_cache_entry *entry);
-static inline int mb_cache_entry_put(struct mb_cache *cache,
- struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache,
+ struct mb_cache_entry *entry);
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
+static inline void mb_cache_entry_put(struct mb_cache *cache,
+ struct mb_cache_entry *entry)
{
- if (!atomic_dec_and_test(&entry->e_refcnt))
- return 0;
- __mb_cache_entry_free(entry);
- return 1;
+ unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
+
+ if (cnt > 0) {
+ if (cnt <= 2)
+ wake_up_var(&entry->e_refcnt);
+ return;
+ }
+ __mb_cache_entry_free(cache, entry);
}
-void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
u64 value);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
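A sketch of the pattern the new delete_or_get/wait_unused pair enables; the cache, key and value arguments are placeholders. Rather than deleting blindly, a caller can detect that an entry is still referenced, wait for its users, and only then rely on it being gone:

    #include <linux/mbcache.h>

    static void example_forget(struct mb_cache *cache, u32 key, u64 value)
    {
    	struct mb_cache_entry *entry;

    	entry = mb_cache_entry_delete_or_get(cache, key, value);
    	if (entry) {
    		/* Still referenced elsewhere: wait for the users to drop
    		 * their references, then drop the one we were handed. */
    		mb_cache_entry_wait_unused(entry);
    		mb_cache_entry_put(cache, entry);
    	}
    	/* The entry can no longer be found in the cache. */
    }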
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 0d3f14fd2621..4773145246ed 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
struct mbus_dram_window {
u8 cs_index;
u8 mbus_attr;
- u32 base;
- u32 size;
+ u64 base;
+ u64 size;
} cs[4];
};
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 0661af17a758..34dfcc77f505 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
/* 2 values for divider stage reset, others for "testing purposes only" */
# define RTC_DIV_RESET1 0x60
# define RTC_DIV_RESET2 0x70
+ /* In the AMD BKDG, bits 5 and 6 are reserved; bit 4 selects the dv0 bank */
+# define RTC_AMD_BANK_SELECT 0x10
/* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
# define RTC_RATE_SELECT 0x0F
@@ -123,7 +125,12 @@ struct cmos_rtc_board_info {
#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
#endif /* ARCH_RTC_LOCATION */
-unsigned int mc146818_get_time(struct rtc_time *time);
+bool mc146818_does_rtc_work(void);
+int mc146818_get_time(struct rtc_time *time, int timeout);
int mc146818_set_time(struct rtc_time *time);
+bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ int timeout,
+ void *param);
+
#endif /* _MC146818RTC_H */
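A hedged sketch of how the new UIP-avoidance helper appears intended to be used: the callback runs while the RTC's update-in-progress window is known to be clear. The meaning of the timeout argument is an assumption here; check the implementation:

    static void read_seconds_cb(unsigned char seconds, void *param)
    {
    	/* Registers are stable here: UIP was sampled clear on entry. */
    	*(unsigned char *)param = seconds;
    }

    static bool read_rtc_seconds(unsigned char *out)
    {
    	/* Returns false if the UIP bit never cleared (e.g. a dead RTC). */
    	return mc146818_avoid_UIP(read_seconds_cb, 10, out);
    }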
diff --git a/include/linux/mc33xs2410.h b/include/linux/mc33xs2410.h
new file mode 100644
index 000000000000..31c0edf10dd7
--- /dev/null
+++ b/include/linux/mc33xs2410.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
+ */
+#ifndef _MC33XS2410_H
+#define _MC33XS2410_H
+
+#include <linux/spi/spi.h>
+
+MODULE_IMPORT_NS("PWM_MC33XS2410");
+
+int mc33xs2410_read_reg_ctrl(struct spi_device *spi, u8 reg, u16 *val);
+int mc33xs2410_read_reg_diag(struct spi_device *spi, u8 reg, u16 *val);
+int mc33xs2410_modify_reg(struct spi_device *spi, u8 reg, u8 mask, u8 val);
+
+#endif /* _MC33XS2410_H */
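A minimal consumer sketch of the three accessors exported above; the register number and mask are placeholders, not real MC33XS2410 register definitions:

    #include <linux/bits.h>
    #include <linux/mc33xs2410.h>

    static int mc33xs2410_example(struct spi_device *spi)
    {
    	u16 val;
    	int ret;

    	ret = mc33xs2410_read_reg_diag(spi, 0x08, &val);	/* hypothetical register */
    	if (ret)
    		return ret;

    	/* Read-modify-write a single control bit. */
    	return mc33xs2410_modify_reg(spi, 0x01, BIT(0), BIT(0));
    }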
diff --git a/include/linux/mc6821.h b/include/linux/mc6821.h
index 28e301e295da..8dffab19b4ac 100644
--- a/include/linux/mc6821.h
+++ b/include/linux/mc6821.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MC6821_H_
#define _MC6821_H_
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index b1a0ad9d23b3..4ab2691f51a6 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MEN Chameleon Bus.
*
* Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
* Author: Johannes Thumshirn <johannes.thumshirn@men.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2 of the License.
*/
#ifndef _LINUX_MCB_H
#define _LINUX_MCB_H
@@ -66,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
struct mcb_device {
struct device dev;
struct mcb_bus *bus;
- bool is_added;
struct mcb_driver *driver;
u16 id;
int inst;
@@ -79,10 +75,7 @@ struct mcb_device {
struct device *dma_dev;
};
-static inline struct mcb_device *to_mcb_device(struct device *dev)
-{
- return container_of(dev, struct mcb_device, dev);
-}
+#define to_mcb_device(__dev) container_of_const(__dev, struct mcb_device, dev)
/**
* struct mcb_driver - MEN Chameleon Bus device driver
@@ -101,10 +94,7 @@ struct mcb_driver {
void (*shutdown)(struct mcb_device *mdev);
};
-static inline struct mcb_driver *to_mcb_driver(struct device_driver *drv)
-{
- return container_of(drv, struct mcb_driver, driver);
-}
+#define to_mcb_driver(__drv) container_of_const(__drv, struct mcb_driver, driver)
static inline void *mcb_get_drvdata(struct mcb_device *dev)
{
@@ -123,7 +113,7 @@ extern int __must_check __mcb_register_driver(struct mcb_driver *drv,
__mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
extern void mcb_unregister_driver(struct mcb_driver *driver);
#define module_mcb_driver(__mcb_driver) \
- module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver);
+ module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver)
extern void mcb_bus_add_devices(const struct mcb_bus *bus);
extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev);
extern struct mcb_bus *mcb_alloc_bus(struct device *carrier);
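For orientation, a minimal MCB driver skeleton against this header, assuming the usual probe/remove signatures; the device ID is hypothetical. Note that with the stray semicolon dropped from module_mcb_driver() above, the invocation below supplies its own:

    static int example_probe(struct mcb_device *mdev,
    			 const struct mcb_device_id *id)
    {
    	return 0;	/* per-device setup would go here */
    }

    static void example_remove(struct mcb_device *mdev)
    {
    }

    static const struct mcb_device_id example_ids[] = {
    	{ .device = 0x7f },	/* hypothetical Chameleon device ID */
    	{ }
    };
    MODULE_DEVICE_TABLE(mcb, example_ids);

    static struct mcb_driver example_driver = {
    	.driver		= { .name = "mcb-example" },
    	.probe		= example_probe,
    	.remove		= example_remove,
    	.id_table	= example_ids,
    };
    module_mcb_driver(example_driver);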
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index b6e048e1045f..139d05b26f82 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -1,138 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Mediated device definition
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <cjia@nvidia.com>
* Kirti Wankhede <kwankhede@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MDEV_H
#define MDEV_H
-struct mdev_device;
+#include <linux/device.h>
+#include <linux/uuid.h>
-/**
- * struct mdev_parent_ops - Structure to be registered for each parent device to
- * register the device to mdev module.
- *
- * @owner: The module owner.
- * @dev_attr_groups: Attributes of the parent device.
- * @mdev_attr_groups: Attributes of the mediated device.
- * @supported_type_groups: Attributes to define supported types. It is mandatory
- * to provide supported types.
- * @create: Called to allocate basic resources in parent device's
- * driver for a particular mediated device. It is
- * mandatory to provide create ops.
- * @kobj: kobject of type for which 'create' is called.
- * @mdev: mdev_device structure on of mediated device
- * that is being created
- * Returns integer: success (0) or error (< 0)
- * @remove: Called to free resources in parent device's driver for a
- * a mediated device. It is mandatory to provide 'remove'
- * ops.
- * @mdev: mdev_device device structure which is being
- * destroyed
- * Returns integer: success (0) or error (< 0)
- * @open: Open mediated device.
- * @mdev: mediated device.
- * Returns integer: success (0) or error (< 0)
- * @release: release mediated device
- * @mdev: mediated device.
- * @read: Read emulation callback
- * @mdev: mediated device structure
- * @buf: read buffer
- * @count: number of bytes to read
- * @ppos: address.
- * Retuns number on bytes read on success or error.
- * @write: Write emulation callback
- * @mdev: mediated device structure
- * @buf: write buffer
- * @count: number of bytes to be written
- * @ppos: address.
- * Retuns number on bytes written on success or error.
- * @ioctl: IOCTL callback
- * @mdev: mediated device structure
- * @cmd: ioctl command
- * @arg: arguments to ioctl
- * @mmap: mmap callback
- * @mdev: mediated device structure
- * @vma: vma structure
- * Parent device that support mediated device should be registered with mdev
- * module with mdev_parent_ops structure.
- **/
-struct mdev_parent_ops {
- struct module *owner;
- const struct attribute_group **dev_attr_groups;
- const struct attribute_group **mdev_attr_groups;
- struct attribute_group **supported_type_groups;
+struct mdev_type;
+
+struct mdev_device {
+ struct device dev;
+ guid_t uuid;
+ struct list_head next;
+ struct mdev_type *type;
+ bool active;
+};
- int (*create)(struct kobject *kobj, struct mdev_device *mdev);
- int (*remove)(struct mdev_device *mdev);
- int (*open)(struct mdev_device *mdev);
- void (*release)(struct mdev_device *mdev);
- ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
- size_t count, loff_t *ppos);
- ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
- size_t count, loff_t *ppos);
- long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
- unsigned long arg);
- int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
+struct mdev_type {
+ /* set by the driver before calling mdev_register_parent(): */
+ const char *sysfs_name;
+ const char *pretty_name;
+
+ /* set by the core, can be used by drivers */
+ struct mdev_parent *parent;
+
+ /* internal only */
+ struct kobject kobj;
+ struct kobject *devices_kobj;
};
-/* interface for exporting mdev supported type attributes */
-struct mdev_type_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf);
- ssize_t (*store)(struct kobject *kobj, struct device *dev,
- const char *buf, size_t count);
+/* embedded into the struct device that the mdev devices hang off */
+struct mdev_parent {
+ struct device *dev;
+ struct mdev_driver *mdev_driver;
+ struct kset *mdev_types_kset;
+ /* Synchronize device creation/removal with parent unregistration */
+ struct rw_semaphore unreg_sem;
+ struct mdev_type **types;
+ unsigned int nr_types;
+ atomic_t available_instances;
};
-#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \
-struct mdev_type_attribute mdev_type_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-#define MDEV_TYPE_ATTR_RW(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name)
-#define MDEV_TYPE_ATTR_RO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
-#define MDEV_TYPE_ATTR_WO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
+static inline struct mdev_device *to_mdev_device(struct device *dev)
+{
+ return container_of(dev, struct mdev_device, dev);
+}
/**
* struct mdev_driver - Mediated device driver
- * @name: driver name
+ * @device_api: string to return for the device_api sysfs
+ * @max_instances: maximum number of instances supported (optional)
* @probe: called when new device created
* @remove: called when device removed
+ * @get_available: Return the max number of instances that can be created
+ * @show_description: Print a description of the mtype
* @driver: device driver structure
- *
**/
struct mdev_driver {
- const char *name;
- int (*probe)(struct device *dev);
- void (*remove)(struct device *dev);
+ const char *device_api;
+ unsigned int max_instances;
+ int (*probe)(struct mdev_device *dev);
+ void (*remove)(struct mdev_device *dev);
+ unsigned int (*get_available)(struct mdev_type *mtype);
+ ssize_t (*show_description)(struct mdev_type *mtype, char *buf);
struct device_driver driver;
};
-#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
-
-extern void *mdev_get_drvdata(struct mdev_device *mdev);
-extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
-extern uuid_le mdev_uuid(struct mdev_device *mdev);
-
-extern struct bus_type mdev_bus_type;
-
-extern int mdev_register_device(struct device *dev,
- const struct mdev_parent_ops *ops);
-extern void mdev_unregister_device(struct device *dev);
+int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ struct mdev_driver *mdev_driver, struct mdev_type **types,
+ unsigned int nr_types);
+void mdev_unregister_parent(struct mdev_parent *parent);
-extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
-extern void mdev_unregister_driver(struct mdev_driver *drv);
+int mdev_register_driver(struct mdev_driver *drv);
+void mdev_unregister_driver(struct mdev_driver *drv);
-extern struct device *mdev_parent_dev(struct mdev_device *mdev);
-extern struct device *mdev_dev(struct mdev_device *mdev);
-extern struct mdev_device *mdev_from_dev(struct device *dev);
+static inline struct device *mdev_dev(struct mdev_device *mdev)
+{
+ return &mdev->dev;
+}
#endif /* MDEV_H */
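A rough sketch of the reworked registration flow, with names and the device_api string as placeholders: the parent driver now supplies its types array up front instead of sysfs attribute groups, using only the declarations in this header:

    static int ex_probe(struct mdev_device *mdev)   { return 0; }
    static void ex_remove(struct mdev_device *mdev) { }

    static struct mdev_driver ex_mdev_driver = {
    	.device_api = "vfio-example",		/* placeholder string */
    	.probe	    = ex_probe,
    	.remove	    = ex_remove,
    	.driver	    = { .name = "ex-mdev" },
    };

    static struct mdev_type ex_type = {
    	.sysfs_name  = "ex-1",
    	.pretty_name = "Example type 1",
    };
    static struct mdev_type *ex_types[] = { &ex_type };
    static struct mdev_parent ex_parent;

    /* In the physical device's probe path: */
    static int ex_register(struct device *physdev)
    {
    	int ret = mdev_register_driver(&ex_mdev_driver);

    	if (ret)
    		return ret;
    	return mdev_register_parent(&ex_parent, physdev, &ex_mdev_driver,
    				    ex_types, ARRAY_SIZE(ex_types));
    }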
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 76f52bbbb2f4..cffabdbce075 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MDIO_BITBANG_H
#define __LINUX_MDIO_BITBANG_H
@@ -32,10 +33,16 @@ struct mdiobb_ops {
struct mdiobb_ctrl {
const struct mdiobb_ops *ops;
- /* reset callback */
- int (*reset)(struct mii_bus *bus);
+ unsigned int override_op_c22;
+ u8 op_c22_read;
+ u8 op_c22_write;
};
+int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val);
+int mdiobb_read_c45(struct mii_bus *bus, int devad, int phy, int reg);
+int mdiobb_write_c45(struct mii_bus *bus, int devad, int phy, int reg, u16 val);
+
/* The returned bus is not yet registered with the phy layer. */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
new file mode 100644
index 000000000000..cea443a672cb
--- /dev/null
+++ b/include/linux/mdio-gpio.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MDIO_GPIO_H
+#define __LINUX_MDIO_GPIO_H
+
+#define MDIO_GPIO_MDC 0
+#define MDIO_GPIO_MDIO 1
+#define MDIO_GPIO_MDO 2
+
+#endif
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
index 61f5b21b31c7..a5d58f221939 100644
--- a/include/linux/mdio-mux.h
+++ b/include/linux/mdio-mux.h
@@ -12,7 +12,16 @@
#include <linux/device.h>
#include <linux/phy.h>
+/* mdio_mux_init() - Initialize an MDIO mux
+ * @dev The device owning the MDIO mux
+ * @mux_node The device node of the MDIO mux
+ * @switch_fn The function called for switching target MDIO child
+ * @mux_handle A pointer to a (void *) used internally by mdio-mux
+ * @data Private data used by switch_fn()
+ * @mux_bus An optional parent bus (otherwise the parent_bus property is used)
+ */
int mdio_mux_init(struct device *dev,
+ struct device_node *mux_node,
int (*switch_fn) (int cur, int desired, void *data),
void **mux_handle,
void *data,
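A sketch of the updated call with the new mux_node argument, as a DT-based mux driver's probe might issue it; switch_fn's body is entirely hardware-specific and left as a stub:

    static int example_switch_fn(int cur, int desired, void *data)
    {
    	/* Poke whatever GPIO or register selects child bus 'desired'. */
    	return 0;
    }

    static int example_mux_probe(struct platform_device *pdev)
    {
    	void *mux_handle;

    	return mdio_mux_init(&pdev->dev, pdev->dev.of_node,
    			     example_switch_fn, &mux_handle,
    			     NULL /* data */, NULL /* mux_bus from DT */);
    }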
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ca08ab16ecdc..42d6d47e445b 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -1,18 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mdio.h: definitions for MDIO (clause 45) transceivers
* Copyright 2006-2009 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef __LINUX_MDIO_H__
#define __LINUX_MDIO_H__
#include <uapi/linux/mdio.h>
+#include <linux/bitfield.h>
#include <linux/mod_devicetable.h>
+struct gpio_desc;
struct mii_bus;
+struct reset_control;
/* Multiple levels of nesting are possible. However typically this is
* limited to nested DSA like layer, a MUX layer, and the normal
@@ -28,19 +28,24 @@ enum mdio_mutex_lock_class {
struct mdio_device {
struct device dev;
- const struct dev_pm_ops *pm_ops;
struct mii_bus *bus;
char modalias[MDIO_NAME_SIZE];
- int (*bus_match)(struct device *dev, struct device_driver *drv);
+ int (*bus_match)(struct device *dev, const struct device_driver *drv);
void (*device_free)(struct mdio_device *mdiodev);
void (*device_remove)(struct mdio_device *mdiodev);
/* Bus address of the MDIO device (0-31) */
int addr;
int flags;
+ int reset_state;
+ struct gpio_desc *reset_gpio;
+ struct reset_control *reset_ctrl;
+ unsigned int reset_assert_delay;
+ unsigned int reset_deassert_delay;
};
-#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
+
+#define to_mdio_device(__dev) container_of_const(__dev, struct mdio_device, dev)
/* struct mdio_driver_common: Common to all MDIO drivers */
struct mdio_driver_common {
@@ -48,8 +53,9 @@ struct mdio_driver_common {
int flags;
};
#define MDIO_DEVICE_FLAG_PHY 1
-#define to_mdio_common_driver(d) \
- container_of(d, struct mdio_driver_common, driver)
+
+#define to_mdio_common_driver(__drv_c) container_of_const(__drv_c, struct mdio_driver_common, \
+ driver)
/* struct mdio_driver: Generic MDIO driver */
struct mdio_driver {
@@ -63,17 +69,42 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
+
+ /* Quiesces the device on system shutdown, turns off interrupts etc */
+ void (*shutdown)(struct mdio_device *mdiodev);
};
-#define to_mdio_driver(d) \
- container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv)
+
+#define to_mdio_driver(__drv_m) container_of_const(to_mdio_common_driver(__drv_m), \
+ struct mdio_driver, mdiodrv)
+
+/* device driver data */
+static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data)
+{
+ dev_set_drvdata(&mdio->dev, data);
+}
+
+static inline void *mdiodev_get_drvdata(struct mdio_device *mdio)
+{
+ return dev_get_drvdata(&mdio->dev);
+}
void mdio_device_free(struct mdio_device *mdiodev);
struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr);
int mdio_device_register(struct mdio_device *mdiodev);
void mdio_device_remove(struct mdio_device *mdiodev);
+void mdio_device_reset(struct mdio_device *mdiodev, int value);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
-int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
+
+static inline void mdio_device_get(struct mdio_device *mdiodev)
+{
+ get_device(&mdiodev->dev);
+}
+
+static inline void mdio_device_put(struct mdio_device *mdiodev)
+{
+ mdio_device_free(mdiodev);
+}
static inline bool mdio_phy_id_is_c45(int phy_id)
{
@@ -130,31 +161,12 @@ extern int mdio_set_flag(const struct mdio_if_info *mdio,
bool sense);
extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds);
extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
-extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
- struct ethtool_cmd *ecmd,
- u32 npage_adv, u32 npage_lpa);
extern void
mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
struct ethtool_link_ksettings *cmd,
u32 npage_adv, u32 npage_lpa);
/**
- * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
- * @mdio: MDIO interface
- * @ecmd: Ethtool request structure
- *
- * Since the CSRs for auto-negotiation using next pages are not fully
- * standardised, this function does not attempt to decode them. Use
- * mdio45_ethtool_gset_npage() to specify advertisement bits from next
- * pages.
- */
-static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
- struct ethtool_cmd *ecmd)
-{
- mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
-}
-
-/**
* mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS
* @mdio: MDIO interface
* @cmd: Ethtool request structure
@@ -257,10 +269,412 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
return reg;
}
+/**
+ * linkmode_adv_to_mii_10gbt_adv_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the C45
+ * 10GBASE-T AN CONTROL (7.32) register.
+ */
+static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ advertising))
+ result |= MDIO_AN_10GBT_CTRL_ADV2_5G;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ advertising))
+ result |= MDIO_AN_10GBT_CTRL_ADV5G;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ advertising))
+ result |= MDIO_AN_10GBT_CTRL_ADV10G;
+
+ return result;
+}
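For instance, a C45 PHY driver could push the cached advertisement into the AN control register with a masked write, roughly as phylib's generic C45 code does; a sketch under that assumption, not the canonical implementation:

    static int example_c45_config_aneg(struct phy_device *phydev)
    {
    	u32 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);

    	/* Touch only the rate bits the helper knows about. */
    	return phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
    			      MDIO_AN_10GBT_CTRL_ADV2_5G |
    			      MDIO_AN_10GBT_CTRL_ADV5G |
    			      MDIO_AN_10GBT_CTRL_ADV10G, adv);
    }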
+
+/**
+ * mii_10gbt_stat_mod_linkmode_lpa_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the C45 10GBASE-T AN STATUS register
+ *
+ * A small helper function that translates C45 10GBASE-T AN STATUS register bits
+ * to linkmode advertisement settings. Other bits in advertising aren't changed.
+ */
+static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising,
+ u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ advertising, lpa & MDIO_AN_10GBT_STAT_LP2_5G);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ advertising, lpa & MDIO_AN_10GBT_STAT_LP5G);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ advertising, lpa & MDIO_AN_10GBT_STAT_LP10G);
+}
+
+/**
+ * mii_t1_adv_l_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [15:0] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [15:0] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_l_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_CAP);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_ASYM);
+}
+
+/**
+ * mii_t1_adv_m_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [31:16] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [31:16] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_B10L);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_100BT1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_1000BT1);
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_l_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [15:0] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_l_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_m_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [31:16] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_B10L;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_100BT1;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_1000BT1;
+
+ return result;
+}
+
+/**
+ * mii_eee_cap1_mod_linkmode_t() - update a linkmode from an EEE register value
+ * @adv: target the linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the values of the following registers to the linkmode:
+ * IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20)
+ * IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60)
+ * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61)
+ */
+static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ adv, val & MDIO_EEE_100TX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ adv, val & MDIO_EEE_1000T);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ adv, val & MDIO_EEE_10GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, val & MDIO_EEE_1000KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, val & MDIO_EEE_10GKX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, val & MDIO_EEE_10GKR);
+}
+
+/**
+ * mii_eee_cap2_mod_linkmode_sup_t() - update supported linkmodes from EEE cap2
+ * @adv: target the linkmode settings
+ * @val: register value
+ *
+ * A function that translates the value of the following register to the linkmode:
+ * IEEE 802.3-2022 45.2.3.11 "EEE control and capability 2" register (3.21)
+ */
+static inline void mii_eee_cap2_mod_linkmode_sup_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
+ * mii_eee_cap2_mod_linkmode_adv_t() - update advertised linkmodes from EEE adv2
+ * @adv: target the linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the values of the following registers to the linkmode:
+ * IEEE 802.3-2022 45.2.7.16 "EEE advertisement 2" register (7.62)
+ * IEEE 802.3-2022 45.2.7.17 "EEE link partner ability 2" register (7.63)
+ * Note: This function is currently identical to mii_eee_cap2_mod_linkmode_sup_t().
+ * However, for certain not-yet-supported modes the bits differ, so separate
+ * functions are provided from the start.
+ */
+static inline void mii_eee_cap2_mod_linkmode_adv_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
+ * linkmode_to_mii_eee_cap1_t() - convert a linkmode to an EEE adv 1 value
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates a linkmode to the value of the IEEE 802.3-2018
+ * 45.2.7.13 "EEE advertisement 1" register (7.60).
+ */
+static inline u32 linkmode_to_mii_eee_cap1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, adv))
+ result |= MDIO_EEE_100TX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_1000T;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_10GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, adv))
+ result |= MDIO_EEE_1000KX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, adv))
+ result |= MDIO_EEE_10GKX4;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, adv))
+ result |= MDIO_EEE_10GKR;
+
+ return result;
+}
+
+/**
+ * linkmode_to_mii_eee_cap2_t() - convert a linkmode to an EEE adv 2 value
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates a linkmode to the value of the IEEE 802.3-2022
+ * 45.2.7.16 "EEE advertisement 2" register (7.62).
+ */
+static inline u32 linkmode_to_mii_eee_cap2_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, adv))
+ result |= MDIO_EEE_2_5GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_5GT;
+
+ return result;
+}
+
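A usage sketch (editorial, not part of this patch) of the encode direction, assuming phy_write_mmd() and the MDIO_AN_EEE_ADV/MDIO_AN_EEE_ADV2 register defines from the PHY core:

/* Sketch: program EEE advertisement 1 and 2 from a linkmode bitmap. */
static int example_set_eee_adv(struct phy_device *phydev, unsigned long *adv)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
			    linkmode_to_mii_eee_cap1_t(adv));
	if (ret < 0)
		return ret;

	return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV2,
			     linkmode_to_mii_eee_cap2_t(adv));
}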
+/**
+ * mii_10base_t1_adv_mod_linkmode_t() - update a linkmode from 10BASE-T1 AN status
+ * @adv: linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the IEEE 802.3cg-2019 45.2.7.26 "10BASE-T1 AN
+ * status" register (7.527) value to the linkmode.
+ */
+static inline void mii_10base_t1_adv_mod_linkmode_t(unsigned long *adv, u16 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ adv, val & MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L);
+}
+
+/**
+ * linkmode_adv_to_mii_10base_t1_t() - convert a linkmode to 10BASE-T1 AN control
+ * @adv: linkmode advertisement settings
+ *
+ * A function that translates the linkmode to the IEEE 802.3cg-2019 45.2.7.25
+ * "10BASE-T1 AN control" register (7.526) value.
+ */
+static inline u32 linkmode_adv_to_mii_10base_t1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, adv))
+ result |= MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L;
+
+ return result;
+}
+
+/**
+ * mii_c73_mod_linkmode - convert a Clause 73 advertisement to linkmodes
+ * @adv: linkmode advertisement setting
+ * @lpa: array of three u16s containing the advertisement
+ *
+ * Convert an IEEE 802.3 Clause 73 advertisement to ethtool link modes.
+ */
+static inline void mii_c73_mod_linkmode(unsigned long *adv, u16 *lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_PAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_ASM_DIR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_1000BASE_KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_CR4);
+ /* 100GBASE_CR10 and 100GBASE_KP4 not implemented */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_CR4);
+ /* 25GBASE_R_S not implemented */
+ /* The 25GBASE_R bit can be used for 25GBASE-KR or 25GBASE-CR modes */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ adv, lpa[2] & MDIO_AN_C73_2_2500BASE_KX);
+ /* 5GBASE_KR not implemented */
+}
+
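A decoding sketch (editorial, not part of this patch); how the three base-page words are fetched is PCS-specific and assumed to be done by the caller:

/* Sketch: translate a received Clause 73 base page into ethtool link modes. */
static void example_decode_c73(unsigned long *lp_advertising, u16 lpa[3])
{
	linkmode_zero(lp_advertising);
	mii_c73_mod_linkmode(lp_advertising, lpa);
}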
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+ u16 set);
+int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
+
int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+ u16 set);
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
+int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum);
+int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 val);
+int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 mask, u16 set);
+
+int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 mask, u16 set);
+
+static inline int __mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+ return __mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int __mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return __mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int __mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return __mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
+
+static inline int __mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
+{
+ return __mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
+}
+
+static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+ return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
+
+static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
+{
+ return mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
+}
+
+static inline int mdiodev_c45_modify(struct mdio_device *mdiodev, int devad,
+ u32 regnum, u16 mask, u16 set)
+{
+ return mdiobus_c45_modify(mdiodev->bus, mdiodev->addr, devad, regnum,
+ mask, set);
+}
+
+static inline int mdiodev_c45_modify_changed(struct mdio_device *mdiodev,
+ int devad, u32 regnum, u16 mask,
+ u16 set)
+{
+ return mdiobus_c45_modify_changed(mdiodev->bus, mdiodev->addr, devad,
+ regnum, mask, set);
+}
+
+static inline int mdiodev_c45_read(struct mdio_device *mdiodev, int devad,
+ u16 regnum)
+{
+ return mdiobus_c45_read(mdiodev->bus, mdiodev->addr, devad, regnum);
+}
+
+static inline int mdiodev_c45_write(struct mdio_device *mdiodev, u32 devad,
+ u16 regnum, u16 val)
+{
+ return mdiobus_c45_write(mdiodev->bus, mdiodev->addr, devad, regnum,
+ val);
+}
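As a usage sketch (editorial, not part of this patch), the mdiodev_*() accessors save callers from spelling out mdiodev->bus and mdiodev->addr at every call site; for example, taking a Clause 22 device out of power-down:

/* Sketch: clear BMCR.PDOWN on the device behind an mdio_device handle. */
static int example_power_up(struct mdio_device *mdiodev)
{
	return mdiodev_modify(mdiodev, MII_BMCR, BMCR_PDOWN, 0);
}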
int mdiobus_register_device(struct mdio_device *mdiodev);
int mdiobus_unregister_device(struct mdio_device *mdiodev);
@@ -269,21 +683,13 @@ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
/**
* mdio_module_driver() - Helper macro for registering mdio drivers
+ * @_mdio_driver: driver to register
*
* Helper macro for MDIO drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
* replaces module_init() and module_exit().
*/
-#define mdio_module_driver(_mdio_driver) \
-static int __init mdio_module_init(void) \
-{ \
- return mdio_driver_register(&_mdio_driver); \
-} \
-module_init(mdio_module_init); \
-static void __exit mdio_module_exit(void) \
-{ \
- mdio_driver_unregister(&_mdio_driver); \
-} \
-module_exit(mdio_module_exit)
+#define mdio_module_driver(_mdio_driver) \
+ module_driver(_mdio_driver, mdio_driver_register, mdio_driver_unregister)
#endif /* __LINUX_MDIO_H__ */
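For reference, a minimal driver using the simplified macro might look as follows (an editorial sketch with illustrative names, not part of this patch):

static int example_probe(struct mdio_device *mdiodev)
{
	return 0;
}

static void example_remove(struct mdio_device *mdiodev)
{
}

static struct mdio_driver example_mdio_driver = {
	.probe = example_probe,
	.remove = example_remove,
	.mdiodrv.driver = {
		.name = "example-mdio",
	},
};
mdio_module_driver(example_mdio_driver);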
diff --git a/include/linux/mdio/mdio-i2c.h b/include/linux/mdio/mdio-i2c.h
new file mode 100644
index 000000000000..65b550a6fc32
--- /dev/null
+++ b/include/linux/mdio/mdio-i2c.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MDIO I2C bridge
+ *
+ * Copyright (C) 2015 Russell King
+ */
+#ifndef MDIO_I2C_H
+#define MDIO_I2C_H
+
+struct device;
+struct i2c_adapter;
+struct mii_bus;
+
+enum mdio_i2c_proto {
+ MDIO_I2C_NONE,
+ MDIO_I2C_MARVELL_C22,
+ MDIO_I2C_C45,
+ MDIO_I2C_ROLLBALL,
+};
+
+struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol);
+
+#endif
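A usage sketch (editorial, not part of this patch), modelled on how an SFP cage driver bridges MDIO over the module's I2C bus; the caller is expected to check the result with IS_ERR():

/* Sketch: create an I2C-backed MDIO bus for an SFP module. */
static struct mii_bus *example_sfp_mii(struct device *dev,
				       struct i2c_adapter *i2c)
{
	return mdio_i2c_alloc(dev, i2c, MDIO_I2C_MARVELL_C22);
}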
diff --git a/include/linux/mdio/mdio-mscc-miim.h b/include/linux/mdio/mdio-mscc-miim.h
new file mode 100644
index 000000000000..1ce699740af6
--- /dev/null
+++ b/include/linux/mdio/mdio-mscc-miim.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Driver for the MDIO interface of Microsemi network switches.
+ *
+ * Author: Colin Foster <colin.foster@in-advantage.com>
+ * Copyright (C) 2021 Innovative Advantage
+ */
+#ifndef MDIO_MSCC_MIIM_H
+#define MDIO_MSCC_MIIM_H
+
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+int mscc_miim_setup(struct device *device, struct mii_bus **bus,
+ const char *name, struct regmap *mii_regmap,
+ int status_offset, bool ignore_read_errors);
+
+#endif
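A setup sketch (editorial, not part of this patch); the bus name and status offset are illustrative:

/* Sketch: build and register a MIIM bus on top of an existing regmap. */
static int example_miim(struct device *dev, struct regmap *mii_regmap)
{
	struct mii_bus *bus;
	int err;

	err = mscc_miim_setup(dev, &bus, "example-miim", mii_regmap, 0, false);
	if (err)
		return err;

	return mdiobus_register(bus);
}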
diff --git a/include/linux/mdio/mdio-regmap.h b/include/linux/mdio/mdio-regmap.h
new file mode 100644
index 000000000000..679d9069846b
--- /dev/null
+++ b/include/linux/mdio/mdio-regmap.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Driver for MMIO-mapped MDIO devices. Some IPs expose internal PHYs or PCS
+ * blocks within the MMIO-mapped area.
+ *
+ * Copyright (C) 2023 Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+#ifndef MDIO_REGMAP_H
+#define MDIO_REGMAP_H
+
+#include <linux/phy.h>
+
+struct device;
+struct regmap;
+
+struct mdio_regmap_config {
+ struct device *parent;
+ struct regmap *regmap;
+ char name[MII_BUS_ID_SIZE];
+ u8 valid_addr;
+ bool autoscan;
+};
+
+struct mii_bus *devm_mdio_regmap_register(struct device *dev,
+ const struct mdio_regmap_config *config);
+
+#endif
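A usage sketch (editorial, not part of this patch): exposing a single MMIO-mapped PHY/PCS at address 0 through a regmap-backed bus; names are illustrative.

/* Sketch: register an MDIO bus whose accesses go through a regmap. */
static struct mii_bus *example_regmap_mdio(struct device *dev,
					   struct regmap *regmap)
{
	struct mdio_regmap_config cfg = {
		.parent = dev,
		.regmap = regmap,
		.valid_addr = 0,
		.autoscan = false,
	};

	snprintf(cfg.name, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));

	return devm_mdio_regmap_register(dev, &cfg);
}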
diff --git a/include/linux/mdio/mdio-xgene.h b/include/linux/mdio/mdio-xgene.h
new file mode 100644
index 000000000000..9e588965dc83
--- /dev/null
+++ b/include/linux/mdio/mdio-xgene.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Applied Micro X-Gene SoC MDIO Driver
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ */
+
+#ifndef __MDIO_XGENE_H__
+#define __MDIO_XGENE_H__
+
+#include <linux/bits.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
+#define BLOCK_DIAG_CSR_OFFSET 0xd000
+#define XGENET_CONFIG_REG_ADDR 0x20
+
+#define MAC_ADDR_REG_OFFSET 0x00
+#define MAC_COMMAND_REG_OFFSET 0x04
+#define MAC_WRITE_REG_OFFSET 0x08
+#define MAC_READ_REG_OFFSET 0x0c
+#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+
+#define CLKEN_OFFSET 0x08
+#define SRST_OFFSET 0x00
+
+#define MENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70
+#define MENET_BLOCK_MEM_RDY_ADDR 0x74
+
+#define MAC_CONFIG_1_ADDR 0x00
+#define MII_MGMT_COMMAND_ADDR 0x24
+#define MII_MGMT_ADDRESS_ADDR 0x28
+#define MII_MGMT_CONTROL_ADDR 0x2c
+#define MII_MGMT_STATUS_ADDR 0x30
+#define MII_MGMT_INDICATORS_ADDR 0x34
+#define SOFT_RESET BIT(31)
+
+#define MII_MGMT_CONFIG_ADDR 0x20
+#define MII_MGMT_COMMAND_ADDR 0x24
+#define MII_MGMT_ADDRESS_ADDR 0x28
+#define MII_MGMT_CONTROL_ADDR 0x2c
+#define MII_MGMT_STATUS_ADDR 0x30
+#define MII_MGMT_INDICATORS_ADDR 0x34
+
+#define MIIM_COMMAND_ADDR 0x20
+#define MIIM_FIELD_ADDR 0x24
+#define MIIM_CONFIGURATION_ADDR 0x28
+#define MIIM_LINKFAILVECTOR_ADDR 0x2c
+#define MIIM_INDICATOR_ADDR 0x30
+#define MIIMRD_FIELD_ADDR 0x34
+
+#define MDIO_CSR_OFFSET 0x5000
+
+#define REG_ADDR_POS 0
+#define REG_ADDR_LEN 5
+#define PHY_ADDR_POS 8
+#define PHY_ADDR_LEN 5
+
+#define HSTMIIMWRDAT_POS 0
+#define HSTMIIMWRDAT_LEN 16
+#define HSTPHYADX_POS 23
+#define HSTPHYADX_LEN 5
+#define HSTREGADX_POS 18
+#define HSTREGADX_LEN 5
+#define HSTLDCMD BIT(3)
+#define HSTMIIMCMD_POS 0
+#define HSTMIIMCMD_LEN 3
+
+#define BUSY_MASK BIT(0)
+#define READ_CYCLE_MASK BIT(0)
+
+enum xgene_enet_cmd {
+ XGENE_ENET_WR_CMD = BIT(31),
+ XGENE_ENET_RD_CMD = BIT(30)
+};
+
+enum {
+ MIIM_CMD_IDLE,
+ MIIM_CMD_LEGACY_WRITE,
+ MIIM_CMD_LEGACY_READ,
+};
+
+enum xgene_mdio_id {
+ XGENE_MDIO_RGMII = 1,
+ XGENE_MDIO_XFI
+};
+
+struct xgene_mdio_pdata {
+ struct clk *clk;
+ struct device *dev;
+ void __iomem *mac_csr_addr;
+ void __iomem *diag_csr_addr;
+ void __iomem *mdio_csr_addr;
+ struct mii_bus *mdio_bus;
+ int mdio_id;
+ spinlock_t mac_lock; /* mac lock */
+};
+
+/* Set the specified value into a bit-field defined by its starting position
+ * and length within a single u64.
+ */
+static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
+{
+ return (val & ((1ULL << len) - 1)) << pos;
+}
+
+#define SET_VAL(field, val) \
+ xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
+
+#define SET_BIT(field) \
+ xgene_enet_set_field_value(field ## _POS, 1, 1)
+
+/* Get the value from a bit-field defined by its starting position
+ * and length within the specified u64.
+ */
+static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
+{
+ return (src >> pos) & ((1ULL << len) - 1);
+}
+
+#define GET_VAL(field, src) \
+ xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
+
+#define GET_BIT(field, src) \
+ xgene_enet_get_field_value(field ## _POS, 1, src)
+
+u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr);
+void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data);
+int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
+int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
+struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
+
+#endif /* __MDIO_XGENE_H__ */
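A field-packing sketch (editorial, not part of this patch) using the helpers above; PHY_ADDR and REG_ADDR are the address-word fields defined in this header:

/* Sketch: pack and unpack the MII management address word. */
static u64 example_pack_addr(int phy_id, int reg)
{
	return SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg);
}

static int example_unpack_phy(u64 addr)
{
	return GET_VAL(PHY_ADDR, addr);
}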
diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h
new file mode 100644
index 000000000000..506912ad363b
--- /dev/null
+++ b/include/linux/mei_aux.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_MEI_AUX_H
+#define _LINUX_MEI_AUX_H
+
+#include <linux/auxiliary_bus.h>
+
+/**
+ * struct mei_aux_device - mei auxiliary device
+ * @aux_dev: auxiliary device object
+ * @irq: interrupt driving the mei auxiliary device
+ * @bar: mmio resource bar reserved to mei auxiliary device
+ * @ext_op_mem: resource for extended operational memory,
+ * used in graphics PXP mode.
+ * @slow_firmware: The device has slow underlying firmware.
+ * Such firmware requires larger operation timeouts.
+ */
+struct mei_aux_device {
+ struct auxiliary_device aux_dev;
+ int irq;
+ struct resource bar;
+ struct resource ext_op_mem;
+ bool slow_firmware;
+};
+
+#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct mei_aux_device, aux_dev)
+
+#endif /* _LINUX_MEI_AUX_H */
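A probe-time sketch (editorial, not part of this patch); the 10x timeout scaling is illustrative:

/* Sketch: recover the mei_aux_device in an auxiliary driver probe. */
static int example_probe(struct auxiliary_device *aux_dev,
			 const struct auxiliary_device_id *id)
{
	struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
	unsigned long timeout = adev->slow_firmware ? 10 * HZ : HZ;

	dev_info(&aux_dev->dev, "irq=%d timeout=%lu\n", adev->irq, timeout);
	return 0;
}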
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index a0d274fe08f1..a82755e1fc40 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -1,3 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2013-2016, Intel Corporation. All rights reserved.
+ */
#ifndef _LINUX_MEI_CL_BUS_H
#define _LINUX_MEI_CL_BUS_H
@@ -7,6 +11,7 @@
struct mei_cl_device;
struct mei_device;
+struct scatterlist;
typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
@@ -26,11 +31,11 @@ typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
* @rx_work: async work to execute Rx event callback
* @rx_cb: Drivers register this callback to get asynchronous ME
* Rx buffer pending notifications.
- * @notif_work: async work to execute FW notif event callback
+ * @notif_work: async work to execute FW notify event callback
* @notif_cb: Drivers register this callback to get asynchronous ME
* FW notification pending notifications.
*
- * @do_match: wheather device can be matched with a driver
+ * @do_match: whether the device can be matched with a driver
* @is_added: device is already scanned
* @priv_data: client private data
*/
@@ -54,6 +59,8 @@ struct mei_cl_device {
void *priv_data;
};
+#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
+
struct mei_cl_driver {
struct device_driver driver;
const char *name;
@@ -62,7 +69,7 @@ struct mei_cl_driver {
int (*probe)(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id);
- int (*remove)(struct mei_cl_device *cldev);
+ void (*remove)(struct mei_cl_device *cldev);
};
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
@@ -85,23 +92,42 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
mei_cldev_driver_register,\
mei_cldev_driver_unregister)
-ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length);
+ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, unsigned long timeout);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
-ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
- size_t length);
+ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ unsigned long timeout);
+ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag);
+ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag, unsigned long timeout);
+ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag);
+ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag, unsigned long timeout);
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
mei_cldev_cb_t notif_cb);
-const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
u8 mei_cldev_ver(const struct mei_cl_device *cldev);
+size_t mei_cldev_mtu(const struct mei_cl_device *cldev);
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev);
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data);
int mei_cldev_enable(struct mei_cl_device *cldev);
int mei_cldev_disable(struct mei_cl_device *cldev);
-bool mei_cldev_enabled(struct mei_cl_device *cldev);
+bool mei_cldev_enabled(const struct mei_cl_device *cldev);
+ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
+ u8 client_id, u32 fence_id,
+ struct scatterlist *sg_in,
+ size_t total_in_len,
+ struct scatterlist *sg_out);
+
+void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size);
+int mei_cldev_dma_unmap(struct mei_cl_device *cldev);
#endif /* _LINUX_MEI_CL_BUS_H */
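A usage sketch (editorial, not part of this patch) of the new timeout variants; the 2000 value is illustrative, with units as defined by the mei_cldev_*_timeout() API:

#define EXAMPLE_XFER_TIMEOUT 2000 /* illustrative; units per the mei bus API */

/* Sketch: a bounded request/response exchange over the client bus. */
static ssize_t example_xfer(struct mei_cl_device *cldev,
			    const u8 *req, size_t req_len,
			    u8 *resp, size_t resp_len)
{
	ssize_t ret;

	ret = mei_cldev_send_timeout(cldev, req, req_len, EXAMPLE_XFER_TIMEOUT);
	if (ret < 0)
		return ret;

	return mei_cldev_recv_timeout(cldev, resp, resp_len,
				      EXAMPLE_XFER_TIMEOUT);
}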
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09f5e42..07584c5e36fb 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AMD Memory Encryption Support
*
* Copyright (C) 2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MEM_ENCRYPT_H__
@@ -19,29 +16,43 @@
#include <asm/mem_encrypt.h>
-#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-
-#define sme_me_mask 0UL
-
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
-static inline bool sme_active(void)
-{
- return !!sme_me_mask;
-}
-
-static inline unsigned long sme_get_me_mask(void)
-{
- return sme_me_mask;
-}
-
+#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* The __sme_set() and __sme_clr() macros are useful for adding or removing
* the encryption mask from a value (e.g. when dealing with pagetable
* entries).
*/
-#define __sme_set(x) ((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x) ((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x) ((x) | sme_me_mask)
+#define __sme_clr(x) ((x) & ~sme_me_mask)
+
+#define dma_addr_encrypted(x) __sme_set(x)
+#define dma_addr_canonical(x) __sme_clr(x)
+
+#else
+#define __sme_set(x) (x)
+#define __sme_clr(x) (x)
+#endif
+
+/*
+ * dma_addr_encrypted() and dma_addr_unencrypted() convert a given DMA
+ * address to the respective type of addressing.
+ *
+ * dma_addr_canonical() reverses either conversion, returning the canonical
+ * address.
+ */
+#ifndef dma_addr_encrypted
+#define dma_addr_encrypted(x) (x)
+#endif
+
+#ifndef dma_addr_unencrypted
+#define dma_addr_unencrypted(x) (x)
+#endif
+
+#ifndef dma_addr_canonical
+#define dma_addr_canonical(x) (x)
+#endif
#endif /* __ASSEMBLY__ */
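A minimal sketch (editorial, not part of this patch) of how the mask helpers are meant to be used; on configurations without AMD memory encryption both compile down to the identity:

/* Sketch: apply/remove the SME encryption mask on a physical address. */
static inline unsigned long example_mk_encrypted(unsigned long paddr)
{
	return __sme_set(paddr);
}

static inline unsigned long example_mk_decrypted(unsigned long paddr)
{
	return __sme_clr(paddr);
}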
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index bae11c7e7bf3..221118b5a16e 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -1,119 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
-#ifdef __KERNEL__
-#ifdef CONFIG_HAVE_MEMBLOCK
/*
* Logical memory blocks.
*
* Copyright (C) 2001 Peter Bergner, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/mm.h>
+#include <asm/dma.h>
-#define INIT_MEMBLOCK_REGIONS 128
-#define INIT_PHYSMEM_REGIONS 4
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
-/* Definition of memblock flags. */
-enum {
+/*
+ * highest page
+ */
+extern unsigned long max_pfn;
+/*
+ * highest possible page
+ */
+extern unsigned long long max_possible_pfn;
+
+/**
+ * enum memblock_flags - definition of memory region attributes
+ * @MEMBLOCK_NONE: no special request
+ * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
+ * map during early boot as hot(un)pluggable system RAM (e.g., memory range
+ * that might get hotunplugged later). With "movable_node" set on the kernel
+ * commandline, try keeping this memory region hotunpluggable. Does not apply
+ * to memblocks added ("hotplugged") after early boot.
+ * @MEMBLOCK_MIRROR: mirrored region
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
+ * reserved in the memory map; refer to memblock_mark_nomap() description
+ * for further details
+ * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
+ * via a driver, and never indicated in the firmware-provided memory map as
+ * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
+ * kernel resource tree.
+ * @MEMBLOCK_RSRV_NOINIT: reserved memory region for which struct pages are not
+ * fully initialized. Users of this flag are responsible to properly initialize
+ * struct pages of this region
+ * @MEMBLOCK_RSRV_KERN: memory region that is reserved for kernel use,
+ * either explicitly with memblock_reserve_kern() or via memblock
+ * allocation APIs. All memblock allocations set this flag.
+ * @MEMBLOCK_KHO_SCRATCH: memory region that kexec can pass to the next
+ * kernel in handover mode. During early boot, we do not know about all
+ * memory reservations yet, so we get scratch memory from the previous
+ * kernel that we know is good to use. It is the only memory that
+ * allocations may happen from in this phase.
+ */
+enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
+ MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
+ MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */
+ MEMBLOCK_RSRV_KERN = 0x20, /* memory reserved for kernel use */
+ MEMBLOCK_KHO_SCRATCH = 0x40, /* scratch memory for kexec handover */
};
+/**
+ * struct memblock_region - represents a memory region
+ * @base: base address of the region
+ * @size: size of the region
+ * @flags: memory region attributes
+ * @nid: NUMA node id
+ */
struct memblock_region {
phys_addr_t base;
phys_addr_t size;
- unsigned long flags;
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ enum memblock_flags flags;
+#ifdef CONFIG_NUMA
int nid;
#endif
};
+/**
+ * struct memblock_type - collection of memory regions of certain type
+ * @cnt: number of regions
+ * @max: size of the allocated array
+ * @total_size: size of all regions
+ * @regions: array of regions
+ * @name: the memory type symbolic name
+ */
struct memblock_type {
- unsigned long cnt; /* number of regions */
- unsigned long max; /* size of the allocated array */
- phys_addr_t total_size; /* size of all regions */
+ unsigned long cnt;
+ unsigned long max;
+ phys_addr_t total_size;
struct memblock_region *regions;
char *name;
};
+/**
+ * struct memblock - memblock allocator metadata
+ * @bottom_up: is bottom up direction?
+ * @current_limit: physical address of the current allocation limit
+ * @memory: usable memory regions
+ * @reserved: reserved memory regions
+ */
struct memblock {
bool bottom_up; /* is bottom up direction? */
phys_addr_t current_limit;
struct memblock_type memory;
struct memblock_type reserved;
-#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
- struct memblock_type physmem;
-#endif
};
extern struct memblock memblock;
-extern int memblock_debug;
-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
+static inline void memblock_discard(void) {}
#endif
-#define memblock_dbg(fmt, ...) \
- if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end,
- int nid, ulong flags);
-phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
- phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
-int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
-int memblock_free(phys_addr_t base, phys_addr_t size);
-int memblock_reserve(phys_addr_t base, phys_addr_t size);
+int memblock_phys_free(phys_addr_t base, phys_addr_t size);
+int __memblock_reserve(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
+
+static __always_inline int memblock_reserve(phys_addr_t base, phys_addr_t size)
+{
+ return __memblock_reserve(base, size, NUMA_NO_NODE, 0);
+}
+
+static __always_inline int memblock_reserve_kern(phys_addr_t base, phys_addr_t size)
+{
+ return __memblock_reserve(base, size, NUMA_NO_NODE, MEMBLOCK_RSRV_KERN);
+}
+
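A usage sketch (editorial, not part of this patch); the region to reserve would normally come from firmware tables:

/* Sketch: carve a firmware-described region out of the allocator. */
static void __init example_reserve_fw(phys_addr_t base, phys_addr_t size)
{
	if (memblock_reserve(base, size))
		pr_warn("failed to reserve %pa bytes at %pa\n", &size, &base);
}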
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+#endif
void memblock_trim_memory(phys_addr_t align);
+unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+ phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
+bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-ulong choose_memblock_flags(void);
+int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
+int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size);
+int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size);
-/* Low level functions */
-int memblock_add_range(struct memblock_type *type,
- phys_addr_t base, phys_addr_t size,
- int nid, unsigned long flags);
+void memblock_free(void *ptr, size_t size);
+void reset_all_zones_managed_pages(void);
-void __next_mem_range(u64 *idx, int nid, ulong flags,
+/* Low level functions */
+void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
- phys_addr_t *out_end);
+void memblock_free_late(phys_addr_t base, phys_addr_t size);
+
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
+ phys_addr_t *out_start,
+ phys_addr_t *out_end)
+{
+ extern struct memblock_type physmem;
-void __memblock_free_early(phys_addr_t base, phys_addr_t size);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+ __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
+ out_start, out_end, NULL);
+}
/**
- * for_each_mem_range - iterate through memblock areas from type_a and not
+ * for_each_physmem_range - iterate through physmem areas not included in type.
+ * @i: u64 used as loop variable
+ * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_physmem_range(i, type, p_start, p_end) \
+ for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \
+ i != (u64)ULLONG_MAX; \
+ __next_physmem_range(&i, type, p_start, p_end))
+#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
+
+/**
+ * __for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate
@@ -124,7 +210,7 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range(i, type_a, type_b, nid, flags, \
+#define __for_each_mem_range(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
@@ -133,7 +219,7 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
p_start, p_end, p_nid))
/**
- * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * __for_each_mem_range_rev - reverse iterate through memblock areas from
* type_a and not included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate
@@ -144,17 +230,40 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
- p_start, p_end, p_nid) \
+#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
+ p_start, p_end, p_nid) \
for (i = (u64)ULLONG_MAX, \
- __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
__next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
/**
- * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * for_each_mem_range - iterate through memory areas.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range(i, p_start, p_end) \
+ __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
+ p_start, p_end, NULL)
+
+/**
+ * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * type_a and not included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range_rev(i, p_start, p_end) \
+ __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
+ p_start, p_end, NULL)
+
+/**
+ * for_each_reserved_mem_range - iterate over all reserved memblock areas
* @i: u64 used as loop variable
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
@@ -162,10 +271,9 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
* Walks over reserved areas of memblock. Available as soon as memblock
* is initialized.
*/
-#define for_each_reserved_mem_region(i, p_start, p_end) \
- for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \
- i != (u64)ULLONG_MAX; \
- __next_reserved_mem_region(&i, p_start, p_end))
+#define for_each_reserved_mem_range(i, p_start, p_end) \
+ __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_NONE, p_start, p_end, NULL)
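An iteration sketch (editorial, not part of this patch) using the argument-free form introduced above:

/* Sketch: walk all usable memory ranges at boot. */
static void __init example_dump_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end)
		pr_info("memory: [%pa-%pa]\n", &start, &end);
}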
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
@@ -182,12 +290,25 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static inline bool memblock_is_reserved_noinit(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_RSRV_NOINIT;
+}
+
+static inline bool memblock_is_driver_managed(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_DRIVER_MANAGED;
+}
+
+static inline bool memblock_is_kho_scratch(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_KHO_SCRATCH;
+}
+
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
unsigned long *out_end_pfn, int *out_nid);
-unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
/**
* for_each_mem_pfn_range - early memory pfn range iterator
@@ -202,7 +323,7 @@ unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
/**
* for_each_free_mem_range - iterate through free memblock areas
@@ -217,8 +338,8 @@ unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
* soon as memblock is initialized.
*/
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
- for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
- nid, flags, p_start, p_end, p_nid)
+ __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
+ nid, flags, p_start, p_end, p_nid)
/**
* for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -234,25 +355,13 @@ unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
*/
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
p_nid) \
- for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
- nid, flags, p_start, p_end, p_nid)
+ __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
+ nid, flags, p_start, p_end, p_nid)
-static inline void memblock_set_region_flags(struct memblock_region *r,
- unsigned long flags)
-{
- r->flags |= flags;
-}
-
-static inline void memblock_clear_region_flags(struct memblock_region *r,
- unsigned long flags)
-{
- r->flags &= ~flags;
-}
-
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
struct memblock_type *type, int nid);
+#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
r->nid = nid;
@@ -271,17 +380,94 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
{
return 0;
}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+#endif /* CONFIG_NUMA */
+
+/* Flags for memblock allocation APIs */
+#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE 0
+/*
+ * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
+ * MEMBLOCK_ALLOC_ACCESSIBLE
+ */
+#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
+
+/* We are using top down, so it is safe to use 0 here */
+#define MEMBLOCK_LOW_LIMIT 0
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
+ phys_addr_t start, phys_addr_t end);
+phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
+ phys_addr_t align, phys_addr_t start,
+ phys_addr_t end, int nid, bool exact_nid);
+phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_phys_alloc_range(size, align, 0,
+ MEMBLOCK_ALLOC_ACCESSIBLE);
+}
+
+void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+
+static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
+ const char *func);
+
+#define memblock_alloc_or_panic(size, align) \
+ __memblock_alloc_or_panic(size, align, __func__)
+
+static inline void *memblock_alloc_raw(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static __always_inline void *memblock_alloc_from(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t min_addr)
+{
+ return memblock_alloc_try_nid(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
-phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+static inline void *memblock_alloc_low(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
+}
-phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+static inline void *memblock_alloc_node(phys_addr_t size,
+ phys_addr_t align, int nid)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
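A boot-time allocation sketch (editorial, not part of this patch); the size is illustrative:

/* Sketch: memblock_alloc_or_panic() never returns NULL, it panics instead. */
static void * __init example_alloc_table(void)
{
	return memblock_alloc_or_panic(PAGE_SIZE, SMP_CACHE_BYTES);
}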
/*
* Set the allocation direction to bottom-up or top-down.
*/
-static inline void __init memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
memblock.bottom_up = enable;
}
@@ -291,43 +477,27 @@ static inline void __init memblock_set_bottom_up(bool enable)
* if this is true, that said, memblock will allocate memory
* in bottom-up direction.
*/
-static inline bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
{
return memblock.bottom_up;
}
-/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
-#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
-#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-
-phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end,
- ulong flags);
-phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
- phys_addr_t max_addr);
-phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
- phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
-phys_addr_t memblock_mem_size(unsigned long limit_pfn);
+phys_addr_t memblock_reserved_kern_size(phys_addr_t limit, int nid);
+unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
-int memblock_is_map_memory(phys_addr_t addr);
-int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+bool memblock_is_map_memory(phys_addr_t addr);
+bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
-extern void __memblock_dump_all(void);
-
-static inline void memblock_dump_all(void)
-{
- if (memblock_debug)
- __memblock_dump_all();
-}
+void memblock_dump_all(void);
/**
* memblock_set_current_limit - Set the current allocation limit to allow
@@ -349,8 +519,10 @@ phys_addr_t memblock_get_current_limit(void);
*/
/**
- * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
+ * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the memory region
*/
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
@@ -358,8 +530,10 @@ static inline unsigned long memblock_region_memory_base_pfn(const struct membloc
}
/**
- * memblock_region_memory_end_pfn - Return the end_pfn this region
+ * memblock_region_memory_end_pfn - get the end pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the memory region
*/
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
@@ -367,8 +541,10 @@ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock
}
/**
- * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
+ * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the reserved region
*/
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
@@ -376,48 +552,73 @@ static inline unsigned long memblock_region_reserved_base_pfn(const struct membl
}
/**
- * memblock_region_reserved_end_pfn - Return the end_pfn this region
+ * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the reserved region
*/
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
return PFN_UP(reg->base + reg->size);
}
-#define for_each_memblock(memblock_type, region) \
- for (region = memblock.memblock_type.regions; \
- region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
+/**
+ * for_each_mem_region - iterate over memory regions
+ * @region: loop variable
+ */
+#define for_each_mem_region(region) \
+ for (region = memblock.memory.regions; \
+ region < (memblock.memory.regions + memblock.memory.cnt); \
+ region++)
+
+/**
+ * for_each_reserved_mem_region - iterate over reserved memory regions
+ * @region: loop variable
+ */
+#define for_each_reserved_mem_region(region) \
+ for (region = memblock.reserved.regions; \
+ region < (memblock.reserved.regions + memblock.reserved.cnt); \
region++)
-#define for_each_memblock_type(memblock_type, rgn) \
- for (idx = 0, rgn = &memblock_type->regions[0]; \
- idx < memblock_type->cnt; \
- idx++, rgn = &memblock_type->regions[idx])
+extern void *alloc_large_system_hash(const char *tablename,
+ unsigned long bucketsize,
+ unsigned long numentries,
+ int scale,
+ int flags,
+ unsigned int *_hash_shift,
+ unsigned int *_hash_mask,
+ unsigned long low_limit,
+ unsigned long high_limit);
+
+#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
+#define HASH_ZERO 0x00000002 /* Zero allocated hash table */
+
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
+ */
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist; /* Distribute hashes across NUMA nodes? */
+#else
+#define hashdist (0)
+#endif
#ifdef CONFIG_MEMTEST
-extern void early_memtest(phys_addr_t start, phys_addr_t end);
+void early_memtest(phys_addr_t start, phys_addr_t end);
+void memtest_report_meminfo(struct seq_file *m);
#else
-static inline void early_memtest(phys_addr_t start, phys_addr_t end)
-{
-}
+static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
+static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif
-extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
- phys_addr_t end_addr);
+#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
+void memblock_set_kho_scratch_only(void);
+void memblock_clear_kho_scratch_only(void);
+void memmap_init_kho_scratch_pages(void);
#else
-static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
- return 0;
-}
-
-static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
- phys_addr_t end_addr)
-{
- return 0;
-}
-
-#endif /* CONFIG_HAVE_MEMBLOCK */
-
-#endif /* __KERNEL__ */
+static inline void memblock_set_kho_scratch_only(void) { }
+static inline void memblock_clear_kho_scratch_only(void) { }
+static inline void memmap_init_kho_scratch_pages(void) {}
+#endif
#endif /* _LINUX_MEMBLOCK_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 69966c461d1c..0651865a4564 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
*
* Copyright IBM Corporation, 2007
@@ -5,16 +6,6 @@
*
* Copyright 2007 OpenVZ SWsoft Inc
* Author: Pavel Emelianov <xemul@openvz.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_MEMCONTROL_H
@@ -23,6 +14,7 @@
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
+#include <linux/kernel.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
@@ -30,95 +22,104 @@
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
+#include <linux/shrinker.h>
struct mem_cgroup;
+struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
- MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
- MEMCG_RSS,
- MEMCG_RSS_HUGE,
- MEMCG_SWAP,
+ MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
- /* XXX: why are these zone and not node counters? */
- MEMCG_KERNEL_STACK_KB,
+ MEMCG_PERCPU_B,
+ MEMCG_VMALLOC,
+ MEMCG_KMEM,
+ MEMCG_ZSWAP_B,
+ MEMCG_ZSWAPPED,
MEMCG_NR_STAT,
};
-/* Cgroup-specific events, on top of universal VM events */
-enum memcg_event_item {
- MEMCG_LOW = NR_VM_EVENT_ITEMS,
+enum memcg_memory_event {
+ MEMCG_LOW,
MEMCG_HIGH,
MEMCG_MAX,
MEMCG_OOM,
- MEMCG_NR_EVENTS,
+ MEMCG_OOM_KILL,
+ MEMCG_OOM_GROUP_KILL,
+ MEMCG_SWAP_HIGH,
+ MEMCG_SWAP_MAX,
+ MEMCG_SWAP_FAIL,
+ MEMCG_SOCK_THROTTLED,
+ MEMCG_NR_MEMORY_EVENTS,
};
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
- int priority;
- unsigned int generation;
+ int generation;
};
#ifdef CONFIG_MEMCG
#define MEM_CGROUP_ID_SHIFT 16
-#define MEM_CGROUP_ID_MAX USHRT_MAX
struct mem_cgroup_id {
int id;
- atomic_t ref;
-};
-
-/*
- * Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremated by the number of pages. This counter is used for
- * for trigger some periodic events. This is straightforward and better
- * than using jiffies etc. to handle periodic memcg event.
- */
-enum mem_cgroup_events_target {
- MEM_CGROUP_TARGET_THRESH,
- MEM_CGROUP_TARGET_SOFTLIMIT,
- MEM_CGROUP_TARGET_NUMAINFO,
- MEM_CGROUP_NTARGETS,
+ refcount_t ref;
};
-struct mem_cgroup_stat_cpu {
- long count[MEMCG_NR_STAT];
- unsigned long events[MEMCG_NR_EVENTS];
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
-};
+struct memcg_vmstats_percpu;
+struct memcg1_events_percpu;
+struct memcg_vmstats;
+struct lruvec_stats_percpu;
+struct lruvec_stats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
/* scan generation, increased every round-trip */
- unsigned int generation;
-};
-
-struct lruvec_stat {
- long count[NR_VM_NODE_STAT_ITEMS];
+ atomic_t generation;
};
/*
- * per-zone information in memory controller.
+ * per-node information in memory controller.
*/
struct mem_cgroup_per_node {
- struct lruvec lruvec;
- struct lruvec_stat __percpu *lruvec_stat;
- unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
+ /* Keep the read-only fields at the start */
+ struct mem_cgroup *memcg; /* Back pointer, we cannot */
+ /* use container_of */
+
+ struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
+ struct lruvec_stats *lruvec_stats;
+ struct shrinker_info __rcu *shrinker_info;
- struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+#ifdef CONFIG_MEMCG_V1
+ /*
+ * Memcg-v1-only fields sit in the middle as a buffer between the
+ * read-mostly and the frequently-updated fields to avoid false sharing.
+ * If the v1 fields are not present, explicit padding is needed.
+ */
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
bool on_tree;
- struct mem_cgroup *memcg; /* Back pointer, we cannot */
- /* use container_of */
+#else
+ CACHELINE_PADDING(_pad1_);
+#endif
+
+ /* Fields which get updated often at the end. */
+ struct lruvec lruvec;
+ CACHELINE_PADDING(_pad2_);
+ unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
+ struct mem_cgroup_reclaim_iter iter;
+
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+ /* slab stats for nmi context */
+ atomic_t slab_reclaimable;
+ atomic_t slab_unreclaimable;
+#endif
};
struct mem_cgroup_threshold {
@@ -133,7 +134,7 @@ struct mem_cgroup_threshold_ary {
/* Size of entries[] */
unsigned int size;
/* Array of thresholds */
- struct mem_cgroup_threshold entries[0];
+ struct mem_cgroup_threshold entries[] __counted_by(size);
};
struct mem_cgroup_thresholds {
@@ -147,10 +148,37 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
-enum memcg_kmem_state {
- KMEM_NONE,
- KMEM_ALLOCATED,
- KMEM_ONLINE,
+/*
+ * Remember four most recent foreign writebacks with dirty pages in this
+ * cgroup. Inode sharing is expected to be uncommon and, even if we miss
+ * one in a given round, we're likely to catch it later if it keeps
+ * foreign-dirtying, so a fairly low count should be enough.
+ *
+ * See mem_cgroup_track_foreign_dirty_slowpath() for details.
+ */
+#define MEMCG_CGWB_FRN_CNT 4
+
+struct memcg_cgwb_frn {
+ u64 bdi_id; /* bdi->id of the foreign inode */
+ int memcg_id; /* memcg->css.id of foreign inode */
+ u64 at; /* jiffies_64 at the time of dirtying */
+ struct wb_completion done; /* tracks in-flight foreign writebacks */
+};
+
+/*
+ * Bucket for arbitrarily byte-sized objects charged to a memory
+ * cgroup. The bucket can be reparented in one piece when the cgroup
+ * is destroyed, without having to round up the individual references
+ * of all live memory objects in the wild.
+ */
+struct obj_cgroup {
+ struct percpu_ref refcnt;
+ struct mem_cgroup *memcg;
+ atomic_t nr_charged_bytes;
+ union {
+ struct list_head list; /* protected by objcg_lock */
+ struct rcu_head rcu;
+ };
};
/*
@@ -166,41 +194,111 @@ struct mem_cgroup {
struct mem_cgroup_id id;
/* Accounted resources */
- struct page_counter memory;
- struct page_counter swap;
+ struct page_counter memory; /* Both v1 & v2 */
- /* Legacy consumer-oriented counters */
- struct page_counter memsw;
- struct page_counter kmem;
- struct page_counter tcpmem;
+ union {
+ struct page_counter swap; /* v2 only */
+ struct page_counter memsw; /* v1 only */
+ };
- /* Normal memory consumption range */
- unsigned long low;
- unsigned long high;
+ /* registered local peak watchers */
+ struct list_head memory_peaks;
+ struct list_head swap_peaks;
+ spinlock_t peaks_lock;
/* Range enforcement for interrupt charges */
struct work_struct high_work;
- unsigned long soft_limit;
+#ifdef CONFIG_ZSWAP
+ unsigned long zswap_max;
+
+ /*
+ * Prevent pages from this memcg from being written back from zswap to
+ * swap, and from being swapped out on zswap store failures.
+ */
+ bool zswap_writeback;
+#endif
/* vmpressure notifications */
struct vmpressure vmpressure;
/*
- * Should the accounting and control be hierarchical, per subtree?
+ * Should the OOM killer kill all tasks in this cgroup if it kills one?
*/
- bool use_hierarchy;
+ bool oom_group;
+
+ int swappiness;
+
+ /* memory.events and memory.events.local */
+ struct cgroup_file events_file;
+ struct cgroup_file events_local_file;
+
+ /* handle for "memory.swap.events" */
+ struct cgroup_file swap_events_file;
+
+ /* memory.stat */
+ struct memcg_vmstats *vmstats;
+
+ /* memory.events */
+ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
+ atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
+
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+ /* MEMCG_KMEM for nmi context */
+ atomic_t kmem_stat;
+#endif
+ /*
+ * Hint of reclaim pressure for socket memory management. Note
+ * that this indicator should NOT be used in legacy cgroup mode
+ * where socket memory is accounted/charged separately.
+ */
+ u64 socket_pressure;
+#if BITS_PER_LONG < 64
+ seqlock_t socket_pressure_seqlock;
+#endif
+ int kmemcg_id;
+ /*
+ * memcg->objcg is wiped out as a part of the objcg reparenting
+ * process. memcg->orig_objcg preserves a pointer (and a reference)
+ * to the original objcg until the end of the memcg's life.
+ */
+ struct obj_cgroup __rcu *objcg;
+ struct obj_cgroup *orig_objcg;
+ /* list of inherited objcgs, protected by objcg_lock */
+ struct list_head objcg_list;
+
+ struct memcg_vmstats_percpu __percpu *vmstats_percpu;
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct list_head cgwb_list;
+ struct wb_domain cgwb_domain;
+ struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct deferred_split deferred_split_queue;
+#endif
+
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+ /* per-memcg mm_struct list */
+ struct lru_gen_mm_list mm_list;
+#endif
+
+#ifdef CONFIG_MEMCG_V1
+ /* Legacy consumer-oriented counters */
+ struct page_counter kmem; /* v1 only */
+ struct page_counter tcpmem; /* v1 only */
+
+ struct memcg1_events_percpu __percpu *events_percpu;
+
+ unsigned long soft_limit;
/* protected by memcg_oom_lock */
- bool oom_lock;
- int under_oom;
+ bool oom_lock;
+ int under_oom;
- int swappiness;
/* OOM-Killer disable */
- int oom_kill_disable;
-
- /* handle for "memory.events" */
- struct cgroup_file events_file;
+ int oom_kill_disable;
/* protect arrays of thresholds */
struct mutex thresholds_lock;
@@ -214,112 +312,412 @@ struct mem_cgroup {
/* For oom notifier event fd */
struct list_head oom_notify;
+ /* Legacy tcp memory accounting */
+ bool tcpmem_active;
+ int tcpmem_pressure;
+
+ /* List of events which userspace want to receive */
+ struct list_head event_list;
+ spinlock_t event_list_lock;
+#endif /* CONFIG_MEMCG_V1 */
+
+ struct mem_cgroup_per_node *nodeinfo[];
+};
+
+/*
+ * Size of the first charge trial.
+ * TODO: it may be necessary to use larger numbers on big-iron systems,
+ * or to size the batch dynamically based on the workload.
+ */
+#define MEMCG_CHARGE_BATCH 64U
+
+extern struct mem_cgroup *root_mem_cgroup;
+
+enum page_memcg_data_flags {
+ /* page->memcg_data is a pointer to a slabobj_ext vector */
+ MEMCG_DATA_OBJEXTS = (1UL << 0),
+ /* page has been accounted as a non-slab kernel page */
+ MEMCG_DATA_KMEM = (1UL << 1),
+ /* the next bit after the last actual flag */
+ __NR_MEMCG_DATA_FLAGS = (1UL << 2),
+};
+
+#define __OBJEXTS_ALLOC_FAIL MEMCG_DATA_OBJEXTS
+#define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS
+
+#else /* CONFIG_MEMCG */
+
+#define __OBJEXTS_ALLOC_FAIL (1UL << 0)
+#define __FIRST_OBJEXT_FLAG (1UL << 0)
+
+#endif /* CONFIG_MEMCG */
+
+enum objext_flags {
/*
- * Should we move charges of a task when a task is moved into this
- * mem_cgroup ? And what type of charges should we move ?
- */
- unsigned long move_charge_at_immigrate;
- /*
- * set > 0 if pages under this cgroup are moving to other cgroup.
+ * Use bit 0 with zero other bits to signal that the slabobj_ext vector
+ * failed to allocate. The same bit 0 with valid upper bits means
+ * MEMCG_DATA_OBJEXTS.
*/
- atomic_t moving_account;
- /* taken only while moving_account > 0 */
- spinlock_t move_lock;
- struct task_struct *move_lock_task;
- unsigned long move_lock_flags;
+ OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
+ /* slabobj_ext vector allocated with kmalloc_nolock() */
+ OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG,
+ /* the next bit after the last actual flag */
+ __NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
+};
+
+#define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
+
+#ifdef CONFIG_MEMCG
+
+static inline bool folio_memcg_kmem(struct folio *folio);
+
+/*
+ * After the initialization objcg->memcg is always pointing at
+ * a valid memcg, but can be atomically swapped to the parent memcg.
+ *
+ * The caller must ensure that the returned memcg won't be released.
+ */
+static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
+{
+ lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
+ return READ_ONCE(objcg->memcg);
+}
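/*
 * A minimal usage sketch (illustrative only; objcg_peek_memcg() is a
 * hypothetical caller, not a kernel API): reading the memcg behind an
 * objcg inside an RCU read-side section satisfies the lockdep
 * assertion above without taking a reference.
 */
static inline void objcg_peek_memcg(struct obj_cgroup *objcg)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = obj_cgroup_memcg(objcg);
        /* memcg is stable until rcu_read_unlock(); do not cache it */
        (void)memcg;
        rcu_read_unlock();
}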
+
+/*
+ * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some type of folios, e.g. slab folios or ex-slab folios or
+ * kmem folios.
+ */
+static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
+{
+ unsigned long memcg_data = folio->memcg_data;
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
+
+ return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
+}
+
+/*
+ * __folio_objcg - get the object cgroup associated with a kmem folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the object cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper object cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios, ex-slab folios or
+ * LRU folios.
+ */
+static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
+{
+ unsigned long memcg_data = folio->memcg_data;
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
+ VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
+
+ return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
+}
+
+/*
+ * folio_memcg - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios or ex-slab folios.
+ *
+ * For a non-kmem folio any of the following ensures folio and memcg binding
+ * stability:
+ *
+ * - the folio lock
+ * - LRU isolation
+ * - exclusive reference
+ *
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
+ */
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+ if (folio_memcg_kmem(folio))
+ return obj_cgroup_memcg(__folio_objcg(folio));
+ return __folio_memcg(folio);
+}
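/*
 * A short sketch (illustrative only; folio_memcg_locked_example() is a
 * hypothetical helper, and folio_lock()/folio_unlock() come from
 * pagemap.h): the folio lock is one of the stability conditions listed
 * above, so the memcg binding cannot change while it is held.
 */
static inline void folio_memcg_locked_example(struct folio *folio)
{
        struct mem_cgroup *memcg;

        folio_lock(folio);
        memcg = folio_memcg(folio);
        if (memcg) {
                /* binding is stable until folio_unlock() */
        }
        folio_unlock(folio);
}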
+
+/*
+ * folio_memcg_charged - If a folio is charged to a memory cgroup.
+ * @folio: Pointer to the folio.
+ *
+ * Returns true if folio is charged to a memory cgroup, otherwise returns false.
+ */
+static inline bool folio_memcg_charged(struct folio *folio)
+{
+ return folio->memcg_data != 0;
+}
+
+/*
+ * folio_memcg_check - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. Unlike folio_memcg(), this function can take any folio
+ * as an argument. Use it when it is not known whether a folio has an
+ * associated memory cgroup pointer, an object cgroup vector, or an
+ * object cgroup.
+ *
+ * For a non-kmem folio any of the following ensures folio and memcg binding
+ * stability:
+ *
+ * - the folio lock
+ * - LRU isolation
+ * - exclusive reference
+ *
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
+ */
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
+{
/*
- * percpu counter.
+ * Because folio->memcg_data might be changed asynchronously
+ * for slabs, READ_ONCE() should be used here.
*/
- struct mem_cgroup_stat_cpu __percpu *stat;
+ unsigned long memcg_data = READ_ONCE(folio->memcg_data);
- unsigned long socket_pressure;
+ if (memcg_data & MEMCG_DATA_OBJEXTS)
+ return NULL;
- /* Legacy tcp memory accounting */
- bool tcpmem_active;
- int tcpmem_pressure;
+ if (memcg_data & MEMCG_DATA_KMEM) {
+ struct obj_cgroup *objcg;
-#ifndef CONFIG_SLOB
- /* Index in the kmem_cache->memcg_params.memcg_caches array */
- int kmemcg_id;
- enum memcg_kmem_state kmem_state;
- struct list_head kmem_caches;
-#endif
+ objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
+ return obj_cgroup_memcg(objcg);
+ }
- int last_scanned_node;
-#if MAX_NUMNODES > 1
- nodemask_t scan_nodes;
- atomic_t numainfo_events;
- atomic_t numainfo_updating;
-#endif
+ return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
+}
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct list_head cgwb_list;
- struct wb_domain cgwb_domain;
-#endif
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ if (PageTail(page))
+ return NULL;
+ return folio_memcg_check((struct folio *)page);
+}
- /* List of events which userspace want to receive */
- struct list_head event_list;
- spinlock_t event_list_lock;
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ struct mem_cgroup *memcg;
- struct mem_cgroup_per_node *nodeinfo[0];
- /* WARNING: nodeinfo must be the last member here */
-};
+ rcu_read_lock();
+retry:
+ memcg = obj_cgroup_memcg(objcg);
+ if (unlikely(!css_tryget(&memcg->css)))
+ goto retry;
+ rcu_read_unlock();
-extern struct mem_cgroup *root_mem_cgroup;
+ return memcg;
+}
+
+/*
+ * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
+ * @folio: Pointer to the folio.
+ *
+ * Checks if the folio has the MemcgKmem flag set. The caller must ensure
+ * that the folio has an associated memory cgroup. It's not safe to call
+ * this function against some types of folios, e.g. slab folios.
+ */
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
+ VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
+ return folio->memcg_data & MEMCG_DATA_KMEM;
+}
+
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return folio_memcg_kmem(page_folio(page));
+}
+
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
+{
+ return (memcg == root_mem_cgroup);
+}
static inline bool mem_cgroup_disabled(void)
{
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-static inline void mem_cgroup_event(struct mem_cgroup *memcg,
- enum memcg_event_item event)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
- this_cpu_inc(memcg->stat->events[event]);
- cgroup_file_notify(&memcg->events_file);
+ *min = *low = 0;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ /*
+ * There is no reclaim protection applied to a targeted reclaim.
+ * We special-case it here because
+ * mem_cgroup_calculate_protection() is not robust enough to keep
+ * the protection invariant of the calculated effective values for
+ * parallel reclaimers with different reclaim targets. This is
+ * especially a problem for tail memcgs (as they have pages on LRU)
+ * which would want to have effective values 0 for targeted reclaim
+ * but a different value for external reclaim.
+ *
+ * Example
+ * Let's have global and A's reclaim in parallel:
+ * |
+ * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
+ * |\
+ * | C (low = 1G, usage = 2.5G)
+ * B (low = 1G, usage = 0.5G)
+ *
+ * For the global reclaim
+ * A.elow = A.low
+ * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
+ * C.elow = min(C.usage, C.low)
+ *
+ * With the effective values resetting we have A reclaim
+ * A.elow = 0
+ * B.elow = B.low
+ * C.elow = C.low
+ *
+ * If the global reclaim races with A's reclaim then
+ * B.elow = C.elow = 0 (because children_low_usage > A.elow)
+ * is possible, and reclaiming B would violate its protection.
+ */
+ if (root == memcg)
+ return;
+
+ *min = READ_ONCE(memcg->memory.emin);
+ *low = READ_ONCE(memcg->memory.elow);
}
-bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg);
+
+static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ /*
+ * The root memcg doesn't account charges, and doesn't support
+ * protection. The target memcg's protection is ignored, see
+ * mem_cgroup_calculate_protection() and mem_cgroup_protection().
+ */
+ return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
+ memcg == target;
+}
-int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, struct mem_cgroup **memcgp,
- bool compound);
-void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
- bool lrucare, bool compound);
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
- bool compound);
-void mem_cgroup_uncharge(struct page *page);
-void mem_cgroup_uncharge_list(struct list_head *page_list);
+static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_unprotected(target, memcg))
+ return false;
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
+ return READ_ONCE(memcg->memory.elow) >=
+ page_counter_read(&memcg->memory);
+}
-static struct mem_cgroup_per_node *
-mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
+static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
- return memcg->nodeinfo[nid];
+ if (mem_cgroup_unprotected(target, memcg))
+ return false;
+
+ return READ_ONCE(memcg->memory.emin) >=
+ page_counter_read(&memcg->memory);
}
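/*
 * A sketch of how a reclaimer might consult the protection helpers
 * above (illustrative control flow only; reclaim_may_skip() is
 * hypothetical and not the kernel's actual reclaim loop):
 */
static inline bool reclaim_may_skip(struct mem_cgroup *target,
                                    struct mem_cgroup *memcg)
{
        if (mem_cgroup_below_min(target, memcg))
                return true;    /* hard protection: never reclaim */
        if (mem_cgroup_below_low(target, memcg))
                return true;    /* soft protection: usually skip */
        return false;
}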
+int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
+
/**
- * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
- * @node: node of the wanted lruvec
+ * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
+ * @folio: Folio to charge.
+ * @mm: mm context of the allocating task.
+ * @gfp: Reclaim mode.
+ *
+ * Try to charge @folio to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp if necessary. If @mm is NULL, try to
+ * charge to the active memcg.
+ *
+ * Do not use this for folios allocated for swapin.
+ *
+ * Return: 0 on success. Otherwise, an error code is returned.
+ */
+static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp)
+{
+ if (mem_cgroup_disabled())
+ return 0;
+ return __mem_cgroup_charge(folio, mm, gfp);
+}
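/*
 * A sketch of the charge pairing for a newly allocated folio
 * (illustrative only; alloc_charged_folio() is hypothetical, and real
 * callers handle lruvec placement and error paths more carefully):
 */
static inline struct folio *alloc_charged_folio(struct mm_struct *mm,
                                                gfp_t gfp)
{
        struct folio *folio = folio_alloc(gfp, 0);

        if (!folio)
                return NULL;
        if (mem_cgroup_charge(folio, mm, gfp)) {
                folio_put(folio);
                return NULL;
        }
        return folio;   /* uncharged via mem_cgroup_uncharge() on free */
}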
+
+int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);
+
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp, swp_entry_t entry);
+
+void __mem_cgroup_uncharge(struct folio *folio);
+
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge(folio);
+}
+
+void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
+static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge_folios(folios);
+}
+
+void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
+void mem_cgroup_migrate(struct folio *old, struct folio *new);
+
+/**
+ * mem_cgroup_lruvec - get the lru list vector for a memcg & node
* @memcg: memcg of the wanted lruvec
+ * @pgdat: pglist_data of the wanted node
*
- * Returns the lru list vector holding pages for a given @node or a given
- * @memcg and @zone. This can be the node lruvec, if the memory controller
- * is disabled.
+ * Returns the lru list vector holding pages for a given @memcg &
+ * @pgdat combination. This can be the node lruvec, if the memory
+ * controller is disabled.
*/
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
- struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+ struct pglist_data *pgdat)
{
struct mem_cgroup_per_node *mz;
struct lruvec *lruvec;
if (mem_cgroup_disabled()) {
- lruvec = node_lruvec(pgdat);
+ lruvec = &pgdat->__lruvec;
goto out;
}
- mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
+ if (!memcg)
+ memcg = root_mem_cgroup;
+
+ mz = memcg->nodeinfo[pgdat->node_id];
lruvec = &mz->lruvec;
out:
/*
@@ -332,16 +730,85 @@ out:
return lruvec;
}
-struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
+/**
+ * folio_lruvec - return lruvec for isolating/putting an LRU folio
+ * @folio: Pointer to the folio.
+ *
+ * This function relies on folio->memcg_data being stable.
+ */
+static inline struct lruvec *folio_lruvec(struct folio *folio)
+{
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
+ return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
+}
-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
+
+struct mem_cgroup *get_mem_cgroup_from_current(void);
+
+struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
+
+struct lruvec *folio_lruvec_lock(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+ unsigned long *flags);
+
+#ifdef CONFIG_DEBUG_VM
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
+#else
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
+{
+}
+#endif
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct mem_cgroup, css) : NULL;
}
+static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
+{
+ return percpu_ref_tryget(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_get(struct obj_cgroup *objcg)
+{
+ percpu_ref_get(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
+ unsigned long nr)
+{
+ percpu_ref_get_many(&objcg->refcnt, nr);
+}
+
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+ if (objcg)
+ percpu_ref_put(&objcg->refcnt);
+}
+
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget(&memcg->css);
+}
+
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget_online(&memcg->css);
+}
+
+static inline void mem_cgroup_put(struct mem_cgroup *memcg)
+{
+ if (memcg)
+ css_put(&memcg->css);
+}
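/*
 * The tryget/put pattern for pinning a memcg across a sleepable
 * section (sketch only; memcg_pin_example() is hypothetical). Both
 * helpers tolerate a NULL memcg:
 */
static inline void memcg_pin_example(struct mem_cgroup *memcg)
{
        if (!mem_cgroup_tryget(memcg))
                return;         /* the cgroup is already being freed */
        /* memcg may be dereferenced, even across sleeping, until... */
        mem_cgroup_put(memcg);
}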
+
#define mem_cgroup_from_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
@@ -349,8 +816,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
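/*
 * The canonical hierarchy walk built on mem_cgroup_iter() (sketch only;
 * walk_hierarchy_example() and want_to_stop() are hypothetical).
 * Breaking out early must go through mem_cgroup_iter_break() so the
 * iterator's css reference is dropped:
 */
static inline void walk_hierarchy_example(struct mem_cgroup *root,
                                          bool (*want_to_stop)(struct mem_cgroup *))
{
        struct mem_cgroup *memcg;

        for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
             memcg = mem_cgroup_iter(root, memcg, NULL)) {
                if (want_to_stop(memcg)) {
                        mem_cgroup_iter_break(root, memcg);
                        break;
                }
        }
}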
-int mem_cgroup_scan_tasks(struct mem_cgroup *,
- int (*)(struct task_struct *, void *), void *);
+void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ int (*)(struct task_struct *, void *), void *arg);
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
@@ -361,6 +828,20 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
+}
+
+struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
+#endif
+
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
+{
+ return mem_cgroup_from_css(seq_css(m));
+}
+
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
struct mem_cgroup_per_node *mz;
@@ -376,14 +857,11 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
* parent_mem_cgroup - find the accounting parent of a memcg
* @memcg: memcg whose parent to find
*
- * Returns the parent memcg, or NULL if this is the root or the memory
- * controller is in legacy no-hierarchy mode.
+ * Returns the parent memcg, or NULL if this is the root.
*/
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
- if (!memcg->memory.parent)
- return NULL;
- return mem_cgroup_from_counter(memcg->memory.parent, memory);
+ return mem_cgroup_from_css(memcg->css.parent);
}
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
@@ -391,8 +869,6 @@ static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
{
if (root == memcg)
return true;
- if (!root->use_hierarchy)
- return false;
return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}
@@ -410,7 +886,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return match;
}
-struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
@@ -420,328 +896,367 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
return !!(memcg->css.flags & CSS_ONLINE);
}
-/*
- * For memory reclaim.
- */
-int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zid, int nr_pages);
-unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, unsigned int lru_mask);
-
static inline
-unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int zone_idx)
{
struct mem_cgroup_per_node *mz;
- unsigned long nr_pages = 0;
- int zid;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for (zid = 0; zid < MAX_NR_ZONES; zid++)
- nr_pages += mz->lru_zone_size[zid][lru];
- return nr_pages;
+ return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-static inline
-unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
- enum lru_list lru, int zone_idx)
-{
- struct mem_cgroup_per_node *mz;
+void __mem_cgroup_handle_over_high(gfp_t gfp_mask);
- mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- return mz->lru_zone_size[zone_idx][lru];
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
+{
+ if (unlikely(current->memcg_nr_pages_over_high))
+ __mem_cgroup_handle_over_high(gfp_mask);
}
-void mem_cgroup_handle_over_high(void);
+unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
-static inline void mem_cgroup_oom_enable(void)
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
+
+struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
+ struct mem_cgroup *oom_domain);
+void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
+
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val);
+
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx, int val)
{
- WARN_ON(current->memcg_may_oom);
- current->memcg_may_oom = 1;
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = folio_memcg(page_folio(page));
+ if (memcg)
+ mod_memcg_state(memcg, idx, val);
+ rcu_read_unlock();
}
-static inline void mem_cgroup_oom_disable(void)
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
+unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
+unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+ enum node_stat_item idx);
+
+void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
+void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
+
+void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
+
+void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+ unsigned long count);
+
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
{
- WARN_ON(!current->memcg_may_oom);
- current->memcg_may_oom = 0;
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ if (memcg)
+ count_memcg_events(memcg, idx, nr);
}
-static inline bool task_in_memcg_oom(struct task_struct *p)
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
{
- return p->memcg_in_oom;
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (likely(memcg))
+ count_memcg_events(memcg, idx, count);
+ rcu_read_unlock();
}
-bool mem_cgroup_oom_synchronize(bool wait);
+static inline void count_memcg_event_mm(struct mm_struct *mm,
+ enum vm_event_item idx)
+{
+ count_memcg_events_mm(mm, idx, 1);
+}
-#ifdef CONFIG_MEMCG_SWAP
-extern int do_swap_account;
-#endif
+void __memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event, bool allow_spinning);
-struct mem_cgroup *lock_page_memcg(struct page *page);
-void __unlock_page_memcg(struct mem_cgroup *memcg);
-void unlock_page_memcg(struct page *page);
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
+{
+ __memcg_memory_event(memcg, event, true);
+}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- int idx)
+static inline void memcg_memory_event_mm(struct mm_struct *mm,
+ enum memcg_memory_event event)
{
- long val = 0;
- int cpu;
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (likely(memcg))
+ memcg_memory_event(memcg, event);
+ rcu_read_unlock();
+}
- for_each_possible_cpu(cpu)
- val += per_cpu(memcg->stat->count[idx], cpu);
+void split_page_memcg(struct page *first, unsigned order);
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+ unsigned new_order);
- if (val < 0)
- val = 0;
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
+{
+ struct mem_cgroup *memcg;
+ u64 id;
- return val;
+ if (mem_cgroup_disabled())
+ return 0;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ id = cgroup_id(memcg->css.cgroup);
+ rcu_read_unlock();
+ return id;
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- int idx, int val)
+extern int mem_cgroup_init(void);
+#else /* CONFIG_MEMCG */
+
+#define MEM_CGROUP_ID_SHIFT 0
+
+#define root_mem_cgroup (NULL)
+
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
- if (!mem_cgroup_disabled())
- __this_cpu_add(memcg->stat->count[idx], val);
+ return NULL;
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- int idx, int val)
+static inline bool folio_memcg_charged(struct folio *folio)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->count[idx], val);
+ return false;
}
-/**
- * mod_memcg_page_state - update page state statistics
- * @page: the page
- * @idx: page state item to account
- * @val: number of pages (positive or negative)
- *
- * The @page must be locked or the caller must use lock_page_memcg()
- * to prevent double accounting when the page is concurrently being
- * moved to another memcg:
- *
- * lock_page(page) or lock_page_memcg(page)
- * if (TestClearPageState(page))
- * mod_memcg_page_state(page, state, -1);
- * unlock_page(page) or unlock_page_memcg(page)
- *
- * Kernel pages are an exception to this, since they'll never move.
- */
-static inline void __mod_memcg_page_state(struct page *page,
- int idx, int val)
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
- if (page->mem_cgroup)
- __mod_memcg_state(page->mem_cgroup, idx, val);
+ return NULL;
}
-static inline void mod_memcg_page_state(struct page *page,
- int idx, int val)
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
- if (page->mem_cgroup)
- mod_memcg_state(page->mem_cgroup, idx, val);
+ return NULL;
}
-static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
- struct mem_cgroup_per_node *pn;
- long val = 0;
- int cpu;
+ return NULL;
+}
- if (mem_cgroup_disabled())
- return node_page_state(lruvec_pgdat(lruvec), idx);
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ return false;
+}
- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for_each_possible_cpu(cpu)
- val += per_cpu(pn->lruvec_stat->count[idx], cpu);
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return false;
+}
- if (val < 0)
- val = 0;
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
+{
+ return true;
+}
- return val;
+static inline bool mem_cgroup_disabled(void)
+{
+ return true;
}
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
{
- struct mem_cgroup_per_node *pn;
+}
- __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
- if (mem_cgroup_disabled())
- return;
- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- __mod_memcg_state(pn->memcg, idx, val);
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+static inline void memcg_memory_event_mm(struct mm_struct *mm,
+ enum memcg_memory_event event)
+{
}
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
- struct mem_cgroup_per_node *pn;
+ *min = *low = 0;
+}
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
- if (mem_cgroup_disabled())
- return;
- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- mod_memcg_state(pn->memcg, idx, val);
- this_cpu_add(pn->lruvec_stat->count[idx], val);
+static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
+{
}
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ return true;
+}
+static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
- struct mem_cgroup_per_node *pn;
+ return false;
+}
- __mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
- return;
- __mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ return false;
}
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline int mem_cgroup_charge(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp)
{
- struct mem_cgroup_per_node *pn;
+ return 0;
+}
- mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
- return;
- mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- this_cpu_add(pn->lruvec_stat->count[idx], val);
+static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
+{
+ return 0;
}
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned);
+static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
+{
+ return 0;
+}
-static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
+static inline void mem_cgroup_uncharge(struct folio *folio)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->events[idx], count);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void count_memcg_page_event(struct page *page,
- int idx)
+static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
- if (page->mem_cgroup)
- count_memcg_events(page->mem_cgroup, idx, 1);
}
-static inline void count_memcg_event_mm(struct mm_struct *mm,
- enum vm_event_item idx)
+static inline void mem_cgroup_replace_folio(struct folio *old,
+ struct folio *new)
{
- struct mem_cgroup *memcg;
+}
- if (mem_cgroup_disabled())
- return;
+static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
+{
+}
- rcu_read_lock();
- memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (likely(memcg)) {
- this_cpu_inc(memcg->stat->events[idx]);
- if (idx == OOM_KILL)
- cgroup_file_notify(&memcg->events_file);
- }
- rcu_read_unlock();
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+ struct pglist_data *pgdat)
+{
+ return &pgdat->__lruvec;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head);
-#endif
-#else /* CONFIG_MEMCG */
+static inline struct lruvec *folio_lruvec(struct folio *folio)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+ return &pgdat->__lruvec;
+}
-#define MEM_CGROUP_ID_SHIFT 0
-#define MEM_CGROUP_ID_MAX 0
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
+{
+}
-struct mem_cgroup;
+static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+{
+ return NULL;
+}
-static inline bool mem_cgroup_disabled(void)
+static inline bool mm_match_cgroup(struct mm_struct *mm,
+ struct mem_cgroup *memcg)
{
return true;
}
-static inline void mem_cgroup_event(struct mem_cgroup *memcg,
- enum memcg_event_item event)
+static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
+ return NULL;
}
-static inline bool mem_cgroup_low(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
+static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
- return false;
+ return NULL;
}
-static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask,
- struct mem_cgroup **memcgp,
- bool compound)
+static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
- *memcgp = NULL;
- return 0;
+ return NULL;
}
-static inline void mem_cgroup_commit_charge(struct page *page,
- struct mem_cgroup *memcg,
- bool lrucare, bool compound)
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
+ return NULL;
}
-static inline void mem_cgroup_cancel_charge(struct page *page,
- struct mem_cgroup *memcg,
- bool compound)
+static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
}
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}
-static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
+ return true;
}
-static inline void mem_cgroup_migrate(struct page *old, struct page *new)
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
+ return true;
}
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
- struct mem_cgroup *memcg)
+static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
- return node_lruvec(pgdat);
}
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct pglist_data *pgdat)
+static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
- return &pgdat->lruvec;
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
}
-static inline bool mm_match_cgroup(struct mm_struct *mm,
- struct mem_cgroup *memcg)
+static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
- return true;
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock_irq(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
}
-static inline bool task_in_mem_cgroup(struct task_struct *task,
- const struct mem_cgroup *memcg)
+static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+ unsigned long *flagsp)
{
- return true;
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
+ return &pgdat->__lruvec;
}
static inline struct mem_cgroup *
@@ -757,10 +1272,9 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
{
}
-static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*fn)(struct task_struct *, void *), void *arg)
{
- return 0;
}
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
@@ -775,108 +1289,88 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
-static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
-{
- return NULL;
-}
-
-static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
-{
- return true;
-}
-
-static inline unsigned long
-mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
-{
- return 0;
-}
-static inline
-unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
- enum lru_list lru, int zone_idx)
-{
- return 0;
-}
-
-static inline unsigned long
-mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, unsigned int lru_mask)
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
return 0;
}
-static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
- return 0;
+ return NULL;
}
+#endif
-static inline void
-mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
+ return NULL;
}
-static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
return NULL;
}
-static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
+ return true;
}
-static inline void unlock_page_memcg(struct page *page)
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int zone_idx)
{
+ return 0;
}
-static inline void mem_cgroup_handle_over_high(void)
+static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
+ return 0;
}
-static inline void mem_cgroup_oom_enable(void)
+static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
+ return 0;
}
-static inline void mem_cgroup_oom_disable(void)
+static inline void
+mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline bool task_in_memcg_oom(struct task_struct *p)
+static inline void
+mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
- return false;
}
-static inline bool mem_cgroup_oom_synchronize(bool wait)
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
- return false;
}
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- int idx)
+static inline struct mem_cgroup *mem_cgroup_get_oom_group(
+ struct task_struct *victim, struct mem_cgroup *oom_domain)
{
- return 0;
+ return NULL;
}
-static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- int idx,
- int nr)
+static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}
static inline void mod_memcg_state(struct mem_cgroup *memcg,
- int idx,
+ enum memcg_stat_item idx,
int nr)
{
}
-static inline void __mod_memcg_page_state(struct page *page,
- int idx,
- int nr)
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx, int val)
{
}
-static inline void mod_memcg_page_state(struct page *page,
- int idx,
- int nr)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
+ return 0;
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
@@ -885,50 +1379,41 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+ enum node_stat_item idx)
{
- __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+ return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
- __mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
+ int val)
{
- mod_node_page_state(page_pgdat(page), idx, val);
-}
+ struct page *page = virt_to_head_page(p);
-static inline
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned)
-{
- return 0;
+ mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void mem_cgroup_split_huge_fixup(struct page *head)
+static inline void count_memcg_events(struct mem_cgroup *memcg,
+ enum vm_event_item idx,
+ unsigned long count)
{
}
-static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
{
}
-static inline void count_memcg_page_event(struct page *page,
- int idx)
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
{
}
@@ -936,120 +1421,127 @@ static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
-#endif /* CONFIG_MEMCG */
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __inc_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline void split_page_memcg(struct page *first, unsigned order)
{
- __mod_memcg_state(memcg, idx, 1);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __dec_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline void folio_split_memcg_refs(struct folio *folio,
+ unsigned old_order, unsigned new_order)
{
- __mod_memcg_state(memcg, idx, -1);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __inc_memcg_page_state(struct page *page,
- int idx)
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
- __mod_memcg_page_state(page, idx, 1);
+ return 0;
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __dec_memcg_page_state(struct page *page,
- int idx)
-{
- __mod_memcg_page_state(page, idx, -1);
-}
+static inline int mem_cgroup_init(void) { return 0; }
+#endif /* CONFIG_MEMCG */
-static inline void __inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- __mod_lruvec_state(lruvec, idx, 1);
-}
+/*
+ * Extended information for slab objects stored as an array in page->memcg_data
+ * if MEMCG_DATA_OBJEXTS is set.
+ */
+struct slabobj_ext {
+#ifdef CONFIG_MEMCG
+ struct obj_cgroup *objcg;
+#endif
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+ union codetag_ref ref;
+#endif
+} __aligned(8);
-static inline void __dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
- __mod_lruvec_state(lruvec, idx, -1);
-}
+ struct mem_cgroup *memcg;
-static inline void __inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- __mod_lruvec_page_state(page, idx, 1);
+ memcg = lruvec_memcg(lruvec);
+ if (!memcg)
+ return NULL;
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ return NULL;
+ return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}
-static inline void __dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
- __mod_lruvec_page_state(page, idx, -1);
+ spin_unlock(&lruvec->lru_lock);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
- mod_memcg_state(memcg, idx, 1);
+ spin_unlock_irq(&lruvec->lru_lock);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
+ unsigned long flags)
{
- mod_memcg_state(memcg, idx, -1);
+ spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void inc_memcg_page_state(struct page *page,
- int idx)
+/* Test requires a stable folio->memcg binding, see folio_memcg() */
+static inline bool folio_matches_lruvec(struct folio *folio,
+ struct lruvec *lruvec)
{
- mod_memcg_page_state(page, idx, 1);
+ return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+ lruvec_memcg(lruvec) == folio_memcg(folio);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void dec_memcg_page_state(struct page *page,
- int idx)
+/* Don't lock again iff the folio's lruvec is already locked */
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
+ struct lruvec *locked_lruvec)
{
- mod_memcg_page_state(page, idx, -1);
-}
+ if (locked_lruvec) {
+ if (folio_matches_lruvec(folio, locked_lruvec))
+ return locked_lruvec;
-static inline void inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- mod_lruvec_state(lruvec, idx, 1);
-}
+ unlock_page_lruvec_irq(locked_lruvec);
+ }
-static inline void dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- mod_lruvec_state(lruvec, idx, -1);
+ return folio_lruvec_lock_irq(folio);
}
-static inline void inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+/* Don't lock again iff the folio's lruvec is already locked */
+static inline void folio_lruvec_relock_irqsave(struct folio *folio,
+ struct lruvec **lruvecp, unsigned long *flags)
{
- mod_lruvec_page_state(page, idx, 1);
-}
+ if (*lruvecp) {
+ if (folio_matches_lruvec(folio, *lruvecp))
+ return;
-static inline void dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, -1);
+ unlock_page_lruvec_irqrestore(*lruvecp, *flags);
+ }
+
+ *lruvecp = folio_lruvec_lock_irqsave(folio, flags);
}
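/*
 * The batched-relock pattern these helpers enable (sketch only;
 * lru_move_batch_example() is hypothetical, and struct folio_batch
 * comes from pagevec.h): the lock is dropped and retaken only when
 * consecutive folios map to different lruvecs.
 */
static inline void lru_move_batch_example(struct folio_batch *fbatch)
{
        struct lruvec *lruvec = NULL;
        unsigned long flags;
        unsigned int i;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
                /* operate on folio under lruvec->lru_lock */
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
}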
#ifdef CONFIG_CGROUP_WRITEBACK
-struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
unsigned long *pwriteback);
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
+ struct bdi_writeback *wb);
+
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
+ struct bdi_writeback *wb)
+{
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ memcg = folio_memcg(folio);
+ if (unlikely(memcg && &memcg->css != wb->memcg_css))
+ mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
+}
+
+void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
+
#else /* CONFIG_CGROUP_WRITEBACK */
static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
@@ -1065,96 +1557,369 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
{
}
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
+ struct bdi_writeback *wb)
+{
+}
+
+static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
+{
+}
+
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
-void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
+bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
+ gfp_t gfp_mask);
+void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);
+
+#if BITS_PER_LONG < 64
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
{
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
- return true;
+ u64 val = get_jiffies_64() + HZ;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
+ memcg->socket_pressure = val;
+ write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
+}
+
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ unsigned int seq;
+ u64 val;
+
do {
- if (time_before(jiffies, memcg->socket_pressure))
- return true;
- } while ((memcg = parent_mem_cgroup(memcg)));
- return false;
+ seq = read_seqbegin(&memcg->socket_pressure_seqlock);
+ val = memcg->socket_pressure;
+ } while (read_seqretry(&memcg->socket_pressure_seqlock, seq));
+
+ return val;
+}
+#else
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
+{
+ WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
+}
+
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ return READ_ONCE(memcg->socket_pressure);
+}
+#endif
+
+int alloc_shrinker_info(struct mem_cgroup *memcg);
+void free_shrinker_info(struct mem_cgroup *memcg);
+void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
+void reparent_shrinker_deferred(struct mem_cgroup *memcg);
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return shrinker->id;
}
#else
#define mem_cgroup_sockets_enabled 0
-static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
-static inline void mem_cgroup_sk_free(struct sock *sk) { };
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+
+static inline void mem_cgroup_sk_alloc(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_free(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
+{
+}
+
+static inline bool mem_cgroup_sk_charge(const struct sock *sk,
+ unsigned int nr_pages,
+ gfp_t gfp_mask)
{
return false;
}
-#endif
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
-void memcg_kmem_put_cache(struct kmem_cache *cachep);
-int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
- struct mem_cgroup *memcg);
-int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void memcg_kmem_uncharge(struct page *page, int order);
+static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
+ unsigned int nr_pages)
+{
+}
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-extern struct static_key_false memcg_kmem_enabled_key;
-extern struct workqueue_struct *memcg_kmem_cache_wq;
+static inline void set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id)
+{
+}
-extern int memcg_nr_cache_ids;
-void memcg_get_cache_ids(void);
-void memcg_put_cache_ids(void);
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return -1;
+}
+#endif
+
+#ifdef CONFIG_MEMCG
+bool mem_cgroup_kmem_disabled(void);
+int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
+void __memcg_kmem_uncharge_page(struct page *page, int order);
/*
- * Helper macro to loop through all memcg-specific caches. Callers must still
- * check if the cache is valid (it is either valid or NULL).
- * the slab_mutex must be held when looping through those caches
+ * The returned objcg pointer is safe to use without additional
+ * protection within a scope. The scope is defined either by
+ * the current task (similar to the "current" global variable)
+ * or by a set_active_memcg() pair.
+ * Please use obj_cgroup_get() to take a reference if the pointer
+ * needs to be used outside of the local scope.
*/
-#define for_each_memcg_cache_index(_idx) \
- for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
+struct obj_cgroup *current_obj_cgroup(void);
+struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
+
+static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
+{
+ struct obj_cgroup *objcg = current_obj_cgroup();
+
+ if (objcg)
+ obj_cgroup_get(objcg);
+
+ return objcg;
+}
-static inline bool memcg_kmem_enabled(void)
+int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
+void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
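/*
 * Byte-granular charging against the current objcg (sketch only;
 * charge_object_example() is hypothetical). The reference taken by
 * get_obj_cgroup_from_current() pins the objcg while the bytes are
 * accounted:
 */
static inline int charge_object_example(size_t size, gfp_t gfp)
{
        struct obj_cgroup *objcg = get_obj_cgroup_from_current();
        int ret;

        if (!objcg)
                return 0;       /* no accounting for this context */
        ret = obj_cgroup_charge(objcg, gfp, size);
        if (ret)
                obj_cgroup_put(objcg);
        /* on success, keep objcg and obj_cgroup_uncharge() on free */
        return ret;
}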
+
+extern struct static_key_false memcg_bpf_enabled_key;
+static inline bool memcg_bpf_enabled(void)
+{
+ return static_branch_likely(&memcg_bpf_enabled_key);
+}
+
+extern struct static_key_false memcg_kmem_online_key;
+
+static inline bool memcg_kmem_online(void)
{
- return static_branch_unlikely(&memcg_kmem_enabled_key);
+ return static_branch_likely(&memcg_kmem_online_key);
+}
+
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
+{
+ if (memcg_kmem_online())
+ return __memcg_kmem_charge_page(page, gfp, order);
+ return 0;
+}
+
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
+{
+ if (memcg_kmem_online())
+ __memcg_kmem_uncharge_page(page, order);
}
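/*
 * A sketch of an accounted kernel page allocation (illustrative only;
 * example_kmem_alloc() is hypothetical -- real callers normally just
 * pass __GFP_ACCOUNT and let the page allocator do this):
 */
static inline struct page *example_kmem_alloc(gfp_t gfp, int order)
{
        struct page *page = alloc_pages(gfp, order);

        if (page && memcg_kmem_charge_page(page, gfp, order)) {
                __free_pages(page, order);
                return NULL;
        }
        return page;    /* pair with memcg_kmem_uncharge_page() on free */
}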
/*
- * helper for accessing a memcg's index. It will be used as an index in the
- * child cache array in kmem_cache, and also to derive its name. This function
- * will return -1 when this is not a kmem-limited memcg.
+ * A helper for accessing a memcg's kmem_id, used to look up the
+ * corresponding LRU lists.
*/
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return memcg ? memcg->kmemcg_id : -1;
}
+struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
+
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+ struct mem_cgroup *memcg;
+
+ if (!memcg_kmem_online())
+ return;
+
+ rcu_read_lock();
+ memcg = obj_cgroup_memcg(objcg);
+ count_memcg_events(memcg, idx, count);
+ rcu_read_unlock();
+}
+
+bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
+
+void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
+
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return memcg ? css_is_dying(&memcg->css) : false;
+}
+
#else
-#define for_each_memcg_cache_index(_idx) \
- for (; NULL; )
+static inline bool mem_cgroup_kmem_disabled(void)
+{
+ return true;
+}
-static inline bool memcg_kmem_enabled(void)
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
+{
+ return 0;
+}
+
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
+{
+}
+
+static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
+{
+ return 0;
+}
+
+static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
+{
+}
+
+static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline bool memcg_bpf_enabled(void)
{
return false;
}
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline bool memcg_kmem_online(void)
+{
+ return false;
+}
+
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return -1;
}
-static inline void memcg_get_cache_ids(void)
+static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
+{
+ return NULL;
+}
+
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+}
+
+static inline ino_t page_cgroup_ino(struct page *page)
+{
+ return 0;
+}
+
+static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
+{
+ return true;
+}
+
+static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
+{
+}
+
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return false;
+}
+#endif /* CONFIG_MEMCG */
+
+#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
+bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
+void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
+void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
+#else
+static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
+{
+ return true;
+}
+static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
+{
+ /* if zswap is disabled, do not block pages going to the swapping device */
+ return true;
+}
+#endif
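
The zswap hooks above bracket the life of a compressed-swap entry: a store path asks obj_cgroup_may_zswap() before committing, charges the compressed size on success, and uncharges that same size when the entry is freed. A minimal sketch of that pairing, assuming illustrative example_* names and the standard obj_cgroup_put() release helper:

	#include <linux/memcontrol.h>

	struct example_zswap_entry {
		struct obj_cgroup *objcg;	/* owns a reference, may be NULL */
		size_t length;			/* compressed size, in bytes */
	};

	static int example_zswap_store(struct folio *folio,
				       struct example_zswap_entry *entry)
	{
		entry->objcg = get_obj_cgroup_from_folio(folio);

		/* Refuse the store if the cgroup is over its zswap limit. */
		if (entry->objcg && !obj_cgroup_may_zswap(entry->objcg)) {
			obj_cgroup_put(entry->objcg);
			entry->objcg = NULL;
			return -ENOMEM;
		}

		/* ... compress the folio and set entry->length here ... */

		if (entry->objcg)
			obj_cgroup_charge_zswap(entry->objcg, entry->length);
		return 0;
	}

	static void example_zswap_free(struct example_zswap_entry *entry)
	{
		/* Uncharge exactly what was charged, then drop the reference. */
		if (entry->objcg) {
			obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
			obj_cgroup_put(entry->objcg);
		}
	}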
+
+/* Cgroup v1-related declarations */
+
+#ifdef CONFIG_MEMCG_V1
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
+
+bool mem_cgroup_oom_synchronize(bool wait);
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return p->memcg_in_oom;
+}
+
+static inline void mem_cgroup_enter_user_fault(void)
+{
+ WARN_ON(current->in_user_fault);
+ current->in_user_fault = 1;
+}
+
+static inline void mem_cgroup_exit_user_fault(void)
+{
+ WARN_ON(!current->in_user_fault);
+ current->in_user_fault = 0;
+}
+
+void memcg1_swapout(struct folio *folio, swp_entry_t entry);
+void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);
+
+#else /* CONFIG_MEMCG_V1 */
+static inline
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
+{
+ return 0;
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(bool wait)
+{
+ return false;
+}
+
+static inline void mem_cgroup_enter_user_fault(void)
+{
+}
+
+static inline void mem_cgroup_exit_user_fault(void)
+{
+}
+
+static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
{
}
-static inline void memcg_put_cache_ids(void)
+static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
{
}
-#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+#endif /* CONFIG_MEMCG_V1 */
#endif /* _LINUX_MEMCONTROL_H */
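
As a usage sketch of the objcg accounting entry points above (illustrative names; obj_cgroup_put() is the standard release helper and is assumed here): a long-lived object takes its own reference via get_obj_cgroup_from_current(), because a bare current_obj_cgroup() pointer is only valid within the local scope, and pairs obj_cgroup_charge() with obj_cgroup_uncharge():

	#include <linux/memcontrol.h>

	struct tracked_buf {
		struct obj_cgroup *objcg;	/* NULL for the root cgroup */
		size_t size;
	};

	static int tracked_buf_account(struct tracked_buf *buf, size_t size)
	{
		/* Takes a reference: the pointer outlives the local scope. */
		buf->objcg = get_obj_cgroup_from_current();
		buf->size = size;

		if (buf->objcg && obj_cgroup_charge(buf->objcg, GFP_KERNEL, size)) {
			obj_cgroup_put(buf->objcg);
			buf->objcg = NULL;
			return -ENOMEM;
		}
		return 0;
	}

	static void tracked_buf_unaccount(struct tracked_buf *buf)
	{
		if (buf->objcg) {
			obj_cgroup_uncharge(buf->objcg, buf->size);
			obj_cgroup_put(buf->objcg);
		}
	}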
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
new file mode 100644
index 000000000000..cc74de3dbcfe
--- /dev/null
+++ b/include/linux/memfd.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MEMFD_H
+#define __LINUX_MEMFD_H
+
+#include <linux/file.h>
+
+#define MEMFD_ANON_NAME "[memfd]"
+
+#ifdef CONFIG_MEMFD_CREATE
+extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
+struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
+/*
+ * Check for any existing seals on mmap, return an error if access is denied due
+ * to sealing, or 0 otherwise.
+ *
+ * We also update VMA flags if appropriate by manipulating the VMA flags pointed
+ * to by vm_flags_ptr.
+ */
+int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
+#else
+static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
+{
+ return -EINVAL;
+}
+static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int memfd_check_seals_mmap(struct file *file,
+ vm_flags_t *vm_flags_ptr)
+{
+ return 0;
+}
+#endif
+
+#endif /* __LINUX_MEMFD_H */
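
A sketch of the intended call site for memfd_check_seals_mmap() (hypothetical mmap handler; vm_flags_reset() is assumed as the usual helper for writing VMA flags back): pass a copy of the VMA flags, let the seal check veto or adjust them, then apply the result.

	#include <linux/memfd.h>
	#include <linux/mm.h>

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vm_flags_t vm_flags = vma->vm_flags;
		int err;

		/* Fails if sealing forbids this mapping, e.g. F_SEAL_WRITE
		 * versus a shared writable mapping. */
		err = memfd_check_seals_mmap(file, &vm_flags);
		if (err)
			return err;

		vm_flags_reset(vma, vm_flags);
		return 0;
	}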
diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
new file mode 100644
index 000000000000..bc326503d2d2
--- /dev/null
+++ b/include/linux/memory-failure.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_FAILURE_H
+#define _LINUX_MEMORY_FAILURE_H
+
+#include <linux/interval_tree.h>
+
+struct pfn_address_space;
+
+struct pfn_address_space {
+ struct interval_tree_node node;
+ struct address_space *mapping;
+};
+
+int register_pfn_address_space(struct pfn_address_space *pfn_space);
+void unregister_pfn_address_space(struct pfn_address_space *pfn_space);
+
+#endif /* _LINUX_MEMORY_FAILURE_H */
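
A registration sketch (illustrative names): a driver describes the pfn range backing its mapping with the interval tree node and registers it, so the memory-failure code can find the owning address_space when a poisoned pfn falls inside that range.

	#include <linux/memory-failure.h>

	static struct pfn_address_space example_pfn_space;

	static int example_register(struct address_space *mapping,
				    unsigned long first_pfn,
				    unsigned long nr_pages)
	{
		example_pfn_space.node.start = first_pfn;
		example_pfn_space.node.last = first_pfn + nr_pages - 1;
		example_pfn_space.mapping = mapping;

		return register_pfn_address_space(&example_pfn_space);
	}

	static void example_unregister(void)
	{
		unregister_pfn_address_space(&example_pfn_space);
	}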
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
new file mode 100644
index 000000000000..7a805796fcfd
--- /dev/null
+++ b/include/linux/memory-tiers.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_TIERS_H
+#define _LINUX_MEMORY_TIERS_H
+
+#include <linux/types.h>
+#include <linux/nodemask.h>
+#include <linux/kref.h>
+#include <linux/mmzone.h>
+#include <linux/notifier.h>
+/*
+ * Each tier covers an abstract distance chunk of size 128.
+ */
+#define MEMTIER_CHUNK_BITS 7
+#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
+/*
+ * Smaller abstract distance values imply faster (higher) memory tiers. Offset
+ * the DRAM adistance so that we can accommodate devices with a slightly lower
+ * adistance value (slightly faster) than default DRAM adistance to be part of
+ * the same memory tier.
+ */
+#define MEMTIER_ADISTANCE_DRAM ((4L * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+
+struct memory_tier;
+struct memory_dev_type {
+ /* list of memory types that are part of same tier as this type */
+ struct list_head tier_sibling;
+ /* list of memory types that are managed by one driver */
+ struct list_head list;
+ /* abstract distance for this specific memory type */
+ int adistance;
+ /* Nodes of same abstract distance */
+ nodemask_t nodes;
+ struct kref kref;
+};
+
+struct access_coordinate;
+
+#ifdef CONFIG_NUMA
+extern bool numa_demotion_enabled;
+extern struct memory_dev_type *default_dram_type;
+extern nodemask_t default_dram_nodes;
+struct memory_dev_type *alloc_memory_type(int adistance);
+void put_memory_type(struct memory_dev_type *memtype);
+void init_node_memory_type(int node, struct memory_dev_type *default_type);
+void clear_node_memory_type(int node, struct memory_dev_type *memtype);
+int register_mt_adistance_algorithm(struct notifier_block *nb);
+int unregister_mt_adistance_algorithm(struct notifier_block *nb);
+int mt_calc_adistance(int node, int *adist);
+int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
+ const char *source);
+int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);
+struct memory_dev_type *mt_find_alloc_memory_type(int adist,
+ struct list_head *memory_types);
+void mt_put_memory_types(struct list_head *memory_types);
+#ifdef CONFIG_MIGRATION
+int next_demotion_node(int node);
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
+bool node_is_toptier(int node);
+#else
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif
+
+#else
+
+#define numa_demotion_enabled false
+#define default_dram_type NULL
+#define default_dram_nodes NODE_MASK_NONE
+/*
+ * The CONFIG_NUMA implementation returns a non-NULL pointer or an
+ * ERR_PTR(); this !CONFIG_NUMA stub simply returns NULL.
+ */
+static inline struct memory_dev_type *alloc_memory_type(int adistance)
+{
+ return NULL;
+}
+
+static inline void put_memory_type(struct memory_dev_type *memtype)
+{
+}
+
+static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
+{
+}
+
+static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+}
+
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+
+static inline int register_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int mt_calc_adistance(int node, int *adist)
+{
+ return NOTIFY_DONE;
+}
+
+static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
+ const char *source)
+{
+ return -EIO;
+}
+
+static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
+{
+ return -EIO;
+}
+
+static inline struct memory_dev_type *mt_find_alloc_memory_type(int adist,
+ struct list_head *memory_types)
+{
+ return NULL;
+}
+
+static inline void mt_put_memory_types(struct list_head *memory_types)
+{
+}
+#endif /* CONFIG_NUMA */
+#endif /* _LINUX_MEMORY_TIERS_H */
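
A usage sketch for the tiering API (illustrative, modeled on what a slower-than-DRAM device driver might do, and assuming the CONFIG_NUMA implementation reports failure via ERR_PTR() as its stub comment implies): allocate a memory type one abstract-distance chunk beyond DRAM so its nodes land in a lower tier, and attach/detach the node around hotplug.

	#include <linux/memory-tiers.h>

	static struct memory_dev_type *example_type;

	static int example_attach_node(int nid)
	{
		/* One chunk further than DRAM means a slower, lower tier. */
		example_type = alloc_memory_type(MEMTIER_ADISTANCE_DRAM +
						 MEMTIER_CHUNK_SIZE);
		if (IS_ERR(example_type))
			return PTR_ERR(example_type);

		init_node_memory_type(nid, example_type);
		return 0;
	}

	static void example_detach_node(int nid)
	{
		clear_node_memory_type(nid, example_type);
		put_memory_type(example_type);
	}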
diff --git a/include/linux/memory.h b/include/linux/memory.h
index b723a686fc10..faeaa921e55b 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/memory.h - generic memory definition
*
@@ -18,68 +19,111 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+/**
+ * struct memory_group - a logical group of memory blocks
+ * @nid: The node id for all memory blocks inside the memory group.
+ * @memory_blocks: List of all memory blocks belonging to this memory group.
+ * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
+ * memory group.
+ * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
+ * memory group.
+ * @is_dynamic: The memory group type: static vs. dynamic
+ * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum
+ * number of pages we'll have in this static memory group.
+ * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages
+ * in which memory is added/removed in this dynamic memory group.
+ * This granularity defines the alignment of a unit in physical
+ * address space; it has to be at least as big as a single
+ * memory block.
+ *
+ * A memory group logically groups memory blocks; each memory block
+ * belongs to at most one memory group. A memory group corresponds to
+ * a memory device, such as a DIMM or a NUMA node, which spans multiple
+ * memory blocks and might even span multiple non-contiguous physical memory
+ * ranges.
+ *
+ * Modification of members after registration is serialized by memory
+ * hot(un)plug code.
+ */
+struct memory_group {
+ int nid;
+ struct list_head memory_blocks;
+ unsigned long present_kernel_pages;
+ unsigned long present_movable_pages;
+ bool is_dynamic;
+ union {
+ struct {
+ unsigned long max_pages;
+ } s;
+ struct {
+ unsigned long unit_pages;
+ } d;
+ };
+};
+
+enum memory_block_state {
+ /* These states are exposed to userspace as text strings in sysfs */
+ MEM_ONLINE, /* exposed to userspace */
+ MEM_GOING_OFFLINE, /* exposed to userspace */
+ MEM_OFFLINE, /* exposed to userspace */
+ MEM_GOING_ONLINE,
+ MEM_CANCEL_ONLINE,
+ MEM_CANCEL_OFFLINE,
+};
+
struct memory_block {
unsigned long start_section_nr;
- unsigned long end_section_nr;
- unsigned long state; /* serialized by the dev->lock */
- int section_count; /* serialized by mem_sysfs_mutex */
+ enum memory_block_state state; /* serialized by the dev->lock */
int online_type; /* for passing data to online routine */
- int phys_device; /* to which fru does this belong? */
- void *hw; /* optional pointer to fw/hw data */
- int (*phys_callback)(struct memory_block *);
+ int nid; /* NID for this memory block */
+ /*
+ * The single zone of this memory block if all PFNs of this memory block
+ * that are System RAM (not a memory hole, not ZONE_DEVICE ranges) are
+ * managed by a single zone. NULL if multiple zones (including nodes)
+ * apply.
+ */
+ struct zone *zone;
struct device dev;
+ struct vmem_altmap *altmap;
+ struct memory_group *group; /* group (if any) for this block */
+ struct list_head group_next; /* next block inside memory group */
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
+ atomic_long_t nr_hwpoison;
+#endif
};
int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
-
-/* These states are exposed to userspace as text strings in sysfs */
-#define MEM_ONLINE (1<<0) /* exposed to userspace */
-#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */
-#define MEM_OFFLINE (1<<2) /* exposed to userspace */
-#define MEM_GOING_ONLINE (1<<3)
-#define MEM_CANCEL_ONLINE (1<<4)
-#define MEM_CANCEL_OFFLINE (1<<5)
+int set_memory_block_size_order(unsigned int order);
struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
- int status_change_nid_normal;
- int status_change_nid_high;
- int status_change_nid;
-};
-
-/*
- * During pageblock isolation, count the number of pages within the
- * range [start_pfn, start_pfn + nr_pages) which are owned by code
- * in the notifier chain.
- */
-#define MEM_ISOLATE_COUNT (1<<0)
-
-struct memory_isolate_notify {
- unsigned long start_pfn; /* Start of range to check */
- unsigned int nr_pages; /* # pages in range to check */
- unsigned int pages_found; /* # pages owned found by callbacks */
};
struct notifier_block;
struct mem_section;
/*
- * Priorities for the hotplug memory callback routines (stored in decreasing
- * order in the callback chain)
+ * Priorities for the hotplug memory callback routines. Invoked from
+ * high to low. Higher priorities correspond to higher numbers.
*/
-#define SLAB_CALLBACK_PRI 1
-#define IPC_CALLBACK_PRI 10
+#define DEFAULT_CALLBACK_PRI 0
+#define SLAB_CALLBACK_PRI 1
+#define CXL_CALLBACK_PRI 5
+#define HMAT_CALLBACK_PRI 6
+#define MM_COMPUTE_BATCH_PRI 10
+#define CPUSET_CALLBACK_PRI 10
+#define MEMTIER_HOTPLUG_PRI 100
+#define KSM_CALLBACK_PRI 100
-#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
-static inline int memory_dev_init(void)
+#ifndef CONFIG_MEMORY_HOTPLUG
+static inline void memory_dev_init(void)
{
- return 0;
+ return;
}
static inline int register_memory_notifier(struct notifier_block *nb)
{
@@ -88,53 +132,74 @@ static inline int register_memory_notifier(struct notifier_block *nb)
static inline void unregister_memory_notifier(struct notifier_block *nb)
{
}
-static inline int memory_notify(unsigned long val, void *v)
+static inline int memory_notify(enum memory_block_state state, void *v)
{
return 0;
}
-static inline int register_memory_isolate_notifier(struct notifier_block *nb)
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
{
return 0;
}
-static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
+static inline int memory_block_advise_max_size(unsigned long size)
{
+ return -ENODEV;
}
-static inline int memory_isolate_notify(unsigned long val, void *v)
+static inline unsigned long memory_block_advised_max_size(void)
{
return 0;
}
-#else
+#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
-extern int register_memory_isolate_notifier(struct notifier_block *nb);
-extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
-extern int register_new_memory(int, struct mem_section *);
-#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int unregister_memory_section(struct mem_section *);
-#endif
-extern int memory_dev_init(void);
-extern int memory_notify(unsigned long val, void *v);
-extern int memory_isolate_notify(unsigned long val, void *v);
-extern struct memory_block *find_memory_block_hinted(struct mem_section *,
- struct memory_block *);
-extern struct memory_block *find_memory_block(struct mem_section *);
-#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
-
-#ifdef CONFIG_MEMORY_HOTPLUG
+int create_memory_block_devices(unsigned long start, unsigned long size,
+ int nid, struct vmem_altmap *altmap,
+ struct memory_group *group);
+void remove_memory_block_devices(unsigned long start, unsigned long size);
+extern void memory_dev_init(void);
+extern int memory_notify(enum memory_block_state state, void *v);
+extern struct memory_block *find_memory_block(unsigned long section_nr);
+typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
+extern int walk_memory_blocks(unsigned long start, unsigned long size,
+ void *arg, walk_memory_blocks_func_t func);
+extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
+
+extern int memory_group_register_static(int nid, unsigned long max_pages);
+extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
+extern int memory_group_unregister(int mgid);
+struct memory_group *memory_group_find_by_id(int mgid);
+typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg);
+struct memory_block *find_memory_block_by_id(unsigned long block_id);
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
register_memory_notifier(&fn##_mem_nb); \
})
-#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
-#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
-#else
-#define hotplug_memory_notifier(fn, pri) ({ 0; })
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
-#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
-#endif
+
+extern int sections_per_block;
+
+static inline unsigned long memory_block_id(unsigned long section_nr)
+{
+ return section_nr / sections_per_block;
+}
+
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+ return memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+ return pfn_to_block_id(PFN_DOWN(phys));
+}
+
+#ifdef CONFIG_NUMA
+void memory_block_add_nid_early(struct memory_block *mem, int nid);
+#endif /* CONFIG_NUMA */
+int memory_block_advise_max_size(unsigned long size);
+unsigned long memory_block_advised_max_size(void);
+#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Kernel text modification mutex, used for code patching. Users of this lock
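
To illustrate the notifier interface above (hypothetical callback; the states and priorities are the ones defined in this header): a subsystem registers at a priority from the table and reacts to the block state transitions carried in struct memory_notify.

	#include <linux/memory.h>

	static int example_mem_callback(struct notifier_block *nb,
					unsigned long action, void *arg)
	{
		struct memory_notify *mn = arg;

		switch (action) {
		case MEM_GOING_ONLINE:
			/* Prepare per-range state; a veto would return
			 * notifier_from_errno(-ENOMEM) instead. */
			break;
		case MEM_OFFLINE:
			pr_debug("example: %lu pages at pfn %#lx went offline\n",
				 mn->nr_pages, mn->start_pfn);
			break;
		}
		return NOTIFY_OK;
	}

	static int __init example_mem_init(void)
	{
		return hotplug_memory_notifier(example_mem_callback,
					       DEFAULT_CALLBACK_PRI);
	}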
diff --git a/include/linux/memory/ti-aemif.h b/include/linux/memory/ti-aemif.h
new file mode 100644
index 000000000000..da94a9d985e7
--- /dev/null
+++ b/include/linux/memory/ti-aemif.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEMORY_TI_AEMIF_H
+#define __MEMORY_TI_AEMIF_H
+
+/**
+ * struct aemif_cs_timings: structure to hold CS timing configuration.
+ * Values are expressed in number of clock cycles - 1.
+ * @ta: minimum turn around time
+ * @rhold: read hold width
+ * @rstrobe: read strobe width
+ * @rsetup: read setup width
+ * @whold: write hold width
+ * @wstrobe: write strobe width
+ * @wsetup: write setup width
+ */
+struct aemif_cs_timings {
+ u32 ta;
+ u32 rhold;
+ u32 rstrobe;
+ u32 rsetup;
+ u32 whold;
+ u32 wstrobe;
+ u32 wsetup;
+};
+
+struct aemif_device;
+
+int aemif_set_cs_timings(struct aemif_device *aemif, u8 cs, struct aemif_cs_timings *timings);
+int aemif_check_cs_timings(struct aemif_cs_timings *timings);
+
+#endif // __MEMORY_TI_AEMIF_H
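
A client-side sketch (hypothetical timing values; fields are clock cycles minus one, per the struct documentation above): validate the timings first, then apply them to a chip select.

	#include <linux/memory/ti-aemif.h>

	static int example_apply_timings(struct aemif_device *aemif)
	{
		struct aemif_cs_timings t = {
			.ta = 1,
			.rhold = 1, .rstrobe = 3, .rsetup = 1,
			.whold = 1, .wstrobe = 3, .wsetup = 1,
		};
		int err;

		err = aemif_check_cs_timings(&t);
		if (err)
			return err;

		return aemif_set_cs_timings(aemif, 0, &t);	/* CS0 */
	}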
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 5e6e4cc36ff4..f2f16cdd73ee 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H
@@ -10,63 +11,70 @@ struct page;
struct zone;
struct pglist_data;
struct mem_section;
-struct memory_block;
+struct memory_group;
struct resource;
+struct vmem_altmap;
+struct dev_pagemap;
#ifdef CONFIG_MEMORY_HOTPLUG
-/*
- * Return page for the valid pfn only if the page is online. All pfn
- * walkers which rely on the fully initialized page->flags and others
- * should use this rather than pfn_valid && pfn_to_page
- */
-#define pfn_to_online_page(pfn) \
-({ \
- struct page *___page = NULL; \
- unsigned long ___nr = pfn_to_section_nr(pfn); \
- \
- if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
- ___page = pfn_to_page(pfn); \
- ___page; \
-})
-
-/*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
- */
-enum {
- MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
- SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
- MIX_SECTION_INFO,
- NODE_INFO,
- MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
-};
+struct page *pfn_to_online_page(unsigned long pfn);
/* Types for control the zone type of onlined and offlined memory */
enum {
- MMOP_OFFLINE = -1,
- MMOP_ONLINE_KEEP,
+ /* Offline the memory. */
+ MMOP_OFFLINE = 0,
+ /* Online the memory. Zone depends, see default_zone_for_pfn(). */
+ MMOP_ONLINE,
+ /* Online the memory to ZONE_NORMAL. */
MMOP_ONLINE_KERNEL,
+ /* Online the memory to ZONE_MOVABLE. */
MMOP_ONLINE_MOVABLE,
};
+/* Flags for add_memory() and friends to specify memory hotplug details. */
+typedef int __bitwise mhp_t;
+
+/* No special request */
+#define MHP_NONE ((__force mhp_t)0)
/*
- * pgdat resizing functions
+ * Allow merging of the added System RAM resource with adjacent,
+ * mergeable resources. After a successful call to add_memory_resource()
+ * with this flag set, the resource pointer must no longer be used as it
+ * might be stale, or the resource might have changed.
*/
-static inline
-void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_lock_irqsave(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_init(struct pglist_data *pgdat)
-{
- spin_lock_init(&pgdat->node_size_lock);
-}
+#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
+
+/*
+ * We want memmap (struct page array) to be self contained.
+ * To do so, we will use the beginning of the hot-added range to build
+ * the page tables for the memmap array that describes the entire range.
+ * Only selected architectures support it with SPARSE_VMEMMAP.
+ * This is only a hint, the core kernel can decide to not do this based on
+ * different alignment checks.
+ */
+#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
+/*
+ * The nid field specifies a memory group id (mgid) instead. The memory group
+ * implies the node id (nid).
+ */
+#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2))
+
+/*
+ * Extended parameters for memory hotplug:
+ * altmap: alternative allocator for memmap array (optional)
+ * pgprot: page protection flags to apply to newly created page tables
+ * (required)
+ * pgmap: device page map for ZONE_DEVICE memory, used to initialize
+ * the memmap of hot-added device memory (optional)
+ */
+struct mhp_params {
+ struct vmem_altmap *altmap;
+ pgprot_t pgprot;
+ struct dev_pagemap *pgmap;
+};
+
+bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
+struct range mhp_get_pluggable_range(bool need_mapping);
+bool mhp_supports_memmap_on_memory(void);
+
/*
* Zone resizing functions
*
@@ -94,27 +102,32 @@ static inline void zone_seqlock_init(struct zone *zone)
{
seqlock_init(&zone->span_seqlock);
}
-extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
-extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
-extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
+extern void adjust_present_page_count(struct page *page,
+ struct memory_group *group,
+ long nr_pages);
/* VM interface that may be used by firmware interface */
-extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
- unsigned long *valid_start, unsigned long *valid_end);
-extern void __offline_isolated_pages(unsigned long, unsigned long);
+extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone);
+extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
+extern int online_pages(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group);
+extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
-typedef void (*online_page_callback_t)(struct page *page);
+typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
+extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
-extern void __online_page_set_limits(struct page *page);
-extern void __online_page_increment_counters(struct page *page);
-extern void __online_page_free(struct page *page);
-
extern int try_online_node(int nid);
-extern bool memhp_auto_online;
+extern int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_params *params);
+extern u64 max_mem_size;
+
+extern int mhp_online_type_from_str(const char *str);
+
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -122,105 +135,46 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_pageblock_removable_nolock(struct page *page);
-extern int arch_remove_memory(u64 start, u64 size);
-extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
-#endif /* CONFIG_MEMORY_HOTREMOVE */
+extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
/* reasonably generic interface to expand the physical pages */
-extern int __add_pages(int nid, unsigned long start_pfn,
- unsigned long nr_pages, bool want_memblock);
+extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct mhp_params *params);
-#ifdef CONFIG_NUMA
-extern int memory_add_physaddr_to_nid(u64 start);
-#else
-static inline int memory_add_physaddr_to_nid(u64 start)
+#ifndef CONFIG_ARCH_HAS_ADD_PAGES
+static inline int add_pages(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, struct mhp_params *params)
{
- return 0;
+ return __add_pages(nid, start_pfn, nr_pages, params);
}
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_free_nodedata(pg_data_t *pgdat);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
+#else /* ARCH_HAS_ADD_PAGES */
+int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct mhp_params *params);
+#endif /* ARCH_HAS_ADD_PAGES */
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
+void get_online_mems(void);
+void put_online_mems(void);
-#ifdef CONFIG_NUMA
-/*
- * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
- * XXX: kmalloc_node() can't work well to get new node's memory at this time.
- * Because, pgdat for the new node is not allocated/initialized yet itself.
- * To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
-})
-/*
- * This definition is just for error path in node hotadd.
- * For node hotremove, we have to replace this.
- */
-#define generic_free_nodedata(pgdat) kfree(pgdat)
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+/* See kswapd_is_running() */
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
- node_data[nid] = pgdat;
+ mutex_lock(&pgdat->kswapd_lock);
}
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void generic_free_nodedata(pg_data_t *pgdat)
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
+ mutex_unlock(&pgdat->kswapd_lock);
}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
-#else
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
+ mutex_init(&pgdat->kswapd_lock);
}
-#endif
-extern void put_page_bootmem(struct page *page);
-extern void get_page_bootmem(unsigned long ingo, struct page *page,
- unsigned long type);
-
-void get_online_mems(void);
-void put_online_mems(void);
-
-void mem_hotplug_begin(void);
-void mem_hotplug_done(void);
-
-extern void set_zone_contiguous(struct zone *zone);
-extern void clear_zone_contiguous(struct zone *zone);
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
@@ -231,13 +185,6 @@ extern void clear_zone_contiguous(struct zone *zone);
___page; \
})
-/*
- * Stub functions for when hotplug is off
- */
-static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
-
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
return 0;
@@ -250,17 +197,6 @@ static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
-static inline int mhp_notimplemented(const char *func)
-{
- printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
- dump_stack();
- return -ENOSYS;
-}
-
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
-}
-
static inline int try_online_node(int nid)
{
return 0;
@@ -276,49 +212,110 @@ static inline bool movable_node_is_enabled(void)
{
return false;
}
+
+static inline bool mhp_supports_memmap_on_memory(void)
+{
+ return false;
+}
+
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
+/*
+ * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
+ * platforms might override and use arch_get_mappable_range()
+ * for internal non-memory-hotplug purposes.
+ */
+struct range arch_get_mappable_range(void);
+
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
+/*
+ * pgdat resizing functions
+ */
+static inline
+void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_lock_irqsave(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_init(struct pglist_data *pgdat)
+{
+ spin_lock_init(&pgdat->node_size_lock);
+}
+#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+/*
+ * Stub functions for when hotplug is off
+ */
+static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
+#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern void remove_memory(int nid, u64 start, u64 size);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group);
+extern int remove_memory(u64 start, u64 size);
+extern void __remove_memory(u64 start, u64 size);
+extern int offline_and_remove_memory(u64 start, u64 size);
#else
-static inline bool is_mem_section_removable(unsigned long pfn,
- unsigned long nr_pages)
-{
- return false;
-}
-
static inline void try_offline_node(int nid) {}
-static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
{
return -EINVAL;
}
-static inline void remove_memory(int nid, u64 start, u64 size) {}
+static inline int remove_memory(u64 start, u64 size)
+{
+ return -EBUSY;
+}
+
+static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
- void *arg, int (*func)(struct memory_block *, void *));
-extern int add_memory(int nid, u64 start, u64 size);
-extern int add_memory_resource(int nid, struct resource *resource, bool online);
-extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock);
+#ifdef CONFIG_MEMORY_HOTPLUG
+/* Default online_type (MMOP_*) when new memory blocks are added. */
+extern int mhp_get_default_online_type(void);
+extern void mhp_set_default_online_type(int online_type);
+extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
+extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory_resource(int nid, struct resource *resource,
+ mhp_t mhp_flags);
+extern int add_memory_driver_managed(int nid, u64 start, u64 size,
+ const char *resource_name,
+ mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern bool is_memblock_offlined(struct memory_block *mem);
-extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn);
-extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
- unsigned long map_offset);
+ unsigned long nr_pages,
+ struct vmem_altmap *altmap, int migratetype,
+ bool isolate_pageblock);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+ unsigned long start_pfn,
+ unsigned long nr_pages);
+extern int sparse_add_section(int nid, unsigned long pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
+extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
- int online_type);
-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
+extern struct zone *zone_for_pfn_range(int online_type, int nid,
+ struct memory_group *group, unsigned long start_pfn,
unsigned long nr_pages);
+extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
+ struct mhp_params *params);
+void arch_remove_linear_mapping(u64 start, u64 size);
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
#endif /* __LINUX_MEMORY_HOTPLUG_H */
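
A hotplug sketch tying the flags above together (illustrative resource name, in the style of driver-managed memory providers): merge the new resource with mergeable neighbours, and place the memmap on the hot-added range only when the kernel reports support for it.

	#include <linux/memory_hotplug.h>

	static int example_hotplug(int nid, u64 start, u64 size)
	{
		mhp_t flags = MHP_MERGE_RESOURCE;

		if (mhp_supports_memmap_on_memory())
			flags |= MHP_MEMMAP_ON_MEMORY;

		return add_memory_driver_managed(nid, start, size,
						 "System RAM (example)", flags);
	}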
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 3a58b4be1b0c..0fe96f3ab3ef 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NUMA memory policies for Linux.
* Copyright 2003,2004 Andi Kleen SuSE Labs
@@ -5,18 +6,20 @@
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1
-
+#include <linux/sched.h>
#include <linux/mmzone.h>
-#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
+#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>
struct mm_struct;
+#define NO_INTERLEAVE_INDEX (-1UL) /* use task il_prev for interleaving */
+
#ifdef CONFIG_NUMA
/*
@@ -27,10 +30,10 @@ struct mm_struct;
* the process policy is used. Interrupts ignore the memory policy
* of the current process.
*
- * Locking policy for interlave:
+ * Locking policy for interleave:
* In process context there is no locking because only the process accesses
* its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem.
+ * mmap_lock.
*
* Freeing policy:
* Mempolicy objects are reference counted. A mempolicy will be freed when
@@ -45,11 +48,9 @@ struct mempolicy {
atomic_t refcnt;
unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
- union {
- short preferred_node; /* preferred */
- nodemask_t nodes; /* interleave/bind */
- /* undefined for default */
- } v;
+ nodemask_t nodes; /* interleave/bind/preferred/etc */
+ int home_node; /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */
+
union {
nodemask_t cpuset_mems_allowed; /* relative to these nodes */
nodemask_t user_nodemask; /* nodemask passed by user */
@@ -91,8 +92,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
return pol;
}
-#define vma_policy(vma) ((vma)->vm_policy)
-
static inline void mpol_get(struct mempolicy *pol)
{
if (pol)
@@ -109,35 +108,30 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
/*
* Tree of shared policies for a shared memory region.
- * Maintain the policies in a pseudo mm that contains vmas. The vmas
- * carry the policy. As a special twist the pseudo mm is indexed in pages, not
- * bytes, so that we can work with shared memory segments bigger than
- * unsigned long.
*/
-
-struct sp_node {
- struct rb_node nd;
- unsigned long start, end;
- struct mempolicy *policy;
-};
-
struct shared_policy {
struct rb_root root;
rwlock_t lock;
};
+struct sp_node {
+ struct rb_node nd;
+ pgoff_t start, end;
+ struct mempolicy *policy;
+};
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
-int mpol_set_shared_policy(struct shared_policy *info,
- struct vm_area_struct *vma,
- struct mempolicy *new);
-void mpol_free_shared_policy(struct shared_policy *p);
+int mpol_set_shared_policy(struct shared_policy *sp,
+ struct vm_area_struct *vma, struct mempolicy *mpol);
+void mpol_free_shared_policy(struct shared_policy *sp);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
- unsigned long idx);
+ pgoff_t idx);
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, pgoff_t *ilx);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
@@ -149,7 +143,7 @@ extern int huge_node(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
-extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
+extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);
@@ -172,48 +166,37 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol);
extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
-static inline bool vma_migratable(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- return false;
-
- /*
- * DAX device mappings require predictable access latency, so avoid
- * incurring periodic faults.
- */
- if (vma_is_dax(vma))
- return false;
-
-#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
- if (vma->vm_flags & VM_HUGETLB)
- return false;
-#endif
+extern bool vma_migratable(struct vm_area_struct *vma);
- /*
- * Migration allocates pages in the highest zone. If we cannot
- * do so then migration (at least from node to node) is not
- * possible.
- */
- if (vma->vm_file &&
- gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
- < policy_zone)
- return false;
- return true;
+int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
+ unsigned long addr);
+extern void mpol_put_task_policy(struct task_struct *);
+
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+ return (pol->mode == MPOL_PREFERRED_MANY);
}
-extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
-extern void mpol_put_task_policy(struct task_struct *);
+extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
+
+extern int mempolicy_set_node_perf(unsigned int node,
+ struct access_coordinate *coords);
#else
struct mempolicy {};
+static inline struct mempolicy *get_task_policy(struct task_struct *p)
+{
+ return NULL;
+}
+
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
return true;
}
-static inline void mpol_put(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *pol)
{
}
@@ -232,17 +215,22 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp,
{
}
-static inline void mpol_free_shared_policy(struct shared_policy *p)
+static inline void mpol_free_shared_policy(struct shared_policy *sp)
{
}
static inline struct mempolicy *
-mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx)
{
return NULL;
}
-#define vma_policy(vma) NULL
+static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx)
+{
+ *ilx = 0;
+ return NULL;
+}
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
@@ -298,7 +286,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
}
#endif
-static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+static inline int mpol_misplaced(struct folio *folio,
+ struct vm_fault *vmf,
unsigned long address)
{
return -1; /* no node preference */
@@ -307,5 +296,11 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
static inline void mpol_put_task_policy(struct task_struct *task)
{
}
+
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+ return false;
+}
+
#endif /* CONFIG_NUMA */
#endif
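
A small sketch of the MPOL_PREFERRED_MANY helper above (hypothetical caller): an allocation path can check whether the task policy merely prefers a node set, in which case a miss on those nodes is not fatal and fallback is allowed.

	#include <linux/mempolicy.h>

	static bool example_fallback_allowed(struct task_struct *tsk)
	{
		struct mempolicy *pol = get_task_policy(tsk);

		/*
		 * MPOL_PREFERRED_MANY tries pol->nodes first but may fall
		 * back to other nodes, unlike a strict MPOL_BIND.
		 */
		return pol && mpol_is_preferred_many(pol);
	}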
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index b1086c936507..e8e440e04a06 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -1,9 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* memory buffer pool support
*/
#ifndef _LINUX_MEMPOOL_H
#define _LINUX_MEMPOOL_H
+#include <linux/sched.h>
+#include <linux/alloc_tag.h>
#include <linux/wait.h>
#include <linux/compiler.h>
@@ -12,7 +15,7 @@ struct kmem_cache;
typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);
-typedef struct mempool_s {
+typedef struct mempool {
spinlock_t lock;
int min_nr; /* nr of elements at *elements */
int curr_nr; /* Current nr of elements at *elements */
@@ -24,16 +27,53 @@ typedef struct mempool_s {
wait_queue_head_t wait;
} mempool_t;
-extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data);
-extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data,
- gfp_t gfp_mask, int nid);
+static inline bool mempool_initialized(struct mempool *pool)
+{
+ return pool->elements != NULL;
+}
+
+static inline bool mempool_is_saturated(struct mempool *pool)
+{
+ return READ_ONCE(pool->curr_nr) >= pool->min_nr;
+}
+
+void mempool_exit(struct mempool *pool);
+int mempool_init_node(struct mempool *pool, int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data, gfp_t gfp_mask, int node_id);
+int mempool_init_noprof(struct mempool *pool, int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data);
+#define mempool_init(...) \
+ alloc_hooks(mempool_init_noprof(__VA_ARGS__))
+
+struct mempool *mempool_create_node_noprof(int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data, gfp_t gfp_mask, int nid);
+#define mempool_create_node(...) \
+ alloc_hooks(mempool_create_node_noprof(__VA_ARGS__))
+
+#define mempool_create(_min_nr, _alloc_fn, _free_fn, _pool_data) \
+ mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \
+ GFP_KERNEL, NUMA_NO_NODE)
+
+int mempool_resize(struct mempool *pool, int new_min_nr);
+void mempool_destroy(struct mempool *pool);
+
+void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask) __malloc;
+#define mempool_alloc(...) \
+ alloc_hooks(mempool_alloc_noprof(__VA_ARGS__))
+int mempool_alloc_bulk_noprof(struct mempool *pool, void **elem,
+ unsigned int count, unsigned int allocated);
+#define mempool_alloc_bulk(...) \
+ alloc_hooks(mempool_alloc_bulk_noprof(__VA_ARGS__))
-extern int mempool_resize(mempool_t *pool, int new_min_nr);
-extern void mempool_destroy(mempool_t *pool);
-extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
-extern void mempool_free(void *element, mempool_t *pool);
+void *mempool_alloc_preallocated(struct mempool *pool) __malloc;
+void mempool_free(void *element, struct mempool *pool);
+unsigned int mempool_free_bulk(struct mempool *pool, void **elem,
+ unsigned int count);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
@@ -42,12 +82,11 @@ extern void mempool_free(void *element, mempool_t *pool);
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
-static inline mempool_t *
-mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
-{
- return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
- (void *) kc);
-}
+
+#define mempool_init_slab_pool(_pool, _min_nr, _kc) \
+ mempool_init(_pool, (_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc))
+#define mempool_create_slab_pool(_min_nr, _kc) \
+ mempool_create((_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc))
/*
* a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the
@@ -55,11 +94,13 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
*/
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
void mempool_kfree(void *element, void *pool_data);
-static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
-{
- return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
- (void *) size);
-}
+
+#define mempool_init_kmalloc_pool(_pool, _min_nr, _size) \
+ mempool_init(_pool, (_min_nr), mempool_kmalloc, mempool_kfree, \
+ (void *)(unsigned long)(_size))
+#define mempool_create_kmalloc_pool(_min_nr, _size) \
+ mempool_create((_min_nr), mempool_kmalloc, mempool_kfree, \
+ (void *)(unsigned long)(_size))
/*
* A mempool_alloc_t and mempool_free_t for a simple page allocator that
@@ -67,10 +108,12 @@ static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
*/
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
void mempool_free_pages(void *element, void *pool_data);
-static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
-{
- return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages,
- (void *)(long)order);
-}
+
+#define mempool_init_page_pool(_pool, _min_nr, _order) \
+ mempool_init(_pool, (_min_nr), mempool_alloc_pages, \
+ mempool_free_pages, (void *)(long)(_order))
+#define mempool_create_page_pool(_min_nr, _order) \
+ mempool_create((_min_nr), mempool_alloc_pages, \
+ mempool_free_pages, (void *)(long)(_order))
#endif /* _LINUX_MEMPOOL_H */
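
The classic usage pattern for this API, as a sketch (illustrative sizes): reserve a minimum number of fixed-size elements at init time so allocations on a writeback or I/O path can always make forward progress.

	#include <linux/mempool.h>

	static mempool_t *example_pool;

	static int __init example_pool_init(void)
	{
		/* Pre-allocate 16 elements of 256 bytes each. */
		example_pool = mempool_create_kmalloc_pool(16, 256);
		return example_pool ? 0 : -ENOMEM;
	}

	static void example_pool_use(void)
	{
		/* Falls back to the reserved elements under pressure;
		 * with a sleepable gfp mask this does not fail. */
		void *obj = mempool_alloc(example_pool, GFP_NOIO);

		/* ... use the 256-byte object on the I/O path ... */

		mempool_free(obj, example_pool);
	}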
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
new file mode 100644
index 000000000000..a55f62cc5266
--- /dev/null
+++ b/include/linux/memregion.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MEMREGION_H_
+#define _MEMREGION_H_
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/range.h>
+#include <linux/bug.h>
+
+struct memregion_info {
+ int target_node;
+ struct range range;
+};
+
+#ifdef CONFIG_MEMREGION
+int memregion_alloc(gfp_t gfp);
+void memregion_free(int id);
+#else
+static inline int memregion_alloc(gfp_t gfp)
+{
+ return -ENOMEM;
+}
+static inline void memregion_free(int id)
+{
+}
+#endif
+
+/**
+ * cpu_cache_invalidate_memregion - drop any CPU cached data for
+ * memregion
+ * @start: start physical address of the target memory region.
+ * @len: length of the target memory region. -1 for all the regions of
+ * the target type.
+ *
+ * Perform cache maintenance after a memory event / operation that
+ * changes the contents of physical memory in a cache-incoherent manner.
+ * For example, device memory technologies like NVDIMM and CXL have
+ * device secure erase, and dynamic region provision that can replace
+ * the memory mapped to a given physical address.
+ *
+ * Limit the functionality to architectures that have an efficient way
+ * to writeback and invalidate potentially terabytes of address space at
+ * once. Note that this routine may or may not write back any dirty
+ * contents while performing the invalidation. It is only exported for
+ * the explicit usage of the NVDIMM and CXL modules in the 'DEVMEM'
+ * symbol namespace on bare platforms.
+ *
+ * Returns 0 on success or negative error code on a failure to perform
+ * the cache maintenance.
+ */
+#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len);
+bool cpu_cache_has_invalidate_memregion(void);
+#else
+static inline bool cpu_cache_has_invalidate_memregion(void)
+{
+ return false;
+}
+
+static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
+{
+ WARN_ON_ONCE("CPU cache invalidation required");
+ return -ENXIO;
+}
+#endif
+
+static inline int cpu_cache_invalidate_all(void)
+{
+ return cpu_cache_invalidate_memregion(0, -1);
+}
+
+#endif /* _MEMREGION_H_ */
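
A provider-side sketch (hypothetical provisioning path): allocate a region id, and invalidate CPU caches over the repurposed span before handing it out, skipping the flush where the architecture offers no efficient primitive.

	#include <linux/memregion.h>

	static int example_provision(phys_addr_t start, size_t len)
	{
		int id = memregion_alloc(GFP_KERNEL);

		if (id < 0)
			return id;

		if (cpu_cache_has_invalidate_memregion()) {
			int err = cpu_cache_invalidate_memregion(start, len);

			if (err) {
				memregion_free(id);
				return err;
			}
		}
		return id;
	}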
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 93416196ba64..713ec0435b48 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
-#include <linux/mm.h>
+
+#include <linux/mmzone.h>
+#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
@@ -16,47 +19,248 @@ struct device;
* @alloc: track pages consumed, private to vmemmap_populate()
*/
struct vmem_altmap {
- const unsigned long base_pfn;
+ unsigned long base_pfn;
+ const unsigned long end_pfn;
const unsigned long reserve;
unsigned long free;
unsigned long align;
unsigned long alloc;
};
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
+/*
+ * Specialize ZONE_DEVICE memory into multiple types, each with a different
+ * usage.
+ *
+ * MEMORY_DEVICE_PRIVATE:
+ * Device memory that is not directly addressable by the CPU: CPU can neither
+ * read nor write private memory. In this case, we do still have struct pages
+ * backing the device memory. Doing so simplifies the implementation, but it is
+ * important to remember that there are certain points at which the struct page
+ * must be treated as an opaque object, rather than a "normal" struct page.
+ *
+ * A more complete discussion of unaddressable memory may be found in
+ * include/linux/hmm.h and Documentation/mm/hmm.rst.
+ *
+ * MEMORY_DEVICE_COHERENT:
+ * Device memory that is cache coherent from device and CPU point of view. This
+ * is used on platforms that have an advanced system bus (like CAPI or CXL). A
+ * driver can hotplug the device memory using ZONE_DEVICE and with that memory
+ * type. Any page of a process can be migrated to such memory. However no one
+ * should be allowed to pin such memory so that it can always be evicted.
+ *
+ * MEMORY_DEVICE_FS_DAX:
+ * Host memory that has similar access semantics as System RAM i.e. DMA
+ * coherent and supports page pinning. In support of coordinating page
+ * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a
+ * wakeup event whenever a page is unpinned and becomes idle. This
+ * wakeup is used to coordinate physical address space management (ex:
+ * fs truncate/hole punch) vs pinned pages (ex: device dma).
+ *
+ * MEMORY_DEVICE_GENERIC:
+ * Host memory that has similar access semantics as System RAM i.e. DMA
+ * coherent and supports page pinning. This is for example used by DAX devices
+ * that expose memory using a character device.
+ *
+ * MEMORY_DEVICE_PCI_P2PDMA:
+ * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
+ * transactions.
+ */
+enum memory_type {
+ /* 0 is reserved to catch uninitialized type fields */
+ MEMORY_DEVICE_PRIVATE = 1,
+ MEMORY_DEVICE_COHERENT,
+ MEMORY_DEVICE_FS_DAX,
+ MEMORY_DEVICE_GENERIC,
+ MEMORY_DEVICE_PCI_P2PDMA,
+};
-#ifdef CONFIG_ZONE_DEVICE
-struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
-#else
-static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
-{
- return NULL;
-}
-#endif
+struct dev_pagemap_ops {
+ /*
+ * Called once the folio refcount reaches 0. The reference count will be
+ * reset to one by the core code after the method is called to prepare
+ * for handing out the folio again.
+ */
+ void (*folio_free)(struct folio *folio);
+
+ /*
+ * Used for private (un-addressable) device memory only. Must migrate
+ * the page back to a CPU accessible page.
+ */
+ vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
+
+ /*
+ * Handle the memory failure happens on a range of pfns. Notify the
+ * processes who are using these pfns, and try to recover the data on
+ * them if necessary. The mf_flags is finally passed to the recover
+ * function through the whole notify routine.
+ *
+ * When this is not implemented, or it returns -EOPNOTSUPP, the caller
+ * will fall back to a common handler called mf_generic_kill_procs().
+ */
+ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+ unsigned long nr_pages, int mf_flags);
+
+ /*
+ * Used for private (un-addressable) device memory only.
+ * This callback is used when a folio is split into
+ * a smaller folio
+ */
+ void (*folio_split)(struct folio *head, struct folio *tail);
+};
+
+#define PGMAP_ALTMAP_VALID (1 << 0)
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @altmap: pre-allocated/reserved memory for vmemmap allocations
- * @res: physical address range covered by @ref
* @ref: reference count that pins the devm_memremap_pages() mapping
- * @dev: host device of the mapping for debug
+ * @done: completion for @ref
+ * @type: memory type: see MEMORY_* above in memremap.h
+ * @flags: PGMAP_* flags to specify detailed behavior
+ * @vmemmap_shift: structural definition of how the vmemmap page metadata
+ * is populated, specifically the metadata page order.
+ * A zero value (default) uses base pages as the vmemmap metadata
+ * representation. A bigger value will set up compound struct pages
+ * of the requested order value.
+ * @ops: method table
+ * @owner: an opaque pointer identifying the entity that manages this
+ * instance. Used by various helpers to make sure that no
+ * foreign ZONE_DEVICE memory is accessed.
+ * @nr_range: number of ranges to be mapped
+ * @range: range to be mapped when nr_range == 1
+ * @ranges: array of ranges to be mapped when nr_range > 1
*/
struct dev_pagemap {
- struct vmem_altmap *altmap;
- const struct resource *res;
- struct percpu_ref *ref;
- struct device *dev;
+ struct vmem_altmap altmap;
+ struct percpu_ref ref;
+ struct completion done;
+ enum memory_type type;
+ unsigned int flags;
+ unsigned long vmemmap_shift;
+ const struct dev_pagemap_ops *ops;
+ void *owner;
+ int nr_range;
+ union {
+ struct range range;
+ DECLARE_FLEX_ARRAY(struct range, ranges);
+ };
};
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+ return pgmap->ops && pgmap->ops->memory_failure;
+}
+
+static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
+{
+ if (pgmap->flags & PGMAP_ALTMAP_VALID)
+ return &pgmap->altmap;
+ return NULL;
+}
+
+static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
+{
+ return 1 << pgmap->vmemmap_shift;
+}
+
+static inline bool folio_is_device_private(const struct folio *folio)
+{
+ return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_PRIVATE;
+}
+
+static inline bool is_device_private_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ folio_is_device_private(page_folio(page));
+}
+
+static inline bool folio_is_pci_p2pdma(const struct folio *folio)
+{
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+}
+
+static inline void *folio_zone_device_data(const struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
+ return folio->page.zone_device_data;
+}
+
+static inline void folio_set_zone_device_data(struct folio *folio, void *data)
+{
+ VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
+ folio->page.zone_device_data = data;
+}
+
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ folio_is_pci_p2pdma(page_folio(page));
+}
+
+static inline bool folio_is_device_coherent(const struct folio *folio)
+{
+ return folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_COHERENT;
+}
+
+static inline bool is_device_coherent_page(const struct page *page)
+{
+ return folio_is_device_coherent(page_folio(page));
+}
+
+static inline bool folio_is_fsdax(const struct folio *folio)
+{
+ return folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_FS_DAX;
+}
+
+static inline bool is_fsdax_page(const struct page *page)
+{
+ return folio_is_fsdax(page_folio(page));
+}
+
#ifdef CONFIG_ZONE_DEVICE
-void *devm_memremap_pages(struct device *dev, struct resource *res,
- struct percpu_ref *ref, struct vmem_altmap *altmap);
-struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
+void zone_device_page_init(struct page *page, unsigned int order);
+void *memremap_pages(struct dev_pagemap *pgmap, int nid);
+void memunmap_pages(struct dev_pagemap *pgmap);
+void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
+bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
+
+unsigned long memremap_compat_align(void);
+
+static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
+{
+ zone_device_page_init(&folio->page, order);
+ if (order)
+ folio_set_large_rmappable(folio);
+}
+
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
+ if (folio_is_device_private(original_folio)) {
+ if (!original_folio->pgmap->ops->folio_split) {
+ if (new_folio) {
+ new_folio->pgmap = original_folio->pgmap;
+ new_folio->page.mapping =
+ original_folio->page.mapping;
+ }
+ } else {
+ original_folio->pgmap->ops->folio_split(original_folio,
+ new_folio);
+ }
+ }
+}
+
#else
static inline void *devm_memremap_pages(struct device *dev,
- struct resource *res, struct percpu_ref *ref,
- struct vmem_altmap *altmap)
+ struct dev_pagemap *pgmap)
{
/*
* Fail attempts to call devm_memremap_pages() without
@@ -67,48 +271,37 @@ static inline void *devm_memremap_pages(struct device *dev,
return ERR_PTR(-ENXIO);
}
-static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
+static inline void devm_memunmap_pages(struct device *dev,
+ struct dev_pagemap *pgmap)
{
- return NULL;
}
-#endif
-/**
- * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
- * @pfn: page frame number to lookup page_map
- * @pgmap: optional known pgmap that already has a reference
- *
- * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
- * same mapping.
- */
-static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap)
+static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
{
- const struct resource *res = pgmap ? pgmap->res : NULL;
- resource_size_t phys = PFN_PHYS(pfn);
+ return NULL;
+}
- /*
- * In the cached case we're already holding a live reference so
- * we can simply do a blind increment
- */
- if (res && phys >= res->start && phys <= res->end) {
- percpu_ref_get(pgmap->ref);
- return pgmap;
- }
+static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
+{
+ return false;
+}
- /* fall back to slow path lookup */
- rcu_read_lock();
- pgmap = find_dev_pagemap(phys);
- if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
- pgmap = NULL;
- rcu_read_unlock();
+/* when memremap_pages() is disabled all archs can remap a single page */
+static inline unsigned long memremap_compat_align(void)
+{
+ return PAGE_SIZE;
+}
- return pgmap;
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
}
+#endif /* CONFIG_ZONE_DEVICE */
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
- percpu_ref_put(pgmap->ref);
+ percpu_ref_put(&pgmap->ref);
}
+
#endif /* _LINUX_MEMREMAP_H_ */
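To make the reworked interface concrete: a minimal sketch of how a driver might register device-private memory with the new embedded-struct dev_pagemap and ops table. All my_* names are invented for illustration, and the physical range is assumed to come from the driver; only the declarations visible in the hunk above are relied on.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/module.h>

/* Hypothetical fault handler: a real driver would migrate the
 * device-private page back to system RAM here. */
static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;		/* placeholder only */
}

static const struct dev_pagemap_ops my_pgmap_ops = {
	.migrate_to_ram = my_migrate_to_ram,
};

static int my_register_devmem(struct device *dev, u64 start, u64 size)
{
	struct dev_pagemap *pgmap;
	void *addr;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->range.start = start;
	pgmap->range.end = start + size - 1;
	pgmap->nr_range = 1;
	pgmap->ops = &my_pgmap_ops;
	pgmap->owner = THIS_MODULE;	/* any unique cookie works */

	/* one call replaces the old (dev, res, ref, altmap) variant */
	addr = devm_memremap_pages(dev, pgmap);
	return PTR_ERR_OR_ZERO(addr);
}

Note how the percpu ref and completion are now embedded in struct dev_pagemap, so put_dev_pagemap() drops &pgmap->ref directly instead of chasing a pointer.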
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index 690c35a9d4cc..107bdcbedf79 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Sony MemoryStick support
*
* Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef _MEMSTICK_H
@@ -285,6 +281,7 @@ struct memstick_host {
struct memstick_dev *card;
unsigned int retries;
+ bool removing;
/* Notify the host that some requests are pending. */
void (*request)(struct memstick_host *host);
@@ -292,11 +289,11 @@ struct memstick_host {
int (*set_param)(struct memstick_host *host,
enum memstick_param param,
int value);
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
struct memstick_driver {
- struct memstick_device_id *id_table;
+ const struct memstick_device_id *id_table;
int (*probe)(struct memstick_dev *card);
void (*remove)(struct memstick_dev *card);
int (*suspend)(struct memstick_dev *card,
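The new removing flag above gives host drivers a fence against late request submission during teardown. A hedged sketch of how a host driver might use it; the my_* wrapper and its private struct are invented, while memstick_remove_host() and memstick_free_host() are the existing core entry points:

#include <linux/memstick.h>

struct my_host_priv {
	struct memstick_host *msh;
};

static void my_host_teardown(struct my_host_priv *priv)
{
	/* Stop the core from issuing new requests to a dying host. */
	priv->msh->removing = true;
	memstick_remove_host(priv->msh);
	memstick_free_host(priv->msh);
}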
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index c118a7ec94d6..551ef1c367d6 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Marvell 88PM80x Interface
*
* Copyright (C) 2012 Marvell International Ltd.
* Qiao Zhou <zhouqiao@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_88PM80X_H
@@ -297,7 +294,7 @@ struct pm80x_chip {
struct i2c_client *client;
struct i2c_client *companion;
struct regmap *regmap;
- struct regmap_irq_chip *regmap_irq_chip;
+ const struct regmap_irq_chip *regmap_irq_chip;
struct regmap_irq_chip_data *irq_data;
int type;
int irq;
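Constifying regmap_irq_chip above lets the IRQ chip descriptor live in rodata. A hedged sketch of such a read-only descriptor; the register offsets and the single IRQ entry are invented for illustration, not taken from the 88PM80x datasheet:

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq my_pmic_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),	/* one made-up first-level IRQ */
};

static const struct regmap_irq_chip my_pmic_irq_chip = {
	.name		= "my-pmic",
	.irqs		= my_pmic_irqs,
	.num_irqs	= ARRAY_SIZE(my_pmic_irqs),
	.num_regs	= 1,
	.status_base	= 0x05,		/* invented offsets */
	.mask_base	= 0x0a,
};

An assignment like chip->regmap_irq_chip = &my_pmic_irq_chip now compiles cleanly because the pointer in struct pm80x_chip is const-qualified.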
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index cd97530205c2..6fa21791fc85 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Marvell 88PM860x Interface
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_88PM860X_H
@@ -475,13 +472,7 @@ extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
unsigned char);
-extern int pm860x_page_reg_read(struct i2c_client *, int);
extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char);
extern int pm860x_page_bulk_read(struct i2c_client *, int, int,
unsigned char *);
-extern int pm860x_page_bulk_write(struct i2c_client *, int, int,
- unsigned char *);
-extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char,
- unsigned char);
-
#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/88pm886.h b/include/linux/mfd/88pm886.h
new file mode 100644
index 000000000000..38892ba7b8a4
--- /dev/null
+++ b/include/linux/mfd/88pm886.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __MFD_88PM886_H
+#define __MFD_88PM886_H
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define PM886_A1_CHIP_ID 0xa1
+
+#define PM886_IRQ_ONKEY 0
+
+#define PM886_PAGE_OFFSET_REGULATORS 1
+#define PM886_PAGE_OFFSET_GPADC 2
+
+#define PM886_REG_ID 0x00
+
+#define PM886_REG_STATUS1 0x01
+#define PM886_ONKEY_STS1 BIT(0)
+
+#define PM886_REG_INT_STATUS1 0x05
+
+#define PM886_REG_INT_ENA_1 0x0a
+#define PM886_INT_ENA1_ONKEY BIT(0)
+
+#define PM886_REG_MISC_CONFIG1 0x14
+#define PM886_SW_PDOWN BIT(5)
+
+#define PM886_REG_MISC_CONFIG2 0x15
+#define PM886_INT_INV BIT(0)
+#define PM886_INT_CLEAR BIT(1)
+#define PM886_INT_RC 0x00
+#define PM886_INT_WC BIT(1)
+#define PM886_INT_MASK_MODE BIT(2)
+
+#define PM886_REG_RTC_CNT1 0xd1
+#define PM886_REG_RTC_CNT2 0xd2
+#define PM886_REG_RTC_CNT3 0xd3
+#define PM886_REG_RTC_CNT4 0xd4
+#define PM886_REG_RTC_SPARE1 0xea
+#define PM886_REG_RTC_SPARE2 0xeb
+#define PM886_REG_RTC_SPARE3 0xec
+#define PM886_REG_RTC_SPARE4 0xed
+#define PM886_REG_RTC_SPARE5 0xee
+#define PM886_REG_RTC_SPARE6 0xef
+
+#define PM886_REG_BUCK_EN 0x08
+#define PM886_REG_LDO_EN1 0x09
+#define PM886_REG_LDO_EN2 0x0a
+#define PM886_REG_LDO1_VOUT 0x20
+#define PM886_REG_LDO2_VOUT 0x26
+#define PM886_REG_LDO3_VOUT 0x2c
+#define PM886_REG_LDO4_VOUT 0x32
+#define PM886_REG_LDO5_VOUT 0x38
+#define PM886_REG_LDO6_VOUT 0x3e
+#define PM886_REG_LDO7_VOUT 0x44
+#define PM886_REG_LDO8_VOUT 0x4a
+#define PM886_REG_LDO9_VOUT 0x50
+#define PM886_REG_LDO10_VOUT 0x56
+#define PM886_REG_LDO11_VOUT 0x5c
+#define PM886_REG_LDO12_VOUT 0x62
+#define PM886_REG_LDO13_VOUT 0x68
+#define PM886_REG_LDO14_VOUT 0x6e
+#define PM886_REG_LDO15_VOUT 0x74
+#define PM886_REG_LDO16_VOUT 0x7a
+#define PM886_REG_BUCK1_VOUT 0xa5
+#define PM886_REG_BUCK2_VOUT 0xb3
+#define PM886_REG_BUCK3_VOUT 0xc1
+#define PM886_REG_BUCK4_VOUT 0xcf
+#define PM886_REG_BUCK5_VOUT 0xdd
+
+#define PM886_LDO_VSEL_MASK 0x0f
+#define PM886_BUCK_VSEL_MASK 0x7f
+
+/* GPADC enable/disable registers */
+#define PM886_REG_GPADC_CONFIG(n) (n)
+
+#define PM886_GPADC_VSC_EN BIT(0)
+#define PM886_GPADC_VBAT_EN BIT(1)
+#define PM886_GPADC_GNDDET1_EN BIT(3)
+#define PM886_GPADC_VBUS_EN BIT(4)
+#define PM886_GPADC_VCHG_PWR_EN BIT(5)
+#define PM886_GPADC_VCF_OUT_EN BIT(6)
+#define PM886_GPADC_CONFIG1_EN_ALL \
+ (PM886_GPADC_VSC_EN | \
+ PM886_GPADC_VBAT_EN | \
+ PM886_GPADC_GNDDET1_EN | \
+ PM886_GPADC_VBUS_EN | \
+ PM886_GPADC_VCHG_PWR_EN | \
+ PM886_GPADC_VCF_OUT_EN)
+
+#define PM886_GPADC_TINT_EN BIT(0)
+#define PM886_GPADC_PMODE_EN BIT(1)
+#define PM886_GPADC_GPADC0_EN BIT(2)
+#define PM886_GPADC_GPADC1_EN BIT(3)
+#define PM886_GPADC_GPADC2_EN BIT(4)
+#define PM886_GPADC_GPADC3_EN BIT(5)
+#define PM886_GPADC_MIC_DET_EN BIT(6)
+#define PM886_GPADC_CONFIG2_EN_ALL \
+ (PM886_GPADC_TINT_EN | \
+ PM886_GPADC_GPADC0_EN | \
+ PM886_GPADC_GPADC1_EN | \
+ PM886_GPADC_GPADC2_EN | \
+ PM886_GPADC_GPADC3_EN | \
+ PM886_GPADC_MIC_DET_EN)
+
+/* No CONFIG3_EN_ALL because this is the only bit there. */
+#define PM886_GPADC_GND_DET2_EN BIT(0)
+
+/* GPADC channel registers */
+#define PM886_REG_GPADC_VSC 0x40
+#define PM886_REG_GPADC_VCHG_PWR 0x4c
+#define PM886_REG_GPADC_VCF_OUT 0x4e
+#define PM886_REG_GPADC_TINT 0x50
+#define PM886_REG_GPADC_GPADC0 0x54
+#define PM886_REG_GPADC_GPADC1 0x56
+#define PM886_REG_GPADC_GPADC2 0x58
+#define PM886_REG_GPADC_VBAT 0xa0
+#define PM886_REG_GPADC_GND_DET1 0xa4
+#define PM886_REG_GPADC_GND_DET2 0xa6
+#define PM886_REG_GPADC_VBUS 0xa8
+#define PM886_REG_GPADC_GPADC3 0xaa
+#define PM886_REG_GPADC_MIC_DET 0xac
+#define PM886_REG_GPADC_VBAT_SLP 0xb0
+
+/* VBAT_SLP is the last register and is 2 bytes wide like other channels. */
+#define PM886_GPADC_MAX_REGISTER (PM886_REG_GPADC_VBAT_SLP + 1)
+
+#define PM886_GPADC_BIAS_LEVELS 16
+#define PM886_GPADC_INDEX_TO_BIAS_uA(i) (1 + (i) * 5)
+
+struct pm886_chip {
+ struct i2c_client *client;
+ unsigned int chip_id;
+ struct regmap *regmap;
+};
+#endif /* __MFD_88PM886_H */
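As a hedged illustration of the header just added, the two my_* helpers below check the chip ID and request a software power-down using only the definitions above plus the standard regmap accessors; the helpers themselves are not part of the patch.

#include <linux/errno.h>
#include <linux/mfd/88pm886.h>
#include <linux/regmap.h>

static int my_pm886_check_id(struct pm886_chip *chip)
{
	unsigned int id;
	int ret;

	ret = regmap_read(chip->regmap, PM886_REG_ID, &id);
	if (ret)
		return ret;
	if (id != PM886_A1_CHIP_ID)
		return -ENODEV;	/* only the A1 stepping is listed above */
	chip->chip_id = id;
	return 0;
}

static int my_pm886_poweroff(struct pm886_chip *chip)
{
	/* SW_PDOWN in MISC_CONFIG1 requests a software power-down. */
	return regmap_update_bits(chip->regmap, PM886_REG_MISC_CONFIG1,
				  PM886_SW_PDOWN, PM886_SW_PDOWN);
}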
diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h
index f7316c29bdec..c7a3c53eba68 100644
--- a/include/linux/mfd/aat2870.h
+++ b/include/linux/mfd/aat2870.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mfd/aat2870.h
*
* Copyright (c) 2011, NVIDIA Corporation.
* Author: Jin Park <jinyoungp@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef __LINUX_MFD_AAT2870_H
@@ -146,10 +133,6 @@ struct aat2870_data {
int (*read)(struct aat2870_data *aat2870, u8 addr, u8 *val);
int (*write)(struct aat2870_data *aat2870, u8 addr, u8 val);
int (*update)(struct aat2870_data *aat2870, u8 addr, u8 mask, u8 val);
-
- /* for debugfs */
- struct dentry *dentry_root;
- struct dentry *dentry_reg;
};
struct aat2870_subdev_info {
diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h
deleted file mode 100644
index afd3080bde24..000000000000
--- a/include/linux/mfd/ab3100.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * AB3100 core access functions
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- *
- */
-
-#include <linux/regulator/machine.h>
-
-struct device;
-
-#ifndef MFD_AB3100_H
-#define MFD_AB3100_H
-
-
-#define AB3100_P1A 0xc0
-#define AB3100_P1B 0xc1
-#define AB3100_P1C 0xc2
-#define AB3100_P1D 0xc3
-#define AB3100_P1E 0xc4
-#define AB3100_P1F 0xc5
-#define AB3100_P1G 0xc6
-#define AB3100_R2A 0xc7
-#define AB3100_R2B 0xc8
-
-/*
- * AB3100, EVENTA1, A2 and A3 event register flags
- * these are catenated into a single 32-bit flag in the code
- * for event notification broadcasts.
- */
-#define AB3100_EVENTA1_ONSWA (0x01<<16)
-#define AB3100_EVENTA1_ONSWB (0x02<<16)
-#define AB3100_EVENTA1_ONSWC (0x04<<16)
-#define AB3100_EVENTA1_DCIO (0x08<<16)
-#define AB3100_EVENTA1_OVER_TEMP (0x10<<16)
-#define AB3100_EVENTA1_SIM_OFF (0x20<<16)
-#define AB3100_EVENTA1_VBUS (0x40<<16)
-#define AB3100_EVENTA1_VSET_USB (0x80<<16)
-
-#define AB3100_EVENTA2_READY_TX (0x01<<8)
-#define AB3100_EVENTA2_READY_RX (0x02<<8)
-#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8)
-#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8)
-#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8)
-#define AB3100_EVENTA2_MIDR (0x20<<8)
-#define AB3100_EVENTA2_BATTERY_REM (0x40<<8)
-#define AB3100_EVENTA2_ALARM (0x80<<8)
-
-#define AB3100_EVENTA3_ADC_TRIG5 (0x01)
-#define AB3100_EVENTA3_ADC_TRIG4 (0x02)
-#define AB3100_EVENTA3_ADC_TRIG3 (0x04)
-#define AB3100_EVENTA3_ADC_TRIG2 (0x08)
-#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10)
-#define AB3100_EVENTA3_ADC_TRIGVTX (0x20)
-#define AB3100_EVENTA3_ADC_TRIG1 (0x40)
-#define AB3100_EVENTA3_ADC_TRIG0 (0x80)
-
-/* AB3100, STR register flags */
-#define AB3100_STR_ONSWA (0x01)
-#define AB3100_STR_ONSWB (0x02)
-#define AB3100_STR_ONSWC (0x04)
-#define AB3100_STR_DCIO (0x08)
-#define AB3100_STR_BOOT_MODE (0x10)
-#define AB3100_STR_SIM_OFF (0x20)
-#define AB3100_STR_BATT_REMOVAL (0x40)
-#define AB3100_STR_VBUS (0x80)
-
-/*
- * AB3100 contains 8 regulators, one external regulator controller
- * and a buck converter, further the LDO E and buck converter can
- * have separate settings if they are in sleep mode, this is
- * modeled as a separate regulator.
- */
-#define AB3100_NUM_REGULATORS 10
-
-/**
- * struct ab3100
- * @access_mutex: lock out concurrent accesses to the AB3100 registers
- * @dev: pointer to the containing device
- * @i2c_client: I2C client for this chip
- * @testreg_client: secondary client for test registers
- * @chip_name: name of this chip variant
- * @chip_id: 8 bit chip ID for this chip variant
- * @event_subscribers: event subscribers are listed here
- * @startup_events: a copy of the first reading of the event registers
- * @startup_events_read: whether the first events have been read
- *
- * This struct is PRIVATE and devices using it should NOT
- * access ANY fields. It is used as a token for calling the
- * AB3100 functions.
- */
-struct ab3100 {
- struct mutex access_mutex;
- struct device *dev;
- struct i2c_client *i2c_client;
- struct i2c_client *testreg_client;
- char chip_name[32];
- u8 chip_id;
- struct blocking_notifier_head event_subscribers;
- u8 startup_events[3];
- bool startup_events_read;
-};
-
-/**
- * struct ab3100_platform_data
- * Data supplied to initialize board connections to the AB3100
- * @reg_constraints: regulator constraints for target board
- * the order of these constraints are: LDO A, C, D, E,
- * F, G, H, K, EXT and BUCK.
- * @reg_initvals: initial values for the regulator registers
- * plus two sleep settings for LDO E and the BUCK converter.
- * exactly AB3100_NUM_REGULATORS+2 values must be sent in.
- * Order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK,
- * BUCK sleep, LDO D. (LDO D need to be initialized last.)
- * @external_voltage: voltage level of the external regulator.
- */
-struct ab3100_platform_data {
- struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS];
- u8 reg_initvals[AB3100_NUM_REGULATORS+2];
- int external_voltage;
-};
-
-int ab3100_event_register(struct ab3100 *ab3100,
- struct notifier_block *nb);
-int ab3100_event_unregister(struct ab3100 *ab3100,
- struct notifier_block *nb);
-
-#endif /* MFD_AB3100_H */
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 44412c9d26e1..7f07cfe44753 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2007-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
*
* ABX500 core access functions.
* The abx500 interface is used for the Analog Baseband chips.
@@ -28,283 +28,6 @@ struct abx500_init_settings {
u8 setting;
};
-/* Battery driver related data */
-/*
- * ADC for the battery thermistor.
- * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
- * with a NTC resistor to both identify the battery and to measure its
- * temperature. Different phone manufactures uses different techniques to both
- * identify the battery and to read its temperature.
- */
-enum abx500_adc_therm {
- ABx500_ADC_THERM_BATCTRL,
- ABx500_ADC_THERM_BATTEMP,
-};
-
-/**
- * struct abx500_res_to_temp - defines one point in a temp to res curve. To
- * be used in battery packs that combines the identification resistor with a
- * NTC resistor.
- * @temp: battery pack temperature in Celsius
- * @resist: NTC resistor net total resistance
- */
-struct abx500_res_to_temp {
- int temp;
- int resist;
-};
-
-/**
- * struct abx500_v_to_cap - Table for translating voltage to capacity
- * @voltage: Voltage in mV
- * @capacity: Capacity in percent
- */
-struct abx500_v_to_cap {
- int voltage;
- int capacity;
-};
-
-/* Forward declaration */
-struct abx500_fg;
-
-/**
- * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer: Time between measurements while recovering
- * @recovery_total_time: Total recovery time
- * @init_timer: Measurement interval during startup
- * @init_discard_time: Time we discard voltage measurement at startup
- * @init_total_time: Total init time during startup
- * @high_curr_time: Time current has to be high to go to recovery
- * @accu_charging: FG accumulation time while charging
- * @accu_high_curr: FG accumulation time in high current mode
- * @high_curr_threshold: High current threshold, in mA
- * @lowbat_threshold: Low battery threshold, in mV
- * @overbat_threshold: Over battery threshold, in mV
- * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
- * Resolution in 50 mV step.
- * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
- * Resolution in 50 mV step.
- * @user_cap_limit Capacity reported from user must be within this
- * limit to be considered as sane, in percentage
- * points.
- * @maint_thres This is the threshold where we stop reporting
- * battery full while in maintenance, in per cent
- * @pcut_enable: Enable power cut feature in ab8505
- * @pcut_max_time: Max time threshold
- * @pcut_flag_time: Flagtime threshold
- * @pcut_max_restart: Max number of restarts
- * @pcut_debounce_time: Sets battery debounce time
- */
-struct abx500_fg_parameters {
- int recovery_sleep_timer;
- int recovery_total_time;
- int init_timer;
- int init_discard_time;
- int init_total_time;
- int high_curr_time;
- int accu_charging;
- int accu_high_curr;
- int high_curr_threshold;
- int lowbat_threshold;
- int overbat_threshold;
- int battok_falling_th_sel0;
- int battok_raising_th_sel1;
- int user_cap_limit;
- int maint_thres;
- bool pcut_enable;
- u8 pcut_max_time;
- u8 pcut_flag_time;
- u8 pcut_max_restart;
- u8 pcut_debounce_time;
-};
-
-/**
- * struct abx500_charger_maximization - struct used by the board config.
- * @use_maxi: Enable maximization for this battery type
- * @maxi_chg_curr: Maximum charger current allowed
- * @maxi_wait_cycles: cycles to wait before setting charger current
- * @charger_curr_step delta between two charger current settings (mA)
- */
-struct abx500_maxim_parameters {
- bool ena_maxi;
- int chg_curr;
- int wait_cycles;
- int charger_curr_step;
-};
-
-/**
- * struct abx500_battery_type - different batteries supported
- * @name: battery technology
- * @resis_high: battery upper resistance limit
- * @resis_low: battery lower resistance limit
- * @charge_full_design: Maximum battery capacity in mAh
- * @nominal_voltage: Nominal voltage of the battery in mV
- * @termination_vol: max voltage upto which battery can be charged
- * @termination_curr battery charging termination current in mA
- * @recharge_cap battery capacity limit that will trigger a new
- * full charging cycle in the case where maintenan-
- * -ce charging has been disabled
- * @normal_cur_lvl: charger current in normal state in mA
- * @normal_vol_lvl: charger voltage in normal state in mV
- * @maint_a_cur_lvl: charger current in maintenance A state in mA
- * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
- * @maint_a_chg_timer_h: charge time in maintenance A state
- * @maint_b_cur_lvl: charger current in maintenance B state in mA
- * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
- * @maint_b_chg_timer_h: charge time in maintenance B state
- * @low_high_cur_lvl: charger current in temp low/high state in mA
- * @low_high_vol_lvl: charger voltage in temp low/high state in mV'
- * @battery_resistance: battery inner resistance in mOhm.
- * @n_r_t_tbl_elements: number of elements in r_to_t_tbl
- * @r_to_t_tbl: table containing resistance to temp points
- * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
- * @v_to_cap_tbl: Voltage to capacity (in %) table
- * @n_batres_tbl_elements number of elements in the batres_tbl
- * @batres_tbl battery internal resistance vs temperature table
- */
-struct abx500_battery_type {
- int name;
- int resis_high;
- int resis_low;
- int charge_full_design;
- int nominal_voltage;
- int termination_vol;
- int termination_curr;
- int recharge_cap;
- int normal_cur_lvl;
- int normal_vol_lvl;
- int maint_a_cur_lvl;
- int maint_a_vol_lvl;
- int maint_a_chg_timer_h;
- int maint_b_cur_lvl;
- int maint_b_vol_lvl;
- int maint_b_chg_timer_h;
- int low_high_cur_lvl;
- int low_high_vol_lvl;
- int battery_resistance;
- int n_temp_tbl_elements;
- const struct abx500_res_to_temp *r_to_t_tbl;
- int n_v_cap_tbl_elements;
- const struct abx500_v_to_cap *v_to_cap_tbl;
- int n_batres_tbl_elements;
- const struct batres_vs_temp *batres_tbl;
-};
-
-/**
- * struct abx500_bm_capacity_levels - abx500 capacity level data
- * @critical: critical capacity level in percent
- * @low: low capacity level in percent
- * @normal: normal capacity level in percent
- * @high: high capacity level in percent
- * @full: full capacity level in percent
- */
-struct abx500_bm_capacity_levels {
- int critical;
- int low;
- int normal;
- int high;
- int full;
-};
-
-/**
- * struct abx500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max: maximum allowed USB charger voltage in mV
- * @usb_curr_max: maximum allowed USB charger current in mA
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct abx500_bm_charger_parameters {
- int usb_volt_max;
- int usb_curr_max;
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct abx500_bm_data - abx500 battery management data
- * @temp_under under this temp, charging is stopped
- * @temp_low between this temp and temp_under charging is reduced
- * @temp_high between this temp and temp_over charging is reduced
- * @temp_over over this temp, charging is stopped
- * @temp_now present battery temperature
- * @temp_interval_chg temperature measurement interval in s when charging
- * @temp_interval_nochg temperature measurement interval in s when not charging
- * @main_safety_tmr_h safety timer for main charger
- * @usb_safety_tmr_h safety timer for usb charger
- * @bkup_bat_v voltage which we charge the backup battery with
- * @bkup_bat_i current which we charge the backup battery with
- * @no_maintenance indicates that maintenance charging is disabled
- * @capacity_scaling indicates whether capacity scaling is to be used
- * @abx500_adc_therm placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat flag to enable charging of unknown batteries
- * @enable_overshoot flag to enable VBAT overshoot control
- * @auto_trig flag to enable auto adc trigger
- * @fg_res resistance of FG resistor in 0.1mOhm
- * @n_btypes number of elements in array bat_type
- * @batt_id index of the identified battery in array bat_type
- * @interval_charging charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis temperature hysteresis
- * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
- * @n_chg_out_curr number of elements in array chg_output_curr
- * @n_chg_in_curr number of elements in array chg_input_curr
- * @chg_output_curr charger output current level map
- * @chg_input_curr charger input current level map
- * @maxi maximization parameters
- * @cap_levels capacity in percent for the different capacity levels
- * @bat_type table of supported battery types
- * @chg_params charger parameters
- * @fg_params fuel gauge parameters
- */
-struct abx500_bm_data {
- int temp_under;
- int temp_low;
- int temp_high;
- int temp_over;
- int temp_now;
- int temp_interval_chg;
- int temp_interval_nochg;
- int main_safety_tmr_h;
- int usb_safety_tmr_h;
- int bkup_bat_v;
- int bkup_bat_i;
- bool autopower_cfg;
- bool ac_enabled;
- bool usb_enabled;
- bool usb_power_path;
- bool no_maintenance;
- bool capacity_scaling;
- bool chg_unknown_bat;
- bool enable_overshoot;
- bool auto_trig;
- enum abx500_adc_therm adc_therm;
- int fg_res;
- int n_btypes;
- int batt_id;
- int interval_charging;
- int interval_not_charging;
- int temp_hysteresis;
- int gnd_lift_resistance;
- int n_chg_out_curr;
- int n_chg_in_curr;
- int *chg_output_curr;
- int *chg_input_curr;
- const struct abx500_maxim_parameters *maxi;
- const struct abx500_bm_capacity_levels *cap_levels;
- struct abx500_battery_type *bat_type;
- const struct abx500_bm_charger_parameters *chg_params;
- const struct abx500_fg_parameters *fg_params;
-};
-
-enum {
- NTC_EXTERNAL = 0,
- NTC_INTERNAL,
-};
-
-int ab8500_bm_of_probe(struct device *dev,
- struct device_node *np,
- struct abx500_bm_data *bm);
-
int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
u8 value);
int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
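The register accessors kept by this hunk remain the public abx500 interface. A hedged read-modify-write sketch; the bank, register, and bit values are made up, and the get accessor is assumed to take a u8 pointer for the value, matching the declaration above:

#include <linux/mfd/abx500.h>

static int my_abx500_set_bit0(struct device *dev)
{
	u8 val;
	int ret;

	ret = abx500_get_register_interruptible(dev, 0x02, 0x01, &val);
	if (ret)
		return ret;
	return abx500_set_register_interruptible(dev, 0x02, 0x01,
						 val | 0x01);
}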
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
deleted file mode 100644
index e63681eb6c62..000000000000
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright ST-Ericsson 2012.
- *
- * Author: Arun Murthy <arun.murthy@stericsson.com>
- * Licensed under GPLv2.
- */
-
-#ifndef _AB8500_BM_H
-#define _AB8500_BM_H
-
-#include <linux/kernel.h>
-#include <linux/mfd/abx500.h>
-
-/*
- * System control 2 register offsets.
- * bank = 0x02
- */
-#define AB8500_MAIN_WDOG_CTRL_REG 0x01
-#define AB8500_LOW_BAT_REG 0x03
-#define AB8500_BATT_OK_REG 0x04
-/*
- * USB/ULPI register offsets
- * Bank : 0x5
- */
-#define AB8500_USB_LINE_STAT_REG 0x80
-#define AB8500_USB_LINE_CTRL2_REG 0x82
-#define AB8500_USB_LINK1_STAT_REG 0x94
-
-/*
- * Charger / status register offfsets
- * Bank : 0x0B
- */
-#define AB8500_CH_STATUS1_REG 0x00
-#define AB8500_CH_STATUS2_REG 0x01
-#define AB8500_CH_USBCH_STAT1_REG 0x02
-#define AB8500_CH_USBCH_STAT2_REG 0x03
-#define AB8540_CH_USBCH_STAT3_REG 0x04
-#define AB8500_CH_STAT_REG 0x05
-
-/*
- * Charger / control register offfsets
- * Bank : 0x0B
- */
-#define AB8500_CH_VOLT_LVL_REG 0x40
-#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/
-#define AB8500_CH_OPT_CRNTLVL_REG 0x42
-#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/
-#define AB8500_CH_WD_TIMER_REG 0x50
-#define AB8500_CHARG_WD_CTRL 0x51
-#define AB8500_BTEMP_HIGH_TH 0x52
-#define AB8500_LED_INDICATOR_PWM_CTRL 0x53
-#define AB8500_LED_INDICATOR_PWM_DUTY 0x54
-#define AB8500_BATT_OVV 0x55
-#define AB8500_CHARGER_CTRL 0x56
-#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/
-
-/*
- * Charger / main control register offsets
- * Bank : 0x0B
- */
-#define AB8500_MCH_CTRL1 0x80
-#define AB8500_MCH_CTRL2 0x81
-#define AB8500_MCH_IPT_CURLVL_REG 0x82
-#define AB8500_CH_WD_REG 0x83
-
-/*
- * Charger / USB control register offsets
- * Bank : 0x0B
- */
-#define AB8500_USBCH_CTRL1_REG 0xC0
-#define AB8500_USBCH_CTRL2_REG 0xC1
-#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2
-#define AB8540_USB_PP_MODE_REG 0xC5
-#define AB8540_USB_PP_CHR_REG 0xC6
-
-/*
- * Gas Gauge register offsets
- * Bank : 0x0C
- */
-#define AB8500_GASG_CC_CTRL_REG 0x00
-#define AB8500_GASG_CC_ACCU1_REG 0x01
-#define AB8500_GASG_CC_ACCU2_REG 0x02
-#define AB8500_GASG_CC_ACCU3_REG 0x03
-#define AB8500_GASG_CC_ACCU4_REG 0x04
-#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05
-#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06
-#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07
-#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08
-#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09
-#define AB8500_GASG_CC_OFFSET_REG 0x0A
-#define AB8500_GASG_CC_NCOV_ACCU 0x10
-#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11
-#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12
-#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13
-#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14
-
-/*
- * Interrupt register offsets
- * Bank : 0x0E
- */
-#define AB8500_IT_SOURCE2_REG 0x01
-#define AB8500_IT_SOURCE21_REG 0x14
-
-/*
- * RTC register offsets
- * Bank: 0x0F
- */
-#define AB8500_RTC_BACKUP_CHG_REG 0x0C
-#define AB8500_RTC_CC_CONF_REG 0x01
-#define AB8500_RTC_CTRL_REG 0x0B
-#define AB8500_RTC_CTRL1_REG 0x11
-
-/*
- * OTP register offsets
- * Bank : 0x15
- */
-#define AB8500_OTP_CONF_15 0x0E
-
-/* GPADC constants from AB8500 spec, UM0836 */
-#define ADC_RESOLUTION 1024
-#define ADC_CH_MAIN_MIN 0
-#define ADC_CH_MAIN_MAX 20030
-#define ADC_CH_VBUS_MIN 0
-#define ADC_CH_VBUS_MAX 20030
-#define ADC_CH_VBAT_MIN 2300
-#define ADC_CH_VBAT_MAX 4800
-#define ADC_CH_BKBAT_MIN 0
-#define ADC_CH_BKBAT_MAX 3200
-
-/* Main charge i/p current */
-#define MAIN_CH_IP_CUR_0P9A 0x80
-#define MAIN_CH_IP_CUR_1P0A 0x90
-#define MAIN_CH_IP_CUR_1P1A 0xA0
-#define MAIN_CH_IP_CUR_1P2A 0xB0
-#define MAIN_CH_IP_CUR_1P3A 0xC0
-#define MAIN_CH_IP_CUR_1P4A 0xD0
-#define MAIN_CH_IP_CUR_1P5A 0xE0
-
-/* ChVoltLevel */
-#define CH_VOL_LVL_3P5 0x00
-#define CH_VOL_LVL_4P0 0x14
-#define CH_VOL_LVL_4P05 0x16
-#define CH_VOL_LVL_4P1 0x1B
-#define CH_VOL_LVL_4P15 0x20
-#define CH_VOL_LVL_4P2 0x25
-#define CH_VOL_LVL_4P6 0x4D
-
-/* ChOutputCurrentLevel */
-#define CH_OP_CUR_LVL_0P1 0x00
-#define CH_OP_CUR_LVL_0P2 0x01
-#define CH_OP_CUR_LVL_0P3 0x02
-#define CH_OP_CUR_LVL_0P4 0x03
-#define CH_OP_CUR_LVL_0P5 0x04
-#define CH_OP_CUR_LVL_0P6 0x05
-#define CH_OP_CUR_LVL_0P7 0x06
-#define CH_OP_CUR_LVL_0P8 0x07
-#define CH_OP_CUR_LVL_0P9 0x08
-#define CH_OP_CUR_LVL_1P4 0x0D
-#define CH_OP_CUR_LVL_1P5 0x0E
-#define CH_OP_CUR_LVL_1P6 0x0F
-#define CH_OP_CUR_LVL_2P 0x3F
-
-/* BTEMP High thermal limits */
-#define BTEMP_HIGH_TH_57_0 0x00
-#define BTEMP_HIGH_TH_52 0x01
-#define BTEMP_HIGH_TH_57_1 0x02
-#define BTEMP_HIGH_TH_62 0x03
-
-/* current is mA */
-#define USB_0P1A 100
-#define USB_0P2A 200
-#define USB_0P3A 300
-#define USB_0P4A 400
-#define USB_0P5A 500
-
-#define LOW_BAT_3P1V 0x20
-#define LOW_BAT_2P3V 0x00
-#define LOW_BAT_RESET 0x01
-#define LOW_BAT_ENABLE 0x01
-
-/* Backup battery constants */
-#define BUP_ICH_SEL_50UA 0x00
-#define BUP_ICH_SEL_150UA 0x04
-#define BUP_ICH_SEL_300UA 0x08
-#define BUP_ICH_SEL_700UA 0x0C
-
-enum bup_vch_sel {
- BUP_VCH_SEL_2P5V,
- BUP_VCH_SEL_2P6V,
- BUP_VCH_SEL_2P8V,
- BUP_VCH_SEL_3P1V,
- /*
- * Note that the following 5 values 2.7v, 2.9v, 3.0v, 3.2v, 3.3v
- * are only available on ab8540. You can't choose these 5
- * voltage on ab8500/ab8505/ab9540.
- */
- BUP_VCH_SEL_2P7V,
- BUP_VCH_SEL_2P9V,
- BUP_VCH_SEL_3P0V,
- BUP_VCH_SEL_3P2V,
- BUP_VCH_SEL_3P3V,
-};
-
-#define BUP_VCH_RANGE 0x02
-#define VBUP33_VRTCN 0x01
-
-/* Battery OVV constants */
-#define BATT_OVV_ENA 0x02
-#define BATT_OVV_TH_3P7 0x00
-#define BATT_OVV_TH_4P75 0x01
-
-/* A value to indicate over voltage */
-#define BATT_OVV_VALUE 4750
-
-/* VBUS OVV constants */
-#define VBUS_OVV_SELECT_MASK 0x78
-#define VBUS_OVV_SELECT_5P6V 0x00
-#define VBUS_OVV_SELECT_5P7V 0x08
-#define VBUS_OVV_SELECT_5P8V 0x10
-#define VBUS_OVV_SELECT_5P9V 0x18
-#define VBUS_OVV_SELECT_6P0V 0x20
-#define VBUS_OVV_SELECT_6P1V 0x28
-#define VBUS_OVV_SELECT_6P2V 0x30
-#define VBUS_OVV_SELECT_6P3V 0x38
-
-#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04
-
-/* Fuel Gauge constants */
-#define RESET_ACCU 0x02
-#define READ_REQ 0x01
-#define CC_DEEP_SLEEP_ENA 0x02
-#define CC_PWR_UP_ENA 0x01
-#define CC_SAMPLES_40 0x28
-#define RD_NCONV_ACCU_REQ 0x01
-#define CC_CALIB 0x08
-#define CC_INTAVGOFFSET_ENA 0x10
-#define CC_MUXOFFSET 0x80
-#define CC_INT_CAL_N_AVG_MASK 0x60
-#define CC_INT_CAL_SAMPLES_16 0x40
-#define CC_INT_CAL_SAMPLES_8 0x20
-#define CC_INT_CAL_SAMPLES_4 0x00
-
-/* RTC constants */
-#define RTC_BUP_CH_ENA 0x10
-
-/* BatCtrl Current Source Constants */
-#define BAT_CTRL_7U_ENA 0x01
-#define BAT_CTRL_20U_ENA 0x02
-#define BAT_CTRL_18U_ENA 0x01
-#define BAT_CTRL_16U_ENA 0x02
-#define BAT_CTRL_60U_ENA 0x01
-#define BAT_CTRL_120U_ENA 0x02
-#define BAT_CTRL_CMP_ENA 0x04
-#define FORCE_BAT_CTRL_CMP_HIGH 0x08
-#define BAT_CTRL_PULL_UP_ENA 0x10
-
-/* Battery type */
-#define BATTERY_UNKNOWN 00
-
-/* Registers for pcut feature in ab8505 and ab9540 */
-#define AB8505_RTC_PCUT_CTL_STATUS_REG 0x12
-#define AB8505_RTC_PCUT_TIME_REG 0x13
-#define AB8505_RTC_PCUT_MAX_TIME_REG 0x14
-#define AB8505_RTC_PCUT_FLAG_TIME_REG 0x15
-#define AB8505_RTC_PCUT_RESTART_REG 0x16
-#define AB8505_RTC_PCUT_DEBOUNCE_REG 0x17
-
-/* USB Power Path constants for ab8540 */
-#define BUS_VSYS_VOL_SELECT_MASK 0x06
-#define BUS_VSYS_VOL_SELECT_3P6V 0x00
-#define BUS_VSYS_VOL_SELECT_3P325V 0x02
-#define BUS_VSYS_VOL_SELECT_3P9V 0x04
-#define BUS_VSYS_VOL_SELECT_4P3V 0x06
-#define BUS_POWER_PATH_MODE_ENA 0x01
-#define BUS_PP_PRECHG_CURRENT_MASK 0x0E
-#define BUS_POWER_PATH_PRECHG_ENA 0x01
-
-/**
- * struct res_to_temp - defines one point in a temp to res curve. To
- * be used in battery packs that combines the identification resistor with a
- * NTC resistor.
- * @temp: battery pack temperature in Celsius
- * @resist: NTC resistor net total resistance
- */
-struct res_to_temp {
- int temp;
- int resist;
-};
-
-/**
- * struct batres_vs_temp - defines one point in a temp vs battery internal
- * resistance curve.
- * @temp: battery pack temperature in Celsius
- * @resist: battery internal reistance in mOhm
- */
-struct batres_vs_temp {
- int temp;
- int resist;
-};
-
-/* Forward declaration */
-struct ab8500_fg;
-
-/**
- * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer: Time between measurements while recovering
- * @recovery_total_time: Total recovery time
- * @init_timer: Measurement interval during startup
- * @init_discard_time: Time we discard voltage measurement at startup
- * @init_total_time: Total init time during startup
- * @high_curr_time: Time current has to be high to go to recovery
- * @accu_charging: FG accumulation time while charging
- * @accu_high_curr: FG accumulation time in high current mode
- * @high_curr_threshold: High current threshold, in mA
- * @lowbat_threshold: Low battery threshold, in mV
- * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
- * Resolution in 50 mV step.
- * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
- * Resolution in 50 mV step.
- * @user_cap_limit Capacity reported from user must be within this
- * limit to be considered as sane, in percentage
- * points.
- * @maint_thres This is the threshold where we stop reporting
- * battery full while in maintenance, in per cent
- * @pcut_enable: Enable power cut feature in ab8505
- * @pcut_max_time: Max time threshold
- * @pcut_flag_time: Flagtime threshold
- * @pcut_max_restart: Max number of restarts
- * @pcut_debunce_time: Sets battery debounce time
- */
-struct ab8500_fg_parameters {
- int recovery_sleep_timer;
- int recovery_total_time;
- int init_timer;
- int init_discard_time;
- int init_total_time;
- int high_curr_time;
- int accu_charging;
- int accu_high_curr;
- int high_curr_threshold;
- int lowbat_threshold;
- int battok_falling_th_sel0;
- int battok_raising_th_sel1;
- int user_cap_limit;
- int maint_thres;
- bool pcut_enable;
- u8 pcut_max_time;
- u8 pcut_flag_time;
- u8 pcut_max_restart;
- u8 pcut_debunce_time;
-};
-
-/**
- * struct ab8500_charger_maximization - struct used by the board config.
- * @use_maxi: Enable maximization for this battery type
- * @maxi_chg_curr: Maximum charger current allowed
- * @maxi_wait_cycles: cycles to wait before setting charger current
- * @charger_curr_step delta between two charger current settings (mA)
- */
-struct ab8500_maxim_parameters {
- bool ena_maxi;
- int chg_curr;
- int wait_cycles;
- int charger_curr_step;
-};
-
-/**
- * struct ab8500_bm_capacity_levels - ab8500 capacity level data
- * @critical: critical capacity level in percent
- * @low: low capacity level in percent
- * @normal: normal capacity level in percent
- * @high: high capacity level in percent
- * @full: full capacity level in percent
- */
-struct ab8500_bm_capacity_levels {
- int critical;
- int low;
- int normal;
- int high;
- int full;
-};
-
-/**
- * struct ab8500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max: maximum allowed USB charger voltage in mV
- * @usb_curr_max: maximum allowed USB charger current in mA
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct ab8500_bm_charger_parameters {
- int usb_volt_max;
- int usb_curr_max;
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct ab8500_bm_data - ab8500 battery management data
- * @temp_under under this temp, charging is stopped
- * @temp_low between this temp and temp_under charging is reduced
- * @temp_high between this temp and temp_over charging is reduced
- * @temp_over over this temp, charging is stopped
- * @temp_interval_chg temperature measurement interval in s when charging
- * @temp_interval_nochg temperature measurement interval in s when not charging
- * @main_safety_tmr_h safety timer for main charger
- * @usb_safety_tmr_h safety timer for usb charger
- * @bkup_bat_v voltage which we charge the backup battery with
- * @bkup_bat_i current which we charge the backup battery with
- * @no_maintenance indicates that maintenance charging is disabled
- * @capacity_scaling indicates whether capacity scaling is to be used
- * @adc_therm placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat flag to enable charging of unknown batteries
- * @enable_overshoot flag to enable VBAT overshoot control
- * @fg_res resistance of FG resistor in 0.1mOhm
- * @n_btypes number of elements in array bat_type
- * @batt_id index of the identified battery in array bat_type
- * @interval_charging charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis temperature hysteresis
- * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
- * @maxi: maximization parameters
- * @cap_levels capacity in percent for the different capacity levels
- * @bat_type table of supported battery types
- * @chg_params charger parameters
- * @fg_params fuel gauge parameters
- */
-struct ab8500_bm_data {
- int temp_under;
- int temp_low;
- int temp_high;
- int temp_over;
- int temp_interval_chg;
- int temp_interval_nochg;
- int main_safety_tmr_h;
- int usb_safety_tmr_h;
- int bkup_bat_v;
- int bkup_bat_i;
- bool no_maintenance;
- bool capacity_scaling;
- bool chg_unknown_bat;
- bool enable_overshoot;
- enum abx500_adc_therm adc_therm;
- int fg_res;
- int n_btypes;
- int batt_id;
- int interval_charging;
- int interval_not_charging;
- int temp_hysteresis;
- int gnd_lift_resistance;
- const struct ab8500_maxim_parameters *maxi;
- const struct ab8500_bm_capacity_levels *cap_levels;
- const struct ab8500_bm_charger_parameters *chg_params;
- const struct ab8500_fg_parameters *fg_params;
-};
-
-struct ab8500_btemp;
-struct ab8500_gpadc;
-struct ab8500_fg;
-
-#ifdef CONFIG_AB8500_BM
-extern struct abx500_bm_data ab8500_bm_data;
-
-void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
-struct ab8500_btemp *ab8500_btemp_get(void);
-int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
-int ab8500_btemp_get_temp(struct ab8500_btemp *btemp);
-struct ab8500_fg *ab8500_fg_get(void);
-int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
-int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
-int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
-int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
-int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
-
-#else
-static struct abx500_bm_data ab8500_bm_data;
-#endif
-#endif /* _AB8500_BM_H */
diff --git a/include/linux/mfd/abx500/ab8500-codec.h b/include/linux/mfd/abx500/ab8500-codec.h
index d7079413def0..c19f505122ac 100644
--- a/include/linux/mfd/abx500/ab8500-codec.h
+++ b/include/linux/mfd/abx500/ab8500-codec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2012
*
@@ -5,10 +6,6 @@
* for ST-Ericsson.
*
* License terms:
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#ifndef AB8500_CORE_CODEC_H
diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h
deleted file mode 100644
index 49ded001049b..000000000000
--- a/include/linux/mfd/abx500/ab8500-gpadc.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2010 ST-Ericsson SA
- * Licensed under GPLv2.
- *
- * Author: Arun R Murthy <arun.murthy@stericsson.com>
- * Author: Daniel Willerud <daniel.willerud@stericsson.com>
- * Author: M'boumba Cedric Madianga <cedric.madianga@stericsson.com>
- */
-
-#ifndef _AB8500_GPADC_H
-#define _AB8500_GPADC_H
-
-/* GPADC source: From datasheet(ADCSwSel[4:0] in GPADCCtrl2
- * and ADCHwSel[4:0] in GPADCCtrl3 ) */
-#define BAT_CTRL 0x01
-#define BTEMP_BALL 0x02
-#define MAIN_CHARGER_V 0x03
-#define ACC_DETECT1 0x04
-#define ACC_DETECT2 0x05
-#define ADC_AUX1 0x06
-#define ADC_AUX2 0x07
-#define MAIN_BAT_V 0x08
-#define VBUS_V 0x09
-#define MAIN_CHARGER_C 0x0A
-#define USB_CHARGER_C 0x0B
-#define BK_BAT_V 0x0C
-#define DIE_TEMP 0x0D
-#define USB_ID 0x0E
-#define XTAL_TEMP 0x12
-#define VBAT_TRUE_MEAS 0x13
-#define BAT_CTRL_AND_IBAT 0x1C
-#define VBAT_MEAS_AND_IBAT 0x1D
-#define VBAT_TRUE_MEAS_AND_IBAT 0x1E
-#define BAT_TEMP_AND_IBAT 0x1F
-
-/* Virtual channel used only for ibat convertion to ampere
- * Battery current conversion (ibat) cannot be requested as a single conversion
- * but it is always in combination with other input requests
- */
-#define IBAT_VIRTUAL_CHANNEL 0xFF
-
-#define SAMPLE_1 1
-#define SAMPLE_4 4
-#define SAMPLE_8 8
-#define SAMPLE_16 16
-#define RISING_EDGE 0
-#define FALLING_EDGE 1
-
-/* Arbitrary ADC conversion type constants */
-#define ADC_SW 0
-#define ADC_HW 1
-
-struct ab8500_gpadc;
-
-struct ab8500_gpadc *ab8500_gpadc_get(char *name);
-int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
-static inline int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel)
-{
- return ab8500_gpadc_sw_hw_convert(gpadc, channel,
- SAMPLE_16, 0, 0, ADC_SW);
-}
-
-int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
-int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
- int *ibat);
-int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
- u8 channel, int ad_value);
-void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
- u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
- u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h);
-
-#endif /* _AB8500_GPADC_H */
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index 01024d1aed0e..825f6059d4e3 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef __AB8500_SYSCTRL_H
#define __AB8500_SYSCTRL_H
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index d33c245e75ca..76d326ea8eba 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
*
- * License Terms: GNU General Public License v2
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
*/
#ifndef MFD_AB8500_H
@@ -368,7 +368,6 @@ struct ab8500 {
int it_latchhier_num;
};
-struct ab8500_regulator_platform_data;
struct ab8500_codec_platform_data;
struct ab8500_sysctrl_platform_data;
@@ -376,19 +375,13 @@ struct ab8500_sysctrl_platform_data;
* struct ab8500_platform_data - AB8500 platform data
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
* @init: board-specific initialization after detection of ab8500
- * @regulator: machine-specific constraints for regulators
*/
struct ab8500_platform_data {
void (*init) (struct ab8500 *);
- struct ab8500_regulator_platform_data *regulator;
struct ab8500_codec_platform_data *codec;
struct ab8500_sysctrl_platform_data *sysctrl;
};
-extern int ab8500_init(struct ab8500 *ab8500,
- enum ab8500_version version);
-extern int ab8500_exit(struct ab8500 *ab8500);
-
extern int ab8500_suspend(struct ab8500 *ab8500);
static inline int is_ab8500(struct ab8500 *ab)
@@ -506,13 +499,7 @@ static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab)
void ab8500_override_turn_on_stat(u8 mask, u8 set);
-#ifdef CONFIG_AB8500_DEBUG
-extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
-void ab8500_dump_all_banks(struct device *dev);
-void ab8500_debug_register_interrupt(int line);
-#else
static inline void ab8500_dump_all_banks(struct device *dev) {}
static inline void ab8500_debug_register_interrupt(int line) {}
-#endif
#endif /* MFD_AB8500_H */
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
deleted file mode 100644
index 67703f23e7ba..000000000000
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2012
- * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _UX500_CHARGALG_H
-#define _UX500_CHARGALG_H
-
-#include <linux/power_supply.h>
-
-/*
- * Valid only for supplies of type:
- * - POWER_SUPPLY_TYPE_MAINS,
- * - POWER_SUPPLY_TYPE_USB,
- * because only them store as drv_data pointer to struct ux500_charger.
- */
-#define psy_to_ux500_charger(x) power_supply_get_drvdata(psy)
-
-/* Forward declaration */
-struct ux500_charger;
-
-struct ux500_charger_ops {
- int (*enable) (struct ux500_charger *, int, int, int);
- int (*check_enable) (struct ux500_charger *, int, int);
- int (*kick_wd) (struct ux500_charger *);
- int (*update_curr) (struct ux500_charger *, int);
- int (*pp_enable) (struct ux500_charger *, bool);
- int (*pre_chg_enable) (struct ux500_charger *, bool);
-};
-
-/**
- * struct ux500_charger - power supply ux500 charger sub class
- * @psy power supply base class
- * @ops ux500 charger operations
- * @max_out_volt maximum output charger voltage in mV
- * @max_out_curr maximum output charger current in mA
- * @enabled indicates if this charger is used or not
- * @external external charger unit (pm2xxx)
- * @power_path USB power path support
- */
-struct ux500_charger {
- struct power_supply *psy;
- struct ux500_charger_ops ops;
- int max_out_volt;
- int max_out_curr;
- int wdt_refresh;
- bool enabled;
- bool external;
- bool power_path;
-};
-
-extern struct blocking_notifier_head charger_notifier_list;
-
-#endif
diff --git a/include/linux/mfd/ac100.h b/include/linux/mfd/ac100.h
index 3c148f196b9f..88005c3a1b2d 100644
--- a/include/linux/mfd/ac100.h
+++ b/include/linux/mfd/ac100.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions and registers to access AC100 codec / RTC combo IC.
*
* Copyright (C) 2016 Chen-Yu Tsai
*
* Chen-Yu Tsai <wens@csie.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_AC100_H
diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h
index ac37558a4673..9a14f80ec4ad 100644
--- a/include/linux/mfd/adp5520.h
+++ b/include/linux/mfd/adp5520.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions and platform data for Analog Devices
* ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys)
*
* Copyright 2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
diff --git a/include/linux/mfd/adp5585.h b/include/linux/mfd/adp5585.h
new file mode 100644
index 000000000000..5237da6b4a9f
--- /dev/null
+++ b/include/linux/mfd/adp5585.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Analog Devices ADP5585 I/O expander, PWM controller and keypad controller
+ *
+ * Copyright 2022 NXP
+ * Copyright 2024 Ideas on Board Oy
+ */
+
+#ifndef __MFD_ADP5585_H_
+#define __MFD_ADP5585_H_
+
+#include <linux/bits.h>
+#include <linux/notifier.h>
+
+#define ADP5585_ID 0x00
+#define ADP5585_MAN_ID_VALUE 0x20
+#define ADP5585_MAN_ID_MASK GENMASK(7, 4)
+#define ADP5585_REV_ID_MASK GENMASK(3, 0)
+#define ADP5585_INT_STATUS 0x01
+#define ADP5585_OVRFLOW_INT BIT(2)
+#define ADP5585_EVENT_INT BIT(0)
+#define ADP5585_STATUS 0x02
+#define ADP5585_EC_MASK GENMASK(4, 0)
+#define ADP5585_FIFO_1 0x03
+#define ADP5585_KEV_EV_PRESS_MASK BIT(7)
+#define ADP5585_KEY_EVENT_MASK GENMASK(6, 0)
+#define ADP5585_FIFO_2 0x04
+#define ADP5585_FIFO_3 0x05
+#define ADP5585_FIFO_4 0x06
+#define ADP5585_FIFO_5 0x07
+#define ADP5585_FIFO_6 0x08
+#define ADP5585_FIFO_7 0x09
+#define ADP5585_FIFO_8 0x0a
+#define ADP5585_FIFO_9 0x0b
+#define ADP5585_FIFO_10 0x0c
+#define ADP5585_FIFO_11 0x0d
+#define ADP5585_FIFO_12 0x0e
+#define ADP5585_FIFO_13 0x0f
+#define ADP5585_FIFO_14 0x10
+#define ADP5585_FIFO_15 0x11
+#define ADP5585_FIFO_16 0x12
+#define ADP5585_EV_MAX (ADP5585_FIFO_16 - ADP5585_FIFO_1 + 1)
+#define ADP5585_GPI_INT_STAT_A 0x13
+#define ADP5585_GPI_INT_STAT_B 0x14
+#define ADP5585_GPI_STATUS_A 0x15
+#define ADP5585_GPI_STATUS_B 0x16
+#define ADP5585_RPULL_CONFIG_A 0x17
+#define ADP5585_RPULL_CONFIG_B 0x18
+#define ADP5585_RPULL_CONFIG_C 0x19
+#define ADP5585_RPULL_CONFIG_D 0x1a
+#define ADP5585_Rx_PULL_CFG_PU_300K 0
+#define ADP5585_Rx_PULL_CFG_PD_300K 1
+#define ADP5585_Rx_PULL_CFG_PU_100K 2
+#define ADP5585_Rx_PULL_CFG_DISABLE 3
+#define ADP5585_Rx_PULL_CFG_MASK 3
+#define ADP5585_GPI_INT_LEVEL_A 0x1b
+#define ADP5585_GPI_INT_LEVEL_B 0x1c
+#define ADP5585_GPI_EVENT_EN_A 0x1d
+#define ADP5585_GPI_EVENT_EN_B 0x1e
+#define ADP5585_GPI_INTERRUPT_EN_A 0x1f
+#define ADP5585_GPI_INTERRUPT_EN_B 0x20
+#define ADP5585_DEBOUNCE_DIS_A 0x21
+#define ADP5585_DEBOUNCE_DIS_B 0x22
+#define ADP5585_GPO_DATA_OUT_A 0x23
+#define ADP5585_GPO_DATA_OUT_B 0x24
+#define ADP5585_GPO_OUT_MODE_A 0x25
+#define ADP5585_GPO_OUT_MODE_B 0x26
+#define ADP5585_GPIO_DIRECTION_A 0x27
+#define ADP5585_GPIO_DIRECTION_B 0x28
+#define ADP5585_RESET1_EVENT_A 0x29
+#define ADP5585_RESET_EV_PRESS BIT(7)
+#define ADP5585_RESET1_EVENT_B 0x2a
+#define ADP5585_RESET1_EVENT_C 0x2b
+#define ADP5585_RESET2_EVENT_A 0x2c
+#define ADP5585_RESET2_EVENT_B 0x2d
+#define ADP5585_RESET_CFG 0x2e
+#define ADP5585_PWM_OFFT_LOW 0x2f
+#define ADP5585_PWM_OFFT_HIGH 0x30
+#define ADP5585_PWM_ONT_LOW 0x31
+#define ADP5585_PWM_ONT_HIGH 0x32
+#define ADP5585_PWM_CFG 0x33
+#define ADP5585_PWM_IN_AND BIT(2)
+#define ADP5585_PWM_MODE BIT(1)
+#define ADP5585_PWM_EN BIT(0)
+#define ADP5585_LOGIC_CFG 0x34
+#define ADP5585_LOGIC_FF_CFG 0x35
+#define ADP5585_LOGIC_INT_EVENT_EN 0x36
+#define ADP5585_POLL_PTIME_CFG 0x37
+#define ADP5585_PIN_CONFIG_A 0x38
+#define ADP5585_PIN_CONFIG_B 0x39
+#define ADP5585_PIN_CONFIG_C 0x3a
+#define ADP5585_PULL_SELECT BIT(7)
+#define ADP5585_C4_EXTEND_CFG_GPIO11 (0U << 6)
+#define ADP5585_C4_EXTEND_CFG_RESET2 (1U << 6)
+#define ADP5585_C4_EXTEND_CFG_MASK GENMASK(6, 6)
+#define ADP5585_R4_EXTEND_CFG_GPIO5 (0U << 5)
+#define ADP5585_R4_EXTEND_CFG_RESET1 (1U << 5)
+#define ADP5585_R4_EXTEND_CFG_MASK GENMASK(5, 5)
+#define ADP5585_R3_EXTEND_CFG_GPIO4 (0U << 2)
+#define ADP5585_R3_EXTEND_CFG_LC (1U << 2)
+#define ADP5585_R3_EXTEND_CFG_PWM_OUT (2U << 2)
+#define ADP5585_R3_EXTEND_CFG_MASK GENMASK(3, 2)
+#define ADP5585_R0_EXTEND_CFG_GPIO1 (0U << 0)
+#define ADP5585_R0_EXTEND_CFG_LY (1U << 0)
+#define ADP5585_R0_EXTEND_CFG_MASK GENMASK(0, 0)
+#define ADP5585_GENERAL_CFG 0x3b
+#define ADP5585_OSC_EN BIT(7)
+#define ADP5585_OSC_FREQ_50KHZ (0U << 5)
+#define ADP5585_OSC_FREQ_100KHZ (1U << 5)
+#define ADP5585_OSC_FREQ_200KHZ (2U << 5)
+#define ADP5585_OSC_FREQ_500KHZ (3U << 5)
+#define ADP5585_OSC_FREQ_MASK GENMASK(6, 5)
+#define ADP5585_INT_CFG BIT(1)
+#define ADP5585_RST_CFG BIT(0)
+#define ADP5585_INT_EN 0x3c
+#define ADP5585_OVRFLOW_IEN BIT(2)
+#define ADP5585_EVENT_IEN BIT(0)
+
+#define ADP5585_MAX_REG ADP5585_INT_EN
+
+#define ADP5585_PIN_MAX 11
+#define ADP5585_MAX_UNLOCK_TIME_SEC 7
+#define ADP5585_KEY_EVENT_START 1
+#define ADP5585_KEY_EVENT_END 25
+#define ADP5585_GPI_EVENT_START 37
+#define ADP5585_GPI_EVENT_END 47
+#define ADP5585_ROW5_KEY_EVENT_START 1
+#define ADP5585_ROW5_KEY_EVENT_END 30
+#define ADP5585_PWM_OUT 3
+#define ADP5585_RESET1_OUT 4
+#define ADP5585_RESET2_OUT 9
+#define ADP5585_ROW5 5
+
+/* ADP5589 */
+#define ADP5589_MAN_ID_VALUE 0x10
+#define ADP5589_GPI_STATUS_A 0x16
+#define ADP5589_GPI_STATUS_C 0x18
+#define ADP5589_RPULL_CONFIG_A 0x19
+#define ADP5589_GPI_INT_LEVEL_A 0x1e
+#define ADP5589_GPI_EVENT_EN_A 0x21
+#define ADP5589_DEBOUNCE_DIS_A 0x27
+#define ADP5589_GPO_DATA_OUT_A 0x2a
+#define ADP5589_GPO_OUT_MODE_A 0x2d
+#define ADP5589_GPIO_DIRECTION_A 0x30
+#define ADP5589_UNLOCK1 0x33
+#define ADP5589_UNLOCK_EV_PRESS BIT(7)
+#define ADP5589_UNLOCK_TIMERS 0x36
+#define ADP5589_UNLOCK_TIMER GENMASK(2, 0)
+#define ADP5589_LOCK_CFG 0x37
+#define ADP5589_LOCK_EN BIT(0)
+#define ADP5589_RESET1_EVENT_A 0x38
+#define ADP5589_RESET2_EVENT_A 0x3B
+#define ADP5589_RESET_CFG 0x3D
+#define ADP5585_RESET2_POL BIT(7)
+#define ADP5585_RESET1_POL BIT(6)
+#define ADP5585_RST_PASSTHRU_EN BIT(5)
+#define ADP5585_RESET_TRIG_TIME GENMASK(4, 2)
+#define ADP5585_PULSE_WIDTH GENMASK(1, 0)
+#define ADP5589_PWM_OFFT_LOW 0x3e
+#define ADP5589_PWM_ONT_LOW 0x40
+#define ADP5589_PWM_CFG 0x42
+#define ADP5589_POLL_PTIME_CFG 0x48
+#define ADP5589_PIN_CONFIG_A 0x49
+#define ADP5589_PIN_CONFIG_D 0x4C
+#define ADP5589_GENERAL_CFG 0x4d
+#define ADP5589_INT_EN 0x4e
+#define ADP5589_MAX_REG ADP5589_INT_EN
+
+#define ADP5589_PIN_MAX 19
+#define ADP5589_KEY_EVENT_START 1
+#define ADP5589_KEY_EVENT_END 88
+#define ADP5589_GPI_EVENT_START 97
+#define ADP5589_GPI_EVENT_END 115
+#define ADP5589_UNLOCK_WILDCARD 127
+#define ADP5589_RESET2_OUT 12
+
+struct regmap;
+
+enum adp5585_variant {
+ ADP5585_00 = 1,
+ ADP5585_01,
+ ADP5585_02,
+ ADP5585_03,
+ ADP5585_04,
+ ADP5589_00,
+ ADP5589_01,
+ ADP5589_02,
+ ADP5585_MAX
+};
+
+struct adp5585_regs {
+ unsigned int gen_cfg;
+ unsigned int ext_cfg;
+ unsigned int int_en;
+ unsigned int poll_ptime_cfg;
+ unsigned int reset_cfg;
+ unsigned int reset1_event_a;
+ unsigned int reset2_event_a;
+ unsigned int pin_cfg_a;
+};
+
+struct adp5585_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct adp5585_regs *regs;
+ struct blocking_notifier_head event_notifier;
+ unsigned long *pin_usage;
+ unsigned int n_pins;
+ unsigned int reset2_out;
+ enum adp5585_variant variant;
+ unsigned int id;
+ bool has_unlock;
+ bool has_pin6;
+ int irq;
+ unsigned int ev_poll_time;
+ unsigned int unlock_time;
+ unsigned int unlock_keys[2];
+ unsigned int nkeys_unlock;
+ unsigned int reset1_keys[3];
+ unsigned int nkeys_reset1;
+ unsigned int reset2_keys[2];
+ unsigned int nkeys_reset2;
+ u8 reset_cfg;
+};
+
+#endif
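
Each FIFO register above holds one event: BIT(7) distinguishes press from release and GENMASK(6, 0) carries the event number. A minimal sketch of decoding a single entry, assuming a populated struct adp5585_dev plus <linux/bitfield.h> and <linux/regmap.h>; the helper name is illustrative, not part of this header:

	/* Illustrative helper: drain one event from the ADP5585 FIFO. */
	static int adp5585_read_one_event(struct adp5585_dev *adp5585,
					  unsigned int *key, bool *pressed)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(adp5585->regmap, ADP5585_FIFO_1, &val);
		if (ret)
			return ret;

		*key = FIELD_GET(ADP5585_KEY_EVENT_MASK, val);	/* event number */
		*pressed = val & ADP5585_KEV_EV_PRESS_MASK;	/* set on press */

		return 0;
	}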
diff --git a/include/linux/mfd/altera-a10sr.h b/include/linux/mfd/altera-a10sr.h
index 45a5e6e7db54..d616da4b3c4c 100644
--- a/include/linux/mfd/altera-a10sr.h
+++ b/include/linux/mfd/altera-a10sr.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Declarations for Altera Arria10 MAX5 System Resource Chip
*
* Adapted from DA9052
diff --git a/include/linux/mfd/altera-sysmgr.h b/include/linux/mfd/altera-sysmgr.h
new file mode 100644
index 000000000000..b1ef11a83872
--- /dev/null
+++ b/include/linux/mfd/altera-sysmgr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ */
+
+#ifndef __LINUX_MFD_ALTERA_SYSMGR_H__
+#define __LINUX_MFD_ALTERA_SYSMGR_H__
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/firmware/intel/stratix10-smc.h>
+
+struct device_node;
+
+#ifdef CONFIG_MFD_ALTERA_SYSMGR
+struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property);
+#else
+static inline struct regmap *
+altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+#endif
+
+#endif /* __LINUX_MFD_ALTERA_SYSMGR_H__ */
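
A minimal consumer sketch of the lookup declared above, assuming <linux/platform_device.h> and <linux/regmap.h>; the driver name is hypothetical, and the property name follows in-tree users but should be treated as an assumption:

	/* Illustrative probe: resolve the System Manager regmap from DT. */
	static int foo_probe(struct platform_device *pdev)
	{
		struct regmap *sysmgr;

		sysmgr = altr_sysmgr_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "altr,sysmgr-syscon");
		if (IS_ERR(sysmgr))
			return PTR_ERR(sysmgr);

		/* sysmgr is now usable with regmap_read()/regmap_update_bits() */
		return 0;
	}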
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index b31b3be7f8c9..6d6f96b2b29f 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Arizona MFD internals
*
* Copyright 2012 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _WM_ARIZONA_CORE_H
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index bfeecf179895..f72e6d4b14a7 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for Arizona devices
*
* Copyright 2012 Wolfson Microelectronics. PLC.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ARIZONA_PDATA_H
@@ -56,6 +53,7 @@
#define ARIZONA_MAX_PDM_SPK 2
struct regulator_init_data;
+struct gpio_desc;
struct arizona_micbias {
int mV; /** Regulated voltage */
@@ -77,7 +75,7 @@ struct arizona_micd_range {
};
struct arizona_pdata {
- int reset; /** GPIO controlling /RESET, if any */
+ struct gpio_desc *reset; /** GPIO controlling /RESET, if any */
/** Regulator configuration for MICVDD */
struct arizona_micsupp_pdata micvdd;
@@ -119,8 +117,10 @@ struct arizona_pdata {
/** Check for line output with HPDET method */
bool hpdet_acc_id_line;
+#ifdef CONFIG_GPIOLIB_LEGACY
/** GPIO used for mic isolation with HPDET */
int hpdet_id_gpio;
+#endif
/** Channel to use for headphone detection */
unsigned int hpdet_channel;
@@ -131,8 +131,10 @@ struct arizona_pdata {
/** Extra debounce timeout used during initial mic detection (ms) */
unsigned int micd_detect_debounce;
+#ifdef CONFIG_GPIOLIB_LEGACY
/** GPIO for mic detection polarity */
int micd_pol_gpio;
+#endif
/** Mic detect ramp rate */
unsigned int micd_bias_start_time;
@@ -174,6 +176,9 @@ struct arizona_pdata {
/** Mode for outputs */
int out_mono[ARIZONA_MAX_OUTPUT];
+ /** Limit output volumes */
+ unsigned int out_vol_limit[2 * ARIZONA_MAX_OUTPUT];
+
/** PDM speaker mute setting */
unsigned int spk_mute[ARIZONA_MAX_PDM_SPK];
@@ -183,8 +188,10 @@ struct arizona_pdata {
/** Haptic actuator type */
unsigned int hap_act;
+#ifdef CONFIG_GPIOLIB_LEGACY
/** GPIO for primary IRQ (used for edge triggered emulation) */
int irq_gpio;
+#endif
/** General purpose switch control */
unsigned int gpsw;
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 0d06c5d0af93..49e24d1de8d4 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ARIZONA register definitions
*
* Copyright 2012 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ARIZONA_REGISTERS_H
@@ -1189,13 +1186,6 @@
#define ARIZONA_DSP4_SCRATCH_1 0x1441
#define ARIZONA_DSP4_SCRATCH_2 0x1442
#define ARIZONA_DSP4_SCRATCH_3 0x1443
-#define ARIZONA_FRF_COEFF_1 0x1700
-#define ARIZONA_FRF_COEFF_2 0x1701
-#define ARIZONA_FRF_COEFF_3 0x1702
-#define ARIZONA_FRF_COEFF_4 0x1703
-#define ARIZONA_V2_DAC_COMP_1 0x1704
-#define ARIZONA_V2_DAC_COMP_2 0x1705
-
/*
* Field Definitions.
diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h
index 34cc85864be5..4be16b4d2c8a 100644
--- a/include/linux/mfd/as3711.h
+++ b/include/linux/mfd/as3711.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AS3711 PMIC MFD driver header
*
* Copyright (C) 2012 Renesas Electronics Corporation
* Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License as
- * published by the Free Software Foundation
*/
#ifndef MFD_AS3711_H
@@ -108,9 +105,9 @@ struct as3711_regulator_pdata {
};
struct as3711_bl_pdata {
- const char *su1_fb;
+ bool su1_fb;
int su1_max_uA;
- const char *su2_fb;
+ bool su2_fb;
int su2_max_uA;
enum as3711_su2_feedback su2_feedback;
enum as3711_su2_fbprot su2_fbprot;
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
index 51e6f9414575..5162dfc7c24b 100644
--- a/include/linux/mfd/as3722.h
+++ b/include/linux/mfd/as3722.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* as3722 definitions
*
@@ -6,21 +7,6 @@
*
* Author: Florian Lobmaier <florian.lobmaier@ams.com>
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __LINUX_MFD_AS3722_H__
@@ -296,6 +282,8 @@
#define AS3722_ADC1_CONV_NOTREADY BIT(7)
#define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F
+#define AS3722_CTRL_SEQU1_AC_OK_PWR_ON BIT(0)
+
/* GPIO modes */
#define AS3722_GPIO_MODE_MASK 0x07
#define AS3722_GPIO_MODE_INPUT 0x00
@@ -391,6 +379,7 @@ struct as3722 {
unsigned long irq_flags;
bool en_intern_int_pullup;
bool en_intern_i2c_pullup;
+ bool en_ac_ok_pwr_on;
struct regmap_irq_chip_data *irq_data;
};
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
deleted file mode 100644
index e1148d037e7b..000000000000
--- a/include/linux/mfd/asic3.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * include/linux/mfd/asic3.h
- *
- * Compaq ASIC3 headers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Copyright 2001 Compaq Computer Corporation.
- * Copyright 2007-2008 OpenedHand Ltd.
- */
-
-#ifndef __ASIC3_H__
-#define __ASIC3_H__
-
-#include <linux/types.h>
-
-struct led_classdev;
-struct asic3_led {
- const char *name;
- const char *default_trigger;
- struct led_classdev *cdev;
-};
-
-struct asic3_platform_data {
- u16 *gpio_config;
- unsigned int gpio_config_num;
-
- unsigned int irq_base;
-
- unsigned int gpio_base;
-
- unsigned int clock_rate;
-
- struct asic3_led *leds;
-};
-
-#define ASIC3_NUM_GPIO_BANKS 4
-#define ASIC3_GPIOS_PER_BANK 16
-#define ASIC3_NUM_GPIOS 64
-#define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6
-
-#define ASIC3_IRQ_LED0 64
-#define ASIC3_IRQ_LED1 65
-#define ASIC3_IRQ_LED2 66
-#define ASIC3_IRQ_SPI 67
-#define ASIC3_IRQ_SMBUS 68
-#define ASIC3_IRQ_OWM 69
-
-#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio))
-
-#define ASIC3_GPIO_BANK_A 0
-#define ASIC3_GPIO_BANK_B 1
-#define ASIC3_GPIO_BANK_C 2
-#define ASIC3_GPIO_BANK_D 3
-
-#define ASIC3_GPIO(bank, gpio) \
- ((ASIC3_GPIOS_PER_BANK * ASIC3_GPIO_BANK_##bank) + (gpio))
-#define ASIC3_GPIO_bit(gpio) (1 << (gpio & 0xf))
-/* All offsets below are specified with this address bus shift */
-#define ASIC3_DEFAULT_ADDR_SHIFT 2
-
-#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg)
-#define ASIC3_GPIO_OFFSET(base, reg) \
- (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg)
-
-#define ASIC3_GPIO_A_BASE 0x0000
-#define ASIC3_GPIO_B_BASE 0x0100
-#define ASIC3_GPIO_C_BASE 0x0200
-#define ASIC3_GPIO_D_BASE 0x0300
-
-#define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4)
-#define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \
- (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4)))
-#define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio))
-#define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100))
-#define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100))
-
-#define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */
-#define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */
-#define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */
-#define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */
-#define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */
-#define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */
-#define ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */
-#define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */
-#define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */
-#define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */
-#define ASIC3_GPIO_ALT_FUNCTION 0x28 /* R/W 1:LED register control */
-#define ASIC3_GPIO_SLEEP_CONF 0x2c /*
- * R/W bit 1: autosleep
- * 0: disable gposlpout in normal mode,
- * enable gposlpout in sleep mode.
- */
-#define ASIC3_GPIO_STATUS 0x30 /* R Pin status */
-
-/*
- * ASIC3 GPIO config
- *
- * Bits 0..6 gpio number
- * Bits 7..13 Alternate function
- * Bit 14 Direction
- * Bit 15 Initial value
- *
- */
-#define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f)
-#define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7)
-#define ASIC3_CONFIG_GPIO_DIR(config) ((config & (1 << 14)) >> 14)
-#define ASIC3_CONFIG_GPIO_INIT(config) ((config & (1 << 15)) >> 15)
-#define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \
- | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \
- | (((init) & 0x1) << 15))
-#define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \
- ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init))
-#define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \
- ASIC3_CONFIG_GPIO((gpio), 0, 1, (init))
-
-/*
- * Alternate functions
- */
-#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0)
-#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0)
-#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0)
-#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0)
-#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0)
-#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0)
-#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0)
-#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0)
-#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0)
-#define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0)
-#define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0)
-#define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0)
-#define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0)
-#define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0)
-#define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0)
-#define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0)
-#define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0)
-#define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0)
-#define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0)
-#define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0)
-#define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0)
-#define ASIC3_GPIOD4_CF_nCD ASIC3_CONFIG_GPIO(52, 1, 0, 0)
-#define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0)
-#define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0)
-#define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0)
-
-
-#define ASIC3_SPI_Base 0x0400
-#define ASIC3_SPI_Control 0x0000
-#define ASIC3_SPI_TxData 0x0004
-#define ASIC3_SPI_RxData 0x0008
-#define ASIC3_SPI_Int 0x000c
-#define ASIC3_SPI_Status 0x0010
-
-#define SPI_CONTROL_SPR(clk) ((clk) & 0x0f) /* Clock rate */
-
-#define ASIC3_PWM_0_Base 0x0500
-#define ASIC3_PWM_1_Base 0x0600
-#define ASIC3_PWM_TimeBase 0x0000
-#define ASIC3_PWM_PeriodTime 0x0004
-#define ASIC3_PWM_DutyTime 0x0008
-
-#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */
-#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */
-
-#define ASIC3_NUM_LEDS 3
-#define ASIC3_LED_0_Base 0x0700
-#define ASIC3_LED_1_Base 0x0800
-#define ASIC3_LED_2_Base 0x0900
-#define ASIC3_LED_TimeBase 0x0000 /* R/W 7 bits */
-#define ASIC3_LED_PeriodTime 0x0004 /* R/W 12 bits */
-#define ASIC3_LED_DutyTime 0x0008 /* R/W 12 bits */
-#define ASIC3_LED_AutoStopCount 0x000c /* R/W 16 bits */
-
-/* LED TimeBase bits - match ASIC2 */
-#define LED_TBS 0x0f /* Low 4 bits sets time base, max = 13 */
- /* Note: max = 5 on hx4700 */
- /* 0: maximum time base */
- /* 1: maximum time base / 2 */
- /* n: maximum time base / 2^n */
-
-#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */
-#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */
-#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
-
-#define ASIC3_CLOCK_BASE 0x0A00
-#define ASIC3_CLOCK_CDEX 0x00
-#define ASIC3_CLOCK_SEL 0x04
-
-#define CLOCK_CDEX_SOURCE (1 << 0) /* 2 bits */
-#define CLOCK_CDEX_SOURCE0 (1 << 0)
-#define CLOCK_CDEX_SOURCE1 (1 << 1)
-#define CLOCK_CDEX_SPI (1 << 2)
-#define CLOCK_CDEX_OWM (1 << 3)
-#define CLOCK_CDEX_PWM0 (1 << 4)
-#define CLOCK_CDEX_PWM1 (1 << 5)
-#define CLOCK_CDEX_LED0 (1 << 6)
-#define CLOCK_CDEX_LED1 (1 << 7)
-#define CLOCK_CDEX_LED2 (1 << 8)
-
-/* Clocks settings: 1 for 24.576 MHz, 0 for 12.288Mhz */
-#define CLOCK_CDEX_SD_HOST (1 << 9) /* R/W: SD host clock source */
-#define CLOCK_CDEX_SD_BUS (1 << 10) /* R/W: SD bus clock source ctrl */
-#define CLOCK_CDEX_SMBUS (1 << 11)
-#define CLOCK_CDEX_CONTROL_CX (1 << 12)
-
-#define CLOCK_CDEX_EX0 (1 << 13) /* R/W: 32.768 kHz crystal */
-#define CLOCK_CDEX_EX1 (1 << 14) /* R/W: 24.576 MHz crystal */
-
-#define CLOCK_SEL_SD_HCLK_SEL (1 << 0) /* R/W: SDIO host clock select */
-#define CLOCK_SEL_SD_BCLK_SEL (1 << 1) /* R/W: SDIO bus clock select */
-
-/* R/W: INT clock source control (32.768 kHz) */
-#define CLOCK_SEL_CX (1 << 2)
-
-
-#define ASIC3_INTR_BASE 0x0B00
-
-#define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */
-#define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */
-#define ASIC3_INTR_INT_CPS 0x08 /* Interrupt timer clock pre-scale */
-#define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */
-
-#define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */
-#define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */
-#define ASIC3_INTMASK_MASK0 (1 << 2)
-#define ASIC3_INTMASK_MASK1 (1 << 3)
-#define ASIC3_INTMASK_MASK2 (1 << 4)
-#define ASIC3_INTMASK_MASK3 (1 << 5)
-#define ASIC3_INTMASK_MASK4 (1 << 6)
-#define ASIC3_INTMASK_MASK5 (1 << 7)
-
-#define ASIC3_INTR_PERIPHERAL_A (1 << 0)
-#define ASIC3_INTR_PERIPHERAL_B (1 << 1)
-#define ASIC3_INTR_PERIPHERAL_C (1 << 2)
-#define ASIC3_INTR_PERIPHERAL_D (1 << 3)
-#define ASIC3_INTR_LED0 (1 << 4)
-#define ASIC3_INTR_LED1 (1 << 5)
-#define ASIC3_INTR_LED2 (1 << 6)
-#define ASIC3_INTR_SPI (1 << 7)
-#define ASIC3_INTR_SMBUS (1 << 8)
-#define ASIC3_INTR_OWM (1 << 9)
-
-#define ASIC3_INTR_CPS(x) ((x)&0x0f) /* 4 bits, max 14 */
-#define ASIC3_INTR_CPS_SET (1 << 4) /* Time base enable */
-
-
-/* Basic control of the SD ASIC */
-#define ASIC3_SDHWCTRL_BASE 0x0E00
-#define ASIC3_SDHWCTRL_SDCONF 0x00
-
-#define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */
-#define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */
-#define ASIC3_SDHWCTRL_PCLR (1 << 2) /* All registers of SDIO cleared */
-#define ASIC3_SDHWCTRL_LEVCD (1 << 3) /* SD card detection: 0:low */
-
-/* SD card write protection: 0=high */
-#define ASIC3_SDHWCTRL_LEVWP (1 << 4)
-#define ASIC3_SDHWCTRL_SDLED (1 << 5) /* SD card LED signal 0=disable */
-
-/* SD card power supply ctrl 1=enable */
-#define ASIC3_SDHWCTRL_SDPWR (1 << 6)
-
-#define ASIC3_EXTCF_BASE 0x1100
-
-#define ASIC3_EXTCF_SELECT 0x00
-#define ASIC3_EXTCF_RESET 0x04
-
-#define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */
-#define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */
-#define ASIC3_EXTCF_SMOD2 (1 << 2) /* slot number of mode 2 */
-#define ASIC3_EXTCF_OWM_EN (1 << 4) /* enable onewire module */
-#define ASIC3_EXTCF_OWM_SMB (1 << 5) /* OWM bus selection */
-#define ASIC3_EXTCF_OWM_RESET (1 << 6) /* ?? used by OWM and CF */
-#define ASIC3_EXTCF_CF0_SLEEP_MODE (1 << 7) /* CF0 sleep state */
-#define ASIC3_EXTCF_CF1_SLEEP_MODE (1 << 8) /* CF1 sleep state */
-#define ASIC3_EXTCF_CF0_PWAIT_EN (1 << 10) /* CF0 PWAIT_n control */
-#define ASIC3_EXTCF_CF1_PWAIT_EN (1 << 11) /* CF1 PWAIT_n control */
-#define ASIC3_EXTCF_CF0_BUF_EN (1 << 12) /* CF0 buffer control */
-#define ASIC3_EXTCF_CF1_BUF_EN (1 << 13) /* CF1 buffer control */
-#define ASIC3_EXTCF_SD_MEM_ENABLE (1 << 14)
-#define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */
-
-/*********************************************
- * The Onewire interface (DS1WM) is handled
- * by the ds1wm driver.
- *
- *********************************************/
-
-#define ASIC3_OWM_BASE 0xC00
-
-/*****************************************************************************
- * The SD configuration registers are at a completely different location
- * in memory. They are divided into three sets of registers:
- *
- * SD_CONFIG Core configuration register
- * SD_CTRL Control registers for SD operations
- * SDIO_CTRL Control registers for SDIO operations
- *
- *****************************************************************************/
-#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */
-#define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */
-#define ASIC3_SD_CTRL_BASE 0x1000
-#define ASIC3_SDIO_CTRL_BASE 0x1200
-
-#define ASIC3_MAP_SIZE_32BIT 0x2000
-#define ASIC3_MAP_SIZE_16BIT 0x1000
-
-/* Functions needed by leds-asic3 */
-
-struct asic3;
-extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val);
-extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg);
-
-#endif /* __ASIC3_H__ */
diff --git a/include/linux/mfd/atc260x/atc2603c.h b/include/linux/mfd/atc260x/atc2603c.h
new file mode 100644
index 000000000000..07ac640ef3e1
--- /dev/null
+++ b/include/linux/mfd/atc260x/atc2603c.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ATC2603C PMIC register definitions
+ *
+ * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_ATC2603C_H
+#define __LINUX_MFD_ATC260X_ATC2603C_H
+
+enum atc2603c_irq_def {
+ ATC2603C_IRQ_AUDIO = 0,
+ ATC2603C_IRQ_OV,
+ ATC2603C_IRQ_OC,
+ ATC2603C_IRQ_OT,
+ ATC2603C_IRQ_UV,
+ ATC2603C_IRQ_ALARM,
+ ATC2603C_IRQ_ONOFF,
+ ATC2603C_IRQ_SGPIO,
+ ATC2603C_IRQ_IR,
+ ATC2603C_IRQ_REMCON,
+ ATC2603C_IRQ_POWER_IN,
+};
+
+/* PMU Registers */
+#define ATC2603C_PMU_SYS_CTL0 0x00
+#define ATC2603C_PMU_SYS_CTL1 0x01
+#define ATC2603C_PMU_SYS_CTL2 0x02
+#define ATC2603C_PMU_SYS_CTL3 0x03
+#define ATC2603C_PMU_SYS_CTL4 0x04
+#define ATC2603C_PMU_SYS_CTL5 0x05
+#define ATC2603C_PMU_SYS_CTL6 0x06
+#define ATC2603C_PMU_SYS_CTL7 0x07
+#define ATC2603C_PMU_SYS_CTL8 0x08
+#define ATC2603C_PMU_SYS_CTL9 0x09
+#define ATC2603C_PMU_BAT_CTL0 0x0A
+#define ATC2603C_PMU_BAT_CTL1 0x0B
+#define ATC2603C_PMU_VBUS_CTL0 0x0C
+#define ATC2603C_PMU_VBUS_CTL1 0x0D
+#define ATC2603C_PMU_WALL_CTL0 0x0E
+#define ATC2603C_PMU_WALL_CTL1 0x0F
+#define ATC2603C_PMU_SYS_PENDING 0x10
+#define ATC2603C_PMU_DC1_CTL0 0x11
+#define ATC2603C_PMU_DC1_CTL1 0x12 // Undocumented
+#define ATC2603C_PMU_DC1_CTL2 0x13 // Undocumented
+#define ATC2603C_PMU_DC2_CTL0 0x14
+#define ATC2603C_PMU_DC2_CTL1 0x15 // Undocumented
+#define ATC2603C_PMU_DC2_CTL2 0x16 // Undocumented
+#define ATC2603C_PMU_DC3_CTL0 0x17
+#define ATC2603C_PMU_DC3_CTL1 0x18 // Undocumented
+#define ATC2603C_PMU_DC3_CTL2 0x19 // Undocumented
+#define ATC2603C_PMU_DC4_CTL0 0x1A // Undocumented
+#define ATC2603C_PMU_DC4_CTL1 0x1B // Undocumented
+#define ATC2603C_PMU_DC5_CTL0 0x1C // Undocumented
+#define ATC2603C_PMU_DC5_CTL1 0x1D // Undocumented
+#define ATC2603C_PMU_LDO1_CTL 0x1E
+#define ATC2603C_PMU_LDO2_CTL 0x1F
+#define ATC2603C_PMU_LDO3_CTL 0x20
+#define ATC2603C_PMU_LDO4_CTL 0x21 // Undocumented
+#define ATC2603C_PMU_LDO5_CTL 0x22
+#define ATC2603C_PMU_LDO6_CTL 0x23
+#define ATC2603C_PMU_LDO7_CTL 0x24
+#define ATC2603C_PMU_LDO8_CTL 0x25 // Undocumented
+#define ATC2603C_PMU_LDO9_CTL 0x26 // Undocumented
+#define ATC2603C_PMU_LDO10_CTL 0x27 // Undocumented
+#define ATC2603C_PMU_LDO11_CTL 0x28
+#define ATC2603C_PMU_SWITCH_CTL 0x29
+#define ATC2603C_PMU_OV_CTL0 0x2A
+#define ATC2603C_PMU_OV_CTL1 0x2B
+#define ATC2603C_PMU_OV_STATUS 0x2C
+#define ATC2603C_PMU_OV_EN 0x2D
+#define ATC2603C_PMU_OV_INT_EN 0x2E
+#define ATC2603C_PMU_OC_CTL 0x2F
+#define ATC2603C_PMU_OC_STATUS 0x30
+#define ATC2603C_PMU_OC_EN 0x31
+#define ATC2603C_PMU_OC_INT_EN 0x32
+#define ATC2603C_PMU_UV_CTL0 0x33
+#define ATC2603C_PMU_UV_CTL1 0x34
+#define ATC2603C_PMU_UV_STATUS 0x35
+#define ATC2603C_PMU_UV_EN 0x36
+#define ATC2603C_PMU_UV_INT_EN 0x37
+#define ATC2603C_PMU_OT_CTL 0x38
+#define ATC2603C_PMU_CHARGER_CTL0 0x39
+#define ATC2603C_PMU_CHARGER_CTL1 0x3A
+#define ATC2603C_PMU_CHARGER_CTL2 0x3B
+#define ATC2603C_PMU_BAKCHARGER_CTL 0x3C // Undocumented
+#define ATC2603C_PMU_APDS_CTL 0x3D
+#define ATC2603C_PMU_AUXADC_CTL0 0x3E
+#define ATC2603C_PMU_AUXADC_CTL1 0x3F
+#define ATC2603C_PMU_BATVADC 0x40
+#define ATC2603C_PMU_BATIADC 0x41
+#define ATC2603C_PMU_WALLVADC 0x42
+#define ATC2603C_PMU_WALLIADC 0x43
+#define ATC2603C_PMU_VBUSVADC 0x44
+#define ATC2603C_PMU_VBUSIADC 0x45
+#define ATC2603C_PMU_SYSPWRADC 0x46
+#define ATC2603C_PMU_REMCONADC 0x47
+#define ATC2603C_PMU_SVCCADC 0x48
+#define ATC2603C_PMU_CHGIADC 0x49
+#define ATC2603C_PMU_IREFADC 0x4A
+#define ATC2603C_PMU_BAKBATADC 0x4B
+#define ATC2603C_PMU_ICTEMPADC 0x4C
+#define ATC2603C_PMU_AUXADC0 0x4D
+#define ATC2603C_PMU_AUXADC1 0x4E
+#define ATC2603C_PMU_AUXADC2 0x4F
+#define ATC2603C_PMU_ICMADC 0x50
+#define ATC2603C_PMU_BDG_CTL 0x51 // Undocumented
+#define ATC2603C_RTC_CTL 0x52
+#define ATC2603C_RTC_MSALM 0x53
+#define ATC2603C_RTC_HALM 0x54
+#define ATC2603C_RTC_YMDALM 0x55
+#define ATC2603C_RTC_MS 0x56
+#define ATC2603C_RTC_H 0x57
+#define ATC2603C_RTC_DC 0x58
+#define ATC2603C_RTC_YMD 0x59
+#define ATC2603C_EFUSE_DAT 0x5A // Undocumented
+#define ATC2603C_EFUSECRTL1 0x5B // Undocumented
+#define ATC2603C_EFUSECRTL2 0x5C // Undocumented
+#define ATC2603C_PMU_FW_USE0 0x5D // Undocumented
+#define ATC2603C_PMU_FW_USE1 0x5E // Undocumented
+#define ATC2603C_PMU_FW_USE2 0x5F // Undocumented
+#define ATC2603C_PMU_FW_USE3 0x60 // Undocumented
+#define ATC2603C_PMU_FW_USE4 0x61 // Undocumented
+#define ATC2603C_PMU_ABNORMAL_STATUS 0x62
+#define ATC2603C_PMU_WALL_APDS_CTL 0x63
+#define ATC2603C_PMU_REMCON_CTL0 0x64
+#define ATC2603C_PMU_REMCON_CTL1 0x65
+#define ATC2603C_PMU_MUX_CTL0 0x66
+#define ATC2603C_PMU_SGPIO_CTL0 0x67
+#define ATC2603C_PMU_SGPIO_CTL1 0x68
+#define ATC2603C_PMU_SGPIO_CTL2 0x69
+#define ATC2603C_PMU_SGPIO_CTL3 0x6A
+#define ATC2603C_PMU_SGPIO_CTL4 0x6B
+#define ATC2603C_PWMCLK_CTL 0x6C
+#define ATC2603C_PWM0_CTL 0x6D
+#define ATC2603C_PWM1_CTL 0x6E
+#define ATC2603C_PMU_ADC_DBG0 0x70
+#define ATC2603C_PMU_ADC_DBG1 0x71
+#define ATC2603C_PMU_ADC_DBG2 0x72
+#define ATC2603C_PMU_ADC_DBG3 0x73
+#define ATC2603C_PMU_ADC_DBG4 0x74
+#define ATC2603C_IRC_CTL 0x80
+#define ATC2603C_IRC_STAT 0x81
+#define ATC2603C_IRC_CC 0x82
+#define ATC2603C_IRC_KDC 0x83
+#define ATC2603C_IRC_WK 0x84
+#define ATC2603C_IRC_RCC 0x85
+#define ATC2603C_IRC_FILTER 0x86
+
+/* AUDIO_OUT Registers */
+#define ATC2603C_AUDIOINOUT_CTL 0xA0
+#define ATC2603C_AUDIO_DEBUGOUTCTL 0xA1
+#define ATC2603C_DAC_DIGITALCTL 0xA2
+#define ATC2603C_DAC_VOLUMECTL0 0xA3
+#define ATC2603C_DAC_ANALOG0 0xA4
+#define ATC2603C_DAC_ANALOG1 0xA5
+#define ATC2603C_DAC_ANALOG2 0xA6
+#define ATC2603C_DAC_ANALOG3 0xA7
+
+/* AUDIO_IN Registers */
+#define ATC2603C_ADC_DIGITALCTL 0xA8
+#define ATC2603C_ADC_HPFCTL 0xA9
+#define ATC2603C_ADC_CTL 0xAA
+#define ATC2603C_AGC_CTL0 0xAB
+#define ATC2603C_AGC_CTL1 0xAC // Undocumented
+#define ATC2603C_AGC_CTL2 0xAD
+#define ATC2603C_ADC_ANALOG0 0xAE
+#define ATC2603C_ADC_ANALOG1 0xAF
+
+/* PCM_IF Registers */
+#define ATC2603C_PCM0_CTL 0xB0 // Undocumented
+#define ATC2603C_PCM1_CTL 0xB1 // Undocumented
+#define ATC2603C_PCM2_CTL 0xB2 // Undocumented
+#define ATC2603C_PCMIF_CTL 0xB3 // Undocumented
+
+/* CMU_CONTROL Registers */
+#define ATC2603C_CMU_DEVRST 0xC1 // Undocumented
+
+/* INTS Registers */
+#define ATC2603C_INTS_PD 0xC8
+#define ATC2603C_INTS_MSK 0xC9
+
+/* MFP Registers */
+#define ATC2603C_MFP_CTL 0xD0
+#define ATC2603C_PAD_VSEL 0xD1 // Undocumented
+#define ATC2603C_GPIO_OUTEN 0xD2
+#define ATC2603C_GPIO_INEN 0xD3
+#define ATC2603C_GPIO_DAT 0xD4
+#define ATC2603C_PAD_DRV 0xD5
+#define ATC2603C_PAD_EN 0xD6
+#define ATC2603C_DEBUG_SEL 0xD7 // Undocumented
+#define ATC2603C_DEBUG_IE 0xD8 // Undocumented
+#define ATC2603C_DEBUG_OE 0xD9 // Undocumented
+#define ATC2603C_BIST_START 0x0A // Undocumented
+#define ATC2603C_BIST_RESULT 0x0B // Undocumented
+#define ATC2603C_CHIP_VER 0xDC
+
+/* TWSI Registers */
+#define ATC2603C_SADDR 0xFF
+
+/* PMU_SYS_CTL0 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL0_IR_WK_EN BIT(5)
+#define ATC2603C_PMU_SYS_CTL0_RESET_WK_EN BIT(6)
+#define ATC2603C_PMU_SYS_CTL0_HDSW_WK_EN BIT(7)
+#define ATC2603C_PMU_SYS_CTL0_ALARM_WK_EN BIT(8)
+#define ATC2603C_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL0_RESTART_EN BIT(10)
+#define ATC2603C_PMU_SYS_CTL0_SGPIOIRQ_WK_EN BIT(11)
+#define ATC2603C_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12)
+#define ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13)
+#define ATC2603C_PMU_SYS_CTL0_WALL_WK_EN BIT(14)
+#define ATC2603C_PMU_SYS_CTL0_USB_WK_EN BIT(15)
+#define ATC2603C_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10)))
+
+/* PMU_SYS_CTL1 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL1_EN_S1 BIT(0)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4_EN BIT(2)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4_3_1V BIT(4)
+#define ATC2603C_PMU_SYS_CTL1_IR_WK_FLAG BIT(5)
+#define ATC2603C_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6)
+#define ATC2603C_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7)
+#define ATC2603C_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8)
+#define ATC2603C_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_PRESS_RESET_IRQ_PD BIT(10)
+#define ATC2603C_PMU_SYS_CTL1_SGPIOIRQ_WK_FLAG BIT(11)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13)
+#define ATC2603C_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14)
+#define ATC2603C_PMU_SYS_CTL1_USB_WK_FLAG BIT(15)
+
+/* PMU_SYS_CTL2 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL2_PMU_A_EN BIT(0)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2)
+#define ATC2603C_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3)
+#define ATC2603C_PMU_SYS_CTL2_S2_TIMER_EN BIT(6)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_RESET_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_INT_EN BIT(12)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS BIT(15)
+
+/* PMU_SYS_CTL3 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7)
+#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10)
+#define ATC2603C_PMU_SYS_CTL3_S3_TIMER_EN BIT(13)
+#define ATC2603C_PMU_SYS_CTL3_EN_S3 BIT(14)
+#define ATC2603C_PMU_SYS_CTL3_EN_S2 BIT(15)
+
+/* PMU_SYS_CTL5 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL5_WALLWKDTEN BIT(7)
+#define ATC2603C_PMU_SYS_CTL5_VBUSWKDTEN BIT(8)
+#define ATC2603C_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10)
+
+/* INTS_MSK Register Mask Bits */
+#define ATC2603C_INTS_MSK_AUDIO BIT(0)
+#define ATC2603C_INTS_MSK_OV BIT(1)
+#define ATC2603C_INTS_MSK_OC BIT(2)
+#define ATC2603C_INTS_MSK_OT BIT(3)
+#define ATC2603C_INTS_MSK_UV BIT(4)
+#define ATC2603C_INTS_MSK_ALARM BIT(5)
+#define ATC2603C_INTS_MSK_ONOFF BIT(6)
+#define ATC2603C_INTS_MSK_SGPIO BIT(7)
+#define ATC2603C_INTS_MSK_IR BIT(8)
+#define ATC2603C_INTS_MSK_REMCON BIT(9)
+#define ATC2603C_INTS_MSK_POWERIN BIT(10)
+
+/* CMU_DEVRST Register Mask Bits */
+#define ATC2603C_CMU_DEVRST_MFP BIT(1)
+#define ATC2603C_CMU_DEVRST_INTS BIT(2)
+#define ATC2603C_CMU_DEVRST_AUDIO BIT(4)
+
+/* PAD_EN Register Mask Bits */
+#define ATC2603C_PAD_EN_EXTIRQ BIT(0)
+
+#endif /* __LINUX_MFD_ATC260X_ATC2603C_H */
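
The WK_ALL mask above is GENMASK(15, 5) with bit 10 (RESTART_EN) carved out, since that bit is a control rather than a wakeup enable. A minimal sketch of using it to program a subset of wakeup sources through regmap, assuming <linux/regmap.h>; which sources to enable is board policy, and the helper is illustrative only:

	/* Illustrative helper; the chosen wakeup sources are an example. */
	static int atc2603c_set_wakeup_sources(struct regmap *regmap)
	{
		return regmap_update_bits(regmap, ATC2603C_PMU_SYS_CTL0,
					  ATC2603C_PMU_SYS_CTL0_WK_ALL,
					  ATC2603C_PMU_SYS_CTL0_ALARM_WK_EN |
					  ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN);
	}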
diff --git a/include/linux/mfd/atc260x/atc2609a.h b/include/linux/mfd/atc260x/atc2609a.h
new file mode 100644
index 000000000000..b957d7bd73e9
--- /dev/null
+++ b/include/linux/mfd/atc260x/atc2609a.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ATC2609A PMIC register definitions
+ *
+ * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_ATC2609A_H
+#define __LINUX_MFD_ATC260X_ATC2609A_H
+
+enum atc2609a_irq_def {
+ ATC2609A_IRQ_AUDIO = 0,
+ ATC2609A_IRQ_OV,
+ ATC2609A_IRQ_OC,
+ ATC2609A_IRQ_OT,
+ ATC2609A_IRQ_UV,
+ ATC2609A_IRQ_ALARM,
+ ATC2609A_IRQ_ONOFF,
+ ATC2609A_IRQ_WKUP,
+ ATC2609A_IRQ_IR,
+ ATC2609A_IRQ_REMCON,
+ ATC2609A_IRQ_POWER_IN,
+};
+
+/* PMU Registers */
+#define ATC2609A_PMU_SYS_CTL0 0x00
+#define ATC2609A_PMU_SYS_CTL1 0x01
+#define ATC2609A_PMU_SYS_CTL2 0x02
+#define ATC2609A_PMU_SYS_CTL3 0x03
+#define ATC2609A_PMU_SYS_CTL4 0x04
+#define ATC2609A_PMU_SYS_CTL5 0x05
+#define ATC2609A_PMU_SYS_CTL6 0x06
+#define ATC2609A_PMU_SYS_CTL7 0x07
+#define ATC2609A_PMU_SYS_CTL8 0x08
+#define ATC2609A_PMU_SYS_CTL9 0x09
+#define ATC2609A_PMU_BAT_CTL0 0x0A
+#define ATC2609A_PMU_BAT_CTL1 0x0B
+#define ATC2609A_PMU_VBUS_CTL0 0x0C
+#define ATC2609A_PMU_VBUS_CTL1 0x0D
+#define ATC2609A_PMU_WALL_CTL0 0x0E
+#define ATC2609A_PMU_WALL_CTL1 0x0F
+#define ATC2609A_PMU_SYS_PENDING 0x10
+#define ATC2609A_PMU_APDS_CTL0 0x11
+#define ATC2609A_PMU_APDS_CTL1 0x12
+#define ATC2609A_PMU_APDS_CTL2 0x13
+#define ATC2609A_PMU_CHARGER_CTL 0x14
+#define ATC2609A_PMU_BAKCHARGER_CTL 0x15
+#define ATC2609A_PMU_SWCHG_CTL0 0x16
+#define ATC2609A_PMU_SWCHG_CTL1 0x17
+#define ATC2609A_PMU_SWCHG_CTL2 0x18
+#define ATC2609A_PMU_SWCHG_CTL3 0x19
+#define ATC2609A_PMU_SWCHG_CTL4 0x1A
+#define ATC2609A_PMU_DC_OSC 0x1B
+#define ATC2609A_PMU_DC0_CTL0 0x1C
+#define ATC2609A_PMU_DC0_CTL1 0x1D
+#define ATC2609A_PMU_DC0_CTL2 0x1E
+#define ATC2609A_PMU_DC0_CTL3 0x1F
+#define ATC2609A_PMU_DC0_CTL4 0x20
+#define ATC2609A_PMU_DC0_CTL5 0x21
+#define ATC2609A_PMU_DC0_CTL6 0x22
+#define ATC2609A_PMU_DC1_CTL0 0x23
+#define ATC2609A_PMU_DC1_CTL1 0x24
+#define ATC2609A_PMU_DC1_CTL2 0x25
+#define ATC2609A_PMU_DC1_CTL3 0x26
+#define ATC2609A_PMU_DC1_CTL4 0x27
+#define ATC2609A_PMU_DC1_CTL5 0x28
+#define ATC2609A_PMU_DC1_CTL6 0x29
+#define ATC2609A_PMU_DC2_CTL0 0x2A
+#define ATC2609A_PMU_DC2_CTL1 0x2B
+#define ATC2609A_PMU_DC2_CTL2 0x2C
+#define ATC2609A_PMU_DC2_CTL3 0x2D
+#define ATC2609A_PMU_DC2_CTL4 0x2E
+#define ATC2609A_PMU_DC2_CTL5 0x2F
+#define ATC2609A_PMU_DC2_CTL6 0x30
+#define ATC2609A_PMU_DC3_CTL0 0x31
+#define ATC2609A_PMU_DC3_CTL1 0x32
+#define ATC2609A_PMU_DC3_CTL2 0x33
+#define ATC2609A_PMU_DC3_CTL3 0x34
+#define ATC2609A_PMU_DC3_CTL4 0x35
+#define ATC2609A_PMU_DC3_CTL5 0x36
+#define ATC2609A_PMU_DC3_CTL6 0x37
+#define ATC2609A_PMU_DC_ZR 0x38
+#define ATC2609A_PMU_LDO0_CTL0 0x39
+#define ATC2609A_PMU_LDO0_CTL1 0x3A
+#define ATC2609A_PMU_LDO1_CTL0 0x3B
+#define ATC2609A_PMU_LDO1_CTL1 0x3C
+#define ATC2609A_PMU_LDO2_CTL0 0x3D
+#define ATC2609A_PMU_LDO2_CTL1 0x3E
+#define ATC2609A_PMU_LDO3_CTL0 0x3F
+#define ATC2609A_PMU_LDO3_CTL1 0x40
+#define ATC2609A_PMU_LDO4_CTL0 0x41
+#define ATC2609A_PMU_LDO4_CTL1 0x42
+#define ATC2609A_PMU_LDO5_CTL0 0x43
+#define ATC2609A_PMU_LDO5_CTL1 0x44
+#define ATC2609A_PMU_LDO6_CTL0 0x45
+#define ATC2609A_PMU_LDO6_CTL1 0x46
+#define ATC2609A_PMU_LDO7_CTL0 0x47
+#define ATC2609A_PMU_LDO7_CTL1 0x48
+#define ATC2609A_PMU_LDO8_CTL0 0x49
+#define ATC2609A_PMU_LDO8_CTL1 0x4A
+#define ATC2609A_PMU_LDO9_CTL 0x4B
+#define ATC2609A_PMU_OV_INT_EN 0x4C
+#define ATC2609A_PMU_OV_STATUS 0x4D
+#define ATC2609A_PMU_UV_INT_EN 0x4E
+#define ATC2609A_PMU_UV_STATUS 0x4F
+#define ATC2609A_PMU_OC_INT_EN 0x50
+#define ATC2609A_PMU_OC_STATUS 0x51
+#define ATC2609A_PMU_OT_CTL 0x52
+#define ATC2609A_PMU_CM_CTL0 0x53
+#define ATC2609A_PMU_FW_USE0 0x54
+#define ATC2609A_PMU_FW_USE1 0x55
+#define ATC2609A_PMU_ADC12B_I 0x56
+#define ATC2609A_PMU_ADC12B_V 0x57
+#define ATC2609A_PMU_ADC12B_DUMMY 0x58
+#define ATC2609A_PMU_AUXADC_CTL0 0x59
+#define ATC2609A_PMU_AUXADC_CTL1 0x5A
+#define ATC2609A_PMU_BATVADC 0x5B
+#define ATC2609A_PMU_BATIADC 0x5C
+#define ATC2609A_PMU_WALLVADC 0x5D
+#define ATC2609A_PMU_WALLIADC 0x5E
+#define ATC2609A_PMU_VBUSVADC 0x5F
+#define ATC2609A_PMU_VBUSIADC 0x60
+#define ATC2609A_PMU_SYSPWRADC 0x61
+#define ATC2609A_PMU_REMCONADC 0x62
+#define ATC2609A_PMU_SVCCADC 0x63
+#define ATC2609A_PMU_CHGIADC 0x64
+#define ATC2609A_PMU_IREFADC 0x65
+#define ATC2609A_PMU_BAKBATADC 0x66
+#define ATC2609A_PMU_ICTEMPADC 0x67
+#define ATC2609A_PMU_AUXADC0 0x68
+#define ATC2609A_PMU_AUXADC1 0x69
+#define ATC2609A_PMU_AUXADC2 0x6A
+#define ATC2609A_PMU_AUXADC3 0x6B
+#define ATC2609A_PMU_ICTEMPADC_ADJ 0x6C
+#define ATC2609A_PMU_BDG_CTL 0x6D
+#define ATC2609A_RTC_CTL 0x6E
+#define ATC2609A_RTC_MSALM 0x6F
+#define ATC2609A_RTC_HALM 0x70
+#define ATC2609A_RTC_YMDALM 0x71
+#define ATC2609A_RTC_MS 0x72
+#define ATC2609A_RTC_H 0x73
+#define ATC2609A_RTC_DC 0x74
+#define ATC2609A_RTC_YMD 0x75
+#define ATC2609A_EFUSE_DAT 0x76
+#define ATC2609A_EFUSECRTL1 0x77
+#define ATC2609A_EFUSECRTL2 0x78
+#define ATC2609A_PMU_DC4_CTL0 0x79
+#define ATC2609A_PMU_DC4_CTL1 0x7A
+#define ATC2609A_PMU_DC4_CTL2 0x7B
+#define ATC2609A_PMU_DC4_CTL3 0x7C
+#define ATC2609A_PMU_DC4_CTL4 0x7D
+#define ATC2609A_PMU_DC4_CTL5 0x7E
+#define ATC2609A_PMU_DC4_CTL6 0x7F
+#define ATC2609A_PMU_PWR_STATUS 0x80
+#define ATC2609A_PMU_S2_PWR 0x81
+#define ATC2609A_CLMT_CTL0 0x82
+#define ATC2609A_CLMT_DATA0 0x83
+#define ATC2609A_CLMT_DATA1 0x84
+#define ATC2609A_CLMT_DATA2 0x85
+#define ATC2609A_CLMT_DATA3 0x86
+#define ATC2609A_CLMT_ADD0 0x87
+#define ATC2609A_CLMT_ADD1 0x88
+#define ATC2609A_CLMT_OCV_TABLE 0x89
+#define ATC2609A_CLMT_R_TABLE 0x8A
+#define ATC2609A_PMU_PWRON_CTL0 0x8D
+#define ATC2609A_PMU_PWRON_CTL1 0x8E
+#define ATC2609A_PMU_PWRON_CTL2 0x8F
+#define ATC2609A_IRC_CTL 0x90
+#define ATC2609A_IRC_STAT 0x91
+#define ATC2609A_IRC_CC 0x92
+#define ATC2609A_IRC_KDC 0x93
+#define ATC2609A_IRC_WK 0x94
+#define ATC2609A_IRC_RCC 0x95
+
+/* AUDIO_OUT Registers */
+#define ATC2609A_AUDIOINOUT_CTL 0xA0
+#define ATC2609A_AUDIO_DEBUGOUTCTL 0xA1
+#define ATC2609A_DAC_DIGITALCTL 0xA2
+#define ATC2609A_DAC_VOLUMECTL0 0xA3
+#define ATC2609A_DAC_ANALOG0 0xA4
+#define ATC2609A_DAC_ANALOG1 0xA5
+#define ATC2609A_DAC_ANALOG2 0xA6
+#define ATC2609A_DAC_ANALOG3 0xA7
+
+/* AUDIO_IN Registers */
+#define ATC2609A_ADC_DIGITALCTL 0xA8
+#define ATC2609A_ADC_HPFCTL 0xA9
+#define ATC2609A_ADC_CTL 0xAA
+#define ATC2609A_AGC_CTL0 0xAB
+#define ATC2609A_AGC_CTL1 0xAC
+#define ATC2609A_AGC_CTL2 0xAD
+#define ATC2609A_ADC_ANALOG0 0xAE
+#define ATC2609A_ADC_ANALOG1 0xAF
+
+/* PCM_IF Registers */
+#define ATC2609A_PCM0_CTL 0xB0
+#define ATC2609A_PCM1_CTL 0xB1
+#define ATC2609A_PCM2_CTL 0xB2
+#define ATC2609A_PCMIF_CTL 0xB3
+
+/* CMU_CONTROL Registers */
+#define ATC2609A_CMU_DEVRST 0xC1
+
+/* INTS Registers */
+#define ATC2609A_INTS_PD 0xC8
+#define ATC2609A_INTS_MSK 0xC9
+
+/* MFP Registers */
+#define ATC2609A_MFP_CTL 0xD0
+#define ATC2609A_PAD_VSEL 0xD1
+#define ATC2609A_GPIO_OUTEN 0xD2
+#define ATC2609A_GPIO_INEN 0xD3
+#define ATC2609A_GPIO_DAT 0xD4
+#define ATC2609A_PAD_DRV 0xD5
+#define ATC2609A_PAD_EN 0xD6
+#define ATC2609A_DEBUG_SEL 0xD7
+#define ATC2609A_DEBUG_IE 0xD8
+#define ATC2609A_DEBUG_OE 0xD9
+#define ATC2609A_CHIP_VER 0xDC
+
+/* PWSI Registers */
+#define ATC2609A_PWSI_CTL 0xF0
+#define ATC2609A_PWSI_STATUS 0xF1
+
+/* TWSI Registers */
+#define ATC2609A_SADDR 0xFF
+
+/* PMU_SYS_CTL0 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL0_IR_WK_EN BIT(5)
+#define ATC2609A_PMU_SYS_CTL0_RESET_WK_EN BIT(6)
+#define ATC2609A_PMU_SYS_CTL0_HDSW_WK_EN BIT(7)
+#define ATC2609A_PMU_SYS_CTL0_ALARM_WK_EN BIT(8)
+#define ATC2609A_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL0_RESTART_EN BIT(10)
+#define ATC2609A_PMU_SYS_CTL0_WKIRQ_WK_EN BIT(11)
+#define ATC2609A_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12)
+#define ATC2609A_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13)
+#define ATC2609A_PMU_SYS_CTL0_WALL_WK_EN BIT(14)
+#define ATC2609A_PMU_SYS_CTL0_USB_WK_EN BIT(15)
+#define ATC2609A_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10)))
+
+/* PMU_SYS_CTL1 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL1_EN_S1 BIT(0)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4_EN BIT(2)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4_3_1V BIT(4)
+#define ATC2609A_PMU_SYS_CTL1_IR_WK_FLAG BIT(5)
+#define ATC2609A_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6)
+#define ATC2609A_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7)
+#define ATC2609A_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8)
+#define ATC2609A_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9)
+#define ATC2609A_PMU_SYS_CTL1_RESTART_WK_FLAG BIT(10)
+#define ATC2609A_PMU_SYS_CTL1_WKIRQ_WK_FLAG BIT(11)
+#define ATC2609A_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12)
+#define ATC2609A_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13)
+#define ATC2609A_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14)
+#define ATC2609A_PMU_SYS_CTL1_USB_WK_FLAG BIT(15)
+
+/* PMU_SYS_CTL2 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL2_PMU_A_EN BIT(0)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2)
+#define ATC2609A_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3)
+#define ATC2609A_PMU_SYS_CTL2_S2_TIMER_EN BIT(6)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_LSP_INT_EN BIT(12)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS BIT(15)
+
+/* PMU_SYS_CTL3 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7)
+#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10)
+#define ATC2609A_PMU_SYS_CTL3_S3_TIMER_EN BIT(13)
+#define ATC2609A_PMU_SYS_CTL3_EN_S3 BIT(14)
+#define ATC2609A_PMU_SYS_CTL3_EN_S2 BIT(15)
+
+/* PMU_SYS_CTL5 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL5_WALLWKDTEN BIT(7)
+#define ATC2609A_PMU_SYS_CTL5_VBUSWKDTEN BIT(8)
+#define ATC2609A_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10)
+
+/* INTS_MSK Register Mask Bits */
+#define ATC2609A_INTS_MSK_AUDIO BIT(0)
+#define ATC2609A_INTS_MSK_OV BIT(1)
+#define ATC2609A_INTS_MSK_OC BIT(2)
+#define ATC2609A_INTS_MSK_OT BIT(3)
+#define ATC2609A_INTS_MSK_UV BIT(4)
+#define ATC2609A_INTS_MSK_ALARM BIT(5)
+#define ATC2609A_INTS_MSK_ONOFF BIT(6)
+#define ATC2609A_INTS_MSK_WKUP BIT(7)
+#define ATC2609A_INTS_MSK_IR BIT(8)
+#define ATC2609A_INTS_MSK_REMCON BIT(9)
+#define ATC2609A_INTS_MSK_POWERIN BIT(10)
+
+/* CMU_DEVRST Register Mask Bits */
+#define ATC2609A_CMU_DEVRST_AUDIO BIT(0)
+#define ATC2609A_CMU_DEVRST_MFP BIT(1)
+#define ATC2609A_CMU_DEVRST_INTS BIT(2)
+
+/* PAD_EN Register Mask Bits */
+#define ATC2609A_PAD_EN_EXTIRQ BIT(0)
+
+#endif /* __LINUX_MFD_ATC260X_ATC2609A_H */
diff --git a/include/linux/mfd/atc260x/core.h b/include/linux/mfd/atc260x/core.h
new file mode 100644
index 000000000000..777b6c345d44
--- /dev/null
+++ b/include/linux/mfd/atc260x/core.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core MFD defines for ATC260x PMICs
+ *
+ * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_CORE_H
+#define __LINUX_MFD_ATC260X_CORE_H
+
+#include <linux/mfd/atc260x/atc2603c.h>
+#include <linux/mfd/atc260x/atc2609a.h>
+
+enum atc260x_type {
+ ATC2603A = 0,
+ ATC2603C,
+ ATC2609A,
+};
+
+enum atc260x_ver {
+ ATC260X_A = 0,
+ ATC260X_B,
+ ATC260X_C,
+ ATC260X_D,
+ ATC260X_E,
+ ATC260X_F,
+ ATC260X_G,
+ ATC260X_H,
+};
+
+struct atc260x {
+ struct device *dev;
+
+ struct regmap *regmap;
+ const struct regmap_irq_chip *regmap_irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct mutex *regmap_mutex; /* mutex for custom regmap locking */
+
+ const struct mfd_cell *cells;
+ int nr_cells;
+ int irq;
+
+ enum atc260x_type ic_type;
+ enum atc260x_ver ic_ver;
+ const char *type_name;
+ unsigned int rev_reg;
+
+ const struct atc260x_init_regs *init_regs; /* regs for device init */
+};
+
+struct regmap_config;
+
+int atc260x_match_device(struct atc260x *atc260x, struct regmap_config *regmap_cfg);
+int atc260x_device_probe(struct atc260x *atc260x);
+
+#endif /* __LINUX_MFD_ATC260X_CORE_H */
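
The two entry points above suggest a split probe: the bus glue matches the variant (which is assumed here to fill in the regmap configuration), creates the regmap, then hands off to the common core. A minimal I2C-side sketch under those assumptions, needing <linux/i2c.h> and <linux/regmap.h>; the real bus driver may differ in detail:

	/* Illustrative bus glue for the probe flow implied above. */
	static int atc260x_i2c_probe(struct i2c_client *client)
	{
		struct regmap_config regmap_cfg;	/* assumed filled by the match step */
		struct atc260x *atc260x;
		int ret;

		atc260x = devm_kzalloc(&client->dev, sizeof(*atc260x), GFP_KERNEL);
		if (!atc260x)
			return -ENOMEM;

		atc260x->dev = &client->dev;
		atc260x->irq = client->irq;

		ret = atc260x_match_device(atc260x, &regmap_cfg);
		if (ret)
			return ret;

		atc260x->regmap = devm_regmap_init_i2c(client, &regmap_cfg);
		if (IS_ERR(atc260x->regmap))
			return PTR_ERR(atc260x->regmap);

		return atc260x_device_probe(atc260x);
	}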
diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h
index 1279ab1644b5..80d675a03b39 100644
--- a/include/linux/mfd/atmel-hlcdc.h
+++ b/include/linux/mfd/atmel-hlcdc.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Free Electrons
* Copyright (C) 2014 Atmel
*
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_MFD_HLCDC_H
@@ -33,6 +22,8 @@
#define ATMEL_HLCDC_DITHER BIT(6)
#define ATMEL_HLCDC_DISPDLY BIT(7)
#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8)
+#define ATMEL_XLCDC_MODE_MASK GENMASK(10, 8)
+#define ATMEL_XLCDC_DPI BIT(11)
#define ATMEL_HLCDC_PP BIT(10)
#define ATMEL_HLCDC_VSPSU BIT(12)
#define ATMEL_HLCDC_VSPHO BIT(13)
@@ -45,6 +36,12 @@
#define ATMEL_HLCDC_IDR 0x30
#define ATMEL_HLCDC_IMR 0x34
#define ATMEL_HLCDC_ISR 0x38
+#define ATMEL_XLCDC_ATTRE 0x3c
+
+#define ATMEL_XLCDC_BASE_UPDATE BIT(0)
+#define ATMEL_XLCDC_OVR1_UPDATE BIT(1)
+#define ATMEL_XLCDC_OVR3_UPDATE BIT(2)
+#define ATMEL_XLCDC_HEO_UPDATE BIT(3)
#define ATMEL_HLCDC_CLKPOL BIT(0)
#define ATMEL_HLCDC_CLKSEL BIT(2)
@@ -59,6 +56,8 @@
#define ATMEL_HLCDC_DISP BIT(2)
#define ATMEL_HLCDC_PWM BIT(3)
#define ATMEL_HLCDC_SIP BIT(4)
+#define ATMEL_XLCDC_SD BIT(5)
+#define ATMEL_XLCDC_CM BIT(6)
#define ATMEL_HLCDC_SOF BIT(0)
#define ATMEL_HLCDC_SYNCDIS BIT(1)
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 965b027e31b3..3c5aecf1d4b5 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions and registers to access AXP20X power management chip.
*
* Copyright (C) 2013, Carlo Caione <carlo@caione.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_AXP20X_H
@@ -15,17 +12,24 @@
enum axp20x_variants {
AXP152_ID = 0,
+ AXP192_ID,
AXP202_ID,
AXP209_ID,
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP313A_ID,
+ AXP323_ID,
+ AXP717_ID,
AXP803_ID,
AXP806_ID,
AXP809_ID,
+ AXP813_ID,
+ AXP15060_ID,
NR_AXP20X_VARIANTS,
};
+#define AXP192_DATACACHE(m) (0x06 + (m))
#define AXP20X_DATACACHE(m) (0x04 + (m))
/* Power supply */
@@ -34,7 +38,7 @@ enum axp20x_variants {
#define AXP152_ALDO_OP_MODE 0x13
#define AXP152_LDO0_CTRL 0x15
#define AXP152_DCDC2_V_OUT 0x23
-#define AXP152_DCDC2_V_SCAL 0x25
+#define AXP152_DCDC2_V_RAMP 0x25
#define AXP152_DCDC1_V_OUT 0x26
#define AXP152_DCDC3_V_OUT 0x27
#define AXP152_ALDO12_V_OUT 0x28
@@ -47,12 +51,19 @@ enum axp20x_variants {
#define AXP152_DCDC_FREQ 0x37
#define AXP152_DCDC_MODE 0x80
+#define AXP192_USB_OTG_STATUS 0x04
+#define AXP192_PWR_OUT_CTRL 0x12
+#define AXP192_DCDC2_V_OUT 0x23
+#define AXP192_DCDC1_V_OUT 0x26
+#define AXP192_DCDC3_V_OUT 0x27
+#define AXP192_LDO2_3_V_OUT 0x28
+
#define AXP20X_PWR_INPUT_STATUS 0x00
#define AXP20X_PWR_OP_MODE 0x01
#define AXP20X_USB_OTG_STATUS 0x02
#define AXP20X_PWR_OUT_CTRL 0x12
#define AXP20X_DCDC2_V_OUT 0x23
-#define AXP20X_DCDC2_LDO3_V_SCAL 0x25
+#define AXP20X_DCDC2_LDO3_V_RAMP 0x25
#define AXP20X_DCDC3_V_OUT 0x27
#define AXP20X_LDO24_V_OUT 0x28
#define AXP20X_LDO3_V_OUT 0x29
@@ -93,6 +104,77 @@ enum axp20x_variants {
#define AXP22X_ALDO3_V_OUT 0x2a
#define AXP22X_CHRG_CTRL3 0x35
+#define AXP313A_ON_INDICATE 0x00
+#define AXP313A_OUTPUT_CONTROL 0x10
+#define AXP313A_DCDC1_CONTROL 0x13
+#define AXP313A_DCDC2_CONTROL 0x14
+#define AXP313A_DCDC3_CONTROL 0x15
+#define AXP313A_ALDO1_CONTROL 0x16
+#define AXP313A_DLDO1_CONTROL 0x17
+#define AXP313A_SHUTDOWN_CTRL 0x1a
+#define AXP313A_IRQ_EN 0x20
+#define AXP313A_IRQ_STATE 0x21
+#define AXP323_DCDC_MODE_CTRL2 0x22
+
+#define AXP717_ON_INDICATE 0x00
+#define AXP717_PMU_STATUS_2 0x01
+#define AXP717_BC_DETECT 0x05
+#define AXP717_PMU_FAULT 0x08
+#define AXP717_MODULE_EN_CONTROL_1 0x0b
+#define AXP717_MIN_SYS_V_CONTROL 0x15
+#define AXP717_INPUT_VOL_LIMIT_CTRL 0x16
+#define AXP717_INPUT_CUR_LIMIT_CTRL 0x17
+#define AXP717_MODULE_EN_CONTROL_2 0x19
+#define AXP717_BOOST_CONTROL 0x1e
+#define AXP717_VSYS_V_POWEROFF 0x24
+#define AXP717_IRQ0_EN 0x40
+#define AXP717_IRQ1_EN 0x41
+#define AXP717_IRQ2_EN 0x42
+#define AXP717_IRQ3_EN 0x43
+#define AXP717_IRQ4_EN 0x44
+#define AXP717_IRQ0_STATE 0x48
+#define AXP717_IRQ1_STATE 0x49
+#define AXP717_IRQ2_STATE 0x4a
+#define AXP717_IRQ3_STATE 0x4b
+#define AXP717_IRQ4_STATE 0x4c
+#define AXP717_TS_PIN_CFG 0x50
+#define AXP717_ICC_CHG_SET 0x62
+#define AXP717_ITERM_CHG_SET 0x63
+#define AXP717_CV_CHG_SET 0x64
+#define AXP717_DCDC_OUTPUT_CONTROL 0x80
+#define AXP717_DCDC1_CONTROL 0x83
+#define AXP717_DCDC2_CONTROL 0x84
+#define AXP717_DCDC3_CONTROL 0x85
+#define AXP717_DCDC4_CONTROL 0x86
+#define AXP717_LDO0_OUTPUT_CONTROL 0x90
+#define AXP717_LDO1_OUTPUT_CONTROL 0x91
+#define AXP717_ALDO1_CONTROL 0x93
+#define AXP717_ALDO2_CONTROL 0x94
+#define AXP717_ALDO3_CONTROL 0x95
+#define AXP717_ALDO4_CONTROL 0x96
+#define AXP717_BLDO1_CONTROL 0x97
+#define AXP717_BLDO2_CONTROL 0x98
+#define AXP717_BLDO3_CONTROL 0x99
+#define AXP717_BLDO4_CONTROL 0x9a
+#define AXP717_CLDO1_CONTROL 0x9b
+#define AXP717_CLDO2_CONTROL 0x9c
+#define AXP717_CLDO3_CONTROL 0x9d
+#define AXP717_CLDO4_CONTROL 0x9e
+#define AXP717_CPUSLDO_CONTROL 0x9f
+#define AXP717_BATT_PERCENT_DATA 0xa4
+#define AXP717_ADC_CH_EN_CONTROL 0xc0
+#define AXP717_BATT_V_H 0xc4
+#define AXP717_BATT_V_L 0xc5
+#define AXP717_VBUS_V_H 0xc6
+#define AXP717_VBUS_V_L 0xc7
+#define AXP717_VSYS_V_H 0xc8
+#define AXP717_VSYS_V_L 0xc9
+#define AXP717_BATT_CHRG_I_H 0xca
+#define AXP717_BATT_CHRG_I_L 0xcb
+#define AXP717_ADC_DATA_SEL 0xcd
+#define AXP717_ADC_DATA_H 0xce
+#define AXP717_ADC_DATA_L 0xcf
+
#define AXP806_STARTUP_SRC 0x00
#define AXP806_CHIP_ID 0x03
#define AXP806_PWR_OUT_CTRL1 0x10
@@ -130,6 +212,42 @@ enum axp20x_variants {
#define AXP803_DCDC6_V_OUT 0x25
#define AXP803_DCDC_FREQ_CTRL 0x3b
+/* Other DCDC regulator control registers are the same as AXP803 */
+#define AXP813_DCDC7_V_OUT 0x26
+
+#define AXP15060_STARTUP_SRC 0x00
+#define AXP15060_PWR_OUT_CTRL1 0x10
+#define AXP15060_PWR_OUT_CTRL2 0x11
+#define AXP15060_PWR_OUT_CTRL3 0x12
+#define AXP15060_DCDC1_V_CTRL 0x13
+#define AXP15060_DCDC2_V_CTRL 0x14
+#define AXP15060_DCDC3_V_CTRL 0x15
+#define AXP15060_DCDC4_V_CTRL 0x16
+#define AXP15060_DCDC5_V_CTRL 0x17
+#define AXP15060_DCDC6_V_CTRL 0x18
+#define AXP15060_ALDO1_V_CTRL 0x19
+#define AXP15060_DCDC_MODE_CTRL1 0x1a
+#define AXP15060_DCDC_MODE_CTRL2 0x1b
+#define AXP15060_OUTPUT_MONITOR_DISCHARGE 0x1e
+#define AXP15060_IRQ_PWROK_VOFF 0x1f
+#define AXP15060_ALDO2_V_CTRL 0x20
+#define AXP15060_ALDO3_V_CTRL 0x21
+#define AXP15060_ALDO4_V_CTRL 0x22
+#define AXP15060_ALDO5_V_CTRL 0x23
+#define AXP15060_BLDO1_V_CTRL 0x24
+#define AXP15060_BLDO2_V_CTRL 0x25
+#define AXP15060_BLDO3_V_CTRL 0x26
+#define AXP15060_BLDO4_V_CTRL 0x27
+#define AXP15060_BLDO5_V_CTRL 0x28
+#define AXP15060_CLDO1_V_CTRL 0x29
+#define AXP15060_CLDO2_V_CTRL 0x2a
+#define AXP15060_CLDO3_V_CTRL 0x2b
+#define AXP15060_CLDO4_V_CTRL 0x2d
+#define AXP15060_CPUSLDO_V_CTRL 0x2e
+#define AXP15060_PWR_WAKEUP_CTRL 0x31
+#define AXP15060_PWR_DISABLE_DOWN_SEQ 0x32
+#define AXP15060_PEK_KEY 0x36
+
/* Interrupt */
#define AXP152_IRQ1_EN 0x40
#define AXP152_IRQ2_EN 0x41
@@ -138,6 +256,17 @@ enum axp20x_variants {
#define AXP152_IRQ2_STATE 0x49
#define AXP152_IRQ3_STATE 0x4a
+#define AXP192_IRQ1_EN 0x40
+#define AXP192_IRQ2_EN 0x41
+#define AXP192_IRQ3_EN 0x42
+#define AXP192_IRQ4_EN 0x43
+#define AXP192_IRQ1_STATE 0x44
+#define AXP192_IRQ2_STATE 0x45
+#define AXP192_IRQ3_STATE 0x46
+#define AXP192_IRQ4_STATE 0x47
+#define AXP192_IRQ5_EN 0x4a
+#define AXP192_IRQ5_STATE 0x4d
+
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
#define AXP20X_IRQ3_EN 0x42
@@ -151,7 +280,17 @@ enum axp20x_variants {
#define AXP20X_IRQ5_STATE 0x4c
#define AXP20X_IRQ6_STATE 0x4d
+#define AXP15060_IRQ1_EN 0x40
+#define AXP15060_IRQ2_EN 0x41
+#define AXP15060_IRQ1_STATE 0x48
+#define AXP15060_IRQ2_STATE 0x49
+
/* ADC */
+#define AXP192_GPIO2_V_ADC_H 0x68
+#define AXP192_GPIO2_V_ADC_L 0x69
+#define AXP192_GPIO3_V_ADC_H 0x6a
+#define AXP192_GPIO3_V_ADC_L 0x6b
+
#define AXP20X_ACIN_V_ADC_H 0x56
#define AXP20X_ACIN_V_ADC_L 0x57
#define AXP20X_ACIN_I_ADC_H 0x58
@@ -181,6 +320,8 @@ enum axp20x_variants {
#define AXP20X_IPSOUT_V_HIGH_L 0x7f
/* Power supply */
+#define AXP192_GPIO30_IN_RANGE 0x85
+
#define AXP20X_DCDC_MODE 0x80
#define AXP20X_ADC_EN1 0x82
#define AXP20X_ADC_EN2 0x83
@@ -209,6 +350,16 @@ enum axp20x_variants {
#define AXP152_PWM1_FREQ_Y 0x9c
#define AXP152_PWM1_DUTY_CYCLE 0x9d
+#define AXP192_GPIO0_CTRL 0x90
+#define AXP192_LDO_IO0_V_OUT 0x91
+#define AXP192_GPIO1_CTRL 0x92
+#define AXP192_GPIO2_CTRL 0x93
+#define AXP192_GPIO2_0_STATE 0x94
+#define AXP192_GPIO4_3_CTRL 0x95
+#define AXP192_GPIO4_3_STATE 0x96
+#define AXP192_GPIO2_0_PULL 0x97
+#define AXP192_N_RSTO_CTRL 0x9e
+
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
#define AXP20X_GPIO1_CTRL 0x92
@@ -221,6 +372,8 @@ enum axp20x_variants {
#define AXP22X_GPIO_STATE 0x94
#define AXP22X_GPIO_PULL_DOWN 0x95
+#define AXP15060_CLDO4_GPIO2_MODESET 0x2c
+
/* Battery */
#define AXP20X_CHRG_CC_31_24 0xb0
#define AXP20X_CHRG_CC_23_16 0xb1
@@ -262,6 +415,9 @@ enum axp20x_variants {
#define AXP288_RT_BATT_V_H 0xa0
#define AXP288_RT_BATT_V_L 0xa1
+#define AXP813_ACIN_PATH_CTRL 0x3a
+#define AXP813_ADC_RATE 0x85
+
/* Fuel Gauge */
#define AXP288_FG_RDC1_REG 0xba
#define AXP288_FG_RDC0_REG 0xbb
@@ -284,6 +440,17 @@ enum axp20x_variants {
/* Regulators IDs */
enum {
+ AXP192_DCDC1 = 0,
+ AXP192_DCDC2,
+ AXP192_DCDC3,
+ AXP192_LDO1,
+ AXP192_LDO2,
+ AXP192_LDO3,
+ AXP192_LDO_IO0,
+ AXP192_REG_ID_MAX
+};
+
+enum {
AXP20X_LDO1 = 0,
AXP20X_LDO2,
AXP20X_LDO3,
@@ -319,6 +486,38 @@ enum {
};
enum {
+ AXP313A_DCDC1 = 0,
+ AXP313A_DCDC2,
+ AXP313A_DCDC3,
+ AXP313A_ALDO1,
+ AXP313A_DLDO1,
+ AXP313A_RTC_LDO,
+ AXP313A_REG_ID_MAX,
+};
+
+enum {
+ AXP717_DCDC1 = 0,
+ AXP717_DCDC2,
+ AXP717_DCDC3,
+ AXP717_DCDC4,
+ AXP717_ALDO1,
+ AXP717_ALDO2,
+ AXP717_ALDO3,
+ AXP717_ALDO4,
+ AXP717_BLDO1,
+ AXP717_BLDO2,
+ AXP717_BLDO3,
+ AXP717_BLDO4,
+ AXP717_CLDO1,
+ AXP717_CLDO2,
+ AXP717_CLDO3,
+ AXP717_CLDO4,
+ AXP717_CPUSLDO,
+ AXP717_BOOST,
+ AXP717_REG_ID_MAX,
+};
+
+enum {
AXP806_DCDCA = 0,
AXP806_DCDCB,
AXP806_DCDCC,
@@ -387,6 +586,61 @@ enum {
AXP803_REG_ID_MAX,
};
+enum {
+ AXP813_DCDC1 = 0,
+ AXP813_DCDC2,
+ AXP813_DCDC3,
+ AXP813_DCDC4,
+ AXP813_DCDC5,
+ AXP813_DCDC6,
+ AXP813_DCDC7,
+ AXP813_ALDO1,
+ AXP813_ALDO2,
+ AXP813_ALDO3,
+ AXP813_DLDO1,
+ AXP813_DLDO2,
+ AXP813_DLDO3,
+ AXP813_DLDO4,
+ AXP813_ELDO1,
+ AXP813_ELDO2,
+ AXP813_ELDO3,
+ AXP813_FLDO1,
+ AXP813_FLDO2,
+ AXP813_FLDO3,
+ AXP813_RTC_LDO,
+ AXP813_LDO_IO0,
+ AXP813_LDO_IO1,
+ AXP813_SW,
+ AXP813_REG_ID_MAX,
+};
+
+enum {
+ AXP15060_DCDC1 = 0,
+ AXP15060_DCDC2,
+ AXP15060_DCDC3,
+ AXP15060_DCDC4,
+ AXP15060_DCDC5,
+ AXP15060_DCDC6,
+ AXP15060_ALDO1,
+ AXP15060_ALDO2,
+ AXP15060_ALDO3,
+ AXP15060_ALDO4,
+ AXP15060_ALDO5,
+ AXP15060_BLDO1,
+ AXP15060_BLDO2,
+ AXP15060_BLDO3,
+ AXP15060_BLDO4,
+ AXP15060_BLDO5,
+ AXP15060_CLDO1,
+ AXP15060_CLDO2,
+ AXP15060_CLDO3,
+ AXP15060_CLDO4,
+ AXP15060_CPUSLDO,
+ AXP15060_SW,
+ AXP15060_RTC_LDO,
+ AXP15060_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP152_IRQ_LDO0IN_CONNECT = 1,
@@ -400,14 +654,51 @@ enum {
AXP152_IRQ_PEK_SHORT,
AXP152_IRQ_PEK_LONG,
AXP152_IRQ_TIMER,
- AXP152_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP152_IRQ_PEK_FAL_EDGE,
+ AXP152_IRQ_PEK_RIS_EDGE,
AXP152_IRQ_GPIO3_INPUT,
AXP152_IRQ_GPIO2_INPUT,
AXP152_IRQ_GPIO1_INPUT,
AXP152_IRQ_GPIO0_INPUT,
};
+enum axp192_irqs {
+ AXP192_IRQ_ACIN_OVER_V = 1,
+ AXP192_IRQ_ACIN_PLUGIN,
+ AXP192_IRQ_ACIN_REMOVAL,
+ AXP192_IRQ_VBUS_OVER_V,
+ AXP192_IRQ_VBUS_PLUGIN,
+ AXP192_IRQ_VBUS_REMOVAL,
+ AXP192_IRQ_VBUS_V_LOW,
+ AXP192_IRQ_BATT_PLUGIN,
+ AXP192_IRQ_BATT_REMOVAL,
+ AXP192_IRQ_BATT_ENT_ACT_MODE,
+ AXP192_IRQ_BATT_EXIT_ACT_MODE,
+ AXP192_IRQ_CHARG,
+ AXP192_IRQ_CHARG_DONE,
+ AXP192_IRQ_BATT_TEMP_HIGH,
+ AXP192_IRQ_BATT_TEMP_LOW,
+ AXP192_IRQ_DIE_TEMP_HIGH,
+ AXP192_IRQ_CHARG_I_LOW,
+ AXP192_IRQ_DCDC1_V_LONG,
+ AXP192_IRQ_DCDC2_V_LONG,
+ AXP192_IRQ_DCDC3_V_LONG,
+ AXP192_IRQ_PEK_SHORT = 22,
+ AXP192_IRQ_PEK_LONG,
+ AXP192_IRQ_N_OE_PWR_ON,
+ AXP192_IRQ_N_OE_PWR_OFF,
+ AXP192_IRQ_VBUS_VALID,
+ AXP192_IRQ_VBUS_NOT_VALID,
+ AXP192_IRQ_VBUS_SESS_VALID,
+ AXP192_IRQ_VBUS_SESS_END,
+ AXP192_IRQ_LOW_PWR_LVL = 31,
+ AXP192_IRQ_TIMER,
+ AXP192_IRQ_GPIO2_INPUT = 37,
+ AXP192_IRQ_GPIO1_INPUT,
+ AXP192_IRQ_GPIO0_INPUT,
+};
+
enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
AXP20X_IRQ_ACIN_PLUGIN,
@@ -440,8 +731,9 @@ enum {
AXP20X_IRQ_LOW_PWR_LVL1,
AXP20X_IRQ_LOW_PWR_LVL2,
AXP20X_IRQ_TIMER,
- AXP20X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP20X_IRQ_PEK_FAL_EDGE,
+ AXP20X_IRQ_PEK_RIS_EDGE,
AXP20X_IRQ_GPIO3_INPUT,
AXP20X_IRQ_GPIO2_INPUT,
AXP20X_IRQ_GPIO1_INPUT,
@@ -470,8 +762,9 @@ enum axp22x_irqs {
AXP22X_IRQ_LOW_PWR_LVL1,
AXP22X_IRQ_LOW_PWR_LVL2,
AXP22X_IRQ_TIMER,
- AXP22X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP22X_IRQ_PEK_FAL_EDGE,
+ AXP22X_IRQ_PEK_RIS_EDGE,
AXP22X_IRQ_GPIO1_INPUT,
AXP22X_IRQ_GPIO0_INPUT,
};
@@ -513,6 +806,50 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp313a_irqs {
+ AXP313A_IRQ_DIE_TEMP_HIGH,
+ AXP313A_IRQ_DCDC2_V_LOW = 2,
+ AXP313A_IRQ_DCDC3_V_LOW,
+ AXP313A_IRQ_PEK_LONG,
+ AXP313A_IRQ_PEK_SHORT,
+ AXP313A_IRQ_PEK_FAL_EDGE,
+ AXP313A_IRQ_PEK_RIS_EDGE,
+};
+
+enum axp717_irqs {
+ AXP717_IRQ_VBUS_FAULT,
+ AXP717_IRQ_VBUS_OVER_V,
+ AXP717_IRQ_BOOST_OVER_V,
+ AXP717_IRQ_GAUGE_NEW_SOC = 4,
+ AXP717_IRQ_SOC_DROP_LVL1 = 6,
+ AXP717_IRQ_SOC_DROP_LVL2,
+ AXP717_IRQ_PEK_RIS_EDGE,
+ AXP717_IRQ_PEK_FAL_EDGE,
+ AXP717_IRQ_PEK_LONG,
+ AXP717_IRQ_PEK_SHORT,
+ AXP717_IRQ_BATT_REMOVAL,
+ AXP717_IRQ_BATT_PLUGIN,
+ AXP717_IRQ_VBUS_REMOVAL,
+ AXP717_IRQ_VBUS_PLUGIN,
+ AXP717_IRQ_BATT_OVER_V,
+ AXP717_IRQ_CHARG_TIMER,
+ AXP717_IRQ_DIE_TEMP_HIGH,
+ AXP717_IRQ_CHARG,
+ AXP717_IRQ_CHARG_DONE,
+ AXP717_IRQ_BATT_OVER_CURR,
+ AXP717_IRQ_LDO_OVER_CURR,
+ AXP717_IRQ_WDOG_EXPIRE,
+ AXP717_IRQ_BATT_ACT_TEMP_LOW,
+ AXP717_IRQ_BATT_ACT_TEMP_HIGH,
+ AXP717_IRQ_BATT_CHG_TEMP_LOW,
+ AXP717_IRQ_BATT_CHG_TEMP_HIGH,
+ AXP717_IRQ_BATT_QUIT_TEMP_HIGH,
+ AXP717_IRQ_BC_USB_CHNG = 30,
+ AXP717_IRQ_BC_USB_DONE,
+ AXP717_IRQ_TYPEC_PLUGIN = 37,
+ AXP717_IRQ_TYPEC_REMOVE,
+};
+
enum axp803_irqs {
AXP803_IRQ_ACIN_OVER_V = 1,
AXP803_IRQ_ACIN_PLUGIN,
@@ -539,8 +876,9 @@ enum axp803_irqs {
AXP803_IRQ_LOW_PWR_LVL1,
AXP803_IRQ_LOW_PWR_LVL2,
AXP803_IRQ_TIMER,
- AXP803_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP803_IRQ_PEK_FAL_EDGE,
+ AXP803_IRQ_PEK_RIS_EDGE,
AXP803_IRQ_PEK_SHORT,
AXP803_IRQ_PEK_LONG,
AXP803_IRQ_PEK_OVER_OFF,
@@ -558,11 +896,11 @@ enum axp806_irqs {
AXP806_IRQ_DCDCC_V_LOW,
AXP806_IRQ_DCDCD_V_LOW,
AXP806_IRQ_DCDCE_V_LOW,
- AXP806_IRQ_PWROK_LONG,
- AXP806_IRQ_PWROK_SHORT,
+ AXP806_IRQ_POK_LONG,
+ AXP806_IRQ_POK_SHORT,
AXP806_IRQ_WAKEUP,
- AXP806_IRQ_PWROK_FALL,
- AXP806_IRQ_PWROK_RISE,
+ AXP806_IRQ_POK_FALL,
+ AXP806_IRQ_POK_RISE,
};
enum axp809_irqs {
@@ -591,8 +929,9 @@ enum axp809_irqs {
AXP809_IRQ_LOW_PWR_LVL1,
AXP809_IRQ_LOW_PWR_LVL2,
AXP809_IRQ_TIMER,
- AXP809_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP809_IRQ_PEK_FAL_EDGE,
+ AXP809_IRQ_PEK_RIS_EDGE,
AXP809_IRQ_PEK_SHORT,
AXP809_IRQ_PEK_LONG,
AXP809_IRQ_PEK_OVER_OFF,
@@ -600,24 +939,36 @@ enum axp809_irqs {
AXP809_IRQ_GPIO0_INPUT,
};
+enum axp15060_irqs {
+ AXP15060_IRQ_DIE_TEMP_HIGH_LV1 = 1,
+ AXP15060_IRQ_DIE_TEMP_HIGH_LV2,
+ AXP15060_IRQ_DCDC1_V_LOW,
+ AXP15060_IRQ_DCDC2_V_LOW,
+ AXP15060_IRQ_DCDC3_V_LOW,
+ AXP15060_IRQ_DCDC4_V_LOW,
+ AXP15060_IRQ_DCDC5_V_LOW,
+ AXP15060_IRQ_DCDC6_V_LOW,
+ AXP15060_IRQ_PEK_LONG,
+ AXP15060_IRQ_PEK_SHORT,
+ AXP15060_IRQ_GPIO1_INPUT,
+ AXP15060_IRQ_PEK_FAL_EDGE,
+ AXP15060_IRQ_PEK_RIS_EDGE,
+ AXP15060_IRQ_GPIO2_INPUT,
+};
+
struct axp20x_dev {
struct device *dev;
int irq;
unsigned long irq_flags;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
- long variant;
+ enum axp20x_variants variant;
int nr_cells;
- struct mfd_cell *cells;
+ const struct mfd_cell *cells;
const struct regmap_config *regmap_cfg;
const struct regmap_irq_chip *regmap_irq_chip;
};
-struct axp288_extcon_pdata {
- /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
- struct gpio_desc *gpio_mux_cntl;
-};
-
/* generic helper function for reading 9-16 bit wide regs */
static inline int axp20x_read_variable_width(struct regmap *regmap,
unsigned int reg, unsigned int width)
@@ -669,6 +1020,6 @@ int axp20x_device_probe(struct axp20x_dev *axp20x);
*
* This tells the axp20x core to remove the associated mfd devices
*/
-int axp20x_device_remove(struct axp20x_dev *axp20x);
+void axp20x_device_remove(struct axp20x_dev *axp20x);
#endif /* __LINUX_MFD_AXP20X_H */
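For context: axp20x_read_variable_width() stitches a high/low register pair into one value, which is how the _H/_L ADC registers above are consumed. A minimal sketch of a caller, assuming the regmap comes from a parent struct axp20x_dev and that 12 bits matches the channel's layout; the helper name is illustrative:

#include <linux/mfd/axp20x.h>

/* Illustrative sketch: read the 12-bit ACIN voltage ADC code. */
static int axp20x_get_acin_adc(struct axp20x_dev *axp20x, int *val)
{
	int ret;

	/* Reads AXP20X_ACIN_V_ADC_H then _L; returns value or -errno. */
	ret = axp20x_read_variable_width(axp20x->regmap,
					 AXP20X_ACIN_V_ADC_H, 12);
	if (ret < 0)
		return ret;

	*val = ret;	/* raw ADC code; scaling is channel-specific */
	return 0;
}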
diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h
new file mode 100644
index 000000000000..f70a810c55f7
--- /dev/null
+++ b/include/linux/mfd/bcm2835-pm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef BCM2835_MFD_PM_H
+#define BCM2835_MFD_PM_H
+
+#include <linux/regmap.h>
+
+struct bcm2835_pm {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *asb;
+ void __iomem *rpivid_asb;
+};
+
+#endif /* BCM2835_MFD_PM_H */
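This struct is shared between the bcm2835-pm MFD and its children (watchdog, power domains), which conventionally reach it through the parent's driver data. A hedged sketch of that pattern in a child's probe; the function name is illustrative:

#include <linux/mfd/bcm2835-pm.h>
#include <linux/platform_device.h>

/* Sketch: child of the bcm2835-pm MFD fetching the shared state. */
static int bcm2835_pm_child_probe(struct platform_device *pdev)
{
	/* Assumes the parent MFD driver set this drvdata at probe. */
	struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);

	if (!pm || !pm->base)
		return -ENODEV;

	/* MMIO through pm->base / pm->asb from here on. */
	return 0;
}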
diff --git a/include/linux/mfd/bcm590xx.h b/include/linux/mfd/bcm590xx.h
index 267aedee1c7a..5a5783abd47b 100644
--- a/include/linux/mfd/bcm590xx.h
+++ b/include/linux/mfd/bcm590xx.h
@@ -1,14 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Broadcom BCM590xx PMU
*
* Copyright 2014 Linaro Limited
* Author: Matt Porter <mporter@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_BCM590XX_H
@@ -18,6 +13,26 @@
#include <linux/i2c.h>
#include <linux/regmap.h>
+/* PMU ID register values; also used as device type */
+#define BCM590XX_PMUID_BCM59054 0x54
+#define BCM590XX_PMUID_BCM59056 0x56
+
+/* Known chip revision IDs */
+#define BCM59054_REV_DIGITAL_A1 1
+#define BCM59054_REV_ANALOG_A1 2
+
+#define BCM59056_REV_DIGITAL_A0 1
+#define BCM59056_REV_ANALOG_A0 1
+
+#define BCM59056_REV_DIGITAL_B0 2
+#define BCM59056_REV_ANALOG_B0 2
+
+/* regmap types */
+enum bcm590xx_regmap_type {
+ BCM590XX_REGMAP_PRI,
+ BCM590XX_REGMAP_SEC,
+};
+
/* max register address */
#define BCM590XX_MAX_REGISTER_PRI 0xe7
#define BCM590XX_MAX_REGISTER_SEC 0xf0
@@ -28,7 +43,13 @@ struct bcm590xx {
struct i2c_client *i2c_sec;
struct regmap *regmap_pri;
struct regmap *regmap_sec;
- unsigned int id;
+
+ /* PMU ID value; also used as device type */
+ u8 pmu_id;
+
+ /* Chip revision, read from PMUREV reg */
+ u8 rev_digital;
+ u8 rev_analog;
};
#endif /* __LINUX_MFD_BCM590XX_H */
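With the single id field split into a PMU ID plus digital/analog revisions, consumers can branch per chip and per silicon revision. A sketch, assuming a sub-driver that holds a struct bcm590xx pointer; the helper name is made up:

#include <linux/mfd/bcm590xx.h>

/* Sketch: pick per-variant behaviour from the new fields. */
static int bcm590xx_check_variant(struct bcm590xx *bcm590xx)
{
	switch (bcm590xx->pmu_id) {
	case BCM590XX_PMUID_BCM59054:
		return 0;	/* e.g. select BCM59054 regulator tables */
	case BCM590XX_PMUID_BCM59056:
		if (bcm590xx->rev_digital == BCM59056_REV_DIGITAL_B0)
			;	/* B0-specific quirks would go here */
		return 0;
	default:
		return -ENODEV;
	}
}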
diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h
new file mode 100644
index 000000000000..8efd99d07c9e
--- /dev/null
+++ b/include/linux/mfd/bd9571mwv.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ROHM BD9571MWV-M and BD9574MWF-M driver
+ *
+ * Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
+ * Copyright (C) 2020 Renesas Electronics Corporation
+ *
+ * Based on the TPS65086 driver
+ */
+
+#ifndef __LINUX_MFD_BD9571MWV_H
+#define __LINUX_MFD_BD9571MWV_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* List of registers for BD9571MWV and BD9574MWF */
+#define BD9571MWV_VENDOR_CODE 0x00
+#define BD9571MWV_VENDOR_CODE_VAL 0xdb
+#define BD9571MWV_PRODUCT_CODE 0x01
+#define BD9571MWV_PRODUCT_CODE_BD9571MWV 0x60
+#define BD9571MWV_PRODUCT_CODE_BD9574MWF 0x74
+#define BD9571MWV_PRODUCT_REVISION 0x02
+
+#define BD9571MWV_I2C_FUSA_MODE 0x10
+#define BD9571MWV_I2C_MD2_E1_BIT_1 0x11
+#define BD9571MWV_I2C_MD2_E1_BIT_2 0x12
+
+#define BD9571MWV_BKUP_MODE_CNT 0x20
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK GENMASK(3, 0)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0 BIT(0)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1 BIT(1)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0C BIT(2)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1C BIT(3)
+#define BD9571MWV_BKUP_MODE_STATUS 0x21
+#define BD9571MWV_BKUP_RECOVERY_CNT 0x22
+#define BD9571MWV_BKUP_CTRL_TIM_CNT 0x23
+#define BD9571MWV_WAITBKUP_WDT_CNT 0x24
+#define BD9571MWV_128H_TIM_CNT 0x26
+#define BD9571MWV_QLLM_CNT 0x27
+
+#define BD9571MWV_AVS_SET_MONI 0x31
+#define BD9571MWV_AVS_SET_MONI_MASK 0x3
+#define BD9571MWV_AVS_VD09_VID(n) (0x32 + (n))
+#define BD9571MWV_AVS_DVFS_VID(n) (0x36 + (n))
+
+#define BD9571MWV_VD18_VID 0x42
+#define BD9571MWV_VD25_VID 0x43
+#define BD9571MWV_VD33_VID 0x44
+
+#define BD9571MWV_DVFS_VINIT 0x50
+#define BD9574MWF_VD09_VINIT 0x51
+#define BD9571MWV_DVFS_SETVMAX 0x52
+#define BD9571MWV_DVFS_BOOSTVID 0x53
+#define BD9571MWV_DVFS_SETVID 0x54
+#define BD9571MWV_DVFS_MONIVDAC 0x55
+#define BD9571MWV_DVFS_PGD_CNT 0x56
+
+#define BD9571MWV_GPIO_DIR 0x60
+#define BD9571MWV_GPIO_OUT 0x61
+#define BD9571MWV_GPIO_IN 0x62
+#define BD9571MWV_GPIO_DEB 0x63
+#define BD9571MWV_GPIO_INT_SET 0x64
+#define BD9571MWV_GPIO_INT 0x65
+#define BD9571MWV_GPIO_INTMASK 0x66
+#define BD9574MWF_GPIO_MUX 0x67
+
+#define BD9571MWV_REG_KEEP(n) (0x70 + (n))
+
+#define BD9571MWV_PMIC_INTERNAL_STATUS 0x80
+#define BD9571MWV_PROT_ERROR_STATUS0 0x81
+#define BD9571MWV_PROT_ERROR_STATUS1 0x82
+#define BD9571MWV_PROT_ERROR_STATUS2 0x83
+#define BD9571MWV_PROT_ERROR_STATUS3 0x84
+#define BD9571MWV_PROT_ERROR_STATUS4 0x85
+#define BD9574MWF_PROT_ERROR_STATUS5 0x86
+#define BD9574MWF_SYSTEM_ERROR_STATUS 0x87
+
+#define BD9571MWV_INT_INTREQ 0x90
+#define BD9571MWV_INT_INTREQ_MD1_INT BIT(0)
+#define BD9571MWV_INT_INTREQ_MD2_E1_INT BIT(1)
+#define BD9571MWV_INT_INTREQ_MD2_E2_INT BIT(2)
+#define BD9571MWV_INT_INTREQ_PROT_ERR_INT BIT(3)
+#define BD9571MWV_INT_INTREQ_GP_INT BIT(4)
+#define BD9571MWV_INT_INTREQ_128H_OF_INT BIT(5)
+#define BD9571MWV_INT_INTREQ_WDT_OF_INT BIT(6)
+#define BD9571MWV_INT_INTREQ_BKUP_TRG_INT BIT(7)
+#define BD9571MWV_INT_INTMASK 0x91
+
+#define BD9574MWF_SSCG_CNT 0xA0
+#define BD9574MWF_POFFB_MRB 0xA1
+#define BD9574MWF_SMRB_WR_PROT 0xA2
+#define BD9574MWF_SMRB_ASSERT 0xA3
+#define BD9574MWF_SMRB_STATUS 0xA4
+
+#define BD9571MWV_ACCESS_KEY 0xff
+
+/* Define the BD9571MWV IRQ numbers */
+enum bd9571mwv_irqs {
+ BD9571MWV_IRQ_MD1,
+ BD9571MWV_IRQ_MD2_E1,
+ BD9571MWV_IRQ_MD2_E2,
+ BD9571MWV_IRQ_PROT_ERR,
+ BD9571MWV_IRQ_GP,
+ BD9571MWV_IRQ_128H_OF, /* BKUP_HOLD on BD9574MWF */
+ BD9571MWV_IRQ_WDT_OF,
+ BD9571MWV_IRQ_BKUP_TRG,
+};
+#endif /* __LINUX_MFD_BD9571MWV_H */
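The register map above is intended to be driven through regmap. A hedged sketch writing one of the four AVS VD09 VID slots; the helper name is illustrative and the regmap pointer is assumed to come from the MFD core:

#include <linux/mfd/bd9571mwv.h>
#include <linux/regmap.h>

/* Sketch: program AVS VD09 VID slot n (0..3). */
static int bd9571mwv_set_vd09_vid(struct regmap *regmap, unsigned int n,
				  unsigned int vid)
{
	if (n > 3)
		return -EINVAL;

	return regmap_write(regmap, BD9571MWV_AVS_VD09_VID(n), vid);
}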
diff --git a/include/linux/mfd/bq257xx.h b/include/linux/mfd/bq257xx.h
new file mode 100644
index 000000000000..1d6ddc7fb09f
--- /dev/null
+++ b/include/linux/mfd/bq257xx.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Register definitions for TI BQ257XX
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef __LINUX_MFD_BQ257XX_H
+#define __LINUX_MFD_BQ257XX_H
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define BQ25703_CHARGE_OPTION_0 0x00
+#define BQ25703_CHARGE_CURRENT 0x02
+#define BQ25703_MAX_CHARGE_VOLT 0x04
+#define BQ25703_OTG_VOLT 0x06
+#define BQ25703_OTG_CURRENT 0x08
+#define BQ25703_INPUT_VOLTAGE 0x0a
+#define BQ25703_MIN_VSYS 0x0c
+#define BQ25703_IIN_HOST 0x0e
+#define BQ25703_CHARGER_STATUS 0x20
+#define BQ25703_PROCHOT_STATUS 0x22
+#define BQ25703_IIN_DPM 0x24
+#define BQ25703_ADCIBAT_CHG 0x28
+#define BQ25703_ADCIINCMPIN 0x2a
+#define BQ25703_ADCVSYSVBAT 0x2c
+#define BQ25703_MANUFACT_DEV_ID 0x2e
+#define BQ25703_CHARGE_OPTION_1 0x30
+#define BQ25703_CHARGE_OPTION_2 0x32
+#define BQ25703_CHARGE_OPTION_3 0x34
+#define BQ25703_ADC_OPTION 0x3a
+
+#define BQ25703_EN_LWPWR BIT(15)
+#define BQ25703_WDTMR_ADJ_MASK GENMASK(14, 13)
+#define BQ25703_WDTMR_DISABLE 0
+#define BQ25703_WDTMR_5_SEC 1
+#define BQ25703_WDTMR_88_SEC 2
+#define BQ25703_WDTMR_175_SEC 3
+
+#define BQ25703_ICHG_MASK GENMASK(12, 6)
+#define BQ25703_ICHG_STEP_UA 64000
+#define BQ25703_ICHG_MIN_UA 64000
+#define BQ25703_ICHG_MAX_UA 8128000
+
+#define BQ25703_MAX_CHARGE_VOLT_MASK GENMASK(15, 4)
+#define BQ25703_VBATREG_STEP_UV 16000
+#define BQ25703_VBATREG_MIN_UV 1024000
+#define BQ25703_VBATREG_MAX_UV 19200000
+
+#define BQ25703_OTG_VOLT_MASK GENMASK(13, 6)
+#define BQ25703_OTG_VOLT_STEP_UV 64000
+#define BQ25703_OTG_VOLT_MIN_UV 4480000
+#define BQ25703_OTG_VOLT_MAX_UV 20800000
+#define BQ25703_OTG_VOLT_NUM_VOLT 256
+
+#define BQ25703_OTG_CUR_MASK GENMASK(14, 8)
+#define BQ25703_OTG_CUR_STEP_UA 50000
+#define BQ25703_OTG_CUR_MAX_UA 6350000
+
+#define BQ25703_MINVSYS_MASK GENMASK(13, 8)
+#define BQ25703_MINVSYS_STEP_UV 256000
+#define BQ25703_MINVSYS_MIN_UV 1024000
+#define BQ25703_MINVSYS_MAX_UV 16128000
+
+#define BQ25703_STS_AC_STAT BIT(15)
+#define BQ25703_STS_IN_FCHRG BIT(10)
+#define BQ25703_STS_IN_PCHRG BIT(9)
+#define BQ25703_STS_FAULT_ACOV BIT(7)
+#define BQ25703_STS_FAULT_BATOC BIT(6)
+#define BQ25703_STS_FAULT_ACOC BIT(5)
+
+#define BQ25703_IINDPM_MASK GENMASK(14, 8)
+#define BQ25703_IINDPM_STEP_UA 50000
+#define BQ25703_IINDPM_MIN_UA 50000
+#define BQ25703_IINDPM_MAX_UA 6400000
+#define BQ25703_IINDPM_DEFAULT_UA 3300000
+#define BQ25703_IINDPM_OFFSET_UA 50000
+
+#define BQ25703_ADCIBAT_DISCHG_MASK GENMASK(6, 0)
+#define BQ25703_ADCIBAT_CHG_MASK GENMASK(14, 8)
+#define BQ25703_ADCIBAT_CHG_STEP_UA 64000
+#define BQ25703_ADCIBAT_DIS_STEP_UA 256000
+
+#define BQ25703_ADCIIN GENMASK(15, 8)
+#define BQ25703_ADCIINCMPIN_STEP 50000
+
+#define BQ25703_ADCVSYS_MASK GENMASK(15, 8)
+#define BQ25703_ADCVBAT_MASK GENMASK(7, 0)
+#define BQ25703_ADCVSYSVBAT_OFFSET_UV 2880000
+#define BQ25703_ADCVSYSVBAT_STEP 64000
+
+#define BQ25703_ADC_CH_MASK GENMASK(7, 0)
+#define BQ25703_ADC_CONV_EN BIT(15)
+#define BQ25703_ADC_START BIT(14)
+#define BQ25703_ADC_FULL_SCALE BIT(13)
+#define BQ25703_ADC_CMPIN_EN BIT(7)
+#define BQ25703_ADC_VBUS_EN BIT(6)
+#define BQ25703_ADC_PSYS_EN BIT(5)
+#define BQ25703_ADC_IIN_EN BIT(4)
+#define BQ25703_ADC_IDCHG_EN BIT(3)
+#define BQ25703_ADC_ICHG_EN BIT(2)
+#define BQ25703_ADC_VSYS_EN BIT(1)
+#define BQ25703_ADC_VBAT_EN BIT(0)
+
+#define BQ25703_EN_OTG_MASK BIT(12)
+
+struct bq257xx_device {
+ struct i2c_client *client;
+ struct regmap *regmap;
+};
+
+#endif /* __LINUX_MFD_BQ257XX_H */
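The *_MASK/*_STEP pairs above describe linear register fields, so FIELD_GET()/FIELD_PREP() from <linux/bitfield.h> convert between raw codes and microamps. A sketch for the charge-current field; the helper names are illustrative:

#include <linux/bitfield.h>
#include <linux/mfd/bq257xx.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* BQ25703_CHARGE_CURRENT register value -> microamps. */
static unsigned int bq25703_ichg_reg_to_ua(u16 reg)
{
	return FIELD_GET(BQ25703_ICHG_MASK, reg) * BQ25703_ICHG_STEP_UA;
}

/* Microamps -> register value, clamped to the documented range. */
static u16 bq25703_ichg_ua_to_reg(unsigned int ua)
{
	ua = clamp_val(ua, BQ25703_ICHG_MIN_UA, BQ25703_ICHG_MAX_UA);
	return FIELD_PREP(BQ25703_ICHG_MASK, ua / BQ25703_ICHG_STEP_UA);
}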
diff --git a/include/linux/mfd/cgbc.h b/include/linux/mfd/cgbc.h
new file mode 100644
index 000000000000..badbec4c7033
--- /dev/null
+++ b/include/linux/mfd/cgbc.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Congatec Board Controller driver definitions
+ *
+ * Copyright (C) 2024 Bootlin
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#ifndef _LINUX_MFD_CGBC_H_
+#define _LINUX_MFD_CGBC_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/**
+ * struct cgbc_version - Board Controller device version structure
+ * @feature: Board Controller feature number
+ * @major: Board Controller major revision
+ * @minor: Board Controller minor revision
+ */
+struct cgbc_version {
+ unsigned char feature;
+ unsigned char major;
+ unsigned char minor;
+};
+
+/**
+ * struct cgbc_device_data - Internal representation of the Board Controller device
+ * @io_session: Pointer to the session IO memory
+ * @io_cmd: Pointer to the command IO memory
+ * @session: Session id returned by the Board Controller
+ * @dev: Pointer to kernel device structure
+ * @version: Board Controller version structure
+ * @lock: Board Controller mutex
+ */
+struct cgbc_device_data {
+ void __iomem *io_session;
+ void __iomem *io_cmd;
+ u8 session;
+ struct device *dev;
+ struct cgbc_version version;
+ struct mutex lock;
+};
+
+int cgbc_command(struct cgbc_device_data *cgbc, void *cmd, unsigned int cmd_size,
+ void *data, unsigned int data_size, u8 *status);
+
+#endif /*_LINUX_MFD_CGBC_H_*/
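cgbc_command() is the single transport the Board Controller cells funnel through. A hedged usage sketch; the command byte and buffer sizes here are placeholders, not a documented opcode:

#include <linux/device.h>
#include <linux/mfd/cgbc.h>

/* Sketch: hypothetical query through the shared command channel. */
static int cgbc_example_query(struct cgbc_device_data *cgbc)
{
	u8 cmd = 0x11;	/* placeholder command byte */
	u8 data[2];
	u8 status;
	int ret;

	ret = cgbc_command(cgbc, &cmd, sizeof(cmd), data, sizeof(data),
			   &status);
	if (ret)
		return ret;

	dev_dbg(cgbc->dev, "status=%#x data=%02x %02x\n",
		status, data[0], data[1]);
	return 0;
}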
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 99c0395fe1f9..faeea7abd688 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* drivers/mfd/mfd-core.h
*
* core MFD support
* Copyright (c) 2006 Ian Molton
* Copyright (c) 2007 Dmitry Baryshkov
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef MFD_CORE_H
@@ -16,8 +12,45 @@
#include <linux/platform_device.h>
+#define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource))
+
+#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, _use_of_reg, _match) \
+ { \
+ .name = (_name), \
+ .resources = (_res), \
+ .num_resources = MFD_RES_SIZE((_res)), \
+ .platform_data = (_pdata), \
+ .pdata_size = (_pdsize), \
+ .of_compatible = (_compat), \
+ .of_reg = (_of_reg), \
+ .use_of_reg = (_use_of_reg), \
+ .acpi_match = (_match), \
+ .id = (_id), \
+ }
+
+#define MFD_CELL_OF_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL)
+
+#define MFD_CELL_OF(_name, _res, _pdata, _pdsize, _id, _compat) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL)
+
+#define MFD_CELL_ACPI(_name, _res, _pdata, _pdsize, _id, _match) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match)
+
+#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, NULL)
+
+#define MFD_CELL_RES(_name, _res) \
+ MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_CELL_NAME(_name) \
+ MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_DEP_LEVEL_NORMAL 0
+#define MFD_DEP_LEVEL_HIGH 1
+
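These helpers replace hand-rolled struct mfd_cell initializers. A short sketch of a parent driver describing its cells and registering them via devm_mfd_add_devices(); all names and the compatible string are made up for illustration:

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

/* Illustrative cell table. */
static const struct mfd_cell foo_cells[] = {
	MFD_CELL_NAME("foo-gpio"),
	MFD_CELL_OF("foo-rtc", NULL, NULL, 0, 0, "vendor,foo-rtc"),
};

static int foo_probe(struct platform_device *pdev)
{
	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
				    foo_cells, ARRAY_SIZE(foo_cells),
				    NULL, 0, NULL);
}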
struct irq_domain;
-struct property_entry;
+struct software_node;
/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */
struct mfd_cell_acpi_match {
@@ -33,30 +66,36 @@ struct mfd_cell_acpi_match {
struct mfd_cell {
const char *name;
int id;
-
- /* refcounting for multiple drivers to use a single cell */
- atomic_t *usage_count;
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
+ int level;
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
/* platform data passed to the sub devices drivers */
- void *platform_data;
+ const void *platform_data;
size_t pdata_size;
- /* device properties passed to the sub devices drivers */
- struct property_entry *properties;
+ /* Matches ACPI */
+ const struct mfd_cell_acpi_match *acpi_match;
+
+ /* Software node for the device. */
+ const struct software_node *swnode;
/*
* Device Tree compatible string
- * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details
+ * See: Documentation/devicetree/usage-model.rst Chapter 2.2 for details
*/
const char *of_compatible;
- /* Matches ACPI */
- const struct mfd_cell_acpi_match *acpi_match;
+ /*
+ * Address as defined in Device Tree. Used to complement 'of_compatible'
+ * (above) when matching OF nodes with devices that have identical
+ * compatible strings
+ */
+ u64 of_reg;
+
+ /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+ bool use_of_reg;
/*
* These resources can be specified relative to the parent device.
@@ -77,38 +116,11 @@ struct mfd_cell {
/* A list of regulator supplies that should be mapped to the MFD
* device rather than the child device when requested
*/
- const char * const *parent_supplies;
int num_parent_supplies;
+ const char * const *parent_supplies;
};
/*
- * Convenience functions for clients using shared cells. Refcounting
- * happens automatically, with the cell's enable/disable callbacks
- * being called only when a device is first being enabled or no other
- * clients are making use of it.
- */
-extern int mfd_cell_enable(struct platform_device *pdev);
-extern int mfd_cell_disable(struct platform_device *pdev);
-
-/*
- * "Clone" multiple platform devices for a single cell. This is to be used
- * for devices that have multiple users of a cell. For example, if an mfd
- * driver wants the cell "foo" to be used by a GPIO driver, an MTD driver,
- * and a platform driver, the following bit of code would be use after first
- * calling mfd_add_devices():
- *
- * const char *fclones[] = { "foo-gpio", "foo-mtd" };
- * err = mfd_clone_cells("foo", fclones, ARRAY_SIZE(fclones));
- *
- * Each driver (MTD, GPIO, and platform driver) would then register
- * platform_drivers for "foo-mtd", "foo-gpio", and "foo", respectively.
- * The cell's .enable/.disable hooks should be used to deal with hardware
- * resource contention.
- */
-extern int mfd_clone_cell(const char *cell, const char **clones,
- size_t n_clones);
-
-/*
* Given a platform device that's been created by mfd_add_devices(), fetch
* the mfd_cell that created it.
*/
@@ -130,6 +142,7 @@ static inline int mfd_add_hotplug_devices(struct device *parent,
}
extern void mfd_remove_devices(struct device *parent);
+extern void mfd_remove_devices_late(struct device *parent);
extern int devm_mfd_add_devices(struct device *dev, int id,
const struct mfd_cell *cells, int n_devs,
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
deleted file mode 100644
index 4e887ba22635..000000000000
--- a/include/linux/mfd/cros_ec.h
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * ChromeOS EC multi-function device
- *
- * Copyright (C) 2012 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_MFD_CROS_EC_H
-#define __LINUX_MFD_CROS_EC_H
-
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/notifier.h>
-#include <linux/mfd/cros_ec_commands.h>
-#include <linux/mutex.h>
-
-#define CROS_EC_DEV_NAME "cros_ec"
-#define CROS_EC_DEV_PD_NAME "cros_pd"
-
-/*
- * The EC is unresponsive for a time after a reboot command. Add a
- * simple delay to make sure that the bus stays locked.
- */
-#define EC_REBOOT_DELAY_MS 50
-
-/*
- * Max bus-specific overhead incurred by request/responses.
- * I2C requires 1 additional byte for requests.
- * I2C requires 2 additional bytes for responses.
- * SPI requires up to 32 additional bytes for responses.
- * */
-#define EC_PROTO_VERSION_UNKNOWN 0
-#define EC_MAX_REQUEST_OVERHEAD 1
-#define EC_MAX_RESPONSE_OVERHEAD 32
-
-/*
- * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
- */
-enum {
- EC_MSG_TX_HEADER_BYTES = 3,
- EC_MSG_TX_TRAILER_BYTES = 1,
- EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES +
- EC_MSG_TX_TRAILER_BYTES,
- EC_MSG_RX_PROTO_BYTES = 3,
-
- /* Max length of messages for proto 2*/
- EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
- EC_MSG_TX_PROTO_BYTES,
-
- EC_MAX_MSG_BYTES = 64 * 1024,
-};
-
-/*
- * @version: Command version number (often 0)
- * @command: Command to send (EC_CMD_...)
- * @outsize: Outgoing length in bytes
- * @insize: Max number of bytes to accept from EC
- * @result: EC's response to the command (separate from communication failure)
- * @data: Where to put the incoming data from EC and outgoing data to EC
- */
-struct cros_ec_command {
- uint32_t version;
- uint32_t command;
- uint32_t outsize;
- uint32_t insize;
- uint32_t result;
- uint8_t data[0];
-};
-
-/**
- * struct cros_ec_device - Information about a ChromeOS EC device
- *
- * @phys_name: name of physical comms layer (e.g. 'i2c-4')
- * @dev: Device pointer for physical comms device
- * @was_wake_device: true if this device was set to wake the system from
- * sleep at the last suspend
- * @cmd_readmem: direct read of the EC memory-mapped region, if supported
- * @offset is within EC_LPC_ADDR_MEMMAP region.
- * @bytes: number of bytes to read. zero means "read a string" (including
- * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read.
- * Caller must ensure that the buffer is large enough for the result when
- * reading a string.
- *
- * @priv: Private data
- * @irq: Interrupt to use
- * @id: Device id
- * @din: input buffer (for data from EC)
- * @dout: output buffer (for data to EC)
- * \note
- * These two buffers will always be dword-aligned and include enough
- * space for up to 7 word-alignment bytes also, so we can ensure that
- * the body of the message is always dword-aligned (64-bit).
- * We use this alignment to keep ARM and x86 happy. Probably word
- * alignment would be OK, there might be a small performance advantage
- * to using dword.
- * @din_size: size of din buffer to allocate (zero to use static din)
- * @dout_size: size of dout buffer to allocate (zero to use static dout)
- * @wake_enabled: true if this device can wake the system from sleep
- * @suspended: true if this device had been suspended
- * @cmd_xfer: send command to EC and get response
- * Returns the number of bytes received if the communication succeeded, but
- * that doesn't mean the EC was happy with the command. The caller
- * should check msg.result for the EC's result code.
- * @pkt_xfer: send packet to EC and get response
- * @lock: one transaction at a time
- * @mkbp_event_supported: true if this EC supports the MKBP event protocol.
- * @event_notifier: interrupt event notifier for transport devices.
- * @event_data: raw payload transferred with the MKBP event.
- * @event_size: size in bytes of the event data.
- */
-struct cros_ec_device {
-
- /* These are used by other drivers that want to talk to the EC */
- const char *phys_name;
- struct device *dev;
- bool was_wake_device;
- struct class *cros_class;
- int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
- unsigned int bytes, void *dest);
-
- /* These are used to implement the platform-specific interface */
- u16 max_request;
- u16 max_response;
- u16 max_passthru;
- u16 proto_version;
- void *priv;
- int irq;
- u8 *din;
- u8 *dout;
- int din_size;
- int dout_size;
- bool wake_enabled;
- bool suspended;
- int (*cmd_xfer)(struct cros_ec_device *ec,
- struct cros_ec_command *msg);
- int (*pkt_xfer)(struct cros_ec_device *ec,
- struct cros_ec_command *msg);
- struct mutex lock;
- bool mkbp_event_supported;
- struct blocking_notifier_head event_notifier;
-
- struct ec_response_get_next_event event_data;
- int event_size;
- u32 host_event_wake_mask;
-};
-
-/**
- * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
- *
- * @sensor_num: Id of the sensor, as reported by the EC.
- */
-struct cros_ec_sensor_platform {
- u8 sensor_num;
-};
-
-/* struct cros_ec_platform - ChromeOS EC platform information
- *
- * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
- * used in /dev/ and sysfs.
- * @cmd_offset: offset to apply for each command. Set when
- * registering a devicde behind another one.
- */
-struct cros_ec_platform {
- const char *ec_name;
- u16 cmd_offset;
-};
-
-struct cros_ec_debugfs;
-
-/*
- * struct cros_ec_dev - ChromeOS EC device entry point
- *
- * @class_dev: Device structure used in sysfs
- * @cdev: Character device structure in /dev
- * @ec_dev: cros_ec_device structure to talk to the physical device
- * @dev: pointer to the platform device
- * @debug_info: cros_ec_debugfs structure for debugging information
- * @cmd_offset: offset to apply for each command.
- */
-struct cros_ec_dev {
- struct device class_dev;
- struct cdev cdev;
- struct cros_ec_device *ec_dev;
- struct device *dev;
- struct cros_ec_debugfs *debug_info;
- u16 cmd_offset;
- u32 features[2];
-};
-
-/**
- * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
- *
- * This can be called by drivers to handle a suspend event.
- *
- * ec_dev: Device to suspend
- * @return 0 if ok, -ve on error
- */
-int cros_ec_suspend(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_resume - Handle a resume operation for the ChromeOS EC device
- *
- * This can be called by drivers to handle a resume event.
- *
- * @ec_dev: Device to resume
- * @return 0 if ok, -ve on error
- */
-int cros_ec_resume(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_prepare_tx - Prepare an outgoing message in the output buffer
- *
- * This is intended to be used by all ChromeOS EC drivers, but at present
- * only SPI uses it. Once LPC uses the same protocol it can start using it.
- * I2C could use it now, with a refactor of the existing code.
- *
- * @ec_dev: Device to register
- * @msg: Message to write
- */
-int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_check_result - Check ec_msg->result
- *
- * This is used by ChromeOS EC drivers to check the ec_msg->result for
- * errors and to warn about them.
- *
- * @ec_dev: EC device
- * @msg: Message to check
- */
-int cros_ec_check_result(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_cmd_xfer - Send a command to the ChromeOS EC
- *
- * Call this to send a command to the ChromeOS EC. This should be used
- * instead of calling the EC's cmd_xfer() callback directly.
- *
- * @ec_dev: EC device
- * @msg: Message to write
- */
-int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC
- *
- * This function is identical to cros_ec_cmd_xfer, except it returns success
- * status only if both the command was transmitted successfully and the EC
- * replied with success status. It's not necessary to check msg->result when
- * using this function.
- *
- * @ec_dev: EC device
- * @msg: Message to write
- * @return: Num. of bytes transferred on success, <0 on failure
- */
-int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_remove - Remove a ChromeOS EC
- *
- * Call this to deregister a ChromeOS EC, then clean up any private data.
- *
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
- */
-int cros_ec_remove(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_register - Register a new ChromeOS EC, using the provided info
- *
- * Before calling this, allocate a pointer to a new device and then fill
- * in all the fields up to the --private-- marker.
- *
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
- */
-int cros_ec_register(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
- *
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
- */
-int cros_ec_query_all(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_get_next_event - Fetch next event from the ChromeOS EC
- *
- * @ec_dev: Device to fetch event from
- * @wake_event: Pointer to a bool set to true upon return if the event might be
- * treated as a wake event. Ignored if null.
- *
- * Returns: 0 on success, Linux error number on failure
- */
-int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
-
-/**
- * cros_ec_get_host_event - Return a mask of event set by the EC.
- *
- * When MKBP is supported, when the EC raises an interrupt,
- * We collect the events raised and call the functions in the ec notifier.
- *
- * This function is a helper to know which events are raised.
- */
-u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
-
-/* sysfs stuff */
-extern struct attribute_group cros_ec_attr_group;
-extern struct attribute_group cros_ec_lightbar_attr_group;
-extern struct attribute_group cros_ec_vbc_attr_group;
-
-/* ACPI GPE handler */
-#ifdef CONFIG_ACPI
-
-int cros_ec_acpi_install_gpe_handler(struct device *dev);
-void cros_ec_acpi_remove_gpe_handler(void);
-void cros_ec_acpi_clear_gpe(void);
-
-#else /* CONFIG_ACPI */
-
-static inline int cros_ec_acpi_install_gpe_handler(struct device *dev)
-{
- return -ENODEV;
-}
-static inline void cros_ec_acpi_remove_gpe_handler(void) {}
-static inline void cros_ec_acpi_clear_gpe(void) {}
-
-#endif /* CONFIG_ACPI */
-
-#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
deleted file mode 100644
index 2b16e95b9bb8..000000000000
--- a/include/linux/mfd/cros_ec_commands.h
+++ /dev/null
@@ -1,3021 +0,0 @@
-/*
- * Host communication command constants for ChromeOS EC
- *
- * Copyright (C) 2012 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * The ChromeOS EC multi function device is used to mux all the requests
- * to the EC device for its multiple features: keyboard controller,
- * battery charging and regulator control, firmware update.
- *
- * NOTE: This file is copied verbatim from the ChromeOS EC Open Source
- * project in an attempt to make future updates easy to make.
- */
-
-#ifndef __CROS_EC_COMMANDS_H
-#define __CROS_EC_COMMANDS_H
-
-/*
- * Current version of this protocol
- *
- * TODO(crosbug.com/p/11223): This is effectively useless; protocol is
- * determined in other ways. Remove this once the kernel code no longer
- * depends on it.
- */
-#define EC_PROTO_VERSION 0x00000002
-
-/* Command version mask */
-#define EC_VER_MASK(version) (1UL << (version))
-
-/* I/O addresses for ACPI commands */
-#define EC_LPC_ADDR_ACPI_DATA 0x62
-#define EC_LPC_ADDR_ACPI_CMD 0x66
-
-/* I/O addresses for host command */
-#define EC_LPC_ADDR_HOST_DATA 0x200
-#define EC_LPC_ADDR_HOST_CMD 0x204
-
-/* I/O addresses for host command args and params */
-/* Protocol version 2 */
-#define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */
-#define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is
- * EC_PROTO2_MAX_PARAM_SIZE */
-/* Protocol version 3 */
-#define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */
-#define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */
-
-/* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff
- * and they tell the kernel that so we have to think of it as two parts. */
-#define EC_HOST_CMD_REGION0 0x800
-#define EC_HOST_CMD_REGION1 0x880
-#define EC_HOST_CMD_REGION_SIZE 0x80
-
-/* EC command register bit functions */
-#define EC_LPC_CMDR_DATA (1 << 0) /* Data ready for host to read */
-#define EC_LPC_CMDR_PENDING (1 << 1) /* Write pending to EC */
-#define EC_LPC_CMDR_BUSY (1 << 2) /* EC is busy processing a command */
-#define EC_LPC_CMDR_CMD (1 << 3) /* Last host write was a command */
-#define EC_LPC_CMDR_ACPI_BRST (1 << 4) /* Burst mode (not used) */
-#define EC_LPC_CMDR_SCI (1 << 5) /* SCI event is pending */
-#define EC_LPC_CMDR_SMI (1 << 6) /* SMI event is pending */
-
-#define EC_LPC_ADDR_MEMMAP 0x900
-#define EC_MEMMAP_SIZE 255 /* ACPI IO buffer max is 255 bytes */
-#define EC_MEMMAP_TEXT_MAX 8 /* Size of a string in the memory map */
-
-/* The offset address of each type of data in mapped memory. */
-#define EC_MEMMAP_TEMP_SENSOR 0x00 /* Temp sensors 0x00 - 0x0f */
-#define EC_MEMMAP_FAN 0x10 /* Fan speeds 0x10 - 0x17 */
-#define EC_MEMMAP_TEMP_SENSOR_B 0x18 /* More temp sensors 0x18 - 0x1f */
-#define EC_MEMMAP_ID 0x20 /* 0x20 == 'E', 0x21 == 'C' */
-#define EC_MEMMAP_ID_VERSION 0x22 /* Version of data in 0x20 - 0x2f */
-#define EC_MEMMAP_THERMAL_VERSION 0x23 /* Version of data in 0x00 - 0x1f */
-#define EC_MEMMAP_BATTERY_VERSION 0x24 /* Version of data in 0x40 - 0x7f */
-#define EC_MEMMAP_SWITCHES_VERSION 0x25 /* Version of data in 0x30 - 0x33 */
-#define EC_MEMMAP_EVENTS_VERSION 0x26 /* Version of data in 0x34 - 0x3f */
-#define EC_MEMMAP_HOST_CMD_FLAGS 0x27 /* Host cmd interface flags (8 bits) */
-/* Unused 0x28 - 0x2f */
-#define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */
-/* Unused 0x31 - 0x33 */
-#define EC_MEMMAP_HOST_EVENTS 0x34 /* 32 bits */
-/* Reserve 0x38 - 0x3f for additional host event-related stuff */
-/* Battery values are all 32 bits */
-#define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */
-#define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */
-#define EC_MEMMAP_BATT_CAP 0x48 /* Battery Remaining Capacity */
-#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, defined below */
-#define EC_MEMMAP_BATT_DCAP 0x50 /* Battery Design Capacity */
-#define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */
-#define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */
-#define EC_MEMMAP_BATT_CCNT 0x5c /* Battery Cycle Count */
-/* Strings are all 8 bytes (EC_MEMMAP_TEXT_MAX) */
-#define EC_MEMMAP_BATT_MFGR 0x60 /* Battery Manufacturer String */
-#define EC_MEMMAP_BATT_MODEL 0x68 /* Battery Model Number String */
-#define EC_MEMMAP_BATT_SERIAL 0x70 /* Battery Serial Number String */
-#define EC_MEMMAP_BATT_TYPE 0x78 /* Battery Type String */
-#define EC_MEMMAP_ALS 0x80 /* ALS readings in lux (2 X 16 bits) */
-/* Unused 0x84 - 0x8f */
-#define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits )*/
-/* Unused 0x91 */
-#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometer data 0x92 - 0x9f */
-#define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */
-/* Unused 0xa6 - 0xfe (remember, 0xff is NOT part of the memmap region) */
-
-
-/* Define the format of the accelerometer mapped memory status byte. */
-#define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f
-#define EC_MEMMAP_ACC_STATUS_BUSY_BIT (1 << 4)
-#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT (1 << 7)
-
-/* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */
-#define EC_TEMP_SENSOR_ENTRIES 16
-/*
- * Number of temp sensors at EC_MEMMAP_TEMP_SENSOR_B.
- *
- * Valid only if EC_MEMMAP_THERMAL_VERSION returns >= 2.
- */
-#define EC_TEMP_SENSOR_B_ENTRIES 8
-
-/* Special values for mapped temperature sensors */
-#define EC_TEMP_SENSOR_NOT_PRESENT 0xff
-#define EC_TEMP_SENSOR_ERROR 0xfe
-#define EC_TEMP_SENSOR_NOT_POWERED 0xfd
-#define EC_TEMP_SENSOR_NOT_CALIBRATED 0xfc
-/*
- * The offset of temperature value stored in mapped memory. This allows
- * reporting a temperature range of 200K to 454K = -73C to 181C.
- */
-#define EC_TEMP_SENSOR_OFFSET 200
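Concretely, a raw memmap reading t decodes as (t + EC_TEMP_SENSOR_OFFSET) kelvin. A one-line sketch of the conversion, written against the definitions this hunk removes (integer kelvin, per the comment above):

/* Sketch: raw memmap temperature byte -> millidegrees Celsius. */
static int ec_temp_raw_to_mcelsius(uint8_t raw)
{
	return (raw + EC_TEMP_SENSOR_OFFSET - 273) * 1000;
}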
-
-/*
- * Number of ALS readings at EC_MEMMAP_ALS
- */
-#define EC_ALS_ENTRIES 2
-
-/*
- * The default value a temperature sensor will return when it is present but
- * has not been read this boot. This is a reasonable number to avoid
- * triggering alarms on the host.
- */
-#define EC_TEMP_SENSOR_DEFAULT (296 - EC_TEMP_SENSOR_OFFSET)
-
-#define EC_FAN_SPEED_ENTRIES 4 /* Number of fans at EC_MEMMAP_FAN */
-#define EC_FAN_SPEED_NOT_PRESENT 0xffff /* Entry not present */
-#define EC_FAN_SPEED_STALLED 0xfffe /* Fan stalled */
-
-/* Battery bit flags at EC_MEMMAP_BATT_FLAG. */
-#define EC_BATT_FLAG_AC_PRESENT 0x01
-#define EC_BATT_FLAG_BATT_PRESENT 0x02
-#define EC_BATT_FLAG_DISCHARGING 0x04
-#define EC_BATT_FLAG_CHARGING 0x08
-#define EC_BATT_FLAG_LEVEL_CRITICAL 0x10
-
-/* Switch flags at EC_MEMMAP_SWITCHES */
-#define EC_SWITCH_LID_OPEN 0x01
-#define EC_SWITCH_POWER_BUTTON_PRESSED 0x02
-#define EC_SWITCH_WRITE_PROTECT_DISABLED 0x04
-/* Was recovery requested via keyboard; now unused. */
-#define EC_SWITCH_IGNORE1 0x08
-/* Recovery requested via dedicated signal (from servo board) */
-#define EC_SWITCH_DEDICATED_RECOVERY 0x10
-/* Was fake developer mode switch; now unused. Remove in next refactor. */
-#define EC_SWITCH_IGNORE0 0x20
-
-/* Host command interface flags */
-/* Host command interface supports LPC args (LPC interface only) */
-#define EC_HOST_CMD_FLAG_LPC_ARGS_SUPPORTED 0x01
-/* Host command interface supports version 3 protocol */
-#define EC_HOST_CMD_FLAG_VERSION_3 0x02
-
-/* Wireless switch flags */
-#define EC_WIRELESS_SWITCH_ALL ~0x00 /* All flags */
-#define EC_WIRELESS_SWITCH_WLAN 0x01 /* WLAN radio */
-#define EC_WIRELESS_SWITCH_BLUETOOTH 0x02 /* Bluetooth radio */
-#define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */
-#define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */
-
-/*
- * This header file is used in coreboot both in C and ACPI code. The ACPI code
- * is pre-processed to handle constants but the ASL compiler is unable to
- * handle actual C code so keep it separate.
- */
-#ifndef __ACPI__
-
-/*
- * Define __packed if someone hasn't beat us to it. Linux kernel style
- * checking prefers __packed over __attribute__((packed)).
- */
-#ifndef __packed
-#define __packed __attribute__((packed))
-#endif
-
-/* LPC command status byte masks */
-/* EC has written a byte in the data register and host hasn't read it yet */
-#define EC_LPC_STATUS_TO_HOST 0x01
-/* Host has written a command/data byte and the EC hasn't read it yet */
-#define EC_LPC_STATUS_FROM_HOST 0x02
-/* EC is processing a command */
-#define EC_LPC_STATUS_PROCESSING 0x04
-/* Last write to EC was a command, not data */
-#define EC_LPC_STATUS_LAST_CMD 0x08
-/* EC is in burst mode. Unsupported by Chrome EC, so this bit is never set */
-#define EC_LPC_STATUS_BURST_MODE 0x10
-/* SCI event is pending (requesting SCI query) */
-#define EC_LPC_STATUS_SCI_PENDING 0x20
-/* SMI event is pending (requesting SMI query) */
-#define EC_LPC_STATUS_SMI_PENDING 0x40
-/* (reserved) */
-#define EC_LPC_STATUS_RESERVED 0x80
-
-/*
- * EC is busy. This covers both the EC processing a command, and the host has
- * written a new command but the EC hasn't picked it up yet.
- */
-#define EC_LPC_STATUS_BUSY_MASK \
- (EC_LPC_STATUS_FROM_HOST | EC_LPC_STATUS_PROCESSING)
-
-/* Host command response codes */
-enum ec_status {
- EC_RES_SUCCESS = 0,
- EC_RES_INVALID_COMMAND = 1,
- EC_RES_ERROR = 2,
- EC_RES_INVALID_PARAM = 3,
- EC_RES_ACCESS_DENIED = 4,
- EC_RES_INVALID_RESPONSE = 5,
- EC_RES_INVALID_VERSION = 6,
- EC_RES_INVALID_CHECKSUM = 7,
- EC_RES_IN_PROGRESS = 8, /* Accepted, command in progress */
- EC_RES_UNAVAILABLE = 9, /* No response available */
- EC_RES_TIMEOUT = 10, /* We got a timeout */
- EC_RES_OVERFLOW = 11, /* Table / data overflow */
- EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */
- EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */
- EC_RES_RESPONSE_TOO_BIG = 14 /* Response was too big to handle */
-};
-
-/*
- * Host event codes. Note these are 1-based, not 0-based, because ACPI query
- * EC command uses code 0 to mean "no event pending". We explicitly specify
- * each value in the enum listing so they won't change if we delete/insert an
- * item or rearrange the list (it needs to be stable across platforms, not
- * just within a single compiled instance).
- */
-enum host_event_code {
- EC_HOST_EVENT_LID_CLOSED = 1,
- EC_HOST_EVENT_LID_OPEN = 2,
- EC_HOST_EVENT_POWER_BUTTON = 3,
- EC_HOST_EVENT_AC_CONNECTED = 4,
- EC_HOST_EVENT_AC_DISCONNECTED = 5,
- EC_HOST_EVENT_BATTERY_LOW = 6,
- EC_HOST_EVENT_BATTERY_CRITICAL = 7,
- EC_HOST_EVENT_BATTERY = 8,
- EC_HOST_EVENT_THERMAL_THRESHOLD = 9,
- EC_HOST_EVENT_THERMAL_OVERLOAD = 10,
- EC_HOST_EVENT_THERMAL = 11,
- EC_HOST_EVENT_USB_CHARGER = 12,
- EC_HOST_EVENT_KEY_PRESSED = 13,
- /*
- * EC has finished initializing the host interface. The host can check
- * for this event following sending a EC_CMD_REBOOT_EC command to
- * determine when the EC is ready to accept subsequent commands.
- */
- EC_HOST_EVENT_INTERFACE_READY = 14,
- /* Keyboard recovery combo has been pressed */
- EC_HOST_EVENT_KEYBOARD_RECOVERY = 15,
-
- /* Shutdown due to thermal overload */
- EC_HOST_EVENT_THERMAL_SHUTDOWN = 16,
- /* Shutdown due to battery level too low */
- EC_HOST_EVENT_BATTERY_SHUTDOWN = 17,
-
- /* Suggest that the AP throttle itself */
- EC_HOST_EVENT_THROTTLE_START = 18,
- /* Suggest that the AP resume normal speed */
- EC_HOST_EVENT_THROTTLE_STOP = 19,
-
- /* Hang detect logic detected a hang and host event timeout expired */
- EC_HOST_EVENT_HANG_DETECT = 20,
- /* Hang detect logic detected a hang and warm rebooted the AP */
- EC_HOST_EVENT_HANG_REBOOT = 21,
- /* PD MCU triggering host event */
- EC_HOST_EVENT_PD_MCU = 22,
-
- /* EC desires to change state of host-controlled USB mux */
- EC_HOST_EVENT_USB_MUX = 28,
-
- /*
- * The high bit of the event mask is not used as a host event code. If
- * it reads back as set, then the entire event mask should be
- * considered invalid by the host. This can happen when reading the
- * raw event status via EC_MEMMAP_HOST_EVENTS but the LPC interface is
- * not initialized on the EC, or improperly configured on the host.
- */
- EC_HOST_EVENT_INVALID = 32
-};
-/* Host event mask */
-#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1))
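For reference, the 1-based codes above pair with EC_HOST_EVENT_MASK() as in this tiny sketch, again written against the definitions this hunk removes:

/* True if the lid-closed event is set in a host event word. */
static inline bool ec_lid_closed(uint32_t host_events)
{
	return host_events & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED);
}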
-
-/* Arguments at EC_LPC_ADDR_HOST_ARGS */
-struct ec_lpc_host_args {
- uint8_t flags;
- uint8_t command_version;
- uint8_t data_size;
- /*
- * Checksum; sum of command + flags + command_version + data_size +
- * all params/response data bytes.
- */
- uint8_t checksum;
-} __packed;
-
-/* Flags for ec_lpc_host_args.flags */
-/*
- * Args are from host. Data area at EC_LPC_ADDR_HOST_PARAM contains command
- * params.
- *
- * If EC gets a command and this flag is not set, this is an old-style command.
- * Command version is 0 and params from host are at EC_LPC_ADDR_OLD_PARAM with
- * unknown length. EC must respond with an old-style response (that is,
- * withouth setting EC_HOST_ARGS_FLAG_TO_HOST).
- */
-#define EC_HOST_ARGS_FLAG_FROM_HOST 0x01
-/*
- * Args are from EC. Data area at EC_LPC_ADDR_HOST_PARAM contains response.
- *
- * If EC responds to a command and this flag is not set, this is an old-style
- * response. Command version is 0 and response data from EC is at
- * EC_LPC_ADDR_OLD_PARAM with unknown length.
- */
-#define EC_HOST_ARGS_FLAG_TO_HOST 0x02
-
-/*****************************************************************************/
-/*
- * Byte codes returned by EC over SPI interface.
- *
- * These can be used by the AP to debug the EC interface, and to determine
- * when the EC is not in a state where it will ever get around to responding
- * to the AP.
- *
- * Example of sequence of bytes read from EC for a current good transfer:
- * 1. - - AP asserts chip select (CS#)
- * 2. EC_SPI_OLD_READY - AP sends first byte(s) of request
- * 3. - - EC starts handling CS# interrupt
- * 4. EC_SPI_RECEIVING - AP sends remaining byte(s) of request
- * 5. EC_SPI_PROCESSING - EC starts processing request; AP is clocking in
- * bytes looking for EC_SPI_FRAME_START
- * 6. - - EC finishes processing and sets up response
- * 7. EC_SPI_FRAME_START - AP reads frame byte
- * 8. (response packet) - AP reads response packet
- * 9. EC_SPI_PAST_END - Any additional bytes read by AP
- * 10 - - AP deasserts chip select
- * 11 - - EC processes CS# interrupt and sets up DMA for
- * next request
- *
- * If the AP is waiting for EC_SPI_FRAME_START and sees any value other than
- * the following byte values:
- * EC_SPI_OLD_READY
- * EC_SPI_RX_READY
- * EC_SPI_RECEIVING
- * EC_SPI_PROCESSING
- *
- * Then the EC found an error in the request, or was not ready for the request
- * and lost data. The AP should give up waiting for EC_SPI_FRAME_START,
- * because the EC is unable to tell when the AP is done sending its request.
- */
-
-/*
- * Framing byte which precedes a response packet from the EC. After sending a
- * request, the AP will clock in bytes until it sees the framing byte, then
- * clock in the response packet.
- */
-#define EC_SPI_FRAME_START 0xec
-
-/*
- * Padding bytes which are clocked out after the end of a response packet.
- */
-#define EC_SPI_PAST_END 0xed
-
-/*
- * EC is ready to receive, and has ignored the byte sent by the AP. EC expects
- * that the AP will send a valid packet header (starting with
- * EC_COMMAND_PROTOCOL_3) in the next 32 bytes.
- */
-#define EC_SPI_RX_READY 0xf8
-
-/*
- * EC has started receiving the request from the AP, but hasn't started
- * processing it yet.
- */
-#define EC_SPI_RECEIVING 0xf9
-
-/* EC has received the entire request from the AP and is processing it. */
-#define EC_SPI_PROCESSING 0xfa
-
-/*
- * EC received bad data from the AP, such as a packet header with an invalid
- * length. EC will ignore all data until chip select deasserts.
- */
-#define EC_SPI_RX_BAD_DATA 0xfb
-
-/*
- * EC received data from the AP before it was ready. That is, the AP asserted
- * chip select and started clocking data before the EC was ready to receive it.
- * EC will ignore all data until chip select deasserts.
- */
-#define EC_SPI_NOT_READY 0xfc
-
-/*
- * EC was ready to receive a request from the AP. EC has treated the byte sent
- * by the AP as part of a request packet, or (for old-style ECs) is processing
- * a fully received packet but is not ready to respond yet.
- */
-#define EC_SPI_OLD_READY 0xfd
-
-/*****************************************************************************/
-
-/*
- * Protocol version 2 for I2C and SPI send a request this way:
- *
- * 0 EC_CMD_VERSION0 + (command version)
- * 1 Command number
- * 2 Length of params = N
- * 3..N+2 Params, if any
- * N+3 8-bit checksum of bytes 0..N+2
- *
- * The corresponding response is:
- *
- * 0 Result code (EC_RES_*)
- * 1 Length of params = M
- * 2..M+1 Params, if any
- * M+2 8-bit checksum of bytes 0..M+1
- */
-#define EC_PROTO2_REQUEST_HEADER_BYTES 3
-#define EC_PROTO2_REQUEST_TRAILER_BYTES 1
-#define EC_PROTO2_REQUEST_OVERHEAD (EC_PROTO2_REQUEST_HEADER_BYTES + \
- EC_PROTO2_REQUEST_TRAILER_BYTES)
-
-#define EC_PROTO2_RESPONSE_HEADER_BYTES 2
-#define EC_PROTO2_RESPONSE_TRAILER_BYTES 1
-#define EC_PROTO2_RESPONSE_OVERHEAD (EC_PROTO2_RESPONSE_HEADER_BYTES + \
- EC_PROTO2_RESPONSE_TRAILER_BYTES)
-
-/* Parameter length was limited by the LPC interface */
-#define EC_PROTO2_MAX_PARAM_SIZE 0xfc
-
-/* Maximum request and response packet sizes for protocol version 2 */
-#define EC_PROTO2_MAX_REQUEST_SIZE (EC_PROTO2_REQUEST_OVERHEAD + \
- EC_PROTO2_MAX_PARAM_SIZE)
-#define EC_PROTO2_MAX_RESPONSE_SIZE (EC_PROTO2_RESPONSE_OVERHEAD + \
- EC_PROTO2_MAX_PARAM_SIZE)
-
-/*****************************************************************************/
-
-/*
- * Value written to legacy command port / prefix byte to indicate protocol
- * 3+ structs are being used. Usage is bus-dependent.
- */
-#define EC_COMMAND_PROTOCOL_3 0xda
-
-#define EC_HOST_REQUEST_VERSION 3
-
-/* Version 3 request from host */
-struct ec_host_request {
- /* Struct version (=3)
- *
- * EC will return EC_RES_INVALID_HEADER if it receives a header with a
- * version it doesn't know how to parse.
- */
- uint8_t struct_version;
-
- /*
- * Checksum of request and data; sum of all bytes including checksum
- * should total to 0.
- */
- uint8_t checksum;
-
- /* Command code */
- uint16_t command;
-
- /* Command version */
- uint8_t command_version;
-
- /* Unused byte in current protocol version; set to 0 */
- uint8_t reserved;
-
- /* Length of data which follows this header */
- uint16_t data_len;
-} __packed;
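The checksum rule above (all bytes of header plus data, including the checksum byte itself, sum to zero mod 256) can be met with a helper like this sketch, run over the assembled request with the checksum field pre-zeroed:

/* Sketch: value to store so the whole buffer sums to 0 (mod 256). */
static uint8_t ec_proto3_csum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];

	return (uint8_t)-sum;
}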
-
-#define EC_HOST_RESPONSE_VERSION 3
-
-/* Version 3 response from EC */
-struct ec_host_response {
- /* Struct version (=3) */
- uint8_t struct_version;
-
- /*
- * Checksum of response and data; sum of all bytes including checksum
- * should total to 0.
- */
- uint8_t checksum;
-
- /* Result code (EC_RES_*) */
- uint16_t result;
-
- /* Length of data which follows this header */
- uint16_t data_len;
-
- /* Unused bytes in current protocol version; set to 0 */
- uint16_t reserved;
-} __packed;
-
-/*****************************************************************************/
-/*
- * Notes on commands:
- *
- * Each command is an 16-bit command value. Commands which take params or
- * return response data specify structs for that data. If no struct is
- * specified, the command does not input or output data, respectively.
- * Parameter/response length is implicit in the structs. Some underlying
- * communication protocols (I2C, SPI) may add length or checksum headers, but
- * those are implementation-dependent and not defined here.
- */
-
-/*****************************************************************************/
-/* General / test commands */
-
-/*
- * Get protocol version, used to deal with non-backward compatible protocol
- * changes.
- */
-#define EC_CMD_PROTO_VERSION 0x00
-
-struct ec_response_proto_version {
- uint32_t version;
-} __packed;
-
-/*
- * Hello. This is a simple command to test the EC is responsive to
- * commands.
- */
-#define EC_CMD_HELLO 0x01
-
-struct ec_params_hello {
- uint32_t in_data; /* Pass anything here */
-} __packed;
-
-struct ec_response_hello {
- uint32_t out_data; /* Output will be in_data + 0x01020304 */
-} __packed;
-
-/* Get version number */
-#define EC_CMD_GET_VERSION 0x02
-
-enum ec_current_image {
- EC_IMAGE_UNKNOWN = 0,
- EC_IMAGE_RO,
- EC_IMAGE_RW
-};
-
-struct ec_response_get_version {
- /* Null-terminated version strings for RO, RW */
- char version_string_ro[32];
- char version_string_rw[32];
- char reserved[32]; /* Was previously RW-B string */
- uint32_t current_image; /* One of ec_current_image */
-} __packed;
-
-/* Read test */
-#define EC_CMD_READ_TEST 0x03
-
-struct ec_params_read_test {
- uint32_t offset; /* Starting value for read buffer */
- uint32_t size; /* Size to read in bytes */
-} __packed;
-
-struct ec_response_read_test {
- uint32_t data[32];
-} __packed;
-
-/*
- * Get build information
- *
- * Response is null-terminated string.
- */
-#define EC_CMD_GET_BUILD_INFO 0x04
-
-/* Get chip info */
-#define EC_CMD_GET_CHIP_INFO 0x05
-
-struct ec_response_get_chip_info {
- /* Null-terminated strings */
- char vendor[32];
- char name[32];
- char revision[32]; /* Mask version */
-} __packed;
-
-/* Get board HW version */
-#define EC_CMD_GET_BOARD_VERSION 0x06
-
-struct ec_response_board_version {
- uint16_t board_version; /* A monotonously incrementing number. */
-} __packed;
-
-/*
- * Read memory-mapped data.
- *
- * This is an alternate interface to memory-mapped data for bus protocols
- * which don't support direct-mapped memory - I2C, SPI, etc.
- *
- * Response is params.size bytes of data.
- */
-#define EC_CMD_READ_MEMMAP 0x07
-
-struct ec_params_read_memmap {
- uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */
- uint8_t size; /* Size to read in bytes */
-} __packed;
-
-/* Read versions supported for a command */
-#define EC_CMD_GET_CMD_VERSIONS 0x08
-
-struct ec_params_get_cmd_versions {
- uint8_t cmd; /* Command to check */
-} __packed;
-
-struct ec_params_get_cmd_versions_v1 {
- uint16_t cmd; /* Command to check */
-} __packed;
-
-struct ec_response_get_cmd_versions {
- /*
- * Mask of supported versions; use EC_VER_MASK() to compare with a
- * desired version.
- */
- uint32_t version_mask;
-} __packed;
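-
-/*
- * Illustrative sketch: testing whether a command supports a given version.
- * The raw shift below matches what EC_VER_MASK() (defined earlier in this
- * header) is assumed to expand to.
- */
-static inline int ec_cmd_version_supported(
-  const struct ec_response_get_cmd_versions *r, int version)
-{
- return !!(r->version_mask & (1UL << version));
-}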
-
-/*
- * Check EC communications status (busy). This is needed on i2c/spi but not
- * on lpc since it has its own out-of-band busy indicator.
- *
- * On lpc, read the status from the command register instead. Attempting this
- * command on lpc will overwrite the args/parameter space and corrupt its data.
- */
-#define EC_CMD_GET_COMMS_STATUS 0x09
-
-/* Avoid using ec_status which is for return values */
-enum ec_comms_status {
- EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */
-};
-
-struct ec_response_get_comms_status {
- uint32_t flags; /* Mask of enum ec_comms_status */
-} __packed;
-
-/* Fake a variety of responses, purely for testing purposes. */
-#define EC_CMD_TEST_PROTOCOL 0x0a
-
-/* Tell the EC what to send back to us. */
-struct ec_params_test_protocol {
- uint32_t ec_result;
- uint32_t ret_len;
- uint8_t buf[32];
-} __packed;
-
-/* Here it comes... */
-struct ec_response_test_protocol {
- uint8_t buf[32];
-} __packed;
-
-/* Get protocol information */
-#define EC_CMD_GET_PROTOCOL_INFO 0x0b
-
-/* Flags for ec_response_get_protocol_info.flags */
-/* EC_RES_IN_PROGRESS may be returned if a command is slow */
-#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0)
-
-struct ec_response_get_protocol_info {
- /* Fields which exist if at least protocol version 3 supported */
-
- /* Bitmask of protocol versions supported (1 << n means version n) */
- uint32_t protocol_versions;
-
- /* Maximum request packet size, in bytes */
- uint16_t max_request_packet_size;
-
- /* Maximum response packet size, in bytes */
- uint16_t max_response_packet_size;
-
- /* Flags; see EC_PROTOCOL_INFO_* */
- uint32_t flags;
-} __packed;
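-
-/*
- * Illustrative sketch: protocol_versions is a bitmask, so support for
- * protocol version 3 (struct ec_host_response above) is a one-bit test.
- */
-static inline int ec_supports_proto_v3(
-  const struct ec_response_get_protocol_info *r)
-{
- return !!(r->protocol_versions & (1 << 3));
-}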
-
-
-/*****************************************************************************/
-/* Get/Set miscellaneous values */
-
-/* The upper byte of .flags tells what to do (nothing means "get") */
-#define EC_GSV_SET 0x80000000
-
-/* The lower three bytes of .flags identify the parameter, if that has
- meaning for an individual command. */
-#define EC_GSV_PARAM_MASK 0x00ffffff
-
-struct ec_params_get_set_value {
- uint32_t flags;
- uint32_t value;
-} __packed;
-
-struct ec_response_get_set_value {
- uint32_t flags;
- uint32_t value;
-} __packed;
-
-/* More than one command can use these structs to get/set parameters. */
-#define EC_CMD_GSV_PAUSE_IN_S5 0x0c
-
-/*****************************************************************************/
-/* List the features supported by the firmware */
-#define EC_CMD_GET_FEATURES 0x0d
-
-/* Supported features */
-enum ec_feature_code {
- /*
- * This image contains a limited set of features. Another image
- * in RW partition may support more features.
- */
- EC_FEATURE_LIMITED = 0,
- /*
- * Commands for probing/reading/writing/erasing the flash in the
- * EC are present.
- */
- EC_FEATURE_FLASH = 1,
- /*
- * Can control the fan speed directly.
- */
- EC_FEATURE_PWM_FAN = 2,
- /*
- * Can control the intensity of the keyboard backlight.
- */
- EC_FEATURE_PWM_KEYB = 3,
- /*
- * Support Google lightbar, introduced on Pixel.
- */
- EC_FEATURE_LIGHTBAR = 4,
- /* Control of LEDs */
- EC_FEATURE_LED = 5,
- /* Exposes an interface to control gyro and sensors.
- * The host goes through the EC to access these sensors.
- * In addition, the EC may provide composite sensors, like lid angle.
- */
- EC_FEATURE_MOTION_SENSE = 6,
- /* The keyboard is controlled by the EC */
- EC_FEATURE_KEYB = 7,
- /* The AP can use part of the EC flash as persistent storage. */
- EC_FEATURE_PSTORE = 8,
- /* The EC monitors BIOS port 80h, and can return POST codes. */
- EC_FEATURE_PORT80 = 9,
- /*
- * Thermal management: include TMP specific commands.
- * Higher level than direct fan control.
- */
- EC_FEATURE_THERMAL = 10,
- /* Can switch the screen backlight on/off */
- EC_FEATURE_BKLIGHT_SWITCH = 11,
- /* Can switch the wifi module on/off */
- EC_FEATURE_WIFI_SWITCH = 12,
- /* Monitor host events, e.g. through SMI or SCI */
- EC_FEATURE_HOST_EVENTS = 13,
- /* The EC exposes GPIO commands to control/monitor connected devices. */
- EC_FEATURE_GPIO = 14,
- /* The EC can send i2c messages to downstream devices. */
- EC_FEATURE_I2C = 15,
- /* Commands to control the charger are included */
- EC_FEATURE_CHARGER = 16,
- /* Simple battery support. */
- EC_FEATURE_BATTERY = 17,
- /*
- * Support Smart battery protocol
- * (Common Smart Battery System Interface Specification)
- */
- EC_FEATURE_SMART_BATTERY = 18,
- /* The EC can detect when the host hangs. */
- EC_FEATURE_HANG_DETECT = 19,
- /* Report power information, for pit only */
- EC_FEATURE_PMU = 20,
- /* Another Cros EC device is present downstream of this one */
- EC_FEATURE_SUB_MCU = 21,
- /* Support USB Power delivery (PD) commands */
- EC_FEATURE_USB_PD = 22,
- /* Control USB multiplexer, for audio through USB port for instance. */
- EC_FEATURE_USB_MUX = 23,
- /* Motion Sensor code has an internal software FIFO */
- EC_FEATURE_MOTION_SENSE_FIFO = 24,
-};
-
-#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
-#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
-struct ec_response_get_features {
- uint32_t flags[2];
-} __packed;
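-
-/*
- * Illustrative sketch: testing a feature bit. Codes 0-31 live in flags[0]
- * (EC_FEATURE_MASK_0) and codes 32-63 in flags[1] (EC_FEATURE_MASK_1).
- */
-static inline int ec_has_feature(const struct ec_response_get_features *r,
-     enum ec_feature_code code)
-{
- if (code < 32)
-  return !!(r->flags[0] & EC_FEATURE_MASK_0(code));
- return !!(r->flags[1] & EC_FEATURE_MASK_1(code));
-}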
-
-/*****************************************************************************/
-/* Flash commands */
-
-/* Get flash info */
-#define EC_CMD_FLASH_INFO 0x10
-
-/* Version 0 returns these fields */
-struct ec_response_flash_info {
- /* Usable flash size, in bytes */
- uint32_t flash_size;
- /*
- * Write block size. Write offset and size must be a multiple
- * of this.
- */
- uint32_t write_block_size;
- /*
- * Erase block size. Erase offset and size must be a multiple
- * of this.
- */
- uint32_t erase_block_size;
- /*
- * Protection block size. Protection offset and size must be a
- * multiple of this.
- */
- uint32_t protect_block_size;
-} __packed;
-
-/* Flags for version 1+ flash info command */
-/* EC flash erases bits to 0 instead of 1 */
-#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0)
-
-/*
- * Version 1 returns the same initial fields as version 0, with additional
- * fields following.
- *
- * gcc anonymous structs don't seem to get along with the __packed directive;
- * if they did we'd define the version 0 struct as a sub-struct of this one.
- */
-struct ec_response_flash_info_1 {
- /* Version 0 fields; see above for description */
- uint32_t flash_size;
- uint32_t write_block_size;
- uint32_t erase_block_size;
- uint32_t protect_block_size;
-
- /* Version 1 adds these fields: */
- /*
- * Ideal write size in bytes. Writes will be fastest if size is
- * exactly this and offset is a multiple of this. For example, an EC
- * may have a write buffer which can do half-page operations if data is
- * aligned, and a slower word-at-a-time write mode.
- */
- uint32_t write_ideal_size;
-
- /* Flags; see EC_FLASH_INFO_* */
- uint32_t flags;
-} __packed;
-
-/*
- * Read flash
- *
- * Response is params.size bytes of data.
- */
-#define EC_CMD_FLASH_READ 0x11
-
-struct ec_params_flash_read {
- uint32_t offset; /* Byte offset to read */
- uint32_t size; /* Size to read in bytes */
-} __packed;
-
-/* Write flash */
-#define EC_CMD_FLASH_WRITE 0x12
-#define EC_VER_FLASH_WRITE 1
-
-/* Version 0 of the flash command supported only 64 bytes of data */
-#define EC_FLASH_WRITE_VER0_SIZE 64
-
-struct ec_params_flash_write {
- uint32_t offset; /* Byte offset to write */
- uint32_t size; /* Size to write in bytes */
- /* Followed by data to write */
-} __packed;
-
-/* Erase flash */
-#define EC_CMD_FLASH_ERASE 0x13
-
-struct ec_params_flash_erase {
- uint32_t offset; /* Byte offset to erase */
- uint32_t size; /* Size to erase in bytes */
-} __packed;
-
-/*
- * Get/set flash protection.
- *
- * If mask!=0, sets/clears the requested bits of flags. Depending on the
- * firmware write protect GPIO, not all flags will take effect immediately;
- * some flags require a subsequent hard reset to take effect. Check the
- * returned flags bits to see what actually happened.
- *
- * If mask=0, simply returns the current flags state.
- */
-#define EC_CMD_FLASH_PROTECT 0x15
-#define EC_VER_FLASH_PROTECT 1 /* Command version 1 */
-
-/* Flags for flash protection */
-/* RO flash code protected when the EC boots */
-#define EC_FLASH_PROTECT_RO_AT_BOOT (1 << 0)
-/*
- * RO flash code protected now. If this bit is set, at-boot status cannot
- * be changed.
- */
-#define EC_FLASH_PROTECT_RO_NOW (1 << 1)
-/* Entire flash code protected now, until reboot. */
-#define EC_FLASH_PROTECT_ALL_NOW (1 << 2)
-/* Flash write protect GPIO is asserted now */
-#define EC_FLASH_PROTECT_GPIO_ASSERTED (1 << 3)
-/* Error - at least one bank of flash is stuck locked, and cannot be unlocked */
-#define EC_FLASH_PROTECT_ERROR_STUCK (1 << 4)
-/*
- * Error - flash protection is in inconsistent state. At least one bank of
- * flash which should be protected is not protected. Usually fixed by
- * re-requesting the desired flags, or by a hard reset if that fails.
- */
-#define EC_FLASH_PROTECT_ERROR_INCONSISTENT (1 << 5)
-/* Entire flash code protected when the EC boots */
-#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6)
-
-struct ec_params_flash_protect {
- uint32_t mask; /* Bits in flags to apply */
- uint32_t flags; /* New flags to apply */
-} __packed;
-
-struct ec_response_flash_protect {
- /* Current value of flash protect flags */
- uint32_t flags;
- /*
- * Flags which are valid on this platform. This allows the caller
- * to distinguish between flags which aren't set vs. flags which can't
- * be set on this platform.
- */
- uint32_t valid_flags;
- /* Flags which can be changed given the current protection state */
- uint32_t writable_flags;
-} __packed;
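-
-/*
- * Illustrative sketch: requesting RO-at-boot protection. mask selects the
- * bits to change and flags gives their new values; callers should compare
- * the returned flags/valid_flags/writable_flags with what they asked for,
- * since some changes only take effect after a hard reset.
- */
-static inline void ec_flash_request_ro_at_boot(
-  struct ec_params_flash_protect *p)
-{
- p->mask = EC_FLASH_PROTECT_RO_AT_BOOT;
- p->flags = EC_FLASH_PROTECT_RO_AT_BOOT;
-}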
-
-/*
- * Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash
- * write protect. These commands may be reused with version > 0.
- */
-
-/* Get the region offset/size */
-#define EC_CMD_FLASH_REGION_INFO 0x16
-#define EC_VER_FLASH_REGION_INFO 1
-
-enum ec_flash_region {
- /* Region which holds read-only EC image */
- EC_FLASH_REGION_RO = 0,
- /* Region which holds rewritable EC image */
- EC_FLASH_REGION_RW,
- /*
- * Region which should be write-protected in the factory (a superset of
- * EC_FLASH_REGION_RO)
- */
- EC_FLASH_REGION_WP_RO,
- /* Number of regions */
- EC_FLASH_REGION_COUNT,
-};
-
-struct ec_params_flash_region_info {
- uint32_t region; /* enum ec_flash_region */
-} __packed;
-
-struct ec_response_flash_region_info {
- uint32_t offset;
- uint32_t size;
-} __packed;
-
-/* Read/write VbNvContext */
-#define EC_CMD_VBNV_CONTEXT 0x17
-#define EC_VER_VBNV_CONTEXT 1
-#define EC_VBNV_BLOCK_SIZE 16
-
-enum ec_vbnvcontext_op {
- EC_VBNV_CONTEXT_OP_READ,
- EC_VBNV_CONTEXT_OP_WRITE,
-};
-
-struct ec_params_vbnvcontext {
- uint32_t op;
- uint8_t block[EC_VBNV_BLOCK_SIZE];
-} __packed;
-
-struct ec_response_vbnvcontext {
- uint8_t block[EC_VBNV_BLOCK_SIZE];
-} __packed;
-
-/*****************************************************************************/
-/* PWM commands */
-
-/* Get fan target RPM */
-#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x20
-
-struct ec_response_pwm_get_fan_rpm {
- uint32_t rpm;
-} __packed;
-
-/* Set target fan RPM */
-#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x21
-
-struct ec_params_pwm_set_fan_target_rpm {
- uint32_t rpm;
-} __packed;
-
-/* Get keyboard backlight */
-#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x22
-
-struct ec_response_pwm_get_keyboard_backlight {
- uint8_t percent;
- uint8_t enabled;
-} __packed;
-
-/* Set keyboard backlight */
-#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x23
-
-struct ec_params_pwm_set_keyboard_backlight {
- uint8_t percent;
-} __packed;
-
-/* Set target fan PWM duty cycle */
-#define EC_CMD_PWM_SET_FAN_DUTY 0x24
-
-struct ec_params_pwm_set_fan_duty {
- uint32_t percent;
-} __packed;
-
-#define EC_CMD_PWM_SET_DUTY 0x25
-/* 16 bit duty cycle, 0xffff = 100% */
-#define EC_PWM_MAX_DUTY 0xffff
-
-enum ec_pwm_type {
- /* All types, indexed by board-specific enum pwm_channel */
- EC_PWM_TYPE_GENERIC = 0,
- /* Keyboard backlight */
- EC_PWM_TYPE_KB_LIGHT,
- /* Display backlight */
- EC_PWM_TYPE_DISPLAY_LIGHT,
- EC_PWM_TYPE_COUNT,
-};
-
-struct ec_params_pwm_set_duty {
- uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
- uint8_t pwm_type; /* ec_pwm_type */
- uint8_t index; /* Type-specific index, or 0 if unique */
-} __packed;
-
-#define EC_CMD_PWM_GET_DUTY 0x26
-
-struct ec_params_pwm_get_duty {
- uint8_t pwm_type; /* ec_pwm_type */
- uint8_t index; /* Type-specific index, or 0 if unique */
-} __packed;
-
-struct ec_response_pwm_get_duty {
- uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
-} __packed;
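-
-/*
- * Illustrative sketch: converting a percentage to the 16-bit duty scale
- * shared by EC_CMD_PWM_SET_DUTY and EC_CMD_PWM_GET_DUTY.
- */
-static inline uint16_t ec_pwm_percent_to_duty(unsigned int percent)
-{
- return (uint16_t)(percent * EC_PWM_MAX_DUTY / 100);
-}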
-
-/*****************************************************************************/
-/*
- * Lightbar commands. This looks worse than it is. Since we only use one HOST
- * command to say "talk to the lightbar", we put the "and tell it to do X" part
- * into a subcommand. We'll make separate structs for subcommands with
- * different input args, so that we know how much to expect.
- */
-#define EC_CMD_LIGHTBAR_CMD 0x28
-
-struct rgb_s {
- uint8_t r, g, b;
-};
-
-#define LB_BATTERY_LEVELS 4
-/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
- * host command, but the alignment is the same regardless. Keep it that way.
- */
-struct lightbar_params_v0 {
- /* Timing */
- int32_t google_ramp_up;
- int32_t google_ramp_down;
- int32_t s3s0_ramp_up;
- int32_t s0_tick_delay[2]; /* AC=0/1 */
- int32_t s0a_tick_delay[2]; /* AC=0/1 */
- int32_t s0s3_ramp_down;
- int32_t s3_sleep_for;
- int32_t s3_ramp_up;
- int32_t s3_ramp_down;
-
- /* Oscillation */
- uint8_t new_s0;
- uint8_t osc_min[2]; /* AC=0/1 */
- uint8_t osc_max[2]; /* AC=0/1 */
- uint8_t w_ofs[2]; /* AC=0/1 */
-
- /* Brightness limits based on the backlight and AC. */
- uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
- uint8_t bright_bl_on_min[2]; /* AC=0/1 */
- uint8_t bright_bl_on_max[2]; /* AC=0/1 */
-
- /* Battery level thresholds */
- uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
-
- /* Map [AC][battery_level] to color index */
- uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
- uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
-
- /* Color palette */
- struct rgb_s color[8]; /* 0-3 are Google colors */
-} __packed;
-
-struct lightbar_params_v1 {
- /* Timing */
- int32_t google_ramp_up;
- int32_t google_ramp_down;
- int32_t s3s0_ramp_up;
- int32_t s0_tick_delay[2]; /* AC=0/1 */
- int32_t s0a_tick_delay[2]; /* AC=0/1 */
- int32_t s0s3_ramp_down;
- int32_t s3_sleep_for;
- int32_t s3_ramp_up;
- int32_t s3_ramp_down;
- int32_t tap_tick_delay;
- int32_t tap_display_time;
-
- /* Tap-for-battery params */
- uint8_t tap_pct_red;
- uint8_t tap_pct_green;
- uint8_t tap_seg_min_on;
- uint8_t tap_seg_max_on;
- uint8_t tap_seg_osc;
- uint8_t tap_idx[3];
-
- /* Oscillation */
- uint8_t osc_min[2]; /* AC=0/1 */
- uint8_t osc_max[2]; /* AC=0/1 */
- uint8_t w_ofs[2]; /* AC=0/1 */
-
- /* Brightness limits based on the backlight and AC. */
- uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
- uint8_t bright_bl_on_min[2]; /* AC=0/1 */
- uint8_t bright_bl_on_max[2]; /* AC=0/1 */
-
- /* Battery level thresholds */
- uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
-
- /* Map [AC][battery_level] to color index */
- uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
- uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
-
- /* Color palette */
- struct rgb_s color[8]; /* 0-3 are Google colors */
-} __packed;
-
-/* Lightbar program */
-#define EC_LB_PROG_LEN 192
-struct lightbar_program {
- uint8_t size;
- uint8_t data[EC_LB_PROG_LEN];
-};
-
-struct ec_params_lightbar {
- uint8_t cmd; /* Command (see enum lightbar_command) */
- union {
- struct {
- /* no args */
- } dump, off, on, init, get_seq, get_params_v0, get_params_v1,
- version, get_brightness, get_demo, suspend, resume;
-
- struct {
- uint8_t num;
- } set_brightness, seq, demo;
-
- struct {
- uint8_t ctrl, reg, value;
- } reg;
-
- struct {
- uint8_t led, red, green, blue;
- } set_rgb;
-
- struct {
- uint8_t led;
- } get_rgb;
-
- struct {
- uint8_t enable;
- } manual_suspend_ctrl;
-
- struct lightbar_params_v0 set_params_v0;
- struct lightbar_params_v1 set_params_v1;
- struct lightbar_program set_program;
- };
-} __packed;
-
-struct ec_response_lightbar {
- union {
- struct {
- struct {
- uint8_t reg;
- uint8_t ic0;
- uint8_t ic1;
- } vals[23];
- } dump;
-
- struct {
- uint8_t num;
- } get_seq, get_brightness, get_demo;
-
- struct lightbar_params_v0 get_params_v0;
- struct lightbar_params_v1 get_params_v1;
-
- struct {
- uint32_t num;
- uint32_t flags;
- } version;
-
- struct {
- uint8_t red, green, blue;
- } get_rgb;
-
- struct {
- /* no return params */
- } off, on, init, set_brightness, seq, reg, set_rgb,
- demo, set_params_v0, set_params_v1,
- set_program, manual_suspend_ctrl, suspend, resume;
- };
-} __packed;
-
-/* Lightbar commands */
-enum lightbar_command {
- LIGHTBAR_CMD_DUMP = 0,
- LIGHTBAR_CMD_OFF = 1,
- LIGHTBAR_CMD_ON = 2,
- LIGHTBAR_CMD_INIT = 3,
- LIGHTBAR_CMD_SET_BRIGHTNESS = 4,
- LIGHTBAR_CMD_SEQ = 5,
- LIGHTBAR_CMD_REG = 6,
- LIGHTBAR_CMD_SET_RGB = 7,
- LIGHTBAR_CMD_GET_SEQ = 8,
- LIGHTBAR_CMD_DEMO = 9,
- LIGHTBAR_CMD_GET_PARAMS_V0 = 10,
- LIGHTBAR_CMD_SET_PARAMS_V0 = 11,
- LIGHTBAR_CMD_VERSION = 12,
- LIGHTBAR_CMD_GET_BRIGHTNESS = 13,
- LIGHTBAR_CMD_GET_RGB = 14,
- LIGHTBAR_CMD_GET_DEMO = 15,
- LIGHTBAR_CMD_GET_PARAMS_V1 = 16,
- LIGHTBAR_CMD_SET_PARAMS_V1 = 17,
- LIGHTBAR_CMD_SET_PROGRAM = 18,
- LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL = 19,
- LIGHTBAR_CMD_SUSPEND = 20,
- LIGHTBAR_CMD_RESUME = 21,
- LIGHTBAR_NUM_CMDS
-};
-
-/*****************************************************************************/
-/* LED control commands */
-
-#define EC_CMD_LED_CONTROL 0x29
-
-enum ec_led_id {
- /* LED to indicate battery state of charge */
- EC_LED_ID_BATTERY_LED = 0,
- /*
- * LED to indicate system power state (on or in suspend).
- * May be on power button or on C-panel.
- */
- EC_LED_ID_POWER_LED,
- /* LED on power adapter or its plug */
- EC_LED_ID_ADAPTER_LED,
-
- EC_LED_ID_COUNT
-};
-
-/* LED control flags */
-#define EC_LED_FLAGS_QUERY (1 << 0) /* Query LED capability only */
-#define EC_LED_FLAGS_AUTO (1 << 1) /* Switch LED back to automatic control */
-
-enum ec_led_colors {
- EC_LED_COLOR_RED = 0,
- EC_LED_COLOR_GREEN,
- EC_LED_COLOR_BLUE,
- EC_LED_COLOR_YELLOW,
- EC_LED_COLOR_WHITE,
-
- EC_LED_COLOR_COUNT
-};
-
-struct ec_params_led_control {
- uint8_t led_id; /* Which LED to control */
- uint8_t flags; /* Control flags */
-
- uint8_t brightness[EC_LED_COLOR_COUNT];
-} __packed;
-
-struct ec_response_led_control {
- /*
- * Available brightness value range.
- *
- * Range 0 means color channel not present.
- * Range 1 means on/off control.
- * Other values mean the LED is controlled by PWM.
- */
- uint8_t brightness_range[EC_LED_COLOR_COUNT];
-} __packed;
-
-/*****************************************************************************/
-/* Verified boot commands */
-
-/*
- * Note: command code 0x29 version 0 was VBOOT_CMD in Link EVT; it may be
- * reused for other purposes with version > 0.
- */
-
-/* Verified boot hash command */
-#define EC_CMD_VBOOT_HASH 0x2A
-
-struct ec_params_vboot_hash {
- uint8_t cmd; /* enum ec_vboot_hash_cmd */
- uint8_t hash_type; /* enum ec_vboot_hash_type */
- uint8_t nonce_size; /* Nonce size; may be 0 */
- uint8_t reserved0; /* Reserved; set 0 */
- uint32_t offset; /* Offset in flash to hash */
- uint32_t size; /* Number of bytes to hash */
- uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */
-} __packed;
-
-struct ec_response_vboot_hash {
- uint8_t status; /* enum ec_vboot_hash_status */
- uint8_t hash_type; /* enum ec_vboot_hash_type */
- uint8_t digest_size; /* Size of hash digest in bytes */
- uint8_t reserved0; /* Ignore; will be 0 */
- uint32_t offset; /* Offset in flash which was hashed */
- uint32_t size; /* Number of bytes hashed */
- uint8_t hash_digest[64]; /* Hash digest data */
-} __packed;
-
-enum ec_vboot_hash_cmd {
- EC_VBOOT_HASH_GET = 0, /* Get current hash status */
- EC_VBOOT_HASH_ABORT = 1, /* Abort calculating current hash */
- EC_VBOOT_HASH_START = 2, /* Start computing a new hash */
- EC_VBOOT_HASH_RECALC = 3, /* Synchronously compute a new hash */
-};
-
-enum ec_vboot_hash_type {
- EC_VBOOT_HASH_TYPE_SHA256 = 0, /* SHA-256 */
-};
-
-enum ec_vboot_hash_status {
- EC_VBOOT_HASH_STATUS_NONE = 0, /* No hash (not started, or aborted) */
- EC_VBOOT_HASH_STATUS_DONE = 1, /* Finished computing a hash */
- EC_VBOOT_HASH_STATUS_BUSY = 2, /* Busy computing a hash */
-};
-
-/*
- * Special values for offset for EC_VBOOT_HASH_START and EC_VBOOT_HASH_RECALC.
- * If one of these is specified, the EC will automatically update offset and
- * size to the correct values for the specified image (RO or RW).
- */
-#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe
-#define EC_VBOOT_HASH_OFFSET_RW 0xfffffffd
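-
-/*
- * Illustrative sketch: kicking off a SHA-256 hash of the RO image. With
- * EC_VBOOT_HASH_OFFSET_RO the EC fills in the real offset and size itself;
- * nonce_data may stay uninitialized because nonce_size is 0.
- */
-static inline void ec_vboot_hash_start_ro(struct ec_params_vboot_hash *p)
-{
- p->cmd = EC_VBOOT_HASH_START;
- p->hash_type = EC_VBOOT_HASH_TYPE_SHA256;
- p->nonce_size = 0;
- p->reserved0 = 0;
- p->offset = EC_VBOOT_HASH_OFFSET_RO;
- p->size = 0;
-}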
-
-/*****************************************************************************/
-/*
- * Motion sense commands. We'll make separate structs for sub-commands with
- * different input args, so that we know how much to expect.
- */
-#define EC_CMD_MOTION_SENSE_CMD 0x2B
-
-/* Motion sense commands */
-enum motionsense_command {
- /*
- * Dump command returns all motion sensor data including motion sense
- * module flags and individual sensor flags.
- */
- MOTIONSENSE_CMD_DUMP = 0,
-
- /*
- * Info command returns data describing the details of a given sensor,
- * including enum motionsensor_type, enum motionsensor_location, and
- * enum motionsensor_chip.
- */
- MOTIONSENSE_CMD_INFO = 1,
-
- /*
- * EC Rate command is a setter/getter command for the EC sampling rate
- * of all motion sensors in milliseconds.
- */
- MOTIONSENSE_CMD_EC_RATE = 2,
-
- /*
- * Sensor ODR command is a setter/getter command for the output data
- * rate of a specific motion sensor in millihertz.
- */
- MOTIONSENSE_CMD_SENSOR_ODR = 3,
-
- /*
- * Sensor range command is a setter/getter command for the range of
- * a specified motion sensor in +/-G's or +/- deg/s.
- */
- MOTIONSENSE_CMD_SENSOR_RANGE = 4,
-
- /*
- * Setter/getter command for the keyboard wake angle. When the lid
- * angle is greater than this value, keyboard wake is disabled in S3,
- * and when the lid angle goes below this value, keyboard wake is
- * enabled. Note that the lid angle measurement is an approximate,
- * uncalibrated value, so the wake angle isn't exact.
- */
- MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5,
-
- /*
- * Returns data from a single sensor.
- */
- MOTIONSENSE_CMD_DATA = 6,
-
- /*
- * Perform low-level calibration. On sensors that support it, ask to
- * do offset calibration.
- */
- MOTIONSENSE_CMD_PERFORM_CALIB = 10,
-
- /*
- * Sensor Offset command is a setter/getter command for the offset used
- * for calibration. The offsets can be calculated by the host, or via
- * PERFORM_CALIB command.
- */
- MOTIONSENSE_CMD_SENSOR_OFFSET = 11,
-
- /* Number of motionsense sub-commands. */
- MOTIONSENSE_NUM_CMDS
-};
-
-enum motionsensor_id {
- EC_MOTION_SENSOR_ACCEL_BASE = 0,
- EC_MOTION_SENSOR_ACCEL_LID = 1,
- EC_MOTION_SENSOR_GYRO = 2,
-
- /*
- * Note, if more sensors are added and this count changes, the padding
- * in ec_response_motion_sense dump command must be modified.
- */
- EC_MOTION_SENSOR_COUNT = 3
-};
-
-/* List of motion sensor types. */
-enum motionsensor_type {
- MOTIONSENSE_TYPE_ACCEL = 0,
- MOTIONSENSE_TYPE_GYRO = 1,
- MOTIONSENSE_TYPE_MAG = 2,
- MOTIONSENSE_TYPE_PROX = 3,
- MOTIONSENSE_TYPE_LIGHT = 4,
- MOTIONSENSE_TYPE_ACTIVITY = 5,
- MOTIONSENSE_TYPE_BARO = 6,
- MOTIONSENSE_TYPE_MAX,
-};
-
-/* List of motion sensor locations. */
-enum motionsensor_location {
- MOTIONSENSE_LOC_BASE = 0,
- MOTIONSENSE_LOC_LID = 1,
- MOTIONSENSE_LOC_MAX,
-};
-
-/* List of motion sensor chips. */
-enum motionsensor_chip {
- MOTIONSENSE_CHIP_KXCJ9 = 0,
-};
-
-/* Module flag masks used for the dump sub-command. */
-#define MOTIONSENSE_MODULE_FLAG_ACTIVE (1<<0)
-
-/* Sensor flag masks used for the dump sub-command. */
-#define MOTIONSENSE_SENSOR_FLAG_PRESENT (1<<0)
-
-/*
- * Send this value for the data element to only perform a read. If you
- * send any other value, the EC will interpret it as data to set and will
- * return the actual value set.
- */
-#define EC_MOTION_SENSE_NO_VALUE -1
-
-#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000
-
-/* Set Calibration information */
-#define MOTION_SENSE_SET_OFFSET 1
-
-struct ec_response_motion_sensor_data {
- /* Flags for each sensor. */
- uint8_t flags;
- /* Sensor number the data comes from */
- uint8_t sensor_num;
- /* Each sensor is up to 3-axis. */
- union {
- int16_t data[3];
- struct {
- uint16_t rsvd;
- uint32_t timestamp;
- } __packed;
- struct {
- uint8_t activity; /* motionsensor_activity */
- uint8_t state;
- int16_t add_info[2];
- };
- };
-} __packed;
-
-struct ec_params_motion_sense {
- uint8_t cmd;
- union {
- /* Used for MOTIONSENSE_CMD_DUMP. */
- struct {
- /* no args */
- } dump;
-
- /*
- * Used for MOTIONSENSE_CMD_EC_RATE and
- * MOTIONSENSE_CMD_KB_WAKE_ANGLE.
- */
- struct {
- /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */
- int16_t data;
- } ec_rate, kb_wake_angle;
-
- /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
- struct {
- uint8_t sensor_num;
-
- /*
- * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
- * the calibration information in the EC.
- * If unset, just retrieve calibration information.
- */
- uint16_t flags;
-
- /*
- * Temperature at calibration, in units of 0.01 C
- * 0x8000: invalid / unknown.
- * 0x0: 0C
- * 0x7fff: +327.67C
- */
- int16_t temp;
-
- /*
- * Offset for calibration.
- * Unit:
- * Accelerometer: 1/1024 g
- * Gyro: 1/1024 deg/s
- * Compass: 1/16 uT
- */
- int16_t offset[3];
- } __packed sensor_offset;
-
- /* Used for MOTIONSENSE_CMD_INFO. */
- struct {
- uint8_t sensor_num;
- } info;
-
- /*
- * Used for MOTIONSENSE_CMD_SENSOR_ODR and
- * MOTIONSENSE_CMD_SENSOR_RANGE.
- */
- struct {
- /* Should be element of enum motionsensor_id. */
- uint8_t sensor_num;
-
- /* Rounding flag, true for round-up, false for down. */
- uint8_t roundup;
-
- uint16_t reserved;
-
- /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */
- int32_t data;
- } sensor_odr, sensor_range;
- };
-} __packed;
-
-struct ec_response_motion_sense {
- union {
- /* Used for MOTIONSENSE_CMD_DUMP. */
- struct {
- /* Flags representing the motion sensor module. */
- uint8_t module_flags;
-
- /* Number of sensors managed directly by the EC. */
- uint8_t sensor_count;
-
- /*
- * Sensor data is truncated if response_max is too small
- * to hold all the data.
- */
- struct ec_response_motion_sensor_data sensor[0];
- } dump;
-
- /* Used for MOTIONSENSE_CMD_INFO. */
- struct {
- /* Should be element of enum motionsensor_type. */
- uint8_t type;
-
- /* Should be element of enum motionsensor_location. */
- uint8_t location;
-
- /* Should be element of enum motionsensor_chip. */
- uint8_t chip;
- } info;
-
- /* Used for MOTIONSENSE_CMD_DATA */
- struct ec_response_motion_sensor_data data;
-
- /*
- * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR,
- * MOTIONSENSE_CMD_SENSOR_RANGE, and
- * MOTIONSENSE_CMD_KB_WAKE_ANGLE.
- */
- struct {
- /* Current value of the parameter queried. */
- int32_t ret;
- } ec_rate, sensor_odr, sensor_range, kb_wake_angle;
-
- /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
- struct {
- int16_t temp;
- int16_t offset[3];
- } sensor_offset, perform_calib;
- };
-} __packed;
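-
-/*
- * Illustrative sketch: querying the current output data rate of a sensor.
- * EC_MOTION_SENSE_NO_VALUE in the data field means "read only"; any other
- * value would be applied as a new setting and echoed back.
- */
-static inline void ec_motion_sense_get_odr(struct ec_params_motion_sense *p,
-       uint8_t sensor_num)
-{
- p->cmd = MOTIONSENSE_CMD_SENSOR_ODR;
- p->sensor_odr.sensor_num = sensor_num;
- p->sensor_odr.roundup = 0;
- p->sensor_odr.reserved = 0;
- p->sensor_odr.data = EC_MOTION_SENSE_NO_VALUE;
-}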
-
-/*****************************************************************************/
-/* USB charging control commands */
-
-/* Set USB port charging mode */
-#define EC_CMD_USB_CHARGE_SET_MODE 0x30
-
-struct ec_params_usb_charge_set_mode {
- uint8_t usb_port_id;
- uint8_t mode;
-} __packed;
-
-/*****************************************************************************/
-/* Persistent storage for host */
-
-/* Maximum bytes that can be read/written in a single command */
-#define EC_PSTORE_SIZE_MAX 64
-
-/* Get persistent storage info */
-#define EC_CMD_PSTORE_INFO 0x40
-
-struct ec_response_pstore_info {
- /* Persistent storage size, in bytes */
- uint32_t pstore_size;
- /* Access size; read/write offset and size must be a multiple of this */
- uint32_t access_size;
-} __packed;
-
-/*
- * Read persistent storage
- *
- * Response is params.size bytes of data.
- */
-#define EC_CMD_PSTORE_READ 0x41
-
-struct ec_params_pstore_read {
- uint32_t offset; /* Byte offset to read */
- uint32_t size; /* Size to read in bytes */
-} __packed;
-
-/* Write persistent storage */
-#define EC_CMD_PSTORE_WRITE 0x42
-
-struct ec_params_pstore_write {
- uint32_t offset; /* Byte offset to write */
- uint32_t size; /* Size to write in bytes */
- uint8_t data[EC_PSTORE_SIZE_MAX];
-} __packed;
-
-/*****************************************************************************/
-/* Real-time clock */
-
-/* RTC params and response structures */
-struct ec_params_rtc {
- uint32_t time;
-} __packed;
-
-struct ec_response_rtc {
- uint32_t time;
-} __packed;
-
-/* These use ec_response_rtc */
-#define EC_CMD_RTC_GET_VALUE 0x44
-#define EC_CMD_RTC_GET_ALARM 0x45
-
-/* These all use ec_params_rtc */
-#define EC_CMD_RTC_SET_VALUE 0x46
-#define EC_CMD_RTC_SET_ALARM 0x47
-
-/*****************************************************************************/
-/* Port80 log access */
-
-/* Maximum entries that can be read/written in a single command */
-#define EC_PORT80_SIZE_MAX 32
-
-/* Get last port80 code from previous boot */
-#define EC_CMD_PORT80_LAST_BOOT 0x48
-#define EC_CMD_PORT80_READ 0x48
-
-enum ec_port80_subcmd {
- EC_PORT80_GET_INFO = 0,
- EC_PORT80_READ_BUFFER,
-};
-
-struct ec_params_port80_read {
- uint16_t subcmd;
- union {
- struct {
- uint32_t offset;
- uint32_t num_entries;
- } read_buffer;
- };
-} __packed;
-
-struct ec_response_port80_read {
- union {
- struct {
- uint32_t writes;
- uint32_t history_size;
- uint32_t last_boot;
- } get_info;
- struct {
- uint16_t codes[EC_PORT80_SIZE_MAX];
- } data;
- };
-} __packed;
-
-struct ec_response_port80_last_boot {
- uint16_t code;
-} __packed;
-
-/*****************************************************************************/
-/* Thermal engine commands. Note that there are two implementations. We'll
- * reuse the command number, but the data and behavior are incompatible.
- * Version 0 is what originally shipped on Link.
- * Version 1 separates the CPU thermal limits from the fan control.
- */
-
-#define EC_CMD_THERMAL_SET_THRESHOLD 0x50
-#define EC_CMD_THERMAL_GET_THRESHOLD 0x51
-
-/* The version 0 structs are opaque. You have to know what they are for
- * the get/set commands to make any sense.
- */
-
-/* Version 0 - set */
-struct ec_params_thermal_set_threshold {
- uint8_t sensor_type;
- uint8_t threshold_id;
- uint16_t value;
-} __packed;
-
-/* Version 0 - get */
-struct ec_params_thermal_get_threshold {
- uint8_t sensor_type;
- uint8_t threshold_id;
-} __packed;
-
-struct ec_response_thermal_get_threshold {
- uint16_t value;
-} __packed;
-
-
-/* The version 1 structs are visible. */
-enum ec_temp_thresholds {
- EC_TEMP_THRESH_WARN = 0,
- EC_TEMP_THRESH_HIGH,
- EC_TEMP_THRESH_HALT,
-
- EC_TEMP_THRESH_COUNT
-};
-
-/* Thermal configuration for one temperature sensor. Temps are in degrees K.
- * Zero values will be silently ignored by the thermal task.
- */
-struct ec_thermal_config {
- uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */
- uint32_t temp_fan_off; /* no active cooling needed */
- uint32_t temp_fan_max; /* max active cooling needed */
-} __packed;
-
-/* Version 1 - get config for one sensor. */
-struct ec_params_thermal_get_threshold_v1 {
- uint32_t sensor_num;
-} __packed;
-/* This returns a struct ec_thermal_config */
-
-/* Version 1 - set config for one sensor.
- * Use read-modify-write for best results! */
-struct ec_params_thermal_set_threshold_v1 {
- uint32_t sensor_num;
- struct ec_thermal_config cfg;
-} __packed;
-/* This returns no data */
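-
-/*
- * Illustrative sketch of the read-modify-write pattern recommended above.
- * ec_cmd() is a hypothetical transport helper, not part of this header;
- * it sends one host command and fills in the response.
- */
-extern int ec_cmd(int cmd, int version, const void *out, int outsize,
-    void *in, int insize); /* hypothetical */
-
-static inline void ec_thermal_raise_halt_temp(uint32_t sensor_num)
-{
- struct ec_params_thermal_get_threshold_v1 get = { .sensor_num = sensor_num };
- struct ec_params_thermal_set_threshold_v1 set = { .sensor_num = sensor_num };
-
- /* read the current config ... */
- ec_cmd(EC_CMD_THERMAL_GET_THRESHOLD, 1, &get, sizeof(get),
-        &set.cfg, sizeof(set.cfg));
- /* ... modify just one field ... */
- set.cfg.temp_host[EC_TEMP_THRESH_HALT] += 1;
- /* ... and write the whole config back */
- ec_cmd(EC_CMD_THERMAL_SET_THRESHOLD, 1, &set, sizeof(set), NULL, 0);
-}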
-
-/****************************************************************************/
-
-/* Toggle automatic fan control */
-#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x52
-
-/* Get TMP006 calibration data */
-#define EC_CMD_TMP006_GET_CALIBRATION 0x53
-
-struct ec_params_tmp006_get_calibration {
- uint8_t index;
-} __packed;
-
-struct ec_response_tmp006_get_calibration {
- float s0;
- float b0;
- float b1;
- float b2;
-} __packed;
-
-/* Set TMP006 calibration data */
-#define EC_CMD_TMP006_SET_CALIBRATION 0x54
-
-struct ec_params_tmp006_set_calibration {
- uint8_t index;
- uint8_t reserved[3]; /* Reserved; set 0 */
- float s0;
- float b0;
- float b1;
- float b2;
-} __packed;
-
-/* Read raw TMP006 data */
-#define EC_CMD_TMP006_GET_RAW 0x55
-
-struct ec_params_tmp006_get_raw {
- uint8_t index;
-} __packed;
-
-struct ec_response_tmp006_get_raw {
- int32_t t; /* In 1/100 K */
- int32_t v; /* In nV */
-};
-
-/*****************************************************************************/
-/* MKBP - Matrix KeyBoard Protocol */
-
-/*
- * Read key state
- *
- * Returns raw data for keyboard cols; see ec_response_mkbp_info.cols for
- * expected response size.
- *
- * NOTE: This has been superseded by EC_CMD_MKBP_GET_NEXT_EVENT. If you wish
- * to obtain the instantaneous state, use EC_CMD_MKBP_INFO with the type
- * EC_MKBP_INFO_CURRENT and event EC_MKBP_EVENT_KEY_MATRIX.
- */
-#define EC_CMD_MKBP_STATE 0x60
-
-/*
- * Provide information about various MKBP things. See enum ec_mkbp_info_type.
- */
-#define EC_CMD_MKBP_INFO 0x61
-
-struct ec_response_mkbp_info {
- uint32_t rows;
- uint32_t cols;
- /* Formerly "switches", which was 0. */
- uint8_t reserved;
-} __packed;
-
-struct ec_params_mkbp_info {
- uint8_t info_type;
- uint8_t event_type;
-} __packed;
-
-enum ec_mkbp_info_type {
- /*
- * Info about the keyboard matrix: number of rows and columns.
- *
- * Returns struct ec_response_mkbp_info.
- */
- EC_MKBP_INFO_KBD = 0,
-
- /*
- * For buttons and switches, info about which specifically are
- * supported. event_type must be set to one of the values in enum
- * ec_mkbp_event.
- *
- * For EC_MKBP_EVENT_BUTTON and EC_MKBP_EVENT_SWITCH, returns a 4-byte
- * bitmask indicating which buttons or switches are present. See the
- * bit indices below.
- */
- EC_MKBP_INFO_SUPPORTED = 1,
-
- /*
- * Instantaneous state of buttons and switches.
- *
- * event_type must be set to one of the values in enum ec_mkbp_event.
- *
- * For EC_MKBP_EVENT_KEY_MATRIX, returns uint8_t key_matrix[13]
- * indicating the current state of the keyboard matrix.
- *
- * For EC_MKBP_EVENT_HOST_EVENT, returns uint32_t host_event, the raw
- * event state.
- *
- * For EC_MKBP_EVENT_BUTTON, returns uint32_t buttons, indicating the
- * state of supported buttons.
- *
- * For EC_MKBP_EVENT_SWITCH, returns uint32_t switches, indicating the
- * state of supported switches.
- */
- EC_MKBP_INFO_CURRENT = 2,
-};
-
-/* Simulate key press */
-#define EC_CMD_MKBP_SIMULATE_KEY 0x62
-
-struct ec_params_mkbp_simulate_key {
- uint8_t col;
- uint8_t row;
- uint8_t pressed;
-} __packed;
-
-/* Configure keyboard scanning */
-#define EC_CMD_MKBP_SET_CONFIG 0x64
-#define EC_CMD_MKBP_GET_CONFIG 0x65
-
-/* flags */
-enum mkbp_config_flags {
- EC_MKBP_FLAGS_ENABLE = 1, /* Enable keyboard scanning */
-};
-
-enum mkbp_config_valid {
- EC_MKBP_VALID_SCAN_PERIOD = 1 << 0,
- EC_MKBP_VALID_POLL_TIMEOUT = 1 << 1,
- EC_MKBP_VALID_MIN_POST_SCAN_DELAY = 1 << 3,
- EC_MKBP_VALID_OUTPUT_SETTLE = 1 << 4,
- EC_MKBP_VALID_DEBOUNCE_DOWN = 1 << 5,
- EC_MKBP_VALID_DEBOUNCE_UP = 1 << 6,
- EC_MKBP_VALID_FIFO_MAX_DEPTH = 1 << 7,
-};
-
-/* Configuration for our key scanning algorithm */
-struct ec_mkbp_config {
- uint32_t valid_mask; /* valid fields */
- uint8_t flags; /* some flags (enum mkbp_config_flags) */
- uint8_t valid_flags; /* which flags are valid */
- uint16_t scan_period_us; /* period between start of scans */
- /* revert to interrupt mode after no activity for this long */
- uint32_t poll_timeout_us;
- /*
- * minimum post-scan relax time. Once we finish a scan we check
- * the time until we are due to start the next one. If this time is
- * shorter than this field, we use this field instead.
- */
- uint16_t min_post_scan_delay_us;
- /* delay between setting up output and waiting for it to settle */
- uint16_t output_settle_us;
- uint16_t debounce_down_us; /* time for debounce on key down */
- uint16_t debounce_up_us; /* time for debounce on key up */
- /* maximum depth to allow for fifo (0 = no keyscan output) */
- uint8_t fifo_max_depth;
-} __packed;
-
-struct ec_params_mkbp_set_config {
- struct ec_mkbp_config config;
-} __packed;
-
-struct ec_response_mkbp_get_config {
- struct ec_mkbp_config config;
-} __packed;
-
-/* Run the key scan emulation */
-#define EC_CMD_KEYSCAN_SEQ_CTRL 0x66
-
-enum ec_keyscan_seq_cmd {
- EC_KEYSCAN_SEQ_STATUS = 0, /* Get status information */
- EC_KEYSCAN_SEQ_CLEAR = 1, /* Clear sequence */
- EC_KEYSCAN_SEQ_ADD = 2, /* Add item to sequence */
- EC_KEYSCAN_SEQ_START = 3, /* Start running sequence */
- EC_KEYSCAN_SEQ_COLLECT = 4, /* Collect sequence summary data */
-};
-
-enum ec_collect_flags {
- /*
- * Indicates this scan was processed by the EC. Due to timing, some
- * scans may be skipped.
- */
- EC_KEYSCAN_SEQ_FLAG_DONE = 1 << 0,
-};
-
-struct ec_collect_item {
- uint8_t flags; /* some flags (enum ec_collect_flags) */
-};
-
-struct ec_params_keyscan_seq_ctrl {
- uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */
- union {
- struct {
- uint8_t active; /* still active */
- uint8_t num_items; /* number of items */
- /* Current item being presented */
- uint8_t cur_item;
- } status;
- struct {
- /*
- * Absolute time for this scan, measured from the
- * start of the sequence.
- */
- uint32_t time_us;
- uint8_t scan[0]; /* keyscan data */
- } add;
- struct {
- uint8_t start_item; /* First item to return */
- uint8_t num_items; /* Number of items to return */
- } collect;
- };
-} __packed;
-
-struct ec_result_keyscan_seq_ctrl {
- union {
- struct {
- uint8_t num_items; /* Number of items */
- /* Data for each item */
- struct ec_collect_item item[0];
- } collect;
- };
-} __packed;
-
-/*
- * Command for retrieving the next pending MKBP event from the EC device
- *
- * The device replies with UNAVAILABLE if there aren't any pending events.
- */
-#define EC_CMD_GET_NEXT_EVENT 0x67
-
-enum ec_mkbp_event {
- /* Keyboard matrix changed. The event data is the new matrix state. */
- EC_MKBP_EVENT_KEY_MATRIX = 0,
-
- /* New host event. The event data is 4 bytes of host event flags. */
- EC_MKBP_EVENT_HOST_EVENT = 1,
-
- /* New Sensor FIFO data. The event data is fifo_info structure. */
- EC_MKBP_EVENT_SENSOR_FIFO = 2,
-
- /* The state of the non-matrixed buttons has changed. */
- EC_MKBP_EVENT_BUTTON = 3,
-
- /* The state of the switches has changed. */
- EC_MKBP_EVENT_SWITCH = 4,
-
- /* EC sent a sysrq command */
- EC_MKBP_EVENT_SYSRQ = 6,
-
- /* Number of MKBP events */
- EC_MKBP_EVENT_COUNT,
-};
-
-union ec_response_get_next_data {
- uint8_t key_matrix[13];
-
- /* Unaligned */
- uint32_t host_event;
-
- uint32_t buttons;
- uint32_t switches;
- uint32_t sysrq;
-} __packed;
-
-struct ec_response_get_next_event {
- uint8_t event_type;
- /* Followed by event data if any */
- union ec_response_get_next_data data;
-} __packed;
-
-/* Bit indices for buttons and switches. */
-/* Buttons */
-#define EC_MKBP_POWER_BUTTON 0
-#define EC_MKBP_VOL_UP 1
-#define EC_MKBP_VOL_DOWN 2
-
-/* Switches */
-#define EC_MKBP_LID_OPEN 0
-#define EC_MKBP_TABLET_MODE 1
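-
-/*
- * Illustrative sketch: decoding a switch event. For EC_MKBP_EVENT_SWITCH
- * the payload is a bitmask indexed by the switch definitions above.
- */
-static inline int ec_event_lid_open(const struct ec_response_get_next_event *e)
-{
- if (e->event_type != EC_MKBP_EVENT_SWITCH)
-  return -1; /* not a switch event */
- return !!(e->data.switches & (1 << EC_MKBP_LID_OPEN));
-}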
-
-/*****************************************************************************/
-/* Temperature sensor commands */
-
-/* Read temperature sensor info */
-#define EC_CMD_TEMP_SENSOR_GET_INFO 0x70
-
-struct ec_params_temp_sensor_get_info {
- uint8_t id;
-} __packed;
-
-struct ec_response_temp_sensor_get_info {
- char sensor_name[32];
- uint8_t sensor_type;
-} __packed;
-
-/*****************************************************************************/
-
-/*
- * Note: host commands 0x80 - 0x87 are reserved to avoid conflict with ACPI
- * commands accidentally sent to the wrong interface. See the ACPI section
- * below.
- */
-
-/*****************************************************************************/
-/* Host event commands */
-
-/*
- * Host event mask params and response structures, shared by all of the host
- * event commands below.
- */
-struct ec_params_host_event_mask {
- uint32_t mask;
-} __packed;
-
-struct ec_response_host_event_mask {
- uint32_t mask;
-} __packed;
-
-/* These all use ec_response_host_event_mask */
-#define EC_CMD_HOST_EVENT_GET_B 0x87
-#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x88
-#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x89
-#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x8d
-
-/* These all use ec_params_host_event_mask */
-#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x8a
-#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x8b
-#define EC_CMD_HOST_EVENT_CLEAR 0x8c
-#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x8e
-#define EC_CMD_HOST_EVENT_CLEAR_B 0x8f
-
-/*****************************************************************************/
-/* Switch commands */
-
-/* Enable/disable LCD backlight */
-#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x90
-
-struct ec_params_switch_enable_backlight {
- uint8_t enabled;
-} __packed;
-
-/* Enable/disable WLAN/Bluetooth */
-#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x91
-#define EC_VER_SWITCH_ENABLE_WIRELESS 1
-
-/* Version 0 params; no response */
-struct ec_params_switch_enable_wireless_v0 {
- uint8_t enabled;
-} __packed;
-
-/* Version 1 params */
-struct ec_params_switch_enable_wireless_v1 {
- /* Flags to enable now */
- uint8_t now_flags;
-
- /* Which flags to copy from now_flags */
- uint8_t now_mask;
-
- /*
- * Flags to leave enabled in S3, if they're on at the S0->S3
- * transition. (Other flags will be disabled by the S0->S3
- * transition.)
- */
- uint8_t suspend_flags;
-
- /* Which flags to copy from suspend_flags */
- uint8_t suspend_mask;
-} __packed;
-
-/* Version 1 response */
-struct ec_response_switch_enable_wireless_v1 {
- /* Flags to enable now */
- uint8_t now_flags;
-
- /* Flags to leave enabled in S3 */
- uint8_t suspend_flags;
-} __packed;
-
-/*****************************************************************************/
-/* GPIO commands. Only available on EC if write protect has been disabled. */
-
-/* Set GPIO output value */
-#define EC_CMD_GPIO_SET 0x92
-
-struct ec_params_gpio_set {
- char name[32];
- uint8_t val;
-} __packed;
-
-/* Get GPIO value */
-#define EC_CMD_GPIO_GET 0x93
-
-/* Version 0 of input params and response */
-struct ec_params_gpio_get {
- char name[32];
-} __packed;
-struct ec_response_gpio_get {
- uint8_t val;
-} __packed;
-
-/* Version 1 of input params and response */
-struct ec_params_gpio_get_v1 {
- uint8_t subcmd;
- union {
- struct {
- char name[32];
- } get_value_by_name;
- struct {
- uint8_t index;
- } get_info;
- };
-} __packed;
-
-struct ec_response_gpio_get_v1 {
- union {
- struct {
- uint8_t val;
- } get_value_by_name, get_count;
- struct {
- uint8_t val;
- char name[32];
- uint32_t flags;
- } get_info;
- };
-} __packed;
-
-enum gpio_get_subcmd {
- EC_GPIO_GET_BY_NAME = 0,
- EC_GPIO_GET_COUNT = 1,
- EC_GPIO_GET_INFO = 2,
-};
-
-/*****************************************************************************/
-/* I2C commands. Only available when flash write protect is unlocked. */
-
-/*
- * TODO(crosbug.com/p/23570): These commands are deprecated, and will be
- * removed soon. Use EC_CMD_I2C_XFER instead.
- */
-
-/* Read I2C bus */
-#define EC_CMD_I2C_READ 0x94
-
-struct ec_params_i2c_read {
- uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
- uint8_t read_size; /* Either 8 or 16. */
- uint8_t port;
- uint8_t offset;
-} __packed;
-struct ec_response_i2c_read {
- uint16_t data;
-} __packed;
-
-/* Write I2C bus */
-#define EC_CMD_I2C_WRITE 0x95
-
-struct ec_params_i2c_write {
- uint16_t data;
- uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
- uint8_t write_size; /* Either 8 or 16. */
- uint8_t port;
- uint8_t offset;
-} __packed;
-
-/*****************************************************************************/
-/* Charge state commands. Only available when flash write protect unlocked. */
-
-/* Force charge state machine to stop charging the battery or force it to
- * discharge the battery.
- */
-#define EC_CMD_CHARGE_CONTROL 0x96
-#define EC_VER_CHARGE_CONTROL 1
-
-enum ec_charge_control_mode {
- CHARGE_CONTROL_NORMAL = 0,
- CHARGE_CONTROL_IDLE,
- CHARGE_CONTROL_DISCHARGE,
-};
-
-struct ec_params_charge_control {
- uint32_t mode; /* enum charge_control_mode */
-} __packed;
-
-/*****************************************************************************/
-/* Console commands. Only available when flash write protect is unlocked. */
-
-/* Snapshot console output buffer for use by EC_CMD_CONSOLE_READ. */
-#define EC_CMD_CONSOLE_SNAPSHOT 0x97
-
-/*
- * Read data from the saved snapshot. If the subcmd parameter is
- * CONSOLE_READ_NEXT, this will return data starting from the beginning of
- * the latest snapshot. If it is CONSOLE_READ_RECENT, it will start from the
- * end of the previous snapshot.
- *
- * The params are only looked at in version >= 1 of this command. Prior
- * versions will just default to CONSOLE_READ_NEXT behavior.
- *
- * Response is a null-terminated string; an empty string if there is no
- * more remaining output.
- */
-#define EC_CMD_CONSOLE_READ 0x98
-
-enum ec_console_read_subcmd {
- CONSOLE_READ_NEXT = 0,
- CONSOLE_READ_RECENT
-};
-
-struct ec_params_console_read_v1 {
- uint8_t subcmd; /* enum ec_console_read_subcmd */
-} __packed;
-
-/*****************************************************************************/
-
-/*
- * Cut off battery power immediately or after the host has shut down.
- *
- * Returns EC_RES_INVALID_COMMAND if unsupported by a board/battery,
- * EC_RES_SUCCESS if the command was successful, or
- * EC_RES_ERROR if the cutoff command failed.
- */
-
-#define EC_CMD_BATTERY_CUT_OFF 0x99
-
-#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0)
-
-struct ec_params_battery_cutoff {
- uint8_t flags;
-} __packed;
-
-/*****************************************************************************/
-/* USB port mux control. */
-
-/*
- * Switch USB mux or return to automatic switching.
- */
-#define EC_CMD_USB_MUX 0x9a
-
-struct ec_params_usb_mux {
- uint8_t mux;
-} __packed;
-
-/*****************************************************************************/
-/* LDOs / FETs control. */
-
-enum ec_ldo_state {
- EC_LDO_STATE_OFF = 0, /* the LDO / FET is shut down */
- EC_LDO_STATE_ON = 1, /* the LDO / FET is ON / providing power */
-};
-
-/*
- * Switch on/off a LDO.
- */
-#define EC_CMD_LDO_SET 0x9b
-
-struct ec_params_ldo_set {
- uint8_t index;
- uint8_t state;
-} __packed;
-
-/*
- * Get LDO state.
- */
-#define EC_CMD_LDO_GET 0x9c
-
-struct ec_params_ldo_get {
- uint8_t index;
-} __packed;
-
-struct ec_response_ldo_get {
- uint8_t state;
-} __packed;
-
-/*****************************************************************************/
-/* Power info. */
-
-/*
- * Get power info.
- */
-#define EC_CMD_POWER_INFO 0x9d
-
-struct ec_response_power_info {
- uint32_t usb_dev_type;
- uint16_t voltage_ac;
- uint16_t voltage_system;
- uint16_t current_system;
- uint16_t usb_current_limit;
-} __packed;
-
-/*****************************************************************************/
-/* I2C passthru command */
-
-#define EC_CMD_I2C_PASSTHRU 0x9e
-
-/* Read data; if not present, message is a write */
-#define EC_I2C_FLAG_READ (1 << 15)
-
-/* Mask for address */
-#define EC_I2C_ADDR_MASK 0x3ff
-
-#define EC_I2C_STATUS_NAK (1 << 0) /* Transfer was not acknowledged */
-#define EC_I2C_STATUS_TIMEOUT (1 << 1) /* Timeout during transfer */
-
-/* Any error */
-#define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT)
-
-struct ec_params_i2c_passthru_msg {
- uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */
- uint16_t len; /* Number of bytes to read or write */
-} __packed;
-
-struct ec_params_i2c_passthru {
- uint8_t port; /* I2C port number */
- uint8_t num_msgs; /* Number of messages */
- struct ec_params_i2c_passthru_msg msg[];
- /* Data to write for all messages is concatenated here */
-} __packed;
-
-struct ec_response_i2c_passthru {
- uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */
- uint8_t num_msgs; /* Number of messages processed */
- uint8_t data[]; /* Data read by messages concatenated here */
-} __packed;
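-
-/*
- * Illustrative sketch: packing an SMBUS-style register read as a single
- * passthru transaction -- a one-byte write of the register index followed
- * by a read. Outbound data goes after the message headers, as described
- * above. Buffer layout only; the caller still sends the request and must
- * size buf accordingly. The helper name is not part of this header.
- */
-static inline unsigned int ec_i2c_pack_reg_read(uint8_t *buf, uint8_t port,
-    uint16_t addr, uint8_t reg, uint16_t read_len)
-{
- struct ec_params_i2c_passthru *req = (void *)buf;
- struct ec_params_i2c_passthru_msg *msg = req->msg;
-
- req->port = port;
- req->num_msgs = 2;
- msg[0].addr_flags = addr;   /* write message: register index */
- msg[0].len = 1;
- msg[1].addr_flags = addr | EC_I2C_FLAG_READ; /* then read it back */
- msg[1].len = read_len;
- buf[sizeof(*req) + 2 * sizeof(*msg)] = reg; /* concatenated write data */
- return sizeof(*req) + 2 * sizeof(*msg) + 1; /* total request length */
-}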
-
-/*****************************************************************************/
-/* Power button hang detect */
-
-#define EC_CMD_HANG_DETECT 0x9f
-
-/* Reasons to start hang detection timer */
-/* Power button pressed */
-#define EC_HANG_START_ON_POWER_PRESS (1 << 0)
-
-/* Lid closed */
-#define EC_HANG_START_ON_LID_CLOSE (1 << 1)
-
- /* Lid opened */
-#define EC_HANG_START_ON_LID_OPEN (1 << 2)
-
-/* Start of AP S3->S0 transition (booting or resuming from suspend) */
-#define EC_HANG_START_ON_RESUME (1 << 3)
-
-/* Reasons to cancel hang detection */
-
-/* Power button released */
-#define EC_HANG_STOP_ON_POWER_RELEASE (1 << 8)
-
-/* Any host command from AP received */
-#define EC_HANG_STOP_ON_HOST_COMMAND (1 << 9)
-
-/* Stop on end of AP S0->S3 transition (suspending or shutting down) */
-#define EC_HANG_STOP_ON_SUSPEND (1 << 10)
-
-/*
- * If this flag is set, all the other fields are ignored, and the hang detect
- * timer is started. This provides the AP a way to start the hang timer
- * without reconfiguring any of the other hang detect settings. Note that
- * you must previously have configured the timeouts.
- */
-#define EC_HANG_START_NOW (1 << 30)
-
-/*
- * If this flag is set, all the other fields are ignored (including
- * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer
- * without reconfiguring any of the other hang detect settings.
- */
-#define EC_HANG_STOP_NOW (1 << 31)
-
-struct ec_params_hang_detect {
- /* Flags; see EC_HANG_* */
- uint32_t flags;
-
- /* Timeout in msec before generating host event, if enabled */
- uint16_t host_event_timeout_msec;
-
- /* Timeout in msec before generating warm reboot, if enabled */
- uint16_t warm_reboot_timeout_msec;
-} __packed;
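-
-/*
- * Illustrative sketch: arm hang detection on power-button press, cancel it
- * on any host command, and ask for a warm reboot after 10 seconds. The
- * timeout values are arbitrary example numbers.
- */
-static inline void ec_hang_detect_setup(struct ec_params_hang_detect *p)
-{
- p->flags = EC_HANG_START_ON_POWER_PRESS | EC_HANG_STOP_ON_HOST_COMMAND;
- p->host_event_timeout_msec = 0;  /* host event disabled */
- p->warm_reboot_timeout_msec = 10000; /* 10 s to warm reboot */
-}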
-
-/*****************************************************************************/
-/* Commands for battery charging */
-
-/*
- * This is the single catch-all host command to exchange data regarding the
- * charge state machine (v2 and up).
- */
-#define EC_CMD_CHARGE_STATE 0xa0
-
-/* Subcommands for this host command */
-enum charge_state_command {
- CHARGE_STATE_CMD_GET_STATE,
- CHARGE_STATE_CMD_GET_PARAM,
- CHARGE_STATE_CMD_SET_PARAM,
- CHARGE_STATE_NUM_CMDS
-};
-
-/*
- * Known param numbers are defined here. Ranges are reserved for board-specific
- * params, which are handled by the particular implementations.
- */
-enum charge_state_params {
- CS_PARAM_CHG_VOLTAGE, /* charger voltage limit */
- CS_PARAM_CHG_CURRENT, /* charger current limit */
- CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */
- CS_PARAM_CHG_STATUS, /* charger-specific status */
- CS_PARAM_CHG_OPTION, /* charger-specific options */
- /* How many so far? */
- CS_NUM_BASE_PARAMS,
-
- /* Range for CONFIG_CHARGER_PROFILE_OVERRIDE params */
- CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000,
- CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff,
-
- /* Other custom param ranges go here... */
-};
-
-struct ec_params_charge_state {
- uint8_t cmd; /* enum charge_state_command */
- union {
- struct {
- /* no args */
- } get_state;
-
- struct {
- uint32_t param; /* enum charge_state_param */
- } get_param;
-
- struct {
- uint32_t param; /* param to set */
- uint32_t value; /* value to set */
- } set_param;
- };
-} __packed;
-
-struct ec_response_charge_state {
- union {
- struct {
- int ac;
- int chg_voltage;
- int chg_current;
- int chg_input_current;
- int batt_state_of_charge;
- } get_state;
-
- struct {
- uint32_t value;
- } get_param;
- struct {
- /* no return values */
- } set_param;
- };
-} __packed;
-
-
-/*
- * Set maximum battery charging current.
- */
-#define EC_CMD_CHARGE_CURRENT_LIMIT 0xa1
-
-struct ec_params_current_limit {
- uint32_t limit; /* in mA */
-} __packed;
-
-/*
- * Set maximum external power current.
- */
-#define EC_CMD_EXT_POWER_CURRENT_LIMIT 0xa2
-
-struct ec_params_ext_power_current_limit {
- uint32_t limit; /* in mA */
-} __packed;
-
-/* Inform the EC when entering a sleep state */
-#define EC_CMD_HOST_SLEEP_EVENT 0xa9
-
-enum host_sleep_event {
- HOST_SLEEP_EVENT_S3_SUSPEND = 1,
- HOST_SLEEP_EVENT_S3_RESUME = 2,
- HOST_SLEEP_EVENT_S0IX_SUSPEND = 3,
- HOST_SLEEP_EVENT_S0IX_RESUME = 4
-};
-
-struct ec_params_host_sleep_event {
- uint8_t sleep_event;
-} __packed;
-
-/*****************************************************************************/
-/* Smart battery pass-through */
-
-/* Get / Set 16-bit smart battery registers */
-#define EC_CMD_SB_READ_WORD 0xb0
-#define EC_CMD_SB_WRITE_WORD 0xb1
-
-/* Get / Set string smart battery parameters
- * formatted as SMBUS "block".
- */
-#define EC_CMD_SB_READ_BLOCK 0xb2
-#define EC_CMD_SB_WRITE_BLOCK 0xb3
-
-struct ec_params_sb_rd {
- uint8_t reg;
-} __packed;
-
-struct ec_response_sb_rd_word {
- uint16_t value;
-} __packed;
-
-struct ec_params_sb_wr_word {
- uint8_t reg;
- uint16_t value;
-} __packed;
-
-struct ec_response_sb_rd_block {
- uint8_t data[32];
-} __packed;
-
-struct ec_params_sb_wr_block {
- uint8_t reg;
- uint16_t data[32];
-} __packed;
-
-/*****************************************************************************/
-/* Battery vendor parameters
- *
- * Get or set vendor-specific parameters in the battery. Implementations may
- * differ between boards or batteries. On a set operation, the response
- * contains the actual value set, which may be rounded or clipped from the
- * requested value.
- */
-
-#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4
-
-enum ec_battery_vendor_param_mode {
- BATTERY_VENDOR_PARAM_MODE_GET = 0,
- BATTERY_VENDOR_PARAM_MODE_SET,
-};
-
-struct ec_params_battery_vendor_param {
- uint32_t param;
- uint32_t value;
- uint8_t mode;
-} __packed;
-
-struct ec_response_battery_vendor_param {
- uint32_t value;
-} __packed;
-
-/*****************************************************************************/
-/* System commands */
-
-/*
- * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't
- * necessarily reboot the EC. Rename to "image" or something similar?
- */
-#define EC_CMD_REBOOT_EC 0xd2
-
-/* Command */
-enum ec_reboot_cmd {
- EC_REBOOT_CANCEL = 0, /* Cancel a pending reboot */
- EC_REBOOT_JUMP_RO = 1, /* Jump to RO without rebooting */
- EC_REBOOT_JUMP_RW = 2, /* Jump to RW without rebooting */
- /* (command 3 was jump to RW-B) */
- EC_REBOOT_COLD = 4, /* Cold-reboot */
- EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */
- EC_REBOOT_HIBERNATE = 6 /* Hibernate EC */
-};
-
-/* Flags for ec_params_reboot_ec.reboot_flags */
-#define EC_REBOOT_FLAG_RESERVED0 (1 << 0) /* Was recovery request */
-#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN (1 << 1) /* Reboot after AP shutdown */
-
-struct ec_params_reboot_ec {
- uint8_t cmd; /* enum ec_reboot_cmd */
- uint8_t flags; /* See EC_REBOOT_FLAG_* */
-} __packed;
-
-/*
- * Get information on last EC panic.
- *
- * Returns variable-length platform-dependent panic information. See panic.h
- * for details.
- */
-#define EC_CMD_GET_PANIC_INFO 0xd3
-
-/*****************************************************************************/
-/*
- * ACPI commands
- *
- * These are valid ONLY on the ACPI command/data port.
- */
-
-/*
- * ACPI Read Embedded Controller
- *
- * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*).
- *
- * Use the following sequence:
- *
- * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD
- * - Wait for EC_LPC_CMDR_PENDING bit to clear
- * - Write address to EC_LPC_ADDR_ACPI_DATA
- * - Wait for EC_LPC_CMDR_DATA bit to set
- * - Read value from EC_LPC_ADDR_ACPI_DATA
- */
-#define EC_CMD_ACPI_READ 0x80
-
-/*
- * ACPI Write Embedded Controller
- *
- * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*).
- *
- * Use the following sequence:
- *
- * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD
- * - Wait for EC_LPC_CMDR_PENDING bit to clear
- * - Write address to EC_LPC_ADDR_ACPI_DATA
- * - Wait for EC_LPC_CMDR_PENDING bit to clear
- * - Write value to EC_LPC_ADDR_ACPI_DATA
- */
-#define EC_CMD_ACPI_WRITE 0x81
-
-/*
- * ACPI Query Embedded Controller
- *
- * This clears the lowest-order bit in the currently pending host events, and
- * sets the result code to the 1-based index of the bit (event 0x00000001 = 1,
- * event 0x80000000 = 32), or 0 if no event was pending.
- */
-#define EC_CMD_ACPI_QUERY_EVENT 0x84
-
-/* Valid addresses in ACPI memory space, for read/write commands */
-
-/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */
-#define EC_ACPI_MEM_VERSION 0x00
-/*
- * Test location; writing a value here updates the test complement byte to
- * (0xff - value).
- */
-#define EC_ACPI_MEM_TEST 0x01
-/* Test complement; writes here are ignored. */
-#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02
-
-/* Keyboard backlight brightness percent (0 - 100) */
-#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03
-/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */
-#define EC_ACPI_MEM_FAN_DUTY 0x04
-
-/*
- * DPTF temp thresholds. Any of the EC's temp sensors can have up to two
- * independent thresholds attached to them. The current value of the ID
- * register determines which sensor is affected by the THRESHOLD and COMMIT
- * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme
- * as the memory-mapped sensors. The COMMIT register applies those settings.
- *
- * The spec does not mandate any way to read back the threshold settings
- * themselves, but when a threshold is crossed the AP needs a way to determine
- * which sensor(s) are responsible. Each reading of the ID register clears and
- * returns one sensor ID that has crossed one of its thresholds (in either
- * direction) since the last read. A value of 0xFF means "no new thresholds
- * have tripped". Setting or enabling the thresholds for a sensor will clear
- * the unread event count for that sensor.
- */
-#define EC_ACPI_MEM_TEMP_ID 0x05
-#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06
-#define EC_ACPI_MEM_TEMP_COMMIT 0x07
-/*
- * Here are the bits for the COMMIT register:
- * bit 0 selects the threshold index for the chosen sensor (0/1)
- * bit 1 enables/disables the selected threshold (0 = off, 1 = on)
- * Each write to the commit register affects one threshold.
- */
-#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK (1 << 0)
-#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK (1 << 1)
-/*
- * Example:
- *
- * Set the thresholds for sensor 2 to 50 C and 60 C:
- * write 2 to [0x05] -- select temp sensor 2
- * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET
- * write 0x2 to [0x07] -- enable threshold 0 with this value
- * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET
- * write 0x3 to [0x07] -- enable threshold 1 with this value
- *
- * Disable the 60 C threshold, leaving the 50 C threshold unchanged:
- * write 2 to [0x05] -- select temp sensor 2
- * write 0x1 to [0x07] -- disable threshold 1
- */
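/*
 * The 50 C step of the example above as code, reusing the ec_acpi_write()
 * sketch from the ACPI write command section (illustrative only).
 */
static void ec_set_sensor2_threshold0_50c(void)
{
	ec_acpi_write(EC_ACPI_MEM_TEMP_ID, 2);			/* select sensor 2 */
	ec_acpi_write(EC_ACPI_MEM_TEMP_THRESHOLD, 0x7b);	/* C_TO_K(50) - offset */
	ec_acpi_write(EC_ACPI_MEM_TEMP_COMMIT,
		      EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK);	/* enable threshold 0 */
}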
-
-/* DPTF battery charging current limit */
-#define EC_ACPI_MEM_CHARGING_LIMIT 0x08
-
-/* Charging limit is specified in 64 mA steps */
-#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64
-/* Value to disable DPTF battery charging limit */
-#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff
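/*
 * Illustrative conversion, assuming the register takes a step count:
 * a 2048 mA limit becomes 2048 / 64 = 32 steps.
 */
static inline uint8_t ec_charging_limit_steps(unsigned int limit_ma)
{
	return limit_ma / EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA;
}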
-
-/* Current version of ACPI memory address space */
-#define EC_ACPI_MEM_VERSION_CURRENT 1
-
-
-/*****************************************************************************/
-/*
- * Special commands
- *
- * These do not follow the normal rules for commands. See each command for
- * details.
- */
-
-/*
- * Reboot NOW
- *
- * This command will work even when the EC LPC interface is busy, because the
- * reboot command is processed at interrupt level. Note that when the EC
- * reboots, the host will reboot too, so there is no response to this command.
- *
- * Use EC_CMD_REBOOT_EC to reboot the EC more politely.
- */
-#define EC_CMD_REBOOT 0xd1 /* Think "die" */
-
-/*
- * Resend last response (not supported on LPC).
- *
- * Returns EC_RES_UNAVAILABLE if there is no response available - for example,
- * there was no previous command, or the previous command's response was too
- * big to save.
- */
-#define EC_CMD_RESEND_RESPONSE 0xdb
-
-/*
- * This header byte on a command indicates version 0. Any header byte less
- * than this means that we are talking to an old EC which doesn't support
- * versioning. In that case, we assume version 0.
- *
- * Header bytes greater than this indicate a later version. For example,
- * EC_CMD_VERSION0 + 1 means we are using version 1.
- *
- * The old EC interface must not use commands 0xdc or higher.
- */
-#define EC_CMD_VERSION0 0xdc
-
-#endif /* !__ACPI__ */
-
-/*****************************************************************************/
-/*
- * PD commands
- *
- * These commands are for PD MCU communication.
- */
-
-/* EC to PD MCU exchange status command */
-#define EC_CMD_PD_EXCHANGE_STATUS 0x100
-
-/* Status of EC being sent to PD */
-struct ec_params_pd_status {
- int8_t batt_soc; /* battery state of charge */
-} __packed;
-
-/* Status of PD being sent back to EC */
-struct ec_response_pd_status {
- int8_t status; /* PD MCU status */
- uint32_t curr_lim_ma; /* input current limit */
-} __packed;
-
-/* Set USB type-C port role and muxes */
-#define EC_CMD_USB_PD_CONTROL 0x101
-
-enum usb_pd_control_role {
- USB_PD_CTRL_ROLE_NO_CHANGE = 0,
- USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */
- USB_PD_CTRL_ROLE_TOGGLE_OFF = 2,
- USB_PD_CTRL_ROLE_FORCE_SINK = 3,
- USB_PD_CTRL_ROLE_FORCE_SOURCE = 4,
-};
-
-enum usb_pd_control_mux {
- USB_PD_CTRL_MUX_NO_CHANGE = 0,
- USB_PD_CTRL_MUX_NONE = 1,
- USB_PD_CTRL_MUX_USB = 2,
- USB_PD_CTRL_MUX_DP = 3,
- USB_PD_CTRL_MUX_DOCK = 4,
- USB_PD_CTRL_MUX_AUTO = 5,
-};
-
-struct ec_params_usb_pd_control {
- uint8_t port;
- uint8_t role;
- uint8_t mux;
-} __packed;
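/*
 * Illustrative sketch: force port 0 to sink power and let the EC pick the
 * mux setting automatically.
 */
static const struct ec_params_usb_pd_control pd_ctrl_req = {
	.port = 0,
	.role = USB_PD_CTRL_ROLE_FORCE_SINK,
	.mux = USB_PD_CTRL_MUX_AUTO,
};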
-
-#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
-#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
-#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
-
-struct ec_response_usb_pd_control_v1 {
- uint8_t enabled;
- uint8_t role;
- uint8_t polarity;
- char state[32];
-} __packed;
-
-#define EC_CMD_USB_PD_PORTS 0x102
-
-struct ec_response_usb_pd_ports {
- uint8_t num_ports;
-} __packed;
-
-#define EC_CMD_USB_PD_POWER_INFO 0x103
-
-#define PD_POWER_CHARGING_PORT 0xff
-struct ec_params_usb_pd_power_info {
- uint8_t port;
-} __packed;
-
-enum usb_chg_type {
- USB_CHG_TYPE_NONE,
- USB_CHG_TYPE_PD,
- USB_CHG_TYPE_C,
- USB_CHG_TYPE_PROPRIETARY,
- USB_CHG_TYPE_BC12_DCP,
- USB_CHG_TYPE_BC12_CDP,
- USB_CHG_TYPE_BC12_SDP,
- USB_CHG_TYPE_OTHER,
- USB_CHG_TYPE_VBUS,
- USB_CHG_TYPE_UNKNOWN,
-};
-
-struct usb_chg_measures {
- uint16_t voltage_max;
- uint16_t voltage_now;
- uint16_t current_max;
- uint16_t current_lim;
-} __packed;
-
-struct ec_response_usb_pd_power_info {
- uint8_t role;
- uint8_t type;
- uint8_t dualrole;
- uint8_t reserved1;
- struct usb_chg_measures meas;
- uint32_t max_power;
-} __packed;
-
-/* Get info about USB-C SS muxes */
-#define EC_CMD_USB_PD_MUX_INFO 0x11a
-
-struct ec_params_usb_pd_mux_info {
- uint8_t port; /* USB-C port number */
-} __packed;
-
-/* Flags representing mux state */
-#define USB_PD_MUX_USB_ENABLED (1 << 0)
-#define USB_PD_MUX_DP_ENABLED (1 << 1)
-#define USB_PD_MUX_POLARITY_INVERTED (1 << 2)
-#define USB_PD_MUX_HPD_IRQ (1 << 3)
-
-struct ec_response_usb_pd_mux_info {
- uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */
-} __packed;
-
-/*****************************************************************************/
-/*
- * Passthru commands
- *
- * Some platforms have sub-processors chained to each other. For example:
- *
- * AP <--> EC <--> PD MCU
- *
- * The top 2 bits of the command number are used to indicate which device the
- * command is intended for. Device 0 is always the device receiving the
- * command; other device mapping is board-specific.
- *
- * When a device receives a command to be passed to a sub-processor, it passes
- * it on with the device number set back to 0. This allows the sub-processor
- * to remain blissfully unaware of whether the command originated on the next
- * device up the chain, or was passed through from the AP.
- *
- * In the above example, if the AP wants to send command 0x0002 to the PD MCU,
- * AP sends command 0x4002 to the EC
- * EC sends command 0x0002 to the PD MCU
- * EC forwards PD MCU response back to the AP
- */
-
-/* Offset and max command number for sub-device n */
-#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n))
-#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff)
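/*
 * Worked example matching the text above: command 0x0002 aimed at
 * sub-device 1 (the PD MCU in the AP <-> EC <-> PD MCU chain) becomes
 * 0x4002 on the EC's interface. EXAMPLE_PD_PASSTHRU_CMD is a hypothetical
 * name used only for this illustration.
 */
#define EXAMPLE_PD_PASSTHRU_CMD (EC_CMD_PASSTHRU_OFFSET(1) + 0x0002) /* == 0x4002 */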
-
-/*****************************************************************************/
-/*
- * Deprecated constants. These constants have been renamed for clarity. The
- * meaning and size has not changed. Programs that use the old names should
- * switch to the new names soon, as the old names may not be carried forward
- * forever.
- */
-#define EC_HOST_PARAM_SIZE EC_PROTO2_MAX_PARAM_SIZE
-#define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1
-#define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE
-
-#endif /* __CROS_EC_COMMANDS_H */
diff --git a/include/linux/mfd/cros_ec_lpc_mec.h b/include/linux/mfd/cros_ec_lpc_mec.h
deleted file mode 100644
index 176496ddc66c..000000000000
--- a/include/linux/mfd/cros_ec_lpc_mec.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * cros_ec_lpc_mec - LPC variant I/O for Microchip EC
- *
- * Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This header supports the Microchip EC (MEC) variant of LPC I/O to the
- * Chrome OS Embedded Controller. On MEC parts, accesses to the 0x800-0x9ff
- * I/O range go through the EMI registers defined below rather than plain
- * LPC port reads and writes.
- */
-
-#ifndef __LINUX_MFD_CROS_EC_MEC_H
-#define __LINUX_MFD_CROS_EC_MEC_H
-
-#include <linux/mfd/cros_ec_commands.h>
-
-enum cros_ec_lpc_mec_emi_access_mode {
- /* 8-bit access */
- ACCESS_TYPE_BYTE = 0x0,
- /* 16-bit access */
- ACCESS_TYPE_WORD = 0x1,
- /* 32-bit access */
- ACCESS_TYPE_LONG = 0x2,
- /*
- * 32-bit access, read or write of MEC_EMI_EC_DATA_B3 causes the
- * EC data register to be incremented.
- */
- ACCESS_TYPE_LONG_AUTO_INCREMENT = 0x3,
-};
-
-enum cros_ec_lpc_mec_io_type {
- MEC_IO_READ,
- MEC_IO_WRITE,
-};
-
-/* Access IO ranges 0x800 thru 0x9ff using EMI interface instead of LPC */
-#define MEC_EMI_RANGE_START EC_HOST_CMD_REGION0
-#define MEC_EMI_RANGE_END (EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE)
-
-/* EMI registers are relative to base */
-#define MEC_EMI_BASE 0x800
-#define MEC_EMI_HOST_TO_EC (MEC_EMI_BASE + 0)
-#define MEC_EMI_EC_TO_HOST (MEC_EMI_BASE + 1)
-#define MEC_EMI_EC_ADDRESS_B0 (MEC_EMI_BASE + 2)
-#define MEC_EMI_EC_ADDRESS_B1 (MEC_EMI_BASE + 3)
-#define MEC_EMI_EC_DATA_B0 (MEC_EMI_BASE + 4)
-#define MEC_EMI_EC_DATA_B1 (MEC_EMI_BASE + 5)
-#define MEC_EMI_EC_DATA_B2 (MEC_EMI_BASE + 6)
-#define MEC_EMI_EC_DATA_B3 (MEC_EMI_BASE + 7)
-
-/*
- * cros_ec_lpc_mec_init
- *
- * Initialize MEC I/O.
- */
-void cros_ec_lpc_mec_init(void);
-
-/*
- * cros_ec_lpc_mec_destroy
- *
- * Cleanup MEC I/O.
- */
-void cros_ec_lpc_mec_destroy(void);
-
-/**
- * cros_ec_lpc_io_bytes_mec - Read / write bytes to MEC EMI port
- *
- * @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request
- * @offset: Base read / write address
- * @length: Number of bytes to read / write
- * @buf: Destination / source buffer
- *
- * @return 8-bit checksum of all bytes read / written
- */
-u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
- unsigned int offset, unsigned int length, u8 *buf);
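/*
 * Usage sketch, assuming @offset is the absolute LPC address (as the
 * MEC_EMI_RANGE_* bounds above suggest): read the first four bytes of the
 * EC memory map through the EMI window into a caller-supplied buffer,
 * returning the 8-bit checksum of what was read.
 */
static inline u8 example_mec_read_memmap4(u8 *buf)
{
	return cros_ec_lpc_io_bytes_mec(MEC_IO_READ, EC_LPC_ADDR_MEMMAP, 4, buf);
}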
-
-#endif /* __LINUX_MFD_CROS_EC_MEC_H */
diff --git a/include/linux/mfd/cros_ec_lpc_reg.h b/include/linux/mfd/cros_ec_lpc_reg.h
deleted file mode 100644
index 5560bef63c2b..000000000000
--- a/include/linux/mfd/cros_ec_lpc_reg.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller
- *
- * Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This header declares byte-level read/write helpers for LPC-mapped access
- * to the Chrome OS Embedded Controller, together with init/cleanup entry
- * points for the register I/O backend.
- */
-
-#ifndef __LINUX_MFD_CROS_EC_REG_H
-#define __LINUX_MFD_CROS_EC_REG_H
-
-/**
- * cros_ec_lpc_read_bytes - Read bytes from a given LPC-mapped address.
- * Returns 8-bit checksum of all bytes read.
- *
- * @offset: Base read address
- * @length: Number of bytes to read
- * @dest: Destination buffer
- */
-u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest);
-
-/**
- * cros_ec_lpc_write_bytes - Write bytes to a given LPC-mapped address.
- * Returns 8-bit checksum of all bytes written.
- *
- * @offset: Base write address
- * @length: Number of bytes to write
- * @msg: Write data buffer
- */
-u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg);
-
-/**
- * cros_ec_lpc_reg_init
- *
- * Initialize register I/O.
- */
-void cros_ec_lpc_reg_init(void);
-
-/**
- * cros_ec_lpc_reg_destroy
- *
- * Cleanup reg I/O.
- */
-void cros_ec_lpc_reg_destroy(void);
-
-#endif /* __LINUX_MFD_CROS_EC_REG_H */
diff --git a/include/linux/mfd/cs40l50.h b/include/linux/mfd/cs40l50.h
new file mode 100644
index 000000000000..e5dc49860944
--- /dev/null
+++ b/include/linux/mfd/cs40l50.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * CS40L50 Advanced Haptic Driver with waveform memory,
+ * integrated DSP, and closed-loop algorithms
+ *
+ * Copyright 2024 Cirrus Logic, Inc.
+ *
+ * Author: James Ogletree <james.ogletree@cirrus.com>
+ */
+
+#ifndef __MFD_CS40L50_H__
+#define __MFD_CS40L50_H__
+
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+/* Power Supply Configuration */
+#define CS40L50_BLOCK_ENABLES2 0x201C
+#define CS40L50_ERR_RLS 0x2034
+#define CS40L50_BST_LPMODE_SEL 0x3810
+#define CS40L50_DCM_LOW_POWER 0x1
+#define CS40L50_OVERTEMP_WARN 0x4000010
+
+/* Interrupts */
+#define CS40L50_IRQ1_INT_1 0xE010
+#define CS40L50_IRQ1_BASE CS40L50_IRQ1_INT_1
+#define CS40L50_IRQ1_INT_2 0xE014
+#define CS40L50_IRQ1_INT_8 0xE02C
+#define CS40L50_IRQ1_INT_9 0xE030
+#define CS40L50_IRQ1_INT_10 0xE034
+#define CS40L50_IRQ1_INT_18 0xE054
+#define CS40L50_IRQ1_MASK_1 0xE090
+#define CS40L50_IRQ1_MASK_2 0xE094
+#define CS40L50_IRQ1_MASK_20 0xE0DC
+#define CS40L50_IRQ1_INT_1_OFFSET (CS40L50_IRQ1_INT_1 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_2_OFFSET (CS40L50_IRQ1_INT_2 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_8_OFFSET (CS40L50_IRQ1_INT_8 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_9_OFFSET (CS40L50_IRQ1_INT_9 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_10_OFFSET (CS40L50_IRQ1_INT_10 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_18_OFFSET (CS40L50_IRQ1_INT_18 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ_MASK_2_OVERRIDE 0xFFDF7FFF
+#define CS40L50_IRQ_MASK_20_OVERRIDE 0x15C01000
+#define CS40L50_AMP_SHORT_MASK BIT(31)
+#define CS40L50_DSP_QUEUE_MASK BIT(21)
+#define CS40L50_TEMP_ERR_MASK BIT(31)
+#define CS40L50_BST_UVP_MASK BIT(6)
+#define CS40L50_BST_SHORT_MASK BIT(7)
+#define CS40L50_BST_ILIMIT_MASK BIT(18)
+#define CS40L50_UVLO_VDDBATT_MASK BIT(16)
+#define CS40L50_GLOBAL_ERROR_MASK BIT(15)
+
+enum cs40l50_irq_list {
+ CS40L50_DSP_QUEUE_IRQ,
+ CS40L50_GLOBAL_ERROR_IRQ,
+ CS40L50_UVLO_VDDBATT_IRQ,
+ CS40L50_BST_ILIMIT_IRQ,
+ CS40L50_BST_SHORT_IRQ,
+ CS40L50_BST_UVP_IRQ,
+ CS40L50_TEMP_ERR_IRQ,
+ CS40L50_AMP_SHORT_IRQ,
+};
+
+/* DSP */
+#define CS40L50_XMEM_PACKED_0 0x2000000
+#define CS40L50_XMEM_UNPACKED24_0 0x2800000
+#define CS40L50_SYS_INFO_ID 0x25E0000
+#define CS40L50_DSP_QUEUE_WT 0x28042C8
+#define CS40L50_DSP_QUEUE_RD 0x28042CC
+#define CS40L50_NUM_WAVES 0x2805C18
+#define CS40L50_CORE_BASE 0x2B80000
+#define CS40L50_YMEM_PACKED_0 0x2C00000
+#define CS40L50_YMEM_UNPACKED24_0 0x3400000
+#define CS40L50_PMEM_0 0x3800000
+#define CS40L50_DSP_POLL_US 1000
+#define CS40L50_DSP_TIMEOUT_COUNT 100
+#define CS40L50_RESET_PULSE_US 2200
+#define CS40L50_CP_READY_US 3100
+#define CS40L50_AUTOSUSPEND_MS 2000
+#define CS40L50_PM_ALGO 0x9F206
+#define CS40L50_GLOBAL_ERR_RLS_SET BIT(11)
+#define CS40L50_GLOBAL_ERR_RLS_CLEAR 0
+
+enum cs40l50_wseqs {
+ CS40L50_PWR_ON,
+ CS40L50_STANDBY,
+ CS40L50_ACTIVE,
+ CS40L50_NUM_WSEQS,
+};
+
+/* DSP Queue */
+#define CS40L50_DSP_QUEUE_BASE 0x11004
+#define CS40L50_DSP_QUEUE_END 0x1101C
+#define CS40L50_DSP_QUEUE 0x11020
+#define CS40L50_PREVENT_HIBER 0x2000003
+#define CS40L50_ALLOW_HIBER 0x2000004
+#define CS40L50_SHUTDOWN 0x2000005
+#define CS40L50_SYSTEM_RESET 0x2000007
+#define CS40L50_START_I2S 0x3000002
+#define CS40L50_OWT_PUSH 0x3000008
+#define CS40L50_STOP_PLAYBACK 0x5000000
+#define CS40L50_OWT_DELETE 0xD000000
+
+/* Firmware files */
+#define CS40L50_FW "cs40l50.wmfw"
+#define CS40L50_WT "cs40l50.bin"
+
+/* Device */
+#define CS40L50_DEVID 0x0
+#define CS40L50_REVID 0x4
+#define CS40L50_DEVID_A 0x40A50
+#define CS40L50_REVID_B0 0xB0
+
+struct cs40l50 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct cs_dsp dsp;
+ struct gpio_desc *reset_gpio;
+ struct regmap_irq_chip_data *irq_data;
+ const struct firmware *fw;
+ const struct firmware *bin;
+ struct cs_dsp_wseq wseqs[CS40L50_NUM_WSEQS];
+ int irq;
+ u32 devid;
+ u32 revid;
+};
+
+int cs40l50_dsp_write(struct device *dev, struct regmap *regmap, u32 val);
+int cs40l50_probe(struct cs40l50 *cs40l50);
+int cs40l50_remove(struct cs40l50 *cs40l50);
+
+extern const struct regmap_config cs40l50_regmap;
+extern const struct dev_pm_ops cs40l50_pm_ops;
+
+#endif /* __MFD_CS40L50_H__ */
diff --git a/include/linux/mfd/cs42l43-regs.h b/include/linux/mfd/cs42l43-regs.h
new file mode 100644
index 000000000000..c39a49269cb7
--- /dev/null
+++ b/include/linux/mfd/cs42l43-regs.h
@@ -0,0 +1,1184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cs42l43 register definitions
+ *
+ * Copyright (c) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_CORE_REGS_H
+#define CS42L43_CORE_REGS_H
+
+/* Registers */
+#define CS42L43_GEN_INT_STAT_1 0x000000C0
+#define CS42L43_GEN_INT_MASK_1 0x000000C1
+#define CS42L43_DEVID 0x00003000
+#define CS42L43_REVID 0x00003004
+#define CS42L43_RELID 0x0000300C
+#define CS42L43_SFT_RESET 0x00003020
+#define CS42L43_DRV_CTRL1 0x00006004
+#define CS42L43_DRV_CTRL3 0x0000600C
+#define CS42L43_DRV_CTRL4 0x00006010
+#define CS42L43_DRV_CTRL_5 0x00006014
+#define CS42L43_GPIO_CTRL1 0x00006034
+#define CS42L43_GPIO_CTRL2 0x00006038
+#define CS42L43_GPIO_STS 0x0000603C
+#define CS42L43_GPIO_FN_SEL 0x00006040
+#define CS42L43_MCLK_SRC_SEL 0x00007004
+#define CS42L43_CCM_BLK_CLK_CONTROL 0x00007010
+#define CS42L43_SAMPLE_RATE1 0x00007014
+#define CS42L43_SAMPLE_RATE2 0x00007018
+#define CS42L43_SAMPLE_RATE3 0x0000701C
+#define CS42L43_SAMPLE_RATE4 0x00007020
+#define CS42L43_PLL_CONTROL 0x00007034
+#define CS42L43_FS_SELECT1 0x00007038
+#define CS42L43_FS_SELECT2 0x0000703C
+#define CS42L43_FS_SELECT3 0x00007040
+#define CS42L43_FS_SELECT4 0x00007044
+#define CS42L43_PDM_CONTROL 0x0000704C
+#define CS42L43_ASP_CLK_CONFIG1 0x00007058
+#define CS42L43_ASP_CLK_CONFIG2 0x0000705C
+#define CS42L43_OSC_DIV_SEL 0x00007068
+#define CS42L43_ADC_B_CTRL1 0x00008000
+#define CS42L43_ADC_B_CTRL2 0x00008004
+#define CS42L43_DECIM_HPF_WNF_CTRL1 0x0000803C
+#define CS42L43_DECIM_HPF_WNF_CTRL2 0x00008040
+#define CS42L43_DECIM_HPF_WNF_CTRL3 0x00008044
+#define CS42L43_DECIM_HPF_WNF_CTRL4 0x00008048
+#define CS42L43_DMIC_PDM_CTRL 0x0000804C
+#define CS42L43_DECIM_VOL_CTRL_CH1_CH2 0x00008050
+#define CS42L43_DECIM_VOL_CTRL_CH3_CH4 0x00008054
+#define CS42L43_DECIM_VOL_CTRL_UPDATE 0x00008058
+#define CS42L43_INTP_VOLUME_CTRL1 0x00009008
+#define CS42L43_INTP_VOLUME_CTRL2 0x0000900C
+#define CS42L43_AMP1_2_VOL_RAMP 0x00009010
+#define CS42L43_ASP_CTRL 0x0000A000
+#define CS42L43_ASP_FSYNC_CTRL1 0x0000A004
+#define CS42L43_ASP_FSYNC_CTRL2 0x0000A008
+#define CS42L43_ASP_FSYNC_CTRL3 0x0000A00C
+#define CS42L43_ASP_FSYNC_CTRL4 0x0000A010
+#define CS42L43_ASP_DATA_CTRL 0x0000A018
+#define CS42L43_ASP_RX_EN 0x0000A020
+#define CS42L43_ASP_TX_EN 0x0000A024
+#define CS42L43_ASP_RX_CH1_CTRL 0x0000A028
+#define CS42L43_ASP_RX_CH2_CTRL 0x0000A02C
+#define CS42L43_ASP_RX_CH3_CTRL 0x0000A030
+#define CS42L43_ASP_RX_CH4_CTRL 0x0000A034
+#define CS42L43_ASP_RX_CH5_CTRL 0x0000A038
+#define CS42L43_ASP_RX_CH6_CTRL 0x0000A03C
+#define CS42L43_ASP_TX_CH1_CTRL 0x0000A068
+#define CS42L43_ASP_TX_CH2_CTRL 0x0000A06C
+#define CS42L43_ASP_TX_CH3_CTRL 0x0000A070
+#define CS42L43_ASP_TX_CH4_CTRL 0x0000A074
+#define CS42L43_ASP_TX_CH5_CTRL 0x0000A078
+#define CS42L43_ASP_TX_CH6_CTRL 0x0000A07C
+#define CS42L43_OTP_REVISION_ID 0x0000B02C
+#define CS42L43_ASPTX1_INPUT 0x0000C200
+#define CS42L43_ASPTX2_INPUT 0x0000C210
+#define CS42L43_ASPTX3_INPUT 0x0000C220
+#define CS42L43_ASPTX4_INPUT 0x0000C230
+#define CS42L43_ASPTX5_INPUT 0x0000C240
+#define CS42L43_ASPTX6_INPUT 0x0000C250
+#define CS42L43_SWIRE_DP1_CH1_INPUT 0x0000C280
+#define CS42L43_SWIRE_DP1_CH2_INPUT 0x0000C290
+#define CS42L43_SWIRE_DP1_CH3_INPUT 0x0000C2A0
+#define CS42L43_SWIRE_DP1_CH4_INPUT 0x0000C2B0
+#define CS42L43_SWIRE_DP2_CH1_INPUT 0x0000C2C0
+#define CS42L43_SWIRE_DP2_CH2_INPUT 0x0000C2D0
+#define CS42L43_SWIRE_DP3_CH1_INPUT 0x0000C2E0
+#define CS42L43_SWIRE_DP3_CH2_INPUT 0x0000C2F0
+#define CS42L43_SWIRE_DP4_CH1_INPUT 0x0000C300
+#define CS42L43_SWIRE_DP4_CH2_INPUT 0x0000C310
+#define CS42L43_ASRC_INT1_INPUT1 0x0000C400
+#define CS42L43_ASRC_INT2_INPUT1 0x0000C410
+#define CS42L43_ASRC_INT3_INPUT1 0x0000C420
+#define CS42L43_ASRC_INT4_INPUT1 0x0000C430
+#define CS42L43_ASRC_DEC1_INPUT1 0x0000C440
+#define CS42L43_ASRC_DEC2_INPUT1 0x0000C450
+#define CS42L43_ASRC_DEC3_INPUT1 0x0000C460
+#define CS42L43_ASRC_DEC4_INPUT1 0x0000C470
+#define CS42L43_ISRC1INT1_INPUT1 0x0000C500
+#define CS42L43_ISRC1INT2_INPUT1 0x0000C510
+#define CS42L43_ISRC1DEC1_INPUT1 0x0000C520
+#define CS42L43_ISRC1DEC2_INPUT1 0x0000C530
+#define CS42L43_ISRC2INT1_INPUT1 0x0000C540
+#define CS42L43_ISRC2INT2_INPUT1 0x0000C550
+#define CS42L43_ISRC2DEC1_INPUT1 0x0000C560
+#define CS42L43_ISRC2DEC2_INPUT1 0x0000C570
+#define CS42L43_EQ1MIX_INPUT1 0x0000C580
+#define CS42L43_EQ1MIX_INPUT2 0x0000C584
+#define CS42L43_EQ1MIX_INPUT3 0x0000C588
+#define CS42L43_EQ1MIX_INPUT4 0x0000C58C
+#define CS42L43_EQ2MIX_INPUT1 0x0000C590
+#define CS42L43_EQ2MIX_INPUT2 0x0000C594
+#define CS42L43_EQ2MIX_INPUT3 0x0000C598
+#define CS42L43_EQ2MIX_INPUT4 0x0000C59C
+#define CS42L43_SPDIF1_INPUT1 0x0000C600
+#define CS42L43_SPDIF2_INPUT1 0x0000C610
+#define CS42L43_AMP1MIX_INPUT1 0x0000C620
+#define CS42L43_AMP1MIX_INPUT2 0x0000C624
+#define CS42L43_AMP1MIX_INPUT3 0x0000C628
+#define CS42L43_AMP1MIX_INPUT4 0x0000C62C
+#define CS42L43_AMP2MIX_INPUT1 0x0000C630
+#define CS42L43_AMP2MIX_INPUT2 0x0000C634
+#define CS42L43_AMP2MIX_INPUT3 0x0000C638
+#define CS42L43_AMP2MIX_INPUT4 0x0000C63C
+#define CS42L43_AMP3MIX_INPUT1 0x0000C640
+#define CS42L43_AMP3MIX_INPUT2 0x0000C644
+#define CS42L43_AMP3MIX_INPUT3 0x0000C648
+#define CS42L43_AMP3MIX_INPUT4 0x0000C64C
+#define CS42L43_AMP4MIX_INPUT1 0x0000C650
+#define CS42L43_AMP4MIX_INPUT2 0x0000C654
+#define CS42L43_AMP4MIX_INPUT3 0x0000C658
+#define CS42L43_AMP4MIX_INPUT4 0x0000C65C
+#define CS42L43_ASRC_INT_ENABLES 0x0000E000
+#define CS42L43_ASRC_DEC_ENABLES 0x0000E004
+#define CS42L43_PDNCNTL 0x00010000
+#define CS42L43_RINGSENSE_DEB_CTRL 0x0001001C
+#define CS42L43_TIPSENSE_DEB_CTRL 0x00010020
+#define CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS 0x00010028
+#define CS42L43_HS2 0x00010040
+#define CS42L43_HS_STAT 0x00010048
+#define CS42L43_MCU_SW_INTERRUPT 0x00010094
+#define CS42L43_STEREO_MIC_CTRL 0x000100A4
+#define CS42L43_STEREO_MIC_CLAMP_CTRL 0x000100C4
+#define CS42L43_BLOCK_EN2 0x00010104
+#define CS42L43_BLOCK_EN3 0x00010108
+#define CS42L43_BLOCK_EN4 0x0001010C
+#define CS42L43_BLOCK_EN5 0x00010110
+#define CS42L43_BLOCK_EN6 0x00010114
+#define CS42L43_BLOCK_EN7 0x00010118
+#define CS42L43_BLOCK_EN8 0x0001011C
+#define CS42L43_BLOCK_EN9 0x00010120
+#define CS42L43_BLOCK_EN10 0x00010124
+#define CS42L43_BLOCK_EN11 0x00010128
+#define CS42L43_TONE_CH1_CTRL 0x00010134
+#define CS42L43_TONE_CH2_CTRL 0x00010138
+#define CS42L43_MIC_DETECT_CONTROL_1 0x00011074
+#define CS42L43_DETECT_STATUS_1 0x0001107C
+#define CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL 0x00011090
+#define CS42L43_MIC_DETECT_CONTROL_ANDROID 0x000110B0
+#define CS42L43_ISRC1_CTRL 0x00012004
+#define CS42L43_ISRC2_CTRL 0x00013004
+#define CS42L43_CTRL_REG 0x00014000
+#define CS42L43_FDIV_FRAC 0x00014004
+#define CS42L43_CAL_RATIO 0x00014008
+#define CS42L43_SPI_CLK_CONFIG1 0x00016004
+#define CS42L43_SPI_CONFIG1 0x00016010
+#define CS42L43_SPI_CONFIG2 0x00016014
+#define CS42L43_SPI_CONFIG3 0x00016018
+#define CS42L43_SPI_CONFIG4 0x00016024
+#define CS42L43_SPI_STATUS1 0x00016100
+#define CS42L43_SPI_STATUS2 0x00016104
+#define CS42L43_TRAN_CONFIG1 0x00016200
+#define CS42L43_TRAN_CONFIG2 0x00016204
+#define CS42L43_TRAN_CONFIG3 0x00016208
+#define CS42L43_TRAN_CONFIG4 0x0001620C
+#define CS42L43_TRAN_CONFIG5 0x00016220
+#define CS42L43_TRAN_CONFIG6 0x00016224
+#define CS42L43_TRAN_CONFIG7 0x00016228
+#define CS42L43_TRAN_CONFIG8 0x0001622C
+#define CS42L43_TRAN_STATUS1 0x00016300
+#define CS42L43_TRAN_STATUS2 0x00016304
+#define CS42L43_TRAN_STATUS3 0x00016308
+#define CS42L43_TX_DATA 0x00016400
+#define CS42L43_RX_DATA 0x00016600
+#define CS42L43_DACCNFG1 0x00017000
+#define CS42L43_DACCNFG2 0x00017004
+#define CS42L43_HPPATHVOL 0x0001700C
+#define CS42L43_PGAVOL 0x00017014
+#define CS42L43_LOADDETRESULTS 0x00017018
+#define CS42L43_LOADDETENA 0x00017024
+#define CS42L43_CTRL 0x00017028
+#define CS42L43_COEFF_DATA_IN0 0x00018000
+#define CS42L43_COEFF_RD_WR0 0x00018008
+#define CS42L43_INIT_DONE0 0x00018010
+#define CS42L43_START_EQZ0 0x00018014
+#define CS42L43_MUTE_EQ_IN0 0x0001801C
+#define CS42L43_DECIM_INT 0x0001B000
+#define CS42L43_EQ_INT 0x0001B004
+#define CS42L43_ASP_INT 0x0001B008
+#define CS42L43_PLL_INT 0x0001B00C
+#define CS42L43_SOFT_INT 0x0001B010
+#define CS42L43_SWIRE_INT 0x0001B014
+#define CS42L43_MSM_INT 0x0001B018
+#define CS42L43_ACC_DET_INT 0x0001B01C
+#define CS42L43_I2C_TGT_INT 0x0001B020
+#define CS42L43_SPI_MSTR_INT 0x0001B024
+#define CS42L43_SW_TO_SPI_BRIDGE_INT 0x0001B028
+#define CS42L43_OTP_INT 0x0001B02C
+#define CS42L43_CLASS_D_AMP_INT 0x0001B030
+#define CS42L43_GPIO_INT 0x0001B034
+#define CS42L43_ASRC_INT 0x0001B038
+#define CS42L43_HPOUT_INT 0x0001B03C
+#define CS42L43_DECIM_MASK 0x0001B0A0
+#define CS42L43_EQ_MIX_MASK 0x0001B0A4
+#define CS42L43_ASP_MASK 0x0001B0A8
+#define CS42L43_PLL_MASK 0x0001B0AC
+#define CS42L43_SOFT_MASK 0x0001B0B0
+#define CS42L43_SWIRE_MASK 0x0001B0B4
+#define CS42L43_MSM_MASK 0x0001B0B8
+#define CS42L43_ACC_DET_MASK 0x0001B0BC
+#define CS42L43_I2C_TGT_MASK 0x0001B0C0
+#define CS42L43_SPI_MSTR_MASK 0x0001B0C4
+#define CS42L43_SW_TO_SPI_BRIDGE_MASK 0x0001B0C8
+#define CS42L43_OTP_MASK 0x0001B0CC
+#define CS42L43_CLASS_D_AMP_MASK 0x0001B0D0
+#define CS42L43_GPIO_INT_MASK 0x0001B0D4
+#define CS42L43_ASRC_MASK 0x0001B0D8
+#define CS42L43_HPOUT_MASK 0x0001B0DC
+#define CS42L43_DECIM_INT_SHADOW 0x0001B300
+#define CS42L43_EQ_MIX_INT_SHADOW 0x0001B304
+#define CS42L43_ASP_INT_SHADOW 0x0001B308
+#define CS42L43_PLL_INT_SHADOW 0x0001B30C
+#define CS42L43_SOFT_INT_SHADOW 0x0001B310
+#define CS42L43_SWIRE_INT_SHADOW 0x0001B314
+#define CS42L43_MSM_INT_SHADOW 0x0001B318
+#define CS42L43_ACC_DET_INT_SHADOW 0x0001B31C
+#define CS42L43_I2C_TGT_INT_SHADOW 0x0001B320
+#define CS42L43_SPI_MSTR_INT_SHADOW 0x0001B324
+#define CS42L43_SW_TO_SPI_BRIDGE_SHADOW 0x0001B328
+#define CS42L43_OTP_INT_SHADOW 0x0001B32C
+#define CS42L43_CLASS_D_AMP_INT_SHADOW 0x0001B330
+#define CS42L43_GPIO_SHADOW 0x0001B334
+#define CS42L43_ASRC_SHADOW 0x0001B338
+#define CS42L43_HP_OUT_SHADOW 0x0001B33C
+#define CS42L43_BOOT_CONTROL 0x00101000
+#define CS42L43_BLOCK_EN 0x00101008
+#define CS42L43_SHUTTER_CONTROL 0x0010100C
+#define CS42L43_MCU_SW_REV 0x00114000
+#define CS42L43_PATCH_START_ADDR 0x00114004
+#define CS42L43_NEED_CONFIGS 0x0011400C
+#define CS42L43_BOOT_STATUS 0x0011401C
+#define CS42L43_FW_SH_BOOT_CFG_NEED_CONFIGS 0x0011F8F8
+#define CS42L43_FW_MISSION_CTRL_NEED_CONFIGS 0x0011FE00
+#define CS42L43_FW_MISSION_CTRL_HAVE_CONFIGS 0x0011FE04
+#define CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION 0x0011FE0C
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG 0x0011FE10
+#define CS42L43_MCU_RAM_MAX 0x0011FFFF
+
+/* CS42L43_DEVID */
+#define CS42L43_DEVID_VAL 0x00042A43
+
+/* CS42L43_GEN_INT_STAT_1 */
+#define CS42L43_INT_STAT_GEN1_MASK 0x00000001
+#define CS42L43_INT_STAT_GEN1_SHIFT 0
+
+/* CS42L43_SFT_RESET */
+#define CS42L43_SFT_RESET_MASK 0xFF000000
+#define CS42L43_SFT_RESET_SHIFT 24
+
+#define CS42L43_SFT_RESET_VAL 0x5A000000
+
+/* CS42L43_DRV_CTRL1 */
+#define CS42L43_ASP_DOUT_DRV_MASK 0x00038000
+#define CS42L43_ASP_DOUT_DRV_SHIFT 15
+#define CS42L43_ASP_FSYNC_DRV_MASK 0x00000E00
+#define CS42L43_ASP_FSYNC_DRV_SHIFT 9
+#define CS42L43_ASP_BCLK_DRV_MASK 0x000001C0
+#define CS42L43_ASP_BCLK_DRV_SHIFT 6
+
+/* CS42L43_DRV_CTRL3 */
+#define CS42L43_I2C_ADDR_DRV_MASK 0x30000000
+#define CS42L43_I2C_ADDR_DRV_SHIFT 28
+#define CS42L43_I2C_SDA_DRV_MASK 0x0C000000
+#define CS42L43_I2C_SDA_DRV_SHIFT 26
+#define CS42L43_PDMOUT2_CLK_DRV_MASK 0x00E00000
+#define CS42L43_PDMOUT2_CLK_DRV_SHIFT 21
+#define CS42L43_PDMOUT2_DATA_DRV_MASK 0x001C0000
+#define CS42L43_PDMOUT2_DATA_DRV_SHIFT 18
+#define CS42L43_PDMOUT1_CLK_DRV_MASK 0x00038000
+#define CS42L43_PDMOUT1_CLK_DRV_SHIFT 15
+#define CS42L43_PDMOUT1_DATA_DRV_MASK 0x00007000
+#define CS42L43_PDMOUT1_DATA_DRV_SHIFT 12
+#define CS42L43_SPI_MISO_DRV_MASK 0x00000038
+#define CS42L43_SPI_MISO_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL4 */
+#define CS42L43_GPIO3_DRV_MASK 0x00000E00
+#define CS42L43_GPIO3_DRV_SHIFT 9
+#define CS42L43_GPIO2_DRV_MASK 0x000001C0
+#define CS42L43_GPIO2_DRV_SHIFT 6
+#define CS42L43_GPIO1_DRV_MASK 0x00000038
+#define CS42L43_GPIO1_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL_5 */
+#define CS42L43_I2C_SCL_DRV_MASK 0x18000000
+#define CS42L43_I2C_SCL_DRV_SHIFT 27
+#define CS42L43_SPI_SCK_DRV_MASK 0x07000000
+#define CS42L43_SPI_SCK_DRV_SHIFT 24
+#define CS42L43_SPI_MOSI_DRV_MASK 0x00E00000
+#define CS42L43_SPI_MOSI_DRV_SHIFT 21
+#define CS42L43_SPI_SSB_DRV_MASK 0x001C0000
+#define CS42L43_SPI_SSB_DRV_SHIFT 18
+#define CS42L43_ASP_DIN_DRV_MASK 0x000001C0
+#define CS42L43_ASP_DIN_DRV_SHIFT 6
+
+/* CS42L43_GPIO_CTRL1 */
+#define CS42L43_GPIO3_POL_MASK 0x00040000
+#define CS42L43_GPIO3_POL_SHIFT 18
+#define CS42L43_GPIO2_POL_MASK 0x00020000
+#define CS42L43_GPIO2_POL_SHIFT 17
+#define CS42L43_GPIO1_POL_MASK 0x00010000
+#define CS42L43_GPIO1_POL_SHIFT 16
+#define CS42L43_GPIO3_LVL_MASK 0x00000400
+#define CS42L43_GPIO3_LVL_SHIFT 10
+#define CS42L43_GPIO2_LVL_MASK 0x00000200
+#define CS42L43_GPIO2_LVL_SHIFT 9
+#define CS42L43_GPIO1_LVL_MASK 0x00000100
+#define CS42L43_GPIO1_LVL_SHIFT 8
+#define CS42L43_GPIO3_DIR_MASK 0x00000004
+#define CS42L43_GPIO3_DIR_SHIFT 2
+#define CS42L43_GPIO2_DIR_MASK 0x00000002
+#define CS42L43_GPIO2_DIR_SHIFT 1
+#define CS42L43_GPIO1_DIR_MASK 0x00000001
+#define CS42L43_GPIO1_DIR_SHIFT 0
+
+/* CS42L43_GPIO_CTRL2 */
+#define CS42L43_GPIO3_DEGLITCH_BYP_MASK 0x00000004
+#define CS42L43_GPIO3_DEGLITCH_BYP_SHIFT 2
+#define CS42L43_GPIO2_DEGLITCH_BYP_MASK 0x00000002
+#define CS42L43_GPIO2_DEGLITCH_BYP_SHIFT 1
+#define CS42L43_GPIO1_DEGLITCH_BYP_MASK 0x00000001
+#define CS42L43_GPIO1_DEGLITCH_BYP_SHIFT 0
+
+/* CS42L43_GPIO_STS */
+#define CS42L43_GPIO3_STS_MASK 0x00000004
+#define CS42L43_GPIO3_STS_SHIFT 2
+#define CS42L43_GPIO2_STS_MASK 0x00000002
+#define CS42L43_GPIO2_STS_SHIFT 1
+#define CS42L43_GPIO1_STS_MASK 0x00000001
+#define CS42L43_GPIO1_STS_SHIFT 0
+
+/* CS42L43_GPIO_FN_SEL */
+#define CS42L43_GPIO3_FN_SEL_MASK 0x00000004
+#define CS42L43_GPIO3_FN_SEL_SHIFT 2
+#define CS42L43_GPIO1_FN_SEL_MASK 0x00000001
+#define CS42L43_GPIO1_FN_SEL_SHIFT 0
+
+/* CS42L43_MCLK_SRC_SEL */
+#define CS42L43_OSC_PLL_MCLK_SEL_MASK 0x00000001
+#define CS42L43_OSC_PLL_MCLK_SEL_SHIFT 0
+
+/* CS42L43_SAMPLE_RATE1..CS42L43_SAMPLE_RATE4 */
+#define CS42L43_SAMPLE_RATE_MASK 0x0000001F
+#define CS42L43_SAMPLE_RATE_SHIFT 0
+
+/* CS42L43_PLL_CONTROL */
+#define CS42L43_PLL_REFCLK_EN_MASK 0x00000008
+#define CS42L43_PLL_REFCLK_EN_SHIFT 3
+#define CS42L43_PLL_REFCLK_DIV_MASK 0x00000006
+#define CS42L43_PLL_REFCLK_DIV_SHIFT 1
+#define CS42L43_PLL_REFCLK_SRC_MASK 0x00000001
+#define CS42L43_PLL_REFCLK_SRC_SHIFT 0
+
+/* CS42L43_FS_SELECT1 */
+#define CS42L43_ASP_RATE_MASK 0x00000003
+#define CS42L43_ASP_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT2 */
+#define CS42L43_ASRC_DEC_OUT_RATE_MASK 0x000000C0
+#define CS42L43_ASRC_DEC_OUT_RATE_SHIFT 6
+#define CS42L43_ASRC_INT_OUT_RATE_MASK 0x00000030
+#define CS42L43_ASRC_INT_OUT_RATE_SHIFT 4
+#define CS42L43_ASRC_DEC_IN_RATE_MASK 0x0000000C
+#define CS42L43_ASRC_DEC_IN_RATE_SHIFT 2
+#define CS42L43_ASRC_INT_IN_RATE_MASK 0x00000003
+#define CS42L43_ASRC_INT_IN_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT3 */
+#define CS42L43_HPOUT_RATE_MASK 0x0000C000
+#define CS42L43_HPOUT_RATE_SHIFT 14
+#define CS42L43_EQZ_RATE_MASK 0x00003000
+#define CS42L43_EQZ_RATE_SHIFT 12
+#define CS42L43_DIAGGEN_RATE_MASK 0x00000C00
+#define CS42L43_DIAGGEN_RATE_SHIFT 10
+#define CS42L43_DECIM_CH4_RATE_MASK 0x00000300
+#define CS42L43_DECIM_CH4_RATE_SHIFT 8
+#define CS42L43_DECIM_CH3_RATE_MASK 0x000000C0
+#define CS42L43_DECIM_CH3_RATE_SHIFT 6
+#define CS42L43_DECIM_CH2_RATE_MASK 0x00000030
+#define CS42L43_DECIM_CH2_RATE_SHIFT 4
+#define CS42L43_DECIM_CH1_RATE_MASK 0x0000000C
+#define CS42L43_DECIM_CH1_RATE_SHIFT 2
+#define CS42L43_AMP1_2_RATE_MASK 0x00000003
+#define CS42L43_AMP1_2_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT4 */
+#define CS42L43_SW_DP7_RATE_MASK 0x00C00000
+#define CS42L43_SW_DP7_RATE_SHIFT 22
+#define CS42L43_SW_DP6_RATE_MASK 0x00300000
+#define CS42L43_SW_DP6_RATE_SHIFT 20
+#define CS42L43_SPDIF_RATE_MASK 0x000C0000
+#define CS42L43_SPDIF_RATE_SHIFT 18
+#define CS42L43_SW_DP5_RATE_MASK 0x00030000
+#define CS42L43_SW_DP5_RATE_SHIFT 16
+#define CS42L43_SW_DP4_RATE_MASK 0x0000C000
+#define CS42L43_SW_DP4_RATE_SHIFT 14
+#define CS42L43_SW_DP3_RATE_MASK 0x00003000
+#define CS42L43_SW_DP3_RATE_SHIFT 12
+#define CS42L43_SW_DP2_RATE_MASK 0x00000C00
+#define CS42L43_SW_DP2_RATE_SHIFT 10
+#define CS42L43_SW_DP1_RATE_MASK 0x00000300
+#define CS42L43_SW_DP1_RATE_SHIFT 8
+#define CS42L43_ISRC2_LOW_RATE_MASK 0x000000C0
+#define CS42L43_ISRC2_LOW_RATE_SHIFT 6
+#define CS42L43_ISRC2_HIGH_RATE_MASK 0x00000030
+#define CS42L43_ISRC2_HIGH_RATE_SHIFT 4
+#define CS42L43_ISRC1_LOW_RATE_MASK 0x0000000C
+#define CS42L43_ISRC1_LOW_RATE_SHIFT 2
+#define CS42L43_ISRC1_HIGH_RATE_MASK 0x00000003
+#define CS42L43_ISRC1_HIGH_RATE_SHIFT 0
+
+/* CS42L43_PDM_CONTROL */
+#define CS42L43_PDM2_CLK_DIV_MASK 0x0000000C
+#define CS42L43_PDM2_CLK_DIV_SHIFT 2
+#define CS42L43_PDM1_CLK_DIV_MASK 0x00000003
+#define CS42L43_PDM1_CLK_DIV_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG1 */
+#define CS42L43_ASP_BCLK_N_MASK 0x03FF0000
+#define CS42L43_ASP_BCLK_N_SHIFT 16
+#define CS42L43_ASP_BCLK_M_MASK 0x000003FF
+#define CS42L43_ASP_BCLK_M_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG2 */
+#define CS42L43_ASP_MASTER_MODE_MASK 0x00000002
+#define CS42L43_ASP_MASTER_MODE_SHIFT 1
+#define CS42L43_ASP_BCLK_INV_MASK 0x00000001
+#define CS42L43_ASP_BCLK_INV_SHIFT 0
+
+/* CS42L43_OSC_DIV_SEL */
+#define CS42L43_OSC_DIV2_EN_MASK 0x00000001
+#define CS42L43_OSC_DIV2_EN_SHIFT 0
+
+/* CS42L43_ADC_B_CTRL1..CS42L43_ADC_B_CTRL2 */
+#define CS42L43_PGA_WIDESWING_MODE_EN_MASK 0x00000080
+#define CS42L43_PGA_WIDESWING_MODE_EN_SHIFT 7
+#define CS42L43_ADC_AIN_SEL_MASK 0x00000010
+#define CS42L43_ADC_AIN_SEL_SHIFT 4
+#define CS42L43_ADC_PGA_GAIN_MASK 0x0000000F
+#define CS42L43_ADC_PGA_GAIN_SHIFT 0
+
+/* CS42L43_DECIM_HPF_WNF_CTRL1..CS42L43_DECIM_HPF_WNF_CTRL4 */
+#define CS42L43_DECIM_WNF_CF_MASK 0x00000070
+#define CS42L43_DECIM_WNF_CF_SHIFT 4
+#define CS42L43_DECIM_WNF_EN_MASK 0x00000008
+#define CS42L43_DECIM_WNF_EN_SHIFT 3
+#define CS42L43_DECIM_HPF_CF_MASK 0x00000006
+#define CS42L43_DECIM_HPF_CF_SHIFT 1
+#define CS42L43_DECIM_HPF_EN_MASK 0x00000001
+#define CS42L43_DECIM_HPF_EN_SHIFT 0
+
+/* CS42L43_DMIC_PDM_CTRL */
+#define CS42L43_PDM2R_INV_MASK 0x00000020
+#define CS42L43_PDM2R_INV_SHIFT 5
+#define CS42L43_PDM2L_INV_MASK 0x00000010
+#define CS42L43_PDM2L_INV_SHIFT 4
+#define CS42L43_PDM1R_INV_MASK 0x00000008
+#define CS42L43_PDM1R_INV_SHIFT 3
+#define CS42L43_PDM1L_INV_MASK 0x00000004
+#define CS42L43_PDM1L_INV_SHIFT 2
+
+/* CS42L43_DECIM_VOL_CTRL_CH1_CH2 */
+#define CS42L43_DECIM2_MUTE_MASK 0x80000000
+#define CS42L43_DECIM2_MUTE_SHIFT 31
+#define CS42L43_DECIM2_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM2_VOL_SHIFT 22
+#define CS42L43_DECIM2_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM2_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM2_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM2_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM1_MUTE_MASK 0x00008000
+#define CS42L43_DECIM1_MUTE_SHIFT 15
+#define CS42L43_DECIM1_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM1_VOL_SHIFT 6
+#define CS42L43_DECIM1_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM1_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM1_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM1_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_CH3_CH4 */
+#define CS42L43_DECIM4_MUTE_MASK 0x80000000
+#define CS42L43_DECIM4_MUTE_SHIFT 31
+#define CS42L43_DECIM4_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM4_VOL_SHIFT 22
+#define CS42L43_DECIM4_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM4_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM4_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM4_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM3_MUTE_MASK 0x00008000
+#define CS42L43_DECIM3_MUTE_SHIFT 15
+#define CS42L43_DECIM3_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM3_VOL_SHIFT 6
+#define CS42L43_DECIM3_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM3_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM3_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM3_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_UPDATE */
+#define CS42L43_DECIM4_VOL_UPDATE_MASK 0x00000008
+#define CS42L43_DECIM4_VOL_UPDATE_SHIFT 3
+#define CS42L43_DECIM3_VOL_UPDATE_MASK 0x00000004
+#define CS42L43_DECIM3_VOL_UPDATE_SHIFT 2
+#define CS42L43_DECIM2_VOL_UPDATE_MASK 0x00000002
+#define CS42L43_DECIM2_VOL_UPDATE_SHIFT 1
+#define CS42L43_DECIM1_VOL_UPDATE_MASK 0x00000001
+#define CS42L43_DECIM1_VOL_UPDATE_SHIFT 0
+
+/* CS42L43_INTP_VOLUME_CTRL1..CS42L43_INTP_VOLUME_CTRL2 */
+#define CS42L43_AMP1_2_VU_MASK 0x00000200
+#define CS42L43_AMP1_2_VU_SHIFT 9
+#define CS42L43_AMP_MUTE_MASK 0x00000100
+#define CS42L43_AMP_MUTE_SHIFT 8
+#define CS42L43_AMP_VOL_MASK 0x000000FF
+#define CS42L43_AMP_VOL_SHIFT 0
+
+/* CS42L43_AMP1_2_VOL_RAMP */
+#define CS42L43_AMP1_2_VD_RAMP_MASK 0x00000070
+#define CS42L43_AMP1_2_VD_RAMP_SHIFT 4
+#define CS42L43_AMP1_2_VI_RAMP_MASK 0x00000007
+#define CS42L43_AMP1_2_VI_RAMP_SHIFT 0
+
+/* CS42L43_ASP_CTRL */
+#define CS42L43_ASP_FSYNC_MODE_MASK 0x00000004
+#define CS42L43_ASP_FSYNC_MODE_SHIFT 2
+#define CS42L43_ASP_BCLK_EN_MASK 0x00000002
+#define CS42L43_ASP_BCLK_EN_SHIFT 1
+#define CS42L43_ASP_FSYNC_EN_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_EN_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL1 */
+#define CS42L43_ASP_FSYNC_M_MASK 0x0007FFFF
+#define CS42L43_ASP_FSYNC_M_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL3 */
+#define CS42L43_ASP_FSYNC_IN_INV_MASK 0x00000002
+#define CS42L43_ASP_FSYNC_IN_INV_SHIFT 1
+#define CS42L43_ASP_FSYNC_OUT_INV_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_OUT_INV_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL4 */
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_MASK 0x00001FFE
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_SHIFT 1
+
+/* CS42L43_ASP_DATA_CTRL */
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_MASK 0x00000008
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_SHIFT 3
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_MASK 0x00000007
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_SHIFT 0
+
+/* CS42L43_ASP_RX_EN */
+#define CS42L43_ASP_RX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_RX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_RX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_RX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_RX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_RX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_RX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_RX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_RX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_RX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_RX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_RX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_TX_EN */
+#define CS42L43_ASP_TX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_TX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_TX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_TX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_TX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_TX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_TX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_TX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_TX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_TX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_TX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_TX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_RX_CH1_CTRL..CS42L43_ASP_TX_CH6_CTRL */
+#define CS42L43_ASP_CH_WIDTH_MASK 0x001F0000
+#define CS42L43_ASP_CH_WIDTH_SHIFT 16
+#define CS42L43_ASP_CH_SLOT_MASK 0x00001FFE
+#define CS42L43_ASP_CH_SLOT_SHIFT 1
+#define CS42L43_ASP_CH_SLOT_PHASE_MASK 0x00000001
+#define CS42L43_ASP_CH_SLOT_PHASE_SHIFT 0
+
+/* CS42L43_ASPTX1_INPUT..CS42L43_AMP4MIX_INPUT4 */
+#define CS42L43_MIXER_VOL_MASK 0x00FE0000
+#define CS42L43_MIXER_VOL_SHIFT 17
+#define CS42L43_MIXER_SRC_MASK 0x000001FF
+#define CS42L43_MIXER_SRC_SHIFT 0
+
+/* CS42L43_ASRC_INT_ENABLES */
+#define CS42L43_ASRC_INT4_EN_MASK 0x00000008
+#define CS42L43_ASRC_INT4_EN_SHIFT 3
+#define CS42L43_ASRC_INT3_EN_MASK 0x00000004
+#define CS42L43_ASRC_INT3_EN_SHIFT 2
+#define CS42L43_ASRC_INT2_EN_MASK 0x00000002
+#define CS42L43_ASRC_INT2_EN_SHIFT 1
+#define CS42L43_ASRC_INT1_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT1_EN_SHIFT 0
+
+/* CS42L43_ASRC_DEC_ENABLES */
+#define CS42L43_ASRC_DEC4_EN_MASK 0x00000008
+#define CS42L43_ASRC_DEC4_EN_SHIFT 3
+#define CS42L43_ASRC_DEC3_EN_MASK 0x00000004
+#define CS42L43_ASRC_DEC3_EN_SHIFT 2
+#define CS42L43_ASRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC2_EN_SHIFT 1
+#define CS42L43_ASRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ASRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_PDNCNTL */
+#define CS42L43_RING_SENSE_EN_MASK 0x00000002
+#define CS42L43_RING_SENSE_EN_SHIFT 1
+
+/* CS42L43_RINGSENSE_DEB_CTRL */
+#define CS42L43_RINGSENSE_INV_MASK 0x00000080
+#define CS42L43_RINGSENSE_INV_SHIFT 7
+#define CS42L43_RINGSENSE_PULLUP_PDNB_MASK 0x00000040
+#define CS42L43_RINGSENSE_PULLUP_PDNB_SHIFT 6
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_RINGSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_RINGSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIPSENSE_DEB_CTRL */
+#define CS42L43_TIPSENSE_INV_MASK 0x00000080
+#define CS42L43_TIPSENSE_INV_SHIFT 7
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_TIPSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_TIPSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS */
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_DB_STS_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_DB_STS_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_DB_STS_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_DB_STS_SHIFT 0
+
+/* CS42L43_HS2 */
+#define CS42L43_HS_CLAMP_DISABLE_MASK 0x10000000
+#define CS42L43_HS_CLAMP_DISABLE_SHIFT 28
+#define CS42L43_HSBIAS_RAMP_MASK 0x0C000000
+#define CS42L43_HSBIAS_RAMP_SHIFT 26
+#define CS42L43_HSDET_MODE_MASK 0x00018000
+#define CS42L43_HSDET_MODE_SHIFT 15
+#define CS42L43_HSDET_MANUAL_MODE_MASK 0x00006000
+#define CS42L43_HSDET_MANUAL_MODE_SHIFT 13
+#define CS42L43_AUTO_HSDET_TIME_MASK 0x00000700
+#define CS42L43_AUTO_HSDET_TIME_SHIFT 8
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_MASK 0x00000080
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_SHIFT 7
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_MASK 0x00000040
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_SHIFT 6
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_MASK 0x00000020
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_SHIFT 5
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_MASK 0x00000010
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_SHIFT 4
+#define CS42L43_HSBIAS_OUT_HS3_SEL_MASK 0x00000008
+#define CS42L43_HSBIAS_OUT_HS3_SEL_SHIFT 3
+#define CS42L43_HSBIAS_OUT_HS4_SEL_MASK 0x00000004
+#define CS42L43_HSBIAS_OUT_HS4_SEL_SHIFT 2
+#define CS42L43_HSGND_HS3_SEL_MASK 0x00000002
+#define CS42L43_HSGND_HS3_SEL_SHIFT 1
+#define CS42L43_HSGND_HS4_SEL_MASK 0x00000001
+#define CS42L43_HSGND_HS4_SEL_SHIFT 0
+
+/* CS42L43_HS_STAT */
+#define CS42L43_HSDET_TYPE_STS_MASK 0x00000007
+#define CS42L43_HSDET_TYPE_STS_SHIFT 0
+
+/* CS42L43_MCU_SW_INTERRUPT */
+#define CS42L43_CONTROL_IND_MASK 0x00000004
+#define CS42L43_CONTROL_IND_SHIFT 2
+#define CS42L43_CONFIGS_IND_MASK 0x00000002
+#define CS42L43_CONFIGS_IND_SHIFT 1
+#define CS42L43_PATCH_IND_MASK 0x00000001
+#define CS42L43_PATCH_IND_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CTRL */
+#define CS42L43_HS2_BIAS_SENSE_EN_MASK 0x00000020
+#define CS42L43_HS2_BIAS_SENSE_EN_SHIFT 5
+#define CS42L43_HS1_BIAS_SENSE_EN_MASK 0x00000010
+#define CS42L43_HS1_BIAS_SENSE_EN_SHIFT 4
+#define CS42L43_HS2_BIAS_EN_MASK 0x00000008
+#define CS42L43_HS2_BIAS_EN_SHIFT 3
+#define CS42L43_HS1_BIAS_EN_MASK 0x00000004
+#define CS42L43_HS1_BIAS_EN_SHIFT 2
+#define CS42L43_JACK_STEREO_CONFIG_MASK 0x00000003
+#define CS42L43_JACK_STEREO_CONFIG_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CLAMP_CTRL */
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK 0x00000002
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_SHIFT 1
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK 0x00000001
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_SHIFT 0
+
+/* CS42L43_BLOCK_EN2 */
+#define CS42L43_SPI_MSTR_EN_MASK 0x00000001
+#define CS42L43_SPI_MSTR_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN3 */
+#define CS42L43_PDM2_DIN_R_EN_MASK 0x00000020
+#define CS42L43_PDM2_DIN_R_EN_SHIFT 5
+#define CS42L43_PDM2_DIN_L_EN_MASK 0x00000010
+#define CS42L43_PDM2_DIN_L_EN_SHIFT 4
+#define CS42L43_PDM1_DIN_R_EN_MASK 0x00000008
+#define CS42L43_PDM1_DIN_R_EN_SHIFT 3
+#define CS42L43_PDM1_DIN_L_EN_MASK 0x00000004
+#define CS42L43_PDM1_DIN_L_EN_SHIFT 2
+#define CS42L43_ADC2_EN_MASK 0x00000002
+#define CS42L43_ADC2_EN_SHIFT 1
+#define CS42L43_ADC1_EN_MASK 0x00000001
+#define CS42L43_ADC1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN4 */
+#define CS42L43_ASRC_DEC_BANK_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC_BANK_EN_SHIFT 1
+#define CS42L43_ASRC_INT_BANK_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN5 */
+#define CS42L43_ISRC2_BANK_EN_MASK 0x00000002
+#define CS42L43_ISRC2_BANK_EN_SHIFT 1
+#define CS42L43_ISRC1_BANK_EN_MASK 0x00000001
+#define CS42L43_ISRC1_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN6 */
+#define CS42L43_MIXER_EN_MASK 0x00000001
+#define CS42L43_MIXER_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN7 */
+#define CS42L43_EQ_EN_MASK 0x00000001
+#define CS42L43_EQ_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN8 */
+#define CS42L43_HP_EN_MASK 0x00000001
+#define CS42L43_HP_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN9 */
+#define CS42L43_TONE_EN_MASK 0x00000001
+#define CS42L43_TONE_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN10 */
+#define CS42L43_AMP2_EN_MASK 0x00000002
+#define CS42L43_AMP2_EN_SHIFT 1
+#define CS42L43_AMP1_EN_MASK 0x00000001
+#define CS42L43_AMP1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN11 */
+#define CS42L43_SPDIF_EN_MASK 0x00000001
+#define CS42L43_SPDIF_EN_SHIFT 0
+
+/* CS42L43_TONE_CH1_CTRL..CS42L43_TONE_CH2_CTRL */
+#define CS42L43_TONE_FREQ_MASK 0x00000070
+#define CS42L43_TONE_FREQ_SHIFT 4
+#define CS42L43_TONE_SEL_MASK 0x0000000F
+#define CS42L43_TONE_SEL_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_1 */
+#define CS42L43_BUTTON_DETECT_MODE_MASK 0x00000018
+#define CS42L43_BUTTON_DETECT_MODE_SHIFT 3
+#define CS42L43_HSBIAS_MODE_MASK 0x00000006
+#define CS42L43_HSBIAS_MODE_SHIFT 1
+#define CS42L43_MIC_LVL_DET_DISABLE_MASK 0x00000001
+#define CS42L43_MIC_LVL_DET_DISABLE_SHIFT 0
+
+/* CS42L43_DETECT_STATUS_1 */
+#define CS42L43_HSDET_DC_STS_MASK 0x01FF0000
+#define CS42L43_HSDET_DC_STS_SHIFT 16
+#define CS42L43_JACKDET_STS_MASK 0x00000080
+#define CS42L43_JACKDET_STS_SHIFT 7
+#define CS42L43_HSBIAS_CLAMP_STS_MASK 0x00000040
+#define CS42L43_HSBIAS_CLAMP_STS_SHIFT 6
+
+/* CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL */
+#define CS42L43_JACKDET_MODE_MASK 0xC0000000
+#define CS42L43_JACKDET_MODE_SHIFT 30
+#define CS42L43_JACKDET_INV_MASK 0x20000000
+#define CS42L43_JACKDET_INV_SHIFT 29
+#define CS42L43_JACKDET_DB_TIME_MASK 0x03000000
+#define CS42L43_JACKDET_DB_TIME_SHIFT 24
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_MASK 0x00800000
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_SHIFT 23
+#define CS42L43_HSBIAS_SENSE_EN_MASK 0x00000080
+#define CS42L43_HSBIAS_SENSE_EN_SHIFT 7
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK 0x00000040
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_SHIFT 6
+#define CS42L43_JACKDET_SENSE_EN_MASK 0x00000020
+#define CS42L43_JACKDET_SENSE_EN_SHIFT 5
+#define CS42L43_HSBIAS_SENSE_TRIP_MASK 0x00000007
+#define CS42L43_HSBIAS_SENSE_TRIP_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_ANDROID */
+#define CS42L43_HSDET_LVL_COMBWIDTH_MASK 0xC0000000
+#define CS42L43_HSDET_LVL_COMBWIDTH_SHIFT 30
+#define CS42L43_HSDET_LVL2_THRESH_MASK 0x01FF0000
+#define CS42L43_HSDET_LVL2_THRESH_SHIFT 16
+#define CS42L43_HSDET_LVL1_THRESH_MASK 0x000001FF
+#define CS42L43_HSDET_LVL1_THRESH_SHIFT 0
+
+/* CS42L43_ISRC1_CTRL..CS42L43_ISRC2_CTRL */
+#define CS42L43_ISRC_INT2_EN_MASK 0x00000200
+#define CS42L43_ISRC_INT2_EN_SHIFT 9
+#define CS42L43_ISRC_INT1_EN_MASK 0x00000100
+#define CS42L43_ISRC_INT1_EN_SHIFT 8
+#define CS42L43_ISRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ISRC_DEC2_EN_SHIFT 1
+#define CS42L43_ISRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ISRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_CTRL_REG */
+#define CS42L43_PLL_MODE_BYPASS_500_MASK 0x00000004
+#define CS42L43_PLL_MODE_BYPASS_500_SHIFT 2
+#define CS42L43_PLL_MODE_BYPASS_1029_MASK 0x00000002
+#define CS42L43_PLL_MODE_BYPASS_1029_SHIFT 1
+#define CS42L43_PLL_EN_MASK 0x00000001
+#define CS42L43_PLL_EN_SHIFT 0
+
+/* CS42L43_FDIV_FRAC */
+#define CS42L43_PLL_DIV_INT_MASK 0xFF000000
+#define CS42L43_PLL_DIV_INT_SHIFT 24
+#define CS42L43_PLL_DIV_FRAC_BYTE2_MASK 0x00FF0000
+#define CS42L43_PLL_DIV_FRAC_BYTE2_SHIFT 16
+#define CS42L43_PLL_DIV_FRAC_BYTE1_MASK 0x0000FF00
+#define CS42L43_PLL_DIV_FRAC_BYTE1_SHIFT 8
+#define CS42L43_PLL_DIV_FRAC_BYTE0_MASK 0x000000FF
+#define CS42L43_PLL_DIV_FRAC_BYTE0_SHIFT 0
+
+/* CS42L43_CAL_RATIO */
+#define CS42L43_PLL_CAL_RATIO_MASK 0x000000FF
+#define CS42L43_PLL_CAL_RATIO_SHIFT 0
+
+/* CS42L43_SPI_CLK_CONFIG1 */
+#define CS42L43_SCLK_DIV_MASK 0x0000000F
+#define CS42L43_SCLK_DIV_SHIFT 0
+
+/* CS42L43_SPI_CONFIG1 */
+#define CS42L43_SPI_SS_IDLE_DUR_MASK 0x0F000000
+#define CS42L43_SPI_SS_IDLE_DUR_SHIFT 24
+#define CS42L43_SPI_SS_DELAY_DUR_MASK 0x000F0000
+#define CS42L43_SPI_SS_DELAY_DUR_SHIFT 16
+#define CS42L43_SPI_THREE_WIRE_MASK 0x00000100
+#define CS42L43_SPI_THREE_WIRE_SHIFT 8
+#define CS42L43_SPI_DPHA_MASK 0x00000040
+#define CS42L43_SPI_DPHA_SHIFT 6
+#define CS42L43_SPI_CPHA_MASK 0x00000020
+#define CS42L43_SPI_CPHA_SHIFT 5
+#define CS42L43_SPI_CPOL_MASK 0x00000010
+#define CS42L43_SPI_CPOL_SHIFT 4
+#define CS42L43_SPI_SS_SEL_MASK 0x00000007
+#define CS42L43_SPI_SS_SEL_SHIFT 0
+
+/* CS42L43_SPI_CONFIG2 */
+#define CS42L43_SPI_SS_FRC_MASK 0x00000001
+#define CS42L43_SPI_SS_FRC_SHIFT 0
+
+/* CS42L43_SPI_CONFIG3 */
+#define CS42L43_SPI_WDT_ENA_MASK 0x00000001
+#define CS42L43_SPI_WDT_ENA_SHIFT 0
+
+/* CS42L43_SPI_CONFIG4 */
+#define CS42L43_SPI_STALL_ENA_MASK 0x00010000
+#define CS42L43_SPI_STALL_ENA_SHIFT 16
+
+/* CS42L43_SPI_STATUS1 */
+#define CS42L43_SPI_ABORT_STS_MASK 0x00000002
+#define CS42L43_SPI_ABORT_STS_SHIFT 1
+#define CS42L43_SPI_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_DONE_STS_SHIFT 0
+
+/* CS42L43_SPI_STATUS2 */
+#define CS42L43_SPI_RX_DONE_STS_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_STS_SHIFT 4
+#define CS42L43_SPI_TX_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_STS_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG1 */
+#define CS42L43_SPI_START_MASK 0x00000001
+#define CS42L43_SPI_START_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG2 */
+#define CS42L43_SPI_ABORT_MASK 0x00000001
+#define CS42L43_SPI_ABORT_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG3 */
+#define CS42L43_SPI_WORD_SIZE_MASK 0x00070000
+#define CS42L43_SPI_WORD_SIZE_SHIFT 16
+#define CS42L43_SPI_CMD_MASK 0x00000003
+#define CS42L43_SPI_CMD_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG4 */
+#define CS42L43_SPI_TX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG5 */
+#define CS42L43_SPI_RX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG6 */
+#define CS42L43_SPI_TX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_TX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG7 */
+#define CS42L43_SPI_RX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_RX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG8 */
+#define CS42L43_SPI_RX_DONE_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_SHIFT 4
+#define CS42L43_SPI_TX_DONE_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_SHIFT 0
+
+/* CS42L43_TRAN_STATUS1 */
+#define CS42L43_SPI_BUSY_STS_MASK 0x00000100
+#define CS42L43_SPI_BUSY_STS_SHIFT 8
+#define CS42L43_SPI_RX_REQUEST_MASK 0x00000010
+#define CS42L43_SPI_RX_REQUEST_SHIFT 4
+#define CS42L43_SPI_TX_REQUEST_MASK 0x00000001
+#define CS42L43_SPI_TX_REQUEST_SHIFT 0
+
+/* CS42L43_TRAN_STATUS2 */
+#define CS42L43_SPI_TX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TRAN_STATUS3 */
+#define CS42L43_SPI_RX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TX_DATA */
+#define CS42L43_SPI_TX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_TX_DATA_SHIFT 0
+
+/* CS42L43_RX_DATA */
+#define CS42L43_SPI_RX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_RX_DATA_SHIFT 0
+
+/* CS42L43_DACCNFG1 */
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_MASK 0x00000008
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_SHIFT 3
+#define CS42L43_AMP4_INV_MASK 0x00000002
+#define CS42L43_AMP4_INV_SHIFT 1
+#define CS42L43_AMP3_INV_MASK 0x00000001
+#define CS42L43_AMP3_INV_SHIFT 0
+
+/* CS42L43_DACCNFG2 */
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_MASK 0x00000002
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_SHIFT 1
+#define CS42L43_HP_HPF_EN_MASK 0x00000001
+#define CS42L43_HP_HPF_EN_SHIFT 0
+
+/* CS42L43_HPPATHVOL */
+#define CS42L43_AMP4_PATH_VOL_MASK 0x01FF0000
+#define CS42L43_AMP4_PATH_VOL_SHIFT 16
+#define CS42L43_AMP3_PATH_VOL_MASK 0x000001FF
+#define CS42L43_AMP3_PATH_VOL_SHIFT 0
+
+/* CS42L43_PGAVOL */
+#define CS42L43_HP_PATH_VOL_RAMP_MASK 0x0003C000
+#define CS42L43_HP_PATH_VOL_RAMP_SHIFT 14
+#define CS42L43_HP_PATH_VOL_ZC_MASK 0x00002000
+#define CS42L43_HP_PATH_VOL_ZC_SHIFT 13
+#define CS42L43_HP_PATH_VOL_SFT_MASK 0x00001000
+#define CS42L43_HP_PATH_VOL_SFT_SHIFT 12
+#define CS42L43_HP_DIG_VOL_RAMP_MASK 0x00000F00
+#define CS42L43_HP_DIG_VOL_RAMP_SHIFT 8
+#define CS42L43_HP_ANA_VOL_RAMP_MASK 0x0000000F
+#define CS42L43_HP_ANA_VOL_RAMP_SHIFT 0
+
+/* CS42L43_LOADDETRESULTS */
+#define CS42L43_AMP3_RES_DET_MASK 0x00000003
+#define CS42L43_AMP3_RES_DET_SHIFT 0
+
+/* CS42L43_LOADDETENA */
+#define CS42L43_HPLOAD_DET_EN_MASK 0x00000001
+#define CS42L43_HPLOAD_DET_EN_SHIFT 0
+
+/* CS42L43_CTRL */
+#define CS42L43_ADPTPWR_MODE_MASK 0x00000007
+#define CS42L43_ADPTPWR_MODE_SHIFT 0
+
+/* CS42L43_COEFF_RD_WR0 */
+#define CS42L43_WRITE_MODE_MASK 0x00000002
+#define CS42L43_WRITE_MODE_SHIFT 1
+
+/* CS42L43_INIT_DONE0 */
+#define CS42L43_INITIALIZE_DONE_MASK 0x00000001
+#define CS42L43_INITIALIZE_DONE_SHIFT 0
+
+/* CS42L43_START_EQZ0 */
+#define CS42L43_START_FILTER_MASK 0x00000001
+#define CS42L43_START_FILTER_SHIFT 0
+
+/* CS42L43_MUTE_EQ_IN0 */
+#define CS42L43_MUTE_EQ_CH2_MASK 0x00000002
+#define CS42L43_MUTE_EQ_CH2_SHIFT 1
+#define CS42L43_MUTE_EQ_CH1_MASK 0x00000001
+#define CS42L43_MUTE_EQ_CH1_SHIFT 0
+
+/* CS42L43_PLL_INT */
+#define CS42L43_PLL_LOST_LOCK_INT_MASK 0x00000002
+#define CS42L43_PLL_LOST_LOCK_INT_SHIFT 1
+#define CS42L43_PLL_READY_INT_MASK 0x00000001
+#define CS42L43_PLL_READY_INT_SHIFT 0
+
+/* CS42L43_SOFT_INT */
+#define CS42L43_CONTROL_APPLIED_INT_MASK 0x00000010
+#define CS42L43_CONTROL_APPLIED_INT_SHIFT 4
+#define CS42L43_CONTROL_WARN_INT_MASK 0x00000008
+#define CS42L43_CONTROL_WARN_INT_SHIFT 3
+#define CS42L43_PATCH_WARN_INT_MASK 0x00000002
+#define CS42L43_PATCH_WARN_INT_SHIFT 1
+#define CS42L43_PATCH_APPLIED_INT_MASK 0x00000001
+#define CS42L43_PATCH_APPLIED_INT_SHIFT 0
+
+/* CS42L43_MSM_INT */
+#define CS42L43_HP_STARTUP_DONE_INT_MASK 0x00000800
+#define CS42L43_HP_STARTUP_DONE_INT_SHIFT 11
+#define CS42L43_HP_SHUTDOWN_DONE_INT_MASK 0x00000400
+#define CS42L43_HP_SHUTDOWN_DONE_INT_SHIFT 10
+#define CS42L43_HSDET_DONE_INT_MASK 0x00000200
+#define CS42L43_HSDET_DONE_INT_SHIFT 9
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_MASK 0x00000080
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_SHIFT 7
+#define CS42L43_TIPSENSE_PLUG_DB_INT_MASK 0x00000040
+#define CS42L43_TIPSENSE_PLUG_DB_INT_SHIFT 6
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_MASK 0x00000020
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_SHIFT 5
+#define CS42L43_RINGSENSE_PLUG_DB_INT_MASK 0x00000010
+#define CS42L43_RINGSENSE_PLUG_DB_INT_SHIFT 4
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_SHIFT 0
+
+/* CS42L43_ACC_DET_INT */
+#define CS42L43_HS2_BIAS_SENSE_INT_MASK 0x00000800
+#define CS42L43_HS2_BIAS_SENSE_INT_SHIFT 11
+#define CS42L43_HS1_BIAS_SENSE_INT_MASK 0x00000400
+#define CS42L43_HS1_BIAS_SENSE_INT_SHIFT 10
+#define CS42L43_DC_DETECT1_FALSE_INT_MASK 0x00000080
+#define CS42L43_DC_DETECT1_FALSE_INT_SHIFT 7
+#define CS42L43_DC_DETECT1_TRUE_INT_MASK 0x00000040
+#define CS42L43_DC_DETECT1_TRUE_INT_SHIFT 6
+#define CS42L43_HSBIAS_CLAMPED_INT_MASK 0x00000008
+#define CS42L43_HSBIAS_CLAMPED_INT_SHIFT 3
+#define CS42L43_HS3_4_BIAS_SENSE_INT_MASK 0x00000001
+#define CS42L43_HS3_4_BIAS_SENSE_INT_SHIFT 0
+
+/* CS42L43_SPI_MSTR_INT */
+#define CS42L43_IRQ_SPI_STALLING_INT_MASK 0x00000004
+#define CS42L43_IRQ_SPI_STALLING_INT_SHIFT 2
+#define CS42L43_IRQ_SPI_STS_INT_MASK 0x00000002
+#define CS42L43_IRQ_SPI_STS_INT_SHIFT 1
+#define CS42L43_IRQ_SPI_BLOCK_INT_MASK 0x00000001
+#define CS42L43_IRQ_SPI_BLOCK_INT_SHIFT 0
+
+/* CS42L43_SW_TO_SPI_BRIDGE_INT */
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_MASK 0x00000001
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_SHIFT 0
+
+/* CS42L43_CLASS_D_AMP_INT */
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_MASK 0x00002000
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_SHIFT 13
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_MASK 0x00001000
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_SHIFT 12
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_MASK 0x00000800
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_SHIFT 11
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_MASK 0x00000400
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_SHIFT 10
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_MASK 0x00000200
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_SHIFT 9
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_MASK 0x00000100
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_SHIFT 8
+#define CS42L43_AMP2_STARTUP_DONE_INT_MASK 0x00000080
+#define CS42L43_AMP2_STARTUP_DONE_INT_SHIFT 7
+#define CS42L43_AMP1_STARTUP_DONE_INT_MASK 0x00000040
+#define CS42L43_AMP1_STARTUP_DONE_INT_SHIFT 6
+#define CS42L43_AMP2_THERM_SHDN_INT_MASK 0x00000020
+#define CS42L43_AMP2_THERM_SHDN_INT_SHIFT 5
+#define CS42L43_AMP1_THERM_SHDN_INT_MASK 0x00000010
+#define CS42L43_AMP1_THERM_SHDN_INT_SHIFT 4
+#define CS42L43_AMP2_THERM_WARN_INT_MASK 0x00000008
+#define CS42L43_AMP2_THERM_WARN_INT_SHIFT 3
+#define CS42L43_AMP1_THERM_WARN_INT_MASK 0x00000004
+#define CS42L43_AMP1_THERM_WARN_INT_SHIFT 2
+#define CS42L43_AMP2_SCDET_INT_MASK 0x00000002
+#define CS42L43_AMP2_SCDET_INT_SHIFT 1
+#define CS42L43_AMP1_SCDET_INT_MASK 0x00000001
+#define CS42L43_AMP1_SCDET_INT_SHIFT 0
+
+/* CS42L43_GPIO_INT */
+#define CS42L43_GPIO3_FALL_INT_MASK 0x00000020
+#define CS42L43_GPIO3_FALL_INT_SHIFT 5
+#define CS42L43_GPIO3_RISE_INT_MASK 0x00000010
+#define CS42L43_GPIO3_RISE_INT_SHIFT 4
+#define CS42L43_GPIO2_FALL_INT_MASK 0x00000008
+#define CS42L43_GPIO2_FALL_INT_SHIFT 3
+#define CS42L43_GPIO2_RISE_INT_MASK 0x00000004
+#define CS42L43_GPIO2_RISE_INT_SHIFT 2
+#define CS42L43_GPIO1_FALL_INT_MASK 0x00000002
+#define CS42L43_GPIO1_FALL_INT_SHIFT 1
+#define CS42L43_GPIO1_RISE_INT_MASK 0x00000001
+#define CS42L43_GPIO1_RISE_INT_SHIFT 0
+
+/* CS42L43_HPOUT_INT */
+#define CS42L43_HP_ILIMIT_INT_MASK 0x00000002
+#define CS42L43_HP_ILIMIT_INT_SHIFT 1
+#define CS42L43_HP_LOADDET_DONE_INT_MASK 0x00000001
+#define CS42L43_HP_LOADDET_DONE_INT_SHIFT 0
+
+/* CS42L43_BOOT_CONTROL */
+#define CS42L43_LOCK_HW_STS_MASK 0x00000002
+#define CS42L43_LOCK_HW_STS_SHIFT 1
+
+/* CS42L43_BLOCK_EN */
+#define CS42L43_MCU_EN_MASK 0x00000001
+#define CS42L43_MCU_EN_SHIFT 0
+
+/* CS42L43_SHUTTER_CONTROL */
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_MASK 0x00008000
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_SHIFT 15
+#define CS42L43_SPK_SHUTTER_CFG_MASK 0x00000F00
+#define CS42L43_SPK_SHUTTER_CFG_SHIFT 8
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_MASK 0x00000080
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_SHIFT 7
+#define CS42L43_MIC_SHUTTER_CFG_MASK 0x0000000F
+#define CS42L43_MIC_SHUTTER_CFG_SHIFT 0
+
+/* CS42L43_MCU_SW_REV */
+#define CS42L43_BIOS_SUBMINOR_REV_MASK 0xFF000000
+#define CS42L43_BIOS_SUBMINOR_REV_SHIFT 24
+#define CS42L43_BIOS_MINOR_REV_MASK 0x00F00000
+#define CS42L43_BIOS_MINOR_REV_SHIFT 20
+#define CS42L43_BIOS_MAJOR_REV_MASK 0x000F0000
+#define CS42L43_BIOS_MAJOR_REV_SHIFT 16
+#define CS42L43_FW_SUBMINOR_REV_MASK 0x0000FF00
+#define CS42L43_FW_SUBMINOR_REV_SHIFT 8
+#define CS42L43_FW_MINOR_REV_MASK 0x000000F0
+#define CS42L43_FW_MINOR_REV_SHIFT 4
+#define CS42L43_FW_MAJOR_REV_MASK 0x0000000F
+#define CS42L43_FW_MAJOR_REV_SHIFT 0
+
+/* CS42L43_NEED_CONFIGS */
+#define CS42L43_FW_PATCH_NEED_CFG_MASK 0x80000000
+#define CS42L43_FW_PATCH_NEED_CFG_SHIFT 31
+
+/* CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION */
+#define CS42L43_FW_MM_CTRL_MCU_SEL_MASK 0x00000001
+#define CS42L43_FW_MM_CTRL_MCU_SEL_SHIFT 0
+
+/* CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG */
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_DISABLE_VAL 0xF05AA50F
+
+#endif /* CS42L43_CORE_REGS_H */
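The _MASK/_SHIFT pairs above follow the usual kernel convention for decoding packed register fields. Below is a minimal sketch of how a consumer would pull the firmware revision fields apart; it assumes the CS42L43_MCU_SW_REV register address is defined earlier in this header, as its comment block implies, and the helper name is hypothetical.

#include <linux/printk.h>
#include <linux/regmap.h>

static int cs42l43_print_fw_rev(struct regmap *regmap)
{
	unsigned int rev;
	int ret;

	/* Read the packed revision word, then isolate each field. */
	ret = regmap_read(regmap, CS42L43_MCU_SW_REV, &rev);
	if (ret)
		return ret;

	pr_info("cs42l43: fw %u.%u.%u\n",
		(rev & CS42L43_FW_MAJOR_REV_MASK) >> CS42L43_FW_MAJOR_REV_SHIFT,
		(rev & CS42L43_FW_MINOR_REV_MASK) >> CS42L43_FW_MINOR_REV_SHIFT,
		(rev & CS42L43_FW_SUBMINOR_REV_MASK) >> CS42L43_FW_SUBMINOR_REV_SHIFT);

	return 0;
}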
diff --git a/include/linux/mfd/cs42l43.h b/include/linux/mfd/cs42l43.h
new file mode 100644
index 000000000000..2239d8585e78
--- /dev/null
+++ b/include/linux/mfd/cs42l43.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CS42L43 core driver external data
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_CORE_EXT_H
+#define CS42L43_CORE_EXT_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+
+#define CS42L43_N_SUPPLIES 3
+
+struct device;
+struct gpio_desc;
+struct sdw_slave;
+
+enum cs42l43_irq_numbers {
+ CS42L43_PLL_LOST_LOCK,
+ CS42L43_PLL_READY,
+
+ CS42L43_HP_STARTUP_DONE,
+ CS42L43_HP_SHUTDOWN_DONE,
+ CS42L43_HSDET_DONE,
+ CS42L43_TIPSENSE_UNPLUG_DB,
+ CS42L43_TIPSENSE_PLUG_DB,
+ CS42L43_RINGSENSE_UNPLUG_DB,
+ CS42L43_RINGSENSE_PLUG_DB,
+ CS42L43_TIPSENSE_UNPLUG_PDET,
+ CS42L43_TIPSENSE_PLUG_PDET,
+ CS42L43_RINGSENSE_UNPLUG_PDET,
+ CS42L43_RINGSENSE_PLUG_PDET,
+
+ CS42L43_HS2_BIAS_SENSE,
+ CS42L43_HS1_BIAS_SENSE,
+ CS42L43_DC_DETECT1_FALSE,
+ CS42L43_DC_DETECT1_TRUE,
+ CS42L43_HSBIAS_CLAMPED,
+ CS42L43_HS3_4_BIAS_SENSE,
+
+ CS42L43_AMP2_CLK_STOP_FAULT,
+ CS42L43_AMP1_CLK_STOP_FAULT,
+ CS42L43_AMP2_VDDSPK_FAULT,
+ CS42L43_AMP1_VDDSPK_FAULT,
+ CS42L43_AMP2_SHUTDOWN_DONE,
+ CS42L43_AMP1_SHUTDOWN_DONE,
+ CS42L43_AMP2_STARTUP_DONE,
+ CS42L43_AMP1_STARTUP_DONE,
+ CS42L43_AMP2_THERM_SHDN,
+ CS42L43_AMP1_THERM_SHDN,
+ CS42L43_AMP2_THERM_WARN,
+ CS42L43_AMP1_THERM_WARN,
+ CS42L43_AMP2_SCDET,
+ CS42L43_AMP1_SCDET,
+
+ CS42L43_GPIO3_FALL,
+ CS42L43_GPIO3_RISE,
+ CS42L43_GPIO2_FALL,
+ CS42L43_GPIO2_RISE,
+ CS42L43_GPIO1_FALL,
+ CS42L43_GPIO1_RISE,
+
+ CS42L43_HP_ILIMIT,
+ CS42L43_HP_LOADDET_DONE,
+};
+
+struct cs42l43 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct sdw_slave *sdw;
+
+ struct regulator *vdd_p;
+ struct regulator *vdd_d;
+ struct regulator_bulk_data core_supplies[CS42L43_N_SUPPLIES];
+
+ struct gpio_desc *reset;
+
+ int irq;
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct work_struct boot_work;
+ struct completion device_attach;
+ struct completion device_detach;
+ struct completion firmware_download;
+ int firmware_error;
+
+ unsigned int sdw_freq;
+ /* Lock to gate control of the PLL and its sources. */
+ struct mutex pll_lock;
+
+ bool sdw_pll_active;
+ bool attached;
+ bool hw_lock;
+};
+
+#endif /* CS42L43_CORE_EXT_H */
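struct cs42l43 above is the state shared between the MFD core and its function drivers. The following sketch shows how a child driver would typically reach it, assuming the core stores the pointer with dev_set_drvdata() and registers its children as platform devices (the common MFD convention, not something this header guarantees).

#include <linux/mfd/cs42l43.h>
#include <linux/platform_device.h>

static int cs42l43_child_probe(struct platform_device *pdev)
{
	/* The MFD core is the parent device and owns the shared state. */
	struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent);

	dev_info(&pdev->dev, "SoundWire bus clock: %u Hz\n", cs42l43->sdw_freq);

	return 0;
}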
diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h
index 304985e288d2..93bbfc2c1d54 100644
--- a/include/linux/mfd/da8xx-cfgchip.h
+++ b/include/linux/mfd/da8xx-cfgchip.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* TI DaVinci DA8xx CHIPCFGx registers for syscon consumers.
*
* Copyright (C) 2016 David Lechner <david@lechnology.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H
diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h
index 0aa3a1a49ee3..d1c57b8dbba4 100644
--- a/include/linux/mfd/da903x.h
+++ b/include/linux/mfd/da903x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PMIC_DA903X_H
#define __LINUX_PMIC_DA903X_H
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index ae5b663836d0..9cb2fc2938ce 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* da9052 declarations for DA9052 PMICs.
*
* Copyright(c) 2011 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __MFD_DA9052_DA9052_H
@@ -107,6 +93,8 @@ struct da9052 {
int chip_irq;
+ int fault_log;
+
/* SOC I/O transfer related fixes for DA9052/53 */
int (*fix_io) (struct da9052 *da9052, unsigned char reg);
};
diff --git a/include/linux/mfd/da9052/pdata.h b/include/linux/mfd/da9052/pdata.h
index 62c5c3c2992e..60fcab32542d 100644
--- a/include/linux/mfd/da9052/pdata.h
+++ b/include/linux/mfd/da9052/pdata.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Platform data declarations for DA9052 PMICs.
*
* Copyright(c) 2011 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __MFD_DA9052_PDATA_H__
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h
index 76780ea8849c..752b20b16dc3 100644
--- a/include/linux/mfd/da9052/reg.h
+++ b/include/linux/mfd/da9052/reg.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Register declarations for DA9052 PMICs.
*
* Copyright(c) 2011 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __LINUX_MFD_DA9052_REG_H
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
index 5dc743fd63a6..a96eba52c4d6 100644
--- a/include/linux/mfd/da9055/core.h
+++ b/include/linux/mfd/da9055/core.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* da9055 declarations for DA9055 PMICs.
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __DA9055_CORE_H
diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
index 04e092be4b07..137a2b067512 100644
--- a/include/linux/mfd/da9055/pdata.h
+++ b/include/linux/mfd/da9055/pdata.h
@@ -1,10 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (C) 2012 Dialog Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#ifndef __DA9055_PDATA_H
#define __DA9055_PDATA_H
@@ -28,18 +23,8 @@ struct da9055_pdata {
/* Enable RTC in RESET Mode */
bool reset_enable;
/*
- * GPI muxed pin to control
- * regulator state A/B, 0 if not available.
- */
- int *gpio_ren;
- /*
- * GPI muxed pin to control
- * regulator set, 0 if not available.
- */
- int *gpio_rsel;
- /*
* Regulator mode control bits value (GPI offset) that
- * that controls the regulator state, 0 if not available.
+ * controls the regulator state, 0 if not available.
*/
enum gpio_select *reg_ren;
/*
@@ -47,7 +32,5 @@ struct da9055_pdata {
* controls the regulator set A/B, 0 if not available.
*/
enum gpio_select *reg_rsel;
- /* GPIOs to enable regulator, 0 if not available */
- int *ena_gpio;
};
#endif /* __DA9055_PDATA_H */
diff --git a/include/linux/mfd/da9055/reg.h b/include/linux/mfd/da9055/reg.h
index 2b592e072dbf..54a717b6c3de 100644
--- a/include/linux/mfd/da9055/reg.h
+++ b/include/linux/mfd/da9055/reg.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* DA9055 declarations for DA9055 PMICs.
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __DA9055_REG_H
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
index 74d33a01ddae..ea0c670992de 100644
--- a/include/linux/mfd/da9062/core.h
+++ b/include/linux/mfd/da9062/core.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015-2017 Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MFD_DA9062_CORE_H__
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
index 18d576aed902..2906bf6160fb 100644
--- a/include/linux/mfd/da9062/registers.h
+++ b/include/linux/mfd/da9062/registers.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015-2017 Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DA9062_H__
@@ -806,6 +797,9 @@
#define DA9062AA_BUCK3_SL_A_SHIFT 7
#define DA9062AA_BUCK3_SL_A_MASK BIT(7)
+/* DA9062AA_VLDO[1-4]_A common */
+#define DA9062AA_VLDO_A_MIN_SEL 2
+
/* DA9062AA_VLDO1_A = 0x0A9 */
#define DA9062AA_VLDO1_A_SHIFT 0
#define DA9062AA_VLDO1_A_MASK 0x3f
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index f3ae65db4c86..eae82f421414 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Definitions for DA9063 MFD driver
*
@@ -5,12 +6,6 @@
*
* Author: Michal Hajduk, Dialog Semiconductor
* Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_DA9063_CORE_H__
@@ -29,14 +24,19 @@
#define DA9063_DRVNAME_RTC "da9063-rtc"
#define DA9063_DRVNAME_VIBRATION "da9063-vibration"
-enum da9063_models {
- PMIC_DA9063 = 0x61,
+#define PMIC_CHIP_ID_DA9063 0x61
+
+enum da9063_type {
+ PMIC_TYPE_DA9063 = 0,
+ PMIC_TYPE_DA9063L,
};
enum da9063_variant_codes {
PMIC_DA9063_AD = 0x3,
PMIC_DA9063_BB = 0x5,
PMIC_DA9063_CA = 0x6,
+ PMIC_DA9063_DA = 0x7,
+ PMIC_DA9063_EA = 0x8,
};
/* Interrupts */
@@ -72,15 +72,13 @@ enum da9063_irqs {
DA9063_IRQ_GPI15,
};
-#define DA9063_IRQ_BASE_OFFSET 0
-#define DA9063_NUM_IRQ (DA9063_IRQ_GPI15 + 1 - DA9063_IRQ_BASE_OFFSET)
-
struct da9063 {
/* Device */
struct device *dev;
- unsigned short model;
+ enum da9063_type type;
unsigned char variant_code;
unsigned int flags;
+ bool use_sw_pm;
/* Control interface */
struct regmap *regmap;
@@ -94,7 +92,4 @@ struct da9063 {
int da9063_device_init(struct da9063 *da9063, unsigned int irq);
int da9063_irq_init(struct da9063 *da9063);
-void da9063_device_exit(struct da9063 *da9063);
-void da9063_irq_exit(struct da9063 *da9063);
-
#endif /* __MFD_DA9063_CORE_H__ */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
deleted file mode 100644
index 8a125701ef7b..000000000000
--- a/include/linux/mfd/da9063/pdata.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Platform configuration options for DA9063
- *
- * Copyright 2012 Dialog Semiconductor Ltd.
- *
- * Author: Michal Hajduk, Dialog Semiconductor
- * Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __MFD_DA9063_PDATA_H__
-#define __MFD_DA9063_PDATA_H__
-
-#include <linux/regulator/machine.h>
-
-/*
- * Regulator configuration
- */
-/* DA9063 regulator IDs */
-enum {
- /* BUCKs */
- DA9063_ID_BCORE1,
- DA9063_ID_BCORE2,
- DA9063_ID_BPRO,
- DA9063_ID_BMEM,
- DA9063_ID_BIO,
- DA9063_ID_BPERI,
-
- /* BCORE1 and BCORE2 in merged mode */
- DA9063_ID_BCORES_MERGED,
- /* BMEM and BIO in merged mode */
- DA9063_ID_BMEM_BIO_MERGED,
- /* When two BUCKs are merged, they cannot be reused separately */
-
- /* LDOs */
- DA9063_ID_LDO1,
- DA9063_ID_LDO2,
- DA9063_ID_LDO3,
- DA9063_ID_LDO4,
- DA9063_ID_LDO5,
- DA9063_ID_LDO6,
- DA9063_ID_LDO7,
- DA9063_ID_LDO8,
- DA9063_ID_LDO9,
- DA9063_ID_LDO10,
- DA9063_ID_LDO11,
-};
-
-/* Regulators platform data */
-struct da9063_regulator_data {
- int id;
- struct regulator_init_data *initdata;
-};
-
-struct da9063_regulators_pdata {
- unsigned n_regulators;
- struct da9063_regulator_data *regulator_data;
-};
-
-
-/*
- * RGB LED configuration
- */
-/* LED IDs for flags in struct led_info. */
-enum {
- DA9063_GPIO11_LED,
- DA9063_GPIO14_LED,
- DA9063_GPIO15_LED,
-
- DA9063_LED_NUM
-};
-#define DA9063_LED_ID_MASK 0x3
-
-/* LED polarity for flags in struct led_info. */
-#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0
-#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4
-
-
-/*
- * General PMIC configuration
- */
-/* HWMON ADC channels configuration */
-#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010
-#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020
-#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040
-#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080
-#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100
-#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200
-#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400
-#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800
-
-/* Disable register caching. */
-#define DA9063_FLG_NO_CACHE 0x0008
-
-struct da9063;
-
-/* DA9063 platform data */
-struct da9063_pdata {
- int (*init)(struct da9063 *da9063);
- int irq_base;
- bool key_power;
- unsigned flags;
- struct da9063_regulators_pdata *regulators_pdata;
- struct led_platform_data *leds_pdata;
-};
-
-#endif /* __MFD_DA9063_PDATA_H__ */
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 5d42859cb441..7b8364bd08a0 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Registers definition for DA9063 modules
*
@@ -5,12 +6,6 @@
*
* Author: Michal Hajduk, Dialog Semiconductor
* Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef _DA9063_REG_H
@@ -215,9 +210,9 @@
/* DA9063 Configuration registers */
/* OTP */
-#define DA9063_REG_OPT_COUNT 0x101
-#define DA9063_REG_OPT_ADDR 0x102
-#define DA9063_REG_OPT_DATA 0x103
+#define DA9063_REG_OTP_CONT 0x101
+#define DA9063_REG_OTP_ADDR 0x102
+#define DA9063_REG_OTP_DATA 0x103
/* Customer Trim and Configuration */
#define DA9063_REG_T_OFFSET 0x104
@@ -297,8 +292,10 @@
#define DA9063_BB_REG_GP_ID_19 0x134
/* Chip ID and variant */
-#define DA9063_REG_CHIP_ID 0x181
-#define DA9063_REG_CHIP_VARIANT 0x182
+#define DA9063_REG_DEVICE_ID 0x181
+#define DA9063_REG_VARIANT_ID 0x182
+#define DA9063_REG_CUSTOMER_ID 0x183
+#define DA9063_REG_CONFIG_ID 0x184
/*
* PMIC registers bits
@@ -934,9 +931,6 @@
#define DA9063_RTC_CLOCK 0x40
#define DA9063_OUT_32K_EN 0x80
-/* DA9063_REG_CHIP_VARIANT */
-#define DA9063_CHIP_VARIANT_SHIFT 4
-
/* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */
#define DA9063_BIO_ILIM_MASK 0x0F
#define DA9063_BMEM_ILIM_MASK 0xF0
@@ -1043,6 +1037,32 @@
#define DA9063_NONKEY_PIN_AUTODOWN 0x02
#define DA9063_NONKEY_PIN_AUTOFLPRT 0x03
+/* DA9063_REG_CONFIG_J (addr=0x10F) */
+#define DA9063_TWOWIRE_TO 0x40
+
+/* DA9063_REG_MON_REG_2 (addr=0x115) */
+#define DA9063_LDO1_MON_EN 0x01
+#define DA9063_LDO2_MON_EN 0x02
+#define DA9063_LDO3_MON_EN 0x04
+#define DA9063_LDO4_MON_EN 0x08
+#define DA9063_LDO5_MON_EN 0x10
+#define DA9063_LDO6_MON_EN 0x20
+#define DA9063_LDO7_MON_EN 0x40
+#define DA9063_LDO8_MON_EN 0x80
+
+/* DA9063_REG_MON_REG_3 (addr=0x116) */
+#define DA9063_LDO9_MON_EN 0x01
+#define DA9063_LDO10_MON_EN 0x02
+#define DA9063_LDO11_MON_EN 0x04
+
+/* DA9063_REG_MON_REG_4 (addr=0x117) */
+#define DA9063_BCORE1_MON_EN 0x04
+#define DA9063_BCORE2_MON_EN 0x08
+#define DA9063_BPRO_MON_EN 0x10
+#define DA9063_BIO_MON_EN 0x20
+#define DA9063_BMEM_MON_EN 0x40
+#define DA9063_BPERI_MON_EN 0x80
+
/* DA9063_REG_MON_REG_5 (addr=0x118) */
#define DA9063_MON_A8_IDX_MASK 0x07
#define DA9063_MON_A8_IDX_NONE 0x00
@@ -1070,4 +1090,10 @@
#define DA9063_MON_A10_IDX_LDO9 0x04
#define DA9063_MON_A10_IDX_LDO10 0x05
+/* DA9063_REG_VARIANT_ID (addr=0x182) */
+#define DA9063_VARIANT_ID_VRC_SHIFT 0
+#define DA9063_VARIANT_ID_VRC_MASK 0x0F
+#define DA9063_VARIANT_ID_MRC_SHIFT 4
+#define DA9063_VARIANT_ID_MRC_MASK 0xF0
+
#endif /* _DA9063_REG_H */
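The new VARIANT_ID fields split the register into a minor (VRC) and a major (MRC) revision code. A sketch of extracting the value matched against enum da9063_variant_codes, on the assumption that the AD/BB/CA/DA/EA codes above live in the MRC nibble:

static u8 da9063_variant_code(u8 variant_id)
{
	/* MRC occupies bits [7:4] of DA9063_REG_VARIANT_ID. */
	return (variant_id & DA9063_VARIANT_ID_MRC_MASK) >>
		DA9063_VARIANT_ID_MRC_SHIFT;
}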
diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h
index 1bf50caeb9fa..d116d5f3ef56 100644
--- a/include/linux/mfd/da9150/core.h
+++ b/include/linux/mfd/da9150/core.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* DA9150 MFD Driver - Core Data
*
* Copyright (c) 2014 Dialog Semiconductor
*
* Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __DA9150_CORE_H
diff --git a/include/linux/mfd/da9150/registers.h b/include/linux/mfd/da9150/registers.h
index 27ca6ee4d840..1fd8f5968817 100644
--- a/include/linux/mfd/da9150/registers.h
+++ b/include/linux/mfd/da9150/registers.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* DA9150 MFD Driver - Registers
*
* Copyright (c) 2014 Dialog Semiconductor
*
* Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __DA9150_REGISTERS_H
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 2c0127cb06c5..9acd703dd5ca 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -1,33 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* DaVinci Voice Codec Core Interface for TI platforms
*
* Copyright (C) 2010 Texas Instruments, Inc
*
* Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
#define __LINUX_MFD_DAVINCI_VOICECODEC_H_
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
+#include <linux/bits.h>
#include <linux/mfd/core.h>
-#include <linux/platform_data/edma.h>
+#include <linux/types.h>
+struct clk;
+struct device;
+struct platform_device;
struct regmap;
/*
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 7ba67b55b312..a62de3d155ed 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) STMicroelectronics 2009
* Copyright (C) ST-Ericsson SA 2010
*
- * License Terms: GNU General Public License v2
* Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
*
* PRCMU f/w APIs
@@ -489,7 +489,7 @@ struct prcmu_auto_pm_config {
#ifdef CONFIG_MFD_DB8500_PRCMU
-void db8500_prcmu_early_init(u32 phy_base, u32 size);
+void db8500_prcmu_early_init(void);
int prcmu_set_rc_a2p(enum romcode_write);
enum romcode_read prcmu_get_rc_p2a(void);
enum ap_pwrst prcmu_get_xp70_current_state(void);
@@ -525,9 +525,6 @@ u8 db8500_prcmu_get_power_state_result(void);
void db8500_prcmu_enable_wakeups(u32 wakeups);
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state);
int db8500_prcmu_request_clock(u8 clock, bool enable);
-int db8500_prcmu_set_display_clocks(void);
-int db8500_prcmu_disable_dsipll(void);
-int db8500_prcmu_enable_dsipll(void);
void db8500_prcmu_config_abb_event_readout(u32 abb_events);
void db8500_prcmu_get_abb_event_buffer(void __iomem **buf);
int db8500_prcmu_config_esram0_deep_sleep(u8 state);
@@ -546,7 +543,7 @@ void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value);
#else /* !CONFIG_MFD_DB8500_PRCMU */
-static inline void db8500_prcmu_early_init(u32 phy_base, u32 size) {}
+static inline void db8500_prcmu_early_init(void) {}
static inline int prcmu_set_rc_a2p(enum romcode_write code)
{
@@ -682,21 +679,6 @@ static inline int db8500_prcmu_request_clock(u8 clock, bool enable)
return 0;
}
-static inline int db8500_prcmu_set_display_clocks(void)
-{
- return 0;
-}
-
-static inline int db8500_prcmu_disable_dsipll(void)
-{
- return 0;
-}
-
-static inline int db8500_prcmu_enable_dsipll(void)
-{
- return 0;
-}
-
static inline int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
return 0;
@@ -738,7 +720,7 @@ static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val)
static inline bool db8500_prcmu_is_ac_wake_requested(void)
{
- return 0;
+ return false;
}
static inline int db8500_prcmu_set_arm_opp(u8 opp)
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index 2e2c6a63a065..828362b7860c 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST Ericsson SA 2011
*
- * License Terms: GNU General Public License v2
- *
* STE Ux500 PRCMU API
*/
#ifndef __MACH_PRCMU_H
@@ -187,10 +186,12 @@ enum ddr_pwrst {
#define PRCMU_FW_PROJECT_U8500_C3 8
#define PRCMU_FW_PROJECT_U8500_C4 9
#define PRCMU_FW_PROJECT_U9500_MBL 10
-#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */
+#define PRCMU_FW_PROJECT_U8500_SSG1 11 /* Samsung specific */
#define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */
#define PRCMU_FW_PROJECT_U8520 13
#define PRCMU_FW_PROJECT_U8420 14
+#define PRCMU_FW_PROJECT_U8500_SSG2 15 /* Samsung specific */
+#define PRCMU_FW_PROJECT_U8420_SYSCLK 17
#define PRCMU_FW_PROJECT_A9420 20
/* [32..63] 9540 and derivatives */
#define PRCMU_FW_PROJECT_U9540 32
@@ -212,9 +213,9 @@ struct prcmu_fw_version {
#if defined(CONFIG_UX500_SOC_DB8500)
-static inline void prcmu_early_init(u32 phy_base, u32 size)
+static inline void __init prcmu_early_init(void)
{
- return db8500_prcmu_early_init(phy_base, size);
+ db8500_prcmu_early_init();
}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
@@ -301,7 +302,7 @@ static inline int prcmu_request_ape_opp_100_voltage(bool enable)
static inline void prcmu_system_reset(u16 reset_code)
{
- return db8500_prcmu_system_reset(reset_code);
+ db8500_prcmu_system_reset(reset_code);
}
static inline u16 prcmu_get_reset_code(void)
@@ -313,7 +314,7 @@ int prcmu_ac_wake_req(void);
void prcmu_ac_sleep_req(void);
static inline void prcmu_modem_reset(void)
{
- return db8500_prcmu_modem_reset();
+ db8500_prcmu_modem_reset();
}
static inline bool prcmu_is_ac_wake_requested(void)
@@ -321,21 +322,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
return db8500_prcmu_is_ac_wake_requested();
}
-static inline int prcmu_set_display_clocks(void)
-{
- return db8500_prcmu_set_display_clocks();
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
- return db8500_prcmu_disable_dsipll();
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
- return db8500_prcmu_enable_dsipll();
-}
-
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
return db8500_prcmu_config_esram0_deep_sleep(state);
@@ -402,7 +388,7 @@ static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
}
#else
-static inline void prcmu_early_init(u32 phy_base, u32 size) {}
+static inline void prcmu_early_init(void) {}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
bool keep_ap_pll)
@@ -511,21 +497,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
return false;
}
-static inline int prcmu_set_display_clocks(void)
-{
- return 0;
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
- return 0;
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
- return 0;
-}
-
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
return 0;
@@ -585,36 +556,6 @@ static inline void prcmu_clear(unsigned int reg, u32 bits)
#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-#ifdef CONFIG_DBX500_PRCMU_QOS_POWER
-
-unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
-void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
-void prcmu_qos_force_opp(int, s32);
-int prcmu_qos_requirement(int pm_qos_class);
-int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value);
-int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value);
-void prcmu_qos_remove_requirement(int pm_qos_class, char *name);
-int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-
-#else
-
-static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
-{
- return 0;
-}
-
-static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {}
-
-static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {}
-
-static inline int prcmu_qos_requirement(int prcmu_qos_class)
-{
- return 0;
-}
-
static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
char *name, s32 value)
{
@@ -631,17 +572,4 @@ static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
{
}
-static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-
-#endif
-
#endif /* __MACH_PRCMU_H */
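The hunks above prune stubs for functions that no longer have callers, but the surviving layout still illustrates the idiom this header relies on: a real prototype when the driver is built in, and a static inline no-op otherwise, so call sites compile either way. A generic sketch with hypothetical names:

#ifdef CONFIG_FOO_DRIVER
int foo_request_clock(u8 clock, bool enable);
#else
static inline int foo_request_clock(u8 clock, bool enable)
{
	return 0;	/* report success when the driver is compiled out */
}
#endif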
diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h
index 004b24576da8..4cade9aa8edb 100644
--- a/include/linux/mfd/dln2.h
+++ b/include/linux/mfd/dln2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_USB_DLN2_H
#define __LINUX_USB_DLN2_H
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
deleted file mode 100644
index 2227c6a75d84..000000000000
--- a/include/linux/mfd/ds1wm.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* MFD cell driver data for the DS1WM driver
- *
- * to be defined in the MFD device that is
- * using this driver for one of his sub devices
- */
-
-struct ds1wm_driver_data {
- int active_high;
- int clock_rate;
- /* in milliseconds, the amount of time to
- * sleep following a reset pulse. Zero
- * should work if your bus devices recover
- * time respects the 1-wire spec since the
- * ds1wm implements the precise timings of
- * a reset pulse/presence detect sequence.
- */
- unsigned int reset_recover_delay;
-
- /* Say 1 here for big endian Hardware
- * (only relevant with bus-shift > 0
- */
- bool is_hw_big_endian;
-
- /* left shift of register number to get register address offsett.
- * Only 0,1,2 allowed for 8,16 or 32 bit bus width respectively
- */
- unsigned int bus_shift;
-};
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 32a1b5cfeba1..ea51b1cdca5a 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2009 Daniel Ribeiro <drwyrm@gmail.com>
*
@@ -30,7 +31,6 @@ int ezx_pcap_set_bits(struct pcap_chip *, u8, u32, u32);
int pcap_to_irq(struct pcap_chip *, int);
int irq_to_pcap(struct pcap_chip *, int);
int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *);
-int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]);
void pcap_set_ts_bits(struct pcap_chip *, u32);
#define PCAP_SECOND_PORT 1
diff --git a/include/linux/mfd/gsc.h b/include/linux/mfd/gsc.h
new file mode 100644
index 000000000000..6bd639c285b4
--- /dev/null
+++ b/include/linux/mfd/gsc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Gateworks Corporation
+ */
+#ifndef __LINUX_MFD_GSC_H_
+#define __LINUX_MFD_GSC_H_
+
+#include <linux/regmap.h>
+
+/* Device Addresses */
+#define GSC_MISC 0x20
+#define GSC_UPDATE 0x21
+#define GSC_GPIO 0x23
+#define GSC_HWMON 0x29
+#define GSC_EEPROM0 0x50
+#define GSC_EEPROM1 0x51
+#define GSC_EEPROM2 0x52
+#define GSC_EEPROM3 0x53
+#define GSC_RTC 0x68
+
+/* Register offsets */
+enum {
+ GSC_CTRL_0 = 0x00,
+ GSC_CTRL_1 = 0x01,
+ GSC_TIME = 0x02,
+ GSC_TIME_ADD = 0x06,
+ GSC_IRQ_STATUS = 0x0A,
+ GSC_IRQ_ENABLE = 0x0B,
+ GSC_FW_CRC = 0x0C,
+ GSC_FW_VER = 0x0E,
+ GSC_WP = 0x0F,
+};
+
+/* Bit definitions */
+#define GSC_CTRL_0_PB_HARD_RESET 0
+#define GSC_CTRL_0_PB_CLEAR_SECURE_KEY 1
+#define GSC_CTRL_0_PB_SOFT_POWER_DOWN 2
+#define GSC_CTRL_0_PB_BOOT_ALTERNATE 3
+#define GSC_CTRL_0_PERFORM_CRC 4
+#define GSC_CTRL_0_TAMPER_DETECT 5
+#define GSC_CTRL_0_SWITCH_HOLD 6
+
+#define GSC_CTRL_1_SLEEP_ENABLE 0
+#define GSC_CTRL_1_SLEEP_ACTIVATE 1
+#define GSC_CTRL_1_SLEEP_ADD 2
+#define GSC_CTRL_1_SLEEP_NOWAKEPB 3
+#define GSC_CTRL_1_WDT_TIME 4
+#define GSC_CTRL_1_WDT_ENABLE 5
+#define GSC_CTRL_1_SWITCH_BOOT_ENABLE 6
+#define GSC_CTRL_1_SWITCH_BOOT_CLEAR 7
+
+#define GSC_IRQ_PB 0
+#define GSC_IRQ_KEY_ERASED 1
+#define GSC_IRQ_EEPROM_WP 2
+#define GSC_IRQ_RESV 3
+#define GSC_IRQ_GPIO 4
+#define GSC_IRQ_TAMPER 5
+#define GSC_IRQ_WDT_TIMEOUT 6
+#define GSC_IRQ_SWITCH_HOLD 7
+
+int gsc_read(void *context, unsigned int reg, unsigned int *val);
+int gsc_write(void *context, unsigned int reg, unsigned int val);
+
+struct gsc_dev {
+ struct device *dev;
+
+ struct i2c_client *i2c; /* 0x20: interrupt controller, WDT */
+ struct i2c_client *i2c_hwmon; /* 0x29: hwmon, fan controller */
+
+ struct regmap *regmap;
+
+ unsigned int fwver;
+ unsigned short fwcrc;
+};
+
+#endif /* __LINUX_MFD_GSC_H_ */
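gsc_read()/gsc_write() are the accessors the GSC function drivers share. A sketch of reading the firmware version, assuming the void *context argument is the core I2C client that owns the register map, which the struct gsc_dev comments suggest but the prototypes do not state:

#include <linux/mfd/gsc.h>

static int gsc_example_fw_ver(struct gsc_dev *gsc, unsigned int *ver)
{
	/* GSC_FW_VER lives behind the 0x20 (core) client. */
	return gsc_read(gsc->i2c, GSC_FW_VER, ver);
}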
diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h
index 587273e35acf..2cadf8897c64 100644
--- a/include/linux/mfd/hi6421-pmic.h
+++ b/include/linux/mfd/hi6421-pmic.h
@@ -1,16 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Header file for device driver Hi6421 PMIC
*
* Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
* http://www.hisilicon.com
* Copyright (c) <2013-2014> Linaro Ltd.
- * http://www.linaro.org
+ * https://www.linaro.org
*
* Author: Guodong Xu <guodong.xu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __HI6421_PMIC_H
@@ -38,4 +35,9 @@ struct hi6421_pmic {
struct regmap *regmap;
};
+enum hi6421_type {
+ HI6421 = 0,
+ HI6421_V530,
+};
+
#endif /* __HI6421_PMIC_H */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index 62f03c2b1bb0..194556851ccf 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -1,20 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Device driver for regulators in hi655x IC
*
- * Copyright (c) 2016 Hisilicon.
+ * Copyright (c) 2016 HiSilicon Ltd.
*
* Authors:
* Chen Feng <puck.chen@hisilicon.com>
* Fei Wang <w.f@huawei.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __HI655X_PMIC_H
#define __HI655X_PMIC_H
+#include <linux/gpio/consumer.h>
+
/* Hi655x registers are mapped to memory bus in 4 bytes stride */
#define HI655X_STRIDE 4
#define HI655X_BUS_ADDR(x) ((x) << 2)
@@ -53,10 +52,9 @@
#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT)
struct hi655x_pmic {
- struct resource *res;
struct device *dev;
struct regmap *regmap;
- int gpio;
+ struct gpio_desc *gpio;
unsigned int ver;
struct regmap_irq_chip_data *irq_data;
};
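The stride macro above turns a datasheet register index into its memory-bus byte offset. A worked example with an arbitrary index:

/* Each 8-bit register occupies a 4-byte slot, so index 0x34 (an
 * arbitrary example, not a real register name) maps to byte offset
 * 0x34 << 2 == 0xd0. */
static const unsigned int example_addr = HI655X_BUS_ADDR(0x34);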
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h
deleted file mode 100644
index 3d3ed67bd969..000000000000
--- a/include/linux/mfd/htc-pasic3.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * HTC PASIC3 driver - LEDs and DS1WM
- *
- * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- */
-
-#ifndef __PASIC3_H
-#define __PASIC3_H
-
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-
-extern void pasic3_write_register(struct device *dev, u32 reg, u8 val);
-extern u8 pasic3_read_register(struct device *dev, u32 reg);
-
-/*
- * mask for registers 0x20,0x21,0x22
- */
-#define PASIC3_MASK_LED0 0x04
-#define PASIC3_MASK_LED1 0x08
-#define PASIC3_MASK_LED2 0x40
-
-/*
- * bits in register 0x06
- */
-#define PASIC3_BIT2_LED0 0x08
-#define PASIC3_BIT2_LED1 0x10
-#define PASIC3_BIT2_LED2 0x20
-
-struct pasic3_led {
- struct led_classdev led;
- unsigned int hw_num;
- unsigned int bit2;
- unsigned int mask;
- struct pasic3_leds_machinfo *pdata;
-};
-
-struct pasic3_leds_machinfo {
- unsigned int num_leds;
- unsigned int power_gpio;
- struct pasic3_led *leds;
-};
-
-struct pasic3_platform_data {
- struct pasic3_leds_machinfo *led_pdata;
- unsigned int clock_rate;
-};
-
-#endif
diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h
new file mode 100644
index 000000000000..1db532feeb91
--- /dev/null
+++ b/include/linux/mfd/idt82p33_reg.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT82P33_REG
+#define HAVE_IDT82P33_REG
+
+#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
+
+/* Register address */
+#define DPLL1_TOD_CNFG 0x134
+#define DPLL2_TOD_CNFG 0x1B4
+
+#define DPLL1_TOD_STS 0x10B
+#define DPLL2_TOD_STS 0x18B
+
+#define DPLL1_TOD_TRIGGER 0x115
+#define DPLL2_TOD_TRIGGER 0x195
+
+#define DPLL1_OPERATING_MODE_CNFG 0x120
+#define DPLL2_OPERATING_MODE_CNFG 0x1A0
+
+#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
+#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
+
+#define DPLL1_PHASE_OFFSET_CNFG 0x143
+#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
+
+#define DPLL1_SYNC_EDGE_CNFG 0x140
+#define DPLL2_SYNC_EDGE_CNFG 0x1C0
+
+#define DPLL1_INPUT_MODE_CNFG 0x116
+#define DPLL2_INPUT_MODE_CNFG 0x196
+
+#define DPLL1_OPERATING_STS 0x102
+#define DPLL2_OPERATING_STS 0x182
+
+#define DPLL1_CURRENT_FREQ_STS 0x103
+#define DPLL2_CURRENT_FREQ_STS 0x183
+
+#define REG_SOFT_RESET 0x381
+
+#define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn)))
+#define TOD_TRIGGER(wr_trig, rd_trig) ((((wr_trig) & 0xf) << 4) | ((rd_trig) & 0xf))
+
+/* Register bit definitions */
+#define SYNC_TOD BIT(1)
+#define PH_OFFSET_EN BIT(7)
+#define SQUELCH_ENABLE BIT(5)
+
+/* Bit definitions for the DPLL_MODE register */
+#define PLL_MODE_SHIFT (0)
+#define PLL_MODE_MASK (0x1F)
+#define COMBO_MODE_EN BIT(5)
+#define COMBO_MODE_SHIFT (6)
+#define COMBO_MODE_MASK (0x3)
+
+/* Bit definitions for DPLL_OPERATING_STS register */
+#define OPERATING_STS_MASK (0x7)
+#define OPERATING_STS_SHIFT (0x0)
+
+/* Bit definitions for DPLL_TOD_TRIGGER register */
+#define READ_TRIGGER_MASK (0xF)
+#define READ_TRIGGER_SHIFT (0x0)
+#define WRITE_TRIGGER_MASK (0xF0)
+#define WRITE_TRIGGER_SHIFT (0x4)
+
+/* Bit definitions for REG_SOFT_RESET register */
+#define SOFT_RESET_EN BIT(7)
+
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
+ PLL_MODE_FORCE_FREERUN = 1,
+ PLL_MODE_FORCE_HOLDOVER = 2,
+ PLL_MODE_FORCE_LOCKED = 4,
+ PLL_MODE_FORCE_PRE_LOCKED2 = 5,
+ PLL_MODE_FORCE_PRE_LOCKED = 6,
+ PLL_MODE_FORCE_LOST_PHASE = 7,
+ PLL_MODE_DCO = 10,
+ PLL_MODE_WPH = 18,
+ PLL_MODE_MAX = PLL_MODE_WPH,
+};
+
+enum hw_tod_trig_sel {
+ HW_TOD_TRIG_SEL_MIN = 0,
+ HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_NO_READ = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_SYNC_SEL = 1,
+ HW_TOD_TRIG_SEL_IN12 = 2,
+ HW_TOD_TRIG_SEL_IN13 = 3,
+ HW_TOD_TRIG_SEL_IN14 = 4,
+ HW_TOD_TRIG_SEL_TOD_PPS = 5,
+ HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
+ HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
+ HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
+ HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+};
+
+/** @brief Enumerated type listing DPLL operational modes */
+enum dpll_state {
+ DPLL_STATE_FREERUN = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_LOCKED = 4,
+ DPLL_STATE_PRELOCKED2 = 5,
+ DPLL_STATE_PRELOCKED = 6,
+ DPLL_STATE_LOSTPHASE = 7,
+ DPLL_STATE_MAX
+};
+
+#endif
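REG_ADDR() composes a 7-bit page and 7-bit offset into the flat addresses used above, and TOD_TRIGGER() packs the write/read trigger selects into one byte. Two worked examples against constants from this header (the example names are hypothetical):

#include <linux/build_bug.h>
#include <linux/types.h>

/* DPLL1_TOD_CNFG (0x134) decomposes as page 0x2, offset 0x34. */
static_assert(REG_ADDR(0x2, 0x34) == DPLL1_TOD_CNFG);

/* Selects the MSB-TOD-CNFG write trigger and the LSB-TOD-STS read
 * trigger: (9 << 4) | 9 == 0x99. */
static const u8 example_trigger =
	TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
		    HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);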
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
new file mode 100644
index 000000000000..53a222605526
--- /dev/null
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -0,0 +1,768 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Based on 5.2.0, Family Programming Guide (Sept 30, 2020)
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+#define HW_DPLL_4 (0x8e00)
+#define HW_DPLL_5 (0x8f00)
+#define HW_DPLL_6 (0x9000)
+#define HW_DPLL_7 (0x9100)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define HW_Q8_CTRL_SPARE (0xa7d4)
+#define HW_Q11_CTRL_SPARE (0xa7ec)
+
+/*
+ * Select FOD5 as sync_trigger for Q8 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q8 divider.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_SYNC_TRIG BIT(1)
+
+/*
+ * Enable FOD5 as driver for clock and sync for Q8 divider.
+ * Enable fanout buffer for FOD5.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+/*
+ * Select FOD6 as sync_trigger for Q11 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q11 divider.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_SYNC_TRIG BIT(1)
+
+/*
+ * Enable FOD6 as driver for clock and sync for Q11 divider.
+ * Enable fanout buffer for FOD6.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_V520 0x0013
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define BOOT_STATUS 0x0000
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
+#define STATUS 0xc03c
+#define DPLL0_STATUS 0x0018
+#define DPLL1_STATUS 0x0019
+#define DPLL2_STATUS 0x001a
+#define DPLL3_STATUS 0x001b
+#define DPLL4_STATUS 0x001c
+#define DPLL5_STATUS 0x001d
+#define DPLL6_STATUS 0x001e
+#define DPLL7_STATUS 0x001f
+#define DPLL_SYS_STATUS 0x0020
+#define DPLL_SYS_APLL_STATUS 0x0021
+#define DPLL0_FILTER_STATUS 0x0044
+#define DPLL1_FILTER_STATUS 0x004c
+#define DPLL2_FILTER_STATUS 0x0054
+#define DPLL3_FILTER_STATUS 0x005c
+#define DPLL4_FILTER_STATUS 0x0064
+#define DPLL5_FILTER_STATUS 0x006c
+#define DPLL6_FILTER_STATUS 0x0074
+#define DPLL7_FILTER_STATUS 0x007c
+#define DPLLSYS_FILTER_STATUS 0x0084
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+#define GPIO0_TO_7_OUT_V520 0x0002
+#define GPIO8_TO_15_OUT_V520 0x0003
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+#define INPUT_1 0xc1c0
+#define INPUT_2 0xc1d0
+#define INPUT_3 0xc200
+#define INPUT_4 0xc210
+#define INPUT_5 0xc220
+#define INPUT_6 0xc230
+#define INPUT_7 0xc240
+#define INPUT_8 0xc250
+#define INPUT_9 0xc260
+#define INPUT_10 0xc280
+#define INPUT_11 0xc290
+#define INPUT_12 0xc2a0
+#define INPUT_13 0xc2b0
+#define INPUT_14 0xc2c0
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+#define REF_MON_1 0xc2ec
+#define REF_MON_2 0xc300
+#define REF_MON_3 0xc30c
+#define REF_MON_4 0xc318
+#define REF_MON_5 0xc324
+#define REF_MON_6 0xc330
+#define REF_MON_7 0xc33c
+#define REF_MON_8 0xc348
+#define REF_MON_9 0xc354
+#define REF_MON_10 0xc360
+#define REF_MON_11 0xc36c
+#define REF_MON_12 0xc380
+#define REF_MON_13 0xc38c
+#define REF_MON_14 0xc398
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+#define DPLL_MODE_V520 0x003B
+#define DPLL_1 0xc400
+#define DPLL_2 0xc438
+#define DPLL_2_V520 0xc43c
+#define DPLL_3 0xc480
+#define DPLL_4 0xc4b8
+#define DPLL_4_V520 0xc4bc
+#define DPLL_5 0xc500
+#define DPLL_6 0xc538
+#define DPLL_6_V520 0xc53c
+#define DPLL_7 0xc580
+#define SYS_DPLL 0xc5b8
+#define SYS_DPLL_V520 0xc5bc
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+#define DPLL_CTRL_DPLL_FOD_FREQ 0x001c
+#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
+#define DPLL_CTRL_1 0xc63c
+#define DPLL_CTRL_2 0xc680
+#define DPLL_CTRL_3 0xc6bc
+#define DPLL_CTRL_4 0xc700
+#define DPLL_CTRL_5 0xc73c
+#define DPLL_CTRL_6 0xc780
+#define DPLL_CTRL_7 0xc7bc
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+#define DPLL_PHASE_1 0xc81c
+#define DPLL_PHASE_2 0xc820
+#define DPLL_PHASE_3 0xc824
+#define DPLL_PHASE_4 0xc828
+#define DPLL_PHASE_5 0xc82c
+#define DPLL_PHASE_6 0xc830
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+#define DPLL_FREQ_1 0xc840
+#define DPLL_FREQ_2 0xc848
+#define DPLL_FREQ_3 0xc850
+#define DPLL_FREQ_4 0xc858
+#define DPLL_FREQ_5 0xc860
+#define DPLL_FREQ_6 0xc868
+#define DPLL_FREQ_7 0xc870
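The comment above fixes the units: a signed 42-bit fractional frequency offset in steps of 2^-53, so an offset of ppb parts per billion encodes as ppb * 2^53 / 10^9. A conversion sketch (the helper is hypothetical, not the clockmatrix driver's own routine, and the caller must still clamp the result to the signed 42-bit field):

#include <linux/kernel.h>
#include <linux/math64.h>

static s64 example_ffo_from_ppb(s64 ppb)
{
	/* mul_u64_u64_div_u64() avoids the 64-bit overflow of ppb << 53;
	 * the sign is carried separately. */
	u64 mag = mul_u64_u64_div_u64(abs(ppb), 1ULL << 53, 1000000000ULL);

	return ppb < 0 ? -(s64)mag : (s64)mag;
}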
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+#define DPLL_PHASE_PULL_IN_1 0xc888
+#define DPLL_PHASE_PULL_IN_2 0xc890
+#define DPLL_PHASE_PULL_IN_3 0xc898
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+#define GPIO_CTRL_V520 0x0011
+#define GPIO_1 0xc8d4
+#define GPIO_2 0xc8e6
+#define GPIO_3 0xc900
+#define GPIO_4 0xc912
+#define GPIO_5 0xc924
+#define GPIO_6 0xc936
+#define GPIO_7 0xc948
+#define GPIO_8 0xc95a
+#define GPIO_9 0xc980
+#define GPIO_10 0xc992
+#define GPIO_11 0xc9a4
+#define GPIO_12 0xc9b6
+#define GPIO_13 0xc9c8
+#define GPIO_14 0xc9da
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+#define OUTPUT_0 0xca14
+#define OUTPUT_0_V520 0xca20
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+#define OUTPUT_1 0xca24
+#define OUTPUT_1_V520 0xca30
+#define OUTPUT_2 0xca34
+#define OUTPUT_2_V520 0xca40
+#define OUTPUT_3 0xca44
+#define OUTPUT_3_V520 0xca50
+#define OUTPUT_4 0xca54
+#define OUTPUT_4_V520 0xca60
+#define OUTPUT_5 0xca64
+#define OUTPUT_5_V520 0xca80
+#define OUTPUT_6 0xca80
+#define OUTPUT_6_V520 0xca90
+#define OUTPUT_7 0xca90
+#define OUTPUT_7_V520 0xcaa0
+#define OUTPUT_8 0xcaa0
+#define OUTPUT_8_V520 0xcab0
+#define OUTPUT_9 0xcab0
+#define OUTPUT_9_V520 0xcac0
+#define OUTPUT_10 0xcac0
+#define OUTPUT_10_V520 0xcad0
+#define OUTPUT_11 0xcad0
+#define OUTPUT_11_V520 0xcae0
+
+#define SERIAL 0xcae0
+#define SERIAL_V520 0xcaf0
+
+#define PWM_ENCODER_0 0xcb00
+#define PWM_ENCODER_1 0xcb08
+#define PWM_ENCODER_2 0xcb10
+#define PWM_ENCODER_3 0xcb18
+#define PWM_ENCODER_4 0xcb20
+#define PWM_ENCODER_5 0xcb28
+#define PWM_ENCODER_6 0xcb30
+#define PWM_ENCODER_7 0xcb38
+#define PWM_DECODER_0 0xcb40
+#define PWM_DECODER_1 0xcb48
+#define PWM_DECODER_1_V520 0xcb4a
+#define PWM_DECODER_2 0xcb50
+#define PWM_DECODER_2_V520 0xcb54
+#define PWM_DECODER_3 0xcb58
+#define PWM_DECODER_3_V520 0xcb5e
+#define PWM_DECODER_4 0xcb60
+#define PWM_DECODER_4_V520 0xcb68
+#define PWM_DECODER_5 0xcb68
+#define PWM_DECODER_5_V520 0xcb80
+#define PWM_DECODER_6 0xcb70
+#define PWM_DECODER_6_V520 0xcb8a
+#define PWM_DECODER_7 0xcb80
+#define PWM_DECODER_7_V520 0xcb94
+#define PWM_DECODER_8 0xcb88
+#define PWM_DECODER_8_V520 0xcb9e
+#define PWM_DECODER_9 0xcb90
+#define PWM_DECODER_9_V520 0xcba8
+#define PWM_DECODER_10 0xcb98
+#define PWM_DECODER_10_V520 0xcbb2
+#define PWM_DECODER_11 0xcba0
+#define PWM_DECODER_11_V520 0xcbbc
+#define PWM_DECODER_12 0xcba8
+#define PWM_DECODER_12_V520 0xcbc6
+#define PWM_DECODER_13 0xcbb0
+#define PWM_DECODER_13_V520 0xcbd0
+#define PWM_DECODER_14 0xcbb8
+#define PWM_DECODER_14_V520 0xcbda
+#define PWM_DECODER_15 0xcbc0
+#define PWM_DECODER_15_V520 0xcbe4
+#define PWM_USER_DATA 0xcbc8
+#define PWM_USER_DATA_V520 0xcbf0
+
+#define TOD_0 0xcbcc
+#define TOD_0_V520 0xcc00
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+#define TOD_CFG_V520 0x0001
+#define TOD_1 0xcbce
+#define TOD_1_V520 0xcc02
+#define TOD_2 0xcbd0
+#define TOD_2_V520 0xcc04
+#define TOD_3 0xcbd2
+#define TOD_3_V520 0xcc06
+
+#define TOD_WRITE_0 0xcc00
+#define TOD_WRITE_0_V520 0xcc10
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+#define TOD_WRITE_1 0xcc10
+#define TOD_WRITE_1_V520 0xcc20
+#define TOD_WRITE_2 0xcc20
+#define TOD_WRITE_2_V520 0xcc30
+#define TOD_WRITE_3 0xcc30
+#define TOD_WRITE_3_V520 0xcc40
+
+#define TOD_READ_PRIMARY_0 0xcc40
+#define TOD_READ_PRIMARY_0_V520 0xcc50
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY_BASE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+#define TOD_READ_PRIMARY_CMD_V520 0x000f
+#define TOD_READ_PRIMARY_1 0xcc50
+#define TOD_READ_PRIMARY_1_V520 0xcc60
+#define TOD_READ_PRIMARY_2 0xcc60
+#define TOD_READ_PRIMARY_2_V520 0xcc80
+#define TOD_READ_PRIMARY_3 0xcc80
+#define TOD_READ_PRIMARY_3_V520 0xcc90
+
+#define TOD_READ_SECONDARY_0 0xcc90
+#define TOD_READ_SECONDARY_0_V520 0xcca0
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_SECONDARY_BASE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_SECONDARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_SECONDARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_SECONDARY_CMD 0x000e
+#define TOD_READ_SECONDARY_CMD_V520 0x000f
+
+#define TOD_READ_SECONDARY_1 0xcca0
+#define TOD_READ_SECONDARY_1_V520 0xccb0
+#define TOD_READ_SECONDARY_2 0xccb0
+#define TOD_READ_SECONDARY_2_V520 0xccc0
+#define TOD_READ_SECONDARY_3 0xccc0
+#define TOD_READ_SECONDARY_3_V520 0xccd0
+
+#define OUTPUT_TDC_CFG 0xccd0
+#define OUTPUT_TDC_CFG_V520 0xcce0
+#define OUTPUT_TDC_0 0xcd00
+#define OUTPUT_TDC_1 0xcd08
+#define OUTPUT_TDC_2 0xcd10
+#define OUTPUT_TDC_3 0xcd18
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+#define SCRATCH_V520 0xcf4c
+
+#define EEPROM 0xcf68
+#define EEPROM_V520 0xcf64
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the DPLL_MANU_REF_CFG register */
+#define MANUAL_REFERENCE_SHIFT (0)
+#define MANUAL_REFERENCE_MASK (0x1f)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+/* 4.8.7 */
+#define TOD_WRITE_TYPE_SHIFT (4)
+#define TOD_WRITE_TYPE_MASK (0x3)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
+#define COMBO_MASTER_HOLD BIT(0)
+
+/* Bit definitions for DPLL_SYS_STATUS register */
+#define DPLL_SYS_STATE_MASK (0xf)
+
+/* Bit definitions for SYS_APLL_STATUS register */
+#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
+#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
+#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
+
+/* Bit definitions for the DPLL0_STATUS register */
+#define DPLL_STATE_MASK (0xf)
+#define DPLL_STATE_SHIFT (0x0)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_PLL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_DISABLED = 6,
+ PLL_MODE_MAX = PLL_MODE_DISABLED,
+};
+
+/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
+enum manual_reference {
+ MANU_REF_MIN = 0,
+ MANU_REF_CLK0 = MANU_REF_MIN,
+ MANU_REF_CLK1,
+ MANU_REF_CLK2,
+ MANU_REF_CLK3,
+ MANU_REF_CLK4,
+ MANU_REF_CLK5,
+ MANU_REF_CLK6,
+ MANU_REF_CLK7,
+ MANU_REF_CLK8,
+ MANU_REF_CLK9,
+ MANU_REF_CLK10,
+ MANU_REF_CLK11,
+ MANU_REF_CLK12,
+ MANU_REF_CLK13,
+ MANU_REF_CLK14,
+ MANU_REF_CLK15,
+ MANU_REF_WRITE_PHASE,
+ MANU_REF_WRITE_FREQUENCY,
+ MANU_REF_XO_DPLL,
+ MANU_REF_MAX = MANU_REF_XO_DPLL,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+enum scsr_read_trig_sel {
+ /* CANCEL CURRENT TOD READ; MODULE BECOMES IDLE - NO TRIGGER OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_DISABLE = 0,
+ /* TRIGGER IMMEDIATELY */
+ SCSR_TOD_READ_TRIG_SEL_IMMEDIATE = 1,
+ /* TRIGGER ON RISING EDGE OF INTERNAL TOD PPS SIGNAL */
+ SCSR_TOD_READ_TRIG_SEL_TODPPS = 2,
+	/* TRIGGER ON RISING EDGE OF SELECTED REFERENCE INPUT */
+ SCSR_TOD_READ_TRIG_SEL_REFCLK = 3,
+ /* TRIGGER ON RISING EDGE OF SELECTED PWM DECODER 1PPS OUTPUT */
+ SCSR_TOD_READ_TRIG_SEL_PWMPPS = 4,
+ SCSR_TOD_READ_TRIG_SEL_RESERVED = 5,
+ /* TRIGGER WHEN WRITE FREQUENCY EVENT OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_WRITEFREQUENCYEVENT = 6,
+ /* TRIGGER ON SELECTED GPIO */
+ SCSR_TOD_READ_TRIG_SEL_GPIO = 7,
+ SCSR_TOD_READ_TRIG_SEL_MAX = SCSR_TOD_READ_TRIG_SEL_GPIO,
+};
+
+/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKACQ = 1,
+ DPLL_STATE_LOCKREC = 2,
+ DPLL_STATE_LOCKED = 3,
+ DPLL_STATE_HOLDOVER = 4,
+ DPLL_STATE_OPEN_LOOP = 5,
+ DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_trig_sel {
+ SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
+ SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
+ SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
+ SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
+ SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
+ SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
+ SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_type_sel {
+ SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
+ SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+};
+#endif
diff --git a/include/linux/mfd/idtRC38xxx_reg.h b/include/linux/mfd/idtRC38xxx_reg.h
new file mode 100644
index 000000000000..ec11872f51ad
--- /dev/null
+++ b/include/linux/mfd/idtRC38xxx_reg.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on PolarBear_CSRs.RevA.xlsx (2023-04-21)
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef MFD_IDTRC38XXX_REG
+#define MFD_IDTRC38XXX_REG
+
+/* GLOBAL */
+#define SOFT_RESET_CTRL (0x15) /* Specific to FC3W */
+#define MISC_CTRL (0x14) /* Specific to FC3A */
+#define APLL_REINIT BIT(1)
+#define APLL_REINIT_VFC3A BIT(2)
+
+#define DEVICE_ID (0x2)
+#define DEVICE_ID_MASK (0x1000) /* Bit 12 is 1 if FC3W and 0 if FC3A */
+#define DEVICE_ID_SHIFT (12)
+
+/* FOD */
+#define FOD_0 (0x300)
+#define FOD_0_VFC3A (0x400)
+#define FOD_1 (0x340)
+#define FOD_1_VFC3A (0x440)
+#define FOD_2 (0x380)
+#define FOD_2_VFC3A (0x480)
+
+/* TDCAPLL */
+#define TDC_CTRL (0x44a) /* Specific to FC3W */
+#define TDC_ENABLE_CTRL (0x169) /* Specific to FC3A */
+#define TDC_DAC_CAL_CTRL (0x16a) /* Specific to FC3A */
+#define TDC_EN BIT(0)
+#define TDC_DAC_RECAL_REQ BIT(1)
+#define TDC_DAC_RECAL_REQ_VFC3A BIT(0)
+
+#define TDC_FB_DIV_INT_CNFG (0x442)
+#define TDC_FB_DIV_INT_CNFG_VFC3A (0x162)
+#define TDC_FB_DIV_INT_MASK GENMASK(7, 0)
+#define TDC_REF_DIV_CNFG (0x443)
+#define TDC_REF_DIV_CNFG_VFC3A (0x163)
+#define TDC_REF_DIV_CONFIG_MASK GENMASK(2, 0)
+
+/* TIME SYNC CHANNEL */
+#define TIME_CLOCK_SRC (0xa01) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT (0xa00) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT_MASK GENMASK(5, 0)
+
+#define SUB_SYNC_GEN_CNFG (0xa04)
+
+#define TOD_COUNTER_READ_REQ (0xa5f)
+#define TOD_COUNTER_READ_REQ_VFC3A (0x6df)
+#define TOD_SYNC_LOAD_VAL_CTRL (0xa10)
+#define TOD_SYNC_LOAD_VAL_CTRL_VFC3A (0x690)
+#define SYNC_COUNTER_MASK GENMASK_ULL(51, 0)
+#define SUB_SYNC_COUNTER_MASK GENMASK(30, 0)
+#define TOD_SYNC_LOAD_REQ_CTRL (0xa21)
+#define TOD_SYNC_LOAD_REQ_CTRL_VFC3A (0x6a1)
+#define SYNC_LOAD_ENABLE BIT(1)
+#define SUB_SYNC_LOAD_ENABLE BIT(0)
+#define SYNC_LOAD_REQ BIT(0)
+
+#define LPF_MODE_CNFG (0xa80)
+#define LPF_MODE_CNFG_VFC3A (0x700)
+enum lpf_mode {
+ LPF_DISABLED = 0,
+ LPF_WP = 1,
+ LPF_HOLDOVER = 2,
+ LPF_WF = 3,
+ LPF_INVALID = 4
+};
+#define LPF_CTRL (0xa98)
+#define LPF_CTRL_VFC3A (0x718)
+#define LPF_EN BIT(0)
+
+#define LPF_BW_CNFG (0xa81)
+#define LPF_BW_SHIFT GENMASK(7, 3)
+#define LPF_BW_MULT GENMASK(2, 0)
+#define LPF_BW_SHIFT_DEFAULT (0xb)
+#define LPF_BW_MULT_DEFAULT (0x0)
+#define LPF_BW_SHIFT_1PPS (0x5)
+
+#define LPF_WR_PHASE_CTRL (0xaa8)
+#define LPF_WR_PHASE_CTRL_VFC3A (0x728)
+#define LPF_WR_FREQ_CTRL (0xab0)
+#define LPF_WR_FREQ_CTRL_VFC3A (0x730)
+
+#define TIME_CLOCK_TDC_FANOUT_CNFG (0xB00)
+#define TIME_SYNC_TO_TDC_EN BIT(0)
+#define SIG1_MUX_SEL_MASK GENMASK(7, 4)
+#define SIG2_MUX_SEL_MASK GENMASK(11, 8)
+enum tdc_mux_sel {
+ REF0 = 0,
+ REF1 = 1,
+ REF2 = 2,
+ REF3 = 3,
+ REF_CLK5 = 4,
+ REF_CLK6 = 5,
+ DPLL_FB_TO_TDC = 6,
+ DPLL_FB_DIVIDED_TO_TDC = 7,
+ TIME_CLK_DIVIDED = 8,
+ TIME_SYNC = 9,
+};
+
+#define TIME_CLOCK_MEAS_CNFG (0xB04)
+#define TDC_MEAS_MODE BIT(0)
+enum tdc_meas_mode {
+ CONTINUOUS = 0,
+ ONE_SHOT = 1,
+ MEAS_MODE_INVALID = 2,
+};
+
+#define TIME_CLOCK_MEAS_DIV_CNFG (0xB08)
+#define TIME_REF_DIV_MASK GENMASK(29, 24)
+
+#define TIME_CLOCK_MEAS_CTRL (0xB10)
+#define TDC_MEAS_EN BIT(0)
+#define TDC_MEAS_START BIT(1)
+
+#define TDC_FIFO_READ_REQ (0xB2F)
+#define TDC_FIFO_READ (0xB30)
+#define COARSE_MEAS_MASK GENMASK_ULL(39, 13)
+#define FINE_MEAS_MASK GENMASK(12, 0)
+
+#define TDC_FIFO_CTRL (0xB12)
+#define FIFO_CLEAR BIT(0)
+#define TDC_FIFO_STS (0xB38)
+#define FIFO_FULL BIT(1)
+#define FIFO_EMPTY BIT(0)
+#define TDC_FIFO_EVENT (0xB39)
+#define FIFO_OVERRUN BIT(1)
+
+/* DPLL */
+#define MAX_REFERENCE_INDEX (3)
+#define MAX_NUM_REF_PRIORITY (4)
+
+#define MAX_DPLL_INDEX (2)
+
+#define DPLL_STS (0x580)
+#define DPLL_STS_VFC3A (0x571)
+#define DPLL_STATE_STS_MASK (0x70)
+#define DPLL_STATE_STS_SHIFT (4)
+#define DPLL_REF_SEL_STS_MASK (0x6)
+#define DPLL_REF_SEL_STS_SHIFT (1)
+
+#define DPLL_REF_PRIORITY_CNFG (0x502)
+#define DPLL_REFX_PRIORITY_DISABLE_MASK (0xf)
+#define DPLL_REF0_PRIORITY_ENABLE_AND_SET_MASK (0x31)
+#define DPLL_REF1_PRIORITY_ENABLE_AND_SET_MASK (0xc2)
+#define DPLL_REF2_PRIORITY_ENABLE_AND_SET_MASK (0x304)
+#define DPLL_REF3_PRIORITY_ENABLE_AND_SET_MASK (0xc08)
+#define DPLL_REF0_PRIORITY_SHIFT (4)
+#define DPLL_REF1_PRIORITY_SHIFT (6)
+#define DPLL_REF2_PRIORITY_SHIFT (8)
+#define DPLL_REF3_PRIORITY_SHIFT (10)
+
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKED = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_WRITE_FREQUENCY = 3,
+ DPLL_STATE_ACQUIRE = 4,
+ DPLL_STATE_HITLESS_SWITCH = 5,
+ DPLL_STATE_MAX = DPLL_STATE_HITLESS_SWITCH
+};
+
+/* REFMON */
+#define LOSMON_STS_0 (0x81e)
+#define LOSMON_STS_0_VFC3A (0x18e)
+#define LOSMON_STS_1 (0x82e)
+#define LOSMON_STS_1_VFC3A (0x19e)
+#define LOSMON_STS_2 (0x83e)
+#define LOSMON_STS_2_VFC3A (0x1ae)
+#define LOSMON_STS_3 (0x84e)
+#define LOSMON_STS_3_VFC3A (0x1be)
+#define LOS_STS_MASK (0x1)
+
+#define FREQMON_STS_0 (0x874)
+#define FREQMON_STS_0_VFC3A (0x1d4)
+#define FREQMON_STS_1 (0x894)
+#define FREQMON_STS_1_VFC3A (0x1f4)
+#define FREQMON_STS_2 (0x8b4)
+#define FREQMON_STS_2_VFC3A (0x214)
+#define FREQMON_STS_3 (0x8d4)
+#define FREQMON_STS_3_VFC3A (0x234)
+#define FREQ_FAIL_STS_SHIFT (31)
+
+/* Firmware interface */
+#define TIME_CLK_FREQ_ADDR (0xffa0)
+#define XTAL_FREQ_ADDR (0xffa1)
+
+/*
+ * Return the register address or field mask based on the passed-in firmware version
+ */
+#define IDTFC3_FW_REG(FW, VER, REG) (((FW) < (VER)) ? (REG) : (REG##_##VER))
+#define IDTFC3_FW_FIELD(FW, VER, FIELD) (((FW) < (VER)) ? (FIELD) : (FIELD##_##VER))
+enum fw_version {
+ V_DEFAULT = 0,
+ VFC3W = 1,
+ VFC3A = 2
+};
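+
+/*
+ * Illustrative sketch, not part of the register map: resolving a
+ * version-dependent register address with IDTFC3_FW_REG(). The helper
+ * name below is hypothetical. For firmware older than VFC3A the base
+ * define is used; otherwise "_VFC3A" is token-pasted onto the name,
+ * e.g. LOSMON_STS_0 (0x81e) vs. LOSMON_STS_0_VFC3A (0x18e).
+ */
+static inline unsigned int idtfc3_losmon_sts_addr(enum fw_version fw)
+{
+	return IDTFC3_FW_REG(fw, VFC3A, LOSMON_STS_0);
+}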
+
+/* XTAL_FREQ_ADDR/TIME_CLK_FREQ_ADDR */
+enum {
+ FREQ_MIN = 0,
+ FREQ_25M = 1,
+ FREQ_49_152M = 2,
+ FREQ_50M = 3,
+ FREQ_100M = 4,
+ FREQ_125M = 5,
+ FREQ_250M = 6,
+ FREQ_MAX
+};
+
+struct idtfc3_hw_param {
+ u32 xtal_freq;
+ u32 time_clk_freq;
+};
+
+struct idtfc3_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+static inline void idtfc3_default_hw_param(struct idtfc3_hw_param *hw_param)
+{
+ hw_param->xtal_freq = 49152000;
+ hw_param->time_clk_freq = 25000000;
+}
+
+static inline int idtfc3_set_hw_param(struct idtfc3_hw_param *hw_param,
+ u16 addr, u8 val)
+{
+ if (addr == XTAL_FREQ_ADDR)
+ switch (val) {
+ case FREQ_49_152M:
+ hw_param->xtal_freq = 49152000;
+ break;
+ case FREQ_50M:
+ hw_param->xtal_freq = 50000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else if (addr == TIME_CLK_FREQ_ADDR)
+ switch (val) {
+ case FREQ_25M:
+ hw_param->time_clk_freq = 25000000;
+ break;
+ case FREQ_50M:
+ hw_param->time_clk_freq = 50000000;
+ break;
+ case FREQ_100M:
+ hw_param->time_clk_freq = 100000000;
+ break;
+ case FREQ_125M:
+ hw_param->time_clk_freq = 125000000;
+ break;
+ case FREQ_250M:
+ hw_param->time_clk_freq = 250000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EFAULT;
+
+ return 0;
+}
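+
+/*
+ * Minimal usage sketch (hypothetical helper): translate the two firmware
+ * configuration words read from XTAL_FREQ_ADDR and TIME_CLK_FREQ_ADDR
+ * into hardware parameters via idtfc3_set_hw_param().
+ */
+static inline int idtfc3_parse_fw_freqs(struct idtfc3_hw_param *hw_param,
+					u8 xtal_sel, u8 time_clk_sel)
+{
+	int err;
+
+	err = idtfc3_set_hw_param(hw_param, XTAL_FREQ_ADDR, xtal_sel);
+	if (err)
+		return err;
+
+	return idtfc3_set_hw_param(hw_param, TIME_CLK_FREQ_ADDR, time_clk_sel);
+}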
+
+#endif
diff --git a/include/linux/mfd/imx25-tsadc.h b/include/linux/mfd/imx25-tsadc.h
index 7fe4b8c3baac..21f8adfefd1d 100644
--- a/include/linux/mfd/imx25-tsadc.h
+++ b/include/linux/mfd/imx25-tsadc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INCLUDE_MFD_IMX25_TSADC_H_
#define _LINUX_INCLUDE_MFD_IMX25_TSADC_H_
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h
new file mode 100644
index 000000000000..2083fa20821d
--- /dev/null
+++ b/include/linux/mfd/ingenic-tcu.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for the Ingenic JZ47xx TCU driver
+ */
+#ifndef __LINUX_MFD_INGENIC_TCU_H_
+#define __LINUX_MFD_INGENIC_TCU_H_
+
+#include <linux/bitops.h>
+
+#define TCU_REG_WDT_TDR 0x00
+#define TCU_REG_WDT_TCER 0x04
+#define TCU_REG_WDT_TCNT 0x08
+#define TCU_REG_WDT_TCSR 0x0c
+#define TCU_REG_TER 0x10
+#define TCU_REG_TESR 0x14
+#define TCU_REG_TECR 0x18
+#define TCU_REG_TSR 0x1c
+#define TCU_REG_TFR 0x20
+#define TCU_REG_TFSR 0x24
+#define TCU_REG_TFCR 0x28
+#define TCU_REG_TSSR 0x2c
+#define TCU_REG_TMR 0x30
+#define TCU_REG_TMSR 0x34
+#define TCU_REG_TMCR 0x38
+#define TCU_REG_TSCR 0x3c
+#define TCU_REG_TDFR0 0x40
+#define TCU_REG_TDHR0 0x44
+#define TCU_REG_TCNT0 0x48
+#define TCU_REG_TCSR0 0x4c
+#define TCU_REG_OST_DR 0xe0
+#define TCU_REG_OST_CNTL 0xe4
+#define TCU_REG_OST_CNTH 0xe8
+#define TCU_REG_OST_TCSR 0xec
+#define TCU_REG_TSTR 0xf0
+#define TCU_REG_TSTSR 0xf4
+#define TCU_REG_TSTCR 0xf8
+#define TCU_REG_OST_CNTHBUF 0xfc
+
+#define TCU_TCSR_RESERVED_BITS 0x3f
+#define TCU_TCSR_PARENT_CLOCK_MASK 0x07
+#define TCU_TCSR_PRESCALE_LSB 3
+#define TCU_TCSR_PRESCALE_MASK 0x38
+
+#define TCU_TCSR_PWM_SD		BIT(9)	/* 0: shut down gracefully, 1: shut down abruptly */
+#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
+#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
+
+#define TCU_WDT_TCER_TCEN BIT(0) /* Watchdog timer enable */
+
+#define TCU_CHANNEL_STRIDE 0x10
+#define TCU_REG_TDFRc(c) (TCU_REG_TDFR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TDHRc(c) (TCU_REG_TDHR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCNTc(c) (TCU_REG_TCNT0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCSRc(c) (TCU_REG_TCSR0 + ((c) * TCU_CHANNEL_STRIDE))
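+
+/*
+ * Illustration (not an upstream comment): the per-channel registers sit
+ * at a fixed 0x10 stride, so e.g. TCU_REG_TCNTc(2) evaluates to
+ * 0x48 + 2 * 0x10 = 0x68.
+ */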
+
+#endif /* __LINUX_MFD_INGENIC_TCU_H_ */
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
new file mode 100644
index 000000000000..988f1cd90032
--- /dev/null
+++ b/include/linux/mfd/intel-m10-bmc.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel MAX 10 Board Management Controller chip.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation, Inc.
+ */
+#ifndef __MFD_INTEL_M10_BMC_H
+#define __MFD_INTEL_M10_BMC_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/regmap.h>
+#include <linux/rwsem.h>
+
+#define M10BMC_N3000_LEGACY_BUILD_VER 0x300468
+#define M10BMC_N3000_SYS_BASE 0x300800
+#define M10BMC_N3000_SYS_END 0x300fff
+#define M10BMC_N3000_FLASH_BASE 0x10000000
+#define M10BMC_N3000_FLASH_END 0x1fffffff
+#define M10BMC_N3000_MEM_END M10BMC_N3000_FLASH_END
+
+#define M10BMC_STAGING_BASE 0x18000000
+#define M10BMC_STAGING_SIZE 0x3800000
+
+/* Register offset of system registers */
+#define NIOS2_N3000_FW_VERSION 0x0
+#define M10BMC_N3000_MAC_LOW 0x10
+#define M10BMC_N3000_MAC_BYTE4 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE3 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_BYTE2 GENMASK(23, 16)
+#define M10BMC_N3000_MAC_BYTE1 GENMASK(31, 24)
+#define M10BMC_N3000_MAC_HIGH 0x14
+#define M10BMC_N3000_MAC_BYTE6 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE5 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_COUNT GENMASK(23, 16)
+#define M10BMC_N3000_TEST_REG 0x3c
+#define M10BMC_N3000_BUILD_VER 0x68
+#define M10BMC_N3000_VER_MAJOR_MSK GENMASK(23, 16)
+#define M10BMC_N3000_VER_PCB_INFO_MSK GENMASK(31, 24)
+#define M10BMC_N3000_VER_LEGACY_INVALID 0xffffffff
+
+/* Telemetry registers */
+#define M10BMC_N3000_TELEM_START 0x100
+#define M10BMC_N3000_TELEM_END 0x250
+#define M10BMC_D5005_TELEM_END 0x300
+
+/* Secure update doorbell register, in system register region */
+#define M10BMC_N3000_DOORBELL 0x400
+
+/* Authorization Result register, in system register region */
+#define M10BMC_N3000_AUTH_RESULT 0x404
+
+/* Doorbell register fields */
+#define DRBL_RSU_REQUEST BIT(0)
+#define DRBL_RSU_PROGRESS GENMASK(7, 4)
+#define DRBL_HOST_STATUS GENMASK(11, 8)
+#define DRBL_RSU_STATUS GENMASK(23, 16)
+#define DRBL_PKVL_EEPROM_LOAD_SEC BIT(24)
+#define DRBL_PKVL1_POLL_EN BIT(25)
+#define DRBL_PKVL2_POLL_EN BIT(26)
+#define DRBL_CONFIG_SEL BIT(28)
+#define DRBL_REBOOT_REQ BIT(29)
+#define DRBL_REBOOT_DISABLED BIT(30)
+
+/* Progress states */
+#define RSU_PROG_IDLE 0x0
+#define RSU_PROG_PREPARE 0x1
+#define RSU_PROG_READY 0x3
+#define RSU_PROG_AUTHENTICATING 0x4
+#define RSU_PROG_COPYING 0x5
+#define RSU_PROG_UPDATE_CANCEL 0x6
+#define RSU_PROG_PROGRAM_KEY_HASH 0x7
+#define RSU_PROG_RSU_DONE 0x8
+#define RSU_PROG_PKVL_PROM_DONE 0x9
+
+/* Device and error states */
+#define RSU_STAT_NORMAL 0x0
+#define RSU_STAT_TIMEOUT 0x1
+#define RSU_STAT_AUTH_FAIL 0x2
+#define RSU_STAT_COPY_FAIL 0x3
+#define RSU_STAT_FATAL 0x4
+#define RSU_STAT_PKVL_REJECT 0x5
+#define RSU_STAT_NON_INC 0x6
+#define RSU_STAT_ERASE_FAIL 0x7
+#define RSU_STAT_WEAROUT 0x8
+#define RSU_STAT_NIOS_OK 0x80
+#define RSU_STAT_USER_OK 0x81
+#define RSU_STAT_FACTORY_OK 0x82
+#define RSU_STAT_USER_FAIL 0x83
+#define RSU_STAT_FACTORY_FAIL 0x84
+#define RSU_STAT_NIOS_FLASH_ERR 0x85
+#define RSU_STAT_FPGA_FLASH_ERR 0x86
+
+#define HOST_STATUS_IDLE 0x0
+#define HOST_STATUS_WRITE_DONE 0x1
+#define HOST_STATUS_ABORT_RSU 0x2
+
+#define rsu_prog(doorbell) FIELD_GET(DRBL_RSU_PROGRESS, doorbell)
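+/* Illustrative decode: rsu_prog(0x30) == RSU_PROG_READY */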
+
+/* interval 100ms and timeout 5s */
+#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000)
+#define NIOS_HANDSHAKE_TIMEOUT_US (5 * 1000 * 1000)
+
+/* RSU PREP Timeout (2 minutes) to erase flash staging area */
+#define RSU_PREP_INTERVAL_MS 100
+#define RSU_PREP_TIMEOUT_MS (2 * 60 * 1000)
+
+/* RSU Complete Timeout (40 minutes) for full flash update */
+#define RSU_COMPLETE_INTERVAL_MS 1000
+#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000)
+
+/* Addresses for security related data in FLASH */
+#define M10BMC_N3000_BMC_REH_ADDR 0x17ffc004
+#define M10BMC_N3000_BMC_PROG_ADDR 0x17ffc000
+#define M10BMC_N3000_BMC_PROG_MAGIC 0x5746
+
+#define M10BMC_N3000_SR_REH_ADDR 0x17ffd004
+#define M10BMC_N3000_SR_PROG_ADDR 0x17ffd000
+#define M10BMC_N3000_SR_PROG_MAGIC 0x5253
+
+#define M10BMC_N3000_PR_REH_ADDR 0x17ffe004
+#define M10BMC_N3000_PR_PROG_ADDR 0x17ffe000
+#define M10BMC_N3000_PR_PROG_MAGIC 0x5250
+
+/* Address of 4KB inverted bit vector containing staging area FLASH count */
+#define M10BMC_N3000_STAGING_FLASH_COUNT 0x17ffb000
+
+#define M10BMC_N6000_INDIRECT_BASE 0x400
+
+#define M10BMC_N6000_SYS_BASE 0x0
+#define M10BMC_N6000_SYS_END 0xfff
+
+#define M10BMC_N6000_DOORBELL 0x1c0
+#define M10BMC_N6000_AUTH_RESULT 0x1c4
+#define AUTH_RESULT_RSU_STATUS GENMASK(23, 16)
+
+#define M10BMC_N6000_BUILD_VER 0x0
+#define NIOS2_N6000_FW_VERSION 0x4
+#define M10BMC_N6000_MAC_LOW 0x20
+#define M10BMC_N6000_MAC_HIGH (M10BMC_N6000_MAC_LOW + 4)
+
+/* Addresses for security related data in FLASH */
+#define M10BMC_N6000_BMC_REH_ADDR 0x7ffc004
+#define M10BMC_N6000_BMC_PROG_ADDR 0x7ffc000
+#define M10BMC_N6000_BMC_PROG_MAGIC 0x5746
+
+#define M10BMC_N6000_SR_REH_ADDR 0x7ffd004
+#define M10BMC_N6000_SR_PROG_ADDR 0x7ffd000
+#define M10BMC_N6000_SR_PROG_MAGIC 0x5253
+
+#define M10BMC_N6000_PR_REH_ADDR 0x7ffe004
+#define M10BMC_N6000_PR_PROG_ADDR 0x7ffe000
+#define M10BMC_N6000_PR_PROG_MAGIC 0x5250
+
+#define M10BMC_N6000_STAGING_FLASH_COUNT 0x7ff5000
+
+#define M10BMC_N6000_FLASH_MUX_CTRL 0x1d0
+#define M10BMC_N6000_FLASH_MUX_SELECTION GENMASK(2, 0)
+#define M10BMC_N6000_FLASH_MUX_IDLE 0
+#define M10BMC_N6000_FLASH_MUX_NIOS 1
+#define M10BMC_N6000_FLASH_MUX_HOST 2
+#define M10BMC_N6000_FLASH_MUX_PFL 4
+#define get_flash_mux(mux) FIELD_GET(M10BMC_N6000_FLASH_MUX_SELECTION, mux)
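+/* Illustrative decode: get_flash_mux(0x2) == M10BMC_N6000_FLASH_MUX_HOST */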
+
+#define M10BMC_N6000_FLASH_NIOS_REQUEST BIT(4)
+#define M10BMC_N6000_FLASH_HOST_REQUEST BIT(5)
+
+#define M10BMC_N6000_FLASH_CTRL 0x40
+#define M10BMC_N6000_FLASH_WR_MODE BIT(0)
+#define M10BMC_N6000_FLASH_RD_MODE BIT(1)
+#define M10BMC_N6000_FLASH_BUSY BIT(2)
+#define M10BMC_N6000_FLASH_FIFO_SPACE GENMASK(13, 4)
+#define M10BMC_N6000_FLASH_READ_COUNT GENMASK(25, 16)
+
+#define M10BMC_N6000_FLASH_ADDR 0x44
+#define M10BMC_N6000_FLASH_FIFO 0x800
+#define M10BMC_N6000_READ_BLOCK_SIZE 0x800
+#define M10BMC_N6000_FIFO_MAX_BYTES 0x800
+#define M10BMC_N6000_FIFO_WORD_SIZE 4
+#define M10BMC_N6000_FIFO_MAX_WORDS (M10BMC_N6000_FIFO_MAX_BYTES / \
+ M10BMC_N6000_FIFO_WORD_SIZE)
+
+#define M10BMC_FLASH_INT_US 1
+#define M10BMC_FLASH_TIMEOUT_US 10000
+
+/**
+ * struct m10bmc_csr_map - Intel MAX 10 BMC CSR register map
+ */
+struct m10bmc_csr_map {
+ unsigned int base;
+ unsigned int build_version;
+ unsigned int fw_version;
+ unsigned int mac_low;
+ unsigned int mac_high;
+ unsigned int doorbell;
+ unsigned int auth_result;
+ unsigned int bmc_prog_addr;
+ unsigned int bmc_reh_addr;
+ unsigned int bmc_magic;
+ unsigned int sr_prog_addr;
+ unsigned int sr_reh_addr;
+ unsigned int sr_magic;
+ unsigned int pr_prog_addr;
+ unsigned int pr_reh_addr;
+ unsigned int pr_magic;
+ unsigned int rsu_update_counter;
+ unsigned int staging_size;
+};
+
+/**
+ * struct intel_m10bmc_platform_info - Intel MAX 10 BMC platform specific information
+ * @cells: MFD cells
+ * @n_cells: number of MFD cells in @cells
+ * @handshake_sys_reg_ranges: array of register ranges for fw handshake regs
+ * @handshake_sys_reg_nranges: number of register ranges for fw handshake regs
+ * @csr_map: the mappings for register definition of MAX10 BMC
+ */
+struct intel_m10bmc_platform_info {
+ struct mfd_cell *cells;
+ int n_cells;
+ const struct regmap_range *handshake_sys_reg_ranges;
+ unsigned int handshake_sys_reg_nranges;
+ const struct m10bmc_csr_map *csr_map;
+};
+
+struct intel_m10bmc;
+
+/**
+ * struct intel_m10bmc_flash_bulk_ops - device specific operations for flash R/W
+ * @read: read a block of data from flash
+ * @write: write a block of data to flash
+ * @lock_write: locks flash access for erase+write
+ * @unlock_write: unlock flash access
+ *
+ * Write must be protected with @lock_write and @unlock_write. While the flash
+ * is locked, @read returns -EBUSY.
+ */
+struct intel_m10bmc_flash_bulk_ops {
+ int (*read)(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size);
+ int (*write)(struct intel_m10bmc *m10bmc, const u8 *buf, u32 offset, u32 size);
+ int (*lock_write)(struct intel_m10bmc *m10bmc);
+ void (*unlock_write)(struct intel_m10bmc *m10bmc);
+};
+
+enum m10bmc_fw_state {
+ M10BMC_FW_STATE_NORMAL,
+ M10BMC_FW_STATE_SEC_UPDATE_PREPARE,
+ M10BMC_FW_STATE_SEC_UPDATE_WRITE,
+ M10BMC_FW_STATE_SEC_UPDATE_PROGRAM,
+};
+
+/**
+ * struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
+ * @dev: this device
+ * @regmap: the regmap used to access registers by m10bmc itself
+ * @info: the platform information for MAX10 BMC
+ * @flash_bulk_ops: optional device specific operations for flash R/W
+ * @bmcfw_lock: read/write semaphore to BMC firmware running state
+ * @bmcfw_state: BMC firmware running state. Available only when
+ * handshake_sys_reg_nranges > 0.
+ */
+struct intel_m10bmc {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct intel_m10bmc_platform_info *info;
+ const struct intel_m10bmc_flash_bulk_ops *flash_bulk_ops;
+ struct rw_semaphore bmcfw_lock; /* Protects bmcfw_state */
+ enum m10bmc_fw_state bmcfw_state;
+};
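+
+/*
+ * Minimal sketch of the flash write protocol described above; the helper
+ * name is hypothetical. Writes are bracketed by @lock_write/@unlock_write,
+ * and concurrent @read calls return -EBUSY while the lock is held.
+ */
+static inline int m10bmc_flash_bulk_write(struct intel_m10bmc *m10bmc,
+					  const u8 *buf, u32 offset, u32 size)
+{
+	const struct intel_m10bmc_flash_bulk_ops *ops = m10bmc->flash_bulk_ops;
+	int ret;
+
+	if (!ops)
+		return -EOPNOTSUPP;
+
+	ret = ops->lock_write(m10bmc);
+	if (ret)
+		return ret;
+
+	ret = ops->write(m10bmc, buf, offset, size);
+	ops->unlock_write(m10bmc);
+
+	return ret;
+}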
+
+/*
+ * Register access helper functions.
+ *
+ * m10bmc_raw_read - read m10bmc register per addr
+ * m10bmc_sys_read - read m10bmc system register per offset
+ * m10bmc_sys_update_bits - update m10bmc system register per offset
+ */
+static inline int
+m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(m10bmc->regmap, addr, val);
+ if (ret)
+ dev_err(m10bmc->dev, "fail to read raw reg %x: %d\n",
+ addr, ret);
+
+ return ret;
+}
+
+int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset, unsigned int *val);
+int m10bmc_sys_update_bits(struct intel_m10bmc *m10bmc, unsigned int offset,
+ unsigned int msk, unsigned int val);
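+
+/*
+ * Illustrative sketch (hypothetical helper): system registers are usually
+ * addressed through the per-platform CSR map, e.g. to read the doorbell.
+ */
+static inline int m10bmc_read_doorbell(struct intel_m10bmc *m10bmc,
+				       unsigned int *doorbell)
+{
+	const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
+
+	return m10bmc_sys_read(m10bmc, csr_map->doorbell, doorbell);
+}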
+
+/*
+ * Track the state of the firmware, as it is not available for register
+ * handshakes during secure updates on some MAX 10 cards.
+ */
+void m10bmc_fw_state_set(struct intel_m10bmc *m10bmc, enum m10bmc_fw_state new_state);
+
+/*
+ * MAX10 BMC Core support
+ */
+int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info);
+extern const struct attribute_group *m10bmc_dev_groups[];
+
+#endif /* __MFD_INTEL_M10_BMC_H */
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
deleted file mode 100644
index 439a7a617bc9..000000000000
--- a/include/linux/mfd/intel_msic.h
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC
- *
- * Copyright (C) 2011, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_MFD_INTEL_MSIC_H__
-#define __LINUX_MFD_INTEL_MSIC_H__
-
-/* ID */
-#define INTEL_MSIC_ID0 0x000 /* RO */
-#define INTEL_MSIC_ID1 0x001 /* RO */
-
-/* IRQ */
-#define INTEL_MSIC_IRQLVL1 0x002
-#define INTEL_MSIC_ADC1INT 0x003
-#define INTEL_MSIC_CCINT 0x004
-#define INTEL_MSIC_PWRSRCINT 0x005
-#define INTEL_MSIC_PWRSRCINT1 0x006
-#define INTEL_MSIC_CHRINT 0x007
-#define INTEL_MSIC_CHRINT1 0x008
-#define INTEL_MSIC_RTCIRQ 0x009
-#define INTEL_MSIC_GPIO0LVIRQ 0x00a
-#define INTEL_MSIC_GPIO1LVIRQ 0x00b
-#define INTEL_MSIC_GPIOHVIRQ 0x00c
-#define INTEL_MSIC_VRINT 0x00d
-#define INTEL_MSIC_OCAUDIO 0x00e
-#define INTEL_MSIC_ACCDET 0x00f
-#define INTEL_MSIC_RESETIRQ1 0x010
-#define INTEL_MSIC_RESETIRQ2 0x011
-#define INTEL_MSIC_MADC1INT 0x012
-#define INTEL_MSIC_MCCINT 0x013
-#define INTEL_MSIC_MPWRSRCINT 0x014
-#define INTEL_MSIC_MPWRSRCINT1 0x015
-#define INTEL_MSIC_MCHRINT 0x016
-#define INTEL_MSIC_MCHRINT1 0x017
-#define INTEL_MSIC_RTCIRQMASK 0x018
-#define INTEL_MSIC_GPIO0LVIRQMASK 0x019
-#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a
-#define INTEL_MSIC_GPIOHVIRQMASK 0x01b
-#define INTEL_MSIC_VRINTMASK 0x01c
-#define INTEL_MSIC_OCAUDIOMASK 0x01d
-#define INTEL_MSIC_ACCDETMASK 0x01e
-#define INTEL_MSIC_RESETIRQ1MASK 0x01f
-#define INTEL_MSIC_RESETIRQ2MASK 0x020
-#define INTEL_MSIC_IRQLVL1MSK 0x021
-#define INTEL_MSIC_PBCONFIG 0x03e
-#define INTEL_MSIC_PBSTATUS 0x03f /* RO */
-
-/* GPIO */
-#define INTEL_MSIC_GPIO0LV7CTLO 0x040
-#define INTEL_MSIC_GPIO0LV6CTLO 0x041
-#define INTEL_MSIC_GPIO0LV5CTLO 0x042
-#define INTEL_MSIC_GPIO0LV4CTLO 0x043
-#define INTEL_MSIC_GPIO0LV3CTLO 0x044
-#define INTEL_MSIC_GPIO0LV2CTLO 0x045
-#define INTEL_MSIC_GPIO0LV1CTLO 0x046
-#define INTEL_MSIC_GPIO0LV0CTLO 0x047
-#define INTEL_MSIC_GPIO1LV7CTLOS 0x048
-#define INTEL_MSIC_GPIO1LV6CTLO 0x049
-#define INTEL_MSIC_GPIO1LV5CTLO 0x04a
-#define INTEL_MSIC_GPIO1LV4CTLO 0x04b
-#define INTEL_MSIC_GPIO1LV3CTLO 0x04c
-#define INTEL_MSIC_GPIO1LV2CTLO 0x04d
-#define INTEL_MSIC_GPIO1LV1CTLO 0x04e
-#define INTEL_MSIC_GPIO1LV0CTLO 0x04f
-#define INTEL_MSIC_GPIO0LV7CTLI 0x050
-#define INTEL_MSIC_GPIO0LV6CTLI 0x051
-#define INTEL_MSIC_GPIO0LV5CTLI 0x052
-#define INTEL_MSIC_GPIO0LV4CTLI 0x053
-#define INTEL_MSIC_GPIO0LV3CTLI 0x054
-#define INTEL_MSIC_GPIO0LV2CTLI 0x055
-#define INTEL_MSIC_GPIO0LV1CTLI 0x056
-#define INTEL_MSIC_GPIO0LV0CTLI 0x057
-#define INTEL_MSIC_GPIO1LV7CTLIS 0x058
-#define INTEL_MSIC_GPIO1LV6CTLI 0x059
-#define INTEL_MSIC_GPIO1LV5CTLI 0x05a
-#define INTEL_MSIC_GPIO1LV4CTLI 0x05b
-#define INTEL_MSIC_GPIO1LV3CTLI 0x05c
-#define INTEL_MSIC_GPIO1LV2CTLI 0x05d
-#define INTEL_MSIC_GPIO1LV1CTLI 0x05e
-#define INTEL_MSIC_GPIO1LV0CTLI 0x05f
-#define INTEL_MSIC_PWM0CLKDIV1 0x061
-#define INTEL_MSIC_PWM0CLKDIV0 0x062
-#define INTEL_MSIC_PWM1CLKDIV1 0x063
-#define INTEL_MSIC_PWM1CLKDIV0 0x064
-#define INTEL_MSIC_PWM2CLKDIV1 0x065
-#define INTEL_MSIC_PWM2CLKDIV0 0x066
-#define INTEL_MSIC_PWM0DUTYCYCLE 0x067
-#define INTEL_MSIC_PWM1DUTYCYCLE 0x068
-#define INTEL_MSIC_PWM2DUTYCYCLE 0x069
-#define INTEL_MSIC_GPIO0HV3CTLO 0x06d
-#define INTEL_MSIC_GPIO0HV2CTLO 0x06e
-#define INTEL_MSIC_GPIO0HV1CTLO 0x06f
-#define INTEL_MSIC_GPIO0HV0CTLO 0x070
-#define INTEL_MSIC_GPIO1HV3CTLO 0x071
-#define INTEL_MSIC_GPIO1HV2CTLO 0x072
-#define INTEL_MSIC_GPIO1HV1CTLO 0x073
-#define INTEL_MSIC_GPIO1HV0CTLO 0x074
-#define INTEL_MSIC_GPIO0HV3CTLI 0x075
-#define INTEL_MSIC_GPIO0HV2CTLI 0x076
-#define INTEL_MSIC_GPIO0HV1CTLI 0x077
-#define INTEL_MSIC_GPIO0HV0CTLI 0x078
-#define INTEL_MSIC_GPIO1HV3CTLI 0x079
-#define INTEL_MSIC_GPIO1HV2CTLI 0x07a
-#define INTEL_MSIC_GPIO1HV1CTLI 0x07b
-#define INTEL_MSIC_GPIO1HV0CTLI 0x07c
-
-/* SVID */
-#define INTEL_MSIC_SVIDCTRL0 0x080
-#define INTEL_MSIC_SVIDCTRL1 0x081
-#define INTEL_MSIC_SVIDCTRL2 0x082
-#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */
-#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087
-#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088
-#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089
-#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a
-#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b
-#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c
-#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */
-
-/* VREG */
-#define INTEL_MSIC_VCCLATCH 0x0c0
-#define INTEL_MSIC_VNNLATCH 0x0c1
-#define INTEL_MSIC_VCCCNT 0x0c2
-#define INTEL_MSIC_SMPSRAMP 0x0c3
-#define INTEL_MSIC_VNNCNT 0x0c4
-#define INTEL_MSIC_VNNAONCNT 0x0c5
-#define INTEL_MSIC_VCC122AONCNT 0x0c6
-#define INTEL_MSIC_V180AONCNT 0x0c7
-#define INTEL_MSIC_V500CNT 0x0c8
-#define INTEL_MSIC_VIHFCNT 0x0c9
-#define INTEL_MSIC_LDORAMP1 0x0ca
-#define INTEL_MSIC_LDORAMP2 0x0cb
-#define INTEL_MSIC_VCC108AONCNT 0x0cc
-#define INTEL_MSIC_VCC108ASCNT 0x0cd
-#define INTEL_MSIC_VCC108CNT 0x0ce
-#define INTEL_MSIC_VCCA100ASCNT 0x0cf
-#define INTEL_MSIC_VCCA100CNT 0x0d0
-#define INTEL_MSIC_VCC180AONCNT 0x0d1
-#define INTEL_MSIC_VCC180CNT 0x0d2
-#define INTEL_MSIC_VCC330CNT 0x0d3
-#define INTEL_MSIC_VUSB330CNT 0x0d4
-#define INTEL_MSIC_VCCSDIOCNT 0x0d5
-#define INTEL_MSIC_VPROG1CNT 0x0d6
-#define INTEL_MSIC_VPROG2CNT 0x0d7
-#define INTEL_MSIC_VEMMCSCNT 0x0d8
-#define INTEL_MSIC_VEMMC1CNT 0x0d9
-#define INTEL_MSIC_VEMMC2CNT 0x0da
-#define INTEL_MSIC_VAUDACNT 0x0db
-#define INTEL_MSIC_VHSPCNT 0x0dc
-#define INTEL_MSIC_VHSNCNT 0x0dd
-#define INTEL_MSIC_VHDMICNT 0x0de
-#define INTEL_MSIC_VOTGCNT 0x0df
-#define INTEL_MSIC_V1P35CNT 0x0e0
-#define INTEL_MSIC_V330AONCNT 0x0e1
-
-/* RESET */
-#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */
-#define INTEL_MSIC_ERCONFIG 0x101
-
-/* BURST */
-#define INTEL_MSIC_BATCURRENTLIMIT12 0x102
-#define INTEL_MSIC_BATTIMELIMIT12 0x103
-#define INTEL_MSIC_BATTIMELIMIT3 0x104
-#define INTEL_MSIC_BATTIMEDB 0x105
-#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106
-#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107
-#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108
-
-/* RTC */
-#define INTEL_MSIC_RTCB1 0x140 /* RO */
-#define INTEL_MSIC_RTCB2 0x141 /* RO */
-#define INTEL_MSIC_RTCB3 0x142 /* RO */
-#define INTEL_MSIC_RTCB4 0x143 /* RO */
-#define INTEL_MSIC_RTCOB1 0x144
-#define INTEL_MSIC_RTCOB2 0x145
-#define INTEL_MSIC_RTCOB3 0x146
-#define INTEL_MSIC_RTCOB4 0x147
-#define INTEL_MSIC_RTCAB1 0x148
-#define INTEL_MSIC_RTCAB2 0x149
-#define INTEL_MSIC_RTCAB3 0x14a
-#define INTEL_MSIC_RTCAB4 0x14b
-#define INTEL_MSIC_RTCWAB1 0x14c
-#define INTEL_MSIC_RTCWAB2 0x14d
-#define INTEL_MSIC_RTCWAB3 0x14e
-#define INTEL_MSIC_RTCWAB4 0x14f
-#define INTEL_MSIC_RTCSC1 0x150
-#define INTEL_MSIC_RTCSC2 0x151
-#define INTEL_MSIC_RTCSC3 0x152
-#define INTEL_MSIC_RTCSC4 0x153
-#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */
-#define INTEL_MSIC_RTCCONFIG1 0x155
-#define INTEL_MSIC_RTCCONFIG2 0x156
-
-/* CHARGER */
-#define INTEL_MSIC_BDTIMER 0x180
-#define INTEL_MSIC_BATTRMV 0x181
-#define INTEL_MSIC_VBUSDET 0x182
-#define INTEL_MSIC_VBUSDET1 0x183
-#define INTEL_MSIC_ADPHVDET 0x184
-#define INTEL_MSIC_ADPLVDET 0x185
-#define INTEL_MSIC_ADPDETDBDM 0x186
-#define INTEL_MSIC_LOWBATTDET 0x187
-#define INTEL_MSIC_CHRCTRL 0x188
-#define INTEL_MSIC_CHRCVOLTAGE 0x189
-#define INTEL_MSIC_CHRCCURRENT 0x18a
-#define INTEL_MSIC_SPCHARGER 0x18b
-#define INTEL_MSIC_CHRTTIME 0x18c
-#define INTEL_MSIC_CHRCTRL1 0x18d
-#define INTEL_MSIC_PWRSRCLMT 0x18e
-#define INTEL_MSIC_CHRSTWDT 0x18f
-#define INTEL_MSIC_WDTWRITE 0x190 /* WO */
-#define INTEL_MSIC_CHRSAFELMT 0x191
-#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */
-#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */
-#define INTEL_MSIC_CHRLEDPWM 0x194
-#define INTEL_MSIC_CHRLEDCTRL 0x195
-
-/* ADC */
-#define INTEL_MSIC_ADC1CNTL1 0x1c0
-#define INTEL_MSIC_ADC1CNTL2 0x1c1
-#define INTEL_MSIC_ADC1CNTL3 0x1c2
-#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */
-#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */
-#define INTEL_MSIC_ADC1ADDR0 0x1c5
-#define INTEL_MSIC_ADC1ADDR1 0x1c6
-#define INTEL_MSIC_ADC1ADDR2 0x1c7
-#define INTEL_MSIC_ADC1ADDR3 0x1c8
-#define INTEL_MSIC_ADC1ADDR4 0x1c9
-#define INTEL_MSIC_ADC1ADDR5 0x1ca
-#define INTEL_MSIC_ADC1ADDR6 0x1cb
-#define INTEL_MSIC_ADC1ADDR7 0x1cc
-#define INTEL_MSIC_ADC1ADDR8 0x1cd
-#define INTEL_MSIC_ADC1ADDR9 0x1ce
-#define INTEL_MSIC_ADC1ADDR10 0x1cf
-#define INTEL_MSIC_ADC1ADDR11 0x1d0
-#define INTEL_MSIC_ADC1ADDR12 0x1d1
-#define INTEL_MSIC_ADC1ADDR13 0x1d2
-#define INTEL_MSIC_ADC1ADDR14 0x1d3
-#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */
-#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */
-#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */
-#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */
-#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */
-#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */
-#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */
-#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */
-#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */
-#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */
-#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */
-#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */
-#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */
-#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */
-#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */
-#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */
-#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */
-#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */
-#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */
-#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */
-#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */
-#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */
-#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */
-#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */
-#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */
-#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */
-#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */
-#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */
-#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */
-#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */
-#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */
-#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */
-#define INTEL_MSIC_ADC1BV1H 0x1f4 /* RO */
-#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */
-#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */
-#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */
-#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */
-#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */
-#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */
-#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */
-#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */
-#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */
-#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */
-#define INTEL_MSIC_ADC1BI2L 0x1ff /* RO */
-#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */
-#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */
-#define INTEL_MSIC_CCCNTL 0x202
-#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */
-#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */
-#define INTEL_MSIC_CCADCHA 0x205 /* RO */
-#define INTEL_MSIC_CCADCLA 0x206 /* RO */
-
-/* AUDIO */
-#define INTEL_MSIC_AUDPLLCTRL 0x240
-#define INTEL_MSIC_DMICBUF0123 0x241
-#define INTEL_MSIC_DMICBUF45 0x242
-#define INTEL_MSIC_DMICGPO 0x244
-#define INTEL_MSIC_DMICMUX 0x245
-#define INTEL_MSIC_DMICCLK 0x246
-#define INTEL_MSIC_MICBIAS 0x247
-#define INTEL_MSIC_ADCCONFIG 0x248
-#define INTEL_MSIC_MICAMP1 0x249
-#define INTEL_MSIC_MICAMP2 0x24a
-#define INTEL_MSIC_NOISEMUX 0x24b
-#define INTEL_MSIC_AUDIOMUX12 0x24c
-#define INTEL_MSIC_AUDIOMUX34 0x24d
-#define INTEL_MSIC_AUDIOSINC 0x24e
-#define INTEL_MSIC_AUDIOTXEN 0x24f
-#define INTEL_MSIC_HSEPRXCTRL 0x250
-#define INTEL_MSIC_IHFRXCTRL 0x251
-#define INTEL_MSIC_VOICETXVOL 0x252
-#define INTEL_MSIC_SIDETONEVOL 0x253
-#define INTEL_MSIC_MUSICSHARVOL 0x254
-#define INTEL_MSIC_VOICETXCTRL 0x255
-#define INTEL_MSIC_HSMIXER 0x256
-#define INTEL_MSIC_DACCONFIG 0x257
-#define INTEL_MSIC_SOFTMUTE 0x258
-#define INTEL_MSIC_HSLVOLCTRL 0x259
-#define INTEL_MSIC_HSRVOLCTRL 0x25a
-#define INTEL_MSIC_IHFLVOLCTRL 0x25b
-#define INTEL_MSIC_IHFRVOLCTRL 0x25c
-#define INTEL_MSIC_DRIVEREN 0x25d
-#define INTEL_MSIC_LINEOUTCTRL 0x25e
-#define INTEL_MSIC_VIB1CTRL1 0x25f
-#define INTEL_MSIC_VIB1CTRL2 0x260
-#define INTEL_MSIC_VIB1CTRL3 0x261
-#define INTEL_MSIC_VIB1SPIPCM_1 0x262
-#define INTEL_MSIC_VIB1SPIPCM_2 0x263
-#define INTEL_MSIC_VIB1CTRL5 0x264
-#define INTEL_MSIC_VIB2CTRL1 0x265
-#define INTEL_MSIC_VIB2CTRL2 0x266
-#define INTEL_MSIC_VIB2CTRL3 0x267
-#define INTEL_MSIC_VIB2SPIPCM_1 0x268
-#define INTEL_MSIC_VIB2SPIPCM_2 0x269
-#define INTEL_MSIC_VIB2CTRL5 0x26a
-#define INTEL_MSIC_BTNCTRL1 0x26b
-#define INTEL_MSIC_BTNCTRL2 0x26c
-#define INTEL_MSIC_PCM1TXSLOT01 0x26d
-#define INTEL_MSIC_PCM1TXSLOT23 0x26e
-#define INTEL_MSIC_PCM1TXSLOT45 0x26f
-#define INTEL_MSIC_PCM1RXSLOT0123 0x270
-#define INTEL_MSIC_PCM1RXSLOT045 0x271
-#define INTEL_MSIC_PCM2TXSLOT01 0x272
-#define INTEL_MSIC_PCM2TXSLOT23 0x273
-#define INTEL_MSIC_PCM2TXSLOT45 0x274
-#define INTEL_MSIC_PCM2RXSLOT01 0x275
-#define INTEL_MSIC_PCM2RXSLOT23 0x276
-#define INTEL_MSIC_PCM2RXSLOT45 0x277
-#define INTEL_MSIC_PCM1CTRL1 0x278
-#define INTEL_MSIC_PCM1CTRL2 0x279
-#define INTEL_MSIC_PCM1CTRL3 0x27a
-#define INTEL_MSIC_PCM2CTRL1 0x27b
-#define INTEL_MSIC_PCM2CTRL2 0x27c
-
-/* HDMI */
-#define INTEL_MSIC_HDMIPUEN 0x280
-#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */
-
-/* Physical address of the start of the MSIC interrupt tree in SRAM */
-#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0
-
-/**
- * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
- * @gpio_base: base number for the GPIOs
- */
-struct intel_msic_gpio_pdata {
- unsigned gpio_base;
-};
-
-/**
- * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver
- * @gpio: GPIO number used for OCD interrupts
- *
- * The MSIC MFD driver converts @gpio into an IRQ number and passes it to
- * the OCD driver as %IORESOURCE_IRQ.
- */
-struct intel_msic_ocd_pdata {
- unsigned gpio;
-};
-
-/* MSIC embedded blocks (subdevices) */
-enum intel_msic_block {
- INTEL_MSIC_BLOCK_TOUCH,
- INTEL_MSIC_BLOCK_ADC,
- INTEL_MSIC_BLOCK_BATTERY,
- INTEL_MSIC_BLOCK_GPIO,
- INTEL_MSIC_BLOCK_AUDIO,
- INTEL_MSIC_BLOCK_HDMI,
- INTEL_MSIC_BLOCK_THERMAL,
- INTEL_MSIC_BLOCK_POWER_BTN,
- INTEL_MSIC_BLOCK_OCD,
-
- INTEL_MSIC_BLOCK_LAST,
-};
-
-/**
- * struct intel_msic_platform_data - platform data for the MSIC driver
- * @irq: array of interrupt numbers, one per device. If @irq is set to %0
- * for a given block, the corresponding platform device is not
- * created. For devices which don't have an interrupt, use %0xff
- * (this is same as in SFI spec).
- * @gpio: platform data for the MSIC GPIO driver
- * @ocd: platform data for the MSIC OCD driver
- *
- * Once the MSIC driver is initialized, the register interface is ready to
- * use. All the platform devices for subdevices are created after the
- * register interface is ready so that we can guarantee its availability to
- * the subdevice drivers.
- *
- * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ
- * resources of the created platform device.
- */
-struct intel_msic_platform_data {
- int irq[INTEL_MSIC_BLOCK_LAST];
- struct intel_msic_gpio_pdata *gpio;
- struct intel_msic_ocd_pdata *ocd;
-};
-
-struct intel_msic;
-
-extern int intel_msic_reg_read(unsigned short reg, u8 *val);
-extern int intel_msic_reg_write(unsigned short reg, u8 val);
-extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask);
-extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count);
-extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count);
-
-/*
- * pdev_to_intel_msic - gets an MSIC instance from the platform device
- * @pdev: platform device pointer
- *
- * The client drivers need to have pointer to the MSIC instance if they
- * want to call intel_msic_irq_read(). This macro can be used for
- * convenience to get the MSIC pointer from @pdev where needed. This is
- * _only_ valid for devices which are managed by the MSIC.
- */
-#define pdev_to_intel_msic(pdev) (dev_get_drvdata(pdev->dev.parent))
-
-extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg,
- u8 *val);
-
-#endif /* __LINUX_MFD_INTEL_MSIC_H__ */
diff --git a/include/linux/mfd/intel_pmc_bxt.h b/include/linux/mfd/intel_pmc_bxt.h
new file mode 100644
index 000000000000..f51a43d25ffd
--- /dev/null
+++ b/include/linux/mfd/intel_pmc_bxt.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MFD_INTEL_PMC_BXT_H
+#define MFD_INTEL_PMC_BXT_H
+
+/* GCR reg offsets from GCR base */
+#define PMC_GCR_PMC_CFG_REG 0x08
+#define PMC_GCR_TELEM_DEEP_S0IX_REG 0x78
+#define PMC_GCR_TELEM_SHLW_S0IX_REG 0x80
+
+/* PMC_CFG_REG bit masks */
+#define PMC_CFG_NO_REBOOT_EN BIT(4)
+
+/**
+ * struct intel_pmc_dev - Intel PMC device structure
+ * @dev: Pointer to the parent PMC device
+ * @scu: Pointer to the SCU IPC device data structure
+ * @gcr_mem_base: Virtual base address of GCR (Global Configuration Registers)
+ * @gcr_lock: Lock used to serialize access to GCR registers
+ * @telem_base: Pointer to telemetry SSRAM base resource or %NULL if not
+ * available
+ */
+struct intel_pmc_dev {
+ struct device *dev;
+ struct intel_scu_ipc_dev *scu;
+ void __iomem *gcr_mem_base;
+ spinlock_t gcr_lock;
+ struct resource *telem_base;
+};
+
+#if IS_ENABLED(CONFIG_MFD_INTEL_PMC_BXT)
+int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset, u64 *data);
+int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset, u32 mask, u32 val);
+int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data);
+#else
+static inline int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset,
+ u64 *data)
+{
+ return -ENOTSUPP;
+}
+
+static inline int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset,
+ u32 mask, u32 val)
+{
+ return -ENOTSUPP;
+}
+
+static inline int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* MFD_INTEL_PMC_BXT_H */
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 5aacdb017a9f..9ba2c1a8d836 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel_soc_pmic.h - Intel SoC PMIC Driver
+ * Intel SoC PMIC Driver
*
* Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
@@ -21,16 +13,44 @@
#include <linux/regmap.h>
+enum intel_cht_wc_models {
+ INTEL_CHT_WC_UNKNOWN,
+ INTEL_CHT_WC_GPD_WIN_POCKET,
+ INTEL_CHT_WC_XIAOMI_MIPAD2,
+ INTEL_CHT_WC_LENOVO_YOGABOOK1,
+ INTEL_CHT_WC_LENOVO_YT3_X90,
+};
+
+/**
+ * struct intel_soc_pmic - Intel SoC PMIC data
+ * @irq: Master interrupt number of the parent PMIC device
+ * @regmap: Pointer to the parent PMIC device regmap structure
+ * @irq_chip_data: IRQ chip data for the PMIC itself
+ * @irq_chip_data_pwrbtn: Chained IRQ chip data for the Power Button
+ * @irq_chip_data_tmu: Chained IRQ chip data for the Time Management Unit
+ * @irq_chip_data_bcu: Chained IRQ chip data for the Burst Control Unit
+ * @irq_chip_data_adc: Chained IRQ chip data for the General Purpose ADC
+ * @irq_chip_data_chgr: Chained IRQ chip data for the External Charger
+ * @irq_chip_data_crit: Chained IRQ chip data for the Critical Event Handler
+ * @dev: Pointer to the parent PMIC device
+ * @scu: Pointer to the SCU IPC device data structure
+ */
struct intel_soc_pmic {
int irq;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_chip_data;
+ struct regmap_irq_chip_data *irq_chip_data_pwrbtn;
struct regmap_irq_chip_data *irq_chip_data_tmu;
struct regmap_irq_chip_data *irq_chip_data_bcu;
struct regmap_irq_chip_data *irq_chip_data_adc;
struct regmap_irq_chip_data *irq_chip_data_chgr;
struct regmap_irq_chip_data *irq_chip_data_crit;
struct device *dev;
+ struct intel_scu_ipc_dev *scu;
+ enum intel_cht_wc_models cht_wc_model;
};
+int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
+ u32 value, u32 mask);
+
#endif /* __INTEL_SOC_PMIC_H__ */
diff --git a/include/linux/mfd/intel_soc_pmic_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h
index 0c351bc85d2d..9be566cc58c6 100644
--- a/include/linux/mfd/intel_soc_pmic_bxtwc.h
+++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header file for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __INTEL_BXTWC_H__
diff --git a/include/linux/mfd/intel_soc_pmic_mrfld.h b/include/linux/mfd/intel_soc_pmic_mrfld.h
new file mode 100644
index 000000000000..4daecd682275
--- /dev/null
+++ b/include/linux/mfd/intel_soc_pmic_mrfld.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for Intel Merrifield Basin Cove PMIC
+ *
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_SOC_PMIC_MRFLD_H__
+#define __INTEL_SOC_PMIC_MRFLD_H__
+
+#include <linux/bits.h>
+
+#define BCOVE_ID 0x00
+
+#define BCOVE_ID_MINREV0 GENMASK(2, 0)
+#define BCOVE_ID_MAJREV0 GENMASK(5, 3)
+#define BCOVE_ID_VENDID0 GENMASK(7, 6)
+
+#define BCOVE_MINOR(x) (unsigned int)(((x) & BCOVE_ID_MINREV0) >> 0)
+#define BCOVE_MAJOR(x) (unsigned int)(((x) & BCOVE_ID_MAJREV0) >> 3)
+#define BCOVE_VENDOR(x) (unsigned int)(((x) & BCOVE_ID_VENDID0) >> 6)
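+
+/*
+ * Illustrative decode (value chosen arbitrarily): an ID register value of
+ * 0x5b yields BCOVE_VENDOR() == 1, BCOVE_MAJOR() == 3, BCOVE_MINOR() == 3.
+ */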
+
+#define BCOVE_IRQLVL1 0x01
+
+#define BCOVE_PBIRQ 0x02
+#define BCOVE_TMUIRQ 0x03
+#define BCOVE_THRMIRQ 0x04
+#define BCOVE_BCUIRQ 0x05
+#define BCOVE_ADCIRQ 0x06
+#define BCOVE_CHGRIRQ0 0x07
+#define BCOVE_CHGRIRQ1 0x08
+#define BCOVE_GPIOIRQ 0x09
+#define BCOVE_CRITIRQ 0x0B
+
+#define BCOVE_MIRQLVL1 0x0C
+
+#define BCOVE_MPBIRQ 0x0D
+#define BCOVE_MTMUIRQ 0x0E
+#define BCOVE_MTHRMIRQ 0x0F
+#define BCOVE_MBCUIRQ 0x10
+#define BCOVE_MADCIRQ 0x11
+#define BCOVE_MCHGRIRQ0 0x12
+#define BCOVE_MCHGRIRQ1 0x13
+#define BCOVE_MGPIOIRQ 0x14
+#define BCOVE_MCRITIRQ 0x16
+
+#define BCOVE_SCHGRIRQ0 0x4E
+#define BCOVE_SCHGRIRQ1 0x4F
+
+/* Level 1 IRQs */
+#define BCOVE_LVL1_PWRBTN BIT(0) /* power button */
+#define BCOVE_LVL1_TMU BIT(1) /* time management unit */
+#define BCOVE_LVL1_THRM BIT(2) /* thermal */
+#define BCOVE_LVL1_BCU BIT(3) /* burst control unit */
+#define BCOVE_LVL1_ADC BIT(4) /* ADC */
+#define BCOVE_LVL1_CHGR BIT(5) /* charger */
+#define BCOVE_LVL1_GPIO BIT(6) /* GPIO */
+#define BCOVE_LVL1_CRIT BIT(7) /* critical event */
+
+/* Level 2 IRQs: power button */
+#define BCOVE_PBIRQ_PBTN BIT(0)
+#define BCOVE_PBIRQ_UBTN BIT(1)
+
+/* Level 2 IRQs: ADC */
+#define BCOVE_ADCIRQ_BATTEMP BIT(2)
+#define BCOVE_ADCIRQ_SYSTEMP BIT(3)
+#define BCOVE_ADCIRQ_BATTID BIT(4)
+#define BCOVE_ADCIRQ_VIBATT BIT(5)
+#define BCOVE_ADCIRQ_CCTICK BIT(7)
+
+/* Level 2 IRQs: charger */
+#define BCOVE_CHGRIRQ_BAT0ALRT BIT(4)
+#define BCOVE_CHGRIRQ_BAT1ALRT BIT(5)
+#define BCOVE_CHGRIRQ_BATCRIT BIT(6)
+
+#define BCOVE_CHGRIRQ_VBUSDET BIT(0)
+#define BCOVE_CHGRIRQ_DCDET BIT(1)
+#define BCOVE_CHGRIRQ_BATTDET BIT(2)
+#define BCOVE_CHGRIRQ_USBIDDET BIT(3)
+
+#endif /* __INTEL_SOC_PMIC_MRFLD_H__ */
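The BCOVE_MAJOR/BCOVE_MINOR/BCOVE_VENDOR helpers decode the fields packed into the ID register. A hedged sketch of reading and decoding it, assuming a valid struct intel_soc_pmic handle from the parent driver:

/* Sketch: decode the Basin Cove ID register using the field macros above. */
static int example_print_bcove_revision(struct intel_soc_pmic *pmic)
{
	unsigned int id;
	int ret;

	ret = regmap_read(pmic->regmap, BCOVE_ID, &id);
	if (ret)
		return ret;

	dev_info(pmic->dev, "Basin Cove PMIC v%u.%u (vendor %u)\n",
		 BCOVE_MAJOR(id), BCOVE_MINOR(id), BCOVE_VENDOR(id));
	return 0;
}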
diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h
index 5c4d29f6674f..d5caa4c86ecc 100644
--- a/include/linux/mfd/ipaq-micro.h
+++ b/include/linux/mfd/ipaq-micro.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header file for the compaq Micro MFD
*/
@@ -74,8 +75,8 @@ struct ipaq_micro_rxdev {
* @id: 4-bit ID of the message
* @tx_len: length of TX data
* @tx_data: TX data to send
- * @rx_len: length of receieved RX data
- * @rx_data: RX data to recieve
+ * @rx_len: length of received RX data
+ * @rx_data: RX data to receive
* @ack: a completion that will be completed when RX is complete
* @node: list node if message gets queued
*/
diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h
new file mode 100644
index 000000000000..ffc86010af74
--- /dev/null
+++ b/include/linux/mfd/iqs62x.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Azoteq IQS620A/621/622/624/625 Multi-Function Sensors
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#ifndef __LINUX_MFD_IQS62X_H
+#define __LINUX_MFD_IQS62X_H
+
+#define IQS620_PROD_NUM 0x41
+#define IQS621_PROD_NUM 0x46
+#define IQS622_PROD_NUM 0x42
+#define IQS624_PROD_NUM 0x43
+#define IQS625_PROD_NUM 0x4E
+
+#define IQS620_HW_NUM_V0 0x82
+#define IQS620_HW_NUM_V1 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V2 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V3 0x92
+
+#define IQS621_ALS_FLAGS 0x16
+#define IQS622_ALS_FLAGS 0x14
+
+#define IQS624_HALL_UI 0x70
+#define IQS624_HALL_UI_WHL_EVENT BIT(4)
+#define IQS624_HALL_UI_INT_EVENT BIT(3)
+#define IQS624_HALL_UI_AUTO_CAL BIT(2)
+
+#define IQS624_INTERVAL_DIV 0x7D
+
+#define IQS620_GLBL_EVENT_MASK 0xD7
+#define IQS620_GLBL_EVENT_MASK_PMU BIT(6)
+
+#define IQS62X_NUM_KEYS 16
+#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 6)
+
+#define IQS62X_EVENT_SIZE 10
+
+enum iqs62x_ui_sel {
+ IQS62X_UI_PROX,
+ IQS62X_UI_SAR1,
+};
+
+enum iqs62x_event_reg {
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_SYS,
+ IQS62X_EVENT_PROX,
+ IQS62X_EVENT_HYST,
+ IQS62X_EVENT_HALL,
+ IQS62X_EVENT_ALS,
+ IQS62X_EVENT_IR,
+ IQS62X_EVENT_WHEEL,
+ IQS62X_EVENT_INTER,
+ IQS62X_EVENT_UI_LO,
+ IQS62X_EVENT_UI_HI,
+};
+
+enum iqs62x_event_flag {
+ /* keys */
+ IQS62X_EVENT_PROX_CH0_T,
+ IQS62X_EVENT_PROX_CH0_P,
+ IQS62X_EVENT_PROX_CH1_T,
+ IQS62X_EVENT_PROX_CH1_P,
+ IQS62X_EVENT_PROX_CH2_T,
+ IQS62X_EVENT_PROX_CH2_P,
+ IQS62X_EVENT_HYST_POS_T,
+ IQS62X_EVENT_HYST_POS_P,
+ IQS62X_EVENT_HYST_NEG_T,
+ IQS62X_EVENT_HYST_NEG_P,
+ IQS62X_EVENT_SAR1_ACT,
+ IQS62X_EVENT_SAR1_QRD,
+ IQS62X_EVENT_SAR1_MOVE,
+ IQS62X_EVENT_SAR1_HALT,
+ IQS62X_EVENT_WHEEL_UP,
+ IQS62X_EVENT_WHEEL_DN,
+
+ /* switches */
+ IQS62X_EVENT_HALL_N_T,
+ IQS62X_EVENT_HALL_N_P,
+ IQS62X_EVENT_HALL_S_T,
+ IQS62X_EVENT_HALL_S_P,
+
+ /* everything else */
+ IQS62X_EVENT_SYS_RESET,
+ IQS62X_EVENT_SYS_ATI,
+};
+
+struct iqs62x_event_data {
+ u16 ui_data;
+ u8 als_flags;
+ u8 ir_flags;
+ u8 interval;
+};
+
+struct iqs62x_event_desc {
+ enum iqs62x_event_reg reg;
+ u8 mask;
+ u8 val;
+};
+
+struct iqs62x_dev_desc {
+ const char *dev_name;
+ const struct mfd_cell *sub_devs;
+ int num_sub_devs;
+ u8 prod_num;
+ u8 sw_num;
+ const u8 *cal_regs;
+ int num_cal_regs;
+ u8 prox_mask;
+ u8 sar_mask;
+ u8 hall_mask;
+ u8 hyst_mask;
+ u8 temp_mask;
+ u8 als_mask;
+ u8 ir_mask;
+ u8 prox_settings;
+ u8 als_flags;
+ u8 hall_flags;
+ u8 hyst_shift;
+ u8 interval;
+ u8 interval_div;
+ const char *fw_name;
+ const enum iqs62x_event_reg (*event_regs)[IQS62X_EVENT_SIZE];
+};
+
+struct iqs62x_core {
+ const struct iqs62x_dev_desc *dev_desc;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct blocking_notifier_head nh;
+ struct list_head fw_blk_head;
+ struct completion ati_done;
+ struct completion fw_done;
+ enum iqs62x_ui_sel ui_sel;
+ unsigned long event_cache;
+ u8 sw_num;
+ u8 hw_num;
+};
+
+extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS];
+
+#endif /* __LINUX_MFD_IQS62X_H */
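Child drivers consume events through the blocking notifier head @nh in struct iqs62x_core. A sketch of a subscriber is shown below; it assumes, as the structures above suggest, that the notifier's event argument carries BIT()-encoded enum iqs62x_event_flag values and that the context pointer is a struct iqs62x_event_data:

static int example_iqs62x_notifier(struct notifier_block *nb,
				   unsigned long event_flags, void *context)
{
	struct iqs62x_event_data *event_data = context;

	if (event_flags & BIT(IQS62X_EVENT_SYS_RESET))
		pr_info("iqs62x: device reset reported\n");

	if (event_flags & BIT(IQS62X_EVENT_WHEEL_UP))
		pr_info("iqs62x: wheel up, UI data 0x%04x\n",
			event_data->ui_data);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_iqs62x_notifier,
};

/* Registration, given struct iqs62x_core *iqs62x from the parent MFD:
 *	blocking_notifier_chain_register(&iqs62x->nh, &example_nb);
 */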
diff --git a/include/linux/mfd/janz.h b/include/linux/mfd/janz.h
index e9994c469803..90dea65fd733 100644
--- a/include/linux/mfd/janz.h
+++ b/include/linux/mfd/janz.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Common Definitions for Janz MODULbus devices
*
* Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef JANZ_H
diff --git a/include/linux/mfd/kempld.h b/include/linux/mfd/kempld.h
index 26e0b469e567..643c096b93ac 100644
--- a/include/linux/mfd/kempld.h
+++ b/include/linux/mfd/kempld.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Kontron PLD driver definitions
*
* Copyright (c) 2010-2012 Kontron Europe GmbH
* Author: Michael Brunner <michael.brunner@kontron.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License 2 as published
- * by the Free Software Foundation.
*/
#ifndef _LINUX_MFD_KEMPLD_H_
diff --git a/include/linux/mfd/khadas-mcu.h b/include/linux/mfd/khadas-mcu.h
new file mode 100644
index 000000000000..a99ba2ed0e4e
--- /dev/null
+++ b/include/linux/mfd/khadas-mcu.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Khadas System control Microcontroller Register map
+ *
+ * Copyright (C) 2020 BayLibre SAS
+ *
+ * Author(s): Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef MFD_KHADAS_MCU_H
+#define MFD_KHADAS_MCU_H
+
+#define KHADAS_MCU_PASSWD_VEN_0_REG 0x00 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_1_REG 0x01 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_2_REG 0x02 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_3_REG 0x03 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_4_REG 0x04 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_5_REG 0x05 /* RO */
+#define KHADAS_MCU_MAC_0_REG 0x06 /* RO */
+#define KHADAS_MCU_MAC_1_REG 0x07 /* RO */
+#define KHADAS_MCU_MAC_2_REG 0x08 /* RO */
+#define KHADAS_MCU_MAC_3_REG 0x09 /* RO */
+#define KHADAS_MCU_MAC_4_REG 0x0a /* RO */
+#define KHADAS_MCU_MAC_5_REG 0x0b /* RO */
+#define KHADAS_MCU_USID_0_REG 0x0c /* RO */
+#define KHADAS_MCU_USID_1_REG 0x0d /* RO */
+#define KHADAS_MCU_USID_2_REG 0x0e /* RO */
+#define KHADAS_MCU_USID_3_REG 0x0f /* RO */
+#define KHADAS_MCU_USID_4_REG 0x10 /* RO */
+#define KHADAS_MCU_USID_5_REG 0x11 /* RO */
+#define KHADAS_MCU_VERSION_0_REG 0x12 /* RO */
+#define KHADAS_MCU_VERSION_1_REG 0x13 /* RO */
+#define KHADAS_MCU_DEVICE_NO_0_REG 0x14 /* RO */
+#define KHADAS_MCU_DEVICE_NO_1_REG 0x15 /* RO */
+#define KHADAS_MCU_FACTORY_TEST_REG 0x16 /* RO */
+#define KHADAS_MCU_BOOT_MODE_REG 0x20 /* RW */
+#define KHADAS_MCU_BOOT_EN_WOL_REG 0x21 /* RW */
+#define KHADAS_MCU_BOOT_EN_RTC_REG 0x22 /* RW */
+#define KHADAS_MCU_BOOT_EN_EXP_REG 0x23 /* RW */
+#define KHADAS_MCU_BOOT_EN_IR_REG 0x24 /* RW */
+#define KHADAS_MCU_BOOT_EN_DCIN_REG 0x25 /* RW */
+#define KHADAS_MCU_BOOT_EN_KEY_REG 0x26 /* RW */
+#define KHADAS_MCU_KEY_MODE_REG 0x27 /* RW */
+#define KHADAS_MCU_LED_MODE_ON_REG 0x28 /* RW */
+#define KHADAS_MCU_LED_MODE_OFF_REG 0x29 /* RW */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_REG 0x2c /* RW */
+#define KHADAS_MCU_MAC_SWITCH_REG 0x2d /* RW */
+#define KHADAS_MCU_MCU_SLEEP_MODE_REG 0x2e /* RW */
+#define KHADAS_MCU_IR_CODE1_0_REG 0x2f /* RW */
+#define KHADAS_MCU_IR_CODE1_1_REG 0x30 /* RW */
+#define KHADAS_MCU_IR_CODE1_2_REG 0x31 /* RW */
+#define KHADAS_MCU_IR_CODE1_3_REG 0x32 /* RW */
+#define KHADAS_MCU_USB_PCIE_SWITCH_REG 0x33 /* RW */
+#define KHADAS_MCU_IR_CODE2_0_REG 0x34 /* RW */
+#define KHADAS_MCU_IR_CODE2_1_REG 0x35 /* RW */
+#define KHADAS_MCU_IR_CODE2_2_REG 0x36 /* RW */
+#define KHADAS_MCU_IR_CODE2_3_REG 0x37 /* RW */
+#define KHADAS_MCU_PASSWD_USER_0_REG 0x40 /* RW */
+#define KHADAS_MCU_PASSWD_USER_1_REG 0x41 /* RW */
+#define KHADAS_MCU_PASSWD_USER_2_REG 0x42 /* RW */
+#define KHADAS_MCU_PASSWD_USER_3_REG 0x43 /* RW */
+#define KHADAS_MCU_PASSWD_USER_4_REG 0x44 /* RW */
+#define KHADAS_MCU_PASSWD_USER_5_REG 0x45 /* RW */
+#define KHADAS_MCU_USER_DATA_0_REG 0x46 /* RW 56 bytes */
+#define KHADAS_MCU_PWR_OFF_CMD_REG 0x80 /* WO */
+#define KHADAS_MCU_PASSWD_START_REG 0x81 /* WO */
+#define KHADAS_MCU_CHECK_VEN_PASSWD_REG 0x82 /* WO */
+#define KHADAS_MCU_CHECK_USER_PASSWD_REG 0x83 /* WO */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG 0x86 /* RO */
+#define KHADAS_MCU_WOL_INIT_START_REG 0x87 /* WO */
+#define KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG 0x88 /* WO */
+
+enum {
+ KHADAS_BOARD_VIM1 = 0x1,
+ KHADAS_BOARD_VIM2,
+ KHADAS_BOARD_VIM3,
+ KHADAS_BOARD_EDGE = 0x11,
+ KHADAS_BOARD_EDGE_V,
+};
+
+/**
+ * struct khadas_mcu - Khadas MCU structure
+ * @dev: device reference used for logs
+ * @regmap: register map
+ */
+struct khadas_mcu {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#endif /* MFD_KHADAS_MCU_H */
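All MCU registers are accessed through the regmap held in struct khadas_mcu. A sketch of reading the two version registers; treating VERSION_0 as major and VERSION_1 as minor is an assumption made for illustration:

/* Sketch: read the MCU version, assuming "mcu" comes from the parent MFD. */
static int example_khadas_mcu_version(struct khadas_mcu *mcu)
{
	unsigned int major, minor;	/* major/minor split is assumed */
	int ret;

	ret = regmap_read(mcu->regmap, KHADAS_MCU_VERSION_0_REG, &major);
	if (ret)
		return ret;

	ret = regmap_read(mcu->regmap, KHADAS_MCU_VERSION_1_REG, &minor);
	if (ret)
		return ret;

	dev_info(mcu->dev, "MCU version %u.%u\n", major, minor);
	return 0;
}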
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
index 594bc591f256..69059a7a2ce5 100644
--- a/include/linux/mfd/lm3533.h
+++ b/include/linux/mfd/lm3533.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* lm3533.h -- LM3533 interface
*
* Copyright (C) 2011-2012 Texas Instruments
*
* Author: Johan Hovold <jhovold@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_LM3533_H
@@ -20,6 +16,7 @@
DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
struct device;
+struct gpio_desc;
struct regmap;
struct lm3533 {
@@ -27,7 +24,7 @@ struct lm3533 {
struct regmap *regmap;
- int gpio_hwen;
+ struct gpio_desc *hwen;
int irq;
unsigned have_als:1;
@@ -73,8 +70,6 @@ enum lm3533_boost_ovp {
};
struct lm3533_platform_data {
- int gpio_hwen;
-
enum lm3533_boost_ovp boost_ovp;
enum lm3533_boost_freq boost_freq;
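With the switch from a GPIO number to a GPIO descriptor, the core driver can drive the HWEN line through the gpiod API. A sketch, assuming the descriptor is looked up with an illustrative "hwen" connection ID:

/* Sketch: acquire and assert HWEN. The "hwen" con_id is illustrative. */
static int example_lm3533_enable(struct device *dev, struct lm3533 *lm3533)
{
	lm3533->hwen = devm_gpiod_get(dev, "hwen", GPIOD_OUT_LOW);
	if (IS_ERR(lm3533->hwen))
		return PTR_ERR(lm3533->hwen);

	gpiod_set_value(lm3533->hwen, 1);	/* take the chip out of shutdown */
	return 0;
}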
diff --git a/include/linux/mfd/lochnagar.h b/include/linux/mfd/lochnagar.h
new file mode 100644
index 000000000000..ff9e64cfc9fb
--- /dev/null
+++ b/include/linux/mfd/lochnagar.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Lochnagar internals
+ *
+ * Copyright (c) 2013-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#ifndef CIRRUS_LOCHNAGAR_H
+#define CIRRUS_LOCHNAGAR_H
+
+enum lochnagar_type {
+ LOCHNAGAR1,
+ LOCHNAGAR2,
+};
+
+/**
+ * struct lochnagar - Core data for the Lochnagar audio board driver.
+ *
+ * @type: The type of Lochnagar device connected.
+ * @dev: A pointer to the struct device for the main MFD.
+ * @regmap: The device's main register map.
+ * @analogue_config_lock: Lock used to protect updates to the analogue
+ * configuration, as these must not be changed whilst the hardware is
+ * processing the last update.
+ */
+struct lochnagar {
+ enum lochnagar_type type;
+ struct device *dev;
+ struct regmap *regmap;
+
+ /* Lock to protect updates to the analogue configuration */
+ struct mutex analogue_config_lock;
+};
+
+/* Register Addresses */
+#define LOCHNAGAR_SOFTWARE_RESET 0x00
+#define LOCHNAGAR_FIRMWARE_ID1 0x01
+#define LOCHNAGAR_FIRMWARE_ID2 0x02
+
+/* (0x0000) Software Reset */
+#define LOCHNAGAR_DEVICE_ID_MASK 0xFFFC
+#define LOCHNAGAR_DEVICE_ID_SHIFT 2
+#define LOCHNAGAR_REV_ID_MASK 0x0003
+#define LOCHNAGAR_REV_ID_SHIFT 0
+
+int lochnagar_update_config(struct lochnagar *lochnagar);
+
+#endif
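The ID fields packed into LOCHNAGAR_SOFTWARE_RESET can be unpacked with the mask/shift pairs above. A minimal sketch, assuming a valid struct lochnagar handle from the parent MFD driver:

/* Sketch: read and decode the Lochnagar device and revision IDs. */
static int example_lochnagar_id(struct lochnagar *lochnagar)
{
	unsigned int val;
	int ret;

	ret = regmap_read(lochnagar->regmap, LOCHNAGAR_SOFTWARE_RESET, &val);
	if (ret)
		return ret;

	dev_info(lochnagar->dev, "Lochnagar device %u rev %u\n",
		 (val & LOCHNAGAR_DEVICE_ID_MASK) >> LOCHNAGAR_DEVICE_ID_SHIFT,
		 (val & LOCHNAGAR_REV_ID_MASK) >> LOCHNAGAR_REV_ID_SHIFT);
	return 0;
}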
diff --git a/include/linux/mfd/lochnagar1_regs.h b/include/linux/mfd/lochnagar1_regs.h
new file mode 100644
index 000000000000..114b846245d9
--- /dev/null
+++ b/include/linux/mfd/lochnagar1_regs.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Lochnagar1 register definitions
+ *
+ * Copyright (c) 2017-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#ifndef LOCHNAGAR1_REGISTERS_H
+#define LOCHNAGAR1_REGISTERS_H
+
+/* Register Addresses */
+#define LOCHNAGAR1_CDC_AIF1_SEL 0x0008
+#define LOCHNAGAR1_CDC_AIF2_SEL 0x0009
+#define LOCHNAGAR1_CDC_AIF3_SEL 0x000A
+#define LOCHNAGAR1_CDC_MCLK1_SEL 0x000B
+#define LOCHNAGAR1_CDC_MCLK2_SEL 0x000C
+#define LOCHNAGAR1_CDC_AIF_CTRL1 0x000D
+#define LOCHNAGAR1_CDC_AIF_CTRL2 0x000E
+#define LOCHNAGAR1_EXT_AIF_CTRL 0x000F
+#define LOCHNAGAR1_DSP_AIF1_SEL 0x0010
+#define LOCHNAGAR1_DSP_AIF2_SEL 0x0011
+#define LOCHNAGAR1_DSP_CLKIN_SEL 0x0012
+#define LOCHNAGAR1_DSP_AIF 0x0013
+#define LOCHNAGAR1_GF_AIF1 0x0014
+#define LOCHNAGAR1_GF_AIF2 0x0015
+#define LOCHNAGAR1_PSIA_AIF 0x0016
+#define LOCHNAGAR1_PSIA1_SEL 0x0017
+#define LOCHNAGAR1_PSIA2_SEL 0x0018
+#define LOCHNAGAR1_SPDIF_AIF_SEL 0x0019
+#define LOCHNAGAR1_GF_AIF3_SEL 0x001C
+#define LOCHNAGAR1_GF_AIF4_SEL 0x001D
+#define LOCHNAGAR1_GF_CLKOUT1_SEL 0x001E
+#define LOCHNAGAR1_GF_AIF1_SEL 0x001F
+#define LOCHNAGAR1_GF_AIF2_SEL 0x0020
+#define LOCHNAGAR1_GF_GPIO2 0x0026
+#define LOCHNAGAR1_GF_GPIO3 0x0027
+#define LOCHNAGAR1_GF_GPIO7 0x0028
+#define LOCHNAGAR1_RST 0x0029
+#define LOCHNAGAR1_LED1 0x002A
+#define LOCHNAGAR1_LED2 0x002B
+#define LOCHNAGAR1_I2C_CTRL 0x0046
+
+/*
+ * (0x0008 - 0x000C, 0x0010 - 0x0012, 0x0017 - 0x0020)
+ * CDC_AIF1_SEL - GF_AIF2_SEL
+ */
+#define LOCHNAGAR1_SRC_MASK 0xFF
+#define LOCHNAGAR1_SRC_SHIFT 0
+
+/* (0x000D) CDC_AIF_CTRL1 */
+#define LOCHNAGAR1_CDC_AIF2_LRCLK_DIR_MASK 0x40
+#define LOCHNAGAR1_CDC_AIF2_LRCLK_DIR_SHIFT 6
+#define LOCHNAGAR1_CDC_AIF2_BCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_CDC_AIF2_BCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_CDC_AIF2_ENA_MASK 0x10
+#define LOCHNAGAR1_CDC_AIF2_ENA_SHIFT 4
+#define LOCHNAGAR1_CDC_AIF1_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_CDC_AIF1_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_CDC_AIF1_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_CDC_AIF1_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_CDC_AIF1_ENA_MASK 0x01
+#define LOCHNAGAR1_CDC_AIF1_ENA_SHIFT 0
+
+/* (0x000E) CDC_AIF_CTRL2 */
+#define LOCHNAGAR1_CDC_AIF3_LRCLK_DIR_MASK 0x40
+#define LOCHNAGAR1_CDC_AIF3_LRCLK_DIR_SHIFT 6
+#define LOCHNAGAR1_CDC_AIF3_BCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_CDC_AIF3_BCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_CDC_AIF3_ENA_MASK 0x10
+#define LOCHNAGAR1_CDC_AIF3_ENA_SHIFT 4
+#define LOCHNAGAR1_CDC_MCLK1_ENA_MASK 0x02
+#define LOCHNAGAR1_CDC_MCLK1_ENA_SHIFT 1
+#define LOCHNAGAR1_CDC_MCLK2_ENA_MASK 0x01
+#define LOCHNAGAR1_CDC_MCLK2_ENA_SHIFT 0
+
+/* (0x000F) EXT_AIF_CTRL */
+#define LOCHNAGAR1_SPDIF_AIF_LRCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_SPDIF_AIF_LRCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_SPDIF_AIF_BCLK_DIR_MASK 0x10
+#define LOCHNAGAR1_SPDIF_AIF_BCLK_DIR_SHIFT 4
+#define LOCHNAGAR1_SPDIF_AIF_ENA_MASK 0x08
+#define LOCHNAGAR1_SPDIF_AIF_ENA_SHIFT 3
+
+/* (0x0013) DSP_AIF */
+#define LOCHNAGAR1_DSP_AIF2_LRCLK_DIR_MASK 0x40
+#define LOCHNAGAR1_DSP_AIF2_LRCLK_DIR_SHIFT 6
+#define LOCHNAGAR1_DSP_AIF2_BCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_DSP_AIF2_BCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_DSP_AIF2_ENA_MASK 0x10
+#define LOCHNAGAR1_DSP_AIF2_ENA_SHIFT 4
+#define LOCHNAGAR1_DSP_CLKIN_ENA_MASK 0x08
+#define LOCHNAGAR1_DSP_CLKIN_ENA_SHIFT 3
+#define LOCHNAGAR1_DSP_AIF1_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_DSP_AIF1_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_DSP_AIF1_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_DSP_AIF1_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_DSP_AIF1_ENA_MASK 0x01
+#define LOCHNAGAR1_DSP_AIF1_ENA_SHIFT 0
+
+/* (0x0014) GF_AIF1 */
+#define LOCHNAGAR1_GF_CLKOUT1_ENA_MASK 0x40
+#define LOCHNAGAR1_GF_CLKOUT1_ENA_SHIFT 6
+#define LOCHNAGAR1_GF_AIF3_LRCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_GF_AIF3_LRCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_GF_AIF3_BCLK_DIR_MASK 0x10
+#define LOCHNAGAR1_GF_AIF3_BCLK_DIR_SHIFT 4
+#define LOCHNAGAR1_GF_AIF3_ENA_MASK 0x08
+#define LOCHNAGAR1_GF_AIF3_ENA_SHIFT 3
+#define LOCHNAGAR1_GF_AIF1_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_GF_AIF1_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_GF_AIF1_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_GF_AIF1_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_GF_AIF1_ENA_MASK 0x01
+#define LOCHNAGAR1_GF_AIF1_ENA_SHIFT 0
+
+/* (0x0015) GF_AIF2 */
+#define LOCHNAGAR1_GF_AIF4_LRCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_GF_AIF4_LRCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_GF_AIF4_BCLK_DIR_MASK 0x10
+#define LOCHNAGAR1_GF_AIF4_BCLK_DIR_SHIFT 4
+#define LOCHNAGAR1_GF_AIF4_ENA_MASK 0x08
+#define LOCHNAGAR1_GF_AIF4_ENA_SHIFT 3
+#define LOCHNAGAR1_GF_AIF2_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_GF_AIF2_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_GF_AIF2_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_GF_AIF2_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_GF_AIF2_ENA_MASK 0x01
+#define LOCHNAGAR1_GF_AIF2_ENA_SHIFT 0
+
+/* (0x0016) PSIA_AIF */
+#define LOCHNAGAR1_PSIA2_LRCLK_DIR_MASK 0x40
+#define LOCHNAGAR1_PSIA2_LRCLK_DIR_SHIFT 6
+#define LOCHNAGAR1_PSIA2_BCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_PSIA2_BCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_PSIA2_ENA_MASK 0x10
+#define LOCHNAGAR1_PSIA2_ENA_SHIFT 4
+#define LOCHNAGAR1_PSIA1_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_PSIA1_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_PSIA1_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_PSIA1_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_PSIA1_ENA_MASK 0x01
+#define LOCHNAGAR1_PSIA1_ENA_SHIFT 0
+
+/* (0x0029) RST */
+#define LOCHNAGAR1_DSP_RESET_MASK 0x02
+#define LOCHNAGAR1_DSP_RESET_SHIFT 1
+#define LOCHNAGAR1_CDC_RESET_MASK 0x01
+#define LOCHNAGAR1_CDC_RESET_SHIFT 0
+
+/* (0x0046) I2C_CTRL */
+#define LOCHNAGAR1_CDC_CIF_MODE_MASK 0x01
+#define LOCHNAGAR1_CDC_CIF_MODE_SHIFT 0
+
+#endif
diff --git a/include/linux/mfd/lochnagar2_regs.h b/include/linux/mfd/lochnagar2_regs.h
new file mode 100644
index 000000000000..419b25a332fd
--- /dev/null
+++ b/include/linux/mfd/lochnagar2_regs.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Lochnagar2 register definitions
+ *
+ * Copyright (c) 2017-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#ifndef LOCHNAGAR2_REGISTERS_H
+#define LOCHNAGAR2_REGISTERS_H
+
+/* Register Addresses */
+#define LOCHNAGAR2_CDC_AIF1_CTRL 0x000D
+#define LOCHNAGAR2_CDC_AIF2_CTRL 0x000E
+#define LOCHNAGAR2_CDC_AIF3_CTRL 0x000F
+#define LOCHNAGAR2_DSP_AIF1_CTRL 0x0010
+#define LOCHNAGAR2_DSP_AIF2_CTRL 0x0011
+#define LOCHNAGAR2_PSIA1_CTRL 0x0012
+#define LOCHNAGAR2_PSIA2_CTRL 0x0013
+#define LOCHNAGAR2_GF_AIF3_CTRL 0x0014
+#define LOCHNAGAR2_GF_AIF4_CTRL 0x0015
+#define LOCHNAGAR2_GF_AIF1_CTRL 0x0016
+#define LOCHNAGAR2_GF_AIF2_CTRL 0x0017
+#define LOCHNAGAR2_SPDIF_AIF_CTRL 0x0018
+#define LOCHNAGAR2_USB_AIF1_CTRL 0x0019
+#define LOCHNAGAR2_USB_AIF2_CTRL 0x001A
+#define LOCHNAGAR2_ADAT_AIF_CTRL 0x001B
+#define LOCHNAGAR2_CDC_MCLK1_CTRL 0x001E
+#define LOCHNAGAR2_CDC_MCLK2_CTRL 0x001F
+#define LOCHNAGAR2_DSP_CLKIN_CTRL 0x0020
+#define LOCHNAGAR2_PSIA1_MCLK_CTRL 0x0021
+#define LOCHNAGAR2_PSIA2_MCLK_CTRL 0x0022
+#define LOCHNAGAR2_SPDIF_MCLK_CTRL 0x0023
+#define LOCHNAGAR2_GF_CLKOUT1_CTRL 0x0024
+#define LOCHNAGAR2_GF_CLKOUT2_CTRL 0x0025
+#define LOCHNAGAR2_ADAT_MCLK_CTRL 0x0026
+#define LOCHNAGAR2_SOUNDCARD_MCLK_CTRL 0x0027
+#define LOCHNAGAR2_GPIO_FPGA_GPIO1 0x0031
+#define LOCHNAGAR2_GPIO_FPGA_GPIO2 0x0032
+#define LOCHNAGAR2_GPIO_FPGA_GPIO3 0x0033
+#define LOCHNAGAR2_GPIO_FPGA_GPIO4 0x0034
+#define LOCHNAGAR2_GPIO_FPGA_GPIO5 0x0035
+#define LOCHNAGAR2_GPIO_FPGA_GPIO6 0x0036
+#define LOCHNAGAR2_GPIO_CDC_GPIO1 0x0037
+#define LOCHNAGAR2_GPIO_CDC_GPIO2 0x0038
+#define LOCHNAGAR2_GPIO_CDC_GPIO3 0x0039
+#define LOCHNAGAR2_GPIO_CDC_GPIO4 0x003A
+#define LOCHNAGAR2_GPIO_CDC_GPIO5 0x003B
+#define LOCHNAGAR2_GPIO_CDC_GPIO6 0x003C
+#define LOCHNAGAR2_GPIO_CDC_GPIO7 0x003D
+#define LOCHNAGAR2_GPIO_CDC_GPIO8 0x003E
+#define LOCHNAGAR2_GPIO_DSP_GPIO1 0x003F
+#define LOCHNAGAR2_GPIO_DSP_GPIO2 0x0040
+#define LOCHNAGAR2_GPIO_DSP_GPIO3 0x0041
+#define LOCHNAGAR2_GPIO_DSP_GPIO4 0x0042
+#define LOCHNAGAR2_GPIO_DSP_GPIO5 0x0043
+#define LOCHNAGAR2_GPIO_DSP_GPIO6 0x0044
+#define LOCHNAGAR2_GPIO_GF_GPIO2 0x0045
+#define LOCHNAGAR2_GPIO_GF_GPIO3 0x0046
+#define LOCHNAGAR2_GPIO_GF_GPIO7 0x0047
+#define LOCHNAGAR2_GPIO_CDC_AIF1_BCLK 0x0048
+#define LOCHNAGAR2_GPIO_CDC_AIF1_RXDAT 0x0049
+#define LOCHNAGAR2_GPIO_CDC_AIF1_LRCLK 0x004A
+#define LOCHNAGAR2_GPIO_CDC_AIF1_TXDAT 0x004B
+#define LOCHNAGAR2_GPIO_CDC_AIF2_BCLK 0x004C
+#define LOCHNAGAR2_GPIO_CDC_AIF2_RXDAT 0x004D
+#define LOCHNAGAR2_GPIO_CDC_AIF2_LRCLK 0x004E
+#define LOCHNAGAR2_GPIO_CDC_AIF2_TXDAT 0x004F
+#define LOCHNAGAR2_GPIO_CDC_AIF3_BCLK 0x0050
+#define LOCHNAGAR2_GPIO_CDC_AIF3_RXDAT 0x0051
+#define LOCHNAGAR2_GPIO_CDC_AIF3_LRCLK 0x0052
+#define LOCHNAGAR2_GPIO_CDC_AIF3_TXDAT 0x0053
+#define LOCHNAGAR2_GPIO_DSP_AIF1_BCLK 0x0054
+#define LOCHNAGAR2_GPIO_DSP_AIF1_RXDAT 0x0055
+#define LOCHNAGAR2_GPIO_DSP_AIF1_LRCLK 0x0056
+#define LOCHNAGAR2_GPIO_DSP_AIF1_TXDAT 0x0057
+#define LOCHNAGAR2_GPIO_DSP_AIF2_BCLK 0x0058
+#define LOCHNAGAR2_GPIO_DSP_AIF2_RXDAT 0x0059
+#define LOCHNAGAR2_GPIO_DSP_AIF2_LRCLK 0x005A
+#define LOCHNAGAR2_GPIO_DSP_AIF2_TXDAT 0x005B
+#define LOCHNAGAR2_GPIO_PSIA1_BCLK 0x005C
+#define LOCHNAGAR2_GPIO_PSIA1_RXDAT 0x005D
+#define LOCHNAGAR2_GPIO_PSIA1_LRCLK 0x005E
+#define LOCHNAGAR2_GPIO_PSIA1_TXDAT 0x005F
+#define LOCHNAGAR2_GPIO_PSIA2_BCLK 0x0060
+#define LOCHNAGAR2_GPIO_PSIA2_RXDAT 0x0061
+#define LOCHNAGAR2_GPIO_PSIA2_LRCLK 0x0062
+#define LOCHNAGAR2_GPIO_PSIA2_TXDAT 0x0063
+#define LOCHNAGAR2_GPIO_GF_AIF3_BCLK 0x0064
+#define LOCHNAGAR2_GPIO_GF_AIF3_RXDAT 0x0065
+#define LOCHNAGAR2_GPIO_GF_AIF3_LRCLK 0x0066
+#define LOCHNAGAR2_GPIO_GF_AIF3_TXDAT 0x0067
+#define LOCHNAGAR2_GPIO_GF_AIF4_BCLK 0x0068
+#define LOCHNAGAR2_GPIO_GF_AIF4_RXDAT 0x0069
+#define LOCHNAGAR2_GPIO_GF_AIF4_LRCLK 0x006A
+#define LOCHNAGAR2_GPIO_GF_AIF4_TXDAT 0x006B
+#define LOCHNAGAR2_GPIO_GF_AIF1_BCLK 0x006C
+#define LOCHNAGAR2_GPIO_GF_AIF1_RXDAT 0x006D
+#define LOCHNAGAR2_GPIO_GF_AIF1_LRCLK 0x006E
+#define LOCHNAGAR2_GPIO_GF_AIF1_TXDAT 0x006F
+#define LOCHNAGAR2_GPIO_GF_AIF2_BCLK 0x0070
+#define LOCHNAGAR2_GPIO_GF_AIF2_RXDAT 0x0071
+#define LOCHNAGAR2_GPIO_GF_AIF2_LRCLK 0x0072
+#define LOCHNAGAR2_GPIO_GF_AIF2_TXDAT 0x0073
+#define LOCHNAGAR2_GPIO_DSP_UART1_RX 0x0074
+#define LOCHNAGAR2_GPIO_DSP_UART1_TX 0x0075
+#define LOCHNAGAR2_GPIO_DSP_UART2_RX 0x0076
+#define LOCHNAGAR2_GPIO_DSP_UART2_TX 0x0077
+#define LOCHNAGAR2_GPIO_GF_UART2_RX 0x0078
+#define LOCHNAGAR2_GPIO_GF_UART2_TX 0x0079
+#define LOCHNAGAR2_GPIO_USB_UART_RX 0x007A
+#define LOCHNAGAR2_GPIO_CDC_PDMCLK1 0x007C
+#define LOCHNAGAR2_GPIO_CDC_PDMDAT1 0x007D
+#define LOCHNAGAR2_GPIO_CDC_PDMCLK2 0x007E
+#define LOCHNAGAR2_GPIO_CDC_PDMDAT2 0x007F
+#define LOCHNAGAR2_GPIO_CDC_DMICCLK1 0x0080
+#define LOCHNAGAR2_GPIO_CDC_DMICDAT1 0x0081
+#define LOCHNAGAR2_GPIO_CDC_DMICCLK2 0x0082
+#define LOCHNAGAR2_GPIO_CDC_DMICDAT2 0x0083
+#define LOCHNAGAR2_GPIO_CDC_DMICCLK3 0x0084
+#define LOCHNAGAR2_GPIO_CDC_DMICDAT3 0x0085
+#define LOCHNAGAR2_GPIO_CDC_DMICCLK4 0x0086
+#define LOCHNAGAR2_GPIO_CDC_DMICDAT4 0x0087
+#define LOCHNAGAR2_GPIO_DSP_DMICCLK1 0x0088
+#define LOCHNAGAR2_GPIO_DSP_DMICDAT1 0x0089
+#define LOCHNAGAR2_GPIO_DSP_DMICCLK2 0x008A
+#define LOCHNAGAR2_GPIO_DSP_DMICDAT2 0x008B
+#define LOCHNAGAR2_GPIO_I2C2_SCL 0x008C
+#define LOCHNAGAR2_GPIO_I2C2_SDA 0x008D
+#define LOCHNAGAR2_GPIO_I2C3_SCL 0x008E
+#define LOCHNAGAR2_GPIO_I2C3_SDA 0x008F
+#define LOCHNAGAR2_GPIO_I2C4_SCL 0x0090
+#define LOCHNAGAR2_GPIO_I2C4_SDA 0x0091
+#define LOCHNAGAR2_GPIO_DSP_STANDBY 0x0092
+#define LOCHNAGAR2_GPIO_CDC_MCLK1 0x0093
+#define LOCHNAGAR2_GPIO_CDC_MCLK2 0x0094
+#define LOCHNAGAR2_GPIO_DSP_CLKIN 0x0095
+#define LOCHNAGAR2_GPIO_PSIA1_MCLK 0x0096
+#define LOCHNAGAR2_GPIO_PSIA2_MCLK 0x0097
+#define LOCHNAGAR2_GPIO_GF_GPIO1 0x0098
+#define LOCHNAGAR2_GPIO_GF_GPIO5 0x0099
+#define LOCHNAGAR2_GPIO_DSP_GPIO20 0x009A
+#define LOCHNAGAR2_GPIO_CHANNEL1 0x00B9
+#define LOCHNAGAR2_GPIO_CHANNEL2 0x00BA
+#define LOCHNAGAR2_GPIO_CHANNEL3 0x00BB
+#define LOCHNAGAR2_GPIO_CHANNEL4 0x00BC
+#define LOCHNAGAR2_GPIO_CHANNEL5 0x00BD
+#define LOCHNAGAR2_GPIO_CHANNEL6 0x00BE
+#define LOCHNAGAR2_GPIO_CHANNEL7 0x00BF
+#define LOCHNAGAR2_GPIO_CHANNEL8 0x00C0
+#define LOCHNAGAR2_GPIO_CHANNEL9 0x00C1
+#define LOCHNAGAR2_GPIO_CHANNEL10 0x00C2
+#define LOCHNAGAR2_GPIO_CHANNEL11 0x00C3
+#define LOCHNAGAR2_GPIO_CHANNEL12 0x00C4
+#define LOCHNAGAR2_GPIO_CHANNEL13 0x00C5
+#define LOCHNAGAR2_GPIO_CHANNEL14 0x00C6
+#define LOCHNAGAR2_GPIO_CHANNEL15 0x00C7
+#define LOCHNAGAR2_GPIO_CHANNEL16 0x00C8
+#define LOCHNAGAR2_MINICARD_RESETS 0x00DF
+#define LOCHNAGAR2_ANALOGUE_PATH_CTRL1 0x00E3
+#define LOCHNAGAR2_ANALOGUE_PATH_CTRL2 0x00E4
+#define LOCHNAGAR2_COMMS_CTRL4 0x00F0
+#define LOCHNAGAR2_SPDIF_CTRL 0x00FE
+#define LOCHNAGAR2_IMON_CTRL1 0x0108
+#define LOCHNAGAR2_IMON_CTRL2 0x0109
+#define LOCHNAGAR2_IMON_CTRL3 0x010A
+#define LOCHNAGAR2_IMON_CTRL4 0x010B
+#define LOCHNAGAR2_IMON_DATA1 0x010C
+#define LOCHNAGAR2_IMON_DATA2 0x010D
+#define LOCHNAGAR2_POWER_CTRL 0x0116
+#define LOCHNAGAR2_MICVDD_CTRL1 0x0119
+#define LOCHNAGAR2_MICVDD_CTRL2 0x011B
+#define LOCHNAGAR2_VDDCORE_CDC_CTRL1 0x011E
+#define LOCHNAGAR2_VDDCORE_CDC_CTRL2 0x0120
+#define LOCHNAGAR2_SOUNDCARD_AIF_CTRL 0x0180
+
+/* (0x000D-0x001B, 0x0180) CDC_AIF1_CTRL - SOUNDCARD_AIF_CTRL */
+#define LOCHNAGAR2_AIF_ENA_MASK 0x8000
+#define LOCHNAGAR2_AIF_ENA_SHIFT 15
+#define LOCHNAGAR2_AIF_LRCLK_DIR_MASK 0x4000
+#define LOCHNAGAR2_AIF_LRCLK_DIR_SHIFT 14
+#define LOCHNAGAR2_AIF_BCLK_DIR_MASK 0x2000
+#define LOCHNAGAR2_AIF_BCLK_DIR_SHIFT 13
+#define LOCHNAGAR2_AIF_SRC_MASK 0x00FF
+#define LOCHNAGAR2_AIF_SRC_SHIFT 0
+
+/* (0x001E - 0x0027) CDC_MCLK1_CTRL - SOUNDCARD_MCLK_CTRL */
+#define LOCHNAGAR2_CLK_ENA_MASK 0x8000
+#define LOCHNAGAR2_CLK_ENA_SHIFT 15
+#define LOCHNAGAR2_CLK_SRC_MASK 0x00FF
+#define LOCHNAGAR2_CLK_SRC_SHIFT 0
+
+/* (0x0031 - 0x009A) GPIO_FPGA_GPIO1 - GPIO_DSP_GPIO20 */
+#define LOCHNAGAR2_GPIO_SRC_MASK 0x00FF
+#define LOCHNAGAR2_GPIO_SRC_SHIFT 0
+
+/* (0x00B9 - 0x00C8) GPIO_CHANNEL1 - GPIO_CHANNEL16 */
+#define LOCHNAGAR2_GPIO_CHANNEL_STS_MASK 0x8000
+#define LOCHNAGAR2_GPIO_CHANNEL_STS_SHIFT 15
+#define LOCHNAGAR2_GPIO_CHANNEL_SRC_MASK 0x00FF
+#define LOCHNAGAR2_GPIO_CHANNEL_SRC_SHIFT 0
+
+/* (0x00DF) MINICARD_RESETS */
+#define LOCHNAGAR2_DSP_RESET_MASK 0x0002
+#define LOCHNAGAR2_DSP_RESET_SHIFT 1
+#define LOCHNAGAR2_CDC_RESET_MASK 0x0001
+#define LOCHNAGAR2_CDC_RESET_SHIFT 0
+
+/* (0x00E3) ANALOGUE_PATH_CTRL1 */
+#define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_MASK 0x8000
+#define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_SHIFT 15
+#define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_STS_MASK 0x4000
+#define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_STS_SHIFT 14
+
+/* (0x00E4) ANALOGUE_PATH_CTRL2 */
+#define LOCHNAGAR2_P2_INPUT_BIAS_ENA_MASK 0x0080
+#define LOCHNAGAR2_P2_INPUT_BIAS_ENA_SHIFT 7
+#define LOCHNAGAR2_P1_INPUT_BIAS_ENA_MASK 0x0040
+#define LOCHNAGAR2_P1_INPUT_BIAS_ENA_SHIFT 6
+#define LOCHNAGAR2_P2_MICBIAS_SRC_MASK 0x0038
+#define LOCHNAGAR2_P2_MICBIAS_SRC_SHIFT 3
+#define LOCHNAGAR2_P1_MICBIAS_SRC_MASK 0x0007
+#define LOCHNAGAR2_P1_MICBIAS_SRC_SHIFT 0
+
+/* (0x00F0) COMMS_CTRL4 */
+#define LOCHNAGAR2_CDC_CIF1MODE_MASK 0x0001
+#define LOCHNAGAR2_CDC_CIF1MODE_SHIFT 0
+
+/* (0x00FE) SPDIF_CTRL */
+#define LOCHNAGAR2_SPDIF_HWMODE_MASK 0x0008
+#define LOCHNAGAR2_SPDIF_HWMODE_SHIFT 3
+#define LOCHNAGAR2_SPDIF_RESET_MASK 0x0001
+#define LOCHNAGAR2_SPDIF_RESET_SHIFT 0
+
+/* (0x0108) IMON_CTRL1 */
+#define LOCHNAGAR2_IMON_ENA_MASK 0x8000
+#define LOCHNAGAR2_IMON_ENA_SHIFT 15
+#define LOCHNAGAR2_IMON_MEASURED_CHANNELS_MASK 0x03FC
+#define LOCHNAGAR2_IMON_MEASURED_CHANNELS_SHIFT 2
+#define LOCHNAGAR2_IMON_MODE_SEL_MASK 0x0003
+#define LOCHNAGAR2_IMON_MODE_SEL_SHIFT 0
+
+/* (0x0109) IMON_CTRL2 */
+#define LOCHNAGAR2_IMON_FSR_MASK 0x03FF
+#define LOCHNAGAR2_IMON_FSR_SHIFT 0
+
+/* (0x010A) IMON_CTRL3 */
+#define LOCHNAGAR2_IMON_DONE_MASK 0x0004
+#define LOCHNAGAR2_IMON_DONE_SHIFT 2
+#define LOCHNAGAR2_IMON_CONFIGURE_MASK 0x0002
+#define LOCHNAGAR2_IMON_CONFIGURE_SHIFT 1
+#define LOCHNAGAR2_IMON_MEASURE_MASK 0x0001
+#define LOCHNAGAR2_IMON_MEASURE_SHIFT 0
+
+/* (0x010B) IMON_CTRL4 */
+#define LOCHNAGAR2_IMON_DATA_REQ_MASK 0x0080
+#define LOCHNAGAR2_IMON_DATA_REQ_SHIFT 7
+#define LOCHNAGAR2_IMON_CH_SEL_MASK 0x0070
+#define LOCHNAGAR2_IMON_CH_SEL_SHIFT 4
+#define LOCHNAGAR2_IMON_DATA_RDY_MASK 0x0008
+#define LOCHNAGAR2_IMON_DATA_RDY_SHIFT 3
+#define LOCHNAGAR2_IMON_CH_SRC_MASK 0x0007
+#define LOCHNAGAR2_IMON_CH_SRC_SHIFT 0
+
+/* (0x010C, 0x010D) IMON_DATA1, IMON_DATA2 */
+#define LOCHNAGAR2_IMON_DATA_MASK 0xFFFF
+#define LOCHNAGAR2_IMON_DATA_SHIFT 0
+
+/* (0x0116) POWER_CTRL */
+#define LOCHNAGAR2_PWR_ENA_MASK 0x0001
+#define LOCHNAGAR2_PWR_ENA_SHIFT 0
+
+/* (0x0119) MICVDD_CTRL1 */
+#define LOCHNAGAR2_MICVDD_REG_ENA_MASK 0x8000
+#define LOCHNAGAR2_MICVDD_REG_ENA_SHIFT 15
+
+/* (0x011B) MICVDD_CTRL2 */
+#define LOCHNAGAR2_MICVDD_VSEL_MASK 0x001F
+#define LOCHNAGAR2_MICVDD_VSEL_SHIFT 0
+
+/* (0x011E) VDDCORE_CDC_CTRL1 */
+#define LOCHNAGAR2_VDDCORE_CDC_REG_ENA_MASK 0x8000
+#define LOCHNAGAR2_VDDCORE_CDC_REG_ENA_SHIFT 15
+
+/* (0x0120) VDDCORE_CDC_CTRL2 */
+#define LOCHNAGAR2_VDDCORE_CDC_VSEL_MASK 0x007F
+#define LOCHNAGAR2_VDDCORE_CDC_VSEL_SHIFT 0
+
+#endif
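Every field above follows the same MASK/SHIFT convention, which maps directly onto regmap_update_bits(). As a hedged sketch, enabling the codec AIF1 interface (assuming a struct lochnagar handle as defined in lochnagar.h) might look like:

/* Sketch: enable CDC AIF1 using the field macros above. */
static int example_enable_cdc_aif1(struct lochnagar *lochnagar)
{
	return regmap_update_bits(lochnagar->regmap, LOCHNAGAR2_CDC_AIF1_CTRL,
				  LOCHNAGAR2_AIF_ENA_MASK,
				  0x1 << LOCHNAGAR2_AIF_ENA_SHIFT);
}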
diff --git a/include/linux/mfd/loongson-se.h b/include/linux/mfd/loongson-se.h
new file mode 100644
index 000000000000..07afa0c2524d
--- /dev/null
+++ b/include/linux/mfd/loongson-se.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2025 Loongson Technology Corporation Limited */
+
+#ifndef __MFD_LOONGSON_SE_H__
+#define __MFD_LOONGSON_SE_H__
+
+#define LOONGSON_ENGINE_CMD_TIMEOUT_US 10000
+#define SE_SEND_CMD_REG 0x0
+#define SE_SEND_CMD_REG_LEN 0x8
+/* Controller command ID */
+#define SE_CMD_START 0x0
+#define SE_CMD_SET_DMA 0x3
+#define SE_CMD_SET_ENGINE_CMDBUF 0x4
+
+#define SE_S2LINT_STAT 0x88
+#define SE_S2LINT_EN 0x8c
+#define SE_S2LINT_CL 0x94
+#define SE_L2SINT_STAT 0x98
+#define SE_L2SINT_SET 0xa0
+
+#define SE_INT_ALL 0xffffffff
+#define SE_INT_CONTROLLER BIT(0)
+
+#define SE_ENGINE_MAX 16
+#define SE_ENGINE_RNG 1
+#define SE_CMD_RNG 0x100
+
+#define SE_ENGINE_TPM 5
+#define SE_CMD_TPM 0x500
+
+#define SE_ENGINE_CMD_SIZE 32
+
+struct loongson_se_engine {
+ struct loongson_se *se;
+ int id;
+
+ /* Command buffer */
+ void *command;
+ void *command_ret;
+
+ void *data_buffer;
+ uint buffer_size;
+ /* Data buffer offset to DMA base */
+ uint buffer_off;
+
+ struct completion completion;
+};
+
+struct loongson_se_engine *loongson_se_init_engine(struct device *dev, int id);
+int loongson_se_send_engine_cmd(struct loongson_se_engine *engine);
+
+#endif
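The two exported functions imply a simple flow: obtain an engine by ID, fill its command buffer, then send the command. A sketch of the pattern; the command-word layout written into the buffer and the NULL-on-failure return are illustrative assumptions, only the IDs and function signatures come from this header:

/* Sketch: issue a command to the RNG engine. Layout is a placeholder. */
static int example_se_rng(struct device *dev)
{
	struct loongson_se_engine *engine;

	engine = loongson_se_init_engine(dev, SE_ENGINE_RNG);
	if (!engine)	/* assuming NULL on failure */
		return -ENODEV;

	/* Fill the command buffer before issuing the command */
	memset(engine->command, 0, SE_ENGINE_CMD_SIZE);
	*(u32 *)engine->command = SE_CMD_RNG;	/* illustrative layout */

	/* Results come back via engine->command_ret / engine->data_buffer */
	return loongson_se_send_engine_cmd(engine);
}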
diff --git a/include/linux/mfd/lp3943.h b/include/linux/mfd/lp3943.h
index 3490db782988..402f01078fcc 100644
--- a/include/linux/mfd/lp3943.h
+++ b/include/linux/mfd/lp3943.h
@@ -1,21 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI/National Semiconductor LP3943 Device
*
* Copyright 2013 Texas Instruments
*
* Author: Milo Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __MFD_LP3943_H__
#define __MFD_LP3943_H__
#include <linux/gpio.h>
-#include <linux/pwm.h>
#include <linux/regmap.h>
/* Registers */
diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h
index edbec8350a49..fe8174cc8637 100644
--- a/include/linux/mfd/lp873x.h
+++ b/include/linux/mfd/lp873x.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions to access LP873X power management chip.
*
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_LP873X_H
diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h
index d0c91ba65525..4c895072d91b 100644
--- a/include/linux/mfd/lp87565.h
+++ b/include/linux/mfd/lp87565.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions to access LP87565 power management chip.
*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_LP87565_H
@@ -17,6 +14,8 @@
enum lp87565_device_type {
LP87565_DEVICE_TYPE_UNKNOWN = 0,
+ LP87565_DEVICE_TYPE_LP87524_Q1,
+ LP87565_DEVICE_TYPE_LP87561_Q1,
LP87565_DEVICE_TYPE_LP87565_Q1,
};
@@ -223,33 +222,20 @@ enum lp87565_device_type {
#define LP87565_GPIO2_SEL BIT(1)
#define LP87565_GPIO1_SEL BIT(0)
-#define LP87565_GOIO3_OD BIT(6)
-#define LP87565_GOIO2_OD BIT(5)
-#define LP87565_GOIO1_OD BIT(4)
-#define LP87565_GOIO3_DIR BIT(2)
-#define LP87565_GOIO2_DIR BIT(1)
-#define LP87565_GOIO1_DIR BIT(0)
-
-#define LP87565_GOIO3_IN BIT(2)
-#define LP87565_GOIO2_IN BIT(1)
-#define LP87565_GOIO1_IN BIT(0)
-
-#define LP87565_GOIO3_OUT BIT(2)
-#define LP87565_GOIO2_OUT BIT(1)
-#define LP87565_GOIO1_OUT BIT(0)
-
-/* Number of step-down converters available */
-#define LP87565_NUM_BUCK 6
-
-enum LP87565_regulator_id {
- /* BUCK's */
- LP87565_BUCK_0,
- LP87565_BUCK_1,
- LP87565_BUCK_2,
- LP87565_BUCK_3,
- LP87565_BUCK_10,
- LP87565_BUCK_23,
-};
+#define LP87565_GPIO3_OD BIT(6)
+#define LP87565_GPIO2_OD BIT(5)
+#define LP87565_GPIO1_OD BIT(4)
+#define LP87565_GPIO3_DIR BIT(2)
+#define LP87565_GPIO2_DIR BIT(1)
+#define LP87565_GPIO1_DIR BIT(0)
+
+#define LP87565_GPIO3_IN BIT(2)
+#define LP87565_GPIO2_IN BIT(1)
+#define LP87565_GPIO1_IN BIT(0)
+
+#define LP87565_GPIO3_OUT BIT(2)
+#define LP87565_GPIO2_OUT BIT(1)
+#define LP87565_GPIO1_OUT BIT(0)
/**
* struct LP87565 - state holder for the LP87565 driver
@@ -266,5 +252,6 @@ struct lp87565 {
u8 rev;
u8 dev_type;
struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
};
#endif /* __LINUX_MFD_LP87565_H */
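The renamed GPIO bit definitions pair naturally with regmap_update_bits(). A sketch of configuring GPIO1 as an output and driving it high; the configuration and output register offsets are passed in as parameters because their macros are defined elsewhere in this header:

/* Sketch: set GPIO1 direction to output, then drive it high. */
static int example_lp87565_gpio1_out(struct lp87565 *lp87565,
				     unsigned int config_reg,
				     unsigned int output_reg)
{
	int ret;

	ret = regmap_update_bits(lp87565->regmap, config_reg,
				 LP87565_GPIO1_DIR, LP87565_GPIO1_DIR);
	if (ret)
		return ret;

	return regmap_update_bits(lp87565->regmap, output_reg,
				  LP87565_GPIO1_OUT, LP87565_GPIO1_OUT);
}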
diff --git a/include/linux/mfd/lp8788-isink.h b/include/linux/mfd/lp8788-isink.h
index f38262d21ff1..464dc4c937e4 100644
--- a/include/linux/mfd/lp8788-isink.h
+++ b/include/linux/mfd/lp8788-isink.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI LP8788 MFD - common definitions for current sinks
*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __ISINK_LP8788_H__
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
index 786bf6679a28..fd17bec2a33e 100644
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -1,22 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI LP8788 MFD Device
*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __MFD_LP8788_H__
#define __MFD_LP8788_H__
-#include <linux/gpio.h>
#include <linux/irqdomain.h>
-#include <linux/pwm.h>
#include <linux/regmap.h>
#define LP8788_DEV_BUCK "lp8788-buck"
@@ -92,12 +86,6 @@ enum lp8788_charger_event {
CHARGER_DETECTED,
};
-enum lp8788_bl_ctrl_mode {
- LP8788_BL_REGISTER_ONLY,
- LP8788_BL_COMB_PWM_BASED, /* PWM + I2C, changed by PWM input */
- LP8788_BL_COMB_REGISTER_BASED, /* PWM + I2C, changed by I2C */
-};
-
enum lp8788_bl_dim_mode {
LP8788_DIM_EXPONENTIAL,
LP8788_DIM_LINEAR,
@@ -163,39 +151,21 @@ struct lp8788;
/*
* lp8788_buck1_dvs
- * @gpio : gpio pin number for dvs control
* @vsel : dvs selector for buck v1 register
*/
struct lp8788_buck1_dvs {
- int gpio;
enum lp8788_dvs_sel vsel;
};
/*
* lp8788_buck2_dvs
- * @gpio : two gpio pin numbers are used for dvs
* @vsel : dvs selector for buck v2 register
*/
struct lp8788_buck2_dvs {
- int gpio[LP8788_NUM_BUCK2_DVS];
enum lp8788_dvs_sel vsel;
};
/*
- * struct lp8788_ldo_enable_pin
- *
- * Basically, all LDOs are enabled through the I2C commands.
- * But ALDO 1 ~ 5, 7, DLDO 7, 9, 11 can be enabled by external gpio pins.
- *
- * @gpio : gpio number which is used for enabling ldos
- * @init_state : initial gpio state (ex. GPIOF_OUT_INIT_LOW)
- */
-struct lp8788_ldo_enable_pin {
- int gpio;
- int init_state;
-};
-
-/*
* struct lp8788_chg_param
* @addr : charging control register address (range : 0x11 ~ 0x1C)
* @val : charging parameter value
@@ -225,31 +195,6 @@ struct lp8788_charger_platform_data {
};
/*
- * struct lp8788_backlight_platform_data
- * @name : backlight driver name. (default: "lcd-backlight")
- * @initial_brightness : initial value of backlight brightness
- * @bl_mode : brightness control by pwm or lp8788 register
- * @dim_mode : dimming mode selection
- * @full_scale : full scale current setting
- * @rise_time : brightness ramp up step time
- * @fall_time : brightness ramp down step time
- * @pwm_pol : pwm polarity setting when bl_mode is pwm based
- * @period_ns : platform specific pwm period value. unit is nano.
- Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED
- */
-struct lp8788_backlight_platform_data {
- char *name;
- int initial_brightness;
- enum lp8788_bl_ctrl_mode bl_mode;
- enum lp8788_bl_dim_mode dim_mode;
- enum lp8788_bl_full_scale_current full_scale;
- enum lp8788_bl_ramp_step rise_time;
- enum lp8788_bl_ramp_step fall_time;
- enum pwm_polarity pwm_pol;
- unsigned int period_ns;
-};
-
-/*
* struct lp8788_led_platform_data
* @name : led driver name. (default: "keyboard-backlight")
* @scale : current scale
@@ -286,12 +231,10 @@ struct lp8788_vib_platform_data {
* @buck_data : regulator initial data for buck
* @dldo_data : regulator initial data for digital ldo
* @aldo_data : regulator initial data for analog ldo
- * @buck1_dvs : gpio configurations for buck1 dvs
- * @buck2_dvs : gpio configurations for buck2 dvs
- * @ldo_pin : gpio configurations for enabling LDOs
+ * @buck1_dvs : configurations for buck1 dvs
+ * @buck2_dvs : configurations for buck2 dvs
* @chg_pdata : platform data for charger driver
* @alarm_sel : rtc alarm selection (1 or 2)
- * @bl_pdata : configurable data for backlight driver
* @led_pdata : configurable data for led driver
* @vib_pdata : configurable data for vibrator driver
* @adc_pdata : iio map data for adc driver
@@ -306,7 +249,6 @@ struct lp8788_platform_data {
struct regulator_init_data *aldo_data[LP8788_NUM_ALDOS];
struct lp8788_buck1_dvs *buck1_dvs;
struct lp8788_buck2_dvs *buck2_dvs;
- struct lp8788_ldo_enable_pin *ldo_pin[EN_LDOS_MAX];
/* charger */
struct lp8788_charger_platform_data *chg_pdata;
@@ -314,9 +256,6 @@ struct lp8788_platform_data {
/* rtc alarm */
enum lp8788_alarm_sel alarm_sel;
- /* backlight */
- struct lp8788_backlight_platform_data *bl_pdata;
-
/* current sinks */
struct lp8788_led_platform_data *led_pdata;
struct lp8788_vib_platform_data *vib_pdata;
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index fba8fcb54f8c..1fbda1f8967d 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -1,33 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/drivers/mfd/lpc_ich.h
*
* Copyright (c) 2012 Extreme Engineering Solution, Inc.
* Author: Aaron Sierra <asierra@xes-inc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef LPC_ICH_H
#define LPC_ICH_H
-#include <linux/platform_data/intel-spi.h>
+#include <linux/platform_data/x86/spi-intel.h>
/* GPIO resources */
#define ICH_RES_GPIO 0
#define ICH_RES_GPE0 1
/* GPIO compatibility */
-enum {
+enum lpc_gpio_versions {
ICH_I3100_GPIO,
ICH_V5_GPIO,
ICH_V6_GPIO,
@@ -38,11 +26,14 @@ enum {
AVOTON_GPIO,
};
+struct lpc_ich_gpio_info;
+
struct lpc_ich_info {
char name[32];
unsigned int iTCO_version;
- unsigned int gpio_version;
+ enum lpc_gpio_versions gpio_version;
enum intel_spi_type spi_type;
+ const struct lpc_ich_gpio_info *gpio_info;
u8 use_gpio;
};
diff --git a/include/linux/mfd/macsmc.h b/include/linux/mfd/macsmc.h
new file mode 100644
index 000000000000..cc09ecce0df7
--- /dev/null
+++ b/include/linux/mfd/macsmc.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SMC (System Management Controller) core definitions
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _LINUX_MFD_MACSMC_H
+#define _LINUX_MFD_MACSMC_H
+
+#include <linux/soc/apple/rtkit.h>
+
+/**
+ * typedef smc_key - Alias for u32 to be used for SMC keys
+ *
+ * SMC keys are 32-bit integers containing packed ASCII characters in natural
+ * integer order, i.e. 0xAABBCCDD, which represents the FourCC ABCD.
+ * The SMC driver is designed with this assumption and ensures the right
+ * endianness is used when these are stored to memory and sent to or received
+ * from the actual SMC firmware (which can be done either in shared memory or
+ * as a 64-bit mailbox message on Apple Silicon).
+ * Internally, SMC stores these keys in a table sorted lexicographically and
+ * allows resolving an index into this table to the corresponding SMC key.
+ * Thus, storing keys as u32 is very convenient as it allows, e.g., the use of
+ * normal comparison operators, which directly map to the natural order used
+ * by SMC firmware.
+ *
+ * This simple type alias is introduced to allow easy recognition of SMC key
+ * variables and arguments.
+ */
+typedef u32 smc_key;
+
+/**
+ * SMC_KEY - Convert FourCC SMC keys in source code to smc_key
+ *
+ * This macro can be used to easily define FourCC SMC keys in source code
+ * and convert these to u32 / smc_key, e.g. SMC_KEY(NTAP) will expand to
+ * 0x4e544150.
+ *
+ * @s: FourCC SMC key to be converted
+ */
+#define SMC_KEY(s) (smc_key)(_SMC_KEY(#s))
+#define _SMC_KEY(s) (((s)[0] << 24) | ((s)[1] << 16) | ((s)[2] << 8) | (s)[3])
+#define __SMC_KEY(a, b, c, d) (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d)))
+
+#define APPLE_SMC_READABLE BIT(7)
+#define APPLE_SMC_WRITABLE BIT(6)
+#define APPLE_SMC_FUNCTION BIT(4)
+
+/**
+ * struct apple_smc_key_info - Information for an SMC key as returned by SMC
+ * @type_code: FourCC code indicating the type for this key.
+ * Known types:
+ * ch8*: ASCII string
+ * flag: Boolean, 1 or 0
+ * flt: 32-bit single-precision IEEE 754 float
+ * hex: Binary data
+ * ioft: 64-bit unsigned fixed-point integer (48.16)
+ * {si,ui}{8,16,32,64}: Signed/Unsigned 8-/16-/32-/64-bit integer
+ * @size: Size of the buffer associated with this key
+ * @flags: Bitfield encoding flags (APPLE_SMC_{READABLE,WRITABLE,FUNCTION})
+ */
+struct apple_smc_key_info {
+ u32 type_code;
+ u8 size;
+ u8 flags;
+};
+
+/**
+ * enum apple_smc_boot_stage - SMC boot stage
+ * @APPLE_SMC_BOOTING: SMC is booting
+ * @APPLE_SMC_INITIALIZED: SMC is initialized and ready to use
+ * @APPLE_SMC_ERROR_NO_SHMEM: Shared memory could not be initialized during boot
+ * @APPLE_SMC_ERROR_CRASHED: SMC has crashed
+ */
+enum apple_smc_boot_stage {
+ APPLE_SMC_BOOTING,
+ APPLE_SMC_INITIALIZED,
+ APPLE_SMC_ERROR_NO_SHMEM,
+ APPLE_SMC_ERROR_CRASHED
+};
+
+/**
+ * struct apple_smc
+ * @dev: Underlying device struct for the physical backend device
+ * @key_count: Number of available SMC keys
+ * @first_key: First valid SMC key
+ * @last_key: Last valid SMC key
+ * @event_handlers: Notifier call chain for events received from SMC
+ * @rtk: Pointer to Apple RTKit instance
+ * @init_done: Completion for initialization
+ * @boot_stage: Current boot stage of SMC
+ * @sram: Pointer to SRAM resource
+ * @sram_base: SRAM base address
+ * @shmem: RTKit shared memory structure for SRAM
+ * @msg_id: Current message id for commands, will be incremented for each command
+ * @atomic_mode: Flag set when atomic mode is entered
+ * @atomic_pending: Flag indicating pending atomic command
+ * @cmd_done: Completion for command execution in non-atomic mode
+ * @cmd_ret: Return value from SMC for last command
+ * @mutex: Mutex for non-atomic mode
+ * @lock: Spinlock for atomic mode
+ */
+struct apple_smc {
+ struct device *dev;
+
+ u32 key_count;
+ smc_key first_key;
+ smc_key last_key;
+
+ struct blocking_notifier_head event_handlers;
+
+ struct apple_rtkit *rtk;
+
+ struct completion init_done;
+ enum apple_smc_boot_stage boot_stage;
+
+ struct resource *sram;
+ void __iomem *sram_base;
+ struct apple_rtkit_shmem shmem;
+
+ unsigned int msg_id;
+
+ bool atomic_mode;
+ bool atomic_pending;
+ struct completion cmd_done;
+ u64 cmd_ret;
+
+ struct mutex mutex;
+ spinlock_t lock;
+};
+
+/**
+ * apple_smc_read - Read size bytes from given SMC key into buf
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key to be read
+ * @buf: Buffer into which size bytes of data will be read from SMC
+ * @size: Number of bytes to be read into buf
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size);
+
+/**
+ * apple_smc_write - Write size bytes into given SMC key from buf
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @buf: Buffer from which size bytes of data will be written to SMC
+ * @size: Number of bytes to be written
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_write(struct apple_smc *smc, smc_key key, const void *buf, size_t size);
+
+/**
+ * apple_smc_enter_atomic - Enter atomic mode to be able to use apple_smc_write_atomic
+ * @smc: Pointer to apple_smc struct
+ *
+ * This function switches the SMC backend to atomic mode which allows the
+ * use of apple_smc_write_atomic while disabling *all* other functions.
+ * This is only used for shutdown/reboot, which requires writing to an SMC
+ * key from atomic context.
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_enter_atomic(struct apple_smc *smc);
+
+/**
+ * apple_smc_write_atomic - Write size bytes into given SMC key from buf without sleeping
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @buf: Buffer from which size bytes of data will be written to SMC
+ * @size: Number of bytes to be written
+ *
+ * Note that this function will fail if apple_smc_enter_atomic hasn't been
+ * called before.
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, const void *buf, size_t size);
+
+/**
+ * apple_smc_rw - Write and then read using the given SMC key
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @wbuf: Buffer from which size bytes of data will be written to SMC
+ * @wsize: Number of bytes to be written
+ * @rbuf: Buffer to which size bytes of data will be read from SMC
+ * @rsize: Number of bytes to be read
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_rw(struct apple_smc *smc, smc_key key, const void *wbuf, size_t wsize,
+ void *rbuf, size_t rsize);
+
+/**
+ * apple_smc_get_key_by_index - Given an index return the corresponding SMC key
+ * @smc: Pointer to apple_smc struct
+ * @index: Index to be resolved
+ * @key: Buffer for SMC key to be returned
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key);
+
+/**
+ * apple_smc_get_key_info - Get key information from SMC
+ * @smc: Pointer to apple_smc struct
+ * @key: Key to acquire information for
+ * @info: Pointer to struct apple_smc_key_info which will be filled
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info);
+
+/**
+ * apple_smc_key_exists - Check if the given SMC key exists
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key to be checked
+ *
+ * Return: True if the key exists, false otherwise
+ */
+static inline bool apple_smc_key_exists(struct apple_smc *smc, smc_key key)
+{
+ return apple_smc_get_key_info(smc, key, NULL) >= 0;
+}
+
+#define APPLE_SMC_TYPE_OPS(type) \
+ static inline int apple_smc_read_##type(struct apple_smc *smc, smc_key key, type *p) \
+ { \
+ int ret = apple_smc_read(smc, key, p, sizeof(*p)); \
+ return (ret < 0) ? ret : ((ret != sizeof(*p)) ? -EINVAL : 0); \
+ } \
+ static inline int apple_smc_write_##type(struct apple_smc *smc, smc_key key, type p) \
+ { \
+ return apple_smc_write(smc, key, &p, sizeof(p)); \
+ } \
+ static inline int apple_smc_write_##type##_atomic(struct apple_smc *smc, smc_key key, type p) \
+ { \
+ return apple_smc_write_atomic(smc, key, &p, sizeof(p)); \
+ } \
+ static inline int apple_smc_rw_##type(struct apple_smc *smc, smc_key key, \
+ type w, type *r) \
+ { \
+ int ret = apple_smc_rw(smc, key, &w, sizeof(w), r, sizeof(*r)); \
+ return (ret < 0) ? ret : ((ret != sizeof(*r)) ? -EINVAL : 0); \
+ }
+
+APPLE_SMC_TYPE_OPS(u64)
+APPLE_SMC_TYPE_OPS(u32)
+APPLE_SMC_TYPE_OPS(u16)
+APPLE_SMC_TYPE_OPS(u8)
+APPLE_SMC_TYPE_OPS(s64)
+APPLE_SMC_TYPE_OPS(s32)
+APPLE_SMC_TYPE_OPS(s16)
+APPLE_SMC_TYPE_OPS(s8)
+
+static inline int apple_smc_read_flag(struct apple_smc *smc, smc_key key, bool *flag)
+{
+ u8 val;
+ int ret = apple_smc_read_u8(smc, key, &val);
+
+ if (ret < 0)
+ return ret;
+
+ *flag = val ? true : false;
+ return ret;
+}
+
+static inline int apple_smc_write_flag(struct apple_smc *smc, smc_key key, bool state)
+{
+ return apple_smc_write_u8(smc, key, state ? 1 : 0);
+}
+
+static inline int apple_smc_write_flag_atomic(struct apple_smc *smc, smc_key key, bool state)
+{
+ return apple_smc_write_u8_atomic(smc, key, state ? 1 : 0);
+}
+
+#endif
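Putting the pieces together: SMC_KEY() builds the key and the APPLE_SMC_TYPE_OPS() helpers add size checking, so a typed read is a single call. A minimal sketch; NTAP is only the example FourCC from the SMC_KEY documentation above, not a known key:

/* Sketch: read a u32 key. "NTAP" is the documentation's example FourCC. */
static int example_smc_read(struct apple_smc *smc)
{
	u32 val;
	int ret;

	if (!apple_smc_key_exists(smc, SMC_KEY(NTAP)))
		return -ENODEV;

	ret = apple_smc_read_u32(smc, SMC_KEY(NTAP), &val);
	if (ret)
		return ret;

	dev_info(smc->dev, "NTAP = %u\n", val);
	return 0;
}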
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
new file mode 100644
index 000000000000..03a8a788424a
--- /dev/null
+++ b/include/linux/mfd/madera/core.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MFD internals for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ */
+
+#ifndef MADERA_CORE_H
+#define MADERA_CORE_H
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/madera/pdata.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+enum madera_type {
+ /* 0 is reserved for indicating failure to identify */
+ CS47L35 = 1,
+ CS47L85 = 2,
+ CS47L90 = 3,
+ CS47L91 = 4,
+ CS47L92 = 5,
+ CS47L93 = 6,
+ WM1840 = 7,
+ CS47L15 = 8,
+ CS42L92 = 9,
+};
+
+enum {
+ MADERA_MCLK1,
+ MADERA_MCLK2,
+ MADERA_MCLK3,
+ MADERA_NUM_MCLK
+};
+
+#define MADERA_MAX_CORE_SUPPLIES 2
+#define MADERA_MAX_GPIOS 40
+
+#define CS47L15_NUM_GPIOS 15
+#define CS47L35_NUM_GPIOS 16
+#define CS47L85_NUM_GPIOS 40
+#define CS47L90_NUM_GPIOS 38
+#define CS47L92_NUM_GPIOS 16
+
+#define MADERA_MAX_MICBIAS 4
+
+#define MADERA_MAX_HP_OUTPUT 3
+
+/* Notifier events */
+#define MADERA_NOTIFY_VOICE_TRIGGER 0x1
+#define MADERA_NOTIFY_HPDET 0x2
+#define MADERA_NOTIFY_MICDET 0x4
+
+/* GPIO Function Definitions */
+#define MADERA_GP_FN_ALTERNATE 0x00
+#define MADERA_GP_FN_GPIO 0x01
+#define MADERA_GP_FN_DSP_GPIO 0x02
+#define MADERA_GP_FN_IRQ1 0x03
+#define MADERA_GP_FN_IRQ2 0x04
+#define MADERA_GP_FN_FLL1_CLOCK 0x10
+#define MADERA_GP_FN_FLL2_CLOCK 0x11
+#define MADERA_GP_FN_FLL3_CLOCK 0x12
+#define MADERA_GP_FN_FLLAO_CLOCK 0x13
+#define MADERA_GP_FN_FLL1_LOCK 0x18
+#define MADERA_GP_FN_FLL2_LOCK 0x19
+#define MADERA_GP_FN_FLL3_LOCK 0x1A
+#define MADERA_GP_FN_FLLAO_LOCK 0x1B
+#define MADERA_GP_FN_OPCLK_OUT 0x40
+#define MADERA_GP_FN_OPCLK_ASYNC_OUT 0x41
+#define MADERA_GP_FN_PWM1 0x48
+#define MADERA_GP_FN_PWM2 0x49
+#define MADERA_GP_FN_SPDIF_OUT 0x4C
+#define MADERA_GP_FN_HEADPHONE_DET 0x50
+#define MADERA_GP_FN_MIC_DET 0x58
+#define MADERA_GP_FN_DRC1_SIGNAL_DETECT 0x80
+#define MADERA_GP_FN_DRC2_SIGNAL_DETECT 0x81
+#define MADERA_GP_FN_ASRC1_IN1_LOCK 0x88
+#define MADERA_GP_FN_ASRC1_IN2_LOCK 0x89
+#define MADERA_GP_FN_ASRC2_IN1_LOCK 0x8A
+#define MADERA_GP_FN_ASRC2_IN2_LOCK 0x8B
+#define MADERA_GP_FN_DSP_IRQ1 0xA0
+#define MADERA_GP_FN_DSP_IRQ2 0xA1
+#define MADERA_GP_FN_DSP_IRQ3 0xA2
+#define MADERA_GP_FN_DSP_IRQ4 0xA3
+#define MADERA_GP_FN_DSP_IRQ5 0xA4
+#define MADERA_GP_FN_DSP_IRQ6 0xA5
+#define MADERA_GP_FN_DSP_IRQ7 0xA6
+#define MADERA_GP_FN_DSP_IRQ8 0xA7
+#define MADERA_GP_FN_DSP_IRQ9 0xA8
+#define MADERA_GP_FN_DSP_IRQ10 0xA9
+#define MADERA_GP_FN_DSP_IRQ11 0xAA
+#define MADERA_GP_FN_DSP_IRQ12 0xAB
+#define MADERA_GP_FN_DSP_IRQ13 0xAC
+#define MADERA_GP_FN_DSP_IRQ14 0xAD
+#define MADERA_GP_FN_DSP_IRQ15 0xAE
+#define MADERA_GP_FN_DSP_IRQ16 0xAF
+#define MADERA_GP_FN_HPOUT1L_SC 0xB0
+#define MADERA_GP_FN_HPOUT1R_SC 0xB1
+#define MADERA_GP_FN_HPOUT2L_SC 0xB2
+#define MADERA_GP_FN_HPOUT2R_SC 0xB3
+#define MADERA_GP_FN_HPOUT3L_SC 0xB4
+#define MADERA_GP_FN_HPOUT4R_SC 0xB5
+#define MADERA_GP_FN_SPKOUTL_SC 0xB6
+#define MADERA_GP_FN_SPKOUTR_SC 0xB7
+#define MADERA_GP_FN_HPOUT1L_ENA 0xC0
+#define MADERA_GP_FN_HPOUT1R_ENA 0xC1
+#define MADERA_GP_FN_HPOUT2L_ENA 0xC2
+#define MADERA_GP_FN_HPOUT2R_ENA 0xC3
+#define MADERA_GP_FN_HPOUT3L_ENA 0xC4
+#define MADERA_GP_FN_HPOUT4R_ENA 0xC5
+#define MADERA_GP_FN_SPKOUTL_ENA 0xC6
+#define MADERA_GP_FN_SPKOUTR_ENA 0xC7
+#define MADERA_GP_FN_HPOUT1L_DIS 0xD0
+#define MADERA_GP_FN_HPOUT1R_DIS 0xD1
+#define MADERA_GP_FN_HPOUT2L_DIS 0xD2
+#define MADERA_GP_FN_HPOUT2R_DIS 0xD3
+#define MADERA_GP_FN_HPOUT3L_DIS 0xD4
+#define MADERA_GP_FN_HPOUT4R_DIS 0xD5
+#define MADERA_GP_FN_SPKOUTL_DIS 0xD6
+#define MADERA_GP_FN_SPKOUTR_DIS 0xD7
+#define MADERA_GP_FN_SPK_SHUTDOWN 0xE0
+#define MADERA_GP_FN_SPK_OVH_SHUTDOWN 0xE1
+#define MADERA_GP_FN_SPK_OVH_WARN 0xE2
+#define MADERA_GP_FN_TIMER1_STATUS 0x140
+#define MADERA_GP_FN_TIMER2_STATUS 0x141
+#define MADERA_GP_FN_TIMER3_STATUS 0x142
+#define MADERA_GP_FN_TIMER4_STATUS 0x143
+#define MADERA_GP_FN_TIMER5_STATUS 0x144
+#define MADERA_GP_FN_TIMER6_STATUS 0x145
+#define MADERA_GP_FN_TIMER7_STATUS 0x146
+#define MADERA_GP_FN_TIMER8_STATUS 0x147
+#define MADERA_GP_FN_EVENTLOG1_FIFO_STS 0x150
+#define MADERA_GP_FN_EVENTLOG2_FIFO_STS 0x151
+#define MADERA_GP_FN_EVENTLOG3_FIFO_STS 0x152
+#define MADERA_GP_FN_EVENTLOG4_FIFO_STS 0x153
+#define MADERA_GP_FN_EVENTLOG5_FIFO_STS 0x154
+#define MADERA_GP_FN_EVENTLOG6_FIFO_STS 0x155
+#define MADERA_GP_FN_EVENTLOG7_FIFO_STS 0x156
+#define MADERA_GP_FN_EVENTLOG8_FIFO_STS 0x157
+
+struct snd_soc_dapm_context;
+
+/*
+ * struct madera - internal data shared by the set of Madera drivers
+ *
+ * This should not be used by anything except child drivers of the Madera MFD
+ *
+ * @regmap: pointer to the regmap instance for 16-bit registers
+ * @regmap_32bit: pointer to the regmap instance for 32-bit registers
+ * @dev: pointer to the MFD device
+ * @type: type of codec
+ * @rev: silicon revision
+ * @type_name: display name of this codec
+ * @num_core_supplies: number of core supply regulators
+ * @core_supplies: list of core supplies that are always required
+ * @dcvdd: pointer to DCVDD regulator
+ * @internal_dcvdd: true if DCVDD is supplied from the internal LDO1
+ * @reset_errata: true if this codec needs the reset-sequence errata workaround
+ * @pdata: our pdata
+ * @irq_dev: the irqchip child driver device
+ * @irq_data: pointer to irqchip data for the child irqchip driver
+ * @irq: host irq number from SPI or I2C configuration
+ * @mclk: Structure holding clock supplies
+ * @out_clamp: indicates output clamp state for each analogue output
+ * @out_shorted: indicates short circuit state for each analogue output
+ * @hp_ena: bitflags of enable state for the headphone outputs
+ * @num_micbias: number of MICBIAS outputs
+ * @num_childbias: number of child biases for each MICBIAS
+ * @dapm: pointer to codec driver DAPM context
+ * @dapm_ptr_lock: protects accesses to the @dapm pointer
+ * @notifier: notifier for signalling events to ASoC machine driver
+ */
+struct madera {
+ struct regmap *regmap;
+ struct regmap *regmap_32bit;
+
+ struct device *dev;
+
+ enum madera_type type;
+ unsigned int rev;
+ const char *type_name;
+
+ int num_core_supplies;
+ struct regulator_bulk_data core_supplies[MADERA_MAX_CORE_SUPPLIES];
+ struct regulator *dcvdd;
+ bool internal_dcvdd;
+ bool reset_errata;
+
+ struct madera_pdata pdata;
+
+ struct device *irq_dev;
+ struct regmap_irq_chip_data *irq_data;
+ int irq;
+
+ struct clk_bulk_data mclk[MADERA_NUM_MCLK];
+
+ unsigned int num_micbias;
+ unsigned int num_childbias[MADERA_MAX_MICBIAS];
+
+ struct snd_soc_dapm_context *dapm;
+ struct mutex dapm_ptr_lock;
+ unsigned int hp_ena;
+ bool out_clamp[MADERA_MAX_HP_OUTPUT];
+ bool out_shorted[MADERA_MAX_HP_OUTPUT];
+
+ struct blocking_notifier_head notifier;
+};
+#endif
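For context, child drivers of the Madera MFD conventionally fetch this shared struct from the parent device's drvdata and do all register I/O through the shared regmaps. A hedged sketch of that pattern (the probe wiring is illustrative, not taken from this patch):

	#include <linux/mfd/madera/core.h>
	#include <linux/mfd/madera/registers.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	static int madera_child_probe(struct platform_device *pdev)
	{
		/* The MFD core sets the shared struct as the parent's drvdata */
		struct madera *madera = dev_get_drvdata(pdev->dev.parent);
		unsigned int rev;
		int ret;

		/* 16-bit registers are accessed through madera->regmap */
		ret = regmap_read(madera->regmap, MADERA_HARDWARE_REVISION, &rev);
		if (ret)
			return ret;

		dev_info(&pdev->dev, "%s revision %u\n", madera->type_name, rev);

		return 0;
	}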
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
new file mode 100644
index 000000000000..7e84738cbb20
--- /dev/null
+++ b/include/linux/mfd/madera/pdata.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Platform data for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ */
+
+#ifndef MADERA_PDATA_H
+#define MADERA_PDATA_H
+
+#include <linux/regulator/arizona-ldo1.h>
+#include <linux/regulator/arizona-micsupp.h>
+#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
+#include <sound/madera-pdata.h>
+
+#define MADERA_MAX_MICBIAS 4
+#define MADERA_MAX_CHILD_MICBIAS 4
+
+#define MADERA_MAX_GPSW 2
+
+struct gpio_desc;
+struct pinctrl_map;
+
+/**
+ * struct madera_pdata - Configuration data for Madera devices
+ *
+ * @reset: GPIO controlling /RESET (NULL = none)
+ * @ldo1: Substruct of pdata for the LDO1 regulator
+ * @micvdd: Substruct of pdata for the MICVDD regulator
+ * @irq_flags: Mode for primary IRQ (defaults to active low)
+ * @gpio_base: Base GPIO number
+ * @gpio_configs: Array of GPIO configurations (See
+ * Documentation/driver-api/pin-control.rst)
+ * @n_gpio_configs: Number of entries in gpio_configs
+ * @gpsw: General purpose switch mode setting. Depends on the external
+ * hardware connected to the switch. (See the SW1_MODE field
+ * in the datasheet for the available values for your codec)
+ * @codec: Substruct of pdata for the ASoC codec driver
+ */
+struct madera_pdata {
+ struct gpio_desc *reset;
+
+ struct arizona_ldo1_pdata ldo1;
+ struct arizona_micsupp_pdata micvdd;
+
+ unsigned int irq_flags;
+ int gpio_base;
+
+ const struct pinctrl_map *gpio_configs;
+ int n_gpio_configs;
+
+ u32 gpsw[MADERA_MAX_GPSW];
+
+ struct madera_codec_pdata codec;
+};
+
+#endif
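A board file would typically instantiate this pdata and attach it to the SPI/I2C device. The field values below are illustrative examples only; real settings must come from the board design and the codec datasheet:

	#include <linux/interrupt.h>
	#include <linux/mfd/madera/pdata.h>

	static struct madera_pdata example_madera_pdata = {
		.irq_flags	= IRQF_TRIGGER_LOW,	/* primary IRQ is active low */
		.gpio_base	= -1,			/* let gpiolib pick a base */
		.gpsw		= { 0x1, 0x0 },		/* example SW1_MODE values */
	};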
diff --git a/include/linux/mfd/madera/registers.h b/include/linux/mfd/madera/registers.h
new file mode 100644
index 000000000000..b44aeb461d0c
--- /dev/null
+++ b/include/linux/mfd/madera/registers.h
@@ -0,0 +1,3449 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Madera register definitions
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ */
+
+#ifndef MADERA_REGISTERS_H
+#define MADERA_REGISTERS_H
+
+/*
+ * Register Addresses.
+ */
+#define MADERA_SOFTWARE_RESET 0x00
+#define MADERA_HARDWARE_REVISION 0x01
+#define MADERA_CTRL_IF_CFG_1 0x08
+#define MADERA_CTRL_IF_CFG_2 0x09
+#define MADERA_CTRL_IF_CFG_3 0x0A
+#define MADERA_WRITE_SEQUENCER_CTRL_0 0x16
+#define MADERA_WRITE_SEQUENCER_CTRL_1 0x17
+#define MADERA_WRITE_SEQUENCER_CTRL_2 0x18
+#define MADERA_TONE_GENERATOR_1 0x20
+#define MADERA_TONE_GENERATOR_2 0x21
+#define MADERA_TONE_GENERATOR_3 0x22
+#define MADERA_TONE_GENERATOR_4 0x23
+#define MADERA_TONE_GENERATOR_5 0x24
+#define MADERA_PWM_DRIVE_1 0x30
+#define MADERA_PWM_DRIVE_2 0x31
+#define MADERA_PWM_DRIVE_3 0x32
+#define MADERA_SEQUENCE_CONTROL 0x41
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64
+#define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x66
+#define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x67
+#define MADERA_HAPTICS_CONTROL_1 0x90
+#define MADERA_HAPTICS_CONTROL_2 0x91
+#define MADERA_HAPTICS_PHASE_1_INTENSITY 0x92
+#define MADERA_HAPTICS_PHASE_1_DURATION 0x93
+#define MADERA_HAPTICS_PHASE_2_INTENSITY 0x94
+#define MADERA_HAPTICS_PHASE_2_DURATION 0x95
+#define MADERA_HAPTICS_PHASE_3_INTENSITY 0x96
+#define MADERA_HAPTICS_PHASE_3_DURATION 0x97
+#define MADERA_HAPTICS_STATUS 0x98
+#define MADERA_COMFORT_NOISE_GENERATOR 0xA0
+#define MADERA_CLOCK_32K_1 0x100
+#define MADERA_SYSTEM_CLOCK_1 0x101
+#define MADERA_SAMPLE_RATE_1 0x102
+#define MADERA_SAMPLE_RATE_2 0x103
+#define MADERA_SAMPLE_RATE_3 0x104
+#define MADERA_SAMPLE_RATE_1_STATUS 0x10A
+#define MADERA_SAMPLE_RATE_2_STATUS 0x10B
+#define MADERA_SAMPLE_RATE_3_STATUS 0x10C
+#define MADERA_ASYNC_CLOCK_1 0x112
+#define MADERA_ASYNC_SAMPLE_RATE_1 0x113
+#define MADERA_ASYNC_SAMPLE_RATE_2 0x114
+#define MADERA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B
+#define MADERA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C
+#define MADERA_DSP_CLOCK_1 0x120
+#define MADERA_DSP_CLOCK_2 0x122
+#define MADERA_OUTPUT_SYSTEM_CLOCK 0x149
+#define MADERA_OUTPUT_ASYNC_CLOCK 0x14A
+#define MADERA_RATE_ESTIMATOR_1 0x152
+#define MADERA_RATE_ESTIMATOR_2 0x153
+#define MADERA_RATE_ESTIMATOR_3 0x154
+#define MADERA_RATE_ESTIMATOR_4 0x155
+#define MADERA_RATE_ESTIMATOR_5 0x156
+#define MADERA_FLL1_CONTROL_1 0x171
+#define MADERA_FLL1_CONTROL_2 0x172
+#define MADERA_FLL1_CONTROL_3 0x173
+#define MADERA_FLL1_CONTROL_4 0x174
+#define MADERA_FLL1_CONTROL_5 0x175
+#define MADERA_FLL1_CONTROL_6 0x176
+#define CS47L92_FLL1_CONTROL_7 0x177
+#define CS47L92_FLL1_CONTROL_8 0x178
+#define MADERA_FLL1_CONTROL_7 0x179
+#define CS47L92_FLL1_CONTROL_9 0x179
+#define MADERA_FLL1_EFS_2 0x17A
+#define CS47L92_FLL1_CONTROL_10 0x17A
+#define MADERA_FLL1_CONTROL_11 0x17B
+#define MADERA_FLL1_DIGITAL_TEST_1 0x17D
+#define CS47L35_FLL1_SYNCHRONISER_1 0x17F
+#define CS47L35_FLL1_SYNCHRONISER_2 0x180
+#define CS47L35_FLL1_SYNCHRONISER_3 0x181
+#define CS47L35_FLL1_SYNCHRONISER_4 0x182
+#define CS47L35_FLL1_SYNCHRONISER_5 0x183
+#define CS47L35_FLL1_SYNCHRONISER_6 0x184
+#define CS47L35_FLL1_SYNCHRONISER_7 0x185
+#define CS47L35_FLL1_SPREAD_SPECTRUM 0x187
+#define CS47L35_FLL1_GPIO_CLOCK 0x188
+#define MADERA_FLL1_SYNCHRONISER_1 0x181
+#define MADERA_FLL1_SYNCHRONISER_2 0x182
+#define MADERA_FLL1_SYNCHRONISER_3 0x183
+#define MADERA_FLL1_SYNCHRONISER_4 0x184
+#define MADERA_FLL1_SYNCHRONISER_5 0x185
+#define MADERA_FLL1_SYNCHRONISER_6 0x186
+#define MADERA_FLL1_SYNCHRONISER_7 0x187
+#define MADERA_FLL1_SPREAD_SPECTRUM 0x189
+#define MADERA_FLL1_GPIO_CLOCK 0x18A
+#define CS47L92_FLL1_GPIO_CLOCK 0x18E
+#define MADERA_FLL2_CONTROL_1 0x191
+#define MADERA_FLL2_CONTROL_2 0x192
+#define MADERA_FLL2_CONTROL_3 0x193
+#define MADERA_FLL2_CONTROL_4 0x194
+#define MADERA_FLL2_CONTROL_5 0x195
+#define MADERA_FLL2_CONTROL_6 0x196
+#define CS47L92_FLL2_CONTROL_7 0x197
+#define CS47L92_FLL2_CONTROL_8 0x198
+#define MADERA_FLL2_CONTROL_7 0x199
+#define CS47L92_FLL2_CONTROL_9 0x199
+#define MADERA_FLL2_EFS_2 0x19A
+#define CS47L92_FLL2_CONTROL_10 0x19A
+#define MADERA_FLL2_CONTROL_11 0x19B
+#define MADERA_FLL2_DIGITAL_TEST_1 0x19D
+#define MADERA_FLL2_SYNCHRONISER_1 0x1A1
+#define MADERA_FLL2_SYNCHRONISER_2 0x1A2
+#define MADERA_FLL2_SYNCHRONISER_3 0x1A3
+#define MADERA_FLL2_SYNCHRONISER_4 0x1A4
+#define MADERA_FLL2_SYNCHRONISER_5 0x1A5
+#define MADERA_FLL2_SYNCHRONISER_6 0x1A6
+#define MADERA_FLL2_SYNCHRONISER_7 0x1A7
+#define MADERA_FLL2_SPREAD_SPECTRUM 0x1A9
+#define MADERA_FLL2_GPIO_CLOCK 0x1AA
+#define CS47L92_FLL2_GPIO_CLOCK 0x1AE
+#define MADERA_FLL3_CONTROL_1 0x1B1
+#define MADERA_FLL3_CONTROL_2 0x1B2
+#define MADERA_FLL3_CONTROL_3 0x1B3
+#define MADERA_FLL3_CONTROL_4 0x1B4
+#define MADERA_FLL3_CONTROL_5 0x1B5
+#define MADERA_FLL3_CONTROL_6 0x1B6
+#define MADERA_FLL3_CONTROL_7 0x1B9
+#define MADERA_FLL3_SYNCHRONISER_1 0x1C1
+#define MADERA_FLL3_SYNCHRONISER_2 0x1C2
+#define MADERA_FLL3_SYNCHRONISER_3 0x1C3
+#define MADERA_FLL3_SYNCHRONISER_4 0x1C4
+#define MADERA_FLL3_SYNCHRONISER_5 0x1C5
+#define MADERA_FLL3_SYNCHRONISER_6 0x1C6
+#define MADERA_FLL3_SYNCHRONISER_7 0x1C7
+#define MADERA_FLL3_SPREAD_SPECTRUM 0x1C9
+#define MADERA_FLL3_GPIO_CLOCK 0x1CA
+#define MADERA_FLLAO_CONTROL_1 0x1D1
+#define MADERA_FLLAO_CONTROL_2 0x1D2
+#define MADERA_FLLAO_CONTROL_3 0x1D3
+#define MADERA_FLLAO_CONTROL_4 0x1D4
+#define MADERA_FLLAO_CONTROL_5 0x1D5
+#define MADERA_FLLAO_CONTROL_6 0x1D6
+#define MADERA_FLLAO_CONTROL_7 0x1D8
+#define MADERA_FLLAO_CONTROL_8 0x1DA
+#define MADERA_FLLAO_CONTROL_9 0x1DB
+#define MADERA_FLLAO_CONTROL_10 0x1DC
+#define MADERA_FLLAO_CONTROL_11 0x1DD
+#define MADERA_MIC_CHARGE_PUMP_1 0x200
+#define MADERA_HP_CHARGE_PUMP_8 0x20B
+#define MADERA_LDO1_CONTROL_1 0x210
+#define MADERA_LDO2_CONTROL_1 0x213
+#define MADERA_MIC_BIAS_CTRL_1 0x218
+#define MADERA_MIC_BIAS_CTRL_2 0x219
+#define MADERA_MIC_BIAS_CTRL_3 0x21A
+#define MADERA_MIC_BIAS_CTRL_4 0x21B
+#define MADERA_MIC_BIAS_CTRL_5 0x21C
+#define MADERA_MIC_BIAS_CTRL_6 0x21E
+#define MADERA_HP_CTRL_1L 0x225
+#define MADERA_HP_CTRL_1R 0x226
+#define MADERA_HP_CTRL_2L 0x227
+#define MADERA_HP_CTRL_2R 0x228
+#define MADERA_HP_CTRL_3L 0x229
+#define MADERA_HP_CTRL_3R 0x22A
+#define MADERA_DCS_HP1L_CONTROL 0x232
+#define MADERA_DCS_HP1R_CONTROL 0x238
+#define MADERA_EDRE_HP_STEREO_CONTROL 0x27E
+#define MADERA_ACCESSORY_DETECT_MODE_1 0x293
+#define MADERA_HEADPHONE_DETECT_0 0x299
+#define MADERA_HEADPHONE_DETECT_1 0x29B
+#define MADERA_HEADPHONE_DETECT_2 0x29C
+#define MADERA_HEADPHONE_DETECT_3 0x29D
+#define MADERA_HEADPHONE_DETECT_4 0x29E
+#define MADERA_HEADPHONE_DETECT_5 0x29F
+#define MADERA_MIC_DETECT_1_CONTROL_0 0x2A2
+#define MADERA_MIC_DETECT_1_CONTROL_1 0x2A3
+#define MADERA_MIC_DETECT_1_CONTROL_2 0x2A4
+#define MADERA_MIC_DETECT_1_CONTROL_3 0x2A5
+#define MADERA_MIC_DETECT_1_LEVEL_1 0x2A6
+#define MADERA_MIC_DETECT_1_LEVEL_2 0x2A7
+#define MADERA_MIC_DETECT_1_LEVEL_3 0x2A8
+#define MADERA_MIC_DETECT_1_LEVEL_4 0x2A9
+#define MADERA_MIC_DETECT_1_CONTROL_4 0x2AB
+#define MADERA_MIC_DETECT_2_CONTROL_0 0x2B2
+#define MADERA_MIC_DETECT_2_CONTROL_1 0x2B3
+#define MADERA_MIC_DETECT_2_CONTROL_2 0x2B4
+#define MADERA_MIC_DETECT_2_CONTROL_3 0x2B5
+#define MADERA_MIC_DETECT_2_LEVEL_1 0x2B6
+#define MADERA_MIC_DETECT_2_LEVEL_2 0x2B7
+#define MADERA_MIC_DETECT_2_LEVEL_3 0x2B8
+#define MADERA_MIC_DETECT_2_LEVEL_4 0x2B9
+#define MADERA_MIC_DETECT_2_CONTROL_4 0x2BB
+#define MADERA_MICD_CLAMP_CONTROL 0x2C6
+#define MADERA_GP_SWITCH_1 0x2C8
+#define MADERA_JACK_DETECT_ANALOGUE 0x2D3
+#define MADERA_INPUT_ENABLES 0x300
+#define MADERA_INPUT_ENABLES_STATUS 0x301
+#define MADERA_INPUT_RATE 0x308
+#define MADERA_INPUT_VOLUME_RAMP 0x309
+#define MADERA_HPF_CONTROL 0x30C
+#define MADERA_IN1L_CONTROL 0x310
+#define MADERA_ADC_DIGITAL_VOLUME_1L 0x311
+#define MADERA_DMIC1L_CONTROL 0x312
+#define MADERA_IN1L_RATE_CONTROL 0x313
+#define MADERA_IN1R_CONTROL 0x314
+#define MADERA_ADC_DIGITAL_VOLUME_1R 0x315
+#define MADERA_DMIC1R_CONTROL 0x316
+#define MADERA_IN1R_RATE_CONTROL 0x317
+#define MADERA_IN2L_CONTROL 0x318
+#define MADERA_ADC_DIGITAL_VOLUME_2L 0x319
+#define MADERA_DMIC2L_CONTROL 0x31A
+#define MADERA_IN2L_RATE_CONTROL 0x31B
+#define MADERA_IN2R_CONTROL 0x31C
+#define MADERA_ADC_DIGITAL_VOLUME_2R 0x31D
+#define MADERA_DMIC2R_CONTROL 0x31E
+#define MADERA_IN2R_RATE_CONTROL 0x31F
+#define MADERA_IN3L_CONTROL 0x320
+#define MADERA_ADC_DIGITAL_VOLUME_3L 0x321
+#define MADERA_DMIC3L_CONTROL 0x322
+#define MADERA_IN3L_RATE_CONTROL 0x323
+#define MADERA_IN3R_CONTROL 0x324
+#define MADERA_ADC_DIGITAL_VOLUME_3R 0x325
+#define MADERA_DMIC3R_CONTROL 0x326
+#define MADERA_IN3R_RATE_CONTROL 0x327
+#define MADERA_IN4L_CONTROL 0x328
+#define MADERA_ADC_DIGITAL_VOLUME_4L 0x329
+#define MADERA_DMIC4L_CONTROL 0x32A
+#define MADERA_IN4L_RATE_CONTROL 0x32B
+#define MADERA_IN4R_CONTROL 0x32C
+#define MADERA_ADC_DIGITAL_VOLUME_4R 0x32D
+#define MADERA_DMIC4R_CONTROL 0x32E
+#define MADERA_IN4R_RATE_CONTROL 0x32F
+#define MADERA_IN5L_CONTROL 0x330
+#define MADERA_ADC_DIGITAL_VOLUME_5L 0x331
+#define MADERA_DMIC5L_CONTROL 0x332
+#define MADERA_IN5L_RATE_CONTROL 0x333
+#define MADERA_IN5R_CONTROL 0x334
+#define MADERA_ADC_DIGITAL_VOLUME_5R 0x335
+#define MADERA_DMIC5R_CONTROL 0x336
+#define MADERA_IN5R_RATE_CONTROL 0x337
+#define MADERA_IN6L_CONTROL 0x338
+#define MADERA_ADC_DIGITAL_VOLUME_6L 0x339
+#define MADERA_DMIC6L_CONTROL 0x33A
+#define MADERA_IN6R_CONTROL 0x33C
+#define MADERA_ADC_DIGITAL_VOLUME_6R 0x33D
+#define MADERA_DMIC6R_CONTROL 0x33E
+#define CS47L15_ADC_INT_BIAS 0x3A8
+#define CS47L15_PGA_BIAS_SEL 0x3C4
+#define MADERA_OUTPUT_ENABLES_1 0x400
+#define MADERA_OUTPUT_STATUS_1 0x401
+#define MADERA_RAW_OUTPUT_STATUS_1 0x406
+#define MADERA_OUTPUT_RATE_1 0x408
+#define MADERA_OUTPUT_VOLUME_RAMP 0x409
+#define MADERA_OUTPUT_PATH_CONFIG_1L 0x410
+#define MADERA_DAC_DIGITAL_VOLUME_1L 0x411
+#define MADERA_OUTPUT_PATH_CONFIG_1 0x412
+#define MADERA_NOISE_GATE_SELECT_1L 0x413
+#define MADERA_OUTPUT_PATH_CONFIG_1R 0x414
+#define MADERA_DAC_DIGITAL_VOLUME_1R 0x415
+#define MADERA_NOISE_GATE_SELECT_1R 0x417
+#define MADERA_OUTPUT_PATH_CONFIG_2L 0x418
+#define MADERA_DAC_DIGITAL_VOLUME_2L 0x419
+#define MADERA_OUTPUT_PATH_CONFIG_2 0x41A
+#define MADERA_NOISE_GATE_SELECT_2L 0x41B
+#define MADERA_OUTPUT_PATH_CONFIG_2R 0x41C
+#define MADERA_DAC_DIGITAL_VOLUME_2R 0x41D
+#define MADERA_NOISE_GATE_SELECT_2R 0x41F
+#define MADERA_OUTPUT_PATH_CONFIG_3L 0x420
+#define MADERA_DAC_DIGITAL_VOLUME_3L 0x421
+#define MADERA_OUTPUT_PATH_CONFIG_3 0x422
+#define MADERA_NOISE_GATE_SELECT_3L 0x423
+#define MADERA_OUTPUT_PATH_CONFIG_3R 0x424
+#define MADERA_DAC_DIGITAL_VOLUME_3R 0x425
+#define MADERA_NOISE_GATE_SELECT_3R 0x427
+#define MADERA_OUTPUT_PATH_CONFIG_4L 0x428
+#define MADERA_DAC_DIGITAL_VOLUME_4L 0x429
+#define MADERA_NOISE_GATE_SELECT_4L 0x42B
+#define MADERA_OUTPUT_PATH_CONFIG_4R 0x42C
+#define MADERA_DAC_DIGITAL_VOLUME_4R 0x42D
+#define MADERA_NOISE_GATE_SELECT_4R 0x42F
+#define MADERA_OUTPUT_PATH_CONFIG_5L 0x430
+#define MADERA_DAC_DIGITAL_VOLUME_5L 0x431
+#define MADERA_NOISE_GATE_SELECT_5L 0x433
+#define MADERA_OUTPUT_PATH_CONFIG_5R 0x434
+#define MADERA_DAC_DIGITAL_VOLUME_5R 0x435
+#define MADERA_NOISE_GATE_SELECT_5R 0x437
+#define MADERA_OUTPUT_PATH_CONFIG_6L 0x438
+#define MADERA_DAC_DIGITAL_VOLUME_6L 0x439
+#define MADERA_NOISE_GATE_SELECT_6L 0x43B
+#define MADERA_OUTPUT_PATH_CONFIG_6R 0x43C
+#define MADERA_DAC_DIGITAL_VOLUME_6R 0x43D
+#define MADERA_NOISE_GATE_SELECT_6R 0x43F
+#define MADERA_DAC_AEC_CONTROL_1 0x450
+#define MADERA_DAC_AEC_CONTROL_2 0x451
+#define MADERA_NOISE_GATE_CONTROL 0x458
+#define MADERA_PDM_SPK1_CTRL_1 0x490
+#define MADERA_PDM_SPK1_CTRL_2 0x491
+#define MADERA_PDM_SPK2_CTRL_1 0x492
+#define MADERA_PDM_SPK2_CTRL_2 0x493
+#define MADERA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
+#define MADERA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
+#define MADERA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
+#define MADERA_HP_TEST_CTRL_1 0x4A4
+#define MADERA_HP_TEST_CTRL_5 0x4A8
+#define MADERA_HP_TEST_CTRL_6 0x4A9
+#define MADERA_AIF1_BCLK_CTRL 0x500
+#define MADERA_AIF1_TX_PIN_CTRL 0x501
+#define MADERA_AIF1_RX_PIN_CTRL 0x502
+#define MADERA_AIF1_RATE_CTRL 0x503
+#define MADERA_AIF1_FORMAT 0x504
+#define MADERA_AIF1_RX_BCLK_RATE 0x506
+#define MADERA_AIF1_FRAME_CTRL_1 0x507
+#define MADERA_AIF1_FRAME_CTRL_2 0x508
+#define MADERA_AIF1_FRAME_CTRL_3 0x509
+#define MADERA_AIF1_FRAME_CTRL_4 0x50A
+#define MADERA_AIF1_FRAME_CTRL_5 0x50B
+#define MADERA_AIF1_FRAME_CTRL_6 0x50C
+#define MADERA_AIF1_FRAME_CTRL_7 0x50D
+#define MADERA_AIF1_FRAME_CTRL_8 0x50E
+#define MADERA_AIF1_FRAME_CTRL_9 0x50F
+#define MADERA_AIF1_FRAME_CTRL_10 0x510
+#define MADERA_AIF1_FRAME_CTRL_11 0x511
+#define MADERA_AIF1_FRAME_CTRL_12 0x512
+#define MADERA_AIF1_FRAME_CTRL_13 0x513
+#define MADERA_AIF1_FRAME_CTRL_14 0x514
+#define MADERA_AIF1_FRAME_CTRL_15 0x515
+#define MADERA_AIF1_FRAME_CTRL_16 0x516
+#define MADERA_AIF1_FRAME_CTRL_17 0x517
+#define MADERA_AIF1_FRAME_CTRL_18 0x518
+#define MADERA_AIF1_TX_ENABLES 0x519
+#define MADERA_AIF1_RX_ENABLES 0x51A
+#define MADERA_AIF1_FORCE_WRITE 0x51B
+#define MADERA_AIF2_BCLK_CTRL 0x540
+#define MADERA_AIF2_TX_PIN_CTRL 0x541
+#define MADERA_AIF2_RX_PIN_CTRL 0x542
+#define MADERA_AIF2_RATE_CTRL 0x543
+#define MADERA_AIF2_FORMAT 0x544
+#define MADERA_AIF2_RX_BCLK_RATE 0x546
+#define MADERA_AIF2_FRAME_CTRL_1 0x547
+#define MADERA_AIF2_FRAME_CTRL_2 0x548
+#define MADERA_AIF2_FRAME_CTRL_3 0x549
+#define MADERA_AIF2_FRAME_CTRL_4 0x54A
+#define MADERA_AIF2_FRAME_CTRL_5 0x54B
+#define MADERA_AIF2_FRAME_CTRL_6 0x54C
+#define MADERA_AIF2_FRAME_CTRL_7 0x54D
+#define MADERA_AIF2_FRAME_CTRL_8 0x54E
+#define MADERA_AIF2_FRAME_CTRL_9 0x54F
+#define MADERA_AIF2_FRAME_CTRL_10 0x550
+#define MADERA_AIF2_FRAME_CTRL_11 0x551
+#define MADERA_AIF2_FRAME_CTRL_12 0x552
+#define MADERA_AIF2_FRAME_CTRL_13 0x553
+#define MADERA_AIF2_FRAME_CTRL_14 0x554
+#define MADERA_AIF2_FRAME_CTRL_15 0x555
+#define MADERA_AIF2_FRAME_CTRL_16 0x556
+#define MADERA_AIF2_FRAME_CTRL_17 0x557
+#define MADERA_AIF2_FRAME_CTRL_18 0x558
+#define MADERA_AIF2_TX_ENABLES 0x559
+#define MADERA_AIF2_RX_ENABLES 0x55A
+#define MADERA_AIF2_FORCE_WRITE 0x55B
+#define MADERA_AIF3_BCLK_CTRL 0x580
+#define MADERA_AIF3_TX_PIN_CTRL 0x581
+#define MADERA_AIF3_RX_PIN_CTRL 0x582
+#define MADERA_AIF3_RATE_CTRL 0x583
+#define MADERA_AIF3_FORMAT 0x584
+#define MADERA_AIF3_RX_BCLK_RATE 0x586
+#define MADERA_AIF3_FRAME_CTRL_1 0x587
+#define MADERA_AIF3_FRAME_CTRL_2 0x588
+#define MADERA_AIF3_FRAME_CTRL_3 0x589
+#define MADERA_AIF3_FRAME_CTRL_4 0x58A
+#define MADERA_AIF3_FRAME_CTRL_5 0x58B
+#define MADERA_AIF3_FRAME_CTRL_6 0x58C
+#define MADERA_AIF3_FRAME_CTRL_7 0x58D
+#define MADERA_AIF3_FRAME_CTRL_8 0x58E
+#define MADERA_AIF3_FRAME_CTRL_9 0x58F
+#define MADERA_AIF3_FRAME_CTRL_10 0x590
+#define MADERA_AIF3_FRAME_CTRL_11 0x591
+#define MADERA_AIF3_FRAME_CTRL_12 0x592
+#define MADERA_AIF3_FRAME_CTRL_13 0x593
+#define MADERA_AIF3_FRAME_CTRL_14 0x594
+#define MADERA_AIF3_FRAME_CTRL_15 0x595
+#define MADERA_AIF3_FRAME_CTRL_16 0x596
+#define MADERA_AIF3_FRAME_CTRL_17 0x597
+#define MADERA_AIF3_FRAME_CTRL_18 0x598
+#define MADERA_AIF3_TX_ENABLES 0x599
+#define MADERA_AIF3_RX_ENABLES 0x59A
+#define MADERA_AIF3_FORCE_WRITE 0x59B
+#define MADERA_AIF4_BCLK_CTRL 0x5A0
+#define MADERA_AIF4_TX_PIN_CTRL 0x5A1
+#define MADERA_AIF4_RX_PIN_CTRL 0x5A2
+#define MADERA_AIF4_RATE_CTRL 0x5A3
+#define MADERA_AIF4_FORMAT 0x5A4
+#define MADERA_AIF4_RX_BCLK_RATE 0x5A6
+#define MADERA_AIF4_FRAME_CTRL_1 0x5A7
+#define MADERA_AIF4_FRAME_CTRL_2 0x5A8
+#define MADERA_AIF4_FRAME_CTRL_3 0x5A9
+#define MADERA_AIF4_FRAME_CTRL_4 0x5AA
+#define MADERA_AIF4_FRAME_CTRL_11 0x5B1
+#define MADERA_AIF4_FRAME_CTRL_12 0x5B2
+#define MADERA_AIF4_TX_ENABLES 0x5B9
+#define MADERA_AIF4_RX_ENABLES 0x5BA
+#define MADERA_AIF4_FORCE_WRITE 0x5BB
+#define MADERA_SPD1_TX_CONTROL 0x5C2
+#define MADERA_SPD1_TX_CHANNEL_STATUS_1 0x5C3
+#define MADERA_SPD1_TX_CHANNEL_STATUS_2 0x5C4
+#define MADERA_SPD1_TX_CHANNEL_STATUS_3 0x5C5
+#define MADERA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
+#define MADERA_SLIMBUS_RATES_1 0x5E5
+#define MADERA_SLIMBUS_RATES_2 0x5E6
+#define MADERA_SLIMBUS_RATES_3 0x5E7
+#define MADERA_SLIMBUS_RATES_4 0x5E8
+#define MADERA_SLIMBUS_RATES_5 0x5E9
+#define MADERA_SLIMBUS_RATES_6 0x5EA
+#define MADERA_SLIMBUS_RATES_7 0x5EB
+#define MADERA_SLIMBUS_RATES_8 0x5EC
+#define MADERA_SLIMBUS_RX_CHANNEL_ENABLE 0x5F5
+#define MADERA_SLIMBUS_TX_CHANNEL_ENABLE 0x5F6
+#define MADERA_SLIMBUS_RX_PORT_STATUS 0x5F7
+#define MADERA_SLIMBUS_TX_PORT_STATUS 0x5F8
+#define MADERA_PWM1MIX_INPUT_1_SOURCE 0x640
+#define MADERA_PWM1MIX_INPUT_1_VOLUME 0x641
+#define MADERA_PWM1MIX_INPUT_2_SOURCE 0x642
+#define MADERA_PWM1MIX_INPUT_2_VOLUME 0x643
+#define MADERA_PWM1MIX_INPUT_3_SOURCE 0x644
+#define MADERA_PWM1MIX_INPUT_3_VOLUME 0x645
+#define MADERA_PWM1MIX_INPUT_4_SOURCE 0x646
+#define MADERA_PWM1MIX_INPUT_4_VOLUME 0x647
+#define MADERA_PWM2MIX_INPUT_1_SOURCE 0x648
+#define MADERA_PWM2MIX_INPUT_1_VOLUME 0x649
+#define MADERA_PWM2MIX_INPUT_2_SOURCE 0x64A
+#define MADERA_PWM2MIX_INPUT_2_VOLUME 0x64B
+#define MADERA_PWM2MIX_INPUT_3_SOURCE 0x64C
+#define MADERA_PWM2MIX_INPUT_3_VOLUME 0x64D
+#define MADERA_PWM2MIX_INPUT_4_SOURCE 0x64E
+#define MADERA_PWM2MIX_INPUT_4_VOLUME 0x64F
+#define MADERA_OUT1LMIX_INPUT_1_SOURCE 0x680
+#define MADERA_OUT1LMIX_INPUT_1_VOLUME 0x681
+#define MADERA_OUT1LMIX_INPUT_2_SOURCE 0x682
+#define MADERA_OUT1LMIX_INPUT_2_VOLUME 0x683
+#define MADERA_OUT1LMIX_INPUT_3_SOURCE 0x684
+#define MADERA_OUT1LMIX_INPUT_3_VOLUME 0x685
+#define MADERA_OUT1LMIX_INPUT_4_SOURCE 0x686
+#define MADERA_OUT1LMIX_INPUT_4_VOLUME 0x687
+#define MADERA_OUT1RMIX_INPUT_1_SOURCE 0x688
+#define MADERA_OUT1RMIX_INPUT_1_VOLUME 0x689
+#define MADERA_OUT1RMIX_INPUT_2_SOURCE 0x68A
+#define MADERA_OUT1RMIX_INPUT_2_VOLUME 0x68B
+#define MADERA_OUT1RMIX_INPUT_3_SOURCE 0x68C
+#define MADERA_OUT1RMIX_INPUT_3_VOLUME 0x68D
+#define MADERA_OUT1RMIX_INPUT_4_SOURCE 0x68E
+#define MADERA_OUT1RMIX_INPUT_4_VOLUME 0x68F
+#define MADERA_OUT2LMIX_INPUT_1_SOURCE 0x690
+#define MADERA_OUT2LMIX_INPUT_1_VOLUME 0x691
+#define MADERA_OUT2LMIX_INPUT_2_SOURCE 0x692
+#define MADERA_OUT2LMIX_INPUT_2_VOLUME 0x693
+#define MADERA_OUT2LMIX_INPUT_3_SOURCE 0x694
+#define MADERA_OUT2LMIX_INPUT_3_VOLUME 0x695
+#define MADERA_OUT2LMIX_INPUT_4_SOURCE 0x696
+#define MADERA_OUT2LMIX_INPUT_4_VOLUME 0x697
+#define MADERA_OUT2RMIX_INPUT_1_SOURCE 0x698
+#define MADERA_OUT2RMIX_INPUT_1_VOLUME 0x699
+#define MADERA_OUT2RMIX_INPUT_2_SOURCE 0x69A
+#define MADERA_OUT2RMIX_INPUT_2_VOLUME 0x69B
+#define MADERA_OUT2RMIX_INPUT_3_SOURCE 0x69C
+#define MADERA_OUT2RMIX_INPUT_3_VOLUME 0x69D
+#define MADERA_OUT2RMIX_INPUT_4_SOURCE 0x69E
+#define MADERA_OUT2RMIX_INPUT_4_VOLUME 0x69F
+#define MADERA_OUT3LMIX_INPUT_1_SOURCE 0x6A0
+#define MADERA_OUT3LMIX_INPUT_1_VOLUME 0x6A1
+#define MADERA_OUT3LMIX_INPUT_2_SOURCE 0x6A2
+#define MADERA_OUT3LMIX_INPUT_2_VOLUME 0x6A3
+#define MADERA_OUT3LMIX_INPUT_3_SOURCE 0x6A4
+#define MADERA_OUT3LMIX_INPUT_3_VOLUME 0x6A5
+#define MADERA_OUT3LMIX_INPUT_4_SOURCE 0x6A6
+#define MADERA_OUT3LMIX_INPUT_4_VOLUME 0x6A7
+#define MADERA_OUT3RMIX_INPUT_1_SOURCE 0x6A8
+#define MADERA_OUT3RMIX_INPUT_1_VOLUME 0x6A9
+#define MADERA_OUT3RMIX_INPUT_2_SOURCE 0x6AA
+#define MADERA_OUT3RMIX_INPUT_2_VOLUME 0x6AB
+#define MADERA_OUT3RMIX_INPUT_3_SOURCE 0x6AC
+#define MADERA_OUT3RMIX_INPUT_3_VOLUME 0x6AD
+#define MADERA_OUT3RMIX_INPUT_4_SOURCE 0x6AE
+#define MADERA_OUT3RMIX_INPUT_4_VOLUME 0x6AF
+#define MADERA_OUT4LMIX_INPUT_1_SOURCE 0x6B0
+#define MADERA_OUT4LMIX_INPUT_1_VOLUME 0x6B1
+#define MADERA_OUT4LMIX_INPUT_2_SOURCE 0x6B2
+#define MADERA_OUT4LMIX_INPUT_2_VOLUME 0x6B3
+#define MADERA_OUT4LMIX_INPUT_3_SOURCE 0x6B4
+#define MADERA_OUT4LMIX_INPUT_3_VOLUME 0x6B5
+#define MADERA_OUT4LMIX_INPUT_4_SOURCE 0x6B6
+#define MADERA_OUT4LMIX_INPUT_4_VOLUME 0x6B7
+#define MADERA_OUT4RMIX_INPUT_1_SOURCE 0x6B8
+#define MADERA_OUT4RMIX_INPUT_1_VOLUME 0x6B9
+#define MADERA_OUT4RMIX_INPUT_2_SOURCE 0x6BA
+#define MADERA_OUT4RMIX_INPUT_2_VOLUME 0x6BB
+#define MADERA_OUT4RMIX_INPUT_3_SOURCE 0x6BC
+#define MADERA_OUT4RMIX_INPUT_3_VOLUME 0x6BD
+#define MADERA_OUT4RMIX_INPUT_4_SOURCE 0x6BE
+#define MADERA_OUT4RMIX_INPUT_4_VOLUME 0x6BF
+#define MADERA_OUT5LMIX_INPUT_1_SOURCE 0x6C0
+#define MADERA_OUT5LMIX_INPUT_1_VOLUME 0x6C1
+#define MADERA_OUT5LMIX_INPUT_2_SOURCE 0x6C2
+#define MADERA_OUT5LMIX_INPUT_2_VOLUME 0x6C3
+#define MADERA_OUT5LMIX_INPUT_3_SOURCE 0x6C4
+#define MADERA_OUT5LMIX_INPUT_3_VOLUME 0x6C5
+#define MADERA_OUT5LMIX_INPUT_4_SOURCE 0x6C6
+#define MADERA_OUT5LMIX_INPUT_4_VOLUME 0x6C7
+#define MADERA_OUT5RMIX_INPUT_1_SOURCE 0x6C8
+#define MADERA_OUT5RMIX_INPUT_1_VOLUME 0x6C9
+#define MADERA_OUT5RMIX_INPUT_2_SOURCE 0x6CA
+#define MADERA_OUT5RMIX_INPUT_2_VOLUME 0x6CB
+#define MADERA_OUT5RMIX_INPUT_3_SOURCE 0x6CC
+#define MADERA_OUT5RMIX_INPUT_3_VOLUME 0x6CD
+#define MADERA_OUT5RMIX_INPUT_4_SOURCE 0x6CE
+#define MADERA_OUT5RMIX_INPUT_4_VOLUME 0x6CF
+#define MADERA_OUT6LMIX_INPUT_1_SOURCE 0x6D0
+#define MADERA_OUT6LMIX_INPUT_1_VOLUME 0x6D1
+#define MADERA_OUT6LMIX_INPUT_2_SOURCE 0x6D2
+#define MADERA_OUT6LMIX_INPUT_2_VOLUME 0x6D3
+#define MADERA_OUT6LMIX_INPUT_3_SOURCE 0x6D4
+#define MADERA_OUT6LMIX_INPUT_3_VOLUME 0x6D5
+#define MADERA_OUT6LMIX_INPUT_4_SOURCE 0x6D6
+#define MADERA_OUT6LMIX_INPUT_4_VOLUME 0x6D7
+#define MADERA_OUT6RMIX_INPUT_1_SOURCE 0x6D8
+#define MADERA_OUT6RMIX_INPUT_1_VOLUME 0x6D9
+#define MADERA_OUT6RMIX_INPUT_2_SOURCE 0x6DA
+#define MADERA_OUT6RMIX_INPUT_2_VOLUME 0x6DB
+#define MADERA_OUT6RMIX_INPUT_3_SOURCE 0x6DC
+#define MADERA_OUT6RMIX_INPUT_3_VOLUME 0x6DD
+#define MADERA_OUT6RMIX_INPUT_4_SOURCE 0x6DE
+#define MADERA_OUT6RMIX_INPUT_4_VOLUME 0x6DF
+#define MADERA_AIF1TX1MIX_INPUT_1_SOURCE 0x700
+#define MADERA_AIF1TX1MIX_INPUT_1_VOLUME 0x701
+#define MADERA_AIF1TX1MIX_INPUT_2_SOURCE 0x702
+#define MADERA_AIF1TX1MIX_INPUT_2_VOLUME 0x703
+#define MADERA_AIF1TX1MIX_INPUT_3_SOURCE 0x704
+#define MADERA_AIF1TX1MIX_INPUT_3_VOLUME 0x705
+#define MADERA_AIF1TX1MIX_INPUT_4_SOURCE 0x706
+#define MADERA_AIF1TX1MIX_INPUT_4_VOLUME 0x707
+#define MADERA_AIF1TX2MIX_INPUT_1_SOURCE 0x708
+#define MADERA_AIF1TX2MIX_INPUT_1_VOLUME 0x709
+#define MADERA_AIF1TX2MIX_INPUT_2_SOURCE 0x70A
+#define MADERA_AIF1TX2MIX_INPUT_2_VOLUME 0x70B
+#define MADERA_AIF1TX2MIX_INPUT_3_SOURCE 0x70C
+#define MADERA_AIF1TX2MIX_INPUT_3_VOLUME 0x70D
+#define MADERA_AIF1TX2MIX_INPUT_4_SOURCE 0x70E
+#define MADERA_AIF1TX2MIX_INPUT_4_VOLUME 0x70F
+#define MADERA_AIF1TX3MIX_INPUT_1_SOURCE 0x710
+#define MADERA_AIF1TX3MIX_INPUT_1_VOLUME 0x711
+#define MADERA_AIF1TX3MIX_INPUT_2_SOURCE 0x712
+#define MADERA_AIF1TX3MIX_INPUT_2_VOLUME 0x713
+#define MADERA_AIF1TX3MIX_INPUT_3_SOURCE 0x714
+#define MADERA_AIF1TX3MIX_INPUT_3_VOLUME 0x715
+#define MADERA_AIF1TX3MIX_INPUT_4_SOURCE 0x716
+#define MADERA_AIF1TX3MIX_INPUT_4_VOLUME 0x717
+#define MADERA_AIF1TX4MIX_INPUT_1_SOURCE 0x718
+#define MADERA_AIF1TX4MIX_INPUT_1_VOLUME 0x719
+#define MADERA_AIF1TX4MIX_INPUT_2_SOURCE 0x71A
+#define MADERA_AIF1TX4MIX_INPUT_2_VOLUME 0x71B
+#define MADERA_AIF1TX4MIX_INPUT_3_SOURCE 0x71C
+#define MADERA_AIF1TX4MIX_INPUT_3_VOLUME 0x71D
+#define MADERA_AIF1TX4MIX_INPUT_4_SOURCE 0x71E
+#define MADERA_AIF1TX4MIX_INPUT_4_VOLUME 0x71F
+#define MADERA_AIF1TX5MIX_INPUT_1_SOURCE 0x720
+#define MADERA_AIF1TX5MIX_INPUT_1_VOLUME 0x721
+#define MADERA_AIF1TX5MIX_INPUT_2_SOURCE 0x722
+#define MADERA_AIF1TX5MIX_INPUT_2_VOLUME 0x723
+#define MADERA_AIF1TX5MIX_INPUT_3_SOURCE 0x724
+#define MADERA_AIF1TX5MIX_INPUT_3_VOLUME 0x725
+#define MADERA_AIF1TX5MIX_INPUT_4_SOURCE 0x726
+#define MADERA_AIF1TX5MIX_INPUT_4_VOLUME 0x727
+#define MADERA_AIF1TX6MIX_INPUT_1_SOURCE 0x728
+#define MADERA_AIF1TX6MIX_INPUT_1_VOLUME 0x729
+#define MADERA_AIF1TX6MIX_INPUT_2_SOURCE 0x72A
+#define MADERA_AIF1TX6MIX_INPUT_2_VOLUME 0x72B
+#define MADERA_AIF1TX6MIX_INPUT_3_SOURCE 0x72C
+#define MADERA_AIF1TX6MIX_INPUT_3_VOLUME 0x72D
+#define MADERA_AIF1TX6MIX_INPUT_4_SOURCE 0x72E
+#define MADERA_AIF1TX6MIX_INPUT_4_VOLUME 0x72F
+#define MADERA_AIF1TX7MIX_INPUT_1_SOURCE 0x730
+#define MADERA_AIF1TX7MIX_INPUT_1_VOLUME 0x731
+#define MADERA_AIF1TX7MIX_INPUT_2_SOURCE 0x732
+#define MADERA_AIF1TX7MIX_INPUT_2_VOLUME 0x733
+#define MADERA_AIF1TX7MIX_INPUT_3_SOURCE 0x734
+#define MADERA_AIF1TX7MIX_INPUT_3_VOLUME 0x735
+#define MADERA_AIF1TX7MIX_INPUT_4_SOURCE 0x736
+#define MADERA_AIF1TX7MIX_INPUT_4_VOLUME 0x737
+#define MADERA_AIF1TX8MIX_INPUT_1_SOURCE 0x738
+#define MADERA_AIF1TX8MIX_INPUT_1_VOLUME 0x739
+#define MADERA_AIF1TX8MIX_INPUT_2_SOURCE 0x73A
+#define MADERA_AIF1TX8MIX_INPUT_2_VOLUME 0x73B
+#define MADERA_AIF1TX8MIX_INPUT_3_SOURCE 0x73C
+#define MADERA_AIF1TX8MIX_INPUT_3_VOLUME 0x73D
+#define MADERA_AIF1TX8MIX_INPUT_4_SOURCE 0x73E
+#define MADERA_AIF1TX8MIX_INPUT_4_VOLUME 0x73F
+#define MADERA_AIF2TX1MIX_INPUT_1_SOURCE 0x740
+#define MADERA_AIF2TX1MIX_INPUT_1_VOLUME 0x741
+#define MADERA_AIF2TX1MIX_INPUT_2_SOURCE 0x742
+#define MADERA_AIF2TX1MIX_INPUT_2_VOLUME 0x743
+#define MADERA_AIF2TX1MIX_INPUT_3_SOURCE 0x744
+#define MADERA_AIF2TX1MIX_INPUT_3_VOLUME 0x745
+#define MADERA_AIF2TX1MIX_INPUT_4_SOURCE 0x746
+#define MADERA_AIF2TX1MIX_INPUT_4_VOLUME 0x747
+#define MADERA_AIF2TX2MIX_INPUT_1_SOURCE 0x748
+#define MADERA_AIF2TX2MIX_INPUT_1_VOLUME 0x749
+#define MADERA_AIF2TX2MIX_INPUT_2_SOURCE 0x74A
+#define MADERA_AIF2TX2MIX_INPUT_2_VOLUME 0x74B
+#define MADERA_AIF2TX2MIX_INPUT_3_SOURCE 0x74C
+#define MADERA_AIF2TX2MIX_INPUT_3_VOLUME 0x74D
+#define MADERA_AIF2TX2MIX_INPUT_4_SOURCE 0x74E
+#define MADERA_AIF2TX2MIX_INPUT_4_VOLUME 0x74F
+#define MADERA_AIF2TX3MIX_INPUT_1_SOURCE 0x750
+#define MADERA_AIF2TX3MIX_INPUT_1_VOLUME 0x751
+#define MADERA_AIF2TX3MIX_INPUT_2_SOURCE 0x752
+#define MADERA_AIF2TX3MIX_INPUT_2_VOLUME 0x753
+#define MADERA_AIF2TX3MIX_INPUT_3_SOURCE 0x754
+#define MADERA_AIF2TX3MIX_INPUT_3_VOLUME 0x755
+#define MADERA_AIF2TX3MIX_INPUT_4_SOURCE 0x756
+#define MADERA_AIF2TX3MIX_INPUT_4_VOLUME 0x757
+#define MADERA_AIF2TX4MIX_INPUT_1_SOURCE 0x758
+#define MADERA_AIF2TX4MIX_INPUT_1_VOLUME 0x759
+#define MADERA_AIF2TX4MIX_INPUT_2_SOURCE 0x75A
+#define MADERA_AIF2TX4MIX_INPUT_2_VOLUME 0x75B
+#define MADERA_AIF2TX4MIX_INPUT_3_SOURCE 0x75C
+#define MADERA_AIF2TX4MIX_INPUT_3_VOLUME 0x75D
+#define MADERA_AIF2TX4MIX_INPUT_4_SOURCE 0x75E
+#define MADERA_AIF2TX4MIX_INPUT_4_VOLUME 0x75F
+#define MADERA_AIF2TX5MIX_INPUT_1_SOURCE 0x760
+#define MADERA_AIF2TX5MIX_INPUT_1_VOLUME 0x761
+#define MADERA_AIF2TX5MIX_INPUT_2_SOURCE 0x762
+#define MADERA_AIF2TX5MIX_INPUT_2_VOLUME 0x763
+#define MADERA_AIF2TX5MIX_INPUT_3_SOURCE 0x764
+#define MADERA_AIF2TX5MIX_INPUT_3_VOLUME 0x765
+#define MADERA_AIF2TX5MIX_INPUT_4_SOURCE 0x766
+#define MADERA_AIF2TX5MIX_INPUT_4_VOLUME 0x767
+#define MADERA_AIF2TX6MIX_INPUT_1_SOURCE 0x768
+#define MADERA_AIF2TX6MIX_INPUT_1_VOLUME 0x769
+#define MADERA_AIF2TX6MIX_INPUT_2_SOURCE 0x76A
+#define MADERA_AIF2TX6MIX_INPUT_2_VOLUME 0x76B
+#define MADERA_AIF2TX6MIX_INPUT_3_SOURCE 0x76C
+#define MADERA_AIF2TX6MIX_INPUT_3_VOLUME 0x76D
+#define MADERA_AIF2TX6MIX_INPUT_4_SOURCE 0x76E
+#define MADERA_AIF2TX6MIX_INPUT_4_VOLUME 0x76F
+#define MADERA_AIF2TX7MIX_INPUT_1_SOURCE 0x770
+#define MADERA_AIF2TX7MIX_INPUT_1_VOLUME 0x771
+#define MADERA_AIF2TX7MIX_INPUT_2_SOURCE 0x772
+#define MADERA_AIF2TX7MIX_INPUT_2_VOLUME 0x773
+#define MADERA_AIF2TX7MIX_INPUT_3_SOURCE 0x774
+#define MADERA_AIF2TX7MIX_INPUT_3_VOLUME 0x775
+#define MADERA_AIF2TX7MIX_INPUT_4_SOURCE 0x776
+#define MADERA_AIF2TX7MIX_INPUT_4_VOLUME 0x777
+#define MADERA_AIF2TX8MIX_INPUT_1_SOURCE 0x778
+#define MADERA_AIF2TX8MIX_INPUT_1_VOLUME 0x779
+#define MADERA_AIF2TX8MIX_INPUT_2_SOURCE 0x77A
+#define MADERA_AIF2TX8MIX_INPUT_2_VOLUME 0x77B
+#define MADERA_AIF2TX8MIX_INPUT_3_SOURCE 0x77C
+#define MADERA_AIF2TX8MIX_INPUT_3_VOLUME 0x77D
+#define MADERA_AIF2TX8MIX_INPUT_4_SOURCE 0x77E
+#define MADERA_AIF2TX8MIX_INPUT_4_VOLUME 0x77F
+#define MADERA_AIF3TX1MIX_INPUT_1_SOURCE 0x780
+#define MADERA_AIF3TX1MIX_INPUT_1_VOLUME 0x781
+#define MADERA_AIF3TX1MIX_INPUT_2_SOURCE 0x782
+#define MADERA_AIF3TX1MIX_INPUT_2_VOLUME 0x783
+#define MADERA_AIF3TX1MIX_INPUT_3_SOURCE 0x784
+#define MADERA_AIF3TX1MIX_INPUT_3_VOLUME 0x785
+#define MADERA_AIF3TX1MIX_INPUT_4_SOURCE 0x786
+#define MADERA_AIF3TX1MIX_INPUT_4_VOLUME 0x787
+#define MADERA_AIF3TX2MIX_INPUT_1_SOURCE 0x788
+#define MADERA_AIF3TX2MIX_INPUT_1_VOLUME 0x789
+#define MADERA_AIF3TX2MIX_INPUT_2_SOURCE 0x78A
+#define MADERA_AIF3TX2MIX_INPUT_2_VOLUME 0x78B
+#define MADERA_AIF3TX2MIX_INPUT_3_SOURCE 0x78C
+#define MADERA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D
+#define MADERA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E
+#define MADERA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F
+#define MADERA_AIF3TX3MIX_INPUT_1_SOURCE 0x790
+#define MADERA_AIF3TX3MIX_INPUT_1_VOLUME 0x791
+#define MADERA_AIF3TX3MIX_INPUT_2_SOURCE 0x792
+#define MADERA_AIF3TX3MIX_INPUT_2_VOLUME 0x793
+#define MADERA_AIF3TX3MIX_INPUT_3_SOURCE 0x794
+#define MADERA_AIF3TX3MIX_INPUT_3_VOLUME 0x795
+#define MADERA_AIF3TX3MIX_INPUT_4_SOURCE 0x796
+#define MADERA_AIF3TX3MIX_INPUT_4_VOLUME 0x797
+#define MADERA_AIF3TX4MIX_INPUT_1_SOURCE 0x798
+#define MADERA_AIF3TX4MIX_INPUT_1_VOLUME 0x799
+#define MADERA_AIF3TX4MIX_INPUT_2_SOURCE 0x79A
+#define MADERA_AIF3TX4MIX_INPUT_2_VOLUME 0x79B
+#define MADERA_AIF3TX4MIX_INPUT_3_SOURCE 0x79C
+#define MADERA_AIF3TX4MIX_INPUT_3_VOLUME 0x79D
+#define MADERA_AIF3TX4MIX_INPUT_4_SOURCE 0x79E
+#define MADERA_AIF3TX4MIX_INPUT_4_VOLUME 0x79F
+#define CS47L92_AIF3TX5MIX_INPUT_1_SOURCE 0x7A0
+#define CS47L92_AIF3TX5MIX_INPUT_1_VOLUME 0x7A1
+#define CS47L92_AIF3TX5MIX_INPUT_2_SOURCE 0x7A2
+#define CS47L92_AIF3TX5MIX_INPUT_2_VOLUME 0x7A3
+#define CS47L92_AIF3TX5MIX_INPUT_3_SOURCE 0x7A4
+#define CS47L92_AIF3TX5MIX_INPUT_3_VOLUME 0x7A5
+#define CS47L92_AIF3TX5MIX_INPUT_4_SOURCE 0x7A6
+#define CS47L92_AIF3TX5MIX_INPUT_4_VOLUME 0x7A7
+#define CS47L92_AIF3TX6MIX_INPUT_1_SOURCE 0x7A8
+#define CS47L92_AIF3TX6MIX_INPUT_1_VOLUME 0x7A9
+#define CS47L92_AIF3TX6MIX_INPUT_2_SOURCE 0x7AA
+#define CS47L92_AIF3TX6MIX_INPUT_2_VOLUME 0x7AB
+#define CS47L92_AIF3TX6MIX_INPUT_3_SOURCE 0x7AC
+#define CS47L92_AIF3TX6MIX_INPUT_3_VOLUME 0x7AD
+#define CS47L92_AIF3TX6MIX_INPUT_4_SOURCE 0x7AE
+#define CS47L92_AIF3TX6MIX_INPUT_4_VOLUME 0x7AF
+#define CS47L92_AIF3TX7MIX_INPUT_1_SOURCE 0x7B0
+#define CS47L92_AIF3TX7MIX_INPUT_1_VOLUME 0x7B1
+#define CS47L92_AIF3TX7MIX_INPUT_2_SOURCE 0x7B2
+#define CS47L92_AIF3TX7MIX_INPUT_2_VOLUME 0x7B3
+#define CS47L92_AIF3TX7MIX_INPUT_3_SOURCE 0x7B4
+#define CS47L92_AIF3TX7MIX_INPUT_3_VOLUME 0x7B5
+#define CS47L92_AIF3TX7MIX_INPUT_4_SOURCE 0x7B6
+#define CS47L92_AIF3TX7MIX_INPUT_4_VOLUME 0x7B7
+#define CS47L92_AIF3TX8MIX_INPUT_1_SOURCE 0x7B8
+#define CS47L92_AIF3TX8MIX_INPUT_1_VOLUME 0x7B9
+#define CS47L92_AIF3TX8MIX_INPUT_2_SOURCE 0x7BA
+#define CS47L92_AIF3TX8MIX_INPUT_2_VOLUME 0x7BB
+#define CS47L92_AIF3TX8MIX_INPUT_3_SOURCE 0x7BC
+#define CS47L92_AIF3TX8MIX_INPUT_3_VOLUME 0x7BD
+#define CS47L92_AIF3TX8MIX_INPUT_4_SOURCE 0x7BE
+#define CS47L92_AIF3TX8MIX_INPUT_4_VOLUME 0x7BF
+#define MADERA_AIF4TX1MIX_INPUT_1_SOURCE 0x7A0
+#define MADERA_AIF4TX1MIX_INPUT_1_VOLUME 0x7A1
+#define MADERA_AIF4TX1MIX_INPUT_2_SOURCE 0x7A2
+#define MADERA_AIF4TX1MIX_INPUT_2_VOLUME 0x7A3
+#define MADERA_AIF4TX1MIX_INPUT_3_SOURCE 0x7A4
+#define MADERA_AIF4TX1MIX_INPUT_3_VOLUME 0x7A5
+#define MADERA_AIF4TX1MIX_INPUT_4_SOURCE 0x7A6
+#define MADERA_AIF4TX1MIX_INPUT_4_VOLUME 0x7A7
+#define MADERA_AIF4TX2MIX_INPUT_1_SOURCE 0x7A8
+#define MADERA_AIF4TX2MIX_INPUT_1_VOLUME 0x7A9
+#define MADERA_AIF4TX2MIX_INPUT_2_SOURCE 0x7AA
+#define MADERA_AIF4TX2MIX_INPUT_2_VOLUME 0x7AB
+#define MADERA_AIF4TX2MIX_INPUT_3_SOURCE 0x7AC
+#define MADERA_AIF4TX2MIX_INPUT_3_VOLUME 0x7AD
+#define MADERA_AIF4TX2MIX_INPUT_4_SOURCE 0x7AE
+#define MADERA_AIF4TX2MIX_INPUT_4_VOLUME 0x7AF
+#define MADERA_SLIMTX1MIX_INPUT_1_SOURCE 0x7C0
+#define MADERA_SLIMTX1MIX_INPUT_1_VOLUME 0x7C1
+#define MADERA_SLIMTX1MIX_INPUT_2_SOURCE 0x7C2
+#define MADERA_SLIMTX1MIX_INPUT_2_VOLUME 0x7C3
+#define MADERA_SLIMTX1MIX_INPUT_3_SOURCE 0x7C4
+#define MADERA_SLIMTX1MIX_INPUT_3_VOLUME 0x7C5
+#define MADERA_SLIMTX1MIX_INPUT_4_SOURCE 0x7C6
+#define MADERA_SLIMTX1MIX_INPUT_4_VOLUME 0x7C7
+#define MADERA_SLIMTX2MIX_INPUT_1_SOURCE 0x7C8
+#define MADERA_SLIMTX2MIX_INPUT_1_VOLUME 0x7C9
+#define MADERA_SLIMTX2MIX_INPUT_2_SOURCE 0x7CA
+#define MADERA_SLIMTX2MIX_INPUT_2_VOLUME 0x7CB
+#define MADERA_SLIMTX2MIX_INPUT_3_SOURCE 0x7CC
+#define MADERA_SLIMTX2MIX_INPUT_3_VOLUME 0x7CD
+#define MADERA_SLIMTX2MIX_INPUT_4_SOURCE 0x7CE
+#define MADERA_SLIMTX2MIX_INPUT_4_VOLUME 0x7CF
+#define MADERA_SLIMTX3MIX_INPUT_1_SOURCE 0x7D0
+#define MADERA_SLIMTX3MIX_INPUT_1_VOLUME 0x7D1
+#define MADERA_SLIMTX3MIX_INPUT_2_SOURCE 0x7D2
+#define MADERA_SLIMTX3MIX_INPUT_2_VOLUME 0x7D3
+#define MADERA_SLIMTX3MIX_INPUT_3_SOURCE 0x7D4
+#define MADERA_SLIMTX3MIX_INPUT_3_VOLUME 0x7D5
+#define MADERA_SLIMTX3MIX_INPUT_4_SOURCE 0x7D6
+#define MADERA_SLIMTX3MIX_INPUT_4_VOLUME 0x7D7
+#define MADERA_SLIMTX4MIX_INPUT_1_SOURCE 0x7D8
+#define MADERA_SLIMTX4MIX_INPUT_1_VOLUME 0x7D9
+#define MADERA_SLIMTX4MIX_INPUT_2_SOURCE 0x7DA
+#define MADERA_SLIMTX4MIX_INPUT_2_VOLUME 0x7DB
+#define MADERA_SLIMTX4MIX_INPUT_3_SOURCE 0x7DC
+#define MADERA_SLIMTX4MIX_INPUT_3_VOLUME 0x7DD
+#define MADERA_SLIMTX4MIX_INPUT_4_SOURCE 0x7DE
+#define MADERA_SLIMTX4MIX_INPUT_4_VOLUME 0x7DF
+#define MADERA_SLIMTX5MIX_INPUT_1_SOURCE 0x7E0
+#define MADERA_SLIMTX5MIX_INPUT_1_VOLUME 0x7E1
+#define MADERA_SLIMTX5MIX_INPUT_2_SOURCE 0x7E2
+#define MADERA_SLIMTX5MIX_INPUT_2_VOLUME 0x7E3
+#define MADERA_SLIMTX5MIX_INPUT_3_SOURCE 0x7E4
+#define MADERA_SLIMTX5MIX_INPUT_3_VOLUME 0x7E5
+#define MADERA_SLIMTX5MIX_INPUT_4_SOURCE 0x7E6
+#define MADERA_SLIMTX5MIX_INPUT_4_VOLUME 0x7E7
+#define MADERA_SLIMTX6MIX_INPUT_1_SOURCE 0x7E8
+#define MADERA_SLIMTX6MIX_INPUT_1_VOLUME 0x7E9
+#define MADERA_SLIMTX6MIX_INPUT_2_SOURCE 0x7EA
+#define MADERA_SLIMTX6MIX_INPUT_2_VOLUME 0x7EB
+#define MADERA_SLIMTX6MIX_INPUT_3_SOURCE 0x7EC
+#define MADERA_SLIMTX6MIX_INPUT_3_VOLUME 0x7ED
+#define MADERA_SLIMTX6MIX_INPUT_4_SOURCE 0x7EE
+#define MADERA_SLIMTX6MIX_INPUT_4_VOLUME 0x7EF
+#define MADERA_SLIMTX7MIX_INPUT_1_SOURCE 0x7F0
+#define MADERA_SLIMTX7MIX_INPUT_1_VOLUME 0x7F1
+#define MADERA_SLIMTX7MIX_INPUT_2_SOURCE 0x7F2
+#define MADERA_SLIMTX7MIX_INPUT_2_VOLUME 0x7F3
+#define MADERA_SLIMTX7MIX_INPUT_3_SOURCE 0x7F4
+#define MADERA_SLIMTX7MIX_INPUT_3_VOLUME 0x7F5
+#define MADERA_SLIMTX7MIX_INPUT_4_SOURCE 0x7F6
+#define MADERA_SLIMTX7MIX_INPUT_4_VOLUME 0x7F7
+#define MADERA_SLIMTX8MIX_INPUT_1_SOURCE 0x7F8
+#define MADERA_SLIMTX8MIX_INPUT_1_VOLUME 0x7F9
+#define MADERA_SLIMTX8MIX_INPUT_2_SOURCE 0x7FA
+#define MADERA_SLIMTX8MIX_INPUT_2_VOLUME 0x7FB
+#define MADERA_SLIMTX8MIX_INPUT_3_SOURCE 0x7FC
+#define MADERA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
+#define MADERA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
+#define MADERA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
+#define MADERA_SPDIF1TX1MIX_INPUT_1_SOURCE 0x800
+#define MADERA_SPDIF1TX1MIX_INPUT_1_VOLUME 0x801
+#define MADERA_SPDIF1TX2MIX_INPUT_1_SOURCE 0x808
+#define MADERA_SPDIF1TX2MIX_INPUT_1_VOLUME 0x809
+#define MADERA_EQ1MIX_INPUT_1_SOURCE 0x880
+#define MADERA_EQ1MIX_INPUT_1_VOLUME 0x881
+#define MADERA_EQ1MIX_INPUT_2_SOURCE 0x882
+#define MADERA_EQ1MIX_INPUT_2_VOLUME 0x883
+#define MADERA_EQ1MIX_INPUT_3_SOURCE 0x884
+#define MADERA_EQ1MIX_INPUT_3_VOLUME 0x885
+#define MADERA_EQ1MIX_INPUT_4_SOURCE 0x886
+#define MADERA_EQ1MIX_INPUT_4_VOLUME 0x887
+#define MADERA_EQ2MIX_INPUT_1_SOURCE 0x888
+#define MADERA_EQ2MIX_INPUT_1_VOLUME 0x889
+#define MADERA_EQ2MIX_INPUT_2_SOURCE 0x88A
+#define MADERA_EQ2MIX_INPUT_2_VOLUME 0x88B
+#define MADERA_EQ2MIX_INPUT_3_SOURCE 0x88C
+#define MADERA_EQ2MIX_INPUT_3_VOLUME 0x88D
+#define MADERA_EQ2MIX_INPUT_4_SOURCE 0x88E
+#define MADERA_EQ2MIX_INPUT_4_VOLUME 0x88F
+#define MADERA_EQ3MIX_INPUT_1_SOURCE 0x890
+#define MADERA_EQ3MIX_INPUT_1_VOLUME 0x891
+#define MADERA_EQ3MIX_INPUT_2_SOURCE 0x892
+#define MADERA_EQ3MIX_INPUT_2_VOLUME 0x893
+#define MADERA_EQ3MIX_INPUT_3_SOURCE 0x894
+#define MADERA_EQ3MIX_INPUT_3_VOLUME 0x895
+#define MADERA_EQ3MIX_INPUT_4_SOURCE 0x896
+#define MADERA_EQ3MIX_INPUT_4_VOLUME 0x897
+#define MADERA_EQ4MIX_INPUT_1_SOURCE 0x898
+#define MADERA_EQ4MIX_INPUT_1_VOLUME 0x899
+#define MADERA_EQ4MIX_INPUT_2_SOURCE 0x89A
+#define MADERA_EQ4MIX_INPUT_2_VOLUME 0x89B
+#define MADERA_EQ4MIX_INPUT_3_SOURCE 0x89C
+#define MADERA_EQ4MIX_INPUT_3_VOLUME 0x89D
+#define MADERA_EQ4MIX_INPUT_4_SOURCE 0x89E
+#define MADERA_EQ4MIX_INPUT_4_VOLUME 0x89F
+#define MADERA_DRC1LMIX_INPUT_1_SOURCE 0x8C0
+#define MADERA_DRC1LMIX_INPUT_1_VOLUME 0x8C1
+#define MADERA_DRC1LMIX_INPUT_2_SOURCE 0x8C2
+#define MADERA_DRC1LMIX_INPUT_2_VOLUME 0x8C3
+#define MADERA_DRC1LMIX_INPUT_3_SOURCE 0x8C4
+#define MADERA_DRC1LMIX_INPUT_3_VOLUME 0x8C5
+#define MADERA_DRC1LMIX_INPUT_4_SOURCE 0x8C6
+#define MADERA_DRC1LMIX_INPUT_4_VOLUME 0x8C7
+#define MADERA_DRC1RMIX_INPUT_1_SOURCE 0x8C8
+#define MADERA_DRC1RMIX_INPUT_1_VOLUME 0x8C9
+#define MADERA_DRC1RMIX_INPUT_2_SOURCE 0x8CA
+#define MADERA_DRC1RMIX_INPUT_2_VOLUME 0x8CB
+#define MADERA_DRC1RMIX_INPUT_3_SOURCE 0x8CC
+#define MADERA_DRC1RMIX_INPUT_3_VOLUME 0x8CD
+#define MADERA_DRC1RMIX_INPUT_4_SOURCE 0x8CE
+#define MADERA_DRC1RMIX_INPUT_4_VOLUME 0x8CF
+#define MADERA_DRC2LMIX_INPUT_1_SOURCE 0x8D0
+#define MADERA_DRC2LMIX_INPUT_1_VOLUME 0x8D1
+#define MADERA_DRC2LMIX_INPUT_2_SOURCE 0x8D2
+#define MADERA_DRC2LMIX_INPUT_2_VOLUME 0x8D3
+#define MADERA_DRC2LMIX_INPUT_3_SOURCE 0x8D4
+#define MADERA_DRC2LMIX_INPUT_3_VOLUME 0x8D5
+#define MADERA_DRC2LMIX_INPUT_4_SOURCE 0x8D6
+#define MADERA_DRC2LMIX_INPUT_4_VOLUME 0x8D7
+#define MADERA_DRC2RMIX_INPUT_1_SOURCE 0x8D8
+#define MADERA_DRC2RMIX_INPUT_1_VOLUME 0x8D9
+#define MADERA_DRC2RMIX_INPUT_2_SOURCE 0x8DA
+#define MADERA_DRC2RMIX_INPUT_2_VOLUME 0x8DB
+#define MADERA_DRC2RMIX_INPUT_3_SOURCE 0x8DC
+#define MADERA_DRC2RMIX_INPUT_3_VOLUME 0x8DD
+#define MADERA_DRC2RMIX_INPUT_4_SOURCE 0x8DE
+#define MADERA_DRC2RMIX_INPUT_4_VOLUME 0x8DF
+#define MADERA_HPLP1MIX_INPUT_1_SOURCE 0x900
+#define MADERA_HPLP1MIX_INPUT_1_VOLUME 0x901
+#define MADERA_HPLP1MIX_INPUT_2_SOURCE 0x902
+#define MADERA_HPLP1MIX_INPUT_2_VOLUME 0x903
+#define MADERA_HPLP1MIX_INPUT_3_SOURCE 0x904
+#define MADERA_HPLP1MIX_INPUT_3_VOLUME 0x905
+#define MADERA_HPLP1MIX_INPUT_4_SOURCE 0x906
+#define MADERA_HPLP1MIX_INPUT_4_VOLUME 0x907
+#define MADERA_HPLP2MIX_INPUT_1_SOURCE 0x908
+#define MADERA_HPLP2MIX_INPUT_1_VOLUME 0x909
+#define MADERA_HPLP2MIX_INPUT_2_SOURCE 0x90A
+#define MADERA_HPLP2MIX_INPUT_2_VOLUME 0x90B
+#define MADERA_HPLP2MIX_INPUT_3_SOURCE 0x90C
+#define MADERA_HPLP2MIX_INPUT_3_VOLUME 0x90D
+#define MADERA_HPLP2MIX_INPUT_4_SOURCE 0x90E
+#define MADERA_HPLP2MIX_INPUT_4_VOLUME 0x90F
+#define MADERA_HPLP3MIX_INPUT_1_SOURCE 0x910
+#define MADERA_HPLP3MIX_INPUT_1_VOLUME 0x911
+#define MADERA_HPLP3MIX_INPUT_2_SOURCE 0x912
+#define MADERA_HPLP3MIX_INPUT_2_VOLUME 0x913
+#define MADERA_HPLP3MIX_INPUT_3_SOURCE 0x914
+#define MADERA_HPLP3MIX_INPUT_3_VOLUME 0x915
+#define MADERA_HPLP3MIX_INPUT_4_SOURCE 0x916
+#define MADERA_HPLP3MIX_INPUT_4_VOLUME 0x917
+#define MADERA_HPLP4MIX_INPUT_1_SOURCE 0x918
+#define MADERA_HPLP4MIX_INPUT_1_VOLUME 0x919
+#define MADERA_HPLP4MIX_INPUT_2_SOURCE 0x91A
+#define MADERA_HPLP4MIX_INPUT_2_VOLUME 0x91B
+#define MADERA_HPLP4MIX_INPUT_3_SOURCE 0x91C
+#define MADERA_HPLP4MIX_INPUT_3_VOLUME 0x91D
+#define MADERA_HPLP4MIX_INPUT_4_SOURCE 0x91E
+#define MADERA_HPLP4MIX_INPUT_4_VOLUME 0x91F
+#define MADERA_DSP1LMIX_INPUT_1_SOURCE 0x940
+#define MADERA_DSP1LMIX_INPUT_1_VOLUME 0x941
+#define MADERA_DSP1LMIX_INPUT_2_SOURCE 0x942
+#define MADERA_DSP1LMIX_INPUT_2_VOLUME 0x943
+#define MADERA_DSP1LMIX_INPUT_3_SOURCE 0x944
+#define MADERA_DSP1LMIX_INPUT_3_VOLUME 0x945
+#define MADERA_DSP1LMIX_INPUT_4_SOURCE 0x946
+#define MADERA_DSP1LMIX_INPUT_4_VOLUME 0x947
+#define MADERA_DSP1RMIX_INPUT_1_SOURCE 0x948
+#define MADERA_DSP1RMIX_INPUT_1_VOLUME 0x949
+#define MADERA_DSP1RMIX_INPUT_2_SOURCE 0x94A
+#define MADERA_DSP1RMIX_INPUT_2_VOLUME 0x94B
+#define MADERA_DSP1RMIX_INPUT_3_SOURCE 0x94C
+#define MADERA_DSP1RMIX_INPUT_3_VOLUME 0x94D
+#define MADERA_DSP1RMIX_INPUT_4_SOURCE 0x94E
+#define MADERA_DSP1RMIX_INPUT_4_VOLUME 0x94F
+#define MADERA_DSP1AUX1MIX_INPUT_1_SOURCE 0x950
+#define MADERA_DSP1AUX2MIX_INPUT_1_SOURCE 0x958
+#define MADERA_DSP1AUX3MIX_INPUT_1_SOURCE 0x960
+#define MADERA_DSP1AUX4MIX_INPUT_1_SOURCE 0x968
+#define MADERA_DSP1AUX5MIX_INPUT_1_SOURCE 0x970
+#define MADERA_DSP1AUX6MIX_INPUT_1_SOURCE 0x978
+#define MADERA_DSP2LMIX_INPUT_1_SOURCE 0x980
+#define MADERA_DSP2LMIX_INPUT_1_VOLUME 0x981
+#define MADERA_DSP2LMIX_INPUT_2_SOURCE 0x982
+#define MADERA_DSP2LMIX_INPUT_2_VOLUME 0x983
+#define MADERA_DSP2LMIX_INPUT_3_SOURCE 0x984
+#define MADERA_DSP2LMIX_INPUT_3_VOLUME 0x985
+#define MADERA_DSP2LMIX_INPUT_4_SOURCE 0x986
+#define MADERA_DSP2LMIX_INPUT_4_VOLUME 0x987
+#define MADERA_DSP2RMIX_INPUT_1_SOURCE 0x988
+#define MADERA_DSP2RMIX_INPUT_1_VOLUME 0x989
+#define MADERA_DSP2RMIX_INPUT_2_SOURCE 0x98A
+#define MADERA_DSP2RMIX_INPUT_2_VOLUME 0x98B
+#define MADERA_DSP2RMIX_INPUT_3_SOURCE 0x98C
+#define MADERA_DSP2RMIX_INPUT_3_VOLUME 0x98D
+#define MADERA_DSP2RMIX_INPUT_4_SOURCE 0x98E
+#define MADERA_DSP2RMIX_INPUT_4_VOLUME 0x98F
+#define MADERA_DSP2AUX1MIX_INPUT_1_SOURCE 0x990
+#define MADERA_DSP2AUX2MIX_INPUT_1_SOURCE 0x998
+#define MADERA_DSP2AUX3MIX_INPUT_1_SOURCE 0x9A0
+#define MADERA_DSP2AUX4MIX_INPUT_1_SOURCE 0x9A8
+#define MADERA_DSP2AUX5MIX_INPUT_1_SOURCE 0x9B0
+#define MADERA_DSP2AUX6MIX_INPUT_1_SOURCE 0x9B8
+#define MADERA_DSP3LMIX_INPUT_1_SOURCE 0x9C0
+#define MADERA_DSP3LMIX_INPUT_1_VOLUME 0x9C1
+#define MADERA_DSP3LMIX_INPUT_2_SOURCE 0x9C2
+#define MADERA_DSP3LMIX_INPUT_2_VOLUME 0x9C3
+#define MADERA_DSP3LMIX_INPUT_3_SOURCE 0x9C4
+#define MADERA_DSP3LMIX_INPUT_3_VOLUME 0x9C5
+#define MADERA_DSP3LMIX_INPUT_4_SOURCE 0x9C6
+#define MADERA_DSP3LMIX_INPUT_4_VOLUME 0x9C7
+#define MADERA_DSP3RMIX_INPUT_1_SOURCE 0x9C8
+#define MADERA_DSP3RMIX_INPUT_1_VOLUME 0x9C9
+#define MADERA_DSP3RMIX_INPUT_2_SOURCE 0x9CA
+#define MADERA_DSP3RMIX_INPUT_2_VOLUME 0x9CB
+#define MADERA_DSP3RMIX_INPUT_3_SOURCE 0x9CC
+#define MADERA_DSP3RMIX_INPUT_3_VOLUME 0x9CD
+#define MADERA_DSP3RMIX_INPUT_4_SOURCE 0x9CE
+#define MADERA_DSP3RMIX_INPUT_4_VOLUME 0x9CF
+#define MADERA_DSP3AUX1MIX_INPUT_1_SOURCE 0x9D0
+#define MADERA_DSP3AUX2MIX_INPUT_1_SOURCE 0x9D8
+#define MADERA_DSP3AUX3MIX_INPUT_1_SOURCE 0x9E0
+#define MADERA_DSP3AUX4MIX_INPUT_1_SOURCE 0x9E8
+#define MADERA_DSP3AUX5MIX_INPUT_1_SOURCE 0x9F0
+#define MADERA_DSP3AUX6MIX_INPUT_1_SOURCE 0x9F8
+#define MADERA_DSP4LMIX_INPUT_1_SOURCE 0xA00
+#define MADERA_DSP4LMIX_INPUT_1_VOLUME 0xA01
+#define MADERA_DSP4LMIX_INPUT_2_SOURCE 0xA02
+#define MADERA_DSP4LMIX_INPUT_2_VOLUME 0xA03
+#define MADERA_DSP4LMIX_INPUT_3_SOURCE 0xA04
+#define MADERA_DSP4LMIX_INPUT_3_VOLUME 0xA05
+#define MADERA_DSP4LMIX_INPUT_4_SOURCE 0xA06
+#define MADERA_DSP4LMIX_INPUT_4_VOLUME 0xA07
+#define MADERA_DSP4RMIX_INPUT_1_SOURCE 0xA08
+#define MADERA_DSP4RMIX_INPUT_1_VOLUME 0xA09
+#define MADERA_DSP4RMIX_INPUT_2_SOURCE 0xA0A
+#define MADERA_DSP4RMIX_INPUT_2_VOLUME 0xA0B
+#define MADERA_DSP4RMIX_INPUT_3_SOURCE 0xA0C
+#define MADERA_DSP4RMIX_INPUT_3_VOLUME 0xA0D
+#define MADERA_DSP4RMIX_INPUT_4_SOURCE 0xA0E
+#define MADERA_DSP4RMIX_INPUT_4_VOLUME 0xA0F
+#define MADERA_DSP4AUX1MIX_INPUT_1_SOURCE 0xA10
+#define MADERA_DSP4AUX2MIX_INPUT_1_SOURCE 0xA18
+#define MADERA_DSP4AUX3MIX_INPUT_1_SOURCE 0xA20
+#define MADERA_DSP4AUX4MIX_INPUT_1_SOURCE 0xA28
+#define MADERA_DSP4AUX5MIX_INPUT_1_SOURCE 0xA30
+#define MADERA_DSP4AUX6MIX_INPUT_1_SOURCE 0xA38
+#define MADERA_DSP5LMIX_INPUT_1_SOURCE 0xA40
+#define MADERA_DSP5LMIX_INPUT_1_VOLUME 0xA41
+#define MADERA_DSP5LMIX_INPUT_2_SOURCE 0xA42
+#define MADERA_DSP5LMIX_INPUT_2_VOLUME 0xA43
+#define MADERA_DSP5LMIX_INPUT_3_SOURCE 0xA44
+#define MADERA_DSP5LMIX_INPUT_3_VOLUME 0xA45
+#define MADERA_DSP5LMIX_INPUT_4_SOURCE 0xA46
+#define MADERA_DSP5LMIX_INPUT_4_VOLUME 0xA47
+#define MADERA_DSP5RMIX_INPUT_1_SOURCE 0xA48
+#define MADERA_DSP5RMIX_INPUT_1_VOLUME 0xA49
+#define MADERA_DSP5RMIX_INPUT_2_SOURCE 0xA4A
+#define MADERA_DSP5RMIX_INPUT_2_VOLUME 0xA4B
+#define MADERA_DSP5RMIX_INPUT_3_SOURCE 0xA4C
+#define MADERA_DSP5RMIX_INPUT_3_VOLUME 0xA4D
+#define MADERA_DSP5RMIX_INPUT_4_SOURCE 0xA4E
+#define MADERA_DSP5RMIX_INPUT_4_VOLUME 0xA4F
+#define MADERA_DSP5AUX1MIX_INPUT_1_SOURCE 0xA50
+#define MADERA_DSP5AUX2MIX_INPUT_1_SOURCE 0xA58
+#define MADERA_DSP5AUX3MIX_INPUT_1_SOURCE 0xA60
+#define MADERA_DSP5AUX4MIX_INPUT_1_SOURCE 0xA68
+#define MADERA_DSP5AUX5MIX_INPUT_1_SOURCE 0xA70
+#define MADERA_DSP5AUX6MIX_INPUT_1_SOURCE 0xA78
+#define MADERA_ASRC1_1LMIX_INPUT_1_SOURCE 0xA80
+#define MADERA_ASRC1_1RMIX_INPUT_1_SOURCE 0xA88
+#define MADERA_ASRC1_2LMIX_INPUT_1_SOURCE 0xA90
+#define MADERA_ASRC1_2RMIX_INPUT_1_SOURCE 0xA98
+#define MADERA_ASRC2_1LMIX_INPUT_1_SOURCE 0xAA0
+#define MADERA_ASRC2_1RMIX_INPUT_1_SOURCE 0xAA8
+#define MADERA_ASRC2_2LMIX_INPUT_1_SOURCE 0xAB0
+#define MADERA_ASRC2_2RMIX_INPUT_1_SOURCE 0xAB8
+#define MADERA_ISRC1DEC1MIX_INPUT_1_SOURCE 0xB00
+#define MADERA_ISRC1DEC2MIX_INPUT_1_SOURCE 0xB08
+#define MADERA_ISRC1DEC3MIX_INPUT_1_SOURCE 0xB10
+#define MADERA_ISRC1DEC4MIX_INPUT_1_SOURCE 0xB18
+#define MADERA_ISRC1INT1MIX_INPUT_1_SOURCE 0xB20
+#define MADERA_ISRC1INT2MIX_INPUT_1_SOURCE 0xB28
+#define MADERA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30
+#define MADERA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
+#define MADERA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
+#define MADERA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
+#define MADERA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50
+#define MADERA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58
+#define MADERA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
+#define MADERA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68
+#define MADERA_ISRC2INT3MIX_INPUT_1_SOURCE 0xB70
+#define MADERA_ISRC2INT4MIX_INPUT_1_SOURCE 0xB78
+#define MADERA_ISRC3DEC1MIX_INPUT_1_SOURCE 0xB80
+#define MADERA_ISRC3DEC2MIX_INPUT_1_SOURCE 0xB88
+#define MADERA_ISRC3DEC3MIX_INPUT_1_SOURCE 0xB90
+#define MADERA_ISRC3DEC4MIX_INPUT_1_SOURCE 0xB98
+#define MADERA_ISRC3INT1MIX_INPUT_1_SOURCE 0xBA0
+#define MADERA_ISRC3INT2MIX_INPUT_1_SOURCE 0xBA8
+#define MADERA_ISRC3INT3MIX_INPUT_1_SOURCE 0xBB0
+#define MADERA_ISRC3INT4MIX_INPUT_1_SOURCE 0xBB8
+#define MADERA_ISRC4DEC1MIX_INPUT_1_SOURCE 0xBC0
+#define MADERA_ISRC4DEC2MIX_INPUT_1_SOURCE 0xBC8
+#define MADERA_ISRC4INT1MIX_INPUT_1_SOURCE 0xBE0
+#define MADERA_ISRC4INT2MIX_INPUT_1_SOURCE 0xBE8
+#define MADERA_DSP6LMIX_INPUT_1_SOURCE 0xC00
+#define MADERA_DSP6LMIX_INPUT_1_VOLUME 0xC01
+#define MADERA_DSP6LMIX_INPUT_2_SOURCE 0xC02
+#define MADERA_DSP6LMIX_INPUT_2_VOLUME 0xC03
+#define MADERA_DSP6LMIX_INPUT_3_SOURCE 0xC04
+#define MADERA_DSP6LMIX_INPUT_3_VOLUME 0xC05
+#define MADERA_DSP6LMIX_INPUT_4_SOURCE 0xC06
+#define MADERA_DSP6LMIX_INPUT_4_VOLUME 0xC07
+#define MADERA_DSP6RMIX_INPUT_1_SOURCE 0xC08
+#define MADERA_DSP6RMIX_INPUT_1_VOLUME 0xC09
+#define MADERA_DSP6RMIX_INPUT_2_SOURCE 0xC0A
+#define MADERA_DSP6RMIX_INPUT_2_VOLUME 0xC0B
+#define MADERA_DSP6RMIX_INPUT_3_SOURCE 0xC0C
+#define MADERA_DSP6RMIX_INPUT_3_VOLUME 0xC0D
+#define MADERA_DSP6RMIX_INPUT_4_SOURCE 0xC0E
+#define MADERA_DSP6RMIX_INPUT_4_VOLUME 0xC0F
+#define MADERA_DSP6AUX1MIX_INPUT_1_SOURCE 0xC10
+#define MADERA_DSP6AUX2MIX_INPUT_1_SOURCE 0xC18
+#define MADERA_DSP6AUX3MIX_INPUT_1_SOURCE 0xC20
+#define MADERA_DSP6AUX4MIX_INPUT_1_SOURCE 0xC28
+#define MADERA_DSP6AUX5MIX_INPUT_1_SOURCE 0xC30
+#define MADERA_DSP6AUX6MIX_INPUT_1_SOURCE 0xC38
+#define MADERA_DSP7LMIX_INPUT_1_SOURCE 0xC40
+#define MADERA_DSP7LMIX_INPUT_1_VOLUME 0xC41
+#define MADERA_DSP7LMIX_INPUT_2_SOURCE 0xC42
+#define MADERA_DSP7LMIX_INPUT_2_VOLUME 0xC43
+#define MADERA_DSP7LMIX_INPUT_3_SOURCE 0xC44
+#define MADERA_DSP7LMIX_INPUT_3_VOLUME 0xC45
+#define MADERA_DSP7LMIX_INPUT_4_SOURCE 0xC46
+#define MADERA_DSP7LMIX_INPUT_4_VOLUME 0xC47
+#define MADERA_DSP7RMIX_INPUT_1_SOURCE 0xC48
+#define MADERA_DSP7RMIX_INPUT_1_VOLUME 0xC49
+#define MADERA_DSP7RMIX_INPUT_2_SOURCE 0xC4A
+#define MADERA_DSP7RMIX_INPUT_2_VOLUME 0xC4B
+#define MADERA_DSP7RMIX_INPUT_3_SOURCE 0xC4C
+#define MADERA_DSP7RMIX_INPUT_3_VOLUME 0xC4D
+#define MADERA_DSP7RMIX_INPUT_4_SOURCE 0xC4E
+#define MADERA_DSP7RMIX_INPUT_4_VOLUME 0xC4F
+#define MADERA_DSP7AUX1MIX_INPUT_1_SOURCE 0xC50
+#define MADERA_DSP7AUX2MIX_INPUT_1_SOURCE 0xC58
+#define MADERA_DSP7AUX3MIX_INPUT_1_SOURCE 0xC60
+#define MADERA_DSP7AUX4MIX_INPUT_1_SOURCE 0xC68
+#define MADERA_DSP7AUX5MIX_INPUT_1_SOURCE 0xC70
+#define MADERA_DSP7AUX6MIX_INPUT_1_SOURCE 0xC78
+#define MADERA_DFC1MIX_INPUT_1_SOURCE 0xDC0
+#define MADERA_DFC2MIX_INPUT_1_SOURCE 0xDC8
+#define MADERA_DFC3MIX_INPUT_1_SOURCE 0xDD0
+#define MADERA_DFC4MIX_INPUT_1_SOURCE 0xDD8
+#define MADERA_DFC5MIX_INPUT_1_SOURCE 0xDE0
+#define MADERA_DFC6MIX_INPUT_1_SOURCE 0xDE8
+#define MADERA_DFC7MIX_INPUT_1_SOURCE 0xDF0
+#define MADERA_DFC8MIX_INPUT_1_SOURCE 0xDF8
+#define MADERA_FX_CTRL1 0xE00
+#define MADERA_FX_CTRL2 0xE01
+#define MADERA_EQ1_1 0xE10
+#define MADERA_EQ1_2 0xE11
+#define MADERA_EQ1_21 0xE24
+#define MADERA_EQ2_1 0xE26
+#define MADERA_EQ2_2 0xE27
+#define MADERA_EQ2_21 0xE3A
+#define MADERA_EQ3_1 0xE3C
+#define MADERA_EQ3_2 0xE3D
+#define MADERA_EQ3_21 0xE50
+#define MADERA_EQ4_1 0xE52
+#define MADERA_EQ4_2 0xE53
+#define MADERA_EQ4_21 0xE66
+#define MADERA_DRC1_CTRL1 0xE80
+#define MADERA_DRC1_CTRL2 0xE81
+#define MADERA_DRC1_CTRL3 0xE82
+#define MADERA_DRC1_CTRL4 0xE83
+#define MADERA_DRC1_CTRL5 0xE84
+#define MADERA_DRC2_CTRL1 0xE88
+#define MADERA_DRC2_CTRL2 0xE89
+#define MADERA_DRC2_CTRL3 0xE8A
+#define MADERA_DRC2_CTRL4 0xE8B
+#define MADERA_DRC2_CTRL5 0xE8C
+#define MADERA_HPLPF1_1 0xEC0
+#define MADERA_HPLPF1_2 0xEC1
+#define MADERA_HPLPF2_1 0xEC4
+#define MADERA_HPLPF2_2 0xEC5
+#define MADERA_HPLPF3_1 0xEC8
+#define MADERA_HPLPF3_2 0xEC9
+#define MADERA_HPLPF4_1 0xECC
+#define MADERA_HPLPF4_2 0xECD
+#define MADERA_ASRC2_ENABLE 0xED0
+#define MADERA_ASRC2_STATUS 0xED1
+#define MADERA_ASRC2_RATE1 0xED2
+#define MADERA_ASRC2_RATE2 0xED3
+#define MADERA_ASRC1_ENABLE 0xEE0
+#define MADERA_ASRC1_STATUS 0xEE1
+#define MADERA_ASRC1_RATE1 0xEE2
+#define MADERA_ASRC1_RATE2 0xEE3
+#define MADERA_ISRC_1_CTRL_1 0xEF0
+#define MADERA_ISRC_1_CTRL_2 0xEF1
+#define MADERA_ISRC_1_CTRL_3 0xEF2
+#define MADERA_ISRC_2_CTRL_1 0xEF3
+#define MADERA_ISRC_2_CTRL_2 0xEF4
+#define MADERA_ISRC_2_CTRL_3 0xEF5
+#define MADERA_ISRC_3_CTRL_1 0xEF6
+#define MADERA_ISRC_3_CTRL_2 0xEF7
+#define MADERA_ISRC_3_CTRL_3 0xEF8
+#define MADERA_ISRC_4_CTRL_1 0xEF9
+#define MADERA_ISRC_4_CTRL_2 0xEFA
+#define MADERA_ISRC_4_CTRL_3 0xEFB
+#define MADERA_CLOCK_CONTROL 0xF00
+#define MADERA_ANC_SRC 0xF01
+#define MADERA_DSP_STATUS 0xF02
+#define MADERA_ANC_COEFF_START 0xF08
+#define MADERA_ANC_COEFF_END 0xF12
+#define MADERA_FCL_FILTER_CONTROL 0xF15
+#define MADERA_FCL_ADC_REFORMATTER_CONTROL 0xF17
+#define MADERA_FCL_COEFF_START 0xF18
+#define MADERA_FCL_COEFF_END 0xF69
+#define MADERA_FCR_FILTER_CONTROL 0xF71
+#define MADERA_FCR_ADC_REFORMATTER_CONTROL 0xF73
+#define MADERA_FCR_COEFF_START 0xF74
+#define MADERA_FCR_COEFF_END 0xFC5
+#define MADERA_AUXPDM1_CTRL_0 0x10C0
+#define MADERA_AUXPDM1_CTRL_1 0x10C1
+#define MADERA_DFC1_CTRL 0x1480
+#define MADERA_DFC1_RX 0x1482
+#define MADERA_DFC1_TX 0x1484
+#define MADERA_DFC2_CTRL 0x1486
+#define MADERA_DFC2_RX 0x1488
+#define MADERA_DFC2_TX 0x148A
+#define MADERA_DFC3_CTRL 0x148C
+#define MADERA_DFC3_RX 0x148E
+#define MADERA_DFC3_TX 0x1490
+#define MADERA_DFC4_CTRL 0x1492
+#define MADERA_DFC4_RX 0x1494
+#define MADERA_DFC4_TX 0x1496
+#define MADERA_DFC5_CTRL 0x1498
+#define MADERA_DFC5_RX 0x149A
+#define MADERA_DFC5_TX 0x149C
+#define MADERA_DFC6_CTRL 0x149E
+#define MADERA_DFC6_RX 0x14A0
+#define MADERA_DFC6_TX 0x14A2
+#define MADERA_DFC7_CTRL 0x14A4
+#define MADERA_DFC7_RX 0x14A6
+#define MADERA_DFC7_TX 0x14A8
+#define MADERA_DFC8_CTRL 0x14AA
+#define MADERA_DFC8_RX 0x14AC
+#define MADERA_DFC8_TX 0x14AE
+#define MADERA_DFC_STATUS 0x14B6
+#define MADERA_ADSP2_IRQ0 0x1600
+#define MADERA_ADSP2_IRQ1 0x1601
+#define MADERA_ADSP2_IRQ2 0x1602
+#define MADERA_ADSP2_IRQ3 0x1603
+#define MADERA_ADSP2_IRQ4 0x1604
+#define MADERA_ADSP2_IRQ5 0x1605
+#define MADERA_ADSP2_IRQ6 0x1606
+#define MADERA_ADSP2_IRQ7 0x1607
+#define MADERA_GPIO1_CTRL_1 0x1700
+#define MADERA_GPIO1_CTRL_2 0x1701
+#define MADERA_GPIO2_CTRL_1 0x1702
+#define MADERA_GPIO2_CTRL_2 0x1703
+#define MADERA_GPIO15_CTRL_1 0x171C
+#define MADERA_GPIO15_CTRL_2 0x171D
+#define MADERA_GPIO16_CTRL_1 0x171E
+#define MADERA_GPIO16_CTRL_2 0x171F
+#define MADERA_GPIO38_CTRL_1 0x174A
+#define MADERA_GPIO38_CTRL_2 0x174B
+#define MADERA_GPIO40_CTRL_1 0x174E
+#define MADERA_GPIO40_CTRL_2 0x174F
+#define MADERA_IRQ1_STATUS_1 0x1800
+#define MADERA_IRQ1_STATUS_2 0x1801
+#define MADERA_IRQ1_STATUS_6 0x1805
+#define MADERA_IRQ1_STATUS_7 0x1806
+#define MADERA_IRQ1_STATUS_9 0x1808
+#define MADERA_IRQ1_STATUS_11 0x180A
+#define MADERA_IRQ1_STATUS_12 0x180B
+#define MADERA_IRQ1_STATUS_15 0x180E
+#define MADERA_IRQ1_STATUS_33 0x1820
+#define MADERA_IRQ1_MASK_1 0x1840
+#define MADERA_IRQ1_MASK_2 0x1841
+#define MADERA_IRQ1_MASK_6 0x1845
+#define MADERA_IRQ1_MASK_33 0x1860
+#define MADERA_IRQ1_RAW_STATUS_1 0x1880
+#define MADERA_IRQ1_RAW_STATUS_2 0x1881
+#define MADERA_IRQ1_RAW_STATUS_7 0x1886
+#define MADERA_IRQ1_RAW_STATUS_15 0x188E
+#define MADERA_IRQ1_RAW_STATUS_33 0x18A0
+#define MADERA_INTERRUPT_DEBOUNCE_7 0x1A06
+#define MADERA_INTERRUPT_DEBOUNCE_15 0x1A0E
+#define MADERA_IRQ1_CTRL 0x1A80
+#define MADERA_IRQ2_CTRL 0x1A82
+#define MADERA_INTERRUPT_RAW_STATUS_1 0x1AA0
+#define MADERA_WSEQ_SEQUENCE_1 0x3000
+#define MADERA_WSEQ_SEQUENCE_225 0x31C0
+#define MADERA_WSEQ_SEQUENCE_252 0x31F6
+#define CS47L35_OTP_HPDET_CAL_1 0x31F8
+#define CS47L35_OTP_HPDET_CAL_2 0x31FA
+#define MADERA_WSEQ_SEQUENCE_508 0x33F6
+#define CS47L85_OTP_HPDET_CAL_1 0x33F8
+#define CS47L85_OTP_HPDET_CAL_2 0x33FA
+#define MADERA_OTP_HPDET_CAL_1 0x20004
+#define MADERA_OTP_HPDET_CAL_2 0x20006
+#define MADERA_DSP1_CONFIG_1 0x0FFE00
+#define MADERA_DSP1_CONFIG_2 0x0FFE02
+#define MADERA_DSP1_SCRATCH_1 0x0FFE40
+#define MADERA_DSP1_SCRATCH_2 0x0FFE42
+#define MADERA_DSP1_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x0FFE7C
+#define MADERA_DSP2_CONFIG_1 0x17FE00
+#define MADERA_DSP2_CONFIG_2 0x17FE02
+#define MADERA_DSP2_SCRATCH_1 0x17FE40
+#define MADERA_DSP2_SCRATCH_2 0x17FE42
+#define MADERA_DSP2_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x17FE7C
+#define MADERA_DSP3_CONFIG_1 0x1FFE00
+#define MADERA_DSP3_CONFIG_2 0x1FFE02
+#define MADERA_DSP3_SCRATCH_1 0x1FFE40
+#define MADERA_DSP3_SCRATCH_2 0x1FFE42
+#define MADERA_DSP3_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x1FFE7C
+#define MADERA_DSP4_CONFIG_1 0x27FE00
+#define MADERA_DSP4_CONFIG_2 0x27FE02
+#define MADERA_DSP4_SCRATCH_1 0x27FE40
+#define MADERA_DSP4_SCRATCH_2 0x27FE42
+#define MADERA_DSP4_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x27FE7C
+#define MADERA_DSP5_CONFIG_1 0x2FFE00
+#define MADERA_DSP5_CONFIG_2 0x2FFE02
+#define MADERA_DSP5_SCRATCH_1 0x2FFE40
+#define MADERA_DSP5_SCRATCH_2 0x2FFE42
+#define MADERA_DSP5_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x2FFE7C
+#define MADERA_DSP6_CONFIG_1 0x37FE00
+#define MADERA_DSP6_CONFIG_2 0x37FE02
+#define MADERA_DSP6_SCRATCH_1 0x37FE40
+#define MADERA_DSP6_SCRATCH_2 0x37FE42
+#define MADERA_DSP6_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x37FE7C
+#define MADERA_DSP7_CONFIG_1 0x3FFE00
+#define MADERA_DSP7_CONFIG_2 0x3FFE02
+#define MADERA_DSP7_SCRATCH_1 0x3FFE40
+#define MADERA_DSP7_SCRATCH_2 0x3FFE42
+#define MADERA_DSP7_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x3FFE7C
+
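The *_MASK/*_SHIFT pairs that follow use the standard register-bitfield convention: the mask selects a field within the 16-bit register and the shift gives the position of its least significant bit. A minimal, self-contained sketch of how such pairs are consumed (the helper names and example values are illustrative, not part of this header):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helpers: extract and insert a field described by a
 * (mask, shift) pair, as used throughout the definitions below.
 */
static uint16_t field_get(uint16_t reg, uint16_t mask, unsigned int shift)
{
	return (uint16_t)((reg & mask) >> shift);
}

static uint16_t field_set(uint16_t reg, uint16_t mask, unsigned int shift,
			  uint16_t val)
{
	return (uint16_t)((reg & ~mask) | ((val << shift) & mask));
}

int main(void)
{
	/* Hypothetical example using the System_Clock_1 layout below. */
	const uint16_t freq_mask = 0x0700;	/* MADERA_SYSCLK_FREQ_MASK */
	const unsigned int freq_shift = 8;	/* MADERA_SYSCLK_FREQ_SHIFT */
	uint16_t reg = 0x0040;			/* SYSCLK_ENA already set */

	reg = field_set(reg, freq_mask, freq_shift, 0x4);
	printf("reg=0x%04x freq=%u\n", (unsigned int)reg,
	       (unsigned int)field_get(reg, freq_mask, freq_shift));
	return 0;
}
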
+/* (0x0000) Software_Reset */
+#define MADERA_SW_RST_DEV_ID1_MASK 0xFFFF
+#define MADERA_SW_RST_DEV_ID1_SHIFT 0
+
+/* (0x0001) Hardware_Revision */
+#define MADERA_HW_REVISION_MASK 0x00FF
+#define MADERA_HW_REVISION_SHIFT 0
+
+/* (0x0020) Tone_Generator_1 */
+#define MADERA_TONE2_ENA 0x0002
+#define MADERA_TONE2_ENA_MASK 0x0002
+#define MADERA_TONE2_ENA_SHIFT 1
+#define MADERA_TONE1_ENA 0x0001
+#define MADERA_TONE1_ENA_MASK 0x0001
+#define MADERA_TONE1_ENA_SHIFT 0
+
+/* (0x0021) Tone_Generator_2 */
+#define MADERA_TONE1_LVL_0_MASK 0xFFFF
+#define MADERA_TONE1_LVL_0_SHIFT 0
+
+/* (0x0022) Tone_Generator_3 */
+#define MADERA_TONE1_LVL_MASK 0x00FF
+#define MADERA_TONE1_LVL_SHIFT 0
+
+/* (0x0023) Tone_Generator_4 */
+#define MADERA_TONE2_LVL_0_MASK 0xFFFF
+#define MADERA_TONE2_LVL_0_SHIFT 0
+
+/* (0x0024) Tone_Generator_5 */
+#define MADERA_TONE2_LVL_MASK 0x00FF
+#define MADERA_TONE2_LVL_SHIFT 0
+
+/* (0x0030) PWM_Drive_1 */
+#define MADERA_PWM2_ENA 0x0002
+#define MADERA_PWM2_ENA_MASK 0x0002
+#define MADERA_PWM2_ENA_SHIFT 1
+#define MADERA_PWM1_ENA 0x0001
+#define MADERA_PWM1_ENA_MASK 0x0001
+#define MADERA_PWM1_ENA_SHIFT 0
+
+/* (0x00A0) Comfort_Noise_Generator */
+#define MADERA_NOISE_GEN_ENA 0x0020
+#define MADERA_NOISE_GEN_ENA_MASK 0x0020
+#define MADERA_NOISE_GEN_ENA_SHIFT 5
+#define MADERA_NOISE_GEN_GAIN_MASK 0x001F
+#define MADERA_NOISE_GEN_GAIN_SHIFT 0
+
+/* (0x0100) Clock_32k_1 */
+#define MADERA_CLK_32K_ENA 0x0040
+#define MADERA_CLK_32K_ENA_MASK 0x0040
+#define MADERA_CLK_32K_ENA_SHIFT 6
+#define MADERA_CLK_32K_SRC_MASK 0x0003
+#define MADERA_CLK_32K_SRC_SHIFT 0
+
+/* (0x0101) System_Clock_1 */
+#define MADERA_SYSCLK_FRAC 0x8000
+#define MADERA_SYSCLK_FRAC_MASK 0x8000
+#define MADERA_SYSCLK_FRAC_SHIFT 15
+#define MADERA_SYSCLK_FREQ_MASK 0x0700
+#define MADERA_SYSCLK_FREQ_SHIFT 8
+#define MADERA_SYSCLK_ENA 0x0040
+#define MADERA_SYSCLK_ENA_MASK 0x0040
+#define MADERA_SYSCLK_ENA_SHIFT 6
+#define MADERA_SYSCLK_SRC_MASK 0x000F
+#define MADERA_SYSCLK_SRC_SHIFT 0
+
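In driver code the System_Clock_1 fields above are normally programmed through regmap rather than raw I/O. A hedged sketch, assuming the register-address macro MADERA_SYSTEM_CLOCK_1 is defined earlier in this header and that the MFD core supplies the struct regmap *; freq and src are raw field codes from the datasheet:

#include <linux/regmap.h>

/* Sketch only: select a SYSCLK frequency and source, then enable the
 * clock. MADERA_SYSTEM_CLOCK_1 is assumed from earlier in this header.
 */
static int madera_sysclk_enable_sketch(struct regmap *regmap,
				       unsigned int freq, unsigned int src)
{
	int ret;

	ret = regmap_update_bits(regmap, MADERA_SYSTEM_CLOCK_1,
				 MADERA_SYSCLK_FREQ_MASK |
				 MADERA_SYSCLK_SRC_MASK,
				 (freq << MADERA_SYSCLK_FREQ_SHIFT) |
				 (src << MADERA_SYSCLK_SRC_SHIFT));
	if (ret)
		return ret;

	return regmap_update_bits(regmap, MADERA_SYSTEM_CLOCK_1,
				  MADERA_SYSCLK_ENA_MASK, MADERA_SYSCLK_ENA);
}

regmap_update_bits() performs the read-modify-write internally, so only the named fields are changed.
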
+/* (0x0102) Sample_rate_1 */
+#define MADERA_SAMPLE_RATE_1_MASK 0x001F
+#define MADERA_SAMPLE_RATE_1_SHIFT 0
+
+/* (0x0103) Sample_rate_2 */
+#define MADERA_SAMPLE_RATE_2_MASK 0x001F
+#define MADERA_SAMPLE_RATE_2_SHIFT 0
+
+/* (0x0104) Sample_rate_3 */
+#define MADERA_SAMPLE_RATE_3_MASK 0x001F
+#define MADERA_SAMPLE_RATE_3_SHIFT 0
+
+/* (0x0112) Async_clock_1 */
+#define MADERA_ASYNC_CLK_FREQ_MASK 0x0700
+#define MADERA_ASYNC_CLK_FREQ_SHIFT 8
+#define MADERA_ASYNC_CLK_ENA 0x0040
+#define MADERA_ASYNC_CLK_ENA_MASK 0x0040
+#define MADERA_ASYNC_CLK_ENA_SHIFT 6
+#define MADERA_ASYNC_CLK_SRC_MASK 0x000F
+#define MADERA_ASYNC_CLK_SRC_SHIFT 0
+
+/* (0x0113) Async_sample_rate_1 */
+#define MADERA_ASYNC_SAMPLE_RATE_1_MASK 0x001F
+#define MADERA_ASYNC_SAMPLE_RATE_1_SHIFT 0
+
+/* (0x0114) Async_sample_rate_2 */
+#define MADERA_ASYNC_SAMPLE_RATE_2_MASK 0x001F
+#define MADERA_ASYNC_SAMPLE_RATE_2_SHIFT 0
+
+/* (0x0120) DSP_Clock_1 */
+#define MADERA_DSP_CLK_FREQ_LEGACY 0x0700
+#define MADERA_DSP_CLK_FREQ_LEGACY_MASK 0x0700
+#define MADERA_DSP_CLK_FREQ_LEGACY_SHIFT 8
+#define MADERA_DSP_CLK_ENA 0x0040
+#define MADERA_DSP_CLK_ENA_MASK 0x0040
+#define MADERA_DSP_CLK_ENA_SHIFT 6
+#define MADERA_DSP_CLK_SRC 0x000F
+#define MADERA_DSP_CLK_SRC_MASK 0x000F
+#define MADERA_DSP_CLK_SRC_SHIFT 0
+
+/* (0x0122) DSP_Clock_2 */
+#define MADERA_DSP_CLK_FREQ_MASK 0x03FF
+#define MADERA_DSP_CLK_FREQ_SHIFT 0
+
+/* (0x0149) Output_system_clock */
+#define MADERA_OPCLK_ENA 0x8000
+#define MADERA_OPCLK_ENA_MASK 0x8000
+#define MADERA_OPCLK_ENA_SHIFT 15
+#define MADERA_OPCLK_DIV_MASK 0x00F8
+#define MADERA_OPCLK_DIV_SHIFT 3
+#define MADERA_OPCLK_SEL_MASK 0x0007
+#define MADERA_OPCLK_SEL_SHIFT 0
+
+/* (0x014A) Output_async_clock */
+#define MADERA_OPCLK_ASYNC_ENA 0x8000
+#define MADERA_OPCLK_ASYNC_ENA_MASK 0x8000
+#define MADERA_OPCLK_ASYNC_ENA_SHIFT 15
+#define MADERA_OPCLK_ASYNC_DIV_MASK 0x00F8
+#define MADERA_OPCLK_ASYNC_DIV_SHIFT 3
+#define MADERA_OPCLK_ASYNC_SEL_MASK 0x0007
+#define MADERA_OPCLK_ASYNC_SEL_SHIFT 0
+
+/* (0x0171) FLL1_Control_1 */
+#define CS47L92_FLL1_REFCLK_SRC_MASK 0xF000
+#define CS47L92_FLL1_REFCLK_SRC_SHIFT 12
+#define MADERA_FLL1_HOLD_MASK 0x0004
+#define MADERA_FLL1_HOLD_SHIFT 2
+#define MADERA_FLL1_FREERUN 0x0002
+#define MADERA_FLL1_FREERUN_MASK 0x0002
+#define MADERA_FLL1_FREERUN_SHIFT 1
+#define MADERA_FLL1_ENA 0x0001
+#define MADERA_FLL1_ENA_MASK 0x0001
+#define MADERA_FLL1_ENA_SHIFT 0
+
+/* (0x0172) FLL1_Control_2 */
+#define MADERA_FLL1_CTRL_UPD 0x8000
+#define MADERA_FLL1_CTRL_UPD_MASK 0x8000
+#define MADERA_FLL1_CTRL_UPD_SHIFT 15
+#define MADERA_FLL1_N_MASK 0x03FF
+#define MADERA_FLL1_N_SHIFT 0
+
+/* (0x0173) FLL1_Control_3 */
+#define MADERA_FLL1_THETA_MASK 0xFFFF
+#define MADERA_FLL1_THETA_SHIFT 0
+
+/* (0x0174) FLL1_Control_4 */
+#define MADERA_FLL1_LAMBDA_MASK 0xFFFF
+#define MADERA_FLL1_LAMBDA_SHIFT 0
+
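FLL1_CTRL_UPD in FLL1_Control_2 reads as a latch bit: new N/THETA/LAMBDA values are staged and then applied by setting it (a hedged reading of the field name and of how this codec family's drivers use it). A sketch, assuming the MADERA_FLL1_CONTROL_n address macros from earlier in this header:

#include <linux/regmap.h>

/* Sketch only: program the FLL1 ratio registers, then latch the new
 * configuration with FLL1_CTRL_UPD. n, theta and lambda are raw field
 * codes; error handling is trimmed for brevity.
 */
static int madera_fll1_set_ratio_sketch(struct regmap *regmap,
					unsigned int n, unsigned int theta,
					unsigned int lambda)
{
	regmap_update_bits(regmap, MADERA_FLL1_CONTROL_3,
			   MADERA_FLL1_THETA_MASK,
			   theta << MADERA_FLL1_THETA_SHIFT);
	regmap_update_bits(regmap, MADERA_FLL1_CONTROL_4,
			   MADERA_FLL1_LAMBDA_MASK,
			   lambda << MADERA_FLL1_LAMBDA_SHIFT);
	return regmap_update_bits(regmap, MADERA_FLL1_CONTROL_2,
				  MADERA_FLL1_CTRL_UPD_MASK |
				  MADERA_FLL1_N_MASK,
				  MADERA_FLL1_CTRL_UPD |
				  (n << MADERA_FLL1_N_SHIFT));
}
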
+/* (0x0175) FLL1_Control_5 */
+#define MADERA_FLL1_FRATIO_MASK 0x0F00
+#define MADERA_FLL1_FRATIO_SHIFT 8
+#define MADERA_FLL1_FB_DIV_MASK 0x03FF
+#define MADERA_FLL1_FB_DIV_SHIFT 0
+
+/* (0x0176) FLL1_Control_6 */
+#define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0
+#define MADERA_FLL1_REFCLK_DIV_SHIFT 6
+#define MADERA_FLL1_REFCLK_SRC_MASK 0x000F
+#define MADERA_FLL1_REFCLK_SRC_SHIFT 0
+
+/* (0x0179) FLL1_Control_7 */
+#define MADERA_FLL1_GAIN_MASK 0x003C
+#define MADERA_FLL1_GAIN_SHIFT 2
+
+/* (0x017A) FLL1_EFS_2 */
+#define MADERA_FLL1_PHASE_GAIN_MASK 0xF000
+#define MADERA_FLL1_PHASE_GAIN_SHIFT 12
+#define MADERA_FLL1_PHASE_ENA_MASK 0x0800
+#define MADERA_FLL1_PHASE_ENA_SHIFT 11
+
+/* (0x017A) FLL1_Control_10 */
+#define MADERA_FLL1_HP_MASK 0xC000
+#define MADERA_FLL1_HP_SHIFT 14
+#define MADERA_FLL1_PHASEDET_ENA_MASK 0x1000
+#define MADERA_FLL1_PHASEDET_ENA_SHIFT 12
+
+/* (0x017B) FLL1_Control_11 */
+#define MADERA_FLL1_LOCKDET_THR_MASK 0x001E
+#define MADERA_FLL1_LOCKDET_THR_SHIFT 1
+#define MADERA_FLL1_LOCKDET_MASK 0x0001
+#define MADERA_FLL1_LOCKDET_SHIFT 0
+
+/* (0x017D) FLL1_Digital_Test_1 */
+#define MADERA_FLL1_SYNC_EFS_ENA_MASK 0x0100
+#define MADERA_FLL1_SYNC_EFS_ENA_SHIFT 8
+#define MADERA_FLL1_CLK_VCO_FAST_SRC_MASK 0x0003
+#define MADERA_FLL1_CLK_VCO_FAST_SRC_SHIFT 0
+
+/* (0x0181) FLL1_Synchroniser_1 */
+#define MADERA_FLL1_SYNC_ENA 0x0001
+#define MADERA_FLL1_SYNC_ENA_MASK 0x0001
+#define MADERA_FLL1_SYNC_ENA_SHIFT 0
+
+/* (0x0182) FLL1_Synchroniser_2 */
+#define MADERA_FLL1_SYNC_N_MASK 0x03FF
+#define MADERA_FLL1_SYNC_N_SHIFT 0
+
+/* (0x0183) FLL1_Synchroniser_3 */
+#define MADERA_FLL1_SYNC_THETA_MASK 0xFFFF
+#define MADERA_FLL1_SYNC_THETA_SHIFT 0
+
+/* (0x0184) FLL1_Synchroniser_4 */
+#define MADERA_FLL1_SYNC_LAMBDA_MASK 0xFFFF
+#define MADERA_FLL1_SYNC_LAMBDA_SHIFT 0
+
+/* (0x0185) FLL1_Synchroniser_5 */
+#define MADERA_FLL1_SYNC_FRATIO_MASK 0x0700
+#define MADERA_FLL1_SYNC_FRATIO_SHIFT 8
+
+/* (0x0186) FLL1_Synchroniser_6 */
+#define MADERA_FLL1_SYNCCLK_DIV_MASK 0x00C0
+#define MADERA_FLL1_SYNCCLK_DIV_SHIFT 6
+#define MADERA_FLL1_SYNCCLK_SRC_MASK 0x000F
+#define MADERA_FLL1_SYNCCLK_SRC_SHIFT 0
+
+/* (0x0187) FLL1_Synchroniser_7 */
+#define MADERA_FLL1_SYNC_GAIN_MASK 0x003C
+#define MADERA_FLL1_SYNC_GAIN_SHIFT 2
+#define MADERA_FLL1_SYNC_DFSAT 0x0001
+#define MADERA_FLL1_SYNC_DFSAT_MASK 0x0001
+#define MADERA_FLL1_SYNC_DFSAT_SHIFT 0
+
+/* (0x01D1) FLL_AO_Control_1 */
+#define MADERA_FLL_AO_HOLD 0x0004
+#define MADERA_FLL_AO_HOLD_MASK 0x0004
+#define MADERA_FLL_AO_HOLD_SHIFT 2
+#define MADERA_FLL_AO_FREERUN 0x0002
+#define MADERA_FLL_AO_FREERUN_MASK 0x0002
+#define MADERA_FLL_AO_FREERUN_SHIFT 1
+#define MADERA_FLL_AO_ENA 0x0001
+#define MADERA_FLL_AO_ENA_MASK 0x0001
+#define MADERA_FLL_AO_ENA_SHIFT 0
+
+/* (0x01D2) FLL_AO_Control_2 */
+#define MADERA_FLL_AO_CTRL_UPD 0x8000
+#define MADERA_FLL_AO_CTRL_UPD_MASK 0x8000
+#define MADERA_FLL_AO_CTRL_UPD_SHIFT 15
+
+/* (0x01D6) FLL_AO_Control_6 */
+#define MADERA_FLL_AO_REFCLK_SRC_MASK 0x000F
+#define MADERA_FLL_AO_REFCLK_SRC_SHIFT 0
+
+/* (0x0200) Mic_Charge_Pump_1 */
+#define MADERA_CPMIC_BYPASS 0x0002
+#define MADERA_CPMIC_BYPASS_MASK 0x0002
+#define MADERA_CPMIC_BYPASS_SHIFT 1
+#define MADERA_CPMIC_ENA 0x0001
+#define MADERA_CPMIC_ENA_MASK 0x0001
+#define MADERA_CPMIC_ENA_SHIFT 0
+
+/* (0x0210) LDO1_Control_1 */
+#define MADERA_LDO1_VSEL_MASK 0x07E0
+#define MADERA_LDO1_VSEL_SHIFT 5
+#define MADERA_LDO1_FAST 0x0010
+#define MADERA_LDO1_FAST_MASK 0x0010
+#define MADERA_LDO1_FAST_SHIFT 4
+#define MADERA_LDO1_DISCH 0x0004
+#define MADERA_LDO1_DISCH_MASK 0x0004
+#define MADERA_LDO1_DISCH_SHIFT 2
+#define MADERA_LDO1_BYPASS 0x0002
+#define MADERA_LDO1_BYPASS_MASK 0x0002
+#define MADERA_LDO1_BYPASS_SHIFT 1
+#define MADERA_LDO1_ENA 0x0001
+#define MADERA_LDO1_ENA_MASK 0x0001
+#define MADERA_LDO1_ENA_SHIFT 0
+
+/* (0x0213) LDO2_Control_1 */
+#define MADERA_LDO2_VSEL_MASK 0x07E0
+#define MADERA_LDO2_VSEL_SHIFT 5
+#define MADERA_LDO2_FAST 0x0010
+#define MADERA_LDO2_FAST_MASK 0x0010
+#define MADERA_LDO2_FAST_SHIFT 4
+#define MADERA_LDO2_DISCH 0x0004
+#define MADERA_LDO2_DISCH_MASK 0x0004
+#define MADERA_LDO2_DISCH_SHIFT 2
+#define MADERA_LDO2_BYPASS 0x0002
+#define MADERA_LDO2_BYPASS_MASK 0x0002
+#define MADERA_LDO2_BYPASS_SHIFT 1
+#define MADERA_LDO2_ENA 0x0001
+#define MADERA_LDO2_ENA_MASK 0x0001
+#define MADERA_LDO2_ENA_SHIFT 0
+
+/* (0x0218) Mic_Bias_Ctrl_1 */
+#define MADERA_MICB1_EXT_CAP 0x8000
+#define MADERA_MICB1_EXT_CAP_MASK 0x8000
+#define MADERA_MICB1_EXT_CAP_SHIFT 15
+#define MADERA_MICB1_LVL_MASK 0x01E0
+#define MADERA_MICB1_LVL_SHIFT 5
+#define MADERA_MICB1_ENA 0x0001
+#define MADERA_MICB1_ENA_MASK 0x0001
+#define MADERA_MICB1_ENA_SHIFT 0
+
+/* (0x021C) Mic_Bias_Ctrl_5 */
+#define MADERA_MICB1D_ENA 0x1000
+#define MADERA_MICB1D_ENA_MASK 0x1000
+#define MADERA_MICB1D_ENA_SHIFT 12
+#define MADERA_MICB1C_ENA 0x0100
+#define MADERA_MICB1C_ENA_MASK 0x0100
+#define MADERA_MICB1C_ENA_SHIFT 8
+#define MADERA_MICB1B_ENA 0x0010
+#define MADERA_MICB1B_ENA_MASK 0x0010
+#define MADERA_MICB1B_ENA_SHIFT 4
+#define MADERA_MICB1A_ENA 0x0001
+#define MADERA_MICB1A_ENA_MASK 0x0001
+#define MADERA_MICB1A_ENA_SHIFT 0
+
+/* (0x021E) Mic_Bias_Ctrl_6 */
+#define MADERA_MICB2D_ENA 0x1000
+#define MADERA_MICB2D_ENA_MASK 0x1000
+#define MADERA_MICB2D_ENA_SHIFT 12
+#define MADERA_MICB2C_ENA 0x0100
+#define MADERA_MICB2C_ENA_MASK 0x0100
+#define MADERA_MICB2C_ENA_SHIFT 8
+#define MADERA_MICB2B_ENA 0x0010
+#define MADERA_MICB2B_ENA_MASK 0x0010
+#define MADERA_MICB2B_ENA_SHIFT 4
+#define MADERA_MICB2A_ENA 0x0001
+#define MADERA_MICB2A_ENA_MASK 0x0001
+#define MADERA_MICB2A_ENA_SHIFT 0
+
+/* (0x0225) - HP Ctrl 1L */
+#define MADERA_RMV_SHRT_HP1L 0x4000
+#define MADERA_RMV_SHRT_HP1L_MASK 0x4000
+#define MADERA_RMV_SHRT_HP1L_SHIFT 14
+#define MADERA_HP1L_FLWR 0x0004
+#define MADERA_HP1L_FLWR_MASK 0x0004
+#define MADERA_HP1L_FLWR_SHIFT 2
+#define MADERA_HP1L_SHRTI 0x0002
+#define MADERA_HP1L_SHRTI_MASK 0x0002
+#define MADERA_HP1L_SHRTI_SHIFT 1
+#define MADERA_HP1L_SHRTO 0x0001
+#define MADERA_HP1L_SHRTO_MASK 0x0001
+#define MADERA_HP1L_SHRTO_SHIFT 0
+
+/* (0x0226) - HP Ctrl 1R */
+#define MADERA_RMV_SHRT_HP1R 0x4000
+#define MADERA_RMV_SHRT_HP1R_MASK 0x4000
+#define MADERA_RMV_SHRT_HP1R_SHIFT 14
+#define MADERA_HP1R_FLWR 0x0004
+#define MADERA_HP1R_FLWR_MASK 0x0004
+#define MADERA_HP1R_FLWR_SHIFT 2
+#define MADERA_HP1R_SHRTI 0x0002
+#define MADERA_HP1R_SHRTI_MASK 0x0002
+#define MADERA_HP1R_SHRTI_SHIFT 1
+#define MADERA_HP1R_SHRTO 0x0001
+#define MADERA_HP1R_SHRTO_MASK 0x0001
+#define MADERA_HP1R_SHRTO_SHIFT 0
+
+/* (0x0293) Accessory_Detect_Mode_1 */
+#define MADERA_ACCDET_SRC 0x2000
+#define MADERA_ACCDET_SRC_MASK 0x2000
+#define MADERA_ACCDET_SRC_SHIFT 13
+#define MADERA_ACCDET_POLARITY_INV_ENA 0x0080
+#define MADERA_ACCDET_POLARITY_INV_ENA_MASK 0x0080
+#define MADERA_ACCDET_POLARITY_INV_ENA_SHIFT 7
+#define MADERA_ACCDET_MODE_MASK 0x0007
+#define MADERA_ACCDET_MODE_SHIFT 0
+
+/* (0x0299) Headphone_Detect_0 */
+#define MADERA_HPD_GND_SEL 0x0007
+#define MADERA_HPD_GND_SEL_MASK 0x0007
+#define MADERA_HPD_GND_SEL_SHIFT 0
+#define MADERA_HPD_SENSE_SEL 0x00F0
+#define MADERA_HPD_SENSE_SEL_MASK 0x00F0
+#define MADERA_HPD_SENSE_SEL_SHIFT 4
+#define MADERA_HPD_FRC_SEL 0x0F00
+#define MADERA_HPD_FRC_SEL_MASK 0x0F00
+#define MADERA_HPD_FRC_SEL_SHIFT 8
+#define MADERA_HPD_OUT_SEL 0x7000
+#define MADERA_HPD_OUT_SEL_MASK 0x7000
+#define MADERA_HPD_OUT_SEL_SHIFT 12
+#define MADERA_HPD_OVD_ENA_SEL 0x8000
+#define MADERA_HPD_OVD_ENA_SEL_MASK 0x8000
+#define MADERA_HPD_OVD_ENA_SEL_SHIFT 15
+
+/* (0x029B) Headphone_Detect_1 */
+#define MADERA_HP_IMPEDANCE_RANGE_MASK 0x0600
+#define MADERA_HP_IMPEDANCE_RANGE_SHIFT 9
+#define MADERA_HP_STEP_SIZE 0x0100
+#define MADERA_HP_STEP_SIZE_MASK 0x0100
+#define MADERA_HP_STEP_SIZE_SHIFT 8
+#define MADERA_HP_CLK_DIV_MASK 0x0018
+#define MADERA_HP_CLK_DIV_SHIFT 3
+#define MADERA_HP_RATE_MASK 0x0006
+#define MADERA_HP_RATE_SHIFT 1
+#define MADERA_HP_POLL 0x0001
+#define MADERA_HP_POLL_MASK 0x0001
+#define MADERA_HP_POLL_SHIFT 0
+
+/* (0x029C) Headphone_Detect_2 */
+#define MADERA_HP_DONE_MASK 0x8000
+#define MADERA_HP_DONE_SHIFT 15
+#define MADERA_HP_LVL_MASK 0x7FFF
+#define MADERA_HP_LVL_SHIFT 0
+
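Headphone_Detect_1/2 together form a start/poll/result interface: HP_POLL kicks off an impedance measurement, HP_DONE signals completion, and HP_LVL holds the raw result code. A hedged polling sketch (the timeout is arbitrary, and the MADERA_HEADPHONE_DETECT_1/2 address macros are assumed from earlier in this header; a real driver would more likely use the HPDET interrupt):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/regmap.h>

/* Sketch only: start a measurement, wait for HP_DONE, and return the
 * raw HP_LVL code through *lvl.
 */
static int madera_hpdet_read_sketch(struct regmap *regmap, unsigned int *lvl)
{
	unsigned int val;
	int i, ret;

	ret = regmap_update_bits(regmap, MADERA_HEADPHONE_DETECT_1,
				 MADERA_HP_POLL_MASK, MADERA_HP_POLL);
	if (ret)
		return ret;

	for (i = 0; i < 100; i++) {
		usleep_range(1000, 2000);
		ret = regmap_read(regmap, MADERA_HEADPHONE_DETECT_2, &val);
		if (ret)
			return ret;
		if (val & MADERA_HP_DONE_MASK) {
			*lvl = (val & MADERA_HP_LVL_MASK) >>
			       MADERA_HP_LVL_SHIFT;
			return 0;
		}
	}

	return -ETIMEDOUT;
}
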
+/* (0x029D) Headphone_Detect_3 */
+#define MADERA_HP_DACVAL_MASK 0x03FF
+#define MADERA_HP_DACVAL_SHIFT 0
+
+/* (0x029F) - Headphone Detect 5 */
+#define MADERA_HP_DACVAL_DOWN_MASK 0x03FF
+#define MADERA_HP_DACVAL_DOWN_SHIFT 0
+
+/* (0x02A2) Mic_Detect_1_Control_0 */
+#define MADERA_MICD1_GND_MASK 0x0007
+#define MADERA_MICD1_GND_SHIFT 0
+#define MADERA_MICD1_SENSE_MASK 0x00F0
+#define MADERA_MICD1_SENSE_SHIFT 4
+#define MADERA_MICD1_ADC_MODE_MASK 0x8000
+#define MADERA_MICD1_ADC_MODE_SHIFT 15
+
+/* (0x02A3) Mic_Detect_1_Control_1 */
+#define MADERA_MICD_BIAS_STARTTIME_MASK 0xF000
+#define MADERA_MICD_BIAS_STARTTIME_SHIFT 12
+#define MADERA_MICD_RATE_MASK 0x0F00
+#define MADERA_MICD_RATE_SHIFT 8
+#define MADERA_MICD_BIAS_SRC_MASK 0x00F0
+#define MADERA_MICD_BIAS_SRC_SHIFT 4
+#define MADERA_MICD_DBTIME 0x0002
+#define MADERA_MICD_DBTIME_MASK 0x0002
+#define MADERA_MICD_DBTIME_SHIFT 1
+#define MADERA_MICD_ENA 0x0001
+#define MADERA_MICD_ENA_MASK 0x0001
+#define MADERA_MICD_ENA_SHIFT 0
+
+/* (0x02A4) Mic_Detect_1_Control_2 */
+#define MADERA_MICD_LVL_SEL_MASK 0x00FF
+#define MADERA_MICD_LVL_SEL_SHIFT 0
+
+/* (0x02A5) Mic_Detect_1_Control_3 */
+#define MADERA_MICD_LVL_0 0x0004
+#define MADERA_MICD_LVL_1 0x0008
+#define MADERA_MICD_LVL_2 0x0010
+#define MADERA_MICD_LVL_3 0x0020
+#define MADERA_MICD_LVL_4 0x0040
+#define MADERA_MICD_LVL_5 0x0080
+#define MADERA_MICD_LVL_6 0x0100
+#define MADERA_MICD_LVL_7 0x0200
+#define MADERA_MICD_LVL_8 0x0400
+#define MADERA_MICD_LVL_MASK 0x07FC
+#define MADERA_MICD_LVL_SHIFT 2
+#define MADERA_MICD_VALID 0x0002
+#define MADERA_MICD_VALID_MASK 0x0002
+#define MADERA_MICD_VALID_SHIFT 1
+#define MADERA_MICD_STS 0x0001
+#define MADERA_MICD_STS_MASK 0x0001
+#define MADERA_MICD_STS_SHIFT 0
+
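Mic_Detect_1_Control_3 is a status register: MICD_STS flags an event, MICD_VALID qualifies the measurement, and MICD_LVL appears to be a one-hot field (LVL_0..LVL_8) marking the detected impedance range. The decode below is a hedged sketch on that reading, with the MADERA_MIC_DETECT_1_CONTROL_3 address macro assumed from earlier in this header:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/regmap.h>

/* Sketch only: return the index (0..8) of the detected MICD level,
 * -EAGAIN while the measurement has not settled, or -ENODATA if no
 * level bit is set.
 */
static int madera_micd_level_sketch(struct regmap *regmap)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, MADERA_MIC_DETECT_1_CONTROL_3, &val);
	if (ret)
		return ret;

	if (!(val & MADERA_MICD_VALID_MASK))
		return -EAGAIN;

	val = (val & MADERA_MICD_LVL_MASK) >> MADERA_MICD_LVL_SHIFT;
	return val ? ffs(val) - 1 : -ENODATA;
}
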
+/* (0x02AB) Mic_Detect_1_Control_4 */
+#define MADERA_MICDET_ADCVAL_DIFF_MASK 0xFF00
+#define MADERA_MICDET_ADCVAL_DIFF_SHIFT 8
+#define MADERA_MICDET_ADCVAL_MASK 0x007F
+#define MADERA_MICDET_ADCVAL_SHIFT 0
+
+/* (0x02C6) Micd_Clamp_control */
+#define MADERA_MICD_CLAMP_OVD 0x0010
+#define MADERA_MICD_CLAMP_OVD_MASK 0x0010
+#define MADERA_MICD_CLAMP_OVD_SHIFT 4
+#define MADERA_MICD_CLAMP_MODE_MASK 0x000F
+#define MADERA_MICD_CLAMP_MODE_SHIFT 0
+
+/* (0x02C8) GP_Switch_1 */
+#define MADERA_SW2_MODE_MASK 0x000C
+#define MADERA_SW2_MODE_SHIFT 2
+#define MADERA_SW1_MODE_MASK 0x0003
+#define MADERA_SW1_MODE_SHIFT 0
+
+/* (0x02D3) Jack_detect_analogue */
+#define MADERA_JD2_ENA 0x0002
+#define MADERA_JD2_ENA_MASK 0x0002
+#define MADERA_JD2_ENA_SHIFT 1
+#define MADERA_JD1_ENA 0x0001
+#define MADERA_JD1_ENA_MASK 0x0001
+#define MADERA_JD1_ENA_SHIFT 0
+
+/* (0x0300) Input_Enables */
+#define MADERA_IN6L_ENA 0x0800
+#define MADERA_IN6L_ENA_MASK 0x0800
+#define MADERA_IN6L_ENA_SHIFT 11
+#define MADERA_IN6R_ENA 0x0400
+#define MADERA_IN6R_ENA_MASK 0x0400
+#define MADERA_IN6R_ENA_SHIFT 10
+#define MADERA_IN5L_ENA 0x0200
+#define MADERA_IN5L_ENA_MASK 0x0200
+#define MADERA_IN5L_ENA_SHIFT 9
+#define MADERA_IN5R_ENA 0x0100
+#define MADERA_IN5R_ENA_MASK 0x0100
+#define MADERA_IN5R_ENA_SHIFT 8
+#define MADERA_IN4L_ENA 0x0080
+#define MADERA_IN4L_ENA_MASK 0x0080
+#define MADERA_IN4L_ENA_SHIFT 7
+#define MADERA_IN4R_ENA 0x0040
+#define MADERA_IN4R_ENA_MASK 0x0040
+#define MADERA_IN4R_ENA_SHIFT 6
+#define MADERA_IN3L_ENA 0x0020
+#define MADERA_IN3L_ENA_MASK 0x0020
+#define MADERA_IN3L_ENA_SHIFT 5
+#define MADERA_IN3R_ENA 0x0010
+#define MADERA_IN3R_ENA_MASK 0x0010
+#define MADERA_IN3R_ENA_SHIFT 4
+#define MADERA_IN2L_ENA 0x0008
+#define MADERA_IN2L_ENA_MASK 0x0008
+#define MADERA_IN2L_ENA_SHIFT 3
+#define MADERA_IN2R_ENA 0x0004
+#define MADERA_IN2R_ENA_MASK 0x0004
+#define MADERA_IN2R_ENA_SHIFT 2
+#define MADERA_IN1L_ENA 0x0002
+#define MADERA_IN1L_ENA_MASK 0x0002
+#define MADERA_IN1L_ENA_SHIFT 1
+#define MADERA_IN1R_ENA 0x0001
+#define MADERA_IN1R_ENA_MASK 0x0001
+#define MADERA_IN1R_ENA_SHIFT 0
+
+/* (0x0308) Input_Rate */
+#define MADERA_IN_RATE_MASK 0xF800
+#define MADERA_IN_RATE_SHIFT 11
+#define MADERA_IN_MODE_MASK 0x0400
+#define MADERA_IN_MODE_SHIFT 10
+
+/* (0x0309) Input_Volume_Ramp */
+#define MADERA_IN_VD_RAMP_MASK 0x0070
+#define MADERA_IN_VD_RAMP_SHIFT 4
+#define MADERA_IN_VI_RAMP_MASK 0x0007
+#define MADERA_IN_VI_RAMP_SHIFT 0
+
+/* (0x030C) HPF_Control */
+#define MADERA_IN_HPF_CUT_MASK 0x0007
+#define MADERA_IN_HPF_CUT_SHIFT 0
+
+/* (0x0310) IN1L_Control */
+#define MADERA_IN1L_HPF_MASK 0x8000
+#define MADERA_IN1L_HPF_SHIFT 15
+#define MADERA_IN1_DMIC_SUP_MASK 0x1800
+#define MADERA_IN1_DMIC_SUP_SHIFT 11
+#define MADERA_IN1_MODE_MASK 0x0400
+#define MADERA_IN1_MODE_SHIFT 10
+#define MADERA_IN1L_PGA_VOL_MASK 0x00FE
+#define MADERA_IN1L_PGA_VOL_SHIFT 1
+
+/* (0x0311) ADC_Digital_Volume_1L */
+#define MADERA_IN1L_SRC_MASK 0x4000
+#define MADERA_IN1L_SRC_SHIFT 14
+#define MADERA_IN1L_SRC_SE_MASK 0x2000
+#define MADERA_IN1L_SRC_SE_SHIFT 13
+#define MADERA_IN1L_LP_MODE 0x0800
+#define MADERA_IN1L_LP_MODE_MASK 0x0800
+#define MADERA_IN1L_LP_MODE_SHIFT 11
+#define MADERA_IN_VU 0x0200
+#define MADERA_IN_VU_MASK 0x0200
+#define MADERA_IN_VU_SHIFT 9
+#define MADERA_IN1L_MUTE 0x0100
+#define MADERA_IN1L_MUTE_MASK 0x0100
+#define MADERA_IN1L_MUTE_SHIFT 8
+#define MADERA_IN1L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN1L_DIG_VOL_SHIFT 0
+
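IN_VU sits in this register but acts as the input volume-update strobe: on this codec family, gain writes are typically latched only when a write lands with the VU bit set, so the digital volume and IN_VU are written together (hedged; the MADERA_ADC_DIGITAL_VOLUME_1L address macro is assumed from earlier in this header):

#include <linux/regmap.h>

/* Sketch only: set the IN1L digital volume and latch it with IN_VU.
 * vol is the raw 8-bit gain code from the datasheet.
 */
static int madera_in1l_set_vol_sketch(struct regmap *regmap, unsigned int vol)
{
	return regmap_update_bits(regmap, MADERA_ADC_DIGITAL_VOLUME_1L,
				  MADERA_IN_VU_MASK |
				  MADERA_IN1L_DIG_VOL_MASK,
				  MADERA_IN_VU |
				  (vol << MADERA_IN1L_DIG_VOL_SHIFT));
}
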
+/* (0x0312) DMIC1L_Control */
+#define MADERA_IN1_OSR_MASK 0x0700
+#define MADERA_IN1_OSR_SHIFT 8
+
+/* (0x0313) IN1L_Rate_Control */
+#define MADERA_IN1L_RATE_MASK 0xF800
+#define MADERA_IN1L_RATE_SHIFT 11
+
+/* (0x0314) IN1R_Control */
+#define MADERA_IN1R_HPF_MASK 0x8000
+#define MADERA_IN1R_HPF_SHIFT 15
+#define MADERA_IN1R_PGA_VOL_MASK 0x00FE
+#define MADERA_IN1R_PGA_VOL_SHIFT 1
+#define MADERA_IN1_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN1_DMICCLK_SRC_SHIFT 11
+
+/* (0x0315) ADC_Digital_Volume_1R */
+#define MADERA_IN1R_SRC_MASK 0x4000
+#define MADERA_IN1R_SRC_SHIFT 14
+#define MADERA_IN1R_SRC_SE_MASK 0x2000
+#define MADERA_IN1R_SRC_SE_SHIFT 13
+#define MADERA_IN1R_LP_MODE 0x0800
+#define MADERA_IN1R_LP_MODE_MASK 0x0800
+#define MADERA_IN1R_LP_MODE_SHIFT 11
+#define MADERA_IN1R_MUTE 0x0100
+#define MADERA_IN1R_MUTE_MASK 0x0100
+#define MADERA_IN1R_MUTE_SHIFT 8
+#define MADERA_IN1R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN1R_DIG_VOL_SHIFT 0
+
+/* (0x0317) IN1R_Rate_Control */
+#define MADERA_IN1R_RATE_MASK 0xF800
+#define MADERA_IN1R_RATE_SHIFT 11
+
+/* (0x0318) IN2L_Control */
+#define MADERA_IN2L_HPF_MASK 0x8000
+#define MADERA_IN2L_HPF_SHIFT 15
+#define MADERA_IN2_DMIC_SUP_MASK 0x1800
+#define MADERA_IN2_DMIC_SUP_SHIFT 11
+#define MADERA_IN2_MODE_MASK 0x0400
+#define MADERA_IN2_MODE_SHIFT 10
+#define MADERA_IN2L_PGA_VOL_MASK 0x00FE
+#define MADERA_IN2L_PGA_VOL_SHIFT 1
+
+/* (0x0319) ADC_Digital_Volume_2L */
+#define MADERA_IN2L_SRC_MASK 0x4000
+#define MADERA_IN2L_SRC_SHIFT 14
+#define MADERA_IN2L_SRC_SE_MASK 0x2000
+#define MADERA_IN2L_SRC_SE_SHIFT 13
+#define MADERA_IN2L_LP_MODE 0x0800
+#define MADERA_IN2L_LP_MODE_MASK 0x0800
+#define MADERA_IN2L_LP_MODE_SHIFT 11
+#define MADERA_IN2L_MUTE 0x0100
+#define MADERA_IN2L_MUTE_MASK 0x0100
+#define MADERA_IN2L_MUTE_SHIFT 8
+#define MADERA_IN2L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN2L_DIG_VOL_SHIFT 0
+
+/* (0x031A) DMIC2L_Control */
+#define MADERA_IN2_OSR_MASK 0x0700
+#define MADERA_IN2_OSR_SHIFT 8
+
+/* (0x031C) IN2R_Control */
+#define MADERA_IN2R_HPF_MASK 0x8000
+#define MADERA_IN2R_HPF_SHIFT 15
+#define MADERA_IN2R_PGA_VOL_MASK 0x00FE
+#define MADERA_IN2R_PGA_VOL_SHIFT 1
+#define MADERA_IN2_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN2_DMICCLK_SRC_SHIFT 11
+
+/* (0x031D) ADC_Digital_Volume_2R */
+#define MADERA_IN2R_SRC_MASK 0x4000
+#define MADERA_IN2R_SRC_SHIFT 14
+#define MADERA_IN2R_SRC_SE_MASK 0x2000
+#define MADERA_IN2R_SRC_SE_SHIFT 13
+#define MADERA_IN2R_LP_MODE 0x0800
+#define MADERA_IN2R_LP_MODE_MASK 0x0800
+#define MADERA_IN2R_LP_MODE_SHIFT 11
+#define MADERA_IN2R_MUTE 0x0100
+#define MADERA_IN2R_MUTE_MASK 0x0100
+#define MADERA_IN2R_MUTE_SHIFT 8
+#define MADERA_IN2R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN2R_DIG_VOL_SHIFT 0
+
+/* (0x0320) IN3L_Control */
+#define MADERA_IN3L_HPF_MASK 0x8000
+#define MADERA_IN3L_HPF_SHIFT 15
+#define MADERA_IN3_DMIC_SUP_MASK 0x1800
+#define MADERA_IN3_DMIC_SUP_SHIFT 11
+#define MADERA_IN3_MODE_MASK 0x0400
+#define MADERA_IN3_MODE_SHIFT 10
+#define MADERA_IN3L_PGA_VOL_MASK 0x00FE
+#define MADERA_IN3L_PGA_VOL_SHIFT 1
+
+/* (0x0321) ADC_Digital_Volume_3L */
+#define MADERA_IN3L_MUTE 0x0100
+#define MADERA_IN3L_MUTE_MASK 0x0100
+#define MADERA_IN3L_MUTE_SHIFT 8
+#define MADERA_IN3L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN3L_DIG_VOL_SHIFT 0
+
+/* (0x0322) DMIC3L_Control */
+#define MADERA_IN3_OSR_MASK 0x0700
+#define MADERA_IN3_OSR_SHIFT 8
+
+/* (0x0324) IN3R_Control */
+#define MADERA_IN3R_HPF_MASK 0x8000
+#define MADERA_IN3R_HPF_SHIFT 15
+#define MADERA_IN3R_PGA_VOL_MASK 0x00FE
+#define MADERA_IN3R_PGA_VOL_SHIFT 1
+#define MADERA_IN3_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN3_DMICCLK_SRC_SHIFT 11
+
+/* (0x0325) ADC_Digital_Volume_3R */
+#define MADERA_IN3R_MUTE 0x0100
+#define MADERA_IN3R_MUTE_MASK 0x0100
+#define MADERA_IN3R_MUTE_SHIFT 8
+#define MADERA_IN3R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN3R_DIG_VOL_SHIFT 0
+
+/* (0x0328) IN4L_Control */
+#define MADERA_IN4L_HPF_MASK 0x8000
+#define MADERA_IN4L_HPF_SHIFT 15
+#define MADERA_IN4_DMIC_SUP_MASK 0x1800
+#define MADERA_IN4_DMIC_SUP_SHIFT 11
+
+/* (0x0329) ADC_Digital_Volume_4L */
+#define MADERA_IN4L_MUTE 0x0100
+#define MADERA_IN4L_MUTE_MASK 0x0100
+#define MADERA_IN4L_MUTE_SHIFT 8
+#define MADERA_IN4L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN4L_DIG_VOL_SHIFT 0
+
+/* (0x032A) DMIC4L_Control */
+#define MADERA_IN4_OSR_MASK 0x0700
+#define MADERA_IN4_OSR_SHIFT 8
+
+/* (0x032C) IN4R_Control */
+#define MADERA_IN4R_HPF_MASK 0x8000
+#define MADERA_IN4R_HPF_SHIFT 15
+#define MADERA_IN4_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN4_DMICCLK_SRC_SHIFT 11
+
+/* (0x032D) ADC_Digital_Volume_4R */
+#define MADERA_IN4R_MUTE 0x0100
+#define MADERA_IN4R_MUTE_MASK 0x0100
+#define MADERA_IN4R_MUTE_SHIFT 8
+#define MADERA_IN4R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN4R_DIG_VOL_SHIFT 0
+
+/* (0x0330) IN5L_Control */
+#define MADERA_IN5L_HPF_MASK 0x8000
+#define MADERA_IN5L_HPF_SHIFT 15
+#define MADERA_IN5_DMIC_SUP_MASK 0x1800
+#define MADERA_IN5_DMIC_SUP_SHIFT 11
+
+/* (0x0331) ADC_Digital_Volume_5L */
+#define MADERA_IN5L_MUTE 0x0100
+#define MADERA_IN5L_MUTE_MASK 0x0100
+#define MADERA_IN5L_MUTE_SHIFT 8
+#define MADERA_IN5L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN5L_DIG_VOL_SHIFT 0
+
+/* (0x0332) DMIC5L_Control */
+#define MADERA_IN5_OSR_MASK 0x0700
+#define MADERA_IN5_OSR_SHIFT 8
+
+/* (0x0334) IN5R_Control */
+#define MADERA_IN5R_HPF_MASK 0x8000
+#define MADERA_IN5R_HPF_SHIFT 15
+#define MADERA_IN5_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN5_DMICCLK_SRC_SHIFT 11
+
+/* (0x0335) ADC_Digital_Volume_5R */
+#define MADERA_IN5R_MUTE 0x0100
+#define MADERA_IN5R_MUTE_MASK 0x0100
+#define MADERA_IN5R_MUTE_SHIFT 8
+#define MADERA_IN5R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN5R_DIG_VOL_SHIFT 0
+
+/* (0x0338) IN6L_Control */
+#define MADERA_IN6L_HPF_MASK 0x8000
+#define MADERA_IN6L_HPF_SHIFT 15
+#define MADERA_IN6_DMIC_SUP_MASK 0x1800
+#define MADERA_IN6_DMIC_SUP_SHIFT 11
+
+/* (0x0339) ADC_Digital_Volume_6L */
+#define MADERA_IN6L_MUTE 0x0100
+#define MADERA_IN6L_MUTE_MASK 0x0100
+#define MADERA_IN6L_MUTE_SHIFT 8
+#define MADERA_IN6L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN6L_DIG_VOL_SHIFT 0
+
+/* (0x033A) DMIC6L_Control */
+#define MADERA_IN6_OSR_MASK 0x0700
+#define MADERA_IN6_OSR_SHIFT 8
+
+/* (0x033C) IN6R_Control */
+#define MADERA_IN6R_HPF_MASK 0x8000
+#define MADERA_IN6R_HPF_SHIFT 15
+
+/* (0x033D) ADC_Digital_Volume_6R */
+#define MADERA_IN6R_MUTE 0x0100
+#define MADERA_IN6R_MUTE_MASK 0x0100
+#define MADERA_IN6R_MUTE_SHIFT 8
+#define MADERA_IN6R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN6R_DIG_VOL_SHIFT 0
+
+/* (0x033E) DMIC6R_Control */
+#define MADERA_IN6_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN6_DMICCLK_SRC_SHIFT 11
+
+/* (0x0400) Output_Enables_1 */
+#define MADERA_EP_SEL 0x8000
+#define MADERA_EP_SEL_MASK 0x8000
+#define MADERA_EP_SEL_SHIFT 15
+#define MADERA_OUT6L_ENA 0x0800
+#define MADERA_OUT6L_ENA_MASK 0x0800
+#define MADERA_OUT6L_ENA_SHIFT 11
+#define MADERA_OUT6R_ENA 0x0400
+#define MADERA_OUT6R_ENA_MASK 0x0400
+#define MADERA_OUT6R_ENA_SHIFT 10
+#define MADERA_OUT5L_ENA 0x0200
+#define MADERA_OUT5L_ENA_MASK 0x0200
+#define MADERA_OUT5L_ENA_SHIFT 9
+#define MADERA_OUT5R_ENA 0x0100
+#define MADERA_OUT5R_ENA_MASK 0x0100
+#define MADERA_OUT5R_ENA_SHIFT 8
+#define MADERA_OUT4L_ENA 0x0080
+#define MADERA_OUT4L_ENA_MASK 0x0080
+#define MADERA_OUT4L_ENA_SHIFT 7
+#define MADERA_OUT4R_ENA 0x0040
+#define MADERA_OUT4R_ENA_MASK 0x0040
+#define MADERA_OUT4R_ENA_SHIFT 6
+#define MADERA_OUT3L_ENA 0x0020
+#define MADERA_OUT3L_ENA_MASK 0x0020
+#define MADERA_OUT3L_ENA_SHIFT 5
+#define MADERA_OUT3R_ENA 0x0010
+#define MADERA_OUT3R_ENA_MASK 0x0010
+#define MADERA_OUT3R_ENA_SHIFT 4
+#define MADERA_OUT2L_ENA 0x0008
+#define MADERA_OUT2L_ENA_MASK 0x0008
+#define MADERA_OUT2L_ENA_SHIFT 3
+#define MADERA_OUT2R_ENA 0x0004
+#define MADERA_OUT2R_ENA_MASK 0x0004
+#define MADERA_OUT2R_ENA_SHIFT 2
+#define MADERA_OUT1L_ENA 0x0002
+#define MADERA_OUT1L_ENA_MASK 0x0002
+#define MADERA_OUT1L_ENA_SHIFT 1
+#define MADERA_OUT1R_ENA 0x0001
+#define MADERA_OUT1R_ENA_MASK 0x0001
+#define MADERA_OUT1R_ENA_SHIFT 0
+
+/* (0x0408) Output_Rate_1 */
+#define MADERA_CP_DAC_MODE_MASK 0x0040
+#define MADERA_CP_DAC_MODE_SHIFT 6
+#define MADERA_OUT_EXT_CLK_DIV_MASK 0x0030
+#define MADERA_OUT_EXT_CLK_DIV_SHIFT 4
+#define MADERA_OUT_CLK_SRC_MASK 0x0007
+#define MADERA_OUT_CLK_SRC_SHIFT 0
+
+/* (0x0409) Output_Volume_Ramp */
+#define MADERA_OUT_VD_RAMP_MASK 0x0070
+#define MADERA_OUT_VD_RAMP_SHIFT 4
+#define MADERA_OUT_VI_RAMP_MASK 0x0007
+#define MADERA_OUT_VI_RAMP_SHIFT 0
+
+/* (0x0410) Output_Path_Config_1L */
+#define MADERA_OUT1_MONO 0x1000
+#define MADERA_OUT1_MONO_MASK 0x1000
+#define MADERA_OUT1_MONO_SHIFT 12
+#define MADERA_OUT1L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT1L_ANC_SRC_SHIFT 10
+
+/* (0x0411) DAC_Digital_Volume_1L */
+#define MADERA_OUT1L_VU 0x0200
+#define MADERA_OUT1L_VU_MASK 0x0200
+#define MADERA_OUT1L_VU_SHIFT 9
+#define MADERA_OUT1L_MUTE 0x0100
+#define MADERA_OUT1L_MUTE_MASK 0x0100
+#define MADERA_OUT1L_MUTE_SHIFT 8
+#define MADERA_OUT1L_VOL_MASK 0x00FF
+#define MADERA_OUT1L_VOL_SHIFT 0
+
+/* (0x0412) Output_Path_Config_1 */
+#define MADERA_HP1_GND_SEL_MASK 0x0007
+#define MADERA_HP1_GND_SEL_SHIFT 0
+
+/* (0x0414) Output_Path_Config_1R */
+#define MADERA_OUT1R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT1R_ANC_SRC_SHIFT 10
+
+/* (0x0415) DAC_Digital_Volume_1R */
+#define MADERA_OUT1R_MUTE 0x0100
+#define MADERA_OUT1R_MUTE_MASK 0x0100
+#define MADERA_OUT1R_MUTE_SHIFT 8
+#define MADERA_OUT1R_VOL_MASK 0x00FF
+#define MADERA_OUT1R_VOL_SHIFT 0
+
+/* (0x0418) Output_Path_Config_2L */
+#define MADERA_OUT2L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT2L_ANC_SRC_SHIFT 10
+
+/* (0x0419) DAC_Digital_Volume_2L */
+#define MADERA_OUT2L_MUTE 0x0100
+#define MADERA_OUT2L_MUTE_MASK 0x0100
+#define MADERA_OUT2L_MUTE_SHIFT 8
+#define MADERA_OUT2L_VOL_MASK 0x00FF
+#define MADERA_OUT2L_VOL_SHIFT 0
+
+/* (0x041A) Output_Path_Config_2 */
+#define MADERA_HP2_GND_SEL_MASK 0x0007
+#define MADERA_HP2_GND_SEL_SHIFT 0
+
+/* (0x041C) Output_Path_Config_2R */
+#define MADERA_OUT2R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT2R_ANC_SRC_SHIFT 10
+
+/* (0x041D) DAC_Digital_Volume_2R */
+#define MADERA_OUT2R_MUTE 0x0100
+#define MADERA_OUT2R_MUTE_MASK 0x0100
+#define MADERA_OUT2R_MUTE_SHIFT 8
+#define MADERA_OUT2R_VOL_MASK 0x00FF
+#define MADERA_OUT2R_VOL_SHIFT 0
+
+/* (0x0420) Output_Path_Config_3L */
+#define MADERA_OUT3L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT3L_ANC_SRC_SHIFT 10
+
+/* (0x0421) DAC_Digital_Volume_3L */
+#define MADERA_OUT3L_MUTE 0x0100
+#define MADERA_OUT3L_MUTE_MASK 0x0100
+#define MADERA_OUT3L_MUTE_SHIFT 8
+#define MADERA_OUT3L_VOL_MASK 0x00FF
+#define MADERA_OUT3L_VOL_SHIFT 0
+
+/* (0x0424) Output_Path_Config_3R */
+#define MADERA_OUT3R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT3R_ANC_SRC_SHIFT 10
+
+/* (0x0425) DAC_Digital_Volume_3R */
+#define MADERA_OUT3R_MUTE 0x0100
+#define MADERA_OUT3R_MUTE_MASK 0x0100
+#define MADERA_OUT3R_MUTE_SHIFT 8
+#define MADERA_OUT3R_VOL_MASK 0x00FF
+#define MADERA_OUT3R_VOL_SHIFT 0
+
+/* (0x0428) Output_Path_Config_4L */
+#define MADERA_OUT4L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT4L_ANC_SRC_SHIFT 10
+
+/* (0x0429) DAC_Digital_Volume_4L */
+#define MADERA_OUT4L_MUTE 0x0100
+#define MADERA_OUT4L_MUTE_MASK 0x0100
+#define MADERA_OUT4L_MUTE_SHIFT 8
+#define MADERA_OUT4L_VOL_MASK 0x00FF
+#define MADERA_OUT4L_VOL_SHIFT 0
+
+/* (0x042C) Output_Path_Config_4R */
+#define MADERA_OUT4R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT4R_ANC_SRC_SHIFT 10
+
+/* (0x042D) DAC_Digital_Volume_4R */
+#define MADERA_OUT4R_MUTE 0x0100
+#define MADERA_OUT4R_MUTE_MASK 0x0100
+#define MADERA_OUT4R_MUTE_SHIFT 8
+#define MADERA_OUT4R_VOL_MASK 0x00FF
+#define MADERA_OUT4R_VOL_SHIFT 0
+
+/* (0x0430) Output_Path_Config_5L */
+#define MADERA_OUT5_OSR 0x2000
+#define MADERA_OUT5_OSR_MASK 0x2000
+#define MADERA_OUT5_OSR_SHIFT 13
+#define MADERA_OUT5L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT5L_ANC_SRC_SHIFT 10
+
+/* (0x0431) DAC_Digital_Volume_5L */
+#define MADERA_OUT5L_MUTE 0x0100
+#define MADERA_OUT5L_MUTE_MASK 0x0100
+#define MADERA_OUT5L_MUTE_SHIFT 8
+#define MADERA_OUT5L_VOL_MASK 0x00FF
+#define MADERA_OUT5L_VOL_SHIFT 0
+
+/* (0x0434) Output_Path_Config_5R */
+#define MADERA_OUT5R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT5R_ANC_SRC_SHIFT 10
+
+/* (0x0435) DAC_Digital_Volume_5R */
+#define MADERA_OUT5R_MUTE 0x0100
+#define MADERA_OUT5R_MUTE_MASK 0x0100
+#define MADERA_OUT5R_MUTE_SHIFT 8
+#define MADERA_OUT5R_VOL_MASK 0x00FF
+#define MADERA_OUT5R_VOL_SHIFT 0
+
+/* (0x0438) Output_Path_Config_6L */
+#define MADERA_OUT6_OSR 0x2000
+#define MADERA_OUT6_OSR_MASK 0x2000
+#define MADERA_OUT6_OSR_SHIFT 13
+#define MADERA_OUT6L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT6L_ANC_SRC_SHIFT 10
+
+/* (0x0439) DAC_Digital_Volume_6L */
+#define MADERA_OUT6L_MUTE 0x0100
+#define MADERA_OUT6L_MUTE_MASK 0x0100
+#define MADERA_OUT6L_MUTE_SHIFT 8
+#define MADERA_OUT6L_VOL_MASK 0x00FF
+#define MADERA_OUT6L_VOL_SHIFT 0
+
+/* (0x043C) Output_Path_Config_6R */
+#define MADERA_OUT6R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT6R_ANC_SRC_SHIFT 10
+
+/* (0x043D) DAC_Digital_Volume_6R */
+#define MADERA_OUT6R_MUTE 0x0100
+#define MADERA_OUT6R_MUTE_MASK 0x0100
+#define MADERA_OUT6R_MUTE_SHIFT 8
+#define MADERA_OUT6R_VOL_MASK 0x00FF
+#define MADERA_OUT6R_VOL_SHIFT 0
+
+/* (0x0450) - DAC AEC Control 1 */
+#define MADERA_AEC1_LOOPBACK_SRC_MASK 0x003C
+#define MADERA_AEC1_LOOPBACK_SRC_SHIFT 2
+#define MADERA_AEC1_ENA_STS 0x0002
+#define MADERA_AEC1_ENA_STS_MASK 0x0002
+#define MADERA_AEC1_ENA_STS_SHIFT 1
+#define MADERA_AEC1_LOOPBACK_ENA 0x0001
+#define MADERA_AEC1_LOOPBACK_ENA_MASK 0x0001
+#define MADERA_AEC1_LOOPBACK_ENA_SHIFT 0
+
+/* (0x0451) DAC_AEC_Control_2 */
+#define MADERA_AEC2_LOOPBACK_SRC_MASK 0x003C
+#define MADERA_AEC2_LOOPBACK_SRC_SHIFT 2
+#define MADERA_AEC2_ENA_STS 0x0002
+#define MADERA_AEC2_ENA_STS_MASK 0x0002
+#define MADERA_AEC2_ENA_STS_SHIFT 1
+#define MADERA_AEC2_LOOPBACK_ENA 0x0001
+#define MADERA_AEC2_LOOPBACK_ENA_MASK 0x0001
+#define MADERA_AEC2_LOOPBACK_ENA_SHIFT 0
+
+/* (0x0458) Noise_Gate_Control */
+#define MADERA_NGATE_HOLD_MASK 0x0030
+#define MADERA_NGATE_HOLD_SHIFT 4
+#define MADERA_NGATE_THR_MASK 0x000E
+#define MADERA_NGATE_THR_SHIFT 1
+#define MADERA_NGATE_ENA 0x0001
+#define MADERA_NGATE_ENA_MASK 0x0001
+#define MADERA_NGATE_ENA_SHIFT 0
+
+/* (0x0490) PDM_SPK1_CTRL_1 */
+#define MADERA_SPK1R_MUTE 0x2000
+#define MADERA_SPK1R_MUTE_MASK 0x2000
+#define MADERA_SPK1R_MUTE_SHIFT 13
+#define MADERA_SPK1L_MUTE 0x1000
+#define MADERA_SPK1L_MUTE_MASK 0x1000
+#define MADERA_SPK1L_MUTE_SHIFT 12
+#define MADERA_SPK1_MUTE_ENDIAN 0x0100
+#define MADERA_SPK1_MUTE_ENDIAN_MASK 0x0100
+#define MADERA_SPK1_MUTE_ENDIAN_SHIFT 8
+#define MADERA_SPK1_MUTE_SEQ1_MASK 0x00FF
+#define MADERA_SPK1_MUTE_SEQ1_SHIFT 0
+
+/* (0x0491) PDM_SPK1_CTRL_2 */
+#define MADERA_SPK1_FMT 0x0001
+#define MADERA_SPK1_FMT_MASK 0x0001
+#define MADERA_SPK1_FMT_SHIFT 0
+
+/* (0x0492) PDM_SPK2_CTRL_1 */
+#define MADERA_SPK2R_MUTE 0x2000
+#define MADERA_SPK2R_MUTE_MASK 0x2000
+#define MADERA_SPK2R_MUTE_SHIFT 13
+#define MADERA_SPK2L_MUTE 0x1000
+#define MADERA_SPK2L_MUTE_MASK 0x1000
+#define MADERA_SPK2L_MUTE_SHIFT 12
+
+/* (0x04A0) - HP1 Short Circuit Ctrl */
+#define MADERA_HP1_SC_ENA 0x1000
+#define MADERA_HP1_SC_ENA_MASK 0x1000
+#define MADERA_HP1_SC_ENA_SHIFT 12
+
+/* (0x04A1) - HP2 Short Circuit Ctrl */
+#define MADERA_HP2_SC_ENA 0x1000
+#define MADERA_HP2_SC_ENA_MASK 0x1000
+#define MADERA_HP2_SC_ENA_SHIFT 12
+
+/* (0x04A2) - HP3 Short Circuit Ctrl */
+#define MADERA_HP3_SC_ENA 0x1000
+#define MADERA_HP3_SC_ENA_MASK 0x1000
+#define MADERA_HP3_SC_ENA_SHIFT 12
+
+/* (0x04A8) - HP_Test_Ctrl_5 */
+#define MADERA_HP1L_ONEFLT 0x0100
+#define MADERA_HP1L_ONEFLT_MASK 0x0100
+#define MADERA_HP1L_ONEFLT_SHIFT 8
+
+/* (0x04A9) - HP_Test_Ctrl_6 */
+#define MADERA_HP1R_ONEFLT 0x0100
+#define MADERA_HP1R_ONEFLT_MASK 0x0100
+#define MADERA_HP1R_ONEFLT_SHIFT 8
+
+/* (0x0500) AIF1_BCLK_Ctrl */
+#define MADERA_AIF1_BCLK_INV 0x0080
+#define MADERA_AIF1_BCLK_INV_MASK 0x0080
+#define MADERA_AIF1_BCLK_INV_SHIFT 7
+#define MADERA_AIF1_BCLK_MSTR 0x0020
+#define MADERA_AIF1_BCLK_MSTR_MASK 0x0020
+#define MADERA_AIF1_BCLK_MSTR_SHIFT 5
+#define MADERA_AIF1_BCLK_FREQ_MASK 0x001F
+#define MADERA_AIF1_BCLK_FREQ_SHIFT 0
+
+/* (0x0501) AIF1_Tx_Pin_Ctrl */
+#define MADERA_AIF1TX_LRCLK_SRC 0x0008
+#define MADERA_AIF1TX_LRCLK_SRC_MASK 0x0008
+#define MADERA_AIF1TX_LRCLK_SRC_SHIFT 3
+#define MADERA_AIF1TX_LRCLK_INV 0x0004
+#define MADERA_AIF1TX_LRCLK_INV_MASK 0x0004
+#define MADERA_AIF1TX_LRCLK_INV_SHIFT 2
+#define MADERA_AIF1TX_LRCLK_MSTR 0x0001
+#define MADERA_AIF1TX_LRCLK_MSTR_MASK 0x0001
+#define MADERA_AIF1TX_LRCLK_MSTR_SHIFT 0
+
+/* (0x0502) AIF1_Rx_Pin_Ctrl */
+#define MADERA_AIF1RX_LRCLK_INV 0x0004
+#define MADERA_AIF1RX_LRCLK_INV_MASK 0x0004
+#define MADERA_AIF1RX_LRCLK_INV_SHIFT 2
+#define MADERA_AIF1RX_LRCLK_FRC 0x0002
+#define MADERA_AIF1RX_LRCLK_FRC_MASK 0x0002
+#define MADERA_AIF1RX_LRCLK_FRC_SHIFT 1
+#define MADERA_AIF1RX_LRCLK_MSTR 0x0001
+#define MADERA_AIF1RX_LRCLK_MSTR_MASK 0x0001
+#define MADERA_AIF1RX_LRCLK_MSTR_SHIFT 0
+
+/* (0x0503) AIF1_Rate_Ctrl */
+#define MADERA_AIF1_RATE_MASK 0xF800
+#define MADERA_AIF1_RATE_SHIFT 11
+#define MADERA_AIF1_TRI 0x0040
+#define MADERA_AIF1_TRI_MASK 0x0040
+#define MADERA_AIF1_TRI_SHIFT 6
+
+/* (0x0504) AIF1_Format */
+#define MADERA_AIF1_FMT_MASK 0x0007
+#define MADERA_AIF1_FMT_SHIFT 0
+
+/* (0x0506) AIF1_Rx_BCLK_Rate */
+#define MADERA_AIF1RX_BCPF_MASK 0x1FFF
+#define MADERA_AIF1RX_BCPF_SHIFT 0
+
+/* (0x0507) AIF1_Frame_Ctrl_1 */
+#define MADERA_AIF1TX_WL_MASK 0x3F00
+#define MADERA_AIF1TX_WL_SHIFT 8
+#define MADERA_AIF1TX_SLOT_LEN_MASK 0x00FF
+#define MADERA_AIF1TX_SLOT_LEN_SHIFT 0
+
+/* (0x0508) AIF1_Frame_Ctrl_2 */
+#define MADERA_AIF1RX_WL_MASK 0x3F00
+#define MADERA_AIF1RX_WL_SHIFT 8
+#define MADERA_AIF1RX_SLOT_LEN_MASK 0x00FF
+#define MADERA_AIF1RX_SLOT_LEN_SHIFT 0
+
+/* (0x0509) AIF1_Frame_Ctrl_3 */
+#define MADERA_AIF1TX1_SLOT_MASK 0x003F
+#define MADERA_AIF1TX1_SLOT_SHIFT 0
+
+/* (0x0519) AIF1_Tx_Enables */
+#define MADERA_AIF1TX8_ENA 0x0080
+#define MADERA_AIF1TX8_ENA_MASK 0x0080
+#define MADERA_AIF1TX8_ENA_SHIFT 7
+#define MADERA_AIF1TX7_ENA 0x0040
+#define MADERA_AIF1TX7_ENA_MASK 0x0040
+#define MADERA_AIF1TX7_ENA_SHIFT 6
+#define MADERA_AIF1TX6_ENA 0x0020
+#define MADERA_AIF1TX6_ENA_MASK 0x0020
+#define MADERA_AIF1TX6_ENA_SHIFT 5
+#define MADERA_AIF1TX5_ENA 0x0010
+#define MADERA_AIF1TX5_ENA_MASK 0x0010
+#define MADERA_AIF1TX5_ENA_SHIFT 4
+#define MADERA_AIF1TX4_ENA 0x0008
+#define MADERA_AIF1TX4_ENA_MASK 0x0008
+#define MADERA_AIF1TX4_ENA_SHIFT 3
+#define MADERA_AIF1TX3_ENA 0x0004
+#define MADERA_AIF1TX3_ENA_MASK 0x0004
+#define MADERA_AIF1TX3_ENA_SHIFT 2
+#define MADERA_AIF1TX2_ENA 0x0002
+#define MADERA_AIF1TX2_ENA_MASK 0x0002
+#define MADERA_AIF1TX2_ENA_SHIFT 1
+#define MADERA_AIF1TX1_ENA 0x0001
+#define MADERA_AIF1TX1_ENA_MASK 0x0001
+#define MADERA_AIF1TX1_ENA_SHIFT 0
+
+/* (0x051A) AIF1_Rx_Enables */
+#define MADERA_AIF1RX8_ENA 0x0080
+#define MADERA_AIF1RX8_ENA_MASK 0x0080
+#define MADERA_AIF1RX8_ENA_SHIFT 7
+#define MADERA_AIF1RX7_ENA 0x0040
+#define MADERA_AIF1RX7_ENA_MASK 0x0040
+#define MADERA_AIF1RX7_ENA_SHIFT 6
+#define MADERA_AIF1RX6_ENA 0x0020
+#define MADERA_AIF1RX6_ENA_MASK 0x0020
+#define MADERA_AIF1RX6_ENA_SHIFT 5
+#define MADERA_AIF1RX5_ENA 0x0010
+#define MADERA_AIF1RX5_ENA_MASK 0x0010
+#define MADERA_AIF1RX5_ENA_SHIFT 4
+#define MADERA_AIF1RX4_ENA 0x0008
+#define MADERA_AIF1RX4_ENA_MASK 0x0008
+#define MADERA_AIF1RX4_ENA_SHIFT 3
+#define MADERA_AIF1RX3_ENA 0x0004
+#define MADERA_AIF1RX3_ENA_MASK 0x0004
+#define MADERA_AIF1RX3_ENA_SHIFT 2
+#define MADERA_AIF1RX2_ENA 0x0002
+#define MADERA_AIF1RX2_ENA_MASK 0x0002
+#define MADERA_AIF1RX2_ENA_SHIFT 1
+#define MADERA_AIF1RX1_ENA 0x0001
+#define MADERA_AIF1RX1_ENA_MASK 0x0001
+#define MADERA_AIF1RX1_ENA_SHIFT 0
+
+/* (0x0559) AIF2_Tx_Enables */
+#define MADERA_AIF2TX8_ENA 0x0080
+#define MADERA_AIF2TX8_ENA_MASK 0x0080
+#define MADERA_AIF2TX8_ENA_SHIFT 7
+#define MADERA_AIF2TX7_ENA 0x0040
+#define MADERA_AIF2TX7_ENA_MASK 0x0040
+#define MADERA_AIF2TX7_ENA_SHIFT 6
+#define MADERA_AIF2TX6_ENA 0x0020
+#define MADERA_AIF2TX6_ENA_MASK 0x0020
+#define MADERA_AIF2TX6_ENA_SHIFT 5
+#define MADERA_AIF2TX5_ENA 0x0010
+#define MADERA_AIF2TX5_ENA_MASK 0x0010
+#define MADERA_AIF2TX5_ENA_SHIFT 4
+#define MADERA_AIF2TX4_ENA 0x0008
+#define MADERA_AIF2TX4_ENA_MASK 0x0008
+#define MADERA_AIF2TX4_ENA_SHIFT 3
+#define MADERA_AIF2TX3_ENA 0x0004
+#define MADERA_AIF2TX3_ENA_MASK 0x0004
+#define MADERA_AIF2TX3_ENA_SHIFT 2
+#define MADERA_AIF2TX2_ENA 0x0002
+#define MADERA_AIF2TX2_ENA_MASK 0x0002
+#define MADERA_AIF2TX2_ENA_SHIFT 1
+#define MADERA_AIF2TX1_ENA 0x0001
+#define MADERA_AIF2TX1_ENA_MASK 0x0001
+#define MADERA_AIF2TX1_ENA_SHIFT 0
+
+/* (0x055A) AIF2_Rx_Enables */
+#define MADERA_AIF2RX8_ENA 0x0080
+#define MADERA_AIF2RX8_ENA_MASK 0x0080
+#define MADERA_AIF2RX8_ENA_SHIFT 7
+#define MADERA_AIF2RX7_ENA 0x0040
+#define MADERA_AIF2RX7_ENA_MASK 0x0040
+#define MADERA_AIF2RX7_ENA_SHIFT 6
+#define MADERA_AIF2RX6_ENA 0x0020
+#define MADERA_AIF2RX6_ENA_MASK 0x0020
+#define MADERA_AIF2RX6_ENA_SHIFT 5
+#define MADERA_AIF2RX5_ENA 0x0010
+#define MADERA_AIF2RX5_ENA_MASK 0x0010
+#define MADERA_AIF2RX5_ENA_SHIFT 4
+#define MADERA_AIF2RX4_ENA 0x0008
+#define MADERA_AIF2RX4_ENA_MASK 0x0008
+#define MADERA_AIF2RX4_ENA_SHIFT 3
+#define MADERA_AIF2RX3_ENA 0x0004
+#define MADERA_AIF2RX3_ENA_MASK 0x0004
+#define MADERA_AIF2RX3_ENA_SHIFT 2
+#define MADERA_AIF2RX2_ENA 0x0002
+#define MADERA_AIF2RX2_ENA_MASK 0x0002
+#define MADERA_AIF2RX2_ENA_SHIFT 1
+#define MADERA_AIF2RX1_ENA 0x0001
+#define MADERA_AIF2RX1_ENA_MASK 0x0001
+#define MADERA_AIF2RX1_ENA_SHIFT 0
+
+/* (0x0599) AIF3_Tx_Enables */
+#define MADERA_AIF3TX8_ENA 0x0080
+#define MADERA_AIF3TX8_ENA_MASK 0x0080
+#define MADERA_AIF3TX8_ENA_SHIFT 7
+#define MADERA_AIF3TX7_ENA 0x0040
+#define MADERA_AIF3TX7_ENA_MASK 0x0040
+#define MADERA_AIF3TX7_ENA_SHIFT 6
+#define MADERA_AIF3TX6_ENA 0x0020
+#define MADERA_AIF3TX6_ENA_MASK 0x0020
+#define MADERA_AIF3TX6_ENA_SHIFT 5
+#define MADERA_AIF3TX5_ENA 0x0010
+#define MADERA_AIF3TX5_ENA_MASK 0x0010
+#define MADERA_AIF3TX5_ENA_SHIFT 4
+#define MADERA_AIF3TX4_ENA 0x0008
+#define MADERA_AIF3TX4_ENA_MASK 0x0008
+#define MADERA_AIF3TX4_ENA_SHIFT 3
+#define MADERA_AIF3TX3_ENA 0x0004
+#define MADERA_AIF3TX3_ENA_MASK 0x0004
+#define MADERA_AIF3TX3_ENA_SHIFT 2
+#define MADERA_AIF3TX2_ENA 0x0002
+#define MADERA_AIF3TX2_ENA_MASK 0x0002
+#define MADERA_AIF3TX2_ENA_SHIFT 1
+#define MADERA_AIF3TX1_ENA 0x0001
+#define MADERA_AIF3TX1_ENA_MASK 0x0001
+#define MADERA_AIF3TX1_ENA_SHIFT 0
+
+/* (0x059A) AIF3_Rx_Enables */
+#define MADERA_AIF3RX8_ENA 0x0080
+#define MADERA_AIF3RX8_ENA_MASK 0x0080
+#define MADERA_AIF3RX8_ENA_SHIFT 7
+#define MADERA_AIF3RX7_ENA 0x0040
+#define MADERA_AIF3RX7_ENA_MASK 0x0040
+#define MADERA_AIF3RX7_ENA_SHIFT 6
+#define MADERA_AIF3RX6_ENA 0x0020
+#define MADERA_AIF3RX6_ENA_MASK 0x0020
+#define MADERA_AIF3RX6_ENA_SHIFT 5
+#define MADERA_AIF3RX5_ENA 0x0010
+#define MADERA_AIF3RX5_ENA_MASK 0x0010
+#define MADERA_AIF3RX5_ENA_SHIFT 4
+#define MADERA_AIF3RX4_ENA 0x0008
+#define MADERA_AIF3RX4_ENA_MASK 0x0008
+#define MADERA_AIF3RX4_ENA_SHIFT 3
+#define MADERA_AIF3RX3_ENA 0x0004
+#define MADERA_AIF3RX3_ENA_MASK 0x0004
+#define MADERA_AIF3RX3_ENA_SHIFT 2
+#define MADERA_AIF3RX2_ENA 0x0002
+#define MADERA_AIF3RX2_ENA_MASK 0x0002
+#define MADERA_AIF3RX2_ENA_SHIFT 1
+#define MADERA_AIF3RX1_ENA 0x0001
+#define MADERA_AIF3RX1_ENA_MASK 0x0001
+#define MADERA_AIF3RX1_ENA_SHIFT 0
+
+/* (0x05B9) AIF4_Tx_Enables */
+#define MADERA_AIF4TX2_ENA 0x0002
+#define MADERA_AIF4TX2_ENA_MASK 0x0002
+#define MADERA_AIF4TX2_ENA_SHIFT 1
+#define MADERA_AIF4TX1_ENA 0x0001
+#define MADERA_AIF4TX1_ENA_MASK 0x0001
+#define MADERA_AIF4TX1_ENA_SHIFT 0
+
+/* (0x05BA) AIF4_Rx_Enables */
+#define MADERA_AIF4RX2_ENA 0x0002
+#define MADERA_AIF4RX2_ENA_MASK 0x0002
+#define MADERA_AIF4RX2_ENA_SHIFT 1
+#define MADERA_AIF4RX1_ENA 0x0001
+#define MADERA_AIF4RX1_ENA_MASK 0x0001
+#define MADERA_AIF4RX1_ENA_SHIFT 0
+
+/* (0x05C2) SPD1_TX_Control */
+#define MADERA_SPD1_VAL2 0x2000
+#define MADERA_SPD1_VAL2_MASK 0x2000
+#define MADERA_SPD1_VAL2_SHIFT 13
+#define MADERA_SPD1_VAL1 0x1000
+#define MADERA_SPD1_VAL1_MASK 0x1000
+#define MADERA_SPD1_VAL1_SHIFT 12
+#define MADERA_SPD1_RATE_MASK 0x00F0
+#define MADERA_SPD1_RATE_SHIFT 4
+#define MADERA_SPD1_ENA 0x0001
+#define MADERA_SPD1_ENA_MASK 0x0001
+#define MADERA_SPD1_ENA_SHIFT 0
+
+/* (0x05F5) SLIMbus_RX_Channel_Enable */
+#define MADERA_SLIMRX8_ENA 0x0080
+#define MADERA_SLIMRX8_ENA_MASK 0x0080
+#define MADERA_SLIMRX8_ENA_SHIFT 7
+#define MADERA_SLIMRX7_ENA 0x0040
+#define MADERA_SLIMRX7_ENA_MASK 0x0040
+#define MADERA_SLIMRX7_ENA_SHIFT 6
+#define MADERA_SLIMRX6_ENA 0x0020
+#define MADERA_SLIMRX6_ENA_MASK 0x0020
+#define MADERA_SLIMRX6_ENA_SHIFT 5
+#define MADERA_SLIMRX5_ENA 0x0010
+#define MADERA_SLIMRX5_ENA_MASK 0x0010
+#define MADERA_SLIMRX5_ENA_SHIFT 4
+#define MADERA_SLIMRX4_ENA 0x0008
+#define MADERA_SLIMRX4_ENA_MASK 0x0008
+#define MADERA_SLIMRX4_ENA_SHIFT 3
+#define MADERA_SLIMRX3_ENA 0x0004
+#define MADERA_SLIMRX3_ENA_MASK 0x0004
+#define MADERA_SLIMRX3_ENA_SHIFT 2
+#define MADERA_SLIMRX2_ENA 0x0002
+#define MADERA_SLIMRX2_ENA_MASK 0x0002
+#define MADERA_SLIMRX2_ENA_SHIFT 1
+#define MADERA_SLIMRX1_ENA 0x0001
+#define MADERA_SLIMRX1_ENA_MASK 0x0001
+#define MADERA_SLIMRX1_ENA_SHIFT 0
+
+/* (0x05F6) SLIMbus_TX_Channel_Enable */
+#define MADERA_SLIMTX8_ENA 0x0080
+#define MADERA_SLIMTX8_ENA_MASK 0x0080
+#define MADERA_SLIMTX8_ENA_SHIFT 7
+#define MADERA_SLIMTX7_ENA 0x0040
+#define MADERA_SLIMTX7_ENA_MASK 0x0040
+#define MADERA_SLIMTX7_ENA_SHIFT 6
+#define MADERA_SLIMTX6_ENA 0x0020
+#define MADERA_SLIMTX6_ENA_MASK 0x0020
+#define MADERA_SLIMTX6_ENA_SHIFT 5
+#define MADERA_SLIMTX5_ENA 0x0010
+#define MADERA_SLIMTX5_ENA_MASK 0x0010
+#define MADERA_SLIMTX5_ENA_SHIFT 4
+#define MADERA_SLIMTX4_ENA 0x0008
+#define MADERA_SLIMTX4_ENA_MASK 0x0008
+#define MADERA_SLIMTX4_ENA_SHIFT 3
+#define MADERA_SLIMTX3_ENA 0x0004
+#define MADERA_SLIMTX3_ENA_MASK 0x0004
+#define MADERA_SLIMTX3_ENA_SHIFT 2
+#define MADERA_SLIMTX2_ENA 0x0002
+#define MADERA_SLIMTX2_ENA_MASK 0x0002
+#define MADERA_SLIMTX2_ENA_SHIFT 1
+#define MADERA_SLIMTX1_ENA 0x0001
+#define MADERA_SLIMTX1_ENA_MASK 0x0001
+#define MADERA_SLIMTX1_ENA_SHIFT 0
+
+/* (0x0E10) EQ1_1 */
+#define MADERA_EQ1_B1_GAIN_MASK 0xF800
+#define MADERA_EQ1_B1_GAIN_SHIFT 11
+#define MADERA_EQ1_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ1_B2_GAIN_SHIFT 6
+#define MADERA_EQ1_B3_GAIN_MASK 0x003E
+#define MADERA_EQ1_B3_GAIN_SHIFT 1
+#define MADERA_EQ1_ENA 0x0001
+#define MADERA_EQ1_ENA_MASK 0x0001
+#define MADERA_EQ1_ENA_SHIFT 0
+
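EQ1_1 packs three 5-bit band gains plus the enable bit into a single register. Because the *_MASK macros are compile-time constants, FIELD_PREP() from linux/bitfield.h can build the value directly. A hedged sketch, with the MADERA_EQ1_1 address macro assumed from earlier in this header and the gain arguments as raw field codes:

#include <linux/bitfield.h>
#include <linux/regmap.h>

/* Sketch only: program the EQ1 band 1-3 gains and enable the EQ in a
 * single register write.
 */
static int madera_eq1_enable_sketch(struct regmap *regmap, unsigned int b1,
				    unsigned int b2, unsigned int b3)
{
	unsigned int val;

	val = FIELD_PREP(MADERA_EQ1_B1_GAIN_MASK, b1) |
	      FIELD_PREP(MADERA_EQ1_B2_GAIN_MASK, b2) |
	      FIELD_PREP(MADERA_EQ1_B3_GAIN_MASK, b3) |
	      MADERA_EQ1_ENA;

	return regmap_write(regmap, MADERA_EQ1_1, val);
}
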
+/* (0x0E11) EQ1_2 */
+#define MADERA_EQ1_B4_GAIN_MASK 0xF800
+#define MADERA_EQ1_B4_GAIN_SHIFT 11
+#define MADERA_EQ1_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ1_B5_GAIN_SHIFT 6
+#define MADERA_EQ1_B1_MODE 0x0001
+#define MADERA_EQ1_B1_MODE_MASK 0x0001
+#define MADERA_EQ1_B1_MODE_SHIFT 0
+
+/* (0x0E26) EQ2_1 */
+#define MADERA_EQ2_B1_GAIN_MASK 0xF800
+#define MADERA_EQ2_B1_GAIN_SHIFT 11
+#define MADERA_EQ2_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ2_B2_GAIN_SHIFT 6
+#define MADERA_EQ2_B3_GAIN_MASK 0x003E
+#define MADERA_EQ2_B3_GAIN_SHIFT 1
+#define MADERA_EQ2_ENA 0x0001
+#define MADERA_EQ2_ENA_MASK 0x0001
+#define MADERA_EQ2_ENA_SHIFT 0
+
+/* (0x0E27) EQ2_2 */
+#define MADERA_EQ2_B4_GAIN_MASK 0xF800
+#define MADERA_EQ2_B4_GAIN_SHIFT 11
+#define MADERA_EQ2_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ2_B5_GAIN_SHIFT 6
+#define MADERA_EQ2_B1_MODE 0x0001
+#define MADERA_EQ2_B1_MODE_MASK 0x0001
+#define MADERA_EQ2_B1_MODE_SHIFT 0
+
+/* (0x0E3C) EQ3_1 */
+#define MADERA_EQ3_B1_GAIN_MASK 0xF800
+#define MADERA_EQ3_B1_GAIN_SHIFT 11
+#define MADERA_EQ3_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ3_B2_GAIN_SHIFT 6
+#define MADERA_EQ3_B3_GAIN_MASK 0x003E
+#define MADERA_EQ3_B3_GAIN_SHIFT 1
+#define MADERA_EQ3_ENA 0x0001
+#define MADERA_EQ3_ENA_MASK 0x0001
+#define MADERA_EQ3_ENA_SHIFT 0
+
+/* (0x0E3D) EQ3_2 */
+#define MADERA_EQ3_B4_GAIN_MASK 0xF800
+#define MADERA_EQ3_B4_GAIN_SHIFT 11
+#define MADERA_EQ3_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ3_B5_GAIN_SHIFT 6
+#define MADERA_EQ3_B1_MODE 0x0001
+#define MADERA_EQ3_B1_MODE_MASK 0x0001
+#define MADERA_EQ3_B1_MODE_SHIFT 0
+
+/* (0x0E52) EQ4_1 */
+#define MADERA_EQ4_B1_GAIN_MASK 0xF800
+#define MADERA_EQ4_B1_GAIN_SHIFT 11
+#define MADERA_EQ4_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ4_B2_GAIN_SHIFT 6
+#define MADERA_EQ4_B3_GAIN_MASK 0x003E
+#define MADERA_EQ4_B3_GAIN_SHIFT 1
+#define MADERA_EQ4_ENA 0x0001
+#define MADERA_EQ4_ENA_MASK 0x0001
+#define MADERA_EQ4_ENA_SHIFT 0
+
+/* (0x0E53) EQ4_2 */
+#define MADERA_EQ4_B4_GAIN_MASK 0xF800
+#define MADERA_EQ4_B4_GAIN_SHIFT 11
+#define MADERA_EQ4_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ4_B5_GAIN_SHIFT 6
+#define MADERA_EQ4_B1_MODE 0x0001
+#define MADERA_EQ4_B1_MODE_MASK 0x0001
+#define MADERA_EQ4_B1_MODE_SHIFT 0
+
+/* (0x0E80) DRC1_ctrl1 */
+#define MADERA_DRC1L_ENA 0x0002
+#define MADERA_DRC1L_ENA_MASK 0x0002
+#define MADERA_DRC1L_ENA_SHIFT 1
+#define MADERA_DRC1R_ENA 0x0001
+#define MADERA_DRC1R_ENA_MASK 0x0001
+#define MADERA_DRC1R_ENA_SHIFT 0
+
+/* (0x0E88) DRC2_ctrl1 */
+#define MADERA_DRC2L_ENA 0x0002
+#define MADERA_DRC2L_ENA_MASK 0x0002
+#define MADERA_DRC2L_ENA_SHIFT 1
+#define MADERA_DRC2R_ENA 0x0001
+#define MADERA_DRC2R_ENA_MASK 0x0001
+#define MADERA_DRC2R_ENA_SHIFT 0
+
+/* (0x0EC0) HPLPF1_1 */
+#define MADERA_LHPF1_MODE 0x0002
+#define MADERA_LHPF1_MODE_MASK 0x0002
+#define MADERA_LHPF1_MODE_SHIFT 1
+#define MADERA_LHPF1_ENA 0x0001
+#define MADERA_LHPF1_ENA_MASK 0x0001
+#define MADERA_LHPF1_ENA_SHIFT 0
+
+/* (0x0EC1) HPLPF1_2 */
+#define MADERA_LHPF1_COEFF_MASK 0xFFFF
+#define MADERA_LHPF1_COEFF_SHIFT 0
+
+/* (0x0EC4) HPLPF2_1 */
+#define MADERA_LHPF2_MODE 0x0002
+#define MADERA_LHPF2_MODE_MASK 0x0002
+#define MADERA_LHPF2_MODE_SHIFT 1
+#define MADERA_LHPF2_ENA 0x0001
+#define MADERA_LHPF2_ENA_MASK 0x0001
+#define MADERA_LHPF2_ENA_SHIFT 0
+
+/* (0x0EC5) HPLPF2_2 */
+#define MADERA_LHPF2_COEFF_MASK 0xFFFF
+#define MADERA_LHPF2_COEFF_SHIFT 0
+
+/* (0x0EC8) HPLPF3_1 */
+#define MADERA_LHPF3_MODE 0x0002
+#define MADERA_LHPF3_MODE_MASK 0x0002
+#define MADERA_LHPF3_MODE_SHIFT 1
+#define MADERA_LHPF3_ENA 0x0001
+#define MADERA_LHPF3_ENA_MASK 0x0001
+#define MADERA_LHPF3_ENA_SHIFT 0
+
+/* (0x0EC9) HPLPF3_2 */
+#define MADERA_LHPF3_COEFF_MASK 0xFFFF
+#define MADERA_LHPF3_COEFF_SHIFT 0
+
+/* (0x0ECC) HPLPF4_1 */
+#define MADERA_LHPF4_MODE 0x0002
+#define MADERA_LHPF4_MODE_MASK 0x0002
+#define MADERA_LHPF4_MODE_SHIFT 1
+#define MADERA_LHPF4_ENA 0x0001
+#define MADERA_LHPF4_ENA_MASK 0x0001
+#define MADERA_LHPF4_ENA_SHIFT 0
+
+/* (0x0ECD) HPLPF4_2 */
+#define MADERA_LHPF4_COEFF_MASK 0xFFFF
+#define MADERA_LHPF4_COEFF_SHIFT 0
+
+/* (0x0ED0) ASRC2_ENABLE */
+#define MADERA_ASRC2_IN2L_ENA 0x0008
+#define MADERA_ASRC2_IN2L_ENA_MASK 0x0008
+#define MADERA_ASRC2_IN2L_ENA_SHIFT 3
+#define MADERA_ASRC2_IN2R_ENA 0x0004
+#define MADERA_ASRC2_IN2R_ENA_MASK 0x0004
+#define MADERA_ASRC2_IN2R_ENA_SHIFT 2
+#define MADERA_ASRC2_IN1L_ENA 0x0002
+#define MADERA_ASRC2_IN1L_ENA_MASK 0x0002
+#define MADERA_ASRC2_IN1L_ENA_SHIFT 1
+#define MADERA_ASRC2_IN1R_ENA 0x0001
+#define MADERA_ASRC2_IN1R_ENA_MASK 0x0001
+#define MADERA_ASRC2_IN1R_ENA_SHIFT 0
+
+/* (0x0ED2) ASRC2_RATE1 */
+#define MADERA_ASRC2_RATE1_MASK 0xF800
+#define MADERA_ASRC2_RATE1_SHIFT 11
+
+/* (0x0ED3) ASRC2_RATE2 */
+#define MADERA_ASRC2_RATE2_MASK 0xF800
+#define MADERA_ASRC2_RATE2_SHIFT 11
+
+/* (0x0EE0) ASRC1_ENABLE */
+#define MADERA_ASRC1_IN2L_ENA 0x0008
+#define MADERA_ASRC1_IN2L_ENA_MASK 0x0008
+#define MADERA_ASRC1_IN2L_ENA_SHIFT 3
+#define MADERA_ASRC1_IN2R_ENA 0x0004
+#define MADERA_ASRC1_IN2R_ENA_MASK 0x0004
+#define MADERA_ASRC1_IN2R_ENA_SHIFT 2
+#define MADERA_ASRC1_IN1L_ENA 0x0002
+#define MADERA_ASRC1_IN1L_ENA_MASK 0x0002
+#define MADERA_ASRC1_IN1L_ENA_SHIFT 1
+#define MADERA_ASRC1_IN1R_ENA 0x0001
+#define MADERA_ASRC1_IN1R_ENA_MASK 0x0001
+#define MADERA_ASRC1_IN1R_ENA_SHIFT 0
+
+/* (0x0EE2) ASRC1_RATE1 */
+#define MADERA_ASRC1_RATE1_MASK 0xF800
+#define MADERA_ASRC1_RATE1_SHIFT 11
+
+/* (0x0EE3) ASRC1_RATE2 */
+#define MADERA_ASRC1_RATE2_MASK 0xF800
+#define MADERA_ASRC1_RATE2_SHIFT 11
+
+/* (0x0EF0) - ISRC1 CTRL 1 */
+#define MADERA_ISRC1_FSH_MASK 0xF800
+#define MADERA_ISRC1_FSH_SHIFT 11
+#define MADERA_ISRC1_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC1_CLK_SEL_SHIFT 8
+
+/* (0x0EF1) ISRC1_CTRL_2 */
+#define MADERA_ISRC1_FSL_MASK 0xF800
+#define MADERA_ISRC1_FSL_SHIFT 11
+
+/* (0x0EF2) ISRC1_CTRL_3 */
+#define MADERA_ISRC1_INT1_ENA 0x8000
+#define MADERA_ISRC1_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC1_INT1_ENA_SHIFT 15
+#define MADERA_ISRC1_INT2_ENA 0x4000
+#define MADERA_ISRC1_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC1_INT2_ENA_SHIFT 14
+#define MADERA_ISRC1_INT3_ENA 0x2000
+#define MADERA_ISRC1_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC1_INT3_ENA_SHIFT 13
+#define MADERA_ISRC1_INT4_ENA 0x1000
+#define MADERA_ISRC1_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC1_INT4_ENA_SHIFT 12
+#define MADERA_ISRC1_DEC1_ENA 0x0200
+#define MADERA_ISRC1_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC1_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC1_DEC2_ENA 0x0100
+#define MADERA_ISRC1_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC1_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC1_DEC3_ENA 0x0080
+#define MADERA_ISRC1_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC1_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC1_DEC4_ENA 0x0040
+#define MADERA_ISRC1_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC1_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC1_NOTCH_ENA 0x0001
+#define MADERA_ISRC1_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC1_NOTCH_ENA_SHIFT 0
+
+/* (0x0EF3) ISRC2_CTRL_1 */
+#define MADERA_ISRC2_FSH_MASK 0xF800
+#define MADERA_ISRC2_FSH_SHIFT 11
+#define MADERA_ISRC2_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC2_CLK_SEL_SHIFT 8
+
+/* (0x0EF4) ISRC2_CTRL_2 */
+#define MADERA_ISRC2_FSL_MASK 0xF800
+#define MADERA_ISRC2_FSL_SHIFT 11
+
+/* (0x0EF5) ISRC2_CTRL_3 */
+#define MADERA_ISRC2_INT1_ENA 0x8000
+#define MADERA_ISRC2_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC2_INT1_ENA_SHIFT 15
+#define MADERA_ISRC2_INT2_ENA 0x4000
+#define MADERA_ISRC2_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC2_INT2_ENA_SHIFT 14
+#define MADERA_ISRC2_INT3_ENA 0x2000
+#define MADERA_ISRC2_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC2_INT3_ENA_SHIFT 13
+#define MADERA_ISRC2_INT4_ENA 0x1000
+#define MADERA_ISRC2_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC2_INT4_ENA_SHIFT 12
+#define MADERA_ISRC2_DEC1_ENA 0x0200
+#define MADERA_ISRC2_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC2_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC2_DEC2_ENA 0x0100
+#define MADERA_ISRC2_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC2_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC2_DEC3_ENA 0x0080
+#define MADERA_ISRC2_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC2_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC2_DEC4_ENA 0x0040
+#define MADERA_ISRC2_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC2_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC2_NOTCH_ENA 0x0001
+#define MADERA_ISRC2_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC2_NOTCH_ENA_SHIFT 0
+
+/* (0x0EF6) ISRC3_CTRL_1 */
+#define MADERA_ISRC3_FSH_MASK 0xF800
+#define MADERA_ISRC3_FSH_SHIFT 11
+#define MADERA_ISRC3_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC3_CLK_SEL_SHIFT 8
+
+/* (0x0EF7) ISRC3_CTRL_2 */
+#define MADERA_ISRC3_FSL_MASK 0xF800
+#define MADERA_ISRC3_FSL_SHIFT 11
+
+/* (0x0EF8) ISRC3_CTRL_3 */
+#define MADERA_ISRC3_INT1_ENA 0x8000
+#define MADERA_ISRC3_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC3_INT1_ENA_SHIFT 15
+#define MADERA_ISRC3_INT2_ENA 0x4000
+#define MADERA_ISRC3_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC3_INT2_ENA_SHIFT 14
+#define MADERA_ISRC3_INT3_ENA 0x2000
+#define MADERA_ISRC3_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC3_INT3_ENA_SHIFT 13
+#define MADERA_ISRC3_INT4_ENA 0x1000
+#define MADERA_ISRC3_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC3_INT4_ENA_SHIFT 12
+#define MADERA_ISRC3_DEC1_ENA 0x0200
+#define MADERA_ISRC3_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC3_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC3_DEC2_ENA 0x0100
+#define MADERA_ISRC3_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC3_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC3_DEC3_ENA 0x0080
+#define MADERA_ISRC3_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC3_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC3_DEC4_ENA 0x0040
+#define MADERA_ISRC3_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC3_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC3_NOTCH_ENA 0x0001
+#define MADERA_ISRC3_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC3_NOTCH_ENA_SHIFT 0
+
+/* (0x0EF9) ISRC4_CTRL_1 */
+#define MADERA_ISRC4_FSH_MASK 0xF800
+#define MADERA_ISRC4_FSH_SHIFT 11
+#define MADERA_ISRC4_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC4_CLK_SEL_SHIFT 8
+
+/* (0x0EFA) ISRC4_CTRL_2 */
+#define MADERA_ISRC4_FSL_MASK 0xF800
+#define MADERA_ISRC4_FSL_SHIFT 11
+
+/* (0x0EFB) ISRC4_CTRL_3 */
+#define MADERA_ISRC4_INT1_ENA 0x8000
+#define MADERA_ISRC4_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC4_INT1_ENA_SHIFT 15
+#define MADERA_ISRC4_INT2_ENA 0x4000
+#define MADERA_ISRC4_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC4_INT2_ENA_SHIFT 14
+#define MADERA_ISRC4_INT3_ENA 0x2000
+#define MADERA_ISRC4_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC4_INT3_ENA_SHIFT 13
+#define MADERA_ISRC4_INT4_ENA 0x1000
+#define MADERA_ISRC4_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC4_INT4_ENA_SHIFT 12
+#define MADERA_ISRC4_DEC1_ENA 0x0200
+#define MADERA_ISRC4_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC4_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC4_DEC2_ENA 0x0100
+#define MADERA_ISRC4_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC4_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC4_DEC3_ENA 0x0080
+#define MADERA_ISRC4_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC4_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC4_DEC4_ENA 0x0040
+#define MADERA_ISRC4_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC4_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC4_NOTCH_ENA 0x0001
+#define MADERA_ISRC4_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC4_NOTCH_ENA_SHIFT 0
+
+/* (0x0F00) Clock_Control */
+#define MADERA_EXT_NG_SEL_CLR 0x0080
+#define MADERA_EXT_NG_SEL_CLR_MASK 0x0080
+#define MADERA_EXT_NG_SEL_CLR_SHIFT 7
+#define MADERA_EXT_NG_SEL_SET 0x0040
+#define MADERA_EXT_NG_SEL_SET_MASK 0x0040
+#define MADERA_EXT_NG_SEL_SET_SHIFT 6
+#define MADERA_CLK_R_ENA_CLR 0x0020
+#define MADERA_CLK_R_ENA_CLR_MASK 0x0020
+#define MADERA_CLK_R_ENA_CLR_SHIFT 5
+#define MADERA_CLK_R_ENA_SET 0x0010
+#define MADERA_CLK_R_ENA_SET_MASK 0x0010
+#define MADERA_CLK_R_ENA_SET_SHIFT 4
+#define MADERA_CLK_NG_ENA_CLR 0x0008
+#define MADERA_CLK_NG_ENA_CLR_MASK 0x0008
+#define MADERA_CLK_NG_ENA_CLR_SHIFT 3
+#define MADERA_CLK_NG_ENA_SET 0x0004
+#define MADERA_CLK_NG_ENA_SET_MASK 0x0004
+#define MADERA_CLK_NG_ENA_SET_SHIFT 2
+#define MADERA_CLK_L_ENA_CLR 0x0002
+#define MADERA_CLK_L_ENA_CLR_MASK 0x0002
+#define MADERA_CLK_L_ENA_CLR_SHIFT 1
+#define MADERA_CLK_L_ENA_SET 0x0001
+#define MADERA_CLK_L_ENA_SET_MASK 0x0001
+#define MADERA_CLK_L_ENA_SET_SHIFT 0
+
+/* (0x0F01) ANC_SRC */
+#define MADERA_IN_RXANCR_SEL_MASK 0x0070
+#define MADERA_IN_RXANCR_SEL_SHIFT 4
+#define MADERA_IN_RXANCL_SEL_MASK 0x0007
+#define MADERA_IN_RXANCL_SEL_SHIFT 0
+
+/* (0x0F17) FCL_ADC_reformatter_control */
+#define MADERA_FCL_MIC_MODE_SEL 0x000C
+#define MADERA_FCL_MIC_MODE_SEL_SHIFT 2
+
+/* (0x0F73) FCR_ADC_reformatter_control */
+#define MADERA_FCR_MIC_MODE_SEL 0x000C
+#define MADERA_FCR_MIC_MODE_SEL_SHIFT 2
+
+/* (0x10C0) AUXPDM1_CTRL_0 */
+#define MADERA_AUXPDM1_SRC_MASK 0x0F00
+#define MADERA_AUXPDM1_SRC_SHIFT 8
+#define MADERA_AUXPDM1_TXEDGE_MASK 0x0010
+#define MADERA_AUXPDM1_TXEDGE_SHIFT 4
+#define MADERA_AUXPDM1_MSTR_MASK 0x0008
+#define MADERA_AUXPDM1_MSTR_SHIFT 3
+#define MADERA_AUXPDM1_ENABLE_MASK 0x0001
+#define MADERA_AUXPDM1_ENABLE_SHIFT 0
+
+/* (0x10C1) AUXPDM1_CTRL_1 */
+#define MADERA_AUXPDM1_CLK_FREQ_MASK 0xC000
+#define MADERA_AUXPDM1_CLK_FREQ_SHIFT 14
+
+/* (0x1480) DFC1_CTRL_W0 */
+#define MADERA_DFC1_RATE_MASK 0x007C
+#define MADERA_DFC1_RATE_SHIFT 2
+#define MADERA_DFC1_DITH_ENA 0x0002
+#define MADERA_DFC1_DITH_ENA_MASK 0x0002
+#define MADERA_DFC1_DITH_ENA_SHIFT 1
+#define MADERA_DFC1_ENA 0x0001
+#define MADERA_DFC1_ENA_MASK 0x0001
+#define MADERA_DFC1_ENA_SHIFT 0
+
+/* (0x1482) DFC1_RX_W0 */
+#define MADERA_DFC1_RX_DATA_WIDTH_MASK 0x1F00
+#define MADERA_DFC1_RX_DATA_WIDTH_SHIFT 8
+
+#define MADERA_DFC1_RX_DATA_TYPE_MASK 0x0007
+#define MADERA_DFC1_RX_DATA_TYPE_SHIFT 0
+
+/* (0x1484) DFC1_TX_W0 */
+#define MADERA_DFC1_TX_DATA_WIDTH_MASK 0x1F00
+#define MADERA_DFC1_TX_DATA_WIDTH_SHIFT 8
+
+#define MADERA_DFC1_TX_DATA_TYPE_MASK 0x0007
+#define MADERA_DFC1_TX_DATA_TYPE_SHIFT 0
+
+/* (0x1600) ADSP2_IRQ0 */
+#define MADERA_DSP_IRQ2 0x0002
+#define MADERA_DSP_IRQ1 0x0001
+
+/* (0x1601) ADSP2_IRQ1 */
+#define MADERA_DSP_IRQ4 0x0002
+#define MADERA_DSP_IRQ3 0x0001
+
+/* (0x1602) ADSP2_IRQ2 */
+#define MADERA_DSP_IRQ6 0x0002
+#define MADERA_DSP_IRQ5 0x0001
+
+/* (0x1603) ADSP2_IRQ3 */
+#define MADERA_DSP_IRQ8 0x0002
+#define MADERA_DSP_IRQ7 0x0001
+
+/* (0x1604) ADSP2_IRQ4 */
+#define MADERA_DSP_IRQ10 0x0002
+#define MADERA_DSP_IRQ9 0x0001
+
+/* (0x1605) ADSP2_IRQ5 */
+#define MADERA_DSP_IRQ12 0x0002
+#define MADERA_DSP_IRQ11 0x0001
+
+/* (0x1606) ADSP2_IRQ6 */
+#define MADERA_DSP_IRQ14 0x0002
+#define MADERA_DSP_IRQ13 0x0001
+
+/* (0x1607) ADSP2_IRQ7 */
+#define MADERA_DSP_IRQ16 0x0002
+#define MADERA_DSP_IRQ15 0x0001
+
+/* (0x1700) GPIO1_CTRL_1 */
+#define MADERA_GP1_LVL 0x8000
+#define MADERA_GP1_LVL_MASK 0x8000
+#define MADERA_GP1_LVL_SHIFT 15
+#define MADERA_GP1_OP_CFG 0x4000
+#define MADERA_GP1_OP_CFG_MASK 0x4000
+#define MADERA_GP1_OP_CFG_SHIFT 14
+#define MADERA_GP1_DB 0x2000
+#define MADERA_GP1_DB_MASK 0x2000
+#define MADERA_GP1_DB_SHIFT 13
+#define MADERA_GP1_POL 0x1000
+#define MADERA_GP1_POL_MASK 0x1000
+#define MADERA_GP1_POL_SHIFT 12
+#define MADERA_GP1_IP_CFG 0x0800
+#define MADERA_GP1_IP_CFG_MASK 0x0800
+#define MADERA_GP1_IP_CFG_SHIFT 11
+#define MADERA_GP1_FN_MASK 0x03FF
+#define MADERA_GP1_FN_SHIFT 0
+
+/* (0x1701) GPIO1_CTRL_2 */
+#define MADERA_GP1_DIR 0x8000
+#define MADERA_GP1_DIR_MASK 0x8000
+#define MADERA_GP1_DIR_SHIFT 15
+#define MADERA_GP1_PU 0x4000
+#define MADERA_GP1_PU_MASK 0x4000
+#define MADERA_GP1_PU_SHIFT 14
+#define MADERA_GP1_PD 0x2000
+#define MADERA_GP1_PD_MASK 0x2000
+#define MADERA_GP1_PD_SHIFT 13
+#define MADERA_GP1_DRV_STR_MASK 0x1800
+#define MADERA_GP1_DRV_STR_SHIFT 11
+
+/* (0x1800) IRQ1_Status_1 */
+#define MADERA_CTRLIF_ERR_EINT1 0x1000
+#define MADERA_CTRLIF_ERR_EINT1_MASK 0x1000
+#define MADERA_CTRLIF_ERR_EINT1_SHIFT 12
+#define MADERA_SYSCLK_FAIL_EINT1 0x0200
+#define MADERA_SYSCLK_FAIL_EINT1_MASK 0x0200
+#define MADERA_SYSCLK_FAIL_EINT1_SHIFT 9
+#define MADERA_CLOCK_DETECT_EINT1 0x0100
+#define MADERA_CLOCK_DETECT_EINT1_MASK 0x0100
+#define MADERA_CLOCK_DETECT_EINT1_SHIFT 8
+#define MADERA_BOOT_DONE_EINT1 0x0080
+#define MADERA_BOOT_DONE_EINT1_MASK 0x0080
+#define MADERA_BOOT_DONE_EINT1_SHIFT 7
+
+/* (0x1801) IRQ1_Status_2 */
+#define MADERA_FLLAO_LOCK_EINT1 0x0800
+#define MADERA_FLLAO_LOCK_EINT1_MASK 0x0800
+#define MADERA_FLLAO_LOCK_EINT1_SHIFT 11
+#define MADERA_FLL3_LOCK_EINT1 0x0400
+#define MADERA_FLL3_LOCK_EINT1_MASK 0x0400
+#define MADERA_FLL3_LOCK_EINT1_SHIFT 10
+#define MADERA_FLL2_LOCK_EINT1 0x0200
+#define MADERA_FLL2_LOCK_EINT1_MASK 0x0200
+#define MADERA_FLL2_LOCK_EINT1_SHIFT 9
+#define MADERA_FLL1_LOCK_EINT1 0x0100
+#define MADERA_FLL1_LOCK_EINT1_MASK 0x0100
+#define MADERA_FLL1_LOCK_EINT1_SHIFT 8
+
+/* (0x1805) IRQ1_Status_6 */
+#define MADERA_MICDET2_EINT1 0x0200
+#define MADERA_MICDET2_EINT1_MASK 0x0200
+#define MADERA_MICDET2_EINT1_SHIFT 9
+#define MADERA_MICDET1_EINT1 0x0100
+#define MADERA_MICDET1_EINT1_MASK 0x0100
+#define MADERA_MICDET1_EINT1_SHIFT 8
+#define MADERA_HPDET_EINT1 0x0001
+#define MADERA_HPDET_EINT1_MASK 0x0001
+#define MADERA_HPDET_EINT1_SHIFT 0
+
+/* (0x1806) IRQ1_Status_7 */
+#define MADERA_MICD_CLAMP_FALL_EINT1 0x0020
+#define MADERA_MICD_CLAMP_FALL_EINT1_MASK 0x0020
+#define MADERA_MICD_CLAMP_FALL_EINT1_SHIFT 5
+#define MADERA_MICD_CLAMP_RISE_EINT1 0x0010
+#define MADERA_MICD_CLAMP_RISE_EINT1_MASK 0x0010
+#define MADERA_MICD_CLAMP_RISE_EINT1_SHIFT 4
+#define MADERA_JD2_FALL_EINT1 0x0008
+#define MADERA_JD2_FALL_EINT1_MASK 0x0008
+#define MADERA_JD2_FALL_EINT1_SHIFT 3
+#define MADERA_JD2_RISE_EINT1 0x0004
+#define MADERA_JD2_RISE_EINT1_MASK 0x0004
+#define MADERA_JD2_RISE_EINT1_SHIFT 2
+#define MADERA_JD1_FALL_EINT1 0x0002
+#define MADERA_JD1_FALL_EINT1_MASK 0x0002
+#define MADERA_JD1_FALL_EINT1_SHIFT 1
+#define MADERA_JD1_RISE_EINT1 0x0001
+#define MADERA_JD1_RISE_EINT1_MASK 0x0001
+#define MADERA_JD1_RISE_EINT1_SHIFT 0
+
+/* (0x1808) IRQ1_Status_9 */
+#define MADERA_ASRC2_IN2_LOCK_EINT1 0x0800
+#define MADERA_ASRC2_IN2_LOCK_EINT1_MASK 0x0800
+#define MADERA_ASRC2_IN2_LOCK_EINT1_SHIFT 11
+#define MADERA_ASRC2_IN1_LOCK_EINT1 0x0400
+#define MADERA_ASRC2_IN1_LOCK_EINT1_MASK 0x0400
+#define MADERA_ASRC2_IN1_LOCK_EINT1_SHIFT 10
+#define MADERA_ASRC1_IN2_LOCK_EINT1 0x0200
+#define MADERA_ASRC1_IN2_LOCK_EINT1_MASK 0x0200
+#define MADERA_ASRC1_IN2_LOCK_EINT1_SHIFT 9
+#define MADERA_ASRC1_IN1_LOCK_EINT1 0x0100
+#define MADERA_ASRC1_IN1_LOCK_EINT1_MASK 0x0100
+#define MADERA_ASRC1_IN1_LOCK_EINT1_SHIFT 8
+#define MADERA_DRC2_SIG_DET_EINT1 0x0002
+#define MADERA_DRC2_SIG_DET_EINT1_MASK 0x0002
+#define MADERA_DRC2_SIG_DET_EINT1_SHIFT 1
+#define MADERA_DRC1_SIG_DET_EINT1 0x0001
+#define MADERA_DRC1_SIG_DET_EINT1_MASK 0x0001
+#define MADERA_DRC1_SIG_DET_EINT1_SHIFT 0
+
+/* (0x180A) IRQ1_Status_11 */
+#define MADERA_DSP_IRQ16_EINT1 0x8000
+#define MADERA_DSP_IRQ16_EINT1_MASK 0x8000
+#define MADERA_DSP_IRQ16_EINT1_SHIFT 15
+#define MADERA_DSP_IRQ15_EINT1 0x4000
+#define MADERA_DSP_IRQ15_EINT1_MASK 0x4000
+#define MADERA_DSP_IRQ15_EINT1_SHIFT 14
+#define MADERA_DSP_IRQ14_EINT1 0x2000
+#define MADERA_DSP_IRQ14_EINT1_MASK 0x2000
+#define MADERA_DSP_IRQ14_EINT1_SHIFT 13
+#define MADERA_DSP_IRQ13_EINT1 0x1000
+#define MADERA_DSP_IRQ13_EINT1_MASK 0x1000
+#define MADERA_DSP_IRQ13_EINT1_SHIFT 12
+#define MADERA_DSP_IRQ12_EINT1 0x0800
+#define MADERA_DSP_IRQ12_EINT1_MASK 0x0800
+#define MADERA_DSP_IRQ12_EINT1_SHIFT 11
+#define MADERA_DSP_IRQ11_EINT1 0x0400
+#define MADERA_DSP_IRQ11_EINT1_MASK 0x0400
+#define MADERA_DSP_IRQ11_EINT1_SHIFT 10
+#define MADERA_DSP_IRQ10_EINT1 0x0200
+#define MADERA_DSP_IRQ10_EINT1_MASK 0x0200
+#define MADERA_DSP_IRQ10_EINT1_SHIFT 9
+#define MADERA_DSP_IRQ9_EINT1 0x0100
+#define MADERA_DSP_IRQ9_EINT1_MASK 0x0100
+#define MADERA_DSP_IRQ9_EINT1_SHIFT 8
+#define MADERA_DSP_IRQ8_EINT1 0x0080
+#define MADERA_DSP_IRQ8_EINT1_MASK 0x0080
+#define MADERA_DSP_IRQ8_EINT1_SHIFT 7
+#define MADERA_DSP_IRQ7_EINT1 0x0040
+#define MADERA_DSP_IRQ7_EINT1_MASK 0x0040
+#define MADERA_DSP_IRQ7_EINT1_SHIFT 6
+#define MADERA_DSP_IRQ6_EINT1 0x0020
+#define MADERA_DSP_IRQ6_EINT1_MASK 0x0020
+#define MADERA_DSP_IRQ6_EINT1_SHIFT 5
+#define MADERA_DSP_IRQ5_EINT1 0x0010
+#define MADERA_DSP_IRQ5_EINT1_MASK 0x0010
+#define MADERA_DSP_IRQ5_EINT1_SHIFT 4
+#define MADERA_DSP_IRQ4_EINT1 0x0008
+#define MADERA_DSP_IRQ4_EINT1_MASK 0x0008
+#define MADERA_DSP_IRQ4_EINT1_SHIFT 3
+#define MADERA_DSP_IRQ3_EINT1 0x0004
+#define MADERA_DSP_IRQ3_EINT1_MASK 0x0004
+#define MADERA_DSP_IRQ3_EINT1_SHIFT 2
+#define MADERA_DSP_IRQ2_EINT1 0x0002
+#define MADERA_DSP_IRQ2_EINT1_MASK 0x0002
+#define MADERA_DSP_IRQ2_EINT1_SHIFT 1
+#define MADERA_DSP_IRQ1_EINT1 0x0001
+#define MADERA_DSP_IRQ1_EINT1_MASK 0x0001
+#define MADERA_DSP_IRQ1_EINT1_SHIFT 0
+
+/* (0x180B) IRQ1_Status_12 */
+#define MADERA_SPKOUTR_SC_EINT1 0x0080
+#define MADERA_SPKOUTR_SC_EINT1_MASK 0x0080
+#define MADERA_SPKOUTR_SC_EINT1_SHIFT 7
+#define MADERA_SPKOUTL_SC_EINT1 0x0040
+#define MADERA_SPKOUTL_SC_EINT1_MASK 0x0040
+#define MADERA_SPKOUTL_SC_EINT1_SHIFT 6
+#define MADERA_HP3R_SC_EINT1 0x0020
+#define MADERA_HP3R_SC_EINT1_MASK 0x0020
+#define MADERA_HP3R_SC_EINT1_SHIFT 5
+#define MADERA_HP3L_SC_EINT1 0x0010
+#define MADERA_HP3L_SC_EINT1_MASK 0x0010
+#define MADERA_HP3L_SC_EINT1_SHIFT 4
+#define MADERA_HP2R_SC_EINT1 0x0008
+#define MADERA_HP2R_SC_EINT1_MASK 0x0008
+#define MADERA_HP2R_SC_EINT1_SHIFT 3
+#define MADERA_HP2L_SC_EINT1 0x0004
+#define MADERA_HP2L_SC_EINT1_MASK 0x0004
+#define MADERA_HP2L_SC_EINT1_SHIFT 2
+#define MADERA_HP1R_SC_EINT1 0x0002
+#define MADERA_HP1R_SC_EINT1_MASK 0x0002
+#define MADERA_HP1R_SC_EINT1_SHIFT 1
+#define MADERA_HP1L_SC_EINT1 0x0001
+#define MADERA_HP1L_SC_EINT1_MASK 0x0001
+#define MADERA_HP1L_SC_EINT1_SHIFT 0
+
+/* (0x180E) IRQ1_Status_15 */
+#define MADERA_SPK_OVERHEAT_WARN_EINT1 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
+#define MADERA_SPK_OVERHEAT_EINT1 0x0002
+#define MADERA_SPK_OVERHEAT_EINT1_MASK 0x0002
+#define MADERA_SPK_OVERHEAT_EINT1_SHIFT 1
+#define MADERA_SPK_SHUTDOWN_EINT1 0x0001
+#define MADERA_SPK_SHUTDOWN_EINT1_MASK 0x0001
+#define MADERA_SPK_SHUTDOWN_EINT1_SHIFT 0
+
+/* (0x1820) - IRQ1 Status 33 */
+#define MADERA_DSP7_BUS_ERR_EINT1 0x0040
+#define MADERA_DSP7_BUS_ERR_EINT1_MASK 0x0040
+#define MADERA_DSP7_BUS_ERR_EINT1_SHIFT 6
+#define MADERA_DSP6_BUS_ERR_EINT1 0x0020
+#define MADERA_DSP6_BUS_ERR_EINT1_MASK 0x0020
+#define MADERA_DSP6_BUS_ERR_EINT1_SHIFT 5
+#define MADERA_DSP5_BUS_ERR_EINT1 0x0010
+#define MADERA_DSP5_BUS_ERR_EINT1_MASK 0x0010
+#define MADERA_DSP5_BUS_ERR_EINT1_SHIFT 4
+#define MADERA_DSP4_BUS_ERR_EINT1 0x0008
+#define MADERA_DSP4_BUS_ERR_EINT1_MASK 0x0008
+#define MADERA_DSP4_BUS_ERR_EINT1_SHIFT 3
+#define MADERA_DSP3_BUS_ERR_EINT1 0x0004
+#define MADERA_DSP3_BUS_ERR_EINT1_MASK 0x0004
+#define MADERA_DSP3_BUS_ERR_EINT1_SHIFT 2
+#define MADERA_DSP2_BUS_ERR_EINT1 0x0002
+#define MADERA_DSP2_BUS_ERR_EINT1_MASK 0x0002
+#define MADERA_DSP2_BUS_ERR_EINT1_SHIFT 1
+#define MADERA_DSP1_BUS_ERR_EINT1 0x0001
+#define MADERA_DSP1_BUS_ERR_EINT1_MASK 0x0001
+#define MADERA_DSP1_BUS_ERR_EINT1_SHIFT 0
+
+/* (0x1845) IRQ1_Mask_6 */
+#define MADERA_IM_MICDET2_EINT1 0x0200
+#define MADERA_IM_MICDET2_EINT1_MASK 0x0200
+#define MADERA_IM_MICDET2_EINT1_SHIFT 9
+#define MADERA_IM_MICDET1_EINT1 0x0100
+#define MADERA_IM_MICDET1_EINT1_MASK 0x0100
+#define MADERA_IM_MICDET1_EINT1_SHIFT 8
+#define MADERA_IM_HPDET_EINT1 0x0001
+#define MADERA_IM_HPDET_EINT1_MASK 0x0001
+#define MADERA_IM_HPDET_EINT1_SHIFT 0
+/* (0x184E) IRQ1_Mask_15 */
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1 0x0004
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
+#define MADERA_IM_SPK_OVERHEAT_EINT1 0x0002
+#define MADERA_IM_SPK_OVERHEAT_EINT1_MASK 0x0002
+#define MADERA_IM_SPK_OVERHEAT_EINT1_SHIFT 1
+#define MADERA_IM_SPK_SHUTDOWN_EINT1 0x0001
+#define MADERA_IM_SPK_SHUTDOWN_EINT1_MASK 0x0001
+#define MADERA_IM_SPK_SHUTDOWN_EINT1_SHIFT 0
+
+/* (0x1880) - IRQ1 Raw Status 1 */
+#define MADERA_CTRLIF_ERR_STS1 0x1000
+#define MADERA_CTRLIF_ERR_STS1_MASK 0x1000
+#define MADERA_CTRLIF_ERR_STS1_SHIFT 12
+#define MADERA_SYSCLK_FAIL_STS1 0x0200
+#define MADERA_SYSCLK_FAIL_STS1_MASK 0x0200
+#define MADERA_SYSCLK_FAIL_STS1_SHIFT 9
+#define MADERA_CLOCK_DETECT_STS1 0x0100
+#define MADERA_CLOCK_DETECT_STS1_MASK 0x0100
+#define MADERA_CLOCK_DETECT_STS1_SHIFT 8
+#define MADERA_BOOT_DONE_STS1 0x0080
+#define MADERA_BOOT_DONE_STS1_MASK 0x0080
+#define MADERA_BOOT_DONE_STS1_SHIFT 7
+
+/* (0x1881) - IRQ1 Raw Status 2 */
+#define MADERA_FLL3_LOCK_STS1 0x0400
+#define MADERA_FLL3_LOCK_STS1_MASK 0x0400
+#define MADERA_FLL3_LOCK_STS1_SHIFT 10
+#define MADERA_FLL2_LOCK_STS1 0x0200
+#define MADERA_FLL2_LOCK_STS1_MASK 0x0200
+#define MADERA_FLL2_LOCK_STS1_SHIFT 9
+#define MADERA_FLL1_LOCK_STS1 0x0100
+#define MADERA_FLL1_LOCK_STS1_MASK 0x0100
+#define MADERA_FLL1_LOCK_STS1_SHIFT 8
+
+/* (0x1886) - IRQ1 Raw Status 7 */
+#define MADERA_MICD_CLAMP_FALL_STS1 0x0020
+#define MADERA_MICD_CLAMP_FALL_STS1_MASK 0x0020
+#define MADERA_MICD_CLAMP_FALL_STS1_SHIFT 5
+#define MADERA_MICD_CLAMP_RISE_STS1 0x0010
+#define MADERA_MICD_CLAMP_RISE_STS1_MASK 0x0010
+#define MADERA_MICD_CLAMP_RISE_STS1_SHIFT 4
+#define MADERA_JD2_FALL_STS1 0x0008
+#define MADERA_JD2_FALL_STS1_MASK 0x0008
+#define MADERA_JD2_FALL_STS1_SHIFT 3
+#define MADERA_JD2_RISE_STS1 0x0004
+#define MADERA_JD2_RISE_STS1_MASK 0x0004
+#define MADERA_JD2_RISE_STS1_SHIFT 2
+#define MADERA_JD1_FALL_STS1 0x0002
+#define MADERA_JD1_FALL_STS1_MASK 0x0002
+#define MADERA_JD1_FALL_STS1_SHIFT 1
+#define MADERA_JD1_RISE_STS1 0x0001
+#define MADERA_JD1_RISE_STS1_MASK 0x0001
+#define MADERA_JD1_RISE_STS1_SHIFT 0
+
+/* (0x188E) - IRQ1 Raw Status 15 */
+#define MADERA_SPK_OVERHEAT_WARN_STS1 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_STS1_MASK 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_STS1_SHIFT 2
+#define MADERA_SPK_OVERHEAT_STS1 0x0002
+#define MADERA_SPK_OVERHEAT_STS1_MASK 0x0002
+#define MADERA_SPK_OVERHEAT_STS1_SHIFT 1
+#define MADERA_SPK_SHUTDOWN_STS1 0x0001
+#define MADERA_SPK_SHUTDOWN_STS1_MASK 0x0001
+#define MADERA_SPK_SHUTDOWN_STS1_SHIFT 0
+
+/* (0x1A06) Interrupt_Debounce_7 */
+#define MADERA_MICD_CLAMP_DB 0x0010
+#define MADERA_MICD_CLAMP_DB_MASK 0x0010
+#define MADERA_MICD_CLAMP_DB_SHIFT 4
+#define MADERA_JD2_DB 0x0004
+#define MADERA_JD2_DB_MASK 0x0004
+#define MADERA_JD2_DB_SHIFT 2
+#define MADERA_JD1_DB 0x0001
+#define MADERA_JD1_DB_MASK 0x0001
+#define MADERA_JD1_DB_SHIFT 0
+
+/* (0x1A0E) Interrupt_Debounce_15 */
+#define MADERA_SPK_OVERHEAT_WARN_DB 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_DB_MASK 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_DB_SHIFT 2
+#define MADERA_SPK_OVERHEAT_DB 0x0002
+#define MADERA_SPK_OVERHEAT_DB_MASK 0x0002
+#define MADERA_SPK_OVERHEAT_DB_SHIFT 1
+
+/* (0x1A80) IRQ1_CTRL */
+#define MADERA_IM_IRQ1 0x0800
+#define MADERA_IM_IRQ1_MASK 0x0800
+#define MADERA_IM_IRQ1_SHIFT 11
+#define MADERA_IRQ_POL 0x0400
+#define MADERA_IRQ_POL_MASK 0x0400
+#define MADERA_IRQ_POL_SHIFT 10
+
+/* (0x20004) OTP_HPDET_Cal_1 */
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11 0xFF000000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11_MASK 0xFF000000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11_SHIFT 24
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10 0x00FF0000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10_MASK 0x00FF0000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10_SHIFT 16
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01 0x0000FF00
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01_MASK 0x0000FF00
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01_SHIFT 8
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00 0x000000FF
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00_MASK 0x000000FF
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00_SHIFT 0
+
+/* (0x20006) OTP_HPDET_Cal_2 */
+#define MADERA_OTP_HPDET_GRADIENT_1X 0x0000FF00
+#define MADERA_OTP_HPDET_GRADIENT_1X_MASK 0x0000FF00
+#define MADERA_OTP_HPDET_GRADIENT_1X_SHIFT 8
+#define MADERA_OTP_HPDET_GRADIENT_0X 0x000000FF
+#define MADERA_OTP_HPDET_GRADIENT_0X_MASK 0x000000FF
+#define MADERA_OTP_HPDET_GRADIENT_0X_SHIFT 0
+
+#endif
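
Note (illustrative only, not part of the patch): the MASK/SHIFT pairs above are plain field accessors. A minimal sketch of their intended use, assuming a struct regmap *regmap for the codec; the raw 0x0EF3 address stands in for the ISRC2_CTRL_1 register define earlier in this header, and the rate code is hypothetical:

	unsigned int rate = 2;	/* hypothetical sample-rate code */

	/* Program the ISRC2 high-side sample-rate field in ISRC2_CTRL_1. */
	regmap_update_bits(regmap, 0x0EF3, MADERA_ISRC2_FSH_MASK,
			   rate << MADERA_ISRC2_FSH_SHIFT);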
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index df75234f979d..dd51a37fa37f 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip
*
- * Copyright (C) 2014 Samsung Electrnoics
+ * Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MAX14577_PRIVATE_H__
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index d81b52bb8bee..0fda5c2e745a 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -1,20 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577.h - Driver for the Maxim 14577/77836
*
- * Copyright (C) 2014 Samsung Electrnoics
+ * Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This driver is based on max8997.h
*
* MAX14577 has MUIC, Charger devices.
diff --git a/include/linux/mfd/max5970.h b/include/linux/mfd/max5970.h
new file mode 100644
index 000000000000..fc50e89edfaa
--- /dev/null
+++ b/include/linux/mfd/max5970.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device driver for regulators in MAX5970 and MAX5978 IC
+ *
+ * Copyright (c) 2022 9elements GmbH
+ *
+ * Author: Patrick Rudolph <patrick.rudolph@9elements.com>
+ */
+
+#ifndef _MFD_MAX5970_H
+#define _MFD_MAX5970_H
+
+#include <linux/regmap.h>
+
+#define MAX5970_NUM_SWITCHES 2
+#define MAX5978_NUM_SWITCHES 1
+#define MAX5970_NUM_LEDS 4
+
+#define MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4)
+#define MAX5970_REG_CURRENT_H(ch) (0x00 + (ch) * 4)
+#define MAX5970_REG_VOLTAGE_L(ch) (0x03 + (ch) * 4)
+#define MAX5970_REG_VOLTAGE_H(ch) (0x02 + (ch) * 4)
+#define MAX5970_REG_MON_RANGE 0x18
+#define MAX5970_MON_MASK 0x3
+#define MAX5970_MON(reg, ch) (((reg) >> ((ch) * 2)) & MAX5970_MON_MASK)
+#define MAX5970_MON_MAX_RANGE_UV 16000000
+
+#define MAX5970_REG_CH_UV_WARN_H(ch) (0x1A + (ch) * 10)
+#define MAX5970_REG_CH_UV_WARN_L(ch) (0x1B + (ch) * 10)
+#define MAX5970_REG_CH_UV_CRIT_H(ch) (0x1C + (ch) * 10)
+#define MAX5970_REG_CH_UV_CRIT_L(ch) (0x1D + (ch) * 10)
+#define MAX5970_REG_CH_OV_WARN_H(ch) (0x1E + (ch) * 10)
+#define MAX5970_REG_CH_OV_WARN_L(ch) (0x1F + (ch) * 10)
+#define MAX5970_REG_CH_OV_CRIT_H(ch) (0x20 + (ch) * 10)
+#define MAX5970_REG_CH_OV_CRIT_L(ch) (0x21 + (ch) * 10)
+
+#define MAX5970_VAL2REG_H(x) (((x) >> 2) & 0xFF)
+#define MAX5970_VAL2REG_L(x) ((x) & 0x3)
+
+#define MAX5970_REG_DAC_FAST(ch) (0x2E + (ch))
+
+#define MAX5970_FAST2SLOW_RATIO 200
+
+#define MAX5970_REG_STATUS0 0x31
+#define MAX5970_CB_IFAULTF(ch) (1 << (ch))
+#define MAX5970_CB_IFAULTS(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_STATUS1 0x32
+#define STATUS1_PROT_MASK 0x3
+#define STATUS1_PROT(reg) \
+ (((reg) >> 6) & STATUS1_PROT_MASK)
+#define STATUS1_PROT_SHUTDOWN 0
+#define STATUS1_PROT_CLEAR_PG 1
+#define STATUS1_PROT_ALERT_ONLY 2
+
+#define MAX5970_REG_STATUS2 0x33
+#define MAX5970_IRNG_MASK 0x3
+#define MAX5970_IRNG(reg, ch) \
+ (((reg) >> ((ch) * 2)) & MAX5970_IRNG_MASK)
+
+#define MAX5970_REG_STATUS3 0x34
+#define MAX5970_STATUS3_ALERT BIT(4)
+#define MAX5970_STATUS3_PG(ch) BIT(ch)
+
+#define MAX5970_REG_FAULT0 0x35
+#define UV_STATUS_WARN(ch) (1 << (ch))
+#define UV_STATUS_CRIT(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_FAULT1 0x36
+#define OV_STATUS_WARN(ch) (1 << (ch))
+#define OV_STATUS_CRIT(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_FAULT2 0x37
+#define OC_STATUS_WARN(ch) (1 << (ch))
+
+#define MAX5970_REG_CHXEN 0x3b
+#define CHXEN(ch) (3 << ((ch) * 2))
+
+#define MAX5970_REG_LED_FLASH 0x43
+
+#define MAX_REGISTERS 0x49
+#define ADC_MASK 0x3FF
+
+#endif /* _MFD_MAX5970_H */
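
Note (illustrative only, not part of the patch): a hedged usage sketch for the MAX5970 helper macros above, assuming a struct regmap *regmap; the channel index and 10-bit threshold value are hypothetical:

	unsigned int mon, range, thresh = 0x2AB;	/* hypothetical 10-bit value */

	/* Split the threshold across the H/L register pair of channel 0. */
	regmap_write(regmap, MAX5970_REG_CH_UV_WARN_H(0), MAX5970_VAL2REG_H(thresh));
	regmap_write(regmap, MAX5970_REG_CH_UV_WARN_L(0), MAX5970_VAL2REG_L(thresh));

	/* Decode channel 0's voltage-monitoring range from the packed register. */
	regmap_read(regmap, MAX5970_REG_MON_RANGE, &mon);
	range = MAX5970_MON(mon, 0);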
diff --git a/include/linux/mfd/max7360.h b/include/linux/mfd/max7360.h
new file mode 100644
index 000000000000..44cf2bf651a2
--- /dev/null
+++ b/include/linux/mfd/max7360.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __LINUX_MFD_MAX7360_H
+#define __LINUX_MFD_MAX7360_H
+
+#include <linux/bits.h>
+
+#define MAX7360_MAX_KEY_ROWS 8
+#define MAX7360_MAX_KEY_COLS 8
+#define MAX7360_MAX_KEY_NUM (MAX7360_MAX_KEY_ROWS * MAX7360_MAX_KEY_COLS)
+#define MAX7360_ROW_SHIFT 3
+
+#define MAX7360_MAX_GPIO 8
+#define MAX7360_MAX_GPO 6
+#define MAX7360_PORT_PWM_COUNT 8
+#define MAX7360_PORT_RTR_PIN (MAX7360_PORT_PWM_COUNT - 1)
+
+/*
+ * MAX7360 registers
+ */
+#define MAX7360_REG_KEYFIFO 0x00
+#define MAX7360_REG_CONFIG 0x01
+#define MAX7360_REG_DEBOUNCE 0x02
+#define MAX7360_REG_INTERRUPT 0x03
+#define MAX7360_REG_PORTS 0x04
+#define MAX7360_REG_KEYREP 0x05
+#define MAX7360_REG_SLEEP 0x06
+
+/*
+ * MAX7360 GPIO registers
+ *
+ * All these registers are reset together when writing bit 3 of
+ * MAX7360_REG_GPIOCFG.
+ */
+#define MAX7360_REG_GPIOCFG 0x40
+#define MAX7360_REG_GPIOCTRL 0x41
+#define MAX7360_REG_GPIODEB 0x42
+#define MAX7360_REG_GPIOCURR 0x43
+#define MAX7360_REG_GPIOOUTM 0x44
+#define MAX7360_REG_PWMCOM 0x45
+#define MAX7360_REG_RTRCFG 0x46
+#define MAX7360_REG_I2C_TIMEOUT 0x48
+#define MAX7360_REG_GPIOIN 0x49
+#define MAX7360_REG_RTR_CNT 0x4A
+#define MAX7360_REG_PWMBASE 0x50
+#define MAX7360_REG_PWMCFGBASE 0x58
+
+#define MAX7360_REG_GPIO_LAST 0x5F
+
+#define MAX7360_REG_PWM(x) (MAX7360_REG_PWMBASE + (x))
+#define MAX7360_REG_PWMCFG(x) (MAX7360_REG_PWMCFGBASE + (x))
+
+/*
+ * Configuration register bits
+ */
+#define MAX7360_FIFO_EMPTY 0x3F
+#define MAX7360_FIFO_OVERFLOW 0x7F
+#define MAX7360_FIFO_RELEASE BIT(6)
+#define MAX7360_FIFO_COL GENMASK(5, 3)
+#define MAX7360_FIFO_ROW GENMASK(2, 0)
+
+#define MAX7360_CFG_SLEEP BIT(7)
+#define MAX7360_CFG_INTERRUPT BIT(5)
+#define MAX7360_CFG_KEY_RELEASE BIT(3)
+#define MAX7360_CFG_WAKEUP BIT(1)
+#define MAX7360_CFG_TIMEOUT BIT(0)
+
+#define MAX7360_DEBOUNCE GENMASK(4, 0)
+#define MAX7360_DEBOUNCE_MIN 9
+#define MAX7360_DEBOUNCE_MAX 40
+#define MAX7360_PORTS GENMASK(8, 5)
+
+#define MAX7360_INTERRUPT_TIME_MASK GENMASK(4, 0)
+#define MAX7360_INTERRUPT_FIFO_MASK GENMASK(7, 5)
+
+#define MAX7360_PORT_CFG_INTERRUPT_MASK BIT(7)
+#define MAX7360_PORT_CFG_INTERRUPT_EDGES BIT(6)
+#define MAX7360_PORT_CFG_COMMON_PWM BIT(5)
+
+/*
+ * Autosleep register values
+ */
+#define MAX7360_AUTOSLEEP_8192MS 0x01
+#define MAX7360_AUTOSLEEP_4096MS 0x02
+#define MAX7360_AUTOSLEEP_2048MS 0x03
+#define MAX7360_AUTOSLEEP_1024MS 0x04
+#define MAX7360_AUTOSLEEP_512MS 0x05
+#define MAX7360_AUTOSLEEP_256MS 0x06
+
+#define MAX7360_GPIO_CFG_RTR_EN BIT(7)
+#define MAX7360_GPIO_CFG_GPIO_EN BIT(4)
+#define MAX7360_GPIO_CFG_GPIO_RST BIT(3)
+
+#define MAX7360_ROT_DEBOUNCE GENMASK(3, 0)
+#define MAX7360_ROT_DEBOUNCE_MIN 0
+#define MAX7360_ROT_DEBOUNCE_MAX 15
+#define MAX7360_ROT_INTCNT GENMASK(6, 4)
+#define MAX7360_ROT_INTCNT_DLY BIT(7)
+
+#define MAX7360_INT_INTI 0
+#define MAX7360_INT_INTK 1
+
+#define MAX7360_INT_GPIO 0
+#define MAX7360_INT_KEYPAD 1
+#define MAX7360_INT_ROTARY 2
+
+#define MAX7360_NR_INTERNAL_IRQS 3
+
+#endif
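
Note (illustrative only, not part of the patch): a sketch of the GPIO-block reset documented in the comment above and of the indexed PWM helpers, assuming a struct regmap *regmap and a hypothetical duty value:

	/* Reset the whole GPIO register block (bit 3 of GPIOCFG, see above). */
	regmap_set_bits(regmap, MAX7360_REG_GPIOCFG, MAX7360_GPIO_CFG_GPIO_RST);

	/* Program PWM channel 3 through the indexed register helpers. */
	regmap_write(regmap, MAX7360_REG_PWM(3), duty);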
diff --git a/include/linux/mfd/max77541.h b/include/linux/mfd/max77541.h
new file mode 100644
index 000000000000..fe5c0a3dc637
--- /dev/null
+++ b/include/linux/mfd/max77541.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __MFD_MAX77541_H
+#define __MFD_MAX77541_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* REGISTERS */
+#define MAX77541_REG_INT_SRC 0x00
+#define MAX77541_REG_INT_SRC_M 0x01
+
+#define MAX77541_BIT_INT_SRC_TOPSYS BIT(0)
+#define MAX77541_BIT_INT_SRC_BUCK BIT(1)
+
+#define MAX77541_REG_TOPSYS_INT 0x02
+#define MAX77541_REG_TOPSYS_INT_M 0x03
+
+#define MAX77541_BIT_TOPSYS_INT_TJ_120C BIT(0)
+#define MAX77541_BIT_TOPSYS_INT_TJ_140C BIT(1)
+#define MAX77541_BIT_TOPSYS_INT_TSHDN BIT(2)
+#define MAX77541_BIT_TOPSYS_INT_UVLO BIT(3)
+#define MAX77541_BIT_TOPSYS_INT_ALT_SWO BIT(4)
+#define MAX77541_BIT_TOPSYS_INT_EXT_FREQ_DET BIT(5)
+
+/* REGULATORS */
+#define MAX77541_REG_BUCK_INT 0x20
+#define MAX77541_REG_BUCK_INT_M 0x21
+
+#define MAX77541_BIT_BUCK_INT_M1_POK_FLT BIT(0)
+#define MAX77541_BIT_BUCK_INT_M2_POK_FLT BIT(1)
+#define MAX77541_BIT_BUCK_INT_M1_SCFLT BIT(4)
+#define MAX77541_BIT_BUCK_INT_M2_SCFLT BIT(5)
+
+#define MAX77541_REG_EN_CTRL 0x0B
+
+#define MAX77541_BIT_M1_EN BIT(0)
+#define MAX77541_BIT_M2_EN BIT(1)
+
+#define MAX77541_REG_M1_VOUT 0x23
+#define MAX77541_REG_M2_VOUT 0x33
+
+#define MAX77541_BITS_MX_VOUT GENMASK(7, 0)
+
+#define MAX77541_REG_M1_CFG1 0x25
+#define MAX77541_REG_M2_CFG1 0x35
+
+#define MAX77541_BITS_MX_CFG1_RNG GENMASK(7, 6)
+
+/* ADC */
+#define MAX77541_REG_ADC_INT 0x70
+#define MAX77541_REG_ADC_INT_M 0x71
+
+#define MAX77541_BIT_ADC_INT_CH1_I BIT(0)
+#define MAX77541_BIT_ADC_INT_CH2_I BIT(1)
+#define MAX77541_BIT_ADC_INT_CH3_I BIT(2)
+#define MAX77541_BIT_ADC_INT_CH6_I BIT(5)
+
+#define MAX77541_REG_ADC_DATA_CH1 0x72
+#define MAX77541_REG_ADC_DATA_CH2 0x73
+#define MAX77541_REG_ADC_DATA_CH3 0x74
+#define MAX77541_REG_ADC_DATA_CH6 0x77
+
+/* INTERRUPT MASKS */
+#define MAX77541_REG_INT_SRC_MASK 0x00
+#define MAX77541_REG_TOPSYS_INT_MASK 0x00
+#define MAX77541_REG_BUCK_INT_MASK 0x00
+
+#define MAX77541_MAX_REGULATORS 2
+
+enum max7754x_ids {
+ MAX77540 = 1,
+ MAX77541,
+};
+
+struct regmap;
+struct regmap_irq_chip_data;
+struct i2c_client;
+
+struct max77541 {
+ struct i2c_client *i2c;
+ struct regmap *regmap;
+ enum max7754x_ids id;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *irq_buck;
+ struct regmap_irq_chip_data *irq_topsys;
+ struct regmap_irq_chip_data *irq_adc;
+};
+
+#endif /* __MFD_MAX77541_H */
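
Note (illustrative only, not part of the patch): the GENMASK-based field defines above pair naturally with FIELD_GET() from linux/bitfield.h. A sketch, assuming a struct regmap *regmap:

	unsigned int cfg, range;

	/* Read buck M1's output-voltage range field. */
	regmap_read(regmap, MAX77541_REG_M1_CFG1, &cfg);
	range = FIELD_GET(MAX77541_BITS_MX_CFG1_RNG, cfg);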
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index ad2a9a852aea..f552ef5b1100 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Defining registers address and its bit definitions of MAX77620 and MAX20024
*
* Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#ifndef _MFD_MAX77620_H_
@@ -136,8 +133,8 @@
#define MAX77620_FPS_PERIOD_MIN_US 40
#define MAX20024_FPS_PERIOD_MIN_US 20
-#define MAX77620_FPS_PERIOD_MAX_US 2560
-#define MAX20024_FPS_PERIOD_MAX_US 5120
+#define MAX20024_FPS_PERIOD_MAX_US 2560
+#define MAX77620_FPS_PERIOD_MAX_US 5120
#define MAX77620_REG_FPS_GPIO1 0x54
#define MAX77620_REG_FPS_GPIO2 0x55
@@ -324,6 +321,7 @@ enum max77620_fps_src {
enum max77620_chip_id {
MAX77620,
MAX20024,
+ MAX77663,
};
struct max77620_chip {
@@ -331,7 +329,6 @@ struct max77620_chip {
struct regmap *rmap;
int chip_irq;
- int irq_base;
/* chip id */
enum max77620_chip_id chip_id;
diff --git a/include/linux/mfd/max77650.h b/include/linux/mfd/max77650.h
new file mode 100644
index 000000000000..c809e211a8cd
--- /dev/null
+++ b/include/linux/mfd/max77650.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 BayLibre SAS
+ * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * Common definitions for MAXIM 77650/77651 charger/power-supply.
+ */
+
+#ifndef MAX77650_H
+#define MAX77650_H
+
+#include <linux/bits.h>
+
+#define MAX77650_REG_INT_GLBL 0x00
+#define MAX77650_REG_INT_CHG 0x01
+#define MAX77650_REG_STAT_CHG_A 0x02
+#define MAX77650_REG_STAT_CHG_B 0x03
+#define MAX77650_REG_ERCFLAG 0x04
+#define MAX77650_REG_STAT_GLBL 0x05
+#define MAX77650_REG_INTM_GLBL 0x06
+#define MAX77650_REG_INTM_CHG 0x07
+#define MAX77650_REG_CNFG_GLBL 0x10
+#define MAX77650_REG_CID 0x11
+#define MAX77650_REG_CNFG_GPIO 0x12
+#define MAX77650_REG_CNFG_CHG_A 0x18
+#define MAX77650_REG_CNFG_CHG_B 0x19
+#define MAX77650_REG_CNFG_CHG_C 0x1a
+#define MAX77650_REG_CNFG_CHG_D 0x1b
+#define MAX77650_REG_CNFG_CHG_E 0x1c
+#define MAX77650_REG_CNFG_CHG_F 0x1d
+#define MAX77650_REG_CNFG_CHG_G 0x1e
+#define MAX77650_REG_CNFG_CHG_H 0x1f
+#define MAX77650_REG_CNFG_CHG_I 0x20
+#define MAX77650_REG_CNFG_SBB_TOP 0x28
+#define MAX77650_REG_CNFG_SBB0_A 0x29
+#define MAX77650_REG_CNFG_SBB0_B 0x2a
+#define MAX77650_REG_CNFG_SBB1_A 0x2b
+#define MAX77650_REG_CNFG_SBB1_B 0x2c
+#define MAX77650_REG_CNFG_SBB2_A 0x2d
+#define MAX77650_REG_CNFG_SBB2_B 0x2e
+#define MAX77650_REG_CNFG_LDO_A 0x38
+#define MAX77650_REG_CNFG_LDO_B 0x39
+#define MAX77650_REG_CNFG_LED0_A 0x40
+#define MAX77650_REG_CNFG_LED1_A 0x41
+#define MAX77650_REG_CNFG_LED2_A 0x42
+#define MAX77650_REG_CNFG_LED0_B 0x43
+#define MAX77650_REG_CNFG_LED1_B 0x44
+#define MAX77650_REG_CNFG_LED2_B 0x45
+#define MAX77650_REG_CNFG_LED_TOP 0x46
+
+#define MAX77650_CID_MASK GENMASK(3, 0)
+#define MAX77650_CID_BITS(_reg) (_reg & MAX77650_CID_MASK)
+
+#define MAX77650_CID_77650A 0x03
+#define MAX77650_CID_77650C 0x0a
+#define MAX77650_CID_77651A 0x06
+#define MAX77650_CID_77651B 0x08
+
+#endif /* MAX77650_H */
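
Note (illustrative only, not part of the patch): a sketch of variant detection via the CID register, assuming a struct regmap *regmap:

	unsigned int cid;

	regmap_read(regmap, MAX77650_REG_CID, &cid);
	switch (MAX77650_CID_BITS(cid)) {
	case MAX77650_CID_77650A:
	case MAX77650_CID_77650C:
		/* MAX77650 variant */
		break;
	case MAX77650_CID_77651A:
	case MAX77650_CID_77651B:
		/* MAX77651 variant */
		break;
	}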
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 643dae777b43..e6b8b4014dc0 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686-private.h - Voltage regulator driver for the Maxim 77686/802
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77686_PRIV_H
@@ -146,35 +133,35 @@ enum max77686_pmic_reg {
/* Reserved: 0x7A-0x7D */
MAX77686_REG_BBAT_CHG = 0x7E,
- MAX77686_REG_32KHZ = 0x7F,
+ MAX77686_REG_32KHZ = 0x7F,
MAX77686_REG_PMIC_END = 0x80,
};
enum max77686_rtc_reg {
- MAX77686_RTC_INT = 0x00,
- MAX77686_RTC_INTM = 0x01,
+ MAX77686_RTC_INT = 0x00,
+ MAX77686_RTC_INTM = 0x01,
MAX77686_RTC_CONTROLM = 0x02,
MAX77686_RTC_CONTROL = 0x03,
MAX77686_RTC_UPDATE0 = 0x04,
/* Reserved: 0x5 */
MAX77686_WTSR_SMPL_CNTL = 0x06,
- MAX77686_RTC_SEC = 0x07,
- MAX77686_RTC_MIN = 0x08,
- MAX77686_RTC_HOUR = 0x09,
+ MAX77686_RTC_SEC = 0x07,
+ MAX77686_RTC_MIN = 0x08,
+ MAX77686_RTC_HOUR = 0x09,
MAX77686_RTC_WEEKDAY = 0x0A,
- MAX77686_RTC_MONTH = 0x0B,
- MAX77686_RTC_YEAR = 0x0C,
- MAX77686_RTC_DATE = 0x0D,
- MAX77686_ALARM1_SEC = 0x0E,
- MAX77686_ALARM1_MIN = 0x0F,
+ MAX77686_RTC_MONTH = 0x0B,
+ MAX77686_RTC_YEAR = 0x0C,
+ MAX77686_RTC_MONTHDAY = 0x0D,
+ MAX77686_ALARM1_SEC = 0x0E,
+ MAX77686_ALARM1_MIN = 0x0F,
MAX77686_ALARM1_HOUR = 0x10,
MAX77686_ALARM1_WEEKDAY = 0x11,
MAX77686_ALARM1_MONTH = 0x12,
MAX77686_ALARM1_YEAR = 0x13,
MAX77686_ALARM1_DATE = 0x14,
- MAX77686_ALARM2_SEC = 0x15,
- MAX77686_ALARM2_MIN = 0x16,
+ MAX77686_ALARM2_SEC = 0x15,
+ MAX77686_ALARM2_MIN = 0x16,
MAX77686_ALARM2_HOUR = 0x17,
MAX77686_ALARM2_WEEKDAY = 0x18,
MAX77686_ALARM2_MONTH = 0x19,
@@ -365,7 +352,7 @@ enum max77802_rtc_reg {
MAX77802_RTC_WEEKDAY = 0xCA,
MAX77802_RTC_MONTH = 0xCB,
MAX77802_RTC_YEAR = 0xCC,
- MAX77802_RTC_DATE = 0xCD,
+ MAX77802_RTC_MONTHDAY = 0xCD,
MAX77802_RTC_AE1 = 0xCE,
MAX77802_ALARM1_SEC = 0xCF,
MAX77802_ALARM1_MIN = 0xD0,
@@ -454,8 +441,4 @@ enum max77686_types {
TYPE_MAX77802,
};
-extern int max77686_irq_init(struct max77686_dev *max77686);
-extern void max77686_irq_exit(struct max77686_dev *max77686);
-extern int max77686_irq_resume(struct max77686_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77686_PRIV_H */
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index d4b72d519115..7c4624acd1db 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686.h - Driver for the Maxim 77686/802
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77686 has PMIC, RTC devices.
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
index 095b121aa725..ec2e1b2dceb8 100644
--- a/include/linux/mfd/max77693-common.h
+++ b/include/linux/mfd/max77693-common.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Common data shared between Maxim 77693 and 77843 drivers
+ * Common data shared between Maxim 77693, 77705 and 77843 drivers
*
* Copyright (C) 2015 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MFD_MAX77693_COMMON_H
@@ -15,6 +11,7 @@
enum max77693_types {
TYPE_MAX77693_UNKNOWN,
TYPE_MAX77693,
+ TYPE_MAX77705,
TYPE_MAX77843,
TYPE_MAX77693_NUM,
@@ -36,6 +33,7 @@ struct max77693_dev {
struct regmap *regmap_muic;
struct regmap *regmap_haptic; /* Only MAX77693 */
struct regmap *regmap_chg; /* Only MAX77843 */
+ struct regmap *regmap_leds; /* Only MAX77705 */
struct regmap_irq_chip_data *irq_data_led;
struct regmap_irq_chip_data *irq_data_topsys;
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 3c7a63b98ad6..8e7c35b5ea1c 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -1,24 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693-private.h - Voltage regulator driver for the Maxim 77693
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77693_PRIV_H
@@ -144,7 +131,7 @@ enum max77693_pmic_reg {
#define FLASH_INT_FLED1_SHORT BIT(3)
#define FLASH_INT_OVER_CURRENT BIT(4)
-/* Fast charge timer in in hours */
+/* Fast charge timer in hours */
#define DEFAULT_FAST_CHARGE_TIMER 4
/* microamps */
#define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000
@@ -230,6 +217,10 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT)
#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT)
+/* MAX77693_CHG_REG_CHG_CNFG_02 register */
+#define CHG_CNFG_02_CC_SHIFT 0
+#define CHG_CNFG_02_CC_MASK 0x3F
+
/* MAX77693_CHG_REG_CHG_CNFG_03 register */
#define CHG_CNFG_03_TOITH_SHIFT 0
#define CHG_CNFG_03_TOTIME_SHIFT 3
@@ -257,6 +248,7 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT)
/* MAX77693 CHG_CNFG_09 Register */
+#define CHG_CNFG_09_CHGIN_ILIM_SHIFT 0
#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F
/* MAX77693 CHG_CTRL Register */
@@ -418,7 +410,7 @@ enum max77693_haptic_reg {
MAX77693_HAPTIC_REG_END,
};
-/* max77693-pmic LSCNFG configuraton register */
+/* max77693-pmic LSCNFG configuration register */
#define MAX77693_PMIC_LOW_SYS_MASK 0x80
#define MAX77693_PMIC_LOW_SYS_SHIFT 7
@@ -427,17 +419,6 @@ enum max77693_haptic_reg {
#define MAX77693_CONFIG2_MEN 6
#define MAX77693_CONFIG2_HTYP 5
-enum max77693_irq_source {
- LED_INT = 0,
- TOPSYS_INT,
- CHG_INT,
- MUIC_INT1,
- MUIC_INT2,
- MUIC_INT3,
-
- MAX77693_IRQ_GROUP_NR,
-};
-
#define SRC_IRQ_CHARGER BIT(0)
#define SRC_IRQ_TOP BIT(1)
#define SRC_IRQ_FLASH BIT(2)
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index d450f687301b..8e77ebeb7cf1 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693.h - Driver for the Maxim 77693
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
diff --git a/include/linux/mfd/max77705-private.h b/include/linux/mfd/max77705-private.h
new file mode 100644
index 000000000000..214de7feeb8c
--- /dev/null
+++ b/include/linux/mfd/max77705-private.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Maxim MAX77705 definitions.
+ *
+ * Copyright (C) 2015 Samsung Electronics, Inc.
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_MAX77705_PRIV_H
+#define __LINUX_MFD_MAX77705_PRIV_H
+
+#define MAX77705_SRC_IRQ_CHG BIT(0)
+#define MAX77705_SRC_IRQ_TOP BIT(1)
+#define MAX77705_SRC_IRQ_FG BIT(2)
+#define MAX77705_SRC_IRQ_USBC BIT(3)
+#define MAX77705_SRC_IRQ_ALL (MAX77705_SRC_IRQ_CHG | MAX77705_SRC_IRQ_TOP | \
+ MAX77705_SRC_IRQ_FG | MAX77705_SRC_IRQ_USBC)
+
+/* MAX77705_PMIC_REG_PMICREV register */
+#define MAX77705_VERSION_SHIFT 3
+#define MAX77705_REVISION_MASK GENMASK(2, 0)
+#define MAX77705_VERSION_MASK GENMASK(7, MAX77705_VERSION_SHIFT)
+/* MAX77705_PMIC_REG_MAINCTRL1 register */
+#define MAX77705_MAINCTRL1_BIASEN_SHIFT 7
+#define MAX77705_MAINCTRL1_BIASEN_MASK BIT(MAX77705_MAINCTRL1_BIASEN_SHIFT)
+/* MAX77705_PMIC_REG_MCONFIG2 (haptics) register */
+#define MAX77705_CONFIG2_MEN_SHIFT 6
+#define MAX77705_CONFIG2_MODE_SHIFT 7
+#define MAX77705_CONFIG2_HTYP_SHIFT 5
+/* MAX77705_PMIC_REG_SYSTEM_INT_MASK register */
+#define MAX77705_SYSTEM_IRQ_BSTEN_INT BIT(3)
+#define MAX77705_SYSTEM_IRQ_SYSUVLO_INT BIT(4)
+#define MAX77705_SYSTEM_IRQ_SYSOVLO_INT BIT(5)
+#define MAX77705_SYSTEM_IRQ_TSHDN_INT BIT(6)
+#define MAX77705_SYSTEM_IRQ_TM_INT BIT(7)
+/* MAX77705_RGBLED_REG_LEDEN register */
+#define MAX77705_RGBLED_EN_WIDTH 2
+/* MAX77705_RGBLED_REG_LEDBLNK register */
+#define MAX77705_RGB_DELAY_100_STEP_LIM 500
+#define MAX77705_RGB_DELAY_100_STEP_COUNT 4
+#define MAX77705_RGB_DELAY_100_STEP 100
+#define MAX77705_RGB_DELAY_250_STEP_LIM 3250
+#define MAX77705_RGB_DELAY_250_STEP 250
+#define MAX77705_RGB_DELAY_500_STEP 500
+#define MAX77705_RGB_DELAY_500_STEP_COUNT 10
+#define MAX77705_RGB_DELAY_500_STEP_LIM 5000
+#define MAX77705_RGB_DELAY_1000_STEP_LIM 8000
+#define MAX77705_RGB_DELAY_1000_STEP_COUNT 13
+#define MAX77705_RGB_DELAY_1000_STEP 1000
+#define MAX77705_RGB_DELAY_2000_STEP 2000
+#define MAX77705_RGB_DELAY_2000_STEP_COUNT 13
+#define MAX77705_RGB_DELAY_2000_STEP_LIM 12000
+
+enum max77705_hw_rev {
+ MAX77705_PASS1 = 1,
+ MAX77705_PASS2,
+ MAX77705_PASS3
+};
+
+enum max77705_reg {
+ MAX77705_PMIC_REG_PMICID1 = 0x00,
+ MAX77705_PMIC_REG_PMICREV = 0x01,
+ MAX77705_PMIC_REG_MAINCTRL1 = 0x02,
+ MAX77705_PMIC_REG_BSTOUT_MASK = 0x03,
+ MAX77705_PMIC_REG_FORCE_EN_MASK = 0x08,
+ MAX77705_PMIC_REG_MCONFIG = 0x10,
+ MAX77705_PMIC_REG_MCONFIG2 = 0x11,
+ MAX77705_PMIC_REG_INTSRC = 0x22,
+ MAX77705_PMIC_REG_INTSRC_MASK = 0x23,
+ MAX77705_PMIC_REG_SYSTEM_INT = 0x24,
+ MAX77705_PMIC_REG_RESERVED_25 = 0x25,
+ MAX77705_PMIC_REG_SYSTEM_INT_MASK = 0x26,
+ MAX77705_PMIC_REG_RESERVED_27 = 0x27,
+ MAX77705_PMIC_REG_RESERVED_28 = 0x28,
+ MAX77705_PMIC_REG_RESERVED_29 = 0x29,
+ MAX77705_PMIC_REG_BOOSTCONTROL1 = 0x4C,
+ MAX77705_PMIC_REG_BOOSTCONTROL2 = 0x4F,
+ MAX77705_PMIC_REG_SW_RESET = 0x50,
+ MAX77705_PMIC_REG_USBC_RESET = 0x51,
+
+ MAX77705_PMIC_REG_END
+};
+
+enum max77705_chg_reg {
+ MAX77705_CHG_REG_BASE = 0xB0,
+ MAX77705_CHG_REG_INT = 0,
+ MAX77705_CHG_REG_INT_MASK,
+ MAX77705_CHG_REG_INT_OK,
+ MAX77705_CHG_REG_DETAILS_00,
+ MAX77705_CHG_REG_DETAILS_01,
+ MAX77705_CHG_REG_DETAILS_02,
+ MAX77705_CHG_REG_DTLS_03,
+ MAX77705_CHG_REG_CNFG_00,
+ MAX77705_CHG_REG_CNFG_01,
+ MAX77705_CHG_REG_CNFG_02,
+ MAX77705_CHG_REG_CNFG_03,
+ MAX77705_CHG_REG_CNFG_04,
+ MAX77705_CHG_REG_CNFG_05,
+ MAX77705_CHG_REG_CNFG_06,
+ MAX77705_CHG_REG_CNFG_07,
+ MAX77705_CHG_REG_CNFG_08,
+ MAX77705_CHG_REG_CNFG_09,
+ MAX77705_CHG_REG_CNFG_10,
+ MAX77705_CHG_REG_CNFG_11,
+
+ MAX77705_CHG_REG_CNFG_12,
+ MAX77705_CHG_REG_CNFG_13,
+ MAX77705_CHG_REG_CNFG_14,
+ MAX77705_CHG_REG_SAFEOUT_CTRL
+};
+
+enum max77705_fuelgauge_reg {
+ STATUS_REG = 0x00,
+ VALRT_THRESHOLD_REG = 0x01,
+ TALRT_THRESHOLD_REG = 0x02,
+ SALRT_THRESHOLD_REG = 0x03,
+ REMCAP_REP_REG = 0x05,
+ SOCREP_REG = 0x06,
+ TEMPERATURE_REG = 0x08,
+ VCELL_REG = 0x09,
+ TIME_TO_EMPTY_REG = 0x11,
+ FULLSOCTHR_REG = 0x13,
+ CURRENT_REG = 0x0A,
+ AVG_CURRENT_REG = 0x0B,
+ SOCMIX_REG = 0x0D,
+ SOCAV_REG = 0x0E,
+ REMCAP_MIX_REG = 0x0F,
+ FULLCAP_REG = 0x10,
+ RFAST_REG = 0x15,
+ AVR_TEMPERATURE_REG = 0x16,
+ CYCLES_REG = 0x17,
+ DESIGNCAP_REG = 0x18,
+ AVR_VCELL_REG = 0x19,
+ TIME_TO_FULL_REG = 0x20,
+ CONFIG_REG = 0x1D,
+ ICHGTERM_REG = 0x1E,
+ REMCAP_AV_REG = 0x1F,
+ FULLCAP_NOM_REG = 0x23,
+ LEARN_CFG_REG = 0x28,
+ FILTER_CFG_REG = 0x29,
+ MISCCFG_REG = 0x2B,
+ QRTABLE20_REG = 0x32,
+ FULLCAP_REP_REG = 0x35,
+ RCOMP_REG = 0x38,
+ VEMPTY_REG = 0x3A,
+ FSTAT_REG = 0x3D,
+ DISCHARGE_THRESHOLD_REG = 0x40,
+ QRTABLE30_REG = 0x42,
+ ISYS_REG = 0x43,
+ DQACC_REG = 0x45,
+ DPACC_REG = 0x46,
+ AVGISYS_REG = 0x4B,
+ QH_REG = 0x4D,
+ VSYS_REG = 0xB1,
+ TALRTTH2_REG = 0xB2,
+ VBYP_REG = 0xB3,
+ CONFIG2_REG = 0xBB,
+ IIN_REG = 0xD0,
+ OCV_REG = 0xEE,
+ VFOCV_REG = 0xFB,
+ VFSOC_REG = 0xFF,
+
+ MAX77705_FG_END
+};
+
+enum max77705_led_reg {
+ MAX77705_RGBLED_REG_BASE = 0x30,
+ MAX77705_RGBLED_REG_LEDEN = 0,
+ MAX77705_RGBLED_REG_LED0BRT,
+ MAX77705_RGBLED_REG_LED1BRT,
+ MAX77705_RGBLED_REG_LED2BRT,
+ MAX77705_RGBLED_REG_LED3BRT,
+ MAX77705_RGBLED_REG_LEDRMP,
+ MAX77705_RGBLED_REG_LEDBLNK,
+ MAX77705_LED_REG_END
+};
+
+enum max77705_charger_battery_state {
+ MAX77705_BATTERY_NOBAT,
+ MAX77705_BATTERY_PREQUALIFICATION,
+ MAX77705_BATTERY_DEAD,
+ MAX77705_BATTERY_GOOD,
+ MAX77705_BATTERY_LOWVOLTAGE,
+ MAX77705_BATTERY_OVERVOLTAGE,
+ MAX77705_BATTERY_RESERVED
+};
+
+enum max77705_charger_charge_type {
+ MAX77705_CHARGER_CONSTANT_CURRENT = 1,
+ MAX77705_CHARGER_CONSTANT_VOLTAGE,
+ MAX77705_CHARGER_END_OF_CHARGE,
+ MAX77705_CHARGER_DONE
+};
+
+#endif /* __LINUX_MFD_MAX77705_PRIV_H */
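
Note (illustrative only, not part of the patch): the charger and LED enums above split a base address from zero-based indices. Assuming absolute addresses are formed by adding the base, which the base/index layout suggests, a read of the charger OK register would look like:

	unsigned int ok;

	/* Charger registers are offsets from MAX77705_CHG_REG_BASE. */
	regmap_read(regmap, MAX77705_CHG_REG_BASE + MAX77705_CHG_REG_INT_OK, &ok);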
diff --git a/include/linux/mfd/max77714.h b/include/linux/mfd/max77714.h
new file mode 100644
index 000000000000..7947e0d697a5
--- /dev/null
+++ b/include/linux/mfd/max77714.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Maxim MAX77714 Register and data structures definition.
+ *
+ * Copyright (C) 2022 Luca Ceresoli
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+#ifndef __LINUX_MFD_MAX77714_H_
+#define __LINUX_MFD_MAX77714_H_
+
+#include <linux/bits.h>
+
+#define MAX77714_INT_TOP 0x00
+#define MAX77714_INT_TOPM 0x07 /* Datasheet says "read only", but it is RW */
+
+#define MAX77714_INT_TOP_ONOFF BIT(1)
+#define MAX77714_INT_TOP_RTC BIT(3)
+#define MAX77714_INT_TOP_GPIO BIT(4)
+#define MAX77714_INT_TOP_LDO BIT(5)
+#define MAX77714_INT_TOP_SD BIT(6)
+#define MAX77714_INT_TOP_GLBL BIT(7)
+
+#define MAX77714_32K_STATUS 0x30
+#define MAX77714_32K_STATUS_SIOSCOK BIT(5)
+#define MAX77714_32K_STATUS_XOSCOK BIT(4)
+#define MAX77714_32K_STATUS_32KSOURCE BIT(3)
+#define MAX77714_32K_STATUS_32KLOAD_MSK 0x3
+#define MAX77714_32K_STATUS_32KLOAD_SHF 1
+#define MAX77714_32K_STATUS_CRYSTAL_CFG BIT(0)
+
+#define MAX77714_32K_CONFIG 0x31
+#define MAX77714_32K_CONFIG_XOSC_RETRY BIT(4)
+
+#define MAX77714_CNFG_GLBL2 0x91
+#define MAX77714_WDTEN BIT(2)
+#define MAX77714_WDTSLPC BIT(3)
+#define MAX77714_TWD_MASK 0x3
+#define MAX77714_TWD_2s 0x0
+#define MAX77714_TWD_16s 0x1
+#define MAX77714_TWD_64s 0x2
+#define MAX77714_TWD_128s 0x3
+
+#define MAX77714_CNFG_GLBL3 0x92
+#define MAX77714_WDTC BIT(0)
+
+#define MAX77714_CNFG2_ONOFF 0x94
+#define MAX77714_WD_RST_WK BIT(5)
+
+/* Interrupts */
+enum {
+ MAX77714_IRQ_TOP_ONOFF,
+ MAX77714_IRQ_TOP_RTC, /* Real-time clock */
+ MAX77714_IRQ_TOP_GPIO, /* GPIOs */
+ MAX77714_IRQ_TOP_LDO, /* Low-dropout regulators */
+ MAX77714_IRQ_TOP_SD, /* Step-down regulators */
+ MAX77714_IRQ_TOP_GLBL, /* "Global resources": Low-Battery, overtemp... */
+};
+
+#endif /* __LINUX_MFD_MAX77714_H_ */
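
Note (illustrative only, not part of the patch): a sketch of the watchdog controls above, assuming a struct regmap *regmap:

	/* Enable the watchdog with a 16-second timeout. */
	regmap_update_bits(regmap, MAX77714_CNFG_GLBL2,
			   MAX77714_WDTEN | MAX77714_TWD_MASK,
			   MAX77714_WDTEN | MAX77714_TWD_16s);

	/* Kick it by setting the clear bit. */
	regmap_set_bits(regmap, MAX77714_CNFG_GLBL3, MAX77714_WDTC);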
diff --git a/include/linux/mfd/max77759.h b/include/linux/mfd/max77759.h
new file mode 100644
index 000000000000..c6face34e385
--- /dev/null
+++ b/include/linux/mfd/max77759.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Google Inc.
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Maxim MAX77759 core driver
+ */
+
+#ifndef __LINUX_MFD_MAX77759_H
+#define __LINUX_MFD_MAX77759_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#define MAX77759_PMIC_REG_PMIC_ID 0x00
+#define MAX77759_PMIC_REG_PMIC_REVISION 0x01
+#define MAX77759_PMIC_REG_OTP_REVISION 0x02
+#define MAX77759_PMIC_REG_INTSRC 0x22
+#define MAX77759_PMIC_REG_INTSRCMASK 0x23
+#define MAX77759_PMIC_REG_INTSRC_MAXQ BIT(3)
+#define MAX77759_PMIC_REG_INTSRC_TOPSYS BIT(1)
+#define MAX77759_PMIC_REG_INTSRC_CHGR BIT(0)
+#define MAX77759_PMIC_REG_TOPSYS_INT 0x24
+#define MAX77759_PMIC_REG_TOPSYS_INT_MASK 0x26
+#define MAX77759_PMIC_REG_TOPSYS_INT_TSHDN BIT(6)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSOVLO BIT(5)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSUVLO BIT(4)
+#define MAX77759_PMIC_REG_TOPSYS_INT_FSHIP BIT(0)
+#define MAX77759_PMIC_REG_I2C_CNFG 0x40
+#define MAX77759_PMIC_REG_SWRESET 0x50
+#define MAX77759_PMIC_REG_CONTROL_FG 0x51
+
+#define MAX77759_MAXQ_REG_UIC_INT1 0x64
+#define MAX77759_MAXQ_REG_UIC_INT1_APCMDRESI BIT(7)
+#define MAX77759_MAXQ_REG_UIC_INT1_SYSMSGI BIT(6)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO6I BIT(1)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO5I BIT(0)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, en) (((en) & 1) << (offs))
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(offs) \
+ MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, ~0)
+#define MAX77759_MAXQ_REG_UIC_INT2 0x65
+#define MAX77759_MAXQ_REG_UIC_INT3 0x66
+#define MAX77759_MAXQ_REG_UIC_INT4 0x67
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS1 0x68
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS2 0x69
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS3 0x6a
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS4 0x6b
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS5 0x6c
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS6 0x6d
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS7 0x6e
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS8 0x6f
+#define MAX77759_MAXQ_REG_UIC_INT1_M 0x70
+#define MAX77759_MAXQ_REG_UIC_INT2_M 0x71
+#define MAX77759_MAXQ_REG_UIC_INT3_M 0x72
+#define MAX77759_MAXQ_REG_UIC_INT4_M 0x73
+#define MAX77759_MAXQ_REG_AP_DATAOUT0 0x81
+#define MAX77759_MAXQ_REG_AP_DATAOUT32 0xa1
+#define MAX77759_MAXQ_REG_AP_DATAIN0 0xb1
+#define MAX77759_MAXQ_REG_UIC_SWRST 0xe0
+
+#define MAX77759_CHGR_REG_CHG_INT 0xb0
+#define MAX77759_CHGR_REG_CHG_INT2 0xb1
+#define MAX77759_CHGR_REG_CHG_INT_MASK 0xb2
+#define MAX77759_CHGR_REG_CHG_INT2_MASK 0xb3
+#define MAX77759_CHGR_REG_CHG_INT_OK 0xb4
+#define MAX77759_CHGR_REG_CHG_DETAILS_00 0xb5
+#define MAX77759_CHGR_REG_CHG_DETAILS_01 0xb6
+#define MAX77759_CHGR_REG_CHG_DETAILS_02 0xb7
+#define MAX77759_CHGR_REG_CHG_DETAILS_03 0xb8
+#define MAX77759_CHGR_REG_CHG_CNFG_00 0xb9
+#define MAX77759_CHGR_REG_CHG_CNFG_01 0xba
+#define MAX77759_CHGR_REG_CHG_CNFG_02 0xbb
+#define MAX77759_CHGR_REG_CHG_CNFG_03 0xbc
+#define MAX77759_CHGR_REG_CHG_CNFG_04 0xbd
+#define MAX77759_CHGR_REG_CHG_CNFG_05 0xbe
+#define MAX77759_CHGR_REG_CHG_CNFG_06 0xbf
+#define MAX77759_CHGR_REG_CHG_CNFG_07 0xc0
+#define MAX77759_CHGR_REG_CHG_CNFG_08 0xc1
+#define MAX77759_CHGR_REG_CHG_CNFG_09 0xc2
+#define MAX77759_CHGR_REG_CHG_CNFG_10 0xc3
+#define MAX77759_CHGR_REG_CHG_CNFG_11 0xc4
+#define MAX77759_CHGR_REG_CHG_CNFG_12 0xc5
+#define MAX77759_CHGR_REG_CHG_CNFG_13 0xc6
+#define MAX77759_CHGR_REG_CHG_CNFG_14 0xc7
+#define MAX77759_CHGR_REG_CHG_CNFG_15 0xc8
+#define MAX77759_CHGR_REG_CHG_CNFG_16 0xc9
+#define MAX77759_CHGR_REG_CHG_CNFG_17 0xca
+#define MAX77759_CHGR_REG_CHG_CNFG_18 0xcb
+#define MAX77759_CHGR_REG_CHG_CNFG_19 0xcc
+
+/* MaxQ opcodes for max77759_maxq_command() */
+#define MAX77759_MAXQ_OPCODE_MAXLENGTH (MAX77759_MAXQ_REG_AP_DATAOUT32 - \
+ MAX77759_MAXQ_REG_AP_DATAOUT0 + \
+ 1)
+
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_READ 0x21
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_WRITE 0x22
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ 0x23
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_WRITE 0x24
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_READ 0x81
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_WRITE 0x82
+
+/**
+ * struct max77759 - core max77759 internal data structure
+ *
+ * @regmap_top: Regmap for accessing TOP registers
+ * @maxq_lock: Lock for serializing access to MaxQ
+ * @regmap_maxq: Regmap for accessing MaxQ registers
+ * @cmd_done: Used to signal completion of a MaxQ command
+ * @regmap_charger: Regmap for accessing charger registers
+ *
+ * The MAX77759 comprises several sub-blocks, namely TOP, MaxQ, Charger,
+ * Fuel Gauge, and TCPCI.
+ */
+struct max77759 {
+ struct regmap *regmap_top;
+
+ /* This protects MaxQ commands - only one can be active */
+ struct mutex maxq_lock;
+ struct regmap *regmap_maxq;
+ struct completion cmd_done;
+
+ struct regmap *regmap_charger;
+};
+
+/**
+ * struct max77759_maxq_command - structure containing the MaxQ command to
+ * send
+ *
+ * @length: The number of bytes to send.
+ * @cmd: The data to send.
+ */
+struct max77759_maxq_command {
+ u8 length;
+ u8 cmd[] __counted_by(length);
+};
+
+/**
+ * struct max77759_maxq_response - structure containing the MaxQ response
+ *
+ * @length: The number of bytes to receive.
+ * @rsp: The data received. Must have at least @length bytes space.
+ */
+struct max77759_maxq_response {
+ u8 length;
+ u8 rsp[] __counted_by(length);
+};
+
+/**
+ * max77759_maxq_command() - issue a MaxQ command and wait for the response
+ * and associated data
+ *
+ * @max77759: The core max77759 device handle.
+ * @cmd: The command to be sent.
+ * @rsp: Any response data associated with the command will be copied here;
+ * can be %NULL if the command has no response (other than ACK).
+ *
+ * Return: 0 on success, a negative error number otherwise.
+ */
+int max77759_maxq_command(struct max77759 *max77759,
+ const struct max77759_maxq_command *cmd,
+ struct max77759_maxq_response *rsp);
+
+#endif /* __LINUX_MFD_MAX77759_H */
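
Note (illustrative only, not part of the patch): a sketch of issuing a MaxQ command through the API declared above, using struct_size() for the __counted_by flexible arrays (needs linux/slab.h and linux/overflow.h); the opcode choice and response length are hypothetical:

	struct max77759_maxq_command *cmd;
	struct max77759_maxq_response *rsp;
	int ret;

	cmd = kzalloc(struct_size(cmd, cmd, 1), GFP_KERNEL);
	rsp = kzalloc(struct_size(rsp, rsp, 2), GFP_KERNEL);
	if (!cmd || !rsp) {
		kfree(cmd);
		kfree(rsp);
		return -ENOMEM;
	}

	cmd->length = 1;
	cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ;
	rsp->length = 2;	/* assumed response size for this opcode */

	ret = max77759_maxq_command(max77759, cmd, rsp);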
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index c19303b0ccfd..2fb4db67f110 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common variables for the Maxim MAX77843 driver
*
* Copyright (C) 2015 Samsung Electronics
* Author: Jaewon Kim <jaewon02.kim@samsung.com>
* Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __MAX77843_PRIVATE_H_
@@ -202,7 +198,7 @@ enum max77843_irq_muic {
#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT)
#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT)
-/* Max77843 charger insterrupts */
+/* Max77843 charger interrupts */
#define MAX77843_CHG_BYP_I BIT(0)
#define MAX77843_CHG_BATP_I BIT(2)
#define MAX77843_CHG_BAT_I BIT(3)
@@ -245,10 +241,13 @@ enum max77843_irq_muic {
#define MAX77843_CHG_OVER_CURRENT_BAT (0x06 << 4)
/* MAX77843 CHG_CNFG_00 register */
+#define MAX77843_CHG_MODE_MASK 0x0f
#define MAX77843_CHG_DISABLE 0x00
#define MAX77843_CHG_ENABLE 0x05
#define MAX77843_CHG_MASK 0x01
+#define MAX77843_CHG_OTG_MASK 0x02
#define MAX77843_CHG_BUCK_MASK 0x04
+#define MAX77843_CHG_BOOST_MASK 0x08
/* MAX77843 CHG_CNFG_01 register */
#define MAX77843_CHG_RESTART_THRESHOLD_100 0x00
@@ -347,6 +346,7 @@ enum max77843_irq_muic {
/* MAX77843 CONTROL register */
#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0
#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3
+#define MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT 6
#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7
#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0
#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1
@@ -363,6 +363,7 @@ enum max77843_irq_muic {
#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK BIT(MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT)
#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
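
With the new MODE/OTG/BOOST masks in CHG_CNFG_00, switching the charger block between operating modes becomes a single masked register update; a minimal sketch (the CHG_CNFG_00 register macro name and the regmap handle are assumptions, not shown in this hunk):

	/* Sketch: enable charging; register macro name assumed for illustration. */
	static int example_max77843_enable_charging(struct regmap *regmap)
	{
		return regmap_update_bits(regmap, MAX77843_CHG_REG_CHG_CNFG_00,
					  MAX77843_CHG_MODE_MASK,
					  MAX77843_CHG_ENABLE);
	}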
diff --git a/include/linux/mfd/max8907.h b/include/linux/mfd/max8907.h
index b06f7a6a1e80..4be3c2370e2a 100644
--- a/include/linux/mfd/max8907.h
+++ b/include/linux/mfd/max8907.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions to access MAX8907 power management chip.
*
* Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com>
* Copyright (C) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_MAX8907_H
diff --git a/include/linux/mfd/max8925.h b/include/linux/mfd/max8925.h
index ce8502e9e7dc..07f9af579fb9 100644
--- a/include/linux/mfd/max8925.h
+++ b/include/linux/mfd/max8925.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Maxim8925 Interface
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_MAX8925_H
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 78c76cd4d37b..261c0aae7d00 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997-private.h - Voltage regulator driver for the Maxim 8997
*
- * Copyright (C) 2010 Samsung Electrnoics
+ * Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8997_PRIV_H
@@ -410,7 +397,6 @@ enum max8997_types {
};
extern int max8997_irq_init(struct max8997_dev *max8997);
-extern void max8997_irq_exit(struct max8997_dev *max8997);
extern int max8997_irq_resume(struct max8997_dev *max8997);
extern int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest);
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index cf815577bd68..fb36e1386069 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997.h - Driver for the Maxim 8997/8966
*
- * Copyright (C) 2009-2010 Samsung Electrnoics
+ * Copyright (C) 2009-2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8998.h
*
* MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices.
@@ -27,13 +14,13 @@
* others and b) it can be enabled simply by using MAX17042 driver.
*/
-#ifndef __LINUX_MFD_MAX8998_H
-#define __LINUX_MFD_MAX8998_H
+#ifndef __LINUX_MFD_MAX8997_H
+#define __LINUX_MFD_MAX8997_H
#include <linux/regulator/consumer.h>
/* MAX8997/8966 regulator IDs */
-enum max8998_regulators {
+enum max8997_regulators {
MAX8997_LDO1 = 0,
MAX8997_LDO2,
MAX8997_LDO3,
@@ -123,8 +110,6 @@ enum max8997_haptic_pwm_divisor {
/**
* max8997_haptic_platform_data
- * @pwm_channel_id: channel number of PWM device
- * valid for MAX8997_EXTERNAL_MODE
* @pwm_period: period in nano second for PWM device
* valid for MAX8997_EXTERNAL_MODE
* @type: motor type
@@ -141,7 +126,6 @@ enum max8997_haptic_pwm_divisor {
* [0 - 255]: available period
*/
struct max8997_haptic_platform_data {
- unsigned int pwm_channel_id;
unsigned int pwm_period;
enum max8997_haptic_motor_type type;
@@ -178,7 +162,6 @@ struct max8997_led_platform_data {
struct max8997_platform_data {
/* IRQ */
int ono;
- int wakeup;
/* ---- PMIC ---- */
struct max8997_regulator_data *regulators;
@@ -195,7 +178,6 @@ struct max8997_platform_data {
*
*/
bool ignore_gpiodvs_side_effect;
- int buck125_gpios[3]; /* GPIO of [0]SET1, [1]SET2, [2]SET3 */
int buck125_default_idx; /* Default value of SET1, 2, 3 */
unsigned int buck1_voltage[8]; /* buckx_voltage in uV */
bool buck1_gpiodvs;
@@ -221,4 +203,4 @@ struct max8997_platform_data {
struct max8997_led_platform_data *led_pdata;
};
-#endif /* __LINUX_MFD_MAX8998_H */
+#endif /* __LINUX_MFD_MAX8997_H */
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index d68ada502ff3..d77dc18db6eb 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998-private.h - Voltage regulator driver for the Maxim 8998
*
- * Copyright (C) 2009-2010 Samsung Electrnoics
+ * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_PRIV_H
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index e3956a654cbc..5473f1983e31 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998.h - Voltage regulator driver for the Maxim 8998
*
- * Copyright (C) 2009-2010 Samsung Electrnoics
+ * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_H
@@ -52,6 +39,7 @@ enum {
MAX8998_ENVICHG,
MAX8998_ESAFEOUT1,
MAX8998_ESAFEOUT2,
+ MAX8998_CHARGER,
};
/**
@@ -77,10 +65,7 @@ struct max8998_regulator_data {
* be other than the preset values.
* @buck1_voltage: BUCK1 DVS mode 1 voltage registers
* @buck2_voltage: BUCK2 DVS mode 2 voltage registers
- * @buck1_set1: BUCK1 gpio pin 1 to set output voltage
- * @buck1_set2: BUCK1 gpio pin 2 to set output voltage
* @buck1_default_idx: Default for BUCK1 gpio pin 1, 2
- * @buck2_set3: BUCK2 gpio pin to set output voltage
* @buck2_default_idx: Default for BUCK2 gpio pin.
* @wakeup: Allow to wake up from suspend
* @rtc_delay: LP3974 RTC chip bug that requires delay after a register
@@ -103,10 +88,7 @@ struct max8998_platform_data {
bool buck_voltage_lock;
int buck1_voltage[4];
int buck2_voltage[2];
- int buck1_set1;
- int buck1_set2;
int buck1_default_idx;
- int buck2_set3;
int buck2_default_idx;
bool wakeup;
bool rtc_delay;
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index 4ff6137d8d67..c25b1676741b 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2010 Yong Shen <yong.shen@linaro.org>
* Copyright 2009-2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
*/
#ifndef __LINUX_MFD_MC13783_H
#define __LINUX_MFD_MC13783_H
diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h
index a00f2bec178c..880cd949d12a 100644
--- a/include/linux/mfd/mc13892.h
+++ b/include/linux/mfd/mc13892.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2010 Yong Shen <yong.shen@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
*/
#ifndef __LINUX_MFD_MC13892_H
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 638222e43e48..dd46fe424a80 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2009-2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
*/
#ifndef __LINUX_MFD_MC13XXX_H
#define __LINUX_MFD_MC13XXX_H
@@ -34,12 +31,6 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx,
unsigned int mode, unsigned int channel,
u8 ato, bool atox, unsigned int *sample);
-/* Deprecated calls */
-static inline int mc13xxx_irq_ack(struct mc13xxx *mc13xxx, int irq)
-{
- return 0;
-}
-
static inline int mc13xxx_irq_request_nounmask(struct mc13xxx *mc13xxx, int irq,
irq_handler_t handler,
const char *name, void *dev)
@@ -243,10 +234,13 @@ struct mc13xxx_platform_data {
#define MC13XXX_ADC0_LICELLCON (1 << 0)
#define MC13XXX_ADC0_CHRGICON (1 << 1)
#define MC13XXX_ADC0_BATICON (1 << 2)
+#define MC13XXX_ADC0_ADIN7SEL_DIE (1 << 4)
+#define MC13XXX_ADC0_ADIN7SEL_UID (2 << 4)
#define MC13XXX_ADC0_ADREFEN (1 << 10)
#define MC13XXX_ADC0_TSMOD0 (1 << 12)
#define MC13XXX_ADC0_TSMOD1 (1 << 13)
#define MC13XXX_ADC0_TSMOD2 (1 << 14)
+#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
#define MC13XXX_ADC0_ADINC1 (1 << 16)
#define MC13XXX_ADC0_ADINC2 (1 << 17)
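
The new ADIN7SEL defines select what ADC channel 7 actually measures (die temperature or UID); a hedged sketch of a single conversion using the mc13xxx_adc_do_conversion() declaration above (MC13XXX_ADC_MODE_SINGLE_CHAN is assumed from the driver, not shown in this hunk):

	/* Sketch: read ADC channel 7 once; its meaning depends on how the
	 * ADIN7SEL field in ADC0 (bits 4-5 above) was programmed. */
	static int example_read_adin7(struct mc13xxx *mc13xxx, unsigned int *sample)
	{
		return mc13xxx_adc_do_conversion(mc13xxx, MC13XXX_ADC_MODE_SINGLE_CHAN,
						 7, 0, false, sample);
	}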
diff --git a/include/linux/mfd/mcp.h b/include/linux/mfd/mcp.h
index f682953043ba..fd5cafc77e8a 100644
--- a/include/linux/mfd/mcp.h
+++ b/include/linux/mfd/mcp.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/drivers/mfd/mcp.h
*
* Copyright (C) 2001 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#ifndef MCP_H
#define MCP_H
diff --git a/include/linux/mfd/menelaus.h b/include/linux/mfd/menelaus.h
index 9e85ac06da89..ce489aba88ec 100644
--- a/include/linux/mfd/menelaus.h
+++ b/include/linux/mfd/menelaus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Functions to access Menelaus power management chip
*/
diff --git a/include/linux/mfd/motorola-cpcap.h b/include/linux/mfd/motorola-cpcap.h
index aefc49cb7ba9..981e5777deb7 100644
--- a/include/linux/mfd/motorola-cpcap.h
+++ b/include/linux/mfd/motorola-cpcap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* The register defines are based on earlier cpcap.h in Motorola Linux kernel
* tree.
@@ -8,10 +9,6 @@
* to make the defines usable with Linux kernel regmap support
*
* Copyright (C) 2016 Tony Lindgren <tony@atomide.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
diff --git a/include/linux/mfd/mp2629.h b/include/linux/mfd/mp2629.h
new file mode 100644
index 000000000000..89b706900b57
--- /dev/null
+++ b/include/linux/mfd/mp2629.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 Monolithic Power Systems, Inc
+ */
+
+#ifndef __MP2629_H__
+#define __MP2629_H__
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+struct mp2629_data {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+enum mp2629_adc_chan {
+ MP2629_BATT_VOLT,
+ MP2629_SYSTEM_VOLT,
+ MP2629_INPUT_VOLT,
+ MP2629_BATT_CURRENT,
+ MP2629_INPUT_CURRENT,
+ MP2629_ADC_CHAN_END
+};
+
+#endif
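
Sub-function drivers (ADC, charger) are expected to pick up this shared state from the parent MFD device; a hedged sketch of that conventional pattern (assumed, not shown here; needs <linux/platform_device.h>):

	/* Sketch: a child platform driver fetching the shared regmap published
	 * by the parent mp2629 driver via drvdata (assumed pattern). */
	static int example_mp2629_child_probe(struct platform_device *pdev)
	{
		struct mp2629_data *ddata = dev_get_drvdata(pdev->dev.parent);

		if (!ddata || !ddata->regmap)
			return -ENODEV;
		/* ... use ddata->regmap for register access ... */
		return 0;
	}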
diff --git a/include/linux/mfd/mt6323/core.h b/include/linux/mfd/mt6323/core.h
index 06d0ec3b1f8f..2becc3443179 100644
--- a/include/linux/mfd/mt6323/core.h
+++ b/include/linux/mfd/mt6323/core.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MFD_MT6323_CORE_H__
diff --git a/include/linux/mfd/mt6323/registers.h b/include/linux/mfd/mt6323/registers.h
index 160f3c0e2589..4455e57544eb 100644
--- a/include/linux/mfd/mt6323/registers.h
+++ b/include/linux/mfd/mt6323/registers.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MFD_MT6323_REGISTERS_H__
diff --git a/include/linux/mfd/mt6328/core.h b/include/linux/mfd/mt6328/core.h
new file mode 100644
index 000000000000..9a08aed72b9f
--- /dev/null
+++ b/include/linux/mfd/mt6328/core.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __MFD_MT6328_CORE_H__
+#define __MFD_MT6328_CORE_H__
+
+enum mt6328_irq_status_numbers {
+ MT6328_IRQ_STATUS_PWRKEY = 0,
+ MT6328_IRQ_STATUS_HOMEKEY,
+ MT6328_IRQ_STATUS_PWRKEY_R,
+ MT6328_IRQ_STATUS_HOMEKEY_R,
+ MT6328_IRQ_STATUS_THR_H,
+ MT6328_IRQ_STATUS_THR_L,
+ MT6328_IRQ_STATUS_BAT_H,
+ MT6328_IRQ_STATUS_BAT_L,
+ MT6328_IRQ_STATUS_RTC,
+ MT6328_IRQ_STATUS_AUDIO,
+ MT6328_IRQ_STATUS_ACCDET,
+ MT6328_IRQ_STATUS_ACCDET_EINT,
+ MT6328_IRQ_STATUS_ACCDET_NEGV,
+ MT6328_IRQ_STATUS_NI_LBAT_INT,
+ MT6328_IRQ_STATUS_VPROC_OC = 16,
+ MT6328_IRQ_STATUS_VSYS_OC,
+ MT6328_IRQ_STATUS_VLTE_OC,
+ MT6328_IRQ_STATUS_VCORE_OC,
+ MT6328_IRQ_STATUS_VPA_OC,
+ MT6328_IRQ_STATUS_LDO_OC,
+ MT6328_IRQ_STATUS_BAT2_H,
+ MT6328_IRQ_STATUS_BAT2_L,
+ MT6328_IRQ_STATUS_VISMPS0_H,
+ MT6328_IRQ_STATUS_VISMPS0_L,
+ MT6328_IRQ_STATUS_AUXADC_IMP,
+ MT6328_IRQ_STATUS_OV = 32,
+ MT6328_IRQ_STATUS_BVALID_DET,
+ MT6328_IRQ_STATUS_VBATON_HV,
+ MT6328_IRQ_STATUS_VBATON_UNDET,
+ MT6328_IRQ_STATUS_WATCHDOG,
+ MT6328_IRQ_STATUS_PCHR_CM_VDEC,
+ MT6328_IRQ_STATUS_CHRDET,
+ MT6328_IRQ_STATUS_PCHR_CM_VINC,
+ MT6328_IRQ_STATUS_FG_BAT_H,
+ MT6328_IRQ_STATUS_FG_BAT_L,
+ MT6328_IRQ_STATUS_FG_CUR_H,
+ MT6328_IRQ_STATUS_FG_CUR_L,
+ MT6328_IRQ_STATUS_FG_ZCV,
+ MT6328_IRQ_STATUS_SPKL_D,
+ MT6328_IRQ_STATUS_SPKL_AB,
+};
+
+#endif /* __MFD_MT6328_CORE_H__ */
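
The gaps in the enumeration above (VPROC_OC = 16, OV = 32) suggest each group of 16 lines maps onto one 16-bit status register; a hedged sketch of that assumed mapping, using the MT6328_INT_STATUS0..2 offsets from the companion registers.h below (0x02c4/0x02c6/0x02c8, i.e. 2 bytes apart; BIT() comes from <linux/bits.h>):

	/* Assumed hwirq -> status register/bit mapping; not taken from the patch. */
	static inline unsigned int mt6328_irq_status_reg(unsigned int hwirq)
	{
		return MT6328_INT_STATUS0 + 2 * (hwirq / 16);
	}

	static inline u16 mt6328_irq_status_bit(unsigned int hwirq)
	{
		return BIT(hwirq % 16);
	}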
diff --git a/include/linux/mfd/mt6328/registers.h b/include/linux/mfd/mt6328/registers.h
new file mode 100644
index 000000000000..8199aaea27b9
--- /dev/null
+++ b/include/linux/mfd/mt6328/registers.h
@@ -0,0 +1,822 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __MFD_MT6328_REGISTERS_H__
+#define __MFD_MT6328_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6328_STRUP_CON0 0x0000
+#define MT6328_STRUP_CON2 0x0002
+#define MT6328_STRUP_CON3 0x0004
+#define MT6328_STRUP_CON4 0x0006
+#define MT6328_STRUP_CON5 0x0008
+#define MT6328_STRUP_CON6 0x000a
+#define MT6328_STRUP_CON7 0x000c
+#define MT6328_STRUP_CON8 0x000e
+#define MT6328_STRUP_CON9 0x0010
+#define MT6328_STRUP_CON10 0x0012
+#define MT6328_STRUP_CON11 0x0014
+#define MT6328_STRUP_CON12 0x0016
+#define MT6328_STRUP_CON13 0x0018
+#define MT6328_STRUP_CON14 0x001a
+#define MT6328_STRUP_CON15 0x001c
+#define MT6328_STRUP_CON16 0x001e
+#define MT6328_STRUP_CON17 0x0020
+#define MT6328_STRUP_CON18 0x0022
+#define MT6328_STRUP_CON19 0x0024
+#define MT6328_STRUP_CON20 0x0026
+#define MT6328_STRUP_CON21 0x0028
+#define MT6328_STRUP_CON22 0x002a
+#define MT6328_STRUP_CON23 0x002c
+#define MT6328_STRUP_CON24 0x002e
+#define MT6328_STRUP_CON25 0x0030
+#define MT6328_STRUP_CON26 0x0032
+#define MT6328_STRUP_CON27 0x0034
+#define MT6328_STRUP_CON28 0x0036
+#define MT6328_STRUP_CON29 0x0038
+#define MT6328_STRUP_CON30 0x003a
+#define MT6328_STRUP_CON31 0x003c
+#define MT6328_STRUP_CON32 0x003e
+#define MT6328_STRUP_ANA_CON0 0x0040
+#define MT6328_HWCID 0x0200
+#define MT6328_SWCID 0x0202
+#define MT6328_TOP_CON 0x0204
+#define MT6328_TEST_OUT 0x0206
+#define MT6328_TEST_CON0 0x0208
+#define MT6328_TEST_CON1 0x020a
+#define MT6328_TESTMODE_SW 0x020c
+#define MT6328_EN_STATUS0 0x020e
+#define MT6328_EN_STATUS1 0x0210
+#define MT6328_EN_STATUS2 0x0212
+#define MT6328_OCSTATUS0 0x0214
+#define MT6328_OCSTATUS1 0x0216
+#define MT6328_OCSTATUS2 0x0218
+#define MT6328_PGDEBSTATUS 0x021a
+#define MT6328_PGSTATUS 0x021c
+#define MT6328_THERMALSTATUS 0x021e
+#define MT6328_TOPSTATUS 0x0220
+#define MT6328_TDSEL_CON 0x0222
+#define MT6328_RDSEL_CON 0x0224
+#define MT6328_SMT_CON0 0x0226
+#define MT6328_SMT_CON1 0x0228
+#define MT6328_SMT_CON2 0x022a
+#define MT6328_DRV_CON0 0x022c
+#define MT6328_DRV_CON1 0x022e
+#define MT6328_DRV_CON2 0x0230
+#define MT6328_DRV_CON3 0x0232
+#define MT6328_TOP_STATUS 0x0234
+#define MT6328_TOP_STATUS_SET 0x0236
+#define MT6328_TOP_STATUS_CLR 0x0238
+#define MT6328_RGS_ANA_MON 0x023a
+#define MT6328_TOP_CKPDN_CON0 0x023c
+#define MT6328_TOP_CKPDN_CON0_SET 0x023e
+#define MT6328_TOP_CKPDN_CON0_CLR 0x0240
+#define MT6328_TOP_CKPDN_CON1 0x0242
+#define MT6328_TOP_CKPDN_CON1_SET 0x0244
+#define MT6328_TOP_CKPDN_CON1_CLR 0x0246
+#define MT6328_TOP_CKPDN_CON2 0x0248
+#define MT6328_TOP_CKPDN_CON2_SET 0x024a
+#define MT6328_TOP_CKPDN_CON2_CLR 0x024c
+#define MT6328_TOP_CKPDN_CON3 0x024e
+#define MT6328_TOP_CKPDN_CON3_SET 0x0250
+#define MT6328_TOP_CKPDN_CON3_CLR 0x0252
+#define MT6328_TOP_CKPDN_CON4 0x0254
+#define MT6328_TOP_CKPDN_CON4_SET 0x0256
+#define MT6328_TOP_CKPDN_CON4_CLR 0x0258
+#define MT6328_TOP_CKSEL_CON0 0x025a
+#define MT6328_TOP_CKSEL_CON0_SET 0x025c
+#define MT6328_TOP_CKSEL_CON0_CLR 0x025e
+#define MT6328_TOP_CKSEL_CON1 0x0260
+#define MT6328_TOP_CKSEL_CON1_SET 0x0262
+#define MT6328_TOP_CKSEL_CON1_CLR 0x0264
+#define MT6328_TOP_CKSEL_CON2 0x0266
+#define MT6328_TOP_CKSEL_CON2_SET 0x0268
+#define MT6328_TOP_CKSEL_CON2_CLR 0x026a
+#define MT6328_TOP_CKDIVSEL_CON0 0x026c
+#define MT6328_TOP_CKDIVSEL_CON0_SET 0x026e
+#define MT6328_TOP_CKDIVSEL_CON0_CLR 0x0270
+#define MT6328_TOP_CKDIVSEL_CON1 0x0272
+#define MT6328_TOP_CKDIVSEL_CON1_SET 0x0274
+#define MT6328_TOP_CKDIVSEL_CON1_CLR 0x0276
+#define MT6328_TOP_CKHWEN_CON0 0x0278
+#define MT6328_TOP_CKHWEN_CON0_SET 0x027a
+#define MT6328_TOP_CKHWEN_CON0_CLR 0x027c
+#define MT6328_TOP_CKHWEN_CON1 0x027e
+#define MT6328_TOP_CKHWEN_CON1_SET 0x0280
+#define MT6328_TOP_CKHWEN_CON1_CLR 0x0282
+#define MT6328_TOP_CKTST_CON0 0x0284
+#define MT6328_TOP_CKTST_CON1 0x0286
+#define MT6328_TOP_CKTST_CON2 0x0288
+#define MT6328_TOP_CLKSQ 0x028a
+#define MT6328_TOP_CLKSQ_SET 0x028c
+#define MT6328_TOP_CLKSQ_CLR 0x028e
+#define MT6328_TOP_CLKSQ_RTC 0x0290
+#define MT6328_TOP_CLKSQ_RTC_SET 0x0292
+#define MT6328_TOP_CLKSQ_RTC_CLR 0x0294
+#define MT6328_TOP_CLK_TRIM 0x0296
+#define MT6328_TOP_RST_CON0 0x0298
+#define MT6328_TOP_RST_CON0_SET 0x029a
+#define MT6328_TOP_RST_CON0_CLR 0x029c
+#define MT6328_TOP_RST_CON1 0x029e
+#define MT6328_TOP_RST_MISC 0x02a0
+#define MT6328_TOP_RST_MISC_SET 0x02a2
+#define MT6328_TOP_RST_MISC_CLR 0x02a4
+#define MT6328_TOP_RST_STATUS 0x02a6
+#define MT6328_TOP_RST_STATUS_SET 0x02a8
+#define MT6328_TOP_RST_STATUS_CLR 0x02aa
+#define MT6328_INT_CON0 0x02ac
+#define MT6328_INT_CON0_SET 0x02ae
+#define MT6328_INT_CON0_CLR 0x02b0
+#define MT6328_INT_CON1 0x02b2
+#define MT6328_INT_CON1_SET 0x02b4
+#define MT6328_INT_CON1_CLR 0x02b6
+#define MT6328_INT_CON2 0x02b8
+#define MT6328_INT_CON2_SET 0x02ba
+#define MT6328_INT_CON2_CLR 0x02bc
+#define MT6328_INT_MISC_CON 0x02be
+#define MT6328_INT_MISC_CON_SET 0x02c0
+#define MT6328_INT_MISC_CON_CLR 0x02c2
+#define MT6328_INT_STATUS0 0x02c4
+#define MT6328_INT_STATUS1 0x02c6
+#define MT6328_INT_STATUS2 0x02c8
+#define MT6328_OC_GEAR_0 0x02ca
+#define MT6328_FQMTR_CON0 0x02cc
+#define MT6328_FQMTR_CON1 0x02ce
+#define MT6328_FQMTR_CON2 0x02d0
+#define MT6328_RG_SPI_CON 0x02d2
+#define MT6328_DEW_DIO_EN 0x02d4
+#define MT6328_DEW_READ_TEST 0x02d6
+#define MT6328_DEW_WRITE_TEST 0x02d8
+#define MT6328_DEW_CRC_SWRST 0x02da
+#define MT6328_DEW_CRC_EN 0x02dc
+#define MT6328_DEW_CRC_VAL 0x02de
+#define MT6328_DEW_DBG_MON_SEL 0x02e0
+#define MT6328_DEW_CIPHER_KEY_SEL 0x02e2
+#define MT6328_DEW_CIPHER_IV_SEL 0x02e4
+#define MT6328_DEW_CIPHER_EN 0x02e6
+#define MT6328_DEW_CIPHER_RDY 0x02e8
+#define MT6328_DEW_CIPHER_MODE 0x02ea
+#define MT6328_DEW_CIPHER_SWRST 0x02ec
+#define MT6328_DEW_RDDMY_NO 0x02ee
+#define MT6328_INT_TYPE_CON0 0x02f0
+#define MT6328_INT_TYPE_CON0_SET 0x02f2
+#define MT6328_INT_TYPE_CON0_CLR 0x02f4
+#define MT6328_INT_TYPE_CON1 0x02f6
+#define MT6328_INT_TYPE_CON1_SET 0x02f8
+#define MT6328_INT_TYPE_CON1_CLR 0x02fa
+#define MT6328_INT_TYPE_CON2 0x02fc
+#define MT6328_INT_TYPE_CON2_SET 0x02fe
+#define MT6328_INT_TYPE_CON2_CLR 0x0300
+#define MT6328_INT_STA 0x0302
+#define MT6328_BUCK_ALL_CON0 0x0400
+#define MT6328_BUCK_ALL_CON1 0x0402
+#define MT6328_BUCK_ALL_CON2 0x0404
+#define MT6328_BUCK_ALL_CON3 0x0406
+#define MT6328_BUCK_ALL_CON4 0x0408
+#define MT6328_BUCK_ALL_CON5 0x040a
+#define MT6328_BUCK_ALL_CON6 0x040c
+#define MT6328_BUCK_ALL_CON9 0x040e
+#define MT6328_BUCK_ALL_CON12 0x0410
+#define MT6328_BUCK_ALL_CON13 0x0412
+#define MT6328_BUCK_ALL_CON14 0x0414
+#define MT6328_BUCK_ALL_CON16 0x0416
+#define MT6328_BUCK_ALL_CON18 0x0418
+#define MT6328_BUCK_ALL_CON19 0x041a
+#define MT6328_BUCK_ALL_CON20 0x041c
+#define MT6328_BUCK_ALL_CON21 0x041e
+#define MT6328_BUCK_ALL_CON22 0x0420
+#define MT6328_BUCK_ALL_CON23 0x0422
+#define MT6328_BUCK_ALL_CON24 0x0424
+#define MT6328_BUCK_ALL_CON25 0x0426
+#define MT6328_BUCK_ALL_CON26 0x0428
+#define MT6328_BUCK_ALL_CON27 0x042a
+#define MT6328_BUCK_ALL_CON28 0x042c
+#define MT6328_SMPS_TOP_ANA_CON0 0x042e
+#define MT6328_SMPS_TOP_ANA_CON1 0x0430
+#define MT6328_SMPS_TOP_ANA_CON2 0x0432
+#define MT6328_SMPS_TOP_ANA_CON3 0x0434
+#define MT6328_SMPS_TOP_ANA_CON4 0x0436
+#define MT6328_SMPS_TOP_ANA_CON5 0x0438
+#define MT6328_SMPS_TOP_ANA_CON6 0x043a
+#define MT6328_SMPS_TOP_ANA_CON7 0x043c
+#define MT6328_SMPS_TOP_ANA_CON8 0x043e
+#define MT6328_VCORE_ANA_CON0 0x0440
+#define MT6328_VCORE_ANA_CON1 0x0442
+#define MT6328_VCORE_ANA_CON2 0x0444
+#define MT6328_VCORE_ANA_CON3 0x0446
+#define MT6328_VCORE_ANA_CON4 0x0448
+#define MT6328_VSYS22_ANA_CON0 0x044a
+#define MT6328_VSYS22_ANA_CON1 0x044c
+#define MT6328_VSYS22_ANA_CON2 0x044e
+#define MT6328_VSYS22_ANA_CON3 0x0450
+#define MT6328_VSYS22_ANA_CON4 0x0452
+#define MT6328_VPROC_ANA_CON0 0x0454
+#define MT6328_VPROC_ANA_CON1 0x0456
+#define MT6328_VPROC_ANA_CON2 0x0458
+#define MT6328_VPROC_ANA_CON3 0x045a
+#define MT6328_VPROC_ANA_CON4 0x045c
+#define MT6328_OSC32_ANA_CON0 0x045e
+#define MT6328_OSC32_ANA_CON1 0x0460
+#define MT6328_VPA_ANA_CON0 0x0462
+#define MT6328_VPA_ANA_CON1 0x0464
+#define MT6328_VPA_ANA_CON2 0x0466
+#define MT6328_VPA_ANA_CON3 0x0468
+#define MT6328_VLTE_ANA_CON0 0x046a
+#define MT6328_VLTE_ANA_CON1 0x046c
+#define MT6328_VLTE_ANA_CON2 0x046e
+#define MT6328_VLTE_ANA_CON3 0x0470
+#define MT6328_VLTE_ANA_CON4 0x0472
+#define MT6328_VPROC_CON0 0x0474
+#define MT6328_VPROC_CON1 0x0476
+#define MT6328_VPROC_CON2 0x0478
+#define MT6328_VPROC_CON3 0x047a
+#define MT6328_VPROC_CON4 0x047c
+#define MT6328_VPROC_CON5 0x047e
+#define MT6328_VPROC_CON6 0x0480
+#define MT6328_VPROC_CON7 0x0482
+#define MT6328_VPROC_CON8 0x0484
+#define MT6328_VPROC_CON9 0x0486
+#define MT6328_VPROC_CON10 0x0488
+#define MT6328_VPROC_CON11 0x048a
+#define MT6328_VPROC_CON12 0x048c
+#define MT6328_VPROC_CON13 0x048e
+#define MT6328_VPROC_CON14 0x0490
+#define MT6328_VPROC_CON15 0x0492
+#define MT6328_VPROC_CON16 0x0494
+#define MT6328_VPROC_CON17 0x0496
+#define MT6328_VPROC_CON18 0x0498
+#define MT6328_VPROC_CON19 0x049a
+#define MT6328_VSRAM_CON0 0x049c
+#define MT6328_VSRAM_CON1 0x049e
+#define MT6328_VSRAM_CON2 0x04a0
+#define MT6328_VSRAM_CON3 0x04a2
+#define MT6328_VSRAM_CON4 0x04a4
+#define MT6328_VSRAM_CON5 0x04a6
+#define MT6328_VSRAM_CON6 0x04a8
+#define MT6328_VSRAM_CON7 0x04aa
+#define MT6328_VSRAM_CON8 0x04ac
+#define MT6328_VSRAM_CON9 0x04ae
+#define MT6328_VSRAM_CON10 0x04b0
+#define MT6328_VSRAM_CON11 0x04b2
+#define MT6328_VSRAM_CON12 0x04b4
+#define MT6328_VSRAM_CON13 0x04b6
+#define MT6328_VSRAM_CON14 0x04b8
+#define MT6328_VSRAM_CON15 0x04ba
+#define MT6328_VSRAM_CON16 0x04bc
+#define MT6328_VSRAM_CON17 0x04be
+#define MT6328_VSRAM_CON18 0x04c0
+#define MT6328_VSRAM_CON19 0x04c2
+#define MT6328_VLTE_CON0 0x04c4
+#define MT6328_VLTE_CON1 0x04c6
+#define MT6328_VLTE_CON2 0x04c8
+#define MT6328_VLTE_CON3 0x04ca
+#define MT6328_VLTE_CON4 0x04cc
+#define MT6328_VLTE_CON5 0x04ce
+#define MT6328_VLTE_CON6 0x04d0
+#define MT6328_VLTE_CON7 0x04d2
+#define MT6328_VLTE_CON8 0x04d4
+#define MT6328_VLTE_CON9 0x04d6
+#define MT6328_VLTE_CON10 0x04d8
+#define MT6328_VLTE_CON11 0x04da
+#define MT6328_VLTE_CON12 0x04dc
+#define MT6328_VLTE_CON13 0x04de
+#define MT6328_VLTE_CON14 0x04e0
+#define MT6328_VLTE_CON15 0x04e2
+#define MT6328_VLTE_CON16 0x04e4
+#define MT6328_VLTE_CON17 0x04e6
+#define MT6328_VLTE_CON18 0x04e8
+#define MT6328_VLTE_CON19 0x04ea
+#define MT6328_VCORE1_CON0 0x0600
+#define MT6328_VCORE1_CON1 0x0602
+#define MT6328_VCORE1_CON2 0x0604
+#define MT6328_VCORE1_CON3 0x0606
+#define MT6328_VCORE1_CON4 0x0608
+#define MT6328_VCORE1_CON5 0x060a
+#define MT6328_VCORE1_CON6 0x060c
+#define MT6328_VCORE1_CON7 0x060e
+#define MT6328_VCORE1_CON8 0x0610
+#define MT6328_VCORE1_CON9 0x0612
+#define MT6328_VCORE1_CON10 0x0614
+#define MT6328_VCORE1_CON11 0x0616
+#define MT6328_VCORE1_CON12 0x0618
+#define MT6328_VCORE1_CON13 0x061a
+#define MT6328_VCORE1_CON14 0x061c
+#define MT6328_VCORE1_CON15 0x061e
+#define MT6328_VCORE1_CON16 0x0620
+#define MT6328_VCORE1_CON17 0x0622
+#define MT6328_VCORE1_CON18 0x0624
+#define MT6328_VCORE1_CON19 0x0626
+#define MT6328_VSYS22_CON0 0x0628
+#define MT6328_VSYS22_CON1 0x062a
+#define MT6328_VSYS22_CON2 0x062c
+#define MT6328_VSYS22_CON3 0x062e
+#define MT6328_VSYS22_CON4 0x0630
+#define MT6328_VSYS22_CON5 0x0632
+#define MT6328_VSYS22_CON6 0x0634
+#define MT6328_VSYS22_CON7 0x0636
+#define MT6328_VSYS22_CON8 0x0638
+#define MT6328_VSYS22_CON9 0x063a
+#define MT6328_VSYS22_CON10 0x063c
+#define MT6328_VSYS22_CON11 0x063e
+#define MT6328_VSYS22_CON12 0x0640
+#define MT6328_VSYS22_CON13 0x0642
+#define MT6328_VSYS22_CON14 0x0644
+#define MT6328_VSYS22_CON15 0x0646
+#define MT6328_VSYS22_CON16 0x0648
+#define MT6328_VSYS22_CON17 0x064a
+#define MT6328_VSYS22_CON18 0x064c
+#define MT6328_VSYS22_CON19 0x064e
+#define MT6328_VPA_CON0 0x0650
+#define MT6328_VPA_CON1 0x0652
+#define MT6328_VPA_CON2 0x0654
+#define MT6328_VPA_CON3 0x0656
+#define MT6328_VPA_CON4 0x0658
+#define MT6328_VPA_CON5 0x065a
+#define MT6328_VPA_CON6 0x065c
+#define MT6328_VPA_CON7 0x065e
+#define MT6328_VPA_CON8 0x0660
+#define MT6328_VPA_CON9 0x0662
+#define MT6328_VPA_CON10 0x0664
+#define MT6328_VPA_CON11 0x0666
+#define MT6328_VPA_CON12 0x0668
+#define MT6328_VPA_CON13 0x066a
+#define MT6328_VPA_CON14 0x066c
+#define MT6328_VPA_CON15 0x066e
+#define MT6328_VPA_CON16 0x0670
+#define MT6328_VPA_CON17 0x0672
+#define MT6328_VPA_CON18 0x0674
+#define MT6328_VPA_CON19 0x0676
+#define MT6328_VPA_CON20 0x0678
+#define MT6328_VPA_CON21 0x067a
+#define MT6328_VPA_CON22 0x067c
+#define MT6328_VPA_CON23 0x067e
+#define MT6328_VPA_CON24 0x0680
+#define MT6328_BUCK_K_CON0 0x0682
+#define MT6328_BUCK_K_CON1 0x0684
+#define MT6328_BUCK_K_CON2 0x0686
+#define MT6328_BUCK_K_CON3 0x0688
+#define MT6328_ZCD_CON0 0x0800
+#define MT6328_ZCD_CON1 0x0802
+#define MT6328_ZCD_CON2 0x0804
+#define MT6328_ZCD_CON3 0x0806
+#define MT6328_ZCD_CON4 0x0808
+#define MT6328_ZCD_CON5 0x080a
+#define MT6328_ISINK0_CON0 0x080c
+#define MT6328_ISINK0_CON1 0x080e
+#define MT6328_ISINK0_CON2 0x0810
+#define MT6328_ISINK0_CON3 0x0812
+#define MT6328_ISINK1_CON0 0x0814
+#define MT6328_ISINK1_CON1 0x0816
+#define MT6328_ISINK1_CON2 0x0818
+#define MT6328_ISINK1_CON3 0x081a
+#define MT6328_ISINK2_CON1 0x081c
+#define MT6328_ISINK3_CON1 0x081e
+#define MT6328_ISINK_ANA0 0x0820
+#define MT6328_ISINK_ANA1 0x0822
+#define MT6328_ISINK_PHASE_DLY 0x0824
+#define MT6328_ISINK_SFSTR 0x0826
+#define MT6328_ISINK_EN_CTRL 0x0828
+#define MT6328_ISINK_MODE_CTRL 0x082a
+#define MT6328_VTCXO_0_CON0 0x0a00
+#define MT6328_VTCXO_1_CON0 0x0a02
+#define MT6328_VAUD28_CON0 0x0a04
+#define MT6328_VAUX18_CON0 0x0a06
+#define MT6328_VRF18_0_CON0 0x0a08
+#define MT6328_VRF18_0_CON1 0x0a0a
+#define MT6328_VCAMA_CON0 0x0a0c
+#define MT6328_VCN28_CON0 0x0a0e
+#define MT6328_VCN33_CON0 0x0a10
+#define MT6328_VCN33_CON1 0x0a12
+#define MT6328_VCN33_CON2 0x0a14
+#define MT6328_VRF18_1_CON0 0x0a16
+#define MT6328_VRF18_1_CON1 0x0a18
+#define MT6328_VUSB33_CON0 0x0a1a
+#define MT6328_VMCH_CON0 0x0a1c
+#define MT6328_VMCH_CON1 0x0a1e
+#define MT6328_VMC_CON0 0x0a20
+#define MT6328_VMC_CON1 0x0a22
+#define MT6328_VEMC_3V3_CON0 0x0a24
+#define MT6328_VEMC_3V3_CON1 0x0a26
+#define MT6328_VIO28_CON0 0x0a28
+#define MT6328_VCAMAF_CON0 0x0a2a
+#define MT6328_VGP1_CON0 0x0a2c
+#define MT6328_VGP1_CON1 0x0a2e
+#define MT6328_VEFUSE_CON0 0x0a30
+#define MT6328_VSIM1_CON0 0x0a32
+#define MT6328_VSIM2_CON0 0x0a34
+#define MT6328_VIO18_CON0 0x0a36
+#define MT6328_VIBR_CON0 0x0a38
+#define MT6328_VCN18_CON0 0x0a3a
+#define MT6328_VCAM_CON0 0x0a3c
+#define MT6328_VCAMIO_CON0 0x0a3e
+#define MT6328_LDO_VSRAM_CON0 0x0a40
+#define MT6328_LDO_VSRAM_CON1 0x0a42
+#define MT6328_VTREF_CON0 0x0a44
+#define MT6328_VM_CON0 0x0a46
+#define MT6328_VM_CON1 0x0a48
+#define MT6328_VRTC_CON0 0x0a4a
+#define MT6328_LDO_OCFB0 0x0a4c
+#define MT6328_ALDO_ANA_CON0 0x0a4e
+#define MT6328_ADLDO_ANA_CON1 0x0a50
+#define MT6328_ADLDO_ANA_CON2 0x0a52
+#define MT6328_ADLDO_ANA_CON3 0x0a54
+#define MT6328_ADLDO_ANA_CON4 0x0a56
+#define MT6328_ADLDO_ANA_CON5 0x0a58
+#define MT6328_ADLDO_ANA_CON6 0x0a5a
+#define MT6328_ADLDO_ANA_CON7 0x0a5c
+#define MT6328_ADLDO_ANA_CON8 0x0a5e
+#define MT6328_ADLDO_ANA_CON9 0x0a60
+#define MT6328_ADLDO_ANA_CON10 0x0a62
+#define MT6328_ADLDO_ANA_CON11 0x0a64
+#define MT6328_ADLDO_ANA_CON12 0x0a66
+#define MT6328_ADLDO_ANA_CON13 0x0a68
+#define MT6328_DLDO_ANA_CON0 0x0a6a
+#define MT6328_DLDO_ANA_CON1 0x0a6c
+#define MT6328_DLDO_ANA_CON2 0x0a6e
+#define MT6328_DLDO_ANA_CON3 0x0a70
+#define MT6328_DLDO_ANA_CON4 0x0a72
+#define MT6328_DLDO_ANA_CON5 0x0a74
+#define MT6328_SLDO_ANA_CON0 0x0a76
+#define MT6328_SLDO_ANA_CON1 0x0a78
+#define MT6328_SLDO_ANA_CON2 0x0a7a
+#define MT6328_SLDO_ANA_CON3 0x0a7c
+#define MT6328_SLDO_ANA_CON4 0x0a7e
+#define MT6328_SLDO_ANA_CON5 0x0a80
+#define MT6328_SLDO_ANA_CON6 0x0a82
+#define MT6328_SLDO_ANA_CON7 0x0a84
+#define MT6328_SLDO_ANA_CON8 0x0a86
+#define MT6328_SLDO_ANA_CON9 0x0a88
+#define MT6328_SLDO_ANA_CON10 0x0a8a
+#define MT6328_LDO_RSV_CON0 0x0a8c
+#define MT6328_LDO_RSV_CON1 0x0a8e
+#define MT6328_SPK_CON0 0x0a90
+#define MT6328_SPK_CON1 0x0a92
+#define MT6328_SPK_CON2 0x0a94
+#define MT6328_SPK_CON3 0x0a96
+#define MT6328_SPK_CON4 0x0a98
+#define MT6328_SPK_CON5 0x0a9a
+#define MT6328_SPK_CON6 0x0a9c
+#define MT6328_SPK_CON7 0x0a9e
+#define MT6328_SPK_CON8 0x0aa0
+#define MT6328_SPK_CON9 0x0aa2
+#define MT6328_SPK_CON10 0x0aa4
+#define MT6328_SPK_CON11 0x0aa6
+#define MT6328_SPK_CON12 0x0aa8
+#define MT6328_SPK_CON13 0x0aaa
+#define MT6328_SPK_CON14 0x0aac
+#define MT6328_SPK_CON15 0x0aae
+#define MT6328_SPK_CON16 0x0ab0
+#define MT6328_SPK_ANA_CON0 0x0ab2
+#define MT6328_SPK_ANA_CON1 0x0ab4
+#define MT6328_SPK_ANA_CON3 0x0ab6
+#define MT6328_OTP_CON0 0x0c00
+#define MT6328_OTP_CON1 0x0c02
+#define MT6328_OTP_CON2 0x0c04
+#define MT6328_OTP_CON3 0x0c06
+#define MT6328_OTP_CON4 0x0c08
+#define MT6328_OTP_CON5 0x0c0a
+#define MT6328_OTP_CON6 0x0c0c
+#define MT6328_OTP_CON7 0x0c0e
+#define MT6328_OTP_CON8 0x0c10
+#define MT6328_OTP_CON9 0x0c12
+#define MT6328_OTP_CON10 0x0c14
+#define MT6328_OTP_CON11 0x0c16
+#define MT6328_OTP_CON12 0x0c18
+#define MT6328_OTP_CON13 0x0c1a
+#define MT6328_OTP_CON14 0x0c1c
+#define MT6328_OTP_DOUT_0_15 0x0c1e
+#define MT6328_OTP_DOUT_16_31 0x0c20
+#define MT6328_OTP_DOUT_32_47 0x0c22
+#define MT6328_OTP_DOUT_48_63 0x0c24
+#define MT6328_OTP_DOUT_64_79 0x0c26
+#define MT6328_OTP_DOUT_80_95 0x0c28
+#define MT6328_OTP_DOUT_96_111 0x0c2a
+#define MT6328_OTP_DOUT_112_127 0x0c2c
+#define MT6328_OTP_DOUT_128_143 0x0c2e
+#define MT6328_OTP_DOUT_144_159 0x0c30
+#define MT6328_OTP_DOUT_160_175 0x0c32
+#define MT6328_OTP_DOUT_176_191 0x0c34
+#define MT6328_OTP_DOUT_192_207 0x0c36
+#define MT6328_OTP_DOUT_208_223 0x0c38
+#define MT6328_OTP_DOUT_224_239 0x0c3a
+#define MT6328_OTP_DOUT_240_255 0x0c3c
+#define MT6328_OTP_DOUT_256_271 0x0c3e
+#define MT6328_OTP_DOUT_272_287 0x0c40
+#define MT6328_OTP_DOUT_288_303 0x0c42
+#define MT6328_OTP_DOUT_304_319 0x0c44
+#define MT6328_OTP_DOUT_320_335 0x0c46
+#define MT6328_OTP_DOUT_336_351 0x0c48
+#define MT6328_OTP_DOUT_352_367 0x0c4a
+#define MT6328_OTP_DOUT_368_383 0x0c4c
+#define MT6328_OTP_DOUT_384_399 0x0c4e
+#define MT6328_OTP_DOUT_400_415 0x0c50
+#define MT6328_OTP_DOUT_416_431 0x0c52
+#define MT6328_OTP_DOUT_432_447 0x0c54
+#define MT6328_OTP_DOUT_448_463 0x0c56
+#define MT6328_OTP_DOUT_464_479 0x0c58
+#define MT6328_OTP_DOUT_480_495 0x0c5a
+#define MT6328_OTP_DOUT_496_511 0x0c5c
+#define MT6328_OTP_VAL_0_15 0x0c5e
+#define MT6328_OTP_VAL_16_31 0x0c60
+#define MT6328_OTP_VAL_32_47 0x0c62
+#define MT6328_OTP_VAL_48_63 0x0c64
+#define MT6328_OTP_VAL_64_79 0x0c66
+#define MT6328_OTP_VAL_80_95 0x0c68
+#define MT6328_OTP_VAL_96_111 0x0c6a
+#define MT6328_OTP_VAL_112_127 0x0c6c
+#define MT6328_OTP_VAL_128_143 0x0c6e
+#define MT6328_OTP_VAL_144_159 0x0c70
+#define MT6328_OTP_VAL_160_175 0x0c72
+#define MT6328_OTP_VAL_176_191 0x0c74
+#define MT6328_OTP_VAL_192_207 0x0c76
+#define MT6328_OTP_VAL_208_223 0x0c78
+#define MT6328_OTP_VAL_224_239 0x0c7a
+#define MT6328_OTP_VAL_240_255 0x0c7c
+#define MT6328_OTP_VAL_256_271 0x0c7e
+#define MT6328_OTP_VAL_272_287 0x0c80
+#define MT6328_OTP_VAL_288_303 0x0c82
+#define MT6328_OTP_VAL_304_319 0x0c84
+#define MT6328_OTP_VAL_320_335 0x0c86
+#define MT6328_OTP_VAL_336_351 0x0c88
+#define MT6328_OTP_VAL_352_367 0x0c8a
+#define MT6328_OTP_VAL_368_383 0x0c8c
+#define MT6328_OTP_VAL_384_399 0x0c8e
+#define MT6328_OTP_VAL_400_415 0x0c90
+#define MT6328_OTP_VAL_416_431 0x0c92
+#define MT6328_OTP_VAL_432_447 0x0c94
+#define MT6328_OTP_VAL_448_463 0x0c96
+#define MT6328_OTP_VAL_464_479 0x0c98
+#define MT6328_OTP_VAL_480_495 0x0c9a
+#define MT6328_OTP_VAL_496_511 0x0c9c
+#define MT6328_RTC_MIX_CON0 0x0c9e
+#define MT6328_RTC_MIX_CON1 0x0ca0
+#define MT6328_RTC_MIX_CON2 0x0ca2
+#define MT6328_FGADC_CON0 0x0ca4
+#define MT6328_FGADC_CON1 0x0ca6
+#define MT6328_FGADC_CON2 0x0ca8
+#define MT6328_FGADC_CON3 0x0caa
+#define MT6328_FGADC_CON4 0x0cac
+#define MT6328_FGADC_CON5 0x0cae
+#define MT6328_FGADC_CON6 0x0cb0
+#define MT6328_FGADC_CON7 0x0cb2
+#define MT6328_FGADC_CON8 0x0cb4
+#define MT6328_FGADC_CON9 0x0cb6
+#define MT6328_FGADC_CON10 0x0cb8
+#define MT6328_FGADC_CON11 0x0cba
+#define MT6328_FGADC_CON12 0x0cbc
+#define MT6328_FGADC_CON13 0x0cbe
+#define MT6328_FGADC_CON14 0x0cc0
+#define MT6328_FGADC_CON15 0x0cc2
+#define MT6328_FGADC_CON16 0x0cc4
+#define MT6328_FGADC_CON17 0x0cc6
+#define MT6328_FGADC_CON18 0x0cc8
+#define MT6328_FGADC_CON19 0x0cca
+#define MT6328_FGADC_CON20 0x0ccc
+#define MT6328_FGADC_CON21 0x0cce
+#define MT6328_FGADC_CON22 0x0cd0
+#define MT6328_FGADC_CON23 0x0cd2
+#define MT6328_FGADC_CON24 0x0cd4
+#define MT6328_FGADC_CON25 0x0cd6
+#define MT6328_FGADC_CON26 0x0cd8
+#define MT6328_FGADC_CON27 0x0cda
+#define MT6328_AUDDEC_ANA_CON0 0x0cdc
+#define MT6328_AUDDEC_ANA_CON1 0x0cde
+#define MT6328_AUDDEC_ANA_CON2 0x0ce0
+#define MT6328_AUDDEC_ANA_CON3 0x0ce2
+#define MT6328_AUDDEC_ANA_CON4 0x0ce4
+#define MT6328_AUDDEC_ANA_CON5 0x0ce6
+#define MT6328_AUDDEC_ANA_CON6 0x0ce8
+#define MT6328_AUDDEC_ANA_CON7 0x0cea
+#define MT6328_AUDDEC_ANA_CON8 0x0cec
+#define MT6328_AUDENC_ANA_CON0 0x0cee
+#define MT6328_AUDENC_ANA_CON1 0x0cf0
+#define MT6328_AUDENC_ANA_CON2 0x0cf2
+#define MT6328_AUDENC_ANA_CON3 0x0cf4
+#define MT6328_AUDENC_ANA_CON4 0x0cf6
+#define MT6328_AUDENC_ANA_CON5 0x0cf8
+#define MT6328_AUDENC_ANA_CON6 0x0cfa
+#define MT6328_AUDENC_ANA_CON7 0x0cfc
+#define MT6328_AUDENC_ANA_CON8 0x0cfe
+#define MT6328_AUDENC_ANA_CON9 0x0d00
+#define MT6328_AUDENC_ANA_CON10 0x0d02
+#define MT6328_AUDNCP_CLKDIV_CON0 0x0d04
+#define MT6328_AUDNCP_CLKDIV_CON1 0x0d06
+#define MT6328_AUDNCP_CLKDIV_CON2 0x0d08
+#define MT6328_AUDNCP_CLKDIV_CON3 0x0d0a
+#define MT6328_AUDNCP_CLKDIV_CON4 0x0d0c
+#define MT6328_AUXADC_ADC0 0x0e00
+#define MT6328_AUXADC_ADC1 0x0e02
+#define MT6328_AUXADC_ADC2 0x0e04
+#define MT6328_AUXADC_ADC3 0x0e06
+#define MT6328_AUXADC_ADC4 0x0e08
+#define MT6328_AUXADC_ADC5 0x0e0a
+#define MT6328_AUXADC_ADC6 0x0e0c
+#define MT6328_AUXADC_ADC7 0x0e0e
+#define MT6328_AUXADC_ADC8 0x0e10
+#define MT6328_AUXADC_ADC9 0x0e12
+#define MT6328_AUXADC_ADC10 0x0e14
+#define MT6328_AUXADC_ADC11 0x0e16
+#define MT6328_AUXADC_ADC12 0x0e18
+#define MT6328_AUXADC_ADC13 0x0e1a
+#define MT6328_AUXADC_ADC14 0x0e1c
+#define MT6328_AUXADC_ADC15 0x0e1e
+#define MT6328_AUXADC_ADC16 0x0e20
+#define MT6328_AUXADC_ADC17 0x0e22
+#define MT6328_AUXADC_ADC18 0x0e24
+#define MT6328_AUXADC_ADC19 0x0e26
+#define MT6328_AUXADC_ADC20 0x0e28
+#define MT6328_AUXADC_ADC21 0x0e2a
+#define MT6328_AUXADC_ADC22 0x0e2c
+#define MT6328_AUXADC_ADC23 0x0e2e
+#define MT6328_AUXADC_ADC24 0x0e30
+#define MT6328_AUXADC_ADC25 0x0e32
+#define MT6328_AUXADC_ADC26 0x0e34
+#define MT6328_AUXADC_ADC27 0x0e36
+#define MT6328_AUXADC_ADC28 0x0e38
+#define MT6328_AUXADC_ADC29 0x0e3a
+#define MT6328_AUXADC_ADC30 0x0e3c
+#define MT6328_AUXADC_ADC31 0x0e3e
+#define MT6328_AUXADC_ADC32 0x0e40
+#define MT6328_AUXADC_ADC33 0x0e42
+#define MT6328_AUXADC_BUF0 0x0e44
+#define MT6328_AUXADC_BUF1 0x0e46
+#define MT6328_AUXADC_BUF2 0x0e48
+#define MT6328_AUXADC_BUF3 0x0e4a
+#define MT6328_AUXADC_BUF4 0x0e4c
+#define MT6328_AUXADC_BUF5 0x0e4e
+#define MT6328_AUXADC_BUF6 0x0e50
+#define MT6328_AUXADC_BUF7 0x0e52
+#define MT6328_AUXADC_BUF8 0x0e54
+#define MT6328_AUXADC_BUF9 0x0e56
+#define MT6328_AUXADC_BUF10 0x0e58
+#define MT6328_AUXADC_BUF11 0x0e5a
+#define MT6328_AUXADC_BUF12 0x0e5c
+#define MT6328_AUXADC_BUF13 0x0e5e
+#define MT6328_AUXADC_BUF14 0x0e60
+#define MT6328_AUXADC_BUF15 0x0e62
+#define MT6328_AUXADC_BUF16 0x0e64
+#define MT6328_AUXADC_BUF17 0x0e66
+#define MT6328_AUXADC_BUF18 0x0e68
+#define MT6328_AUXADC_BUF19 0x0e6a
+#define MT6328_AUXADC_BUF20 0x0e6c
+#define MT6328_AUXADC_BUF21 0x0e6e
+#define MT6328_AUXADC_BUF22 0x0e70
+#define MT6328_AUXADC_BUF23 0x0e72
+#define MT6328_AUXADC_BUF24 0x0e74
+#define MT6328_AUXADC_BUF25 0x0e76
+#define MT6328_AUXADC_BUF26 0x0e78
+#define MT6328_AUXADC_BUF27 0x0e7a
+#define MT6328_AUXADC_BUF28 0x0e7c
+#define MT6328_AUXADC_BUF29 0x0e7e
+#define MT6328_AUXADC_BUF30 0x0e80
+#define MT6328_AUXADC_BUF31 0x0e82
+#define MT6328_AUXADC_STA0 0x0e84
+#define MT6328_AUXADC_STA1 0x0e86
+#define MT6328_AUXADC_RQST0 0x0e88
+#define MT6328_AUXADC_RQST0_SET 0x0e8a
+#define MT6328_AUXADC_RQST0_CLR 0x0e8c
+#define MT6328_AUXADC_RQST1 0x0e8e
+#define MT6328_AUXADC_RQST1_SET 0x0e90
+#define MT6328_AUXADC_RQST1_CLR 0x0e92
+#define MT6328_AUXADC_CON0 0x0e94
+#define MT6328_AUXADC_CON0_SET 0x0e96
+#define MT6328_AUXADC_CON0_CLR 0x0e98
+#define MT6328_AUXADC_CON1 0x0e9a
+#define MT6328_AUXADC_CON2 0x0e9c
+#define MT6328_AUXADC_CON3 0x0e9e
+#define MT6328_AUXADC_CON4 0x0ea0
+#define MT6328_AUXADC_CON5 0x0ea2
+#define MT6328_AUXADC_CON6 0x0ea4
+#define MT6328_AUXADC_CON7 0x0ea6
+#define MT6328_AUXADC_CON8 0x0ea8
+#define MT6328_AUXADC_CON9 0x0eaa
+#define MT6328_AUXADC_CON10 0x0eac
+#define MT6328_AUXADC_CON11 0x0eae
+#define MT6328_AUXADC_CON12 0x0eb0
+#define MT6328_AUXADC_CON13 0x0eb2
+#define MT6328_AUXADC_CON14 0x0eb4
+#define MT6328_AUXADC_CON15 0x0eb6
+#define MT6328_AUXADC_CON16 0x0eb8
+#define MT6328_AUXADC_AUTORPT0 0x0eba
+#define MT6328_AUXADC_LBAT0 0x0ebc
+#define MT6328_AUXADC_LBAT1 0x0ebe
+#define MT6328_AUXADC_LBAT2 0x0ec0
+#define MT6328_AUXADC_LBAT3 0x0ec2
+#define MT6328_AUXADC_LBAT4 0x0ec4
+#define MT6328_AUXADC_LBAT5 0x0ec6
+#define MT6328_AUXADC_LBAT6 0x0ec8
+#define MT6328_AUXADC_ACCDET 0x0eca
+#define MT6328_AUXADC_THR0 0x0ecc
+#define MT6328_AUXADC_THR1 0x0ece
+#define MT6328_AUXADC_THR2 0x0ed0
+#define MT6328_AUXADC_THR3 0x0ed2
+#define MT6328_AUXADC_THR4 0x0ed4
+#define MT6328_AUXADC_THR5 0x0ed6
+#define MT6328_AUXADC_THR6 0x0ed8
+#define MT6328_AUXADC_EFUSE0 0x0eda
+#define MT6328_AUXADC_EFUSE1 0x0edc
+#define MT6328_AUXADC_EFUSE2 0x0ede
+#define MT6328_AUXADC_EFUSE3 0x0ee0
+#define MT6328_AUXADC_EFUSE4 0x0ee2
+#define MT6328_AUXADC_EFUSE5 0x0ee4
+#define MT6328_AUXADC_DBG0 0x0ee6
+#define MT6328_AUXADC_IMP0 0x0ee8
+#define MT6328_AUXADC_IMP1 0x0eea
+#define MT6328_AUXADC_VISMPS0_1 0x0eec
+#define MT6328_AUXADC_VISMPS0_2 0x0eee
+#define MT6328_AUXADC_VISMPS0_3 0x0ef0
+#define MT6328_AUXADC_VISMPS0_4 0x0ef2
+#define MT6328_AUXADC_VISMPS0_5 0x0ef4
+#define MT6328_AUXADC_VISMPS0_6 0x0ef6
+#define MT6328_AUXADC_VISMPS0_7 0x0ef8
+#define MT6328_AUXADC_LBAT2_1 0x0efa
+#define MT6328_AUXADC_LBAT2_2 0x0efc
+#define MT6328_AUXADC_LBAT2_3 0x0efe
+#define MT6328_AUXADC_LBAT2_4 0x0f00
+#define MT6328_AUXADC_LBAT2_5 0x0f02
+#define MT6328_AUXADC_LBAT2_6 0x0f04
+#define MT6328_AUXADC_LBAT2_7 0x0f06
+#define MT6328_AUXADC_MDBG_0 0x0f08
+#define MT6328_AUXADC_MDBG_1 0x0f0a
+#define MT6328_AUXADC_MDBG_2 0x0f0c
+#define MT6328_AUXADC_MDRT_0 0x0f0e
+#define MT6328_AUXADC_MDRT_1 0x0f10
+#define MT6328_AUXADC_MDRT_2 0x0f12
+#define MT6328_ACCDET_CON0 0x0f14
+#define MT6328_ACCDET_CON1 0x0f16
+#define MT6328_ACCDET_CON2 0x0f18
+#define MT6328_ACCDET_CON3 0x0f1a
+#define MT6328_ACCDET_CON4 0x0f1c
+#define MT6328_ACCDET_CON5 0x0f1e
+#define MT6328_ACCDET_CON6 0x0f20
+#define MT6328_ACCDET_CON7 0x0f22
+#define MT6328_ACCDET_CON8 0x0f24
+#define MT6328_ACCDET_CON9 0x0f26
+#define MT6328_ACCDET_CON10 0x0f28
+#define MT6328_ACCDET_CON11 0x0f2a
+#define MT6328_ACCDET_CON12 0x0f2c
+#define MT6328_ACCDET_CON13 0x0f2e
+#define MT6328_ACCDET_CON14 0x0f30
+#define MT6328_ACCDET_CON15 0x0f32
+#define MT6328_ACCDET_CON16 0x0f34
+#define MT6328_ACCDET_CON17 0x0f36
+#define MT6328_ACCDET_CON18 0x0f38
+#define MT6328_ACCDET_CON19 0x0f3a
+#define MT6328_ACCDET_CON20 0x0f3c
+#define MT6328_ACCDET_CON21 0x0f3e
+#define MT6328_ACCDET_CON22 0x0f40
+#define MT6328_ACCDET_CON23 0x0f42
+#define MT6328_ACCDET_CON24 0x0f44
+#define MT6328_ACCDET_CON25 0x0f46
+#define MT6328_CHR_CON0 0x0f48
+#define MT6328_CHR_CON1 0x0f4a
+#define MT6328_CHR_CON2 0x0f4c
+#define MT6328_CHR_CON3 0x0f4e
+#define MT6328_CHR_CON4 0x0f50
+#define MT6328_CHR_CON5 0x0f52
+#define MT6328_CHR_CON6 0x0f54
+#define MT6328_CHR_CON7 0x0f56
+#define MT6328_CHR_CON8 0x0f58
+#define MT6328_CHR_CON9 0x0f5a
+#define MT6328_CHR_CON10 0x0f5c
+#define MT6328_CHR_CON11 0x0f5e
+#define MT6328_CHR_CON12 0x0f60
+#define MT6328_CHR_CON13 0x0f62
+#define MT6328_CHR_CON14 0x0f64
+#define MT6328_CHR_CON15 0x0f66
+#define MT6328_CHR_CON16 0x0f68
+#define MT6328_CHR_CON17 0x0f6a
+#define MT6328_CHR_CON18 0x0f6c
+#define MT6328_CHR_CON19 0x0f6e
+#define MT6328_CHR_CON20 0x0f70
+#define MT6328_CHR_CON21 0x0f72
+#define MT6328_CHR_CON22 0x0f74
+#define MT6328_CHR_CON23 0x0f76
+#define MT6328_CHR_CON24 0x0f78
+#define MT6328_CHR_CON25 0x0f7a
+#define MT6328_CHR_CON26 0x0f7c
+#define MT6328_CHR_CON27 0x0f7e
+#define MT6328_CHR_CON28 0x0f80
+#define MT6328_CHR_CON29 0x0f82
+#define MT6328_CHR_CON30 0x0f84
+#define MT6328_CHR_CON31 0x0f86
+#define MT6328_CHR_CON32 0x0f88
+#define MT6328_CHR_CON33 0x0f8a
+#define MT6328_CHR_CON34 0x0f8c
+#define MT6328_CHR_CON35 0x0f8e
+#define MT6328_CHR_CON36 0x0f90
+#define MT6328_CHR_CON37 0x0f92
+#define MT6328_CHR_CON38 0x0f94
+#define MT6328_CHR_CON39 0x0f96
+#define MT6328_CHR_CON40 0x0f98
+#define MT6328_CHR_CON41 0x0f9a
+#define MT6328_CHR_CON42 0x0f9c
+#define MT6328_BATON_CON0 0x0f9e
+#define MT6328_CHR_CON43 0x0fa0
+#define MT6328_EOSC_CALI_CON0 0x0faa
+#define MT6328_EOSC_CALI_CON1 0x0fac
+#define MT6328_VRTC_PWM_CON0 0x0fae
+
+#endif /* __MFD_MT6328_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6331/core.h b/include/linux/mfd/mt6331/core.h
new file mode 100644
index 000000000000..df8e6b1e4bc1
--- /dev/null
+++ b/include/linux/mfd/mt6331/core.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_CORE_H__
+#define __MFD_MT6331_CORE_H__
+
+enum mt6331_irq_status_numbers {
+ MT6331_IRQ_STATUS_PWRKEY = 0,
+ MT6331_IRQ_STATUS_HOMEKEY,
+ MT6331_IRQ_STATUS_CHRDET,
+ MT6331_IRQ_STATUS_THR_H,
+ MT6331_IRQ_STATUS_THR_L,
+ MT6331_IRQ_STATUS_BAT_H,
+ MT6331_IRQ_STATUS_BAT_L,
+ MT6331_IRQ_STATUS_RTC,
+ MT6331_IRQ_STATUS_AUDIO,
+ MT6331_IRQ_STATUS_MAD,
+ MT6331_IRQ_STATUS_ACCDET,
+ MT6331_IRQ_STATUS_ACCDET_EINT,
+ MT6331_IRQ_STATUS_ACCDET_NEGV = 12,
+ MT6331_IRQ_STATUS_VDVFS11_OC = 16,
+ MT6331_IRQ_STATUS_VDVFS12_OC,
+ MT6331_IRQ_STATUS_VDVFS13_OC,
+ MT6331_IRQ_STATUS_VDVFS14_OC,
+ MT6331_IRQ_STATUS_GPU_OC,
+ MT6331_IRQ_STATUS_VCORE1_OC,
+ MT6331_IRQ_STATUS_VCORE2_OC,
+ MT6331_IRQ_STATUS_VIO18_OC,
+ MT6331_IRQ_STATUS_LDO_OC,
+ MT6331_IRQ_STATUS_NR,
+};
+
+#define MT6331_IRQ_CON0_BASE MT6331_IRQ_STATUS_PWRKEY
+#define MT6331_IRQ_CON0_BITS (MT6331_IRQ_STATUS_ACCDET_NEGV + 1)
+#define MT6331_IRQ_CON1_BASE MT6331_IRQ_STATUS_VDVFS11_OC
+#define MT6331_IRQ_CON1_BITS (MT6331_IRQ_STATUS_LDO_OC - MT6331_IRQ_STATUS_VDVFS11_OC + 1)
+
+#endif /* __MFD_MT6331_CORE_H__ */
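
The BASE/BITS pairs above compactly describe which contiguous run of status lines each interrupt control register covers; a short sketch of how a driver might derive masks from them (assumed usage, not from this patch; GENMASK() comes from <linux/bits.h>):

	#define MT6331_IRQ_CON0_MASK	GENMASK(MT6331_IRQ_CON0_BITS - 1, 0)
	#define MT6331_IRQ_CON1_MASK	GENMASK(MT6331_IRQ_CON1_BITS - 1, 0)

	/* True if a hardware IRQ number falls in the CON1 window (16..24). */
	static inline bool mt6331_irq_in_con1(unsigned int hwirq)
	{
		return hwirq >= MT6331_IRQ_CON1_BASE &&
		       hwirq < MT6331_IRQ_CON1_BASE + MT6331_IRQ_CON1_BITS;
	}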
diff --git a/include/linux/mfd/mt6331/registers.h b/include/linux/mfd/mt6331/registers.h
new file mode 100644
index 000000000000..e2be6bccd1a7
--- /dev/null
+++ b/include/linux/mfd/mt6331/registers.h
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_REGISTERS_H__
+#define __MFD_MT6331_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6331_STRUP_CON0 0x0
+#define MT6331_STRUP_CON2 0x2
+#define MT6331_STRUP_CON3 0x4
+#define MT6331_STRUP_CON4 0x6
+#define MT6331_STRUP_CON5 0x8
+#define MT6331_STRUP_CON6 0xA
+#define MT6331_STRUP_CON7 0xC
+#define MT6331_STRUP_CON8 0xE
+#define MT6331_STRUP_CON9 0x10
+#define MT6331_STRUP_CON10 0x12
+#define MT6331_STRUP_CON11 0x14
+#define MT6331_STRUP_CON12 0x16
+#define MT6331_STRUP_CON13 0x18
+#define MT6331_STRUP_CON14 0x1A
+#define MT6331_STRUP_CON15 0x1C
+#define MT6331_STRUP_CON16 0x1E
+#define MT6331_STRUP_CON17 0x20
+#define MT6331_STRUP_CON18 0x22
+#define MT6331_HWCID 0x100
+#define MT6331_SWCID 0x102
+#define MT6331_EXT_PMIC_STATUS 0x104
+#define MT6331_TOP_CON 0x106
+#define MT6331_TEST_OUT 0x108
+#define MT6331_TEST_CON0 0x10A
+#define MT6331_TEST_CON1 0x10C
+#define MT6331_TESTMODE_SW 0x10E
+#define MT6331_EN_STATUS0 0x110
+#define MT6331_EN_STATUS1 0x112
+#define MT6331_EN_STATUS2 0x114
+#define MT6331_OCSTATUS0 0x116
+#define MT6331_OCSTATUS1 0x118
+#define MT6331_OCSTATUS2 0x11A
+#define MT6331_PGSTATUS 0x11C
+#define MT6331_TOPSTATUS 0x11E
+#define MT6331_TDSEL_CON 0x120
+#define MT6331_RDSEL_CON 0x122
+#define MT6331_SMT_CON0 0x124
+#define MT6331_SMT_CON1 0x126
+#define MT6331_SMT_CON2 0x128
+#define MT6331_DRV_CON0 0x12A
+#define MT6331_DRV_CON1 0x12C
+#define MT6331_DRV_CON2 0x12E
+#define MT6331_DRV_CON3 0x130
+#define MT6331_TOP_STATUS 0x132
+#define MT6331_TOP_STATUS_SET 0x134
+#define MT6331_TOP_STATUS_CLR 0x136
+#define MT6331_TOP_CKPDN_CON0 0x138
+#define MT6331_TOP_CKPDN_CON0_SET 0x13A
+#define MT6331_TOP_CKPDN_CON0_CLR 0x13C
+#define MT6331_TOP_CKPDN_CON1 0x13E
+#define MT6331_TOP_CKPDN_CON1_SET 0x140
+#define MT6331_TOP_CKPDN_CON1_CLR 0x142
+#define MT6331_TOP_CKPDN_CON2 0x144
+#define MT6331_TOP_CKPDN_CON2_SET 0x146
+#define MT6331_TOP_CKPDN_CON2_CLR 0x148
+#define MT6331_TOP_CKSEL_CON 0x14A
+#define MT6331_TOP_CKSEL_CON_SET 0x14C
+#define MT6331_TOP_CKSEL_CON_CLR 0x14E
+#define MT6331_TOP_CKHWEN_CON 0x150
+#define MT6331_TOP_CKHWEN_CON_SET 0x152
+#define MT6331_TOP_CKHWEN_CON_CLR 0x154
+#define MT6331_TOP_CKTST_CON0 0x156
+#define MT6331_TOP_CKTST_CON1 0x158
+#define MT6331_TOP_CLKSQ 0x15A
+#define MT6331_TOP_CLKSQ_SET 0x15C
+#define MT6331_TOP_CLKSQ_CLR 0x15E
+#define MT6331_TOP_RST_CON 0x160
+#define MT6331_TOP_RST_CON_SET 0x162
+#define MT6331_TOP_RST_CON_CLR 0x164
+#define MT6331_TOP_RST_MISC 0x166
+#define MT6331_TOP_RST_MISC_SET 0x168
+#define MT6331_TOP_RST_MISC_CLR 0x16A
+#define MT6331_INT_CON0 0x16C
+#define MT6331_INT_CON0_SET 0x16E
+#define MT6331_INT_CON0_CLR 0x170
+#define MT6331_INT_CON1 0x172
+#define MT6331_INT_CON1_SET 0x174
+#define MT6331_INT_CON1_CLR 0x176
+#define MT6331_INT_MISC_CON 0x178
+#define MT6331_INT_MISC_CON_SET 0x17A
+#define MT6331_INT_MISC_CON_CLR 0x17C
+#define MT6331_INT_STATUS_CON0 0x17E
+#define MT6331_INT_STATUS_CON1 0x180
+#define MT6331_OC_GEAR_0 0x182
+#define MT6331_FQMTR_CON0 0x184
+#define MT6331_FQMTR_CON1 0x186
+#define MT6331_FQMTR_CON2 0x188
+#define MT6331_RG_SPI_CON 0x18A
+#define MT6331_DEW_DIO_EN 0x18C
+#define MT6331_DEW_READ_TEST 0x18E
+#define MT6331_DEW_WRITE_TEST 0x190
+#define MT6331_DEW_CRC_SWRST 0x192
+#define MT6331_DEW_CRC_EN 0x194
+#define MT6331_DEW_CRC_VAL 0x196
+#define MT6331_DEW_DBG_MON_SEL 0x198
+#define MT6331_DEW_CIPHER_KEY_SEL 0x19A
+#define MT6331_DEW_CIPHER_IV_SEL 0x19C
+#define MT6331_DEW_CIPHER_EN 0x19E
+#define MT6331_DEW_CIPHER_RDY 0x1A0
+#define MT6331_DEW_CIPHER_MODE 0x1A2
+#define MT6331_DEW_CIPHER_SWRST 0x1A4
+#define MT6331_DEW_RDDMY_NO 0x1A6
+#define MT6331_INT_TYPE_CON0 0x1A8
+#define MT6331_INT_TYPE_CON0_SET 0x1AA
+#define MT6331_INT_TYPE_CON0_CLR 0x1AC
+#define MT6331_INT_TYPE_CON1 0x1AE
+#define MT6331_INT_TYPE_CON1_SET 0x1B0
+#define MT6331_INT_TYPE_CON1_CLR 0x1B2
+#define MT6331_INT_STA 0x1B4
+#define MT6331_BUCK_ALL_CON0 0x200
+#define MT6331_BUCK_ALL_CON1 0x202
+#define MT6331_BUCK_ALL_CON2 0x204
+#define MT6331_BUCK_ALL_CON3 0x206
+#define MT6331_BUCK_ALL_CON4 0x208
+#define MT6331_BUCK_ALL_CON5 0x20A
+#define MT6331_BUCK_ALL_CON6 0x20C
+#define MT6331_BUCK_ALL_CON7 0x20E
+#define MT6331_BUCK_ALL_CON8 0x210
+#define MT6331_BUCK_ALL_CON9 0x212
+#define MT6331_BUCK_ALL_CON10 0x214
+#define MT6331_BUCK_ALL_CON11 0x216
+#define MT6331_BUCK_ALL_CON12 0x218
+#define MT6331_BUCK_ALL_CON13 0x21A
+#define MT6331_BUCK_ALL_CON14 0x21C
+#define MT6331_BUCK_ALL_CON15 0x21E
+#define MT6331_BUCK_ALL_CON16 0x220
+#define MT6331_BUCK_ALL_CON17 0x222
+#define MT6331_BUCK_ALL_CON18 0x224
+#define MT6331_BUCK_ALL_CON19 0x226
+#define MT6331_BUCK_ALL_CON20 0x228
+#define MT6331_BUCK_ALL_CON21 0x22A
+#define MT6331_BUCK_ALL_CON22 0x22C
+#define MT6331_BUCK_ALL_CON23 0x22E
+#define MT6331_BUCK_ALL_CON24 0x230
+#define MT6331_BUCK_ALL_CON25 0x232
+#define MT6331_BUCK_ALL_CON26 0x234
+#define MT6331_VDVFS11_CON0 0x236
+#define MT6331_VDVFS11_CON1 0x238
+#define MT6331_VDVFS11_CON2 0x23A
+#define MT6331_VDVFS11_CON3 0x23C
+#define MT6331_VDVFS11_CON4 0x23E
+#define MT6331_VDVFS11_CON5 0x240
+#define MT6331_VDVFS11_CON6 0x242
+#define MT6331_VDVFS11_CON7 0x244
+#define MT6331_VDVFS11_CON8 0x246
+#define MT6331_VDVFS11_CON9 0x248
+#define MT6331_VDVFS11_CON10 0x24A
+#define MT6331_VDVFS11_CON11 0x24C
+#define MT6331_VDVFS11_CON12 0x24E
+#define MT6331_VDVFS11_CON13 0x250
+#define MT6331_VDVFS11_CON14 0x252
+#define MT6331_VDVFS11_CON18 0x25A
+#define MT6331_VDVFS11_CON19 0x25C
+#define MT6331_VDVFS11_CON20 0x25E
+#define MT6331_VDVFS11_CON21 0x260
+#define MT6331_VDVFS11_CON22 0x262
+#define MT6331_VDVFS11_CON23 0x264
+#define MT6331_VDVFS11_CON24 0x266
+#define MT6331_VDVFS11_CON25 0x268
+#define MT6331_VDVFS11_CON26 0x26A
+#define MT6331_VDVFS11_CON27 0x26C
+#define MT6331_VDVFS12_CON0 0x26E
+#define MT6331_VDVFS12_CON1 0x270
+#define MT6331_VDVFS12_CON2 0x272
+#define MT6331_VDVFS12_CON3 0x274
+#define MT6331_VDVFS12_CON4 0x276
+#define MT6331_VDVFS12_CON5 0x278
+#define MT6331_VDVFS12_CON6 0x27A
+#define MT6331_VDVFS12_CON7 0x27C
+#define MT6331_VDVFS12_CON8 0x27E
+#define MT6331_VDVFS12_CON9 0x280
+#define MT6331_VDVFS12_CON10 0x282
+#define MT6331_VDVFS12_CON11 0x284
+#define MT6331_VDVFS12_CON12 0x286
+#define MT6331_VDVFS12_CON13 0x288
+#define MT6331_VDVFS12_CON14 0x28A
+#define MT6331_VDVFS12_CON18 0x292
+#define MT6331_VDVFS12_CON19 0x294
+#define MT6331_VDVFS12_CON20 0x296
+#define MT6331_VDVFS13_CON0 0x298
+#define MT6331_VDVFS13_CON1 0x29A
+#define MT6331_VDVFS13_CON2 0x29C
+#define MT6331_VDVFS13_CON3 0x29E
+#define MT6331_VDVFS13_CON4 0x2A0
+#define MT6331_VDVFS13_CON5 0x2A2
+#define MT6331_VDVFS13_CON6 0x2A4
+#define MT6331_VDVFS13_CON7 0x2A6
+#define MT6331_VDVFS13_CON8 0x2A8
+#define MT6331_VDVFS13_CON9 0x2AA
+#define MT6331_VDVFS13_CON10 0x2AC
+#define MT6331_VDVFS13_CON11 0x2AE
+#define MT6331_VDVFS13_CON12 0x2B0
+#define MT6331_VDVFS13_CON13 0x2B2
+#define MT6331_VDVFS13_CON14 0x2B4
+#define MT6331_VDVFS13_CON18 0x2BC
+#define MT6331_VDVFS13_CON19 0x2BE
+#define MT6331_VDVFS13_CON20 0x2C0
+#define MT6331_VDVFS14_CON0 0x2C2
+#define MT6331_VDVFS14_CON1 0x2C4
+#define MT6331_VDVFS14_CON2 0x2C6
+#define MT6331_VDVFS14_CON3 0x2C8
+#define MT6331_VDVFS14_CON4 0x2CA
+#define MT6331_VDVFS14_CON5 0x2CC
+#define MT6331_VDVFS14_CON6 0x2CE
+#define MT6331_VDVFS14_CON7 0x2D0
+#define MT6331_VDVFS14_CON8 0x2D2
+#define MT6331_VDVFS14_CON9 0x2D4
+#define MT6331_VDVFS14_CON10 0x2D6
+#define MT6331_VDVFS14_CON11 0x2D8
+#define MT6331_VDVFS14_CON12 0x2DA
+#define MT6331_VDVFS14_CON13 0x2DC
+#define MT6331_VDVFS14_CON14 0x2DE
+#define MT6331_VDVFS14_CON18 0x2E6
+#define MT6331_VDVFS14_CON19 0x2E8
+#define MT6331_VDVFS14_CON20 0x2EA
+#define MT6331_VGPU_CON0 0x300
+#define MT6331_VGPU_CON1 0x302
+#define MT6331_VGPU_CON2 0x304
+#define MT6331_VGPU_CON3 0x306
+#define MT6331_VGPU_CON4 0x308
+#define MT6331_VGPU_CON5 0x30A
+#define MT6331_VGPU_CON6 0x30C
+#define MT6331_VGPU_CON7 0x30E
+#define MT6331_VGPU_CON8 0x310
+#define MT6331_VGPU_CON9 0x312
+#define MT6331_VGPU_CON10 0x314
+#define MT6331_VGPU_CON11 0x316
+#define MT6331_VGPU_CON12 0x318
+#define MT6331_VGPU_CON13 0x31A
+#define MT6331_VGPU_CON14 0x31C
+#define MT6331_VGPU_CON15 0x31E
+#define MT6331_VGPU_CON16 0x320
+#define MT6331_VGPU_CON17 0x322
+#define MT6331_VGPU_CON18 0x324
+#define MT6331_VGPU_CON19 0x326
+#define MT6331_VGPU_CON20 0x328
+#define MT6331_VCORE1_CON0 0x32A
+#define MT6331_VCORE1_CON1 0x32C
+#define MT6331_VCORE1_CON2 0x32E
+#define MT6331_VCORE1_CON3 0x330
+#define MT6331_VCORE1_CON4 0x332
+#define MT6331_VCORE1_CON5 0x334
+#define MT6331_VCORE1_CON6 0x336
+#define MT6331_VCORE1_CON7 0x338
+#define MT6331_VCORE1_CON8 0x33A
+#define MT6331_VCORE1_CON9 0x33C
+#define MT6331_VCORE1_CON10 0x33E
+#define MT6331_VCORE1_CON11 0x340
+#define MT6331_VCORE1_CON12 0x342
+#define MT6331_VCORE1_CON13 0x344
+#define MT6331_VCORE1_CON14 0x346
+#define MT6331_VCORE1_CON15 0x348
+#define MT6331_VCORE1_CON16 0x34A
+#define MT6331_VCORE1_CON17 0x34C
+#define MT6331_VCORE1_CON18 0x34E
+#define MT6331_VCORE1_CON19 0x350
+#define MT6331_VCORE1_CON20 0x352
+#define MT6331_VCORE2_CON0 0x354
+#define MT6331_VCORE2_CON1 0x356
+#define MT6331_VCORE2_CON2 0x358
+#define MT6331_VCORE2_CON3 0x35A
+#define MT6331_VCORE2_CON4 0x35C
+#define MT6331_VCORE2_CON5 0x35E
+#define MT6331_VCORE2_CON6 0x360
+#define MT6331_VCORE2_CON7 0x362
+#define MT6331_VCORE2_CON8 0x364
+#define MT6331_VCORE2_CON9 0x366
+#define MT6331_VCORE2_CON10 0x368
+#define MT6331_VCORE2_CON11 0x36A
+#define MT6331_VCORE2_CON12 0x36C
+#define MT6331_VCORE2_CON13 0x36E
+#define MT6331_VCORE2_CON14 0x370
+#define MT6331_VCORE2_CON15 0x372
+#define MT6331_VCORE2_CON16 0x374
+#define MT6331_VCORE2_CON17 0x376
+#define MT6331_VCORE2_CON18 0x378
+#define MT6331_VCORE2_CON19 0x37A
+#define MT6331_VCORE2_CON20 0x37C
+#define MT6331_VCORE2_CON21 0x37E
+#define MT6331_VIO18_CON0 0x380
+#define MT6331_VIO18_CON1 0x382
+#define MT6331_VIO18_CON2 0x384
+#define MT6331_VIO18_CON3 0x386
+#define MT6331_VIO18_CON4 0x388
+#define MT6331_VIO18_CON5 0x38A
+#define MT6331_VIO18_CON6 0x38C
+#define MT6331_VIO18_CON7 0x38E
+#define MT6331_VIO18_CON8 0x390
+#define MT6331_VIO18_CON9 0x392
+#define MT6331_VIO18_CON10 0x394
+#define MT6331_VIO18_CON11 0x396
+#define MT6331_VIO18_CON12 0x398
+#define MT6331_VIO18_CON13 0x39A
+#define MT6331_VIO18_CON14 0x39C
+#define MT6331_VIO18_CON15 0x39E
+#define MT6331_VIO18_CON16 0x3A0
+#define MT6331_VIO18_CON17 0x3A2
+#define MT6331_VIO18_CON18 0x3A4
+#define MT6331_VIO18_CON19 0x3A6
+#define MT6331_VIO18_CON20 0x3A8
+#define MT6331_BUCK_K_CON0 0x3AA
+#define MT6331_BUCK_K_CON1 0x3AC
+#define MT6331_BUCK_K_CON2 0x3AE
+#define MT6331_BUCK_K_CON3 0x3B0
+#define MT6331_ZCD_CON0 0x400
+#define MT6331_ZCD_CON1 0x402
+#define MT6331_ZCD_CON2 0x404
+#define MT6331_ZCD_CON3 0x406
+#define MT6331_ZCD_CON4 0x408
+#define MT6331_ZCD_CON5 0x40A
+#define MT6331_ISINK0_CON0 0x40C
+#define MT6331_ISINK0_CON1 0x40E
+#define MT6331_ISINK0_CON2 0x410
+#define MT6331_ISINK0_CON3 0x412
+#define MT6331_ISINK0_CON4 0x414
+#define MT6331_ISINK1_CON0 0x416
+#define MT6331_ISINK1_CON1 0x418
+#define MT6331_ISINK1_CON2 0x41A
+#define MT6331_ISINK1_CON3 0x41C
+#define MT6331_ISINK1_CON4 0x41E
+#define MT6331_ISINK2_CON0 0x420
+#define MT6331_ISINK2_CON1 0x422
+#define MT6331_ISINK2_CON2 0x424
+#define MT6331_ISINK2_CON3 0x426
+#define MT6331_ISINK2_CON4 0x428
+#define MT6331_ISINK3_CON0 0x42A
+#define MT6331_ISINK3_CON1 0x42C
+#define MT6331_ISINK3_CON2 0x42E
+#define MT6331_ISINK3_CON3 0x430
+#define MT6331_ISINK3_CON4 0x432
+#define MT6331_ISINK_ANA0 0x434
+#define MT6331_ISINK_ANA1 0x436
+#define MT6331_ISINK_PHASE_DLY 0x438
+#define MT6331_ISINK_EN_CTRL 0x43A
+#define MT6331_ANALDO_CON0 0x500
+#define MT6331_ANALDO_CON1 0x502
+#define MT6331_ANALDO_CON2 0x504
+#define MT6331_ANALDO_CON3 0x506
+#define MT6331_ANALDO_CON4 0x508
+#define MT6331_ANALDO_CON5 0x50A
+#define MT6331_ANALDO_CON6 0x50C
+#define MT6331_ANALDO_CON7 0x50E
+#define MT6331_ANALDO_CON8 0x510
+#define MT6331_ANALDO_CON9 0x512
+#define MT6331_ANALDO_CON10 0x514
+#define MT6331_ANALDO_CON11 0x516
+#define MT6331_ANALDO_CON12 0x518
+#define MT6331_ANALDO_CON13 0x51A
+#define MT6331_SYSLDO_CON0 0x51C
+#define MT6331_SYSLDO_CON1 0x51E
+#define MT6331_SYSLDO_CON2 0x520
+#define MT6331_SYSLDO_CON3 0x522
+#define MT6331_SYSLDO_CON4 0x524
+#define MT6331_SYSLDO_CON5 0x526
+#define MT6331_SYSLDO_CON6 0x528
+#define MT6331_SYSLDO_CON7 0x52A
+#define MT6331_SYSLDO_CON8 0x52C
+#define MT6331_SYSLDO_CON9 0x52E
+#define MT6331_SYSLDO_CON10 0x530
+#define MT6331_SYSLDO_CON11 0x532
+#define MT6331_SYSLDO_CON12 0x534
+#define MT6331_SYSLDO_CON13 0x536
+#define MT6331_SYSLDO_CON14 0x538
+#define MT6331_SYSLDO_CON15 0x53A
+#define MT6331_SYSLDO_CON16 0x53C
+#define MT6331_SYSLDO_CON17 0x53E
+#define MT6331_SYSLDO_CON18 0x540
+#define MT6331_SYSLDO_CON19 0x542
+#define MT6331_SYSLDO_CON20 0x544
+#define MT6331_SYSLDO_CON21 0x546
+#define MT6331_DIGLDO_CON0 0x548
+#define MT6331_DIGLDO_CON1 0x54A
+#define MT6331_DIGLDO_CON2 0x54C
+#define MT6331_DIGLDO_CON3 0x54E
+#define MT6331_DIGLDO_CON4 0x550
+#define MT6331_DIGLDO_CON5 0x552
+#define MT6331_DIGLDO_CON6 0x554
+#define MT6331_DIGLDO_CON7 0x556
+#define MT6331_DIGLDO_CON8 0x558
+#define MT6331_DIGLDO_CON9 0x55A
+#define MT6331_DIGLDO_CON10 0x55C
+#define MT6331_DIGLDO_CON11 0x55E
+#define MT6331_DIGLDO_CON12 0x560
+#define MT6331_DIGLDO_CON13 0x562
+#define MT6331_DIGLDO_CON14 0x564
+#define MT6331_DIGLDO_CON15 0x566
+#define MT6331_DIGLDO_CON16 0x568
+#define MT6331_DIGLDO_CON17 0x56A
+#define MT6331_DIGLDO_CON18 0x56C
+#define MT6331_DIGLDO_CON19 0x56E
+#define MT6331_DIGLDO_CON20 0x570
+#define MT6331_DIGLDO_CON21 0x572
+#define MT6331_DIGLDO_CON22 0x574
+#define MT6331_DIGLDO_CON23 0x576
+#define MT6331_DIGLDO_CON24 0x578
+#define MT6331_DIGLDO_CON25 0x57A
+#define MT6331_DIGLDO_CON26 0x57C
+#define MT6331_DIGLDO_CON27 0x57E
+#define MT6331_DIGLDO_CON28 0x580
+#define MT6331_OTP_CON0 0x600
+#define MT6331_OTP_CON1 0x602
+#define MT6331_OTP_CON2 0x604
+#define MT6331_OTP_CON3 0x606
+#define MT6331_OTP_CON4 0x608
+#define MT6331_OTP_CON5 0x60A
+#define MT6331_OTP_CON6 0x60C
+#define MT6331_OTP_CON7 0x60E
+#define MT6331_OTP_CON8 0x610
+#define MT6331_OTP_CON9 0x612
+#define MT6331_OTP_CON10 0x614
+#define MT6331_OTP_CON11 0x616
+#define MT6331_OTP_CON12 0x618
+#define MT6331_OTP_CON13 0x61A
+#define MT6331_OTP_CON14 0x61C
+#define MT6331_OTP_DOUT_0_15 0x61E
+#define MT6331_OTP_DOUT_16_31 0x620
+#define MT6331_OTP_DOUT_32_47 0x622
+#define MT6331_OTP_DOUT_48_63 0x624
+#define MT6331_OTP_DOUT_64_79 0x626
+#define MT6331_OTP_DOUT_80_95 0x628
+#define MT6331_OTP_DOUT_96_111 0x62A
+#define MT6331_OTP_DOUT_112_127 0x62C
+#define MT6331_OTP_DOUT_128_143 0x62E
+#define MT6331_OTP_DOUT_144_159 0x630
+#define MT6331_OTP_DOUT_160_175 0x632
+#define MT6331_OTP_DOUT_176_191 0x634
+#define MT6331_OTP_DOUT_192_207 0x636
+#define MT6331_OTP_DOUT_208_223 0x638
+#define MT6331_OTP_DOUT_224_239 0x63A
+#define MT6331_OTP_DOUT_240_255 0x63C
+#define MT6331_OTP_VAL_0_15 0x63E
+#define MT6331_OTP_VAL_16_31 0x640
+#define MT6331_OTP_VAL_32_47 0x642
+#define MT6331_OTP_VAL_48_63 0x644
+#define MT6331_OTP_VAL_64_79 0x646
+#define MT6331_OTP_VAL_80_95 0x648
+#define MT6331_OTP_VAL_96_111 0x64A
+#define MT6331_OTP_VAL_112_127 0x64C
+#define MT6331_OTP_VAL_128_143 0x64E
+#define MT6331_OTP_VAL_144_159 0x650
+#define MT6331_OTP_VAL_160_175 0x652
+#define MT6331_OTP_VAL_176_191 0x654
+#define MT6331_OTP_VAL_192_207 0x656
+#define MT6331_OTP_VAL_208_223 0x658
+#define MT6331_OTP_VAL_224_239 0x65A
+#define MT6331_OTP_VAL_240_255 0x65C
+#define MT6331_RTC_MIX_CON0 0x65E
+#define MT6331_RTC_MIX_CON1 0x660
+#define MT6331_AUDDAC_CFG0 0x662
+#define MT6331_AUDBUF_CFG0 0x664
+#define MT6331_AUDBUF_CFG1 0x666
+#define MT6331_AUDBUF_CFG2 0x668
+#define MT6331_AUDBUF_CFG3 0x66A
+#define MT6331_AUDBUF_CFG4 0x66C
+#define MT6331_AUDBUF_CFG5 0x66E
+#define MT6331_AUDBUF_CFG6 0x670
+#define MT6331_AUDBUF_CFG7 0x672
+#define MT6331_AUDBUF_CFG8 0x674
+#define MT6331_IBIASDIST_CFG0 0x676
+#define MT6331_AUDCLKGEN_CFG0 0x678
+#define MT6331_AUDLDO_CFG0 0x67A
+#define MT6331_AUDDCDC_CFG0 0x67C
+#define MT6331_AUDDCDC_CFG1 0x67E
+#define MT6331_AUDNVREGGLB_CFG0 0x680
+#define MT6331_AUD_NCP0 0x682
+#define MT6331_AUD_ZCD_CFG0 0x684
+#define MT6331_AUDPREAMP_CFG0 0x686
+#define MT6331_AUDPREAMP_CFG1 0x688
+#define MT6331_AUDPREAMP_CFG2 0x68A
+#define MT6331_AUDADC_CFG0 0x68C
+#define MT6331_AUDADC_CFG1 0x68E
+#define MT6331_AUDADC_CFG2 0x690
+#define MT6331_AUDADC_CFG3 0x692
+#define MT6331_AUDADC_CFG4 0x694
+#define MT6331_AUDADC_CFG5 0x696
+#define MT6331_AUDDIGMI_CFG0 0x698
+#define MT6331_AUDDIGMI_CFG1 0x69A
+#define MT6331_AUDMICBIAS_CFG0 0x69C
+#define MT6331_AUDMICBIAS_CFG1 0x69E
+#define MT6331_AUDENCSPARE_CFG0 0x6A0
+#define MT6331_AUDPREAMPGAIN_CFG0 0x6A2
+#define MT6331_AUDMADPLL_CFG0 0x6A4
+#define MT6331_AUDMADPLL_CFG1 0x6A6
+#define MT6331_AUDMADPLL_CFG2 0x6A8
+#define MT6331_AUDLDO_NVREG_CFG0 0x6AA
+#define MT6331_AUDLDO_NVREG_CFG1 0x6AC
+#define MT6331_AUDLDO_NVREG_CFG2 0x6AE
+#define MT6331_AUXADC_ADC0 0x700
+#define MT6331_AUXADC_ADC1 0x702
+#define MT6331_AUXADC_ADC2 0x704
+#define MT6331_AUXADC_ADC3 0x706
+#define MT6331_AUXADC_ADC4 0x708
+#define MT6331_AUXADC_ADC5 0x70A
+#define MT6331_AUXADC_ADC6 0x70C
+#define MT6331_AUXADC_ADC7 0x70E
+#define MT6331_AUXADC_ADC8 0x710
+#define MT6331_AUXADC_ADC9 0x712
+#define MT6331_AUXADC_ADC10 0x714
+#define MT6331_AUXADC_ADC11 0x716
+#define MT6331_AUXADC_ADC12 0x718
+#define MT6331_AUXADC_ADC13 0x71A
+#define MT6331_AUXADC_ADC14 0x71C
+#define MT6331_AUXADC_ADC15 0x71E
+#define MT6331_AUXADC_ADC16 0x720
+#define MT6331_AUXADC_ADC17 0x722
+#define MT6331_AUXADC_ADC18 0x724
+#define MT6331_AUXADC_ADC19 0x726
+#define MT6331_AUXADC_STA0 0x728
+#define MT6331_AUXADC_STA1 0x72A
+#define MT6331_AUXADC_RQST0 0x72C
+#define MT6331_AUXADC_RQST0_SET 0x72E
+#define MT6331_AUXADC_RQST0_CLR 0x730
+#define MT6331_AUXADC_RQST1 0x732
+#define MT6331_AUXADC_RQST1_SET 0x734
+#define MT6331_AUXADC_RQST1_CLR 0x736
+#define MT6331_AUXADC_CON0 0x738
+#define MT6331_AUXADC_CON1 0x73A
+#define MT6331_AUXADC_CON2 0x73C
+#define MT6331_AUXADC_CON3 0x73E
+#define MT6331_AUXADC_CON4 0x740
+#define MT6331_AUXADC_CON5 0x742
+#define MT6331_AUXADC_CON6 0x744
+#define MT6331_AUXADC_CON7 0x746
+#define MT6331_AUXADC_CON8 0x748
+#define MT6331_AUXADC_CON9 0x74A
+#define MT6331_AUXADC_CON10 0x74C
+#define MT6331_AUXADC_CON11 0x74E
+#define MT6331_AUXADC_CON12 0x750
+#define MT6331_AUXADC_CON13 0x752
+#define MT6331_AUXADC_CON14 0x754
+#define MT6331_AUXADC_CON15 0x756
+#define MT6331_AUXADC_CON16 0x758
+#define MT6331_AUXADC_CON17 0x75A
+#define MT6331_AUXADC_CON18 0x75C
+#define MT6331_AUXADC_CON19 0x75E
+#define MT6331_AUXADC_CON20 0x760
+#define MT6331_AUXADC_CON21 0x762
+#define MT6331_AUXADC_CON22 0x764
+#define MT6331_AUXADC_CON23 0x766
+#define MT6331_AUXADC_CON24 0x768
+#define MT6331_AUXADC_CON25 0x76A
+#define MT6331_AUXADC_CON26 0x76C
+#define MT6331_AUXADC_CON27 0x76E
+#define MT6331_AUXADC_CON28 0x770
+#define MT6331_AUXADC_CON29 0x772
+#define MT6331_AUXADC_CON30 0x774
+#define MT6331_AUXADC_CON31 0x776
+#define MT6331_AUXADC_CON32 0x778
+#define MT6331_ACCDET_CON0 0x77A
+#define MT6331_ACCDET_CON1 0x77C
+#define MT6331_ACCDET_CON2 0x77E
+#define MT6331_ACCDET_CON3 0x780
+#define MT6331_ACCDET_CON4 0x782
+#define MT6331_ACCDET_CON5 0x784
+#define MT6331_ACCDET_CON6 0x786
+#define MT6331_ACCDET_CON7 0x788
+#define MT6331_ACCDET_CON8 0x78A
+#define MT6331_ACCDET_CON9 0x78C
+#define MT6331_ACCDET_CON10 0x78E
+#define MT6331_ACCDET_CON11 0x790
+#define MT6331_ACCDET_CON12 0x792
+#define MT6331_ACCDET_CON13 0x794
+#define MT6331_ACCDET_CON14 0x796
+#define MT6331_ACCDET_CON15 0x798
+#define MT6331_ACCDET_CON16 0x79A
+#define MT6331_ACCDET_CON17 0x79C
+#define MT6331_ACCDET_CON18 0x79E
+#define MT6331_ACCDET_CON19 0x7A0
+#define MT6331_ACCDET_CON20 0x7A2
+#define MT6331_ACCDET_CON21 0x7A4
+#define MT6331_ACCDET_CON22 0x7A6
+#define MT6331_ACCDET_CON23 0x7A8
+#define MT6331_ACCDET_CON24 0x7AA
+
+#endif /* __MFD_MT6331_REGISTERS_H__ */
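
Many of the control registers above come in REG/REG_SET/REG_CLR triplets (for instance MT6331_INT_TYPE_CON0 and its _SET/_CLR companions). On register maps of this family that naming conventionally means a write to the _SET or _CLR address ORs in or masks out only the written bits, avoiding a read-modify-write over the SPI wrapper — an inference from the naming, not something the patch states. A minimal sketch under that assumption, with hypothetical helper names and a regmap assumed to come from the MFD parent:

#include <linux/bits.h>
#include <linux/mfd/mt6331/registers.h>
#include <linux/regmap.h>

/* Set one bit in INT_TYPE_CON0 without touching the others. */
static int mt6331_int_type_set(struct regmap *regmap, unsigned int bit)
{
	return regmap_write(regmap, MT6331_INT_TYPE_CON0_SET, BIT(bit));
}

/* Clear one bit in INT_TYPE_CON0 without touching the others. */
static int mt6331_int_type_clr(struct regmap *regmap, unsigned int bit)
{
	return regmap_write(regmap, MT6331_INT_TYPE_CON0_CLR, BIT(bit));
}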
diff --git a/include/linux/mfd/mt6332/core.h b/include/linux/mfd/mt6332/core.h
new file mode 100644
index 000000000000..cd6013eb82d9
--- /dev/null
+++ b/include/linux/mfd/mt6332/core.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_CORE_H__
+#define __MFD_MT6332_CORE_H__
+
+enum mt6332_irq_status_numbers {
+ MT6332_IRQ_STATUS_CHR_COMPLETE = 0,
+ MT6332_IRQ_STATUS_THERMAL_SD,
+ MT6332_IRQ_STATUS_THERMAL_REG_IN,
+ MT6332_IRQ_STATUS_THERMAL_REG_OUT,
+ MT6332_IRQ_STATUS_OTG_OC,
+ MT6332_IRQ_STATUS_CHR_OC,
+ MT6332_IRQ_STATUS_OTG_THERMAL,
+ MT6332_IRQ_STATUS_CHRIN_SHORT,
+ MT6332_IRQ_STATUS_DRVCDT_SHORT,
+ MT6332_IRQ_STATUS_PLUG_IN_FLASH,
+ MT6332_IRQ_STATUS_CHRWDT_FLAG,
+ MT6332_IRQ_STATUS_FLASH_EN_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_OPEN = 13,
+ MT6332_IRQ_STATUS_OV = 16,
+ MT6332_IRQ_STATUS_BVALID_DET,
+ MT6332_IRQ_STATUS_VBATON_UNDET,
+ MT6332_IRQ_STATUS_CHR_PLUG_IN,
+ MT6332_IRQ_STATUS_CHR_PLUG_OUT,
+ MT6332_IRQ_STATUS_BC11_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_OPEN = 23,
+ MT6332_IRQ_STATUS_THR_H = 32,
+ MT6332_IRQ_STATUS_THR_L,
+ MT6332_IRQ_STATUS_BAT_H,
+ MT6332_IRQ_STATUS_BAT_L,
+ MT6332_IRQ_STATUS_M3_H,
+ MT6332_IRQ_STATUS_M3_L,
+ MT6332_IRQ_STATUS_FG_BAT_H,
+ MT6332_IRQ_STATUS_FG_BAT_L,
+ MT6332_IRQ_STATUS_FG_CUR_H,
+ MT6332_IRQ_STATUS_FG_CUR_L,
+ MT6332_IRQ_STATUS_SPKL_D,
+ MT6332_IRQ_STATUS_SPKL_AB,
+ MT6332_IRQ_STATUS_BIF,
+ MT6332_IRQ_STATUS_VWLED_OC = 45,
+ MT6332_IRQ_STATUS_VDRAM_OC = 48,
+ MT6332_IRQ_STATUS_VDVFS2_OC,
+ MT6332_IRQ_STATUS_VRF1_OC,
+ MT6332_IRQ_STATUS_VRF2_OC,
+ MT6332_IRQ_STATUS_VPA_OC,
+ MT6332_IRQ_STATUS_VSBST_OC,
+ MT6332_IRQ_STATUS_LDO_OC,
+ MT6332_IRQ_STATUS_NR,
+};
+
+#define MT6332_IRQ_CON0_BASE MT6332_IRQ_STATUS_CHR_COMPLETE
+#define MT6332_IRQ_CON0_BITS (MT6332_IRQ_STATUS_FLASH_VLED1_OPEN + 1)
+#define MT6332_IRQ_CON1_BASE MT6332_IRQ_STATUS_OV
+#define MT6332_IRQ_CON1_BITS (MT6332_IRQ_STATUS_FLASH_VLED2_OPEN - MT6332_IRQ_STATUS_OV + 1)
+#define MT6332_IRQ_CON2_BASE MT6332_IRQ_STATUS_THR_H
+#define MT6332_IRQ_CON2_BITS (MT6332_IRQ_STATUS_VWLED_OC - MT6332_IRQ_STATUS_THR_H + 1)
+#define MT6332_IRQ_CON3_BASE MT6332_IRQ_STATUS_VDRAM_OC
+#define MT6332_IRQ_CON3_BITS (MT6332_IRQ_STATUS_LDO_OC - MT6332_IRQ_STATUS_VDRAM_OC + 1)
+
+#endif /* __MFD_MT6332_CORE_H__ */
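
The *_BASE/*_BITS pairs above carve the mt6332_irq_status_numbers space into four banks of at most 16 bits each, matching the INT_CON0..INT_CON3 and INT_STATUS0..INT_STATUS3 registers in mt6332/registers.h. A self-contained sketch (the table and helper are illustrative, not from the patch) that recovers the bank index and bit position for a given status number:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mfd/mt6332/core.h>

static const struct {
	unsigned int base;
	unsigned int bits;
} mt6332_irq_banks[] = {
	{ MT6332_IRQ_CON0_BASE, MT6332_IRQ_CON0_BITS },
	{ MT6332_IRQ_CON1_BASE, MT6332_IRQ_CON1_BITS },
	{ MT6332_IRQ_CON2_BASE, MT6332_IRQ_CON2_BITS },
	{ MT6332_IRQ_CON3_BASE, MT6332_IRQ_CON3_BITS },
};

/* Returns the bank index (0..3) and stores the bit offset, or -EINVAL. */
static int mt6332_irq_to_bank(unsigned int hwirq, unsigned int *bit)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mt6332_irq_banks); i++) {
		if (hwirq >= mt6332_irq_banks[i].base &&
		    hwirq <  mt6332_irq_banks[i].base + mt6332_irq_banks[i].bits) {
			*bit = hwirq - mt6332_irq_banks[i].base;
			return i;
		}
	}

	return -EINVAL;
}

For example, MT6332_IRQ_STATUS_FG_CUR_H (status number 40) falls in bank 2 at bit 8, i.e. the CON2/STATUS2 pair; the gaps the enum leaves (14-15, 24-31, 46-47) fall outside every bank and return -EINVAL.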
diff --git a/include/linux/mfd/mt6332/registers.h b/include/linux/mfd/mt6332/registers.h
new file mode 100644
index 000000000000..65e0b86fceac
--- /dev/null
+++ b/include/linux/mfd/mt6332/registers.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_REGISTERS_H__
+#define __MFD_MT6332_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6332_HWCID 0x8000
+#define MT6332_SWCID 0x8002
+#define MT6332_TOP_CON 0x8004
+#define MT6332_DDR_VREF_AP_CON 0x8006
+#define MT6332_DDR_VREF_DQ_CON 0x8008
+#define MT6332_DDR_VREF_CA_CON 0x800A
+#define MT6332_TEST_OUT 0x800C
+#define MT6332_TEST_CON0 0x800E
+#define MT6332_TEST_CON1 0x8010
+#define MT6332_TESTMODE_SW 0x8012
+#define MT6332_TESTMODE_ANA 0x8014
+#define MT6332_TDSEL_CON 0x8016
+#define MT6332_RDSEL_CON 0x8018
+#define MT6332_SMT_CON0 0x801A
+#define MT6332_SMT_CON1 0x801C
+#define MT6332_DRV_CON0 0x801E
+#define MT6332_DRV_CON1 0x8020
+#define MT6332_DRV_CON2 0x8022
+#define MT6332_EN_STATUS0 0x8024
+#define MT6332_OCSTATUS0 0x8026
+#define MT6332_TOP_STATUS 0x8028
+#define MT6332_TOP_STATUS_SET 0x802A
+#define MT6332_TOP_STATUS_CLR 0x802C
+#define MT6332_FLASH_CON0 0x802E
+#define MT6332_FLASH_CON1 0x8030
+#define MT6332_FLASH_CON2 0x8032
+#define MT6332_CORE_CON0 0x8034
+#define MT6332_CORE_CON1 0x8036
+#define MT6332_CORE_CON2 0x8038
+#define MT6332_CORE_CON3 0x803A
+#define MT6332_CORE_CON4 0x803C
+#define MT6332_CORE_CON5 0x803E
+#define MT6332_CORE_CON6 0x8040
+#define MT6332_CORE_CON7 0x8042
+#define MT6332_CORE_CON8 0x8044
+#define MT6332_CORE_CON9 0x8046
+#define MT6332_CORE_CON10 0x8048
+#define MT6332_CORE_CON11 0x804A
+#define MT6332_CORE_CON12 0x804C
+#define MT6332_CORE_CON13 0x804E
+#define MT6332_CORE_CON14 0x8050
+#define MT6332_CORE_CON15 0x8052
+#define MT6332_STA_CON0 0x8054
+#define MT6332_STA_CON1 0x8056
+#define MT6332_STA_CON2 0x8058
+#define MT6332_STA_CON3 0x805A
+#define MT6332_STA_CON4 0x805C
+#define MT6332_STA_CON5 0x805E
+#define MT6332_STA_CON6 0x8060
+#define MT6332_STA_CON7 0x8062
+#define MT6332_CHR_CON0 0x8064
+#define MT6332_CHR_CON1 0x8066
+#define MT6332_CHR_CON2 0x8068
+#define MT6332_CHR_CON3 0x806A
+#define MT6332_CHR_CON4 0x806C
+#define MT6332_CHR_CON5 0x806E
+#define MT6332_CHR_CON6 0x8070
+#define MT6332_CHR_CON7 0x8072
+#define MT6332_CHR_CON8 0x8074
+#define MT6332_CHR_CON9 0x8076
+#define MT6332_CHR_CON10 0x8078
+#define MT6332_CHR_CON11 0x807A
+#define MT6332_CHR_CON12 0x807C
+#define MT6332_CHR_CON13 0x807E
+#define MT6332_CHR_CON14 0x8080
+#define MT6332_CHR_CON15 0x8082
+#define MT6332_BOOST_CON0 0x8084
+#define MT6332_BOOST_CON1 0x8086
+#define MT6332_BOOST_CON2 0x8088
+#define MT6332_BOOST_CON3 0x808A
+#define MT6332_BOOST_CON4 0x808C
+#define MT6332_BOOST_CON5 0x808E
+#define MT6332_BOOST_CON6 0x8090
+#define MT6332_BOOST_CON7 0x8092
+#define MT6332_TOP_CKPDN_CON0 0x8094
+#define MT6332_TOP_CKPDN_CON0_SET 0x8096
+#define MT6332_TOP_CKPDN_CON0_CLR 0x8098
+#define MT6332_TOP_CKPDN_CON1 0x809A
+#define MT6332_TOP_CKPDN_CON1_SET 0x809C
+#define MT6332_TOP_CKPDN_CON1_CLR 0x809E
+#define MT6332_TOP_CKPDN_CON2 0x80A0
+#define MT6332_TOP_CKPDN_CON2_SET 0x80A2
+#define MT6332_TOP_CKPDN_CON2_CLR 0x80A4
+#define MT6332_TOP_CKSEL_CON0 0x80A6
+#define MT6332_TOP_CKSEL_CON0_SET 0x80A8
+#define MT6332_TOP_CKSEL_CON0_CLR 0x80AA
+#define MT6332_TOP_CKSEL_CON1 0x80AC
+#define MT6332_TOP_CKSEL_CON1_SET 0x80AE
+#define MT6332_TOP_CKSEL_CON1_CLR 0x80B0
+#define MT6332_TOP_CKHWEN_CON 0x80B2
+#define MT6332_TOP_CKHWEN_CON_SET 0x80B4
+#define MT6332_TOP_CKHWEN_CON_CLR 0x80B6
+#define MT6332_TOP_CKTST_CON0 0x80B8
+#define MT6332_TOP_CKTST_CON1 0x80BA
+#define MT6332_TOP_RST_CON 0x80BC
+#define MT6332_TOP_RST_CON_SET 0x80BE
+#define MT6332_TOP_RST_CON_CLR 0x80C0
+#define MT6332_TOP_RST_MISC 0x80C2
+#define MT6332_TOP_RST_MISC_SET 0x80C4
+#define MT6332_TOP_RST_MISC_CLR 0x80C6
+#define MT6332_INT_CON0 0x80C8
+#define MT6332_INT_CON0_SET 0x80CA
+#define MT6332_INT_CON0_CLR 0x80CC
+#define MT6332_INT_CON1 0x80CE
+#define MT6332_INT_CON1_SET 0x80D0
+#define MT6332_INT_CON1_CLR 0x80D2
+#define MT6332_INT_CON2 0x80D4
+#define MT6332_INT_CON2_SET 0x80D6
+#define MT6332_INT_CON2_CLR 0x80D8
+#define MT6332_INT_CON3 0x80DA
+#define MT6332_INT_CON3_SET 0x80DC
+#define MT6332_INT_CON3_CLR 0x80DE
+#define MT6332_CHRWDT_CON0 0x80E0
+#define MT6332_CHRWDT_STATUS0 0x80E2
+#define MT6332_INT_STATUS0 0x80E4
+#define MT6332_INT_STATUS1 0x80E6
+#define MT6332_INT_STATUS2 0x80E8
+#define MT6332_INT_STATUS3 0x80EA
+#define MT6332_OC_GEAR_0 0x80EC
+#define MT6332_OC_GEAR_1 0x80EE
+#define MT6332_OC_GEAR_2 0x80F0
+#define MT6332_INT_MISC_CON 0x80F2
+#define MT6332_RG_SPI_CON 0x80F4
+#define MT6332_DEW_DIO_EN 0x80F6
+#define MT6332_DEW_READ_TEST 0x80F8
+#define MT6332_DEW_WRITE_TEST 0x80FA
+#define MT6332_DEW_CRC_SWRST 0x80FC
+#define MT6332_DEW_CRC_EN 0x80FE
+#define MT6332_DEW_CRC_VAL 0x8100
+#define MT6332_DEW_DBG_MON_SEL 0x8102
+#define MT6332_DEW_CIPHER_KEY_SEL 0x8104
+#define MT6332_DEW_CIPHER_IV_SEL 0x8106
+#define MT6332_DEW_CIPHER_EN 0x8108
+#define MT6332_DEW_CIPHER_RDY 0x810A
+#define MT6332_DEW_CIPHER_MODE 0x810C
+#define MT6332_DEW_CIPHER_SWRST 0x810E
+#define MT6332_DEW_RDDMY_NO 0x8110
+#define MT6332_INT_STA 0x8112
+#define MT6332_BIF_CON0 0x8114
+#define MT6332_BIF_CON1 0x8116
+#define MT6332_BIF_CON2 0x8118
+#define MT6332_BIF_CON3 0x811A
+#define MT6332_BIF_CON4 0x811C
+#define MT6332_BIF_CON5 0x811E
+#define MT6332_BIF_CON6 0x8120
+#define MT6332_BIF_CON7 0x8122
+#define MT6332_BIF_CON8 0x8124
+#define MT6332_BIF_CON9 0x8126
+#define MT6332_BIF_CON10 0x8128
+#define MT6332_BIF_CON11 0x812A
+#define MT6332_BIF_CON12 0x812C
+#define MT6332_BIF_CON13 0x812E
+#define MT6332_BIF_CON14 0x8130
+#define MT6332_BIF_CON15 0x8132
+#define MT6332_BIF_CON16 0x8134
+#define MT6332_BIF_CON17 0x8136
+#define MT6332_BIF_CON18 0x8138
+#define MT6332_BIF_CON19 0x813A
+#define MT6332_BIF_CON20 0x813C
+#define MT6332_BIF_CON21 0x813E
+#define MT6332_BIF_CON22 0x8140
+#define MT6332_BIF_CON23 0x8142
+#define MT6332_BIF_CON24 0x8144
+#define MT6332_BIF_CON25 0x8146
+#define MT6332_BIF_CON26 0x8148
+#define MT6332_BIF_CON27 0x814A
+#define MT6332_BIF_CON28 0x814C
+#define MT6332_BIF_CON29 0x814E
+#define MT6332_BIF_CON30 0x8150
+#define MT6332_BIF_CON31 0x8152
+#define MT6332_BIF_CON32 0x8154
+#define MT6332_BIF_CON33 0x8156
+#define MT6332_BIF_CON34 0x8158
+#define MT6332_BIF_CON35 0x815A
+#define MT6332_BIF_CON36 0x815C
+#define MT6332_BATON_CON0 0x815E
+#define MT6332_BIF_CON37 0x8160
+#define MT6332_BIF_CON38 0x8162
+#define MT6332_CHR_CON16 0x8164
+#define MT6332_CHR_CON17 0x8166
+#define MT6332_CHR_CON18 0x8168
+#define MT6332_CHR_CON19 0x816A
+#define MT6332_CHR_CON20 0x816C
+#define MT6332_CHR_CON21 0x816E
+#define MT6332_CHR_CON22 0x8170
+#define MT6332_CHR_CON23 0x8172
+#define MT6332_CHR_CON24 0x8174
+#define MT6332_CHR_CON25 0x8176
+#define MT6332_STA_CON8 0x8178
+#define MT6332_BUCK_ALL_CON0 0x8400
+#define MT6332_BUCK_ALL_CON1 0x8402
+#define MT6332_BUCK_ALL_CON2 0x8404
+#define MT6332_BUCK_ALL_CON3 0x8406
+#define MT6332_BUCK_ALL_CON4 0x8408
+#define MT6332_BUCK_ALL_CON5 0x840A
+#define MT6332_BUCK_ALL_CON6 0x840C
+#define MT6332_BUCK_ALL_CON7 0x840E
+#define MT6332_BUCK_ALL_CON8 0x8410
+#define MT6332_BUCK_ALL_CON9 0x8412
+#define MT6332_BUCK_ALL_CON10 0x8414
+#define MT6332_BUCK_ALL_CON11 0x8416
+#define MT6332_BUCK_ALL_CON12 0x8418
+#define MT6332_BUCK_ALL_CON13 0x841A
+#define MT6332_BUCK_ALL_CON14 0x841C
+#define MT6332_BUCK_ALL_CON15 0x841E
+#define MT6332_BUCK_ALL_CON16 0x8420
+#define MT6332_BUCK_ALL_CON17 0x8422
+#define MT6332_BUCK_ALL_CON18 0x8424
+#define MT6332_BUCK_ALL_CON19 0x8426
+#define MT6332_BUCK_ALL_CON20 0x8428
+#define MT6332_BUCK_ALL_CON21 0x842A
+#define MT6332_BUCK_ALL_CON22 0x842C
+#define MT6332_BUCK_ALL_CON23 0x842E
+#define MT6332_BUCK_ALL_CON24 0x8430
+#define MT6332_BUCK_ALL_CON25 0x8432
+#define MT6332_BUCK_ALL_CON26 0x8434
+#define MT6332_BUCK_ALL_CON27 0x8436
+#define MT6332_VDRAM_CON0 0x8438
+#define MT6332_VDRAM_CON1 0x843A
+#define MT6332_VDRAM_CON2 0x843C
+#define MT6332_VDRAM_CON3 0x843E
+#define MT6332_VDRAM_CON4 0x8440
+#define MT6332_VDRAM_CON5 0x8442
+#define MT6332_VDRAM_CON6 0x8444
+#define MT6332_VDRAM_CON7 0x8446
+#define MT6332_VDRAM_CON8 0x8448
+#define MT6332_VDRAM_CON9 0x844A
+#define MT6332_VDRAM_CON10 0x844C
+#define MT6332_VDRAM_CON11 0x844E
+#define MT6332_VDRAM_CON12 0x8450
+#define MT6332_VDRAM_CON13 0x8452
+#define MT6332_VDRAM_CON14 0x8454
+#define MT6332_VDRAM_CON15 0x8456
+#define MT6332_VDRAM_CON16 0x8458
+#define MT6332_VDRAM_CON17 0x845A
+#define MT6332_VDRAM_CON18 0x845C
+#define MT6332_VDRAM_CON19 0x845E
+#define MT6332_VDRAM_CON20 0x8460
+#define MT6332_VDRAM_CON21 0x8462
+#define MT6332_VDVFS2_CON0 0x8464
+#define MT6332_VDVFS2_CON1 0x8466
+#define MT6332_VDVFS2_CON2 0x8468
+#define MT6332_VDVFS2_CON3 0x846A
+#define MT6332_VDVFS2_CON4 0x846C
+#define MT6332_VDVFS2_CON5 0x846E
+#define MT6332_VDVFS2_CON6 0x8470
+#define MT6332_VDVFS2_CON7 0x8472
+#define MT6332_VDVFS2_CON8 0x8474
+#define MT6332_VDVFS2_CON9 0x8476
+#define MT6332_VDVFS2_CON10 0x8478
+#define MT6332_VDVFS2_CON11 0x847A
+#define MT6332_VDVFS2_CON12 0x847C
+#define MT6332_VDVFS2_CON13 0x847E
+#define MT6332_VDVFS2_CON14 0x8480
+#define MT6332_VDVFS2_CON15 0x8482
+#define MT6332_VDVFS2_CON16 0x8484
+#define MT6332_VDVFS2_CON17 0x8486
+#define MT6332_VDVFS2_CON18 0x8488
+#define MT6332_VDVFS2_CON19 0x848A
+#define MT6332_VDVFS2_CON20 0x848C
+#define MT6332_VDVFS2_CON21 0x848E
+#define MT6332_VDVFS2_CON22 0x8490
+#define MT6332_VDVFS2_CON23 0x8492
+#define MT6332_VDVFS2_CON24 0x8494
+#define MT6332_VDVFS2_CON25 0x8496
+#define MT6332_VDVFS2_CON26 0x8498
+#define MT6332_VDVFS2_CON27 0x849A
+#define MT6332_VRF1_CON0 0x849C
+#define MT6332_VRF1_CON1 0x849E
+#define MT6332_VRF1_CON2 0x84A0
+#define MT6332_VRF1_CON3 0x84A2
+#define MT6332_VRF1_CON4 0x84A4
+#define MT6332_VRF1_CON5 0x84A6
+#define MT6332_VRF1_CON6 0x84A8
+#define MT6332_VRF1_CON7 0x84AA
+#define MT6332_VRF1_CON8 0x84AC
+#define MT6332_VRF1_CON9 0x84AE
+#define MT6332_VRF1_CON10 0x84B0
+#define MT6332_VRF1_CON11 0x84B2
+#define MT6332_VRF1_CON12 0x84B4
+#define MT6332_VRF1_CON13 0x84B6
+#define MT6332_VRF1_CON14 0x84B8
+#define MT6332_VRF1_CON15 0x84BA
+#define MT6332_VRF1_CON16 0x84BC
+#define MT6332_VRF1_CON17 0x84BE
+#define MT6332_VRF1_CON18 0x84C0
+#define MT6332_VRF1_CON19 0x84C2
+#define MT6332_VRF1_CON20 0x84C4
+#define MT6332_VRF1_CON21 0x84C6
+#define MT6332_VRF2_CON0 0x84C8
+#define MT6332_VRF2_CON1 0x84CA
+#define MT6332_VRF2_CON2 0x84CC
+#define MT6332_VRF2_CON3 0x84CE
+#define MT6332_VRF2_CON4 0x84D0
+#define MT6332_VRF2_CON5 0x84D2
+#define MT6332_VRF2_CON6 0x84D4
+#define MT6332_VRF2_CON7 0x84D6
+#define MT6332_VRF2_CON8 0x84D8
+#define MT6332_VRF2_CON9 0x84DA
+#define MT6332_VRF2_CON10 0x84DC
+#define MT6332_VRF2_CON11 0x84DE
+#define MT6332_VRF2_CON12 0x84E0
+#define MT6332_VRF2_CON13 0x84E2
+#define MT6332_VRF2_CON14 0x84E4
+#define MT6332_VRF2_CON15 0x84E6
+#define MT6332_VRF2_CON16 0x84E8
+#define MT6332_VRF2_CON17 0x84EA
+#define MT6332_VRF2_CON18 0x84EC
+#define MT6332_VRF2_CON19 0x84EE
+#define MT6332_VRF2_CON20 0x84F0
+#define MT6332_VRF2_CON21 0x84F2
+#define MT6332_VPA_CON0 0x84F4
+#define MT6332_VPA_CON1 0x84F6
+#define MT6332_VPA_CON2 0x84F8
+#define MT6332_VPA_CON3 0x84FC
+#define MT6332_VPA_CON4 0x84FE
+#define MT6332_VPA_CON5 0x8500
+#define MT6332_VPA_CON6 0x8502
+#define MT6332_VPA_CON7 0x8504
+#define MT6332_VPA_CON8 0x8506
+#define MT6332_VPA_CON9 0x8508
+#define MT6332_VPA_CON10 0x850A
+#define MT6332_VPA_CON11 0x850C
+#define MT6332_VPA_CON12 0x850E
+#define MT6332_VPA_CON13 0x8510
+#define MT6332_VPA_CON14 0x8512
+#define MT6332_VPA_CON15 0x8514
+#define MT6332_VPA_CON16 0x8516
+#define MT6332_VPA_CON17 0x8518
+#define MT6332_VPA_CON18 0x851A
+#define MT6332_VPA_CON19 0x851C
+#define MT6332_VPA_CON20 0x851E
+#define MT6332_VPA_CON21 0x8520
+#define MT6332_VPA_CON22 0x8522
+#define MT6332_VPA_CON23 0x8524
+#define MT6332_VPA_CON24 0x8526
+#define MT6332_VPA_CON25 0x8528
+#define MT6332_VSBST_CON0 0x852A
+#define MT6332_VSBST_CON1 0x852C
+#define MT6332_VSBST_CON2 0x852E
+#define MT6332_VSBST_CON3 0x8530
+#define MT6332_VSBST_CON4 0x8532
+#define MT6332_VSBST_CON5 0x8534
+#define MT6332_VSBST_CON6 0x8536
+#define MT6332_VSBST_CON7 0x8538
+#define MT6332_VSBST_CON8 0x853A
+#define MT6332_VSBST_CON9 0x853C
+#define MT6332_VSBST_CON10 0x853E
+#define MT6332_VSBST_CON11 0x8540
+#define MT6332_VSBST_CON12 0x8542
+#define MT6332_VSBST_CON13 0x8544
+#define MT6332_VSBST_CON14 0x8546
+#define MT6332_VSBST_CON15 0x8548
+#define MT6332_VSBST_CON16 0x854A
+#define MT6332_VSBST_CON17 0x854C
+#define MT6332_VSBST_CON18 0x854E
+#define MT6332_VSBST_CON19 0x8550
+#define MT6332_VSBST_CON20 0x8552
+#define MT6332_VSBST_CON21 0x8554
+#define MT6332_BUCK_K_CON0 0x8556
+#define MT6332_BUCK_K_CON1 0x8558
+#define MT6332_BUCK_K_CON2 0x855A
+#define MT6332_BUCK_K_CON3 0x855C
+#define MT6332_BUCK_K_CON4 0x855E
+#define MT6332_BUCK_K_CON5 0x8560
+#define MT6332_AUXADC_ADC0 0x8800
+#define MT6332_AUXADC_ADC1 0x8802
+#define MT6332_AUXADC_ADC2 0x8804
+#define MT6332_AUXADC_ADC3 0x8806
+#define MT6332_AUXADC_ADC4 0x8808
+#define MT6332_AUXADC_ADC5 0x880A
+#define MT6332_AUXADC_ADC6 0x880C
+#define MT6332_AUXADC_ADC7 0x880E
+#define MT6332_AUXADC_ADC8 0x8810
+#define MT6332_AUXADC_ADC9 0x8812
+#define MT6332_AUXADC_ADC10 0x8814
+#define MT6332_AUXADC_ADC11 0x8816
+#define MT6332_AUXADC_ADC12 0x8818
+#define MT6332_AUXADC_ADC13 0x881A
+#define MT6332_AUXADC_ADC14 0x881C
+#define MT6332_AUXADC_ADC15 0x881E
+#define MT6332_AUXADC_ADC16 0x8820
+#define MT6332_AUXADC_ADC17 0x8822
+#define MT6332_AUXADC_ADC18 0x8824
+#define MT6332_AUXADC_ADC19 0x8826
+#define MT6332_AUXADC_ADC20 0x8828
+#define MT6332_AUXADC_ADC21 0x882A
+#define MT6332_AUXADC_ADC22 0x882C
+#define MT6332_AUXADC_ADC23 0x882E
+#define MT6332_AUXADC_ADC24 0x8830
+#define MT6332_AUXADC_ADC25 0x8832
+#define MT6332_AUXADC_ADC26 0x8834
+#define MT6332_AUXADC_ADC27 0x8836
+#define MT6332_AUXADC_ADC28 0x8838
+#define MT6332_AUXADC_ADC29 0x883A
+#define MT6332_AUXADC_ADC30 0x883C
+#define MT6332_AUXADC_ADC31 0x883E
+#define MT6332_AUXADC_ADC32 0x8840
+#define MT6332_AUXADC_ADC33 0x8842
+#define MT6332_AUXADC_ADC34 0x8844
+#define MT6332_AUXADC_ADC35 0x8846
+#define MT6332_AUXADC_ADC36 0x8848
+#define MT6332_AUXADC_ADC37 0x884A
+#define MT6332_AUXADC_ADC38 0x884C
+#define MT6332_AUXADC_ADC39 0x884E
+#define MT6332_AUXADC_ADC40 0x8850
+#define MT6332_AUXADC_ADC41 0x8852
+#define MT6332_AUXADC_ADC42 0x8854
+#define MT6332_AUXADC_ADC43 0x8856
+#define MT6332_AUXADC_STA0 0x8858
+#define MT6332_AUXADC_STA1 0x885A
+#define MT6332_AUXADC_RQST0 0x885C
+#define MT6332_AUXADC_RQST0_SET 0x885E
+#define MT6332_AUXADC_RQST0_CLR 0x8860
+#define MT6332_AUXADC_RQST1 0x8862
+#define MT6332_AUXADC_RQST1_SET 0x8864
+#define MT6332_AUXADC_RQST1_CLR 0x8866
+#define MT6332_AUXADC_CON0 0x8868
+#define MT6332_AUXADC_CON1 0x886A
+#define MT6332_AUXADC_CON2 0x886C
+#define MT6332_AUXADC_CON3 0x886E
+#define MT6332_AUXADC_CON4 0x8870
+#define MT6332_AUXADC_CON5 0x8872
+#define MT6332_AUXADC_CON6 0x8874
+#define MT6332_AUXADC_CON7 0x8876
+#define MT6332_AUXADC_CON8 0x8878
+#define MT6332_AUXADC_CON9 0x887A
+#define MT6332_AUXADC_CON10 0x887C
+#define MT6332_AUXADC_CON11 0x887E
+#define MT6332_AUXADC_CON12 0x8880
+#define MT6332_AUXADC_CON13 0x8882
+#define MT6332_AUXADC_CON14 0x8884
+#define MT6332_AUXADC_CON15 0x8886
+#define MT6332_AUXADC_CON16 0x8888
+#define MT6332_AUXADC_CON17 0x888A
+#define MT6332_AUXADC_CON18 0x888C
+#define MT6332_AUXADC_CON19 0x888E
+#define MT6332_AUXADC_CON20 0x8890
+#define MT6332_AUXADC_CON21 0x8892
+#define MT6332_AUXADC_CON22 0x8894
+#define MT6332_AUXADC_CON23 0x8896
+#define MT6332_AUXADC_CON24 0x8898
+#define MT6332_AUXADC_CON25 0x889A
+#define MT6332_AUXADC_CON26 0x889C
+#define MT6332_AUXADC_CON27 0x889E
+#define MT6332_AUXADC_CON28 0x88A0
+#define MT6332_AUXADC_CON29 0x88A2
+#define MT6332_AUXADC_CON30 0x88A4
+#define MT6332_AUXADC_CON31 0x88A6
+#define MT6332_AUXADC_CON32 0x88A8
+#define MT6332_AUXADC_CON33 0x88AA
+#define MT6332_AUXADC_CON34 0x88AC
+#define MT6332_AUXADC_CON35 0x88AE
+#define MT6332_AUXADC_CON36 0x88B0
+#define MT6332_AUXADC_CON37 0x88B2
+#define MT6332_AUXADC_CON38 0x88B4
+#define MT6332_AUXADC_CON39 0x88B6
+#define MT6332_AUXADC_CON40 0x88B8
+#define MT6332_AUXADC_CON41 0x88BA
+#define MT6332_AUXADC_CON42 0x88BC
+#define MT6332_AUXADC_CON43 0x88BE
+#define MT6332_AUXADC_CON44 0x88C0
+#define MT6332_AUXADC_CON45 0x88C2
+#define MT6332_AUXADC_CON46 0x88C4
+#define MT6332_AUXADC_CON47 0x88C6
+#define MT6332_STRUP_CONA0 0x8C00
+#define MT6332_STRUP_CONA1 0x8C02
+#define MT6332_STRUP_CONA2 0x8C04
+#define MT6332_STRUP_CON0 0x8C06
+#define MT6332_STRUP_CON2 0x8C08
+#define MT6332_STRUP_CON3 0x8C0A
+#define MT6332_STRUP_CON4 0x8C0C
+#define MT6332_STRUP_CON5 0x8C0E
+#define MT6332_STRUP_CON6 0x8C10
+#define MT6332_STRUP_CON7 0x8C12
+#define MT6332_STRUP_CON8 0x8C14
+#define MT6332_STRUP_CON9 0x8C16
+#define MT6332_STRUP_CON10 0x8C18
+#define MT6332_STRUP_CON11 0x8C1A
+#define MT6332_STRUP_CON12 0x8C1C
+#define MT6332_STRUP_CON13 0x8C1E
+#define MT6332_STRUP_CON14 0x8C20
+#define MT6332_STRUP_CON15 0x8C22
+#define MT6332_STRUP_CON16 0x8C24
+#define MT6332_STRUP_CON17 0x8C26
+#define MT6332_FGADC_CON0 0x8C28
+#define MT6332_FGADC_CON1 0x8C2A
+#define MT6332_FGADC_CON2 0x8C2C
+#define MT6332_FGADC_CON3 0x8C2E
+#define MT6332_FGADC_CON4 0x8C30
+#define MT6332_FGADC_CON5 0x8C32
+#define MT6332_FGADC_CON6 0x8C34
+#define MT6332_FGADC_CON7 0x8C36
+#define MT6332_FGADC_CON8 0x8C38
+#define MT6332_FGADC_CON9 0x8C3A
+#define MT6332_FGADC_CON10 0x8C3C
+#define MT6332_FGADC_CON11 0x8C3E
+#define MT6332_FGADC_CON12 0x8C40
+#define MT6332_FGADC_CON13 0x8C42
+#define MT6332_FGADC_CON14 0x8C44
+#define MT6332_FGADC_CON15 0x8C46
+#define MT6332_FGADC_CON16 0x8C48
+#define MT6332_FGADC_CON17 0x8C4A
+#define MT6332_FGADC_CON18 0x8C4C
+#define MT6332_FGADC_CON19 0x8C4E
+#define MT6332_FGADC_CON20 0x8C50
+#define MT6332_FGADC_CON21 0x8C52
+#define MT6332_FGADC_CON22 0x8C54
+#define MT6332_OTP_CON0 0x8C56
+#define MT6332_OTP_CON1 0x8C58
+#define MT6332_OTP_CON2 0x8C5A
+#define MT6332_OTP_CON3 0x8C5C
+#define MT6332_OTP_CON4 0x8C5E
+#define MT6332_OTP_CON5 0x8C60
+#define MT6332_OTP_CON6 0x8C62
+#define MT6332_OTP_CON7 0x8C64
+#define MT6332_OTP_CON8 0x8C66
+#define MT6332_OTP_CON9 0x8C68
+#define MT6332_OTP_CON10 0x8C6A
+#define MT6332_OTP_CON11 0x8C6C
+#define MT6332_OTP_CON12 0x8C6E
+#define MT6332_OTP_CON13 0x8C70
+#define MT6332_OTP_CON14 0x8C72
+#define MT6332_OTP_DOUT_0_15 0x8C74
+#define MT6332_OTP_DOUT_16_31 0x8C76
+#define MT6332_OTP_DOUT_32_47 0x8C78
+#define MT6332_OTP_DOUT_48_63 0x8C7A
+#define MT6332_OTP_DOUT_64_79 0x8C7C
+#define MT6332_OTP_DOUT_80_95 0x8C7E
+#define MT6332_OTP_DOUT_96_111 0x8C80
+#define MT6332_OTP_DOUT_112_127 0x8C82
+#define MT6332_OTP_DOUT_128_143 0x8C84
+#define MT6332_OTP_DOUT_144_159 0x8C86
+#define MT6332_OTP_DOUT_160_175 0x8C88
+#define MT6332_OTP_DOUT_176_191 0x8C8A
+#define MT6332_OTP_DOUT_192_207 0x8C8C
+#define MT6332_OTP_DOUT_208_223 0x8C8E
+#define MT6332_OTP_DOUT_224_239 0x8C90
+#define MT6332_OTP_DOUT_240_255 0x8C92
+#define MT6332_OTP_VAL_0_15 0x8C94
+#define MT6332_OTP_VAL_16_31 0x8C96
+#define MT6332_OTP_VAL_32_47 0x8C98
+#define MT6332_OTP_VAL_48_63 0x8C9A
+#define MT6332_OTP_VAL_64_79 0x8C9C
+#define MT6332_OTP_VAL_80_95 0x8C9E
+#define MT6332_OTP_VAL_96_111 0x8CA0
+#define MT6332_OTP_VAL_112_127 0x8CA2
+#define MT6332_OTP_VAL_128_143 0x8CA4
+#define MT6332_OTP_VAL_144_159 0x8CA6
+#define MT6332_OTP_VAL_160_175 0x8CA8
+#define MT6332_OTP_VAL_176_191 0x8CAA
+#define MT6332_OTP_VAL_192_207 0x8CAC
+#define MT6332_OTP_VAL_208_223 0x8CAE
+#define MT6332_OTP_VAL_224_239 0x8CB0
+#define MT6332_OTP_VAL_240_255 0x8CB2
+#define MT6332_LDO_CON0 0x8CB4
+#define MT6332_LDO_CON1 0x8CB6
+#define MT6332_LDO_CON2 0x8CB8
+#define MT6332_LDO_CON3 0x8CBA
+#define MT6332_LDO_CON5 0x8CBC
+#define MT6332_LDO_CON6 0x8CBE
+#define MT6332_LDO_CON7 0x8CC0
+#define MT6332_LDO_CON8 0x8CC2
+#define MT6332_LDO_CON9 0x8CC4
+#define MT6332_LDO_CON10 0x8CC6
+#define MT6332_LDO_CON11 0x8CC8
+#define MT6332_LDO_CON12 0x8CCA
+#define MT6332_LDO_CON13 0x8CCC
+#define MT6332_FQMTR_CON0 0x8CCE
+#define MT6332_FQMTR_CON1 0x8CD0
+#define MT6332_FQMTR_CON2 0x8CD2
+#define MT6332_IWLED_CON0 0x8CD4
+#define MT6332_IWLED_DEG 0x8CD6
+#define MT6332_IWLED_STATUS 0x8CD8
+#define MT6332_IWLED_EN_CTRL 0x8CDA
+#define MT6332_IWLED_CON1 0x8CDC
+#define MT6332_IWLED_CON2 0x8CDE
+#define MT6332_IWLED_TRIM0 0x8CE0
+#define MT6332_IWLED_TRIM1 0x8CE2
+#define MT6332_IWLED_CON3 0x8CE4
+#define MT6332_IWLED_CON4 0x8CE6
+#define MT6332_IWLED_CON5 0x8CE8
+#define MT6332_IWLED_CON6 0x8CEA
+#define MT6332_IWLED_CON7 0x8CEC
+#define MT6332_IWLED_CON8 0x8CEE
+#define MT6332_IWLED_CON9 0x8CF0
+#define MT6332_SPK_CON0 0x8CF2
+#define MT6332_SPK_CON1 0x8CF4
+#define MT6332_SPK_CON2 0x8CF6
+#define MT6332_SPK_CON3 0x8CF8
+#define MT6332_SPK_CON4 0x8CFA
+#define MT6332_SPK_CON5 0x8CFC
+#define MT6332_SPK_CON6 0x8CFE
+#define MT6332_SPK_CON7 0x8D00
+#define MT6332_SPK_CON8 0x8D02
+#define MT6332_SPK_CON9 0x8D04
+#define MT6332_SPK_CON10 0x8D06
+#define MT6332_SPK_CON11 0x8D08
+#define MT6332_SPK_CON12 0x8D0A
+#define MT6332_SPK_CON13 0x8D0C
+#define MT6332_SPK_CON14 0x8D0E
+#define MT6332_SPK_CON15 0x8D10
+#define MT6332_SPK_CON16 0x8D12
+#define MT6332_TESTI_CON0 0x8D14
+#define MT6332_TESTI_CON1 0x8D16
+#define MT6332_TESTI_CON2 0x8D18
+#define MT6332_TESTI_CON3 0x8D1A
+#define MT6332_TESTI_CON4 0x8D1C
+#define MT6332_TESTI_CON5 0x8D1E
+#define MT6332_TESTI_CON6 0x8D20
+#define MT6332_TESTI_MUX_CON0 0x8D22
+#define MT6332_TESTI_MUX_CON1 0x8D24
+#define MT6332_TESTI_MUX_CON2 0x8D26
+#define MT6332_TESTI_MUX_CON3 0x8D28
+#define MT6332_TESTI_MUX_CON4 0x8D2A
+#define MT6332_TESTI_MUX_CON5 0x8D2C
+#define MT6332_TESTI_MUX_CON6 0x8D2E
+#define MT6332_TESTO_CON0 0x8D30
+#define MT6332_TESTO_CON1 0x8D32
+#define MT6332_TEST_OMUX_CON0 0x8D34
+#define MT6332_TEST_OMUX_CON1 0x8D36
+#define MT6332_DEBUG_CON0 0x8D38
+#define MT6332_DEBUG_CON1 0x8D3A
+#define MT6332_DEBUG_CON2 0x8D3C
+#define MT6332_FGADC_CON23 0x8D3E
+#define MT6332_FGADC_CON24 0x8D40
+#define MT6332_FGADC_CON25 0x8D42
+#define MT6332_TOP_RST_STATUS 0x8D44
+#define MT6332_TOP_RST_STATUS_SET 0x8D46
+#define MT6332_TOP_RST_STATUS_CLR 0x8D48
+#define MT6332_VDVFS2_CON28 0x8D4A
+
+#endif /* __MFD_MT6332_REGISTERS_H__ */
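
The sixteen MT6332_OTP_DOUT_* words above expose a 256-bit fuse array as consecutive 16-bit registers spaced two bytes apart, as their _0_15 .. _240_255 suffixes indicate. A small sketch assuming only that layout (the helper name is hypothetical), locating and reading one fuse bit:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/mfd/mt6332/registers.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Read fuse bit 'n' (0..255) out of the OTP_DOUT array. */
static int mt6332_otp_bit(struct regmap *regmap, unsigned int n, bool *set)
{
	unsigned int reg, val;
	int ret;

	if (n > 255)
		return -EINVAL;

	/* Each DOUT word holds 16 bits; register offsets step by 2. */
	reg = MT6332_OTP_DOUT_0_15 + 2 * (n / 16);
	ret = regmap_read(regmap, reg, &val);
	if (ret)
		return ret;

	*set = val & BIT(n % 16);
	return 0;
}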
diff --git a/include/linux/mfd/mt6357/core.h b/include/linux/mfd/mt6357/core.h
new file mode 100644
index 000000000000..2441611264fd
--- /dev/null
+++ b/include/linux/mfd/mt6357/core.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 BayLibre, SAS
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef __MFD_MT6357_CORE_H__
+#define __MFD_MT6357_CORE_H__
+
+enum mt6357_irq_top_status_shift {
+ MT6357_BUCK_TOP = 0,
+ MT6357_LDO_TOP,
+ MT6357_PSC_TOP,
+ MT6357_SCK_TOP,
+ MT6357_BM_TOP,
+ MT6357_HK_TOP,
+ MT6357_XPP_TOP,
+ MT6357_AUD_TOP,
+ MT6357_MISC_TOP,
+};
+
+enum mt6357_irq_numbers {
+ MT6357_IRQ_VPROC_OC = 0,
+ MT6357_IRQ_VCORE_OC,
+ MT6357_IRQ_VMODEM_OC,
+ MT6357_IRQ_VS1_OC,
+ MT6357_IRQ_VPA_OC,
+ MT6357_IRQ_VCORE_PREOC,
+ MT6357_IRQ_VFE28_OC = 16,
+ MT6357_IRQ_VXO22_OC,
+ MT6357_IRQ_VRF18_OC,
+ MT6357_IRQ_VRF12_OC,
+ MT6357_IRQ_VEFUSE_OC,
+ MT6357_IRQ_VCN33_OC,
+ MT6357_IRQ_VCN28_OC,
+ MT6357_IRQ_VCN18_OC,
+ MT6357_IRQ_VCAMA_OC,
+ MT6357_IRQ_VCAMD_OC,
+ MT6357_IRQ_VCAMIO_OC,
+ MT6357_IRQ_VLDO28_OC,
+ MT6357_IRQ_VUSB33_OC,
+ MT6357_IRQ_VAUX18_OC,
+ MT6357_IRQ_VAUD28_OC,
+ MT6357_IRQ_VIO28_OC,
+ MT6357_IRQ_VIO18_OC,
+ MT6357_IRQ_VSRAM_PROC_OC,
+ MT6357_IRQ_VSRAM_OTHERS_OC,
+ MT6357_IRQ_VIBR_OC,
+ MT6357_IRQ_VDRAM_OC,
+ MT6357_IRQ_VMC_OC,
+ MT6357_IRQ_VMCH_OC,
+ MT6357_IRQ_VEMC_OC,
+ MT6357_IRQ_VSIM1_OC,
+ MT6357_IRQ_VSIM2_OC,
+ MT6357_IRQ_PWRKEY = 48,
+ MT6357_IRQ_HOMEKEY,
+ MT6357_IRQ_PWRKEY_R,
+ MT6357_IRQ_HOMEKEY_R,
+ MT6357_IRQ_NI_LBAT_INT,
+ MT6357_IRQ_CHRDET,
+ MT6357_IRQ_CHRDET_EDGE,
+ MT6357_IRQ_VCDT_HV_DET,
+ MT6357_IRQ_WATCHDOG,
+ MT6357_IRQ_VBATON_UNDET,
+ MT6357_IRQ_BVALID_DET,
+ MT6357_IRQ_OV,
+ MT6357_IRQ_RTC = 64,
+ MT6357_IRQ_FG_BAT0_H = 80,
+ MT6357_IRQ_FG_BAT0_L,
+ MT6357_IRQ_FG_CUR_H,
+ MT6357_IRQ_FG_CUR_L,
+ MT6357_IRQ_FG_ZCV,
+ MT6357_IRQ_BATON_LV = 96,
+ MT6357_IRQ_BATON_HT,
+ MT6357_IRQ_BAT_H = 112,
+ MT6357_IRQ_BAT_L,
+ MT6357_IRQ_AUXADC_IMP,
+ MT6357_IRQ_NAG_C_DLTV,
+ MT6357_IRQ_AUDIO = 128,
+ MT6357_IRQ_ACCDET = 133,
+ MT6357_IRQ_ACCDET_EINT0,
+ MT6357_IRQ_ACCDET_EINT1,
+ MT6357_IRQ_SPI_CMD_ALERT = 144,
+ MT6357_IRQ_NR,
+};
+
+#define MT6357_IRQ_BUCK_BASE MT6357_IRQ_VPROC_OC
+#define MT6357_IRQ_LDO_BASE MT6357_IRQ_VFE28_OC
+#define MT6357_IRQ_PSC_BASE MT6357_IRQ_PWRKEY
+#define MT6357_IRQ_SCK_BASE MT6357_IRQ_RTC
+#define MT6357_IRQ_BM_BASE MT6357_IRQ_FG_BAT0_H
+#define MT6357_IRQ_HK_BASE MT6357_IRQ_BAT_H
+#define MT6357_IRQ_AUD_BASE MT6357_IRQ_AUDIO
+#define MT6357_IRQ_MISC_BASE MT6357_IRQ_SPI_CMD_ALERT
+
+#define MT6357_IRQ_BUCK_BITS (MT6357_IRQ_VCORE_PREOC - MT6357_IRQ_BUCK_BASE + 1)
+#define MT6357_IRQ_LDO_BITS (MT6357_IRQ_VSIM2_OC - MT6357_IRQ_LDO_BASE + 1)
+#define MT6357_IRQ_PSC_BITS (MT6357_IRQ_VCDT_HV_DET - MT6357_IRQ_PSC_BASE + 1)
+#define MT6357_IRQ_SCK_BITS (MT6357_IRQ_RTC - MT6357_IRQ_SCK_BASE + 1)
+#define MT6357_IRQ_BM_BITS (MT6357_IRQ_BATON_HT - MT6357_IRQ_BM_BASE + 1)
+#define MT6357_IRQ_HK_BITS (MT6357_IRQ_NAG_C_DLTV - MT6357_IRQ_HK_BASE + 1)
+#define MT6357_IRQ_AUD_BITS (MT6357_IRQ_ACCDET_EINT1 - MT6357_IRQ_AUD_BASE + 1)
+#define MT6357_IRQ_MISC_BITS \
+ (MT6357_IRQ_SPI_CMD_ALERT - MT6357_IRQ_MISC_BASE + 1)
+
+#define MT6357_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6357_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6357_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6357_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6357_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6357_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6357_CORE_H__ */
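
MT6357_TOP_GEN() stamps out one static initializer per interrupt "top". The structure it fills and MTK_PMIC_REG_WIDTH are not part of this patch; a sketch assuming the irq_top_t layout used by the shared MediaTek PMIC IRQ code and a 16-bit register width:

/*
 * Assumed expansion of MT6357_TOP_GEN(BUCK), taking MTK_PMIC_REG_WIDTH
 * as 16 (defined elsewhere):
 *
 *	{
 *		.hwirq_base    = MT6357_IRQ_BUCK_BASE,              // = 0
 *		.num_int_regs  = ((MT6357_IRQ_BUCK_BITS - 1) / 16) + 1, // = 1
 *		.en_reg        = MT6357_BUCK_TOP_INT_CON0,
 *		.en_reg_shift  = 0x6,
 *		.sta_reg       = MT6357_BUCK_TOP_INT_STATUS0,
 *		.sta_reg_shift = 0x2,
 *		.top_offset    = MT6357_BUCK_TOP,
 *	}
 *
 * The token-pasted register names (MT6357_BUCK_TOP_INT_CON0 and
 * friends) are expected to come from mt6357/registers.h.  Only the
 * eight tops that have *_BASE macros can be generated this way;
 * MT6357_XPP_TOP has no IRQ base above.
 */
static const struct irq_top_t mt6357_ints[] = {
	MT6357_TOP_GEN(BUCK),
	MT6357_TOP_GEN(LDO),
	MT6357_TOP_GEN(PSC),
	MT6357_TOP_GEN(SCK),
	MT6357_TOP_GEN(BM),
	MT6357_TOP_GEN(HK),
	MT6357_TOP_GEN(AUD),
	MT6357_TOP_GEN(MISC),
};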
diff --git a/include/linux/mfd/mt6357/registers.h b/include/linux/mfd/mt6357/registers.h
new file mode 100644
index 000000000000..e24af83b618d
--- /dev/null
+++ b/include/linux/mfd/mt6357/registers.h
@@ -0,0 +1,1574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6357_REGISTERS_H__
+#define __MFD_MT6357_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6357_TOP0_ID 0x0
+#define MT6357_TOP0_REV0 0x2
+#define MT6357_TOP0_DSN_DBI 0x4
+#define MT6357_TOP0_DSN_DXI 0x6
+#define MT6357_HWCID 0x8
+#define MT6357_SWCID 0xa
+#define MT6357_PONSTS 0xc
+#define MT6357_POFFSTS 0xe
+#define MT6357_PSTSCTL 0x10
+#define MT6357_PG_DEB_STS0 0x12
+#define MT6357_PG_SDN_STS0 0x14
+#define MT6357_OC_SDN_STS0 0x16
+#define MT6357_THERMALSTATUS 0x18
+#define MT6357_TOP_CON 0x1a
+#define MT6357_TEST_OUT 0x1c
+#define MT6357_TEST_CON0 0x1e
+#define MT6357_TEST_CON1 0x20
+#define MT6357_TESTMODE_SW 0x22
+#define MT6357_TOPSTATUS 0x24
+#define MT6357_TDSEL_CON 0x26
+#define MT6357_RDSEL_CON 0x28
+#define MT6357_SMT_CON0 0x2a
+#define MT6357_SMT_CON1 0x2c
+#define MT6357_TOP_RSV0 0x2e
+#define MT6357_TOP_RSV1 0x30
+#define MT6357_DRV_CON0 0x32
+#define MT6357_DRV_CON1 0x34
+#define MT6357_DRV_CON2 0x36
+#define MT6357_DRV_CON3 0x38
+#define MT6357_FILTER_CON0 0x3a
+#define MT6357_FILTER_CON1 0x3c
+#define MT6357_FILTER_CON2 0x3e
+#define MT6357_FILTER_CON3 0x40
+#define MT6357_TOP_STATUS 0x42
+#define MT6357_TOP_STATUS_SET 0x44
+#define MT6357_TOP_STATUS_CLR 0x46
+#define MT6357_TOP_TRAP 0x48
+#define MT6357_TOP1_ID 0x80
+#define MT6357_TOP1_REV0 0x82
+#define MT6357_TOP1_DSN_DBI 0x84
+#define MT6357_TOP1_DSN_DXI 0x86
+#define MT6357_GPIO_DIR0 0x88
+#define MT6357_GPIO_DIR0_SET 0x8a
+#define MT6357_GPIO_DIR0_CLR 0x8c
+#define MT6357_GPIO_PULLEN0 0x8e
+#define MT6357_GPIO_PULLEN0_SET 0x90
+#define MT6357_GPIO_PULLEN0_CLR 0x92
+#define MT6357_GPIO_PULLSEL0 0x94
+#define MT6357_GPIO_PULLSEL0_SET 0x96
+#define MT6357_GPIO_PULLSEL0_CLR 0x98
+#define MT6357_GPIO_DINV0 0x9a
+#define MT6357_GPIO_DINV0_SET 0x9c
+#define MT6357_GPIO_DINV0_CLR 0x9e
+#define MT6357_GPIO_DOUT0 0xa0
+#define MT6357_GPIO_DOUT0_SET 0xa2
+#define MT6357_GPIO_DOUT0_CLR 0xa4
+#define MT6357_GPIO_PI0 0xa6
+#define MT6357_GPIO_POE0 0xa8
+#define MT6357_GPIO_MODE0 0xaa
+#define MT6357_GPIO_MODE0_SET 0xac
+#define MT6357_GPIO_MODE0_CLR 0xae
+#define MT6357_GPIO_MODE1 0xb0
+#define MT6357_GPIO_MODE1_SET 0xb2
+#define MT6357_GPIO_MODE1_CLR 0xb4
+#define MT6357_GPIO_MODE2 0xb6
+#define MT6357_GPIO_MODE2_SET 0xb8
+#define MT6357_GPIO_MODE2_CLR 0xba
+#define MT6357_GPIO_MODE3 0xbc
+#define MT6357_GPIO_MODE3_SET 0xbe
+#define MT6357_GPIO_MODE3_CLR 0xc0
+#define MT6357_GPIO_RSV 0xc2
+#define MT6357_TOP2_ID 0x100
+#define MT6357_TOP2_REV0 0x102
+#define MT6357_TOP2_DSN_DBI 0x104
+#define MT6357_TOP2_DSN_DXI 0x106
+#define MT6357_TOP_PAM0 0x108
+#define MT6357_TOP_PAM1 0x10a
+#define MT6357_TOP_CKPDN_CON0 0x10c
+#define MT6357_TOP_CKPDN_CON0_SET 0x10e
+#define MT6357_TOP_CKPDN_CON0_CLR 0x110
+#define MT6357_TOP_CKPDN_CON1 0x112
+#define MT6357_TOP_CKPDN_CON1_SET 0x114
+#define MT6357_TOP_CKPDN_CON1_CLR 0x116
+#define MT6357_TOP_CKSEL_CON0 0x118
+#define MT6357_TOP_CKSEL_CON0_SET 0x11a
+#define MT6357_TOP_CKSEL_CON0_CLR 0x11c
+#define MT6357_TOP_CKSEL_CON1 0x11e
+#define MT6357_TOP_CKSEL_CON1_SET 0x120
+#define MT6357_TOP_CKSEL_CON1_CLR 0x122
+#define MT6357_TOP_CKDIVSEL_CON0 0x124
+#define MT6357_TOP_CKDIVSEL_CON0_SET 0x126
+#define MT6357_TOP_CKDIVSEL_CON0_CLR 0x128
+#define MT6357_TOP_CKHWEN_CON0 0x12a
+#define MT6357_TOP_CKHWEN_CON0_SET 0x12c
+#define MT6357_TOP_CKHWEN_CON0_CLR 0x12e
+#define MT6357_TOP_CKTST_CON0 0x130
+#define MT6357_TOP_CKTST_CON1 0x132
+#define MT6357_TOP_CLK_CON0 0x134
+#define MT6357_TOP_CLK_CON0_SET 0x136
+#define MT6357_TOP_CLK_CON0_CLR 0x138
+#define MT6357_TOP_DCM_CON0 0x13a
+#define MT6357_TOP_HANDOVER_DEBUG0 0x13c
+#define MT6357_TOP_RST_CON0 0x13e
+#define MT6357_TOP_RST_CON0_SET 0x140
+#define MT6357_TOP_RST_CON0_CLR 0x142
+#define MT6357_TOP_RST_CON1 0x144
+#define MT6357_TOP_RST_CON1_SET 0x146
+#define MT6357_TOP_RST_CON1_CLR 0x148
+#define MT6357_TOP_RST_CON2 0x14a
+#define MT6357_TOP_RST_MISC 0x14c
+#define MT6357_TOP_RST_MISC_SET 0x14e
+#define MT6357_TOP_RST_MISC_CLR 0x150
+#define MT6357_TOP_RST_STATUS 0x152
+#define MT6357_TOP_RST_STATUS_SET 0x154
+#define MT6357_TOP_RST_STATUS_CLR 0x156
+#define MT6357_TOP2_ELR_NUM 0x158
+#define MT6357_TOP2_ELR0 0x15a
+#define MT6357_TOP2_ELR1 0x15c
+#define MT6357_TOP3_ID 0x180
+#define MT6357_TOP3_REV0 0x182
+#define MT6357_TOP3_DSN_DBI 0x184
+#define MT6357_TOP3_DSN_DXI 0x186
+#define MT6357_MISC_TOP_INT_CON0 0x188
+#define MT6357_MISC_TOP_INT_CON0_SET 0x18a
+#define MT6357_MISC_TOP_INT_CON0_CLR 0x18c
+#define MT6357_MISC_TOP_INT_MASK_CON0 0x18e
+#define MT6357_MISC_TOP_INT_MASK_CON0_SET 0x190
+#define MT6357_MISC_TOP_INT_MASK_CON0_CLR 0x192
+#define MT6357_MISC_TOP_INT_STATUS0 0x194
+#define MT6357_MISC_TOP_INT_RAW_STATUS0 0x196
+#define MT6357_TOP_INT_MASK_CON0 0x198
+#define MT6357_TOP_INT_MASK_CON0_SET 0x19a
+#define MT6357_TOP_INT_MASK_CON0_CLR 0x19c
+#define MT6357_TOP_INT_STATUS0 0x19e
+#define MT6357_TOP_INT_RAW_STATUS0 0x1a0
+#define MT6357_TOP_INT_CON0 0x1a2
+#define MT6357_PLT0_ID 0x380
+#define MT6357_PLT0_REV0 0x382
+#define MT6357_PLT0_REV1 0x384
+#define MT6357_PLT0_DSN_DXI 0x386
+#define MT6357_FQMTR_CON0 0x388
+#define MT6357_FQMTR_CON1 0x38a
+#define MT6357_FQMTR_CON2 0x38c
+#define MT6357_TOP_CLK_TRIM 0x38e
+#define MT6357_OTP_CON0 0x390
+#define MT6357_OTP_CON1 0x392
+#define MT6357_OTP_CON2 0x394
+#define MT6357_OTP_CON3 0x396
+#define MT6357_OTP_CON4 0x398
+#define MT6357_OTP_CON5 0x39a
+#define MT6357_OTP_CON6 0x39c
+#define MT6357_OTP_CON7 0x39e
+#define MT6357_OTP_CON8 0x3a0
+#define MT6357_OTP_CON9 0x3a2
+#define MT6357_OTP_CON10 0x3a4
+#define MT6357_OTP_CON11 0x3a6
+#define MT6357_OTP_CON12 0x3a8
+#define MT6357_OTP_CON13 0x3aa
+#define MT6357_OTP_CON14 0x3ac
+#define MT6357_TOP_TMA_KEY 0x3ae
+#define MT6357_TOP_MDB_CONF0 0x3b0
+#define MT6357_TOP_MDB_CONF1 0x3b2
+#define MT6357_TOP_MDB_CONF2 0x3b4
+#define MT6357_PLT0_ELR_NUM 0x3b6
+#define MT6357_PLT0_ELR0 0x3b8
+#define MT6357_PLT0_ELR1 0x3ba
+#define MT6357_SPISLV_ID 0x400
+#define MT6357_SPISLV_REV0 0x402
+#define MT6357_SPISLV_REV1 0x404
+#define MT6357_SPISLV_DSN_DXI 0x406
+#define MT6357_RG_SPI_CON0 0x408
+#define MT6357_DEW_DIO_EN 0x40a
+#define MT6357_DEW_READ_TEST 0x40c
+#define MT6357_DEW_WRITE_TEST 0x40e
+#define MT6357_DEW_CRC_SWRST 0x410
+#define MT6357_DEW_CRC_EN 0x412
+#define MT6357_DEW_CRC_VAL 0x414
+#define MT6357_DEW_DBG_MON_SEL 0x416
+#define MT6357_DEW_CIPHER_KEY_SEL 0x418
+#define MT6357_DEW_CIPHER_IV_SEL 0x41a
+#define MT6357_DEW_CIPHER_EN 0x41c
+#define MT6357_DEW_CIPHER_RDY 0x41e
+#define MT6357_DEW_CIPHER_MODE 0x420
+#define MT6357_DEW_CIPHER_SWRST 0x422
+#define MT6357_DEW_RDDMY_NO 0x424
+#define MT6357_INT_TYPE_CON0 0x426
+#define MT6357_INT_TYPE_CON0_SET 0x428
+#define MT6357_INT_TYPE_CON0_CLR 0x42a
+#define MT6357_INT_STA 0x42c
+#define MT6357_RG_SPI_CON1 0x42e
+#define MT6357_RG_SPI_CON2 0x430
+#define MT6357_RG_SPI_CON3 0x432
+#define MT6357_RG_SPI_CON4 0x434
+#define MT6357_RG_SPI_CON5 0x436
+#define MT6357_RG_SPI_CON6 0x438
+#define MT6357_RG_SPI_CON7 0x43a
+#define MT6357_RG_SPI_CON8 0x43c
+#define MT6357_RG_SPI_CON9 0x43e
+#define MT6357_RG_SPI_CON10 0x440
+#define MT6357_RG_SPI_CON11 0x442
+#define MT6357_RG_SPI_CON12 0x444
+#define MT6357_RG_SPI_CON13 0x446
+#define MT6357_TOP_SPI_CON0 0x448
+#define MT6357_TOP_SPI_CON1 0x44a
+#define MT6357_SCK_TOP_DSN_ID 0x500
+#define MT6357_SCK_TOP_DSN_REV0 0x502
+#define MT6357_SCK_TOP_DBI 0x504
+#define MT6357_SCK_TOP_DXI 0x506
+#define MT6357_SCK_TOP_TPM0 0x508
+#define MT6357_SCK_TOP_TPM1 0x50a
+#define MT6357_SCK_TOP_CON0 0x50c
+#define MT6357_SCK_TOP_CON1 0x50e
+#define MT6357_SCK_TOP_TEST_OUT 0x510
+#define MT6357_SCK_TOP_TEST_CON0 0x512
+#define MT6357_SCK_TOP_CKPDN_CON0 0x514
+#define MT6357_SCK_TOP_CKPDN_CON0_SET 0x516
+#define MT6357_SCK_TOP_CKPDN_CON0_CLR 0x518
+#define MT6357_SCK_TOP_CKHWEN_CON0 0x51a
+#define MT6357_SCK_TOP_CKHWEN_CON0_SET 0x51c
+#define MT6357_SCK_TOP_CKHWEN_CON0_CLR 0x51e
+#define MT6357_SCK_TOP_CKTST_CON 0x520
+#define MT6357_SCK_TOP_RST_CON0 0x522
+#define MT6357_SCK_TOP_RST_CON0_SET 0x524
+#define MT6357_SCK_TOP_RST_CON0_CLR 0x526
+#define MT6357_SCK_TOP_INT_CON0 0x528
+#define MT6357_SCK_TOP_INT_CON0_SET 0x52a
+#define MT6357_SCK_TOP_INT_CON0_CLR 0x52c
+#define MT6357_SCK_TOP_INT_MASK_CON0 0x52e
+#define MT6357_SCK_TOP_INT_MASK_CON0_SET 0x530
+#define MT6357_SCK_TOP_INT_MASK_CON0_CLR 0x532
+#define MT6357_SCK_TOP_INT_STATUS0 0x534
+#define MT6357_SCK_TOP_INT_RAW_STATUS0 0x536
+#define MT6357_SCK_TOP_INT_MISC_CON 0x538
+#define MT6357_EOSC_CALI_CON0 0x53a
+#define MT6357_EOSC_CALI_CON1 0x53c
+#define MT6357_RTC_MIX_CON0 0x53e
+#define MT6357_RTC_MIX_CON1 0x540
+#define MT6357_RTC_MIX_CON2 0x542
+#define MT6357_RTC_DSN_ID 0x580
+#define MT6357_RTC_DSN_REV0 0x582
+#define MT6357_RTC_DBI 0x584
+#define MT6357_RTC_DXI 0x586
+#define MT6357_RTC_BBPU 0x588
+#define MT6357_RTC_IRQ_STA 0x58a
+#define MT6357_RTC_IRQ_EN 0x58c
+#define MT6357_RTC_CII_EN 0x58e
+#define MT6357_RTC_AL_MASK 0x590
+#define MT6357_RTC_TC_SEC 0x592
+#define MT6357_RTC_TC_MIN 0x594
+#define MT6357_RTC_TC_HOU 0x596
+#define MT6357_RTC_TC_DOM 0x598
+#define MT6357_RTC_TC_DOW 0x59a
+#define MT6357_RTC_TC_MTH 0x59c
+#define MT6357_RTC_TC_YEA 0x59e
+#define MT6357_RTC_AL_SEC 0x5a0
+#define MT6357_RTC_AL_MIN 0x5a2
+#define MT6357_RTC_AL_HOU 0x5a4
+#define MT6357_RTC_AL_DOM 0x5a6
+#define MT6357_RTC_AL_DOW 0x5a8
+#define MT6357_RTC_AL_MTH 0x5aa
+#define MT6357_RTC_AL_YEA 0x5ac
+#define MT6357_RTC_OSC32CON 0x5ae
+#define MT6357_RTC_POWERKEY1 0x5b0
+#define MT6357_RTC_POWERKEY2 0x5b2
+#define MT6357_RTC_PDN1 0x5b4
+#define MT6357_RTC_PDN2 0x5b6
+#define MT6357_RTC_SPAR0 0x5b8
+#define MT6357_RTC_SPAR1 0x5ba
+#define MT6357_RTC_PROT 0x5bc
+#define MT6357_RTC_DIFF 0x5be
+#define MT6357_RTC_CALI 0x5c0
+#define MT6357_RTC_WRTGR 0x5c2
+#define MT6357_RTC_CON 0x5c4
+#define MT6357_RTC_SEC_CTRL 0x5c6
+#define MT6357_RTC_INT_CNT 0x5c8
+#define MT6357_RTC_SEC_DAT0 0x5ca
+#define MT6357_RTC_SEC_DAT1 0x5cc
+#define MT6357_RTC_SEC_DAT2 0x5ce
+#define MT6357_RTC_SEC_DSN_ID 0x600
+#define MT6357_RTC_SEC_DSN_REV0 0x602
+#define MT6357_RTC_SEC_DBI 0x604
+#define MT6357_RTC_SEC_DXI 0x606
+#define MT6357_RTC_TC_SEC_SEC 0x608
+#define MT6357_RTC_TC_MIN_SEC 0x60a
+#define MT6357_RTC_TC_HOU_SEC 0x60c
+#define MT6357_RTC_TC_DOM_SEC 0x60e
+#define MT6357_RTC_TC_DOW_SEC 0x610
+#define MT6357_RTC_TC_MTH_SEC 0x612
+#define MT6357_RTC_TC_YEA_SEC 0x614
+#define MT6357_RTC_SEC_CK_PDN 0x616
+#define MT6357_RTC_SEC_WRTGR 0x618
+#define MT6357_DCXO_DSN_ID 0x780
+#define MT6357_DCXO_DSN_REV0 0x782
+#define MT6357_DCXO_DSN_DBI 0x784
+#define MT6357_DCXO_DSN_DXI 0x786
+#define MT6357_DCXO_CW00 0x788
+#define MT6357_DCXO_CW00_SET 0x78a
+#define MT6357_DCXO_CW00_CLR 0x78c
+#define MT6357_DCXO_CW01 0x78e
+#define MT6357_DCXO_CW02 0x790
+#define MT6357_DCXO_CW03 0x792
+#define MT6357_DCXO_CW04 0x794
+#define MT6357_DCXO_CW05 0x796
+#define MT6357_DCXO_CW06 0x798
+#define MT6357_DCXO_CW07 0x79a
+#define MT6357_DCXO_CW08 0x79c
+#define MT6357_DCXO_CW09 0x79e
+#define MT6357_DCXO_CW10 0x7a0
+#define MT6357_DCXO_CW11 0x7a2
+#define MT6357_DCXO_CW11_SET 0x7a4
+#define MT6357_DCXO_CW11_CLR 0x7a6
+#define MT6357_DCXO_CW12 0x7a8
+#define MT6357_DCXO_CW13 0x7aa
+#define MT6357_DCXO_CW14 0x7ac
+#define MT6357_DCXO_CW15 0x7ae
+#define MT6357_DCXO_CW16 0x7b0
+#define MT6357_DCXO_CW17 0x7b2
+#define MT6357_DCXO_CW18 0x7b4
+#define MT6357_DCXO_CW19 0x7b6
+#define MT6357_DCXO_CW20 0x7b8
+#define MT6357_DCXO_CW21 0x7ba
+#define MT6357_DCXO_CW22 0x7bc
+#define MT6357_DCXO_ELR_NUM 0x7be
+#define MT6357_DCXO_ELR0 0x7c0
+#define MT6357_PSC_TOP_ID 0x900
+#define MT6357_PSC_TOP_REV0 0x902
+#define MT6357_PSC_TOP_DBI 0x904
+#define MT6357_PSC_TOP_DXI 0x906
+#define MT6357_PSC_TPM0 0x908
+#define MT6357_PSC_TPM1 0x90a
+#define MT6357_PSC_TOP_RSTCTL_0 0x90c
+#define MT6357_PSC_TOP_INT_CON0 0x90e
+#define MT6357_PSC_TOP_INT_CON0_SET 0x910
+#define MT6357_PSC_TOP_INT_CON0_CLR 0x912
+#define MT6357_PSC_TOP_INT_MASK_CON0 0x914
+#define MT6357_PSC_TOP_INT_MASK_CON0_SET 0x916
+#define MT6357_PSC_TOP_INT_MASK_CON0_CLR 0x918
+#define MT6357_PSC_TOP_INT_STATUS0 0x91a
+#define MT6357_PSC_TOP_INT_RAW_STATUS0 0x91c
+#define MT6357_PSC_TOP_INT_MISC_CON 0x91e
+#define MT6357_PSC_TOP_INT_MISC_CON_SET 0x920
+#define MT6357_PSC_TOP_INT_MISC_CON_CLR 0x922
+#define MT6357_PSC_TOP_MON_CTL 0x924
+#define MT6357_STRUP_ID 0x980
+#define MT6357_STRUP_REV0 0x982
+#define MT6357_STRUP_DBI 0x984
+#define MT6357_STRUP_DXI 0x986
+#define MT6357_STRUP_ANA_CON0 0x988
+#define MT6357_STRUP_ANA_CON1 0x98a
+#define MT6357_STRUP_ANA_CON2 0x98c
+#define MT6357_STRUP_ELR_NUM 0x98e
+#define MT6357_STRUP_ELR_0 0x990
+#define MT6357_PSEQ_ID 0xa00
+#define MT6357_PSEQ_REV0 0xa02
+#define MT6357_PSEQ_DBI 0xa04
+#define MT6357_PSEQ_DXI 0xa06
+#define MT6357_PPCCTL0 0xa08
+#define MT6357_PPCCTL1 0xa0a
+#define MT6357_PPCCTL2 0xa0c
+#define MT6357_PPCCFG0 0xa0e
+#define MT6357_PPCTST0 0xa10
+#define MT6357_PORFLAG 0xa12
+#define MT6357_STRUP_CON0 0xa14
+#define MT6357_STRUP_CON1 0xa16
+#define MT6357_STRUP_CON2 0xa18
+#define MT6357_STRUP_CON3 0xa1a
+#define MT6357_STRUP_CON4 0xa1c
+#define MT6357_STRUP_CON5 0xa1e
+#define MT6357_STRUP_CON6 0xa20
+#define MT6357_STRUP_CON7 0xa22
+#define MT6357_CPSCFG0 0xa24
+#define MT6357_STRUP_CON9 0xa26
+#define MT6357_STRUP_CON10 0xa28
+#define MT6357_STRUP_CON11 0xa2a
+#define MT6357_STRUP_CON12 0xa2c
+#define MT6357_STRUP_CON13 0xa2e
+#define MT6357_STRUP_CON14 0xa30
+#define MT6357_STRUP_CON15 0xa32
+#define MT6357_STRUP_CON16 0xa34
+#define MT6357_STRUP_CON19 0xa36
+#define MT6357_PSEQ_ELR_NUM 0xa38
+#define MT6357_PSEQ_ELR7 0xa3a
+#define MT6357_PSEQ_ELR8 0xa3c
+#define MT6357_PCHR_DIG_DSN_ID 0xa80
+#define MT6357_PCHR_DIG_DSN_REV0 0xa82
+#define MT6357_PCHR_DIG_DSN_DBI 0xa84
+#define MT6357_PCHR_DIG_DSN_DXI 0xa86
+#define MT6357_CHR_TOP_CON0 0xa88
+#define MT6357_CHR_TOP_CON1 0xa8a
+#define MT6357_CHR_TOP_CON2 0xa8c
+#define MT6357_CHR_TOP_CON3 0xa8e
+#define MT6357_CHR_TOP_CON4 0xa90
+#define MT6357_CHR_TOP_CON5 0xa92
+#define MT6357_CHR_TOP_CON6 0xa94
+#define MT6357_PCHR_DIG_ELR_NUM 0xa96
+#define MT6357_PCHR_ELR0 0xa98
+#define MT6357_PCHR_ELR1 0xa9a
+#define MT6357_PCHR_MACRO_DSN_ID 0xb80
+#define MT6357_PCHR_MACRO_DSN_REV0 0xb82
+#define MT6357_PCHR_MACRO_DSN_DBI 0xb84
+#define MT6357_PCHR_MACRO_DSN_DXI 0xb86
+#define MT6357_CHR_CON0 0xb88
+#define MT6357_CHR_CON1 0xb8a
+#define MT6357_CHR_CON2 0xb8c
+#define MT6357_CHR_CON3 0xb8e
+#define MT6357_CHR_CON4 0xb90
+#define MT6357_CHR_CON5 0xb92
+#define MT6357_CHR_CON6 0xb94
+#define MT6357_CHR_CON7 0xb96
+#define MT6357_CHR_CON8 0xb98
+#define MT6357_CHR_CON9 0xb9a
+#define MT6357_BM_TOP_DSN_ID 0xc00
+#define MT6357_BM_TOP_DSN_REV0 0xc02
+#define MT6357_BM_TOP_DBI 0xc04
+#define MT6357_BM_TOP_DXI 0xc06
+#define MT6357_BM_TPM0 0xc08
+#define MT6357_BM_TPM1 0xc0a
+#define MT6357_BM_TOP_CKPDN_CON0 0xc0c
+#define MT6357_BM_TOP_CKPDN_CON0_SET 0xc0e
+#define MT6357_BM_TOP_CKPDN_CON0_CLR 0xc10
+#define MT6357_BM_TOP_CKSEL_CON0 0xc12
+#define MT6357_BM_TOP_CKSEL_CON0_SET 0xc14
+#define MT6357_BM_TOP_CKSEL_CON0_CLR 0xc16
+#define MT6357_BM_TOP_CKTST_CON0 0xc18
+#define MT6357_BM_TOP_RST_CON0 0xc1a
+#define MT6357_BM_TOP_RST_CON0_SET 0xc1c
+#define MT6357_BM_TOP_RST_CON0_CLR 0xc1e
+#define MT6357_BM_TOP_INT_CON0 0xc20
+#define MT6357_BM_TOP_INT_CON0_SET 0xc22
+#define MT6357_BM_TOP_INT_CON0_CLR 0xc24
+#define MT6357_BM_TOP_INT_CON1 0xc26
+#define MT6357_BM_TOP_INT_CON1_SET 0xc28
+#define MT6357_BM_TOP_INT_CON1_CLR 0xc2a
+#define MT6357_BM_TOP_INT_MASK_CON0 0xc2c
+#define MT6357_BM_TOP_INT_MASK_CON0_SET 0xc2e
+#define MT6357_BM_TOP_INT_MASK_CON0_CLR 0xc30
+#define MT6357_BM_TOP_INT_MASK_CON1 0xc32
+#define MT6357_BM_TOP_INT_MASK_CON1_SET 0xc34
+#define MT6357_BM_TOP_INT_MASK_CON1_CLR 0xc36
+#define MT6357_BM_TOP_INT_STATUS0 0xc38
+#define MT6357_BM_TOP_INT_STATUS1 0xc3a
+#define MT6357_BM_TOP_INT_RAW_STATUS0 0xc3c
+#define MT6357_BM_TOP_INT_RAW_STATUS1 0xc3e
+#define MT6357_BM_TOP_INT_MISC_CON 0xc40
+#define MT6357_BM_TOP_DBG_CON 0xc42
+#define MT6357_BM_TOP_RSV0 0xc44
+#define MT6357_FGADC_ANA_DSN_ID 0xc80
+#define MT6357_FGADC_ANA_DSN_REV0 0xc82
+#define MT6357_FGADC_ANA_DSN_DBI 0xc84
+#define MT6357_FGADC_ANA_DSN_DXI 0xc86
+#define MT6357_FGADC_ANA_CON0 0xc88
+#define MT6357_FGADC_ANA_TEST_CON0 0xc8a
+#define MT6357_FGADC_ANA_ELR_NUM 0xc8c
+#define MT6357_FGADC_ANA_ELR0 0xc8e
+#define MT6357_FGADC_ANA_ELR1 0xc90
+#define MT6357_FGADC0_DSN_ID 0xd00
+#define MT6357_FGADC0_DSN_REV0 0xd02
+#define MT6357_FGADC0_DSN_DBI 0xd04
+#define MT6357_FGADC0_DSN_DXI 0xd06
+#define MT6357_FGADC_CON0 0xd08
+#define MT6357_FGADC_CON1 0xd0a
+#define MT6357_FGADC_CON2 0xd0c
+#define MT6357_FGADC_CON3 0xd0e
+#define MT6357_FGADC_CON4 0xd10
+#define MT6357_FGADC_CAR_CON0 0xd12
+#define MT6357_FGADC_CAR_CON1 0xd14
+#define MT6357_FGADC_CAR_CON2 0xd16
+#define MT6357_FGADC_CARTH_CON0 0xd18
+#define MT6357_FGADC_CARTH_CON1 0xd1a
+#define MT6357_FGADC_CARTH_CON2 0xd1c
+#define MT6357_FGADC_CARTH_CON3 0xd1e
+#define MT6357_FGADC_NTER_CON0 0xd20
+#define MT6357_FGADC_NTER_CON1 0xd22
+#define MT6357_FGADC_NTER_CON2 0xd24
+#define MT6357_FGADC_SON_CON0 0xd26
+#define MT6357_FGADC_SON_CON1 0xd28
+#define MT6357_FGADC_SON_CON2 0xd2a
+#define MT6357_FGADC_SON_CON3 0xd2c
+#define MT6357_FGADC_ZCV_CON0 0xd2e
+#define MT6357_FGADC_ZCV_CON1 0xd30
+#define MT6357_FGADC_ZCV_CON2 0xd32
+#define MT6357_FGADC_ZCV_CON3 0xd34
+#define MT6357_FGADC_ZCV_CON4 0xd36
+#define MT6357_FGADC_ZCVTH_CON0 0xd38
+#define MT6357_FGADC_ZCVTH_CON1 0xd3a
+#define MT6357_FGADC_ZCVTH_CON2 0xd3c
+#define MT6357_FGADC1_DSN_ID 0xd80
+#define MT6357_FGADC1_DSN_REV0 0xd82
+#define MT6357_FGADC1_DSN_DBI 0xd84
+#define MT6357_FGADC1_DSN_DXI 0xd86
+#define MT6357_FGADC_R_CON0 0xd88
+#define MT6357_FGADC_CUR_CON0 0xd8a
+#define MT6357_FGADC_CUR_CON1 0xd8c
+#define MT6357_FGADC_CUR_CON2 0xd8e
+#define MT6357_FGADC_CUR_CON3 0xd90
+#define MT6357_FGADC_OFFSET_CON0 0xd92
+#define MT6357_FGADC_OFFSET_CON1 0xd94
+#define MT6357_FGADC_GAIN_CON0 0xd96
+#define MT6357_FGADC_TEST_CON0 0xd98
+#define MT6357_SYSTEM_INFO_CON0 0xd9a
+#define MT6357_SYSTEM_INFO_CON1 0xd9c
+#define MT6357_SYSTEM_INFO_CON2 0xd9e
+#define MT6357_SYSTEM_INFO_CON3 0xda0
+#define MT6357_SYSTEM_INFO_CON4 0xda2
+#define MT6357_BATON_ANA_DSN_ID 0xe00
+#define MT6357_BATON_ANA_DSN_REV0 0xe02
+#define MT6357_BATON_ANA_DSN_DBI 0xe04
+#define MT6357_BATON_ANA_DSN_DXI 0xe06
+#define MT6357_BATON_ANA_CON0 0xe08
+#define MT6357_BATON_ANA_ELR_NUM 0xe0a
+#define MT6357_BATON_ANA_ELR0 0xe0c
+#define MT6357_HK_TOP_ID 0xf80
+#define MT6357_HK_TOP_REV0 0xf82
+#define MT6357_HK_TOP_DBI 0xf84
+#define MT6357_HK_TOP_DXI 0xf86
+#define MT6357_HK_TPM0 0xf88
+#define MT6357_HK_TPM1 0xf8a
+#define MT6357_HK_TOP_CLK_CON0 0xf8c
+#define MT6357_HK_TOP_CLK_CON1 0xf8e
+#define MT6357_HK_TOP_RST_CON0 0xf90
+#define MT6357_HK_TOP_INT_CON0 0xf92
+#define MT6357_HK_TOP_INT_CON0_SET 0xf94
+#define MT6357_HK_TOP_INT_CON0_CLR 0xf96
+#define MT6357_HK_TOP_INT_MASK_CON0 0xf98
+#define MT6357_HK_TOP_INT_MASK_CON0_SET 0xf9a
+#define MT6357_HK_TOP_INT_MASK_CON0_CLR 0xf9c
+#define MT6357_HK_TOP_INT_STATUS0 0xf9e
+#define MT6357_HK_TOP_INT_RAW_STATUS0 0xfa0
+#define MT6357_HK_TOP_MON_CON0 0xfa2
+#define MT6357_HK_TOP_MON_CON1 0xfa4
+#define MT6357_HK_TOP_MON_CON2 0xfa6
+#define MT6357_AUXADC_DSN_ID 0x1000
+#define MT6357_AUXADC_DSN_REV0 0x1002
+#define MT6357_AUXADC_DSN_DBI 0x1004
+#define MT6357_AUXADC_DSN_DXI 0x1006
+#define MT6357_AUXADC_ANA_CON0 0x1008
+#define MT6357_AUXADC_DIG_1_DSN_ID 0x1080
+#define MT6357_AUXADC_DIG_1_DSN_REV0 0x1082
+#define MT6357_AUXADC_DIG_1_DSN_DBI 0x1084
+#define MT6357_AUXADC_DIG_1_DSN_DXI 0x1086
+#define MT6357_AUXADC_ADC0 0x1088
+#define MT6357_AUXADC_ADC1 0x108a
+#define MT6357_AUXADC_ADC2 0x108c
+#define MT6357_AUXADC_ADC3 0x108e
+#define MT6357_AUXADC_ADC4 0x1090
+#define MT6357_AUXADC_ADC5 0x1092
+#define MT6357_AUXADC_ADC6 0x1094
+#define MT6357_AUXADC_ADC7 0x1096
+#define MT6357_AUXADC_ADC8 0x1098
+#define MT6357_AUXADC_ADC9 0x109a
+#define MT6357_AUXADC_ADC10 0x109c
+#define MT6357_AUXADC_ADC11 0x109e
+#define MT6357_AUXADC_ADC12 0x10a0
+#define MT6357_AUXADC_ADC14 0x10a2
+#define MT6357_AUXADC_ADC16 0x10a4
+#define MT6357_AUXADC_ADC17 0x10a6
+#define MT6357_AUXADC_ADC18 0x10a8
+#define MT6357_AUXADC_ADC19 0x10aa
+#define MT6357_AUXADC_ADC20 0x10ac
+#define MT6357_AUXADC_ADC21 0x10ae
+#define MT6357_AUXADC_ADC22 0x10b0
+#define MT6357_AUXADC_ADC23 0x10b2
+#define MT6357_AUXADC_ADC24 0x10b4
+#define MT6357_AUXADC_ADC25 0x10b6
+#define MT6357_AUXADC_ADC26 0x10b8
+#define MT6357_AUXADC_ADC27 0x10ba
+#define MT6357_AUXADC_ADC29 0x10bc
+#define MT6357_AUXADC_ADC30 0x10be
+#define MT6357_AUXADC_ADC31 0x10c0
+#define MT6357_AUXADC_ADC32 0x10c2
+#define MT6357_AUXADC_ADC33 0x10c4
+#define MT6357_AUXADC_ADC34 0x10c6
+#define MT6357_AUXADC_ADC35 0x10c8
+#define MT6357_AUXADC_ADC36 0x10ca
+#define MT6357_AUXADC_ADC38 0x10cc
+#define MT6357_AUXADC_ADC39 0x10ce
+#define MT6357_AUXADC_ADC40 0x10d0
+#define MT6357_AUXADC_ADC41 0x10d2
+#define MT6357_AUXADC_ADC42 0x10d4
+#define MT6357_AUXADC_ADC43 0x10d6
+#define MT6357_AUXADC_ADC46 0x10d8
+#define MT6357_AUXADC_ADC47 0x10da
+#define MT6357_AUXADC_DIG_1_ELR_NUM 0x10dc
+#define MT6357_AUXADC_DIG_1_ELR0 0x10de
+#define MT6357_AUXADC_DIG_1_ELR1 0x10e0
+#define MT6357_AUXADC_DIG_2_DSN_ID 0x1100
+#define MT6357_AUXADC_DIG_2_DSN_REV0 0x1102
+#define MT6357_AUXADC_DIG_2_DSN_DBI 0x1104
+#define MT6357_AUXADC_DIG_2_DSN_DXI 0x1106
+#define MT6357_AUXADC_STA0 0x1108
+#define MT6357_AUXADC_STA1 0x110a
+#define MT6357_AUXADC_STA2 0x110c
+#define MT6357_AUXADC_RQST0 0x110e
+#define MT6357_AUXADC_RQST0_SET 0x1110
+#define MT6357_AUXADC_RQST0_CLR 0x1112
+#define MT6357_AUXADC_RQST2 0x1114
+#define MT6357_AUXADC_RQST2_SET 0x1116
+#define MT6357_AUXADC_RQST2_CLR 0x1118
+#define MT6357_AUXADC_RQST1 0x111a
+#define MT6357_AUXADC_RQST1_SET 0x111c
+#define MT6357_AUXADC_RQST1_CLR 0x111e
+#define MT6357_AUXADC_CON0 0x1120
+#define MT6357_AUXADC_CON0_SET 0x1122
+#define MT6357_AUXADC_CON0_CLR 0x1124
+#define MT6357_AUXADC_CON1 0x1126
+#define MT6357_AUXADC_CON2 0x1128
+#define MT6357_AUXADC_CON3 0x112a
+#define MT6357_AUXADC_CON4 0x112c
+#define MT6357_AUXADC_CON5 0x112e
+#define MT6357_AUXADC_CON6 0x1130
+#define MT6357_AUXADC_CON7 0x1132
+#define MT6357_AUXADC_CON8 0x1134
+#define MT6357_AUXADC_CON9 0x1136
+#define MT6357_AUXADC_CON10 0x1138
+#define MT6357_AUXADC_CON11 0x113a
+#define MT6357_AUXADC_CON12 0x113c
+#define MT6357_AUXADC_CON13 0x113e
+#define MT6357_AUXADC_CON14 0x1140
+#define MT6357_AUXADC_CON15 0x1142
+#define MT6357_AUXADC_CON16 0x1144
+#define MT6357_AUXADC_CON17 0x1146
+#define MT6357_AUXADC_CON18 0x1148
+#define MT6357_AUXADC_CON19 0x114a
+#define MT6357_AUXADC_CON20 0x114c
+#define MT6357_AUXADC_DIG_3_DSN_ID 0x1180
+#define MT6357_AUXADC_DIG_3_DSN_REV0 0x1182
+#define MT6357_AUXADC_DIG_3_DSN_DBI 0x1184
+#define MT6357_AUXADC_DIG_3_DSN_DXI 0x1186
+#define MT6357_AUXADC_AUTORPT0 0x1188
+#define MT6357_AUXADC_LBAT0 0x118a
+#define MT6357_AUXADC_LBAT1 0x118c
+#define MT6357_AUXADC_LBAT2 0x118e
+#define MT6357_AUXADC_LBAT3 0x1190
+#define MT6357_AUXADC_LBAT4 0x1192
+#define MT6357_AUXADC_LBAT5 0x1194
+#define MT6357_AUXADC_LBAT6 0x1196
+#define MT6357_AUXADC_ACCDET 0x1198
+#define MT6357_AUXADC_DBG0 0x119a
+#define MT6357_AUXADC_IMP0 0x119c
+#define MT6357_AUXADC_IMP1 0x119e
+#define MT6357_AUXADC_DIG_3_ELR_NUM 0x11a0
+#define MT6357_AUXADC_DIG_3_ELR0 0x11a2
+#define MT6357_AUXADC_DIG_3_ELR1 0x11a4
+#define MT6357_AUXADC_DIG_3_ELR2 0x11a6
+#define MT6357_AUXADC_DIG_3_ELR3 0x11a8
+#define MT6357_AUXADC_DIG_3_ELR4 0x11aa
+#define MT6357_AUXADC_DIG_3_ELR5 0x11ac
+#define MT6357_AUXADC_DIG_3_ELR6 0x11ae
+#define MT6357_AUXADC_DIG_3_ELR7 0x11b0
+#define MT6357_AUXADC_DIG_3_ELR8 0x11b2
+#define MT6357_AUXADC_DIG_3_ELR9 0x11b4
+#define MT6357_AUXADC_DIG_3_ELR10 0x11b6
+#define MT6357_AUXADC_DIG_3_ELR11 0x11b8
+#define MT6357_AUXADC_DIG_4_DSN_ID 0x1200
+#define MT6357_AUXADC_DIG_4_DSN_REV0 0x1202
+#define MT6357_AUXADC_DIG_4_DSN_DBI 0x1204
+#define MT6357_AUXADC_DIG_4_DSN_DXI 0x1206
+#define MT6357_AUXADC_MDRT_0 0x1208
+#define MT6357_AUXADC_MDRT_1 0x120a
+#define MT6357_AUXADC_MDRT_2 0x120c
+#define MT6357_AUXADC_MDRT_3 0x120e
+#define MT6357_AUXADC_MDRT_4 0x1210
+#define MT6357_AUXADC_DCXO_MDRT_0 0x1212
+#define MT6357_AUXADC_DCXO_MDRT_1 0x1214
+#define MT6357_AUXADC_DCXO_MDRT_2 0x1216
+#define MT6357_AUXADC_NAG_0 0x1218
+#define MT6357_AUXADC_NAG_1 0x121a
+#define MT6357_AUXADC_NAG_2 0x121c
+#define MT6357_AUXADC_NAG_3 0x121e
+#define MT6357_AUXADC_NAG_4 0x1220
+#define MT6357_AUXADC_NAG_5 0x1222
+#define MT6357_AUXADC_NAG_6 0x1224
+#define MT6357_AUXADC_NAG_7 0x1226
+#define MT6357_AUXADC_NAG_8 0x1228
+#define MT6357_AUXADC_RSV_1 0x122a
+#define MT6357_AUXADC_ANA_0 0x122c
+#define MT6357_AUXADC_IMP_CG0 0x122e
+#define MT6357_AUXADC_LBAT_CG0 0x1230
+#define MT6357_AUXADC_NAG_CG0 0x1232
+#define MT6357_AUXADC_PRI_NEW 0x1234
+#define MT6357_AUXADC_CHR_TOP_CON2 0x1236
+#define MT6357_BUCK_TOP_DSN_ID 0x1400
+#define MT6357_BUCK_TOP_DSN_REV0 0x1402
+#define MT6357_BUCK_TOP_DBI 0x1404
+#define MT6357_BUCK_TOP_DXI 0x1406
+#define MT6357_BUCK_TOP_PAM0 0x1408
+#define MT6357_BUCK_TOP_PAM1 0x140a
+#define MT6357_BUCK_TOP_CLK_CON0 0x140c
+#define MT6357_BUCK_TOP_CLK_CON0_SET 0x140e
+#define MT6357_BUCK_TOP_CLK_CON0_CLR 0x1410
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0 0x1412
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_SET 0x1414
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_CLR 0x1416
+#define MT6357_BUCK_TOP_CLK_MISC_CON0 0x1418
+#define MT6357_BUCK_TOP_INT_CON0 0x141a
+#define MT6357_BUCK_TOP_INT_CON0_SET 0x141c
+#define MT6357_BUCK_TOP_INT_CON0_CLR 0x141e
+#define MT6357_BUCK_TOP_INT_MASK_CON0 0x1420
+#define MT6357_BUCK_TOP_INT_MASK_CON0_SET 0x1422
+#define MT6357_BUCK_TOP_INT_MASK_CON0_CLR 0x1424
+#define MT6357_BUCK_TOP_INT_STATUS0 0x1426
+#define MT6357_BUCK_TOP_INT_RAW_STATUS0 0x1428
+#define MT6357_BUCK_TOP_STB_CON 0x142a
+#define MT6357_BUCK_TOP_SLP_CON0 0x142c
+#define MT6357_BUCK_TOP_SLP_CON1 0x142e
+#define MT6357_BUCK_TOP_SLP_CON2 0x1430
+#define MT6357_BUCK_TOP_MINFREQ_CON 0x1432
+#define MT6357_BUCK_TOP_OC_CON0 0x1434
+#define MT6357_BUCK_TOP_K_CON0 0x1436
+#define MT6357_BUCK_TOP_K_CON1 0x1438
+#define MT6357_BUCK_TOP_K_CON2 0x143a
+#define MT6357_BUCK_TOP_WDTDBG0 0x143c
+#define MT6357_BUCK_TOP_WDTDBG1 0x143e
+#define MT6357_BUCK_TOP_WDTDBG2 0x1440
+#define MT6357_BUCK_TOP_ELR_NUM 0x1442
+#define MT6357_BUCK_TOP_ELR0 0x1444
+#define MT6357_BUCK_TOP_ELR1 0x1446
+#define MT6357_BUCK_VPROC_DSN_ID 0x1480
+#define MT6357_BUCK_VPROC_DSN_REV0 0x1482
+#define MT6357_BUCK_VPROC_DSN_DBI 0x1484
+#define MT6357_BUCK_VPROC_DSN_DXI 0x1486
+#define MT6357_BUCK_VPROC_CON0 0x1488
+#define MT6357_BUCK_VPROC_CON1 0x148a
+#define MT6357_BUCK_VPROC_CFG0 0x148c
+#define MT6357_BUCK_VPROC_CFG1 0x148e
+#define MT6357_BUCK_VPROC_OP_EN 0x1490
+#define MT6357_BUCK_VPROC_OP_EN_SET 0x1492
+#define MT6357_BUCK_VPROC_OP_EN_CLR 0x1494
+#define MT6357_BUCK_VPROC_OP_CFG 0x1496
+#define MT6357_BUCK_VPROC_OP_CFG_SET 0x1498
+#define MT6357_BUCK_VPROC_OP_CFG_CLR 0x149a
+#define MT6357_BUCK_VPROC_SP_CON 0x149c
+#define MT6357_BUCK_VPROC_SP_CFG 0x149e
+#define MT6357_BUCK_VPROC_OC_CFG 0x14a0
+#define MT6357_BUCK_VPROC_DBG0 0x14a2
+#define MT6357_BUCK_VPROC_DBG1 0x14a4
+#define MT6357_BUCK_VPROC_DBG2 0x14a6
+#define MT6357_BUCK_VPROC_ELR_NUM 0x14a8
+#define MT6357_BUCK_VPROC_ELR0 0x14aa
+#define MT6357_BUCK_VCORE_DSN_ID 0x1500
+#define MT6357_BUCK_VCORE_DSN_REV0 0x1502
+#define MT6357_BUCK_VCORE_DSN_DBI 0x1504
+#define MT6357_BUCK_VCORE_DSN_DXI 0x1506
+#define MT6357_BUCK_VCORE_CON0 0x1508
+#define MT6357_BUCK_VCORE_CON1 0x150a
+#define MT6357_BUCK_VCORE_CFG0 0x150c
+#define MT6357_BUCK_VCORE_CFG1 0x150e
+#define MT6357_BUCK_VCORE_OP_EN 0x1510
+#define MT6357_BUCK_VCORE_OP_EN_SET 0x1512
+#define MT6357_BUCK_VCORE_OP_EN_CLR 0x1514
+#define MT6357_BUCK_VCORE_OP_CFG 0x1516
+#define MT6357_BUCK_VCORE_OP_CFG_SET 0x1518
+#define MT6357_BUCK_VCORE_OP_CFG_CLR 0x151a
+#define MT6357_BUCK_VCORE_SP_CON 0x151c
+#define MT6357_BUCK_VCORE_SP_CFG 0x151e
+#define MT6357_BUCK_VCORE_OC_CFG 0x1520
+#define MT6357_BUCK_VCORE_DBG0 0x1522
+#define MT6357_BUCK_VCORE_DBG1 0x1524
+#define MT6357_BUCK_VCORE_DBG2 0x1526
+#define MT6357_BUCK_VCORE_ELR_NUM 0x1528
+#define MT6357_BUCK_VCORE_ELR0 0x152a
+#define MT6357_BUCK_VMODEM_DSN_ID 0x1580
+#define MT6357_BUCK_VMODEM_DSN_REV0 0x1582
+#define MT6357_BUCK_VMODEM_DSN_DBI 0x1584
+#define MT6357_BUCK_VMODEM_DSN_DXI 0x1586
+#define MT6357_BUCK_VMODEM_CON0 0x1588
+#define MT6357_BUCK_VMODEM_CON1 0x158a
+#define MT6357_BUCK_VMODEM_CFG0 0x158c
+#define MT6357_BUCK_VMODEM_CFG1 0x158e
+#define MT6357_BUCK_VMODEM_OP_EN 0x1590
+#define MT6357_BUCK_VMODEM_OP_EN_SET 0x1592
+#define MT6357_BUCK_VMODEM_OP_EN_CLR 0x1594
+#define MT6357_BUCK_VMODEM_OP_CFG 0x1596
+#define MT6357_BUCK_VMODEM_OP_CFG_SET 0x1598
+#define MT6357_BUCK_VMODEM_OP_CFG_CLR 0x159a
+#define MT6357_BUCK_VMODEM_SP_CON 0x159c
+#define MT6357_BUCK_VMODEM_SP_CFG 0x159e
+#define MT6357_BUCK_VMODEM_OC_CFG 0x15a0
+#define MT6357_BUCK_VMODEM_DBG0 0x15a2
+#define MT6357_BUCK_VMODEM_DBG1 0x15a4
+#define MT6357_BUCK_VMODEM_DBG2 0x15a6
+#define MT6357_BUCK_VMODEM_ELR_NUM 0x15a8
+#define MT6357_BUCK_VMODEM_ELR0 0x15aa
+#define MT6357_BUCK_VS1_DSN_ID 0x1600
+#define MT6357_BUCK_VS1_DSN_REV0 0x1602
+#define MT6357_BUCK_VS1_DSN_DBI 0x1604
+#define MT6357_BUCK_VS1_DSN_DXI 0x1606
+#define MT6357_BUCK_VS1_CON0 0x1608
+#define MT6357_BUCK_VS1_CON1 0x160a
+#define MT6357_BUCK_VS1_CFG0 0x160c
+#define MT6357_BUCK_VS1_CFG1 0x160e
+#define MT6357_BUCK_VS1_OP_EN 0x1610
+#define MT6357_BUCK_VS1_OP_EN_SET 0x1612
+#define MT6357_BUCK_VS1_OP_EN_CLR 0x1614
+#define MT6357_BUCK_VS1_OP_CFG 0x1616
+#define MT6357_BUCK_VS1_OP_CFG_SET 0x1618
+#define MT6357_BUCK_VS1_OP_CFG_CLR 0x161a
+#define MT6357_BUCK_VS1_SP_CON 0x161c
+#define MT6357_BUCK_VS1_SP_CFG 0x161e
+#define MT6357_BUCK_VS1_OC_CFG 0x1620
+#define MT6357_BUCK_VS1_DBG0 0x1622
+#define MT6357_BUCK_VS1_DBG1 0x1624
+#define MT6357_BUCK_VS1_DBG2 0x1626
+#define MT6357_BUCK_VS1_VOTER 0x1628
+#define MT6357_BUCK_VS1_VOTER_SET 0x162a
+#define MT6357_BUCK_VS1_VOTER_CLR 0x162c
+#define MT6357_BUCK_VS1_VOTER_CFG 0x162e
+#define MT6357_BUCK_VS1_ELR_NUM 0x1630
+#define MT6357_BUCK_VS1_ELR0 0x1632
+#define MT6357_BUCK_VPA_DSN_ID 0x1680
+#define MT6357_BUCK_VPA_DSN_REV0 0x1682
+#define MT6357_BUCK_VPA_DSN_DBI 0x1684
+#define MT6357_BUCK_VPA_DSN_DXI 0x1686
+#define MT6357_BUCK_VPA_CON0 0x1688
+#define MT6357_BUCK_VPA_CON1 0x168a
+#define MT6357_BUCK_VPA_CFG0 0x168c
+#define MT6357_BUCK_VPA_CFG1 0x168e
+#define MT6357_BUCK_VPA_OC_CFG 0x1690
+#define MT6357_BUCK_VPA_DBG0 0x1692
+#define MT6357_BUCK_VPA_DBG1 0x1694
+#define MT6357_BUCK_VPA_DBG2 0x1696
+#define MT6357_BUCK_VPA_DLC_CON0 0x1698
+#define MT6357_BUCK_VPA_DLC_CON1 0x169a
+#define MT6357_BUCK_VPA_DLC_CON2 0x169c
+#define MT6357_BUCK_VPA_MSFG_CON0 0x169e
+#define MT6357_BUCK_VPA_MSFG_CON1 0x16a0
+#define MT6357_BUCK_VPA_MSFG_RRATE0 0x16a2
+#define MT6357_BUCK_VPA_MSFG_RRATE1 0x16a4
+#define MT6357_BUCK_VPA_MSFG_RRATE2 0x16a6
+#define MT6357_BUCK_VPA_MSFG_RTHD0 0x16a8
+#define MT6357_BUCK_VPA_MSFG_RTHD1 0x16aa
+#define MT6357_BUCK_VPA_MSFG_RTHD2 0x16ac
+#define MT6357_BUCK_VPA_MSFG_FRATE0 0x16ae
+#define MT6357_BUCK_VPA_MSFG_FRATE1 0x16b0
+#define MT6357_BUCK_VPA_MSFG_FRATE2 0x16b2
+#define MT6357_BUCK_VPA_MSFG_FTHD0 0x16b4
+#define MT6357_BUCK_VPA_MSFG_FTHD1 0x16b6
+#define MT6357_BUCK_VPA_MSFG_FTHD2 0x16b8
+#define MT6357_BUCK_ANA_DSN_ID 0x1700
+#define MT6357_BUCK_ANA_DSN_REV0 0x1702
+#define MT6357_BUCK_ANA_DSN_DBI 0x1704
+#define MT6357_BUCK_ANA_DSN_FPI 0x1706
+#define MT6357_SMPS_ANA_CON0 0x1708
+#define MT6357_SMPS_ANA_CON1 0x170a
+#define MT6357_SMPS_ANA_CON2 0x170c
+#define MT6357_VCORE_VPROC_ANA_CON0 0x170e
+#define MT6357_VCORE_VPROC_ANA_CON1 0x1710
+#define MT6357_VCORE_VPROC_ANA_CON2 0x1712
+#define MT6357_VCORE_VPROC_ANA_CON3 0x1714
+#define MT6357_VCORE_VPROC_ANA_CON4 0x1716
+#define MT6357_VCORE_VPROC_ANA_CON5 0x1718
+#define MT6357_VCORE_VPROC_ANA_CON6 0x171a
+#define MT6357_VCORE_VPROC_ANA_CON7 0x171c
+#define MT6357_VCORE_VPROC_ANA_CON8 0x171e
+#define MT6357_VCORE_VPROC_ANA_CON9 0x1720
+#define MT6357_VCORE_VPROC_ANA_CON10 0x1722
+#define MT6357_VCORE_VPROC_ANA_CON11 0x1724
+#define MT6357_VMODEM_ANA_CON0 0x1726
+#define MT6357_VMODEM_ANA_CON1 0x1728
+#define MT6357_VMODEM_ANA_CON2 0x172a
+#define MT6357_VMODEM_ANA_CON3 0x172c
+#define MT6357_VMODEM_ANA_CON4 0x172e
+#define MT6357_VMODEM_ANA_CON5 0x1730
+#define MT6357_VS1_ANA_CON0 0x1732
+#define MT6357_VS1_ANA_CON1 0x1734
+#define MT6357_VS1_ANA_CON2 0x1736
+#define MT6357_VS1_ANA_CON3 0x1738
+#define MT6357_VS1_ANA_CON4 0x173a
+#define MT6357_VS1_ANA_CON5 0x173c
+#define MT6357_VPA_ANA_CON0 0x173e
+#define MT6357_VPA_ANA_CON1 0x1740
+#define MT6357_VPA_ANA_CON2 0x1742
+#define MT6357_VPA_ANA_CON3 0x1744
+#define MT6357_VPA_ANA_CON4 0x1746
+#define MT6357_VPA_ANA_CON5 0x1748
+#define MT6357_BUCK_ANA_ELR_NUM 0x174a
+#define MT6357_SMPS_ELR_0 0x174c
+#define MT6357_SMPS_ELR_1 0x174e
+#define MT6357_SMPS_ELR_2 0x1750
+#define MT6357_SMPS_ELR_3 0x1752
+#define MT6357_SMPS_ELR_4 0x1754
+#define MT6357_SMPS_ELR_5 0x1756
+#define MT6357_VCORE_VPROC_ELR_0 0x1758
+#define MT6357_VCORE_VPROC_ELR_1 0x175a
+#define MT6357_VCORE_VPROC_ELR_2 0x175c
+#define MT6357_VCORE_VPROC_ELR_3 0x175e
+#define MT6357_VCORE_VPROC_ELR_4 0x1760
+#define MT6357_VMODEM_ELR_0 0x1762
+#define MT6357_VMODEM_ELR_1 0x1764
+#define MT6357_VMODEM_ELR_2 0x1766
+#define MT6357_VS1_ELR_0 0x1768
+#define MT6357_VS1_ELR_1 0x176a
+#define MT6357_VPA_ELR_0 0x176c
+#define MT6357_LDO_TOP_ID 0x1880
+#define MT6357_LDO_TOP_REV0 0x1882
+#define MT6357_LDO_TOP_DBI 0x1884
+#define MT6357_LDO_TOP_DXI 0x1886
+#define MT6357_LDO_TPM0 0x1888
+#define MT6357_LDO_TPM1 0x188a
+#define MT6357_LDO_TOP_CLK_DCM_CON0 0x188c
+#define MT6357_LDO_TOP_CLK_VIO28_CON0 0x188e
+#define MT6357_LDO_TOP_CLK_VIO18_CON0 0x1890
+#define MT6357_LDO_TOP_CLK_VAUD28_CON0 0x1892
+#define MT6357_LDO_TOP_CLK_VDRAM_CON0 0x1894
+#define MT6357_LDO_TOP_CLK_VSRAM_PROC_CON0 0x1896
+#define MT6357_LDO_TOP_CLK_VSRAM_OTHERS_CON0 0x1898
+#define MT6357_LDO_TOP_CLK_VAUX18_CON0 0x189a
+#define MT6357_LDO_TOP_CLK_VUSB33_CON0 0x189c
+#define MT6357_LDO_TOP_CLK_VEMC_CON0 0x189e
+#define MT6357_LDO_TOP_CLK_VXO22_CON0 0x18a0
+#define MT6357_LDO_TOP_CLK_VSIM1_CON0 0x18a2
+#define MT6357_LDO_TOP_CLK_VSIM2_CON0 0x18a4
+#define MT6357_LDO_TOP_CLK_VCAMD_CON0 0x18a6
+#define MT6357_LDO_TOP_CLK_VCAMIO_CON0 0x18a8
+#define MT6357_LDO_TOP_CLK_VEFUSE_CON0 0x18aa
+#define MT6357_LDO_TOP_CLK_VCN33_CON0 0x18ac
+#define MT6357_LDO_TOP_CLK_VCN18_CON0 0x18ae
+#define MT6357_LDO_TOP_CLK_VCN28_CON0 0x18b0
+#define MT6357_LDO_TOP_CLK_VIBR_CON0 0x18b2
+#define MT6357_LDO_TOP_CLK_VFE28_CON0 0x18b4
+#define MT6357_LDO_TOP_CLK_VMCH_CON0 0x18b6
+#define MT6357_LDO_TOP_CLK_VMC_CON0 0x18b8
+#define MT6357_LDO_TOP_CLK_VRF18_CON0 0x18ba
+#define MT6357_LDO_TOP_CLK_VLDO28_CON0 0x18bc
+#define MT6357_LDO_TOP_CLK_VRF12_CON0 0x18be
+#define MT6357_LDO_TOP_CLK_VCAMA_CON0 0x18c0
+#define MT6357_LDO_TOP_CLK_TREF_CON0 0x18c2
+#define MT6357_LDO_TOP_INT_CON0 0x18c4
+#define MT6357_LDO_TOP_INT_CON0_SET 0x18c6
+#define MT6357_LDO_TOP_INT_CON0_CLR 0x18c8
+#define MT6357_LDO_TOP_INT_CON1 0x18ca
+#define MT6357_LDO_TOP_INT_CON1_SET 0x18cc
+#define MT6357_LDO_TOP_INT_CON1_CLR 0x18ce
+#define MT6357_LDO_TOP_INT_MASK_CON0 0x18d0
+#define MT6357_LDO_TOP_INT_MASK_CON0_SET 0x18d2
+#define MT6357_LDO_TOP_INT_MASK_CON0_CLR 0x18d4
+#define MT6357_LDO_TOP_INT_MASK_CON1 0x18d6
+#define MT6357_LDO_TOP_INT_MASK_CON1_SET 0x18d8
+#define MT6357_LDO_TOP_INT_MASK_CON1_CLR 0x18da
+#define MT6357_LDO_TOP_INT_STATUS0 0x18dc
+#define MT6357_LDO_TOP_INT_STATUS1 0x18de
+#define MT6357_LDO_TOP_INT_RAW_STATUS0 0x18e0
+#define MT6357_LDO_TOP_INT_RAW_STATUS1 0x18e2
+#define MT6357_LDO_TEST_CON0 0x18e4
+#define MT6357_LDO_TOP_WDT_CON0 0x18e6
+#define MT6357_LDO_TOP_RSV_CON0 0x18e8
+#define MT6357_LDO_TOP_RSV_CON1 0x18ea
+#define MT6357_LDO_OCFB0 0x18ec
+#define MT6357_LDO_LP_PROTECTION 0x18ee
+#define MT6357_LDO_DUMMY_LOAD_GATED 0x18f0
+#define MT6357_LDO_GON0_DSN_ID 0x1900
+#define MT6357_LDO_GON0_DSN_REV0 0x1902
+#define MT6357_LDO_GON0_DSN_DBI 0x1904
+#define MT6357_LDO_GON0_DSN_DXI 0x1906
+#define MT6357_LDO_VXO22_CON0 0x1908
+#define MT6357_LDO_VXO22_OP_EN 0x190a
+#define MT6357_LDO_VXO22_OP_EN_SET 0x190c
+#define MT6357_LDO_VXO22_OP_EN_CLR 0x190e
+#define MT6357_LDO_VXO22_OP_CFG 0x1910
+#define MT6357_LDO_VXO22_OP_CFG_SET 0x1912
+#define MT6357_LDO_VXO22_OP_CFG_CLR 0x1914
+#define MT6357_LDO_VXO22_CON1 0x1916
+#define MT6357_LDO_VXO22_CON2 0x1918
+#define MT6357_LDO_VXO22_CON3 0x191a
+#define MT6357_LDO_VAUX18_CON0 0x191c
+#define MT6357_LDO_VAUX18_OP_EN 0x191e
+#define MT6357_LDO_VAUX18_OP_EN_SET 0x1920
+#define MT6357_LDO_VAUX18_OP_EN_CLR 0x1922
+#define MT6357_LDO_VAUX18_OP_CFG 0x1924
+#define MT6357_LDO_VAUX18_OP_CFG_SET 0x1926
+#define MT6357_LDO_VAUX18_OP_CFG_CLR 0x1928
+#define MT6357_LDO_VAUX18_CON1 0x192a
+#define MT6357_LDO_VAUX18_CON2 0x192c
+#define MT6357_LDO_VAUX18_CON3 0x192e
+#define MT6357_LDO_VAUD28_CON0 0x1930
+#define MT6357_LDO_VAUD28_OP_EN 0x1932
+#define MT6357_LDO_VAUD28_OP_EN_SET 0x1934
+#define MT6357_LDO_VAUD28_OP_EN_CLR 0x1936
+#define MT6357_LDO_VAUD28_OP_CFG 0x1938
+#define MT6357_LDO_VAUD28_OP_CFG_SET 0x193a
+#define MT6357_LDO_VAUD28_OP_CFG_CLR 0x193c
+#define MT6357_LDO_VAUD28_CON1 0x193e
+#define MT6357_LDO_VAUD28_CON2 0x1940
+#define MT6357_LDO_VAUD28_CON3 0x1942
+#define MT6357_LDO_VIO28_CON0 0x1944
+#define MT6357_LDO_VIO28_OP_EN 0x1946
+#define MT6357_LDO_VIO28_OP_EN_SET 0x1948
+#define MT6357_LDO_VIO28_OP_EN_CLR 0x194a
+#define MT6357_LDO_VIO28_OP_CFG 0x194c
+#define MT6357_LDO_VIO28_OP_CFG_SET 0x194e
+#define MT6357_LDO_VIO28_OP_CFG_CLR 0x1950
+#define MT6357_LDO_VIO28_CON1 0x1952
+#define MT6357_LDO_VIO28_CON2 0x1954
+#define MT6357_LDO_VIO28_CON3 0x1956
+#define MT6357_LDO_VIO18_CON0 0x1958
+#define MT6357_LDO_VIO18_OP_EN 0x195a
+#define MT6357_LDO_VIO18_OP_EN_SET 0x195c
+#define MT6357_LDO_VIO18_OP_EN_CLR 0x195e
+#define MT6357_LDO_VIO18_OP_CFG 0x1960
+#define MT6357_LDO_VIO18_OP_CFG_SET 0x1962
+#define MT6357_LDO_VIO18_OP_CFG_CLR 0x1964
+#define MT6357_LDO_VIO18_CON1 0x1966
+#define MT6357_LDO_VIO18_CON2 0x1968
+#define MT6357_LDO_VIO18_CON3 0x196a
+#define MT6357_LDO_VDRAM_CON0 0x196c
+#define MT6357_LDO_VDRAM_OP_EN 0x196e
+#define MT6357_LDO_VDRAM_OP_EN_SET 0x1970
+#define MT6357_LDO_VDRAM_OP_EN_CLR 0x1972
+#define MT6357_LDO_VDRAM_OP_CFG 0x1974
+#define MT6357_LDO_VDRAM_OP_CFG_SET 0x1976
+#define MT6357_LDO_VDRAM_OP_CFG_CLR 0x1978
+#define MT6357_LDO_VDRAM_CON1 0x197a
+#define MT6357_LDO_VDRAM_CON2 0x197c
+#define MT6357_LDO_VDRAM_CON3 0x197e
+#define MT6357_LDO_GON1_DSN_ID 0x1980
+#define MT6357_LDO_GON1_DSN_REV0 0x1982
+#define MT6357_LDO_GON1_DSN_DBI 0x1984
+#define MT6357_LDO_GON1_DSN_DXI 0x1986
+#define MT6357_LDO_VEMC_CON0 0x1988
+#define MT6357_LDO_VEMC_OP_EN 0x198a
+#define MT6357_LDO_VEMC_OP_EN_SET 0x198c
+#define MT6357_LDO_VEMC_OP_EN_CLR 0x198e
+#define MT6357_LDO_VEMC_OP_CFG 0x1990
+#define MT6357_LDO_VEMC_OP_CFG_SET 0x1992
+#define MT6357_LDO_VEMC_OP_CFG_CLR 0x1994
+#define MT6357_LDO_VEMC_CON1 0x1996
+#define MT6357_LDO_VEMC_CON2 0x1998
+#define MT6357_LDO_VEMC_CON3 0x199a
+#define MT6357_LDO_VUSB33_CON0_0 0x199c
+#define MT6357_LDO_VUSB33_OP_EN 0x199e
+#define MT6357_LDO_VUSB33_OP_EN_SET 0x19a0
+#define MT6357_LDO_VUSB33_OP_EN_CLR 0x19a2
+#define MT6357_LDO_VUSB33_OP_CFG 0x19a4
+#define MT6357_LDO_VUSB33_OP_CFG_SET 0x19a6
+#define MT6357_LDO_VUSB33_OP_CFG_CLR 0x19a8
+#define MT6357_LDO_VUSB33_CON0_1 0x19aa
+#define MT6357_LDO_VUSB33_CON1 0x19ac
+#define MT6357_LDO_VUSB33_CON2 0x19ae
+#define MT6357_LDO_VUSB33_CON3 0x19b0
+#define MT6357_LDO_VSRAM_PROC_CON0 0x19b2
+#define MT6357_LDO_VSRAM_PROC_CON2 0x19b4
+#define MT6357_LDO_VSRAM_PROC_CFG0 0x19b6
+#define MT6357_LDO_VSRAM_PROC_CFG1 0x19b8
+#define MT6357_LDO_VSRAM_PROC_OP_EN 0x19ba
+#define MT6357_LDO_VSRAM_PROC_OP_EN_SET 0x19bc
+#define MT6357_LDO_VSRAM_PROC_OP_EN_CLR 0x19be
+#define MT6357_LDO_VSRAM_PROC_OP_CFG 0x19c0
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_SET 0x19c2
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_CLR 0x19c4
+#define MT6357_LDO_VSRAM_PROC_CON3 0x19c6
+#define MT6357_LDO_VSRAM_PROC_CON4 0x19c8
+#define MT6357_LDO_VSRAM_PROC_CON5 0x19ca
+#define MT6357_LDO_VSRAM_PROC_DBG0 0x19cc
+#define MT6357_LDO_VSRAM_PROC_DBG1 0x19ce
+#define MT6357_LDO_VSRAM_OTHERS_CON0 0x19d0
+#define MT6357_LDO_VSRAM_OTHERS_CON2 0x19d2
+#define MT6357_LDO_VSRAM_OTHERS_CFG0 0x19d4
+#define MT6357_LDO_VSRAM_OTHERS_CFG1 0x19d6
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN 0x19d8
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_SET 0x19da
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_CLR 0x19dc
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG 0x19de
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_SET 0x19e0
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_CLR 0x19e2
+#define MT6357_LDO_VSRAM_OTHERS_CON3 0x19e4
+#define MT6357_LDO_VSRAM_OTHERS_CON4 0x19e6
+#define MT6357_LDO_VSRAM_OTHERS_CON5 0x19e8
+#define MT6357_LDO_VSRAM_OTHERS_DBG0 0x19ea
+#define MT6357_LDO_VSRAM_OTHERS_DBG1 0x19ec
+#define MT6357_LDO_VSRAM_PROC_SP 0x19ee
+#define MT6357_LDO_VSRAM_OTHERS_SP 0x19f0
+#define MT6357_LDO_VSRAM_PROC_R2R_PDN_DIS 0x19f2
+#define MT6357_LDO_VSRAM_OTHERS_R2R_PDN_DIS 0x19f4
+#define MT6357_LDO_VSRAM_WDT_DBG0 0x19f6
+#define MT6357_LDO_GON1_ELR_NUM 0x19f8
+#define MT6357_LDO_VSRAM_CON0 0x19fa
+#define MT6357_LDO_VSRAM_CON1 0x19fc
+#define MT6357_LDO_VSRAM_CON2 0x19fe
+#define MT6357_LDO_GOFF0_DSN_ID 0x1a00
+#define MT6357_LDO_GOFF0_DSN_REV0 0x1a02
+#define MT6357_LDO_GOFF0_DSN_DBI 0x1a04
+#define MT6357_LDO_GOFF0_DSN_DXI 0x1a06
+#define MT6357_LDO_VFE28_CON0 0x1a08
+#define MT6357_LDO_VFE28_OP_EN 0x1a0a
+#define MT6357_LDO_VFE28_OP_EN_SET 0x1a0c
+#define MT6357_LDO_VFE28_OP_EN_CLR 0x1a0e
+#define MT6357_LDO_VFE28_OP_CFG 0x1a10
+#define MT6357_LDO_VFE28_OP_CFG_SET 0x1a12
+#define MT6357_LDO_VFE28_OP_CFG_CLR 0x1a14
+#define MT6357_LDO_VFE28_CON1 0x1a16
+#define MT6357_LDO_VFE28_CON2 0x1a18
+#define MT6357_LDO_VFE28_CON3 0x1a1a
+#define MT6357_LDO_VRF18_CON0 0x1a1c
+#define MT6357_LDO_VRF18_OP_EN 0x1a1e
+#define MT6357_LDO_VRF18_OP_EN_SET 0x1a20
+#define MT6357_LDO_VRF18_OP_EN_CLR 0x1a22
+#define MT6357_LDO_VRF18_OP_CFG 0x1a24
+#define MT6357_LDO_VRF18_OP_CFG_SET 0x1a26
+#define MT6357_LDO_VRF18_OP_CFG_CLR 0x1a28
+#define MT6357_LDO_VRF18_CON1 0x1a2a
+#define MT6357_LDO_VRF18_CON2 0x1a2c
+#define MT6357_LDO_VRF18_CON3 0x1a2e
+#define MT6357_LDO_VRF12_CON0 0x1a30
+#define MT6357_LDO_VRF12_OP_EN 0x1a32
+#define MT6357_LDO_VRF12_OP_EN_SET 0x1a34
+#define MT6357_LDO_VRF12_OP_EN_CLR 0x1a36
+#define MT6357_LDO_VRF12_OP_CFG 0x1a38
+#define MT6357_LDO_VRF12_OP_CFG_SET 0x1a3a
+#define MT6357_LDO_VRF12_OP_CFG_CLR 0x1a3c
+#define MT6357_LDO_VRF12_CON1 0x1a3e
+#define MT6357_LDO_VRF12_CON2 0x1a40
+#define MT6357_LDO_VRF12_CON3 0x1a42
+#define MT6357_LDO_VEFUSE_CON0 0x1a44
+#define MT6357_LDO_VEFUSE_OP_EN 0x1a46
+#define MT6357_LDO_VEFUSE_OP_EN_SET 0x1a48
+#define MT6357_LDO_VEFUSE_OP_EN_CLR 0x1a4a
+#define MT6357_LDO_VEFUSE_OP_CFG 0x1a4c
+#define MT6357_LDO_VEFUSE_OP_CFG_SET 0x1a4e
+#define MT6357_LDO_VEFUSE_OP_CFG_CLR 0x1a50
+#define MT6357_LDO_VEFUSE_CON1 0x1a52
+#define MT6357_LDO_VEFUSE_CON2 0x1a54
+#define MT6357_LDO_VEFUSE_CON3 0x1a56
+#define MT6357_LDO_VCN18_CON0 0x1a58
+#define MT6357_LDO_VCN18_OP_EN 0x1a5a
+#define MT6357_LDO_VCN18_OP_EN_SET 0x1a5c
+#define MT6357_LDO_VCN18_OP_EN_CLR 0x1a5e
+#define MT6357_LDO_VCN18_OP_CFG 0x1a60
+#define MT6357_LDO_VCN18_OP_CFG_SET 0x1a62
+#define MT6357_LDO_VCN18_OP_CFG_CLR 0x1a64
+#define MT6357_LDO_VCN18_CON1 0x1a66
+#define MT6357_LDO_VCN18_CON2 0x1a68
+#define MT6357_LDO_VCN18_CON3 0x1a6a
+#define MT6357_LDO_VCAMA_CON0 0x1a6c
+#define MT6357_LDO_VCAMA_OP_EN 0x1a6e
+#define MT6357_LDO_VCAMA_OP_EN_SET 0x1a70
+#define MT6357_LDO_VCAMA_OP_EN_CLR 0x1a72
+#define MT6357_LDO_VCAMA_OP_CFG 0x1a74
+#define MT6357_LDO_VCAMA_OP_CFG_SET 0x1a76
+#define MT6357_LDO_VCAMA_OP_CFG_CLR 0x1a78
+#define MT6357_LDO_VCAMA_CON1 0x1a7a
+#define MT6357_LDO_VCAMA_CON2 0x1a7c
+#define MT6357_LDO_VCAMA_CON3 0x1a7e
+#define MT6357_LDO_GOFF1_DSN_ID 0x1a80
+#define MT6357_LDO_GOFF1_DSN_REV0 0x1a82
+#define MT6357_LDO_GOFF1_DSN_DBI 0x1a84
+#define MT6357_LDO_GOFF1_DSN_DXI 0x1a86
+#define MT6357_LDO_VCAMD_CON0 0x1a88
+#define MT6357_LDO_VCAMD_OP_EN 0x1a8a
+#define MT6357_LDO_VCAMD_OP_EN_SET 0x1a8c
+#define MT6357_LDO_VCAMD_OP_EN_CLR 0x1a8e
+#define MT6357_LDO_VCAMD_OP_CFG 0x1a90
+#define MT6357_LDO_VCAMD_OP_CFG_SET 0x1a92
+#define MT6357_LDO_VCAMD_OP_CFG_CLR 0x1a94
+#define MT6357_LDO_VCAMD_CON1 0x1a96
+#define MT6357_LDO_VCAMD_CON2 0x1a98
+#define MT6357_LDO_VCAMD_CON3 0x1a9a
+#define MT6357_LDO_VCAMIO_CON0 0x1a9c
+#define MT6357_LDO_VCAMIO_OP_EN 0x1a9e
+#define MT6357_LDO_VCAMIO_OP_EN_SET 0x1aa0
+#define MT6357_LDO_VCAMIO_OP_EN_CLR 0x1aa2
+#define MT6357_LDO_VCAMIO_OP_CFG 0x1aa4
+#define MT6357_LDO_VCAMIO_OP_CFG_SET 0x1aa6
+#define MT6357_LDO_VCAMIO_OP_CFG_CLR 0x1aa8
+#define MT6357_LDO_VCAMIO_CON1 0x1aaa
+#define MT6357_LDO_VCAMIO_CON2 0x1aac
+#define MT6357_LDO_VCAMIO_CON3 0x1aae
+#define MT6357_LDO_VMC_CON0 0x1ab0
+#define MT6357_LDO_VMC_OP_EN 0x1ab2
+#define MT6357_LDO_VMC_OP_EN_SET 0x1ab4
+#define MT6357_LDO_VMC_OP_EN_CLR 0x1ab6
+#define MT6357_LDO_VMC_OP_CFG 0x1ab8
+#define MT6357_LDO_VMC_OP_CFG_SET 0x1aba
+#define MT6357_LDO_VMC_OP_CFG_CLR 0x1abc
+#define MT6357_LDO_VMC_CON1 0x1abe
+#define MT6357_LDO_VMC_CON2 0x1ac0
+#define MT6357_LDO_VMC_CON3 0x1ac2
+#define MT6357_LDO_VMCH_CON0 0x1ac4
+#define MT6357_LDO_VMCH_OP_EN 0x1ac6
+#define MT6357_LDO_VMCH_OP_EN_SET 0x1ac8
+#define MT6357_LDO_VMCH_OP_EN_CLR 0x1aca
+#define MT6357_LDO_VMCH_OP_CFG 0x1acc
+#define MT6357_LDO_VMCH_OP_CFG_SET 0x1ace
+#define MT6357_LDO_VMCH_OP_CFG_CLR 0x1ad0
+#define MT6357_LDO_VMCH_CON1 0x1ad2
+#define MT6357_LDO_VMCH_CON2 0x1ad4
+#define MT6357_LDO_VMCH_CON3 0x1ad6
+#define MT6357_LDO_VSIM1_CON0 0x1ad8
+#define MT6357_LDO_VSIM1_OP_EN 0x1ada
+#define MT6357_LDO_VSIM1_OP_EN_SET 0x1adc
+#define MT6357_LDO_VSIM1_OP_EN_CLR 0x1ade
+#define MT6357_LDO_VSIM1_OP_CFG 0x1ae0
+#define MT6357_LDO_VSIM1_OP_CFG_SET 0x1ae2
+#define MT6357_LDO_VSIM1_OP_CFG_CLR 0x1ae4
+#define MT6357_LDO_VSIM1_CON1 0x1ae6
+#define MT6357_LDO_VSIM1_CON2 0x1ae8
+#define MT6357_LDO_VSIM1_CON3 0x1aea
+#define MT6357_LDO_VSIM2_CON0 0x1aec
+#define MT6357_LDO_VSIM2_OP_EN 0x1aee
+#define MT6357_LDO_VSIM2_OP_EN_SET 0x1af0
+#define MT6357_LDO_VSIM2_OP_EN_CLR 0x1af2
+#define MT6357_LDO_VSIM2_OP_CFG 0x1af4
+#define MT6357_LDO_VSIM2_OP_CFG_SET 0x1af6
+#define MT6357_LDO_VSIM2_OP_CFG_CLR 0x1af8
+#define MT6357_LDO_VSIM2_CON1 0x1afa
+#define MT6357_LDO_VSIM2_CON2 0x1afc
+#define MT6357_LDO_VSIM2_CON3 0x1afe
+#define MT6357_LDO_GOFF2_DSN_ID 0x1b00
+#define MT6357_LDO_GOFF2_DSN_REV0 0x1b02
+#define MT6357_LDO_GOFF2_DSN_DBI 0x1b04
+#define MT6357_LDO_GOFF2_DSN_DXI 0x1b06
+#define MT6357_LDO_VIBR_CON0 0x1b08
+#define MT6357_LDO_VIBR_OP_EN 0x1b0a
+#define MT6357_LDO_VIBR_OP_EN_SET 0x1b0c
+#define MT6357_LDO_VIBR_OP_EN_CLR 0x1b0e
+#define MT6357_LDO_VIBR_OP_CFG 0x1b10
+#define MT6357_LDO_VIBR_OP_CFG_SET 0x1b12
+#define MT6357_LDO_VIBR_OP_CFG_CLR 0x1b14
+#define MT6357_LDO_VIBR_CON1 0x1b16
+#define MT6357_LDO_VIBR_CON2 0x1b18
+#define MT6357_LDO_VIBR_CON3 0x1b1a
+#define MT6357_LDO_VCN33_CON0_0 0x1b1c
+#define MT6357_LDO_VCN33_OP_EN 0x1b1e
+#define MT6357_LDO_VCN33_OP_EN_SET 0x1b20
+#define MT6357_LDO_VCN33_OP_EN_CLR 0x1b22
+#define MT6357_LDO_VCN33_OP_CFG 0x1b24
+#define MT6357_LDO_VCN33_OP_CFG_SET 0x1b26
+#define MT6357_LDO_VCN33_OP_CFG_CLR 0x1b28
+#define MT6357_LDO_VCN33_CON0_1 0x1b2a
+#define MT6357_LDO_VCN33_CON1 0x1b2c
+#define MT6357_LDO_VCN33_CON2 0x1b2e
+#define MT6357_LDO_VCN33_CON3 0x1b30
+#define MT6357_LDO_VLDO28_CON0_0 0x1b32
+#define MT6357_LDO_VLDO28_OP_EN 0x1b34
+#define MT6357_LDO_VLDO28_OP_EN_SET 0x1b36
+#define MT6357_LDO_VLDO28_OP_EN_CLR 0x1b38
+#define MT6357_LDO_VLDO28_OP_CFG 0x1b3a
+#define MT6357_LDO_VLDO28_OP_CFG_SET 0x1b3c
+#define MT6357_LDO_VLDO28_OP_CFG_CLR 0x1b3e
+#define MT6357_LDO_VLDO28_CON0_1 0x1b40
+#define MT6357_LDO_VLDO28_CON1 0x1b42
+#define MT6357_LDO_VLDO28_CON2 0x1b44
+#define MT6357_LDO_VLDO28_CON3 0x1b46
+#define MT6357_LDO_GOFF2_RSV_CON0 0x1b48
+#define MT6357_LDO_GOFF2_RSV_CON1 0x1b4a
+#define MT6357_LDO_GOFF3_DSN_ID 0x1b80
+#define MT6357_LDO_GOFF3_DSN_REV0 0x1b82
+#define MT6357_LDO_GOFF3_DSN_DBI 0x1b84
+#define MT6357_LDO_GOFF3_DSN_DXI 0x1b86
+#define MT6357_LDO_VCN28_CON0 0x1b88
+#define MT6357_LDO_VCN28_OP_EN 0x1b8a
+#define MT6357_LDO_VCN28_OP_EN_SET 0x1b8c
+#define MT6357_LDO_VCN28_OP_EN_CLR 0x1b8e
+#define MT6357_LDO_VCN28_OP_CFG 0x1b90
+#define MT6357_LDO_VCN28_OP_CFG_SET 0x1b92
+#define MT6357_LDO_VCN28_OP_CFG_CLR 0x1b94
+#define MT6357_LDO_VCN28_CON1 0x1b96
+#define MT6357_LDO_VCN28_CON2 0x1b98
+#define MT6357_LDO_VCN28_CON3 0x1b9a
+#define MT6357_VRTC_CON0 0x1b9c
+#define MT6357_LDO_TREF_CON0 0x1b9e
+#define MT6357_LDO_TREF_OP_EN 0x1ba0
+#define MT6357_LDO_TREF_OP_EN_SET 0x1ba2
+#define MT6357_LDO_TREF_OP_EN_CLR 0x1ba4
+#define MT6357_LDO_TREF_OP_CFG 0x1ba6
+#define MT6357_LDO_TREF_OP_CFG_SET 0x1ba8
+#define MT6357_LDO_TREF_OP_CFG_CLR 0x1baa
+#define MT6357_LDO_TREF_CON1 0x1bac
+#define MT6357_LDO_GOFF3_RSV_CON0 0x1bae
+#define MT6357_LDO_GOFF3_RSV_CON1 0x1bb0
+#define MT6357_LDO_ANA0_DSN_ID 0x1c00
+#define MT6357_LDO_ANA0_DSN_REV0 0x1c02
+#define MT6357_LDO_ANA0_DSN_DBI 0x1c04
+#define MT6357_LDO_ANA0_DSN_DXI 0x1c06
+#define MT6357_VFE28_ANA_CON0 0x1c08
+#define MT6357_VFE28_ANA_CON1 0x1c0a
+#define MT6357_VCN28_ANA_CON0 0x1c0c
+#define MT6357_VCN28_ANA_CON1 0x1c0e
+#define MT6357_VAUD28_ANA_CON0 0x1c10
+#define MT6357_VAUD28_ANA_CON1 0x1c12
+#define MT6357_VAUX18_ANA_CON0 0x1c14
+#define MT6357_VAUX18_ANA_CON1 0x1c16
+#define MT6357_VXO22_ANA_CON0 0x1c18
+#define MT6357_VXO22_ANA_CON1 0x1c1a
+#define MT6357_VCN33_ANA_CON0 0x1c1c
+#define MT6357_VCN33_ANA_CON1 0x1c1e
+#define MT6357_VEMC_ANA_CON0 0x1c20
+#define MT6357_VEMC_ANA_CON1 0x1c22
+#define MT6357_VLDO28_ANA_CON0 0x1c24
+#define MT6357_VLDO28_ANA_CON1 0x1c26
+#define MT6357_VIO28_ANA_CON0 0x1c28
+#define MT6357_VIO28_ANA_CON1 0x1c2a
+#define MT6357_VIBR_ANA_CON0 0x1c2c
+#define MT6357_VIBR_ANA_CON1 0x1c2e
+#define MT6357_VSIM1_ANA_CON0 0x1c30
+#define MT6357_VSIM1_ANA_CON1 0x1c32
+#define MT6357_VSIM2_ANA_CON0 0x1c34
+#define MT6357_VSIM2_ANA_CON1 0x1c36
+#define MT6357_VMCH_ANA_CON0 0x1c38
+#define MT6357_VMCH_ANA_CON1 0x1c3a
+#define MT6357_VMC_ANA_CON0 0x1c3c
+#define MT6357_VMC_ANA_CON1 0x1c3e
+#define MT6357_VCAMIO_ANA_CON0 0x1c40
+#define MT6357_VCAMIO_ANA_CON1 0x1c42
+#define MT6357_VCN18_ANA_CON0 0x1c44
+#define MT6357_VCN18_ANA_CON1 0x1c46
+#define MT6357_VRF18_ANA_CON0 0x1c48
+#define MT6357_VRF18_ANA_CON1 0x1c4a
+#define MT6357_VIO18_ANA_CON0 0x1c4c
+#define MT6357_VIO18_ANA_CON1 0x1c4e
+#define MT6357_VDRAM_ANA_CON1 0x1c50
+#define MT6357_VRF12_ANA_CON0 0x1c52
+#define MT6357_VRF12_ANA_CON1 0x1c54
+#define MT6357_VSRAM_PROC_ANA_CON0 0x1c56
+#define MT6357_VSRAM_OTHERS_ANA_CON0 0x1c58
+#define MT6357_LDO_ANA0_ELR_NUM 0x1c5a
+#define MT6357_VFE28_ELR_0 0x1c5c
+#define MT6357_VCN28_ELR_0 0x1c5e
+#define MT6357_VAUD28_ELR_0 0x1c60
+#define MT6357_VAUX18_ELR_0 0x1c62
+#define MT6357_VXO22_ELR_0 0x1c64
+#define MT6357_VCN33_ELR_0 0x1c66
+#define MT6357_VEMC_ELR_0 0x1c68
+#define MT6357_VLDO28_ELR_0 0x1c6a
+#define MT6357_VIO28_ELR_0 0x1c6c
+#define MT6357_VIBR_ELR_0 0x1c6e
+#define MT6357_VSIM1_ELR_0 0x1c70
+#define MT6357_VSIM2_ELR_0 0x1c72
+#define MT6357_VMCH_ELR_0 0x1c74
+#define MT6357_VMC_ELR_0 0x1c76
+#define MT6357_VCAMIO_ELR_0 0x1c78
+#define MT6357_VCN18_ELR_0 0x1c7a
+#define MT6357_VRF18_ELR_0 0x1c7c
+#define MT6357_LDO_ANA1_DSN_ID 0x1c80
+#define MT6357_LDO_ANA1_DSN_REV0 0x1c82
+#define MT6357_LDO_ANA1_DSN_DBI 0x1c84
+#define MT6357_LDO_ANA1_DSN_DXI 0x1c86
+#define MT6357_VUSB33_ANA_CON0 0x1c88
+#define MT6357_VUSB33_ANA_CON1 0x1c8a
+#define MT6357_VCAMA_ANA_CON0 0x1c8c
+#define MT6357_VCAMA_ANA_CON1 0x1c8e
+#define MT6357_VEFUSE_ANA_CON0 0x1c90
+#define MT6357_VEFUSE_ANA_CON1 0x1c92
+#define MT6357_VCAMD_ANA_CON0 0x1c94
+#define MT6357_VCAMD_ANA_CON1 0x1c96
+#define MT6357_LDO_ANA1_ELR_NUM 0x1c98
+#define MT6357_VUSB33_ELR_0 0x1c9a
+#define MT6357_VCAMA_ELR_0 0x1c9c
+#define MT6357_VEFUSE_ELR_0 0x1c9e
+#define MT6357_VCAMD_ELR_0 0x1ca0
+#define MT6357_VIO18_ELR_0 0x1ca2
+#define MT6357_VDRAM_ELR_0 0x1ca4
+#define MT6357_VRF12_ELR_0 0x1ca6
+#define MT6357_VRTC_ELR_0 0x1ca8
+#define MT6357_VDRAM_ELR_1 0x1caa
+#define MT6357_VDRAM_ELR_2 0x1cac
+#define MT6357_XPP_TOP_ID 0x1e00
+#define MT6357_XPP_TOP_REV0 0x1e02
+#define MT6357_XPP_TOP_DBI 0x1e04
+#define MT6357_XPP_TOP_DXI 0x1e06
+#define MT6357_XPP_TPM0 0x1e08
+#define MT6357_XPP_TPM1 0x1e0a
+#define MT6357_XPP_TOP_TEST_OUT 0x1e0c
+#define MT6357_XPP_TOP_TEST_CON0 0x1e0e
+#define MT6357_XPP_TOP_CKPDN_CON0 0x1e10
+#define MT6357_XPP_TOP_CKPDN_CON0_SET 0x1e12
+#define MT6357_XPP_TOP_CKPDN_CON0_CLR 0x1e14
+#define MT6357_XPP_TOP_CKSEL_CON0 0x1e16
+#define MT6357_XPP_TOP_CKSEL_CON0_SET 0x1e18
+#define MT6357_XPP_TOP_CKSEL_CON0_CLR 0x1e1a
+#define MT6357_XPP_TOP_RST_CON0 0x1e1c
+#define MT6357_XPP_TOP_RST_CON0_SET 0x1e1e
+#define MT6357_XPP_TOP_RST_CON0_CLR 0x1e20
+#define MT6357_XPP_TOP_RST_BANK_CON0 0x1e22
+#define MT6357_XPP_TOP_RST_BANK_CON0_SET 0x1e24
+#define MT6357_XPP_TOP_RST_BANK_CON0_CLR 0x1e26
+#define MT6357_DRIVER_BL_DSN_ID 0x1e80
+#define MT6357_DRIVER_BL_DSN_REV0 0x1e82
+#define MT6357_DRIVER_BL_DSN_DBI 0x1e84
+#define MT6357_DRIVER_BL_DSN_DXI 0x1e86
+#define MT6357_ISINK1_CON0 0x1e88
+#define MT6357_ISINK1_CON1 0x1e8a
+#define MT6357_ISINK1_CON2 0x1e8c
+#define MT6357_ISINK1_CON3 0x1e8e
+#define MT6357_ISINK_ANA1 0x1e90
+#define MT6357_ISINK_PHASE_DLY 0x1e92
+#define MT6357_ISINK_SFSTR 0x1e94
+#define MT6357_ISINK_EN_CTRL 0x1e96
+#define MT6357_ISINK_MODE_CTRL 0x1e98
+#define MT6357_DRIVER_ANA_CON0 0x1e9a
+#define MT6357_ISINK_ANA_CON0 0x1e9c
+#define MT6357_ISINK_ANA_CON1 0x1e9e
+#define MT6357_DRIVER_BL_ELR_NUM 0x1ea0
+#define MT6357_DRIVER_BL_ELR_0 0x1ea2
+#define MT6357_DRIVER_CI_DSN_ID 0x1f00
+#define MT6357_DRIVER_CI_DSN_REV0 0x1f02
+#define MT6357_DRIVER_CI_DSN_DBI 0x1f04
+#define MT6357_DRIVER_CI_DSN_DXI 0x1f06
+#define MT6357_CHRIND_CON0 0x1f08
+#define MT6357_CHRIND_CON1 0x1f0a
+#define MT6357_CHRIND_CON2 0x1f0c
+#define MT6357_CHRIND_CON3 0x1f0e
+#define MT6357_CHRIND_CON4 0x1f10
+#define MT6357_CHRIND_EN_CTRL 0x1f12
+#define MT6357_CHRIND_ANA_CON0 0x1f14
+#define MT6357_DRIVER_DL_DSN_ID 0x1f80
+#define MT6357_DRIVER_DL_DSN_REV0 0x1f82
+#define MT6357_DRIVER_DL_DSN_DBI 0x1f84
+#define MT6357_DRIVER_DL_DSN_DXI 0x1f86
+#define MT6357_ISINK2_CON0 0x1f88
+#define MT6357_ISINK3_CON0 0x1f8a
+#define MT6357_ISINK_EN_CTRL_SMPL 0x1f8c
+#define MT6357_AUD_TOP_ID 0x2080
+#define MT6357_AUD_TOP_REV0 0x2082
+#define MT6357_AUD_TOP_DBI 0x2084
+#define MT6357_AUD_TOP_DXI 0x2086
+#define MT6357_AUD_TOP_CKPDN_TPM0 0x2088
+#define MT6357_AUD_TOP_CKPDN_TPM1 0x208a
+#define MT6357_AUD_TOP_CKPDN_CON0 0x208c
+#define MT6357_AUD_TOP_CKPDN_CON0_SET 0x208e
+#define MT6357_AUD_TOP_CKPDN_CON0_CLR 0x2090
+#define MT6357_AUD_TOP_CKSEL_CON0 0x2092
+#define MT6357_AUD_TOP_CKSEL_CON0_SET 0x2094
+#define MT6357_AUD_TOP_CKSEL_CON0_CLR 0x2096
+#define MT6357_AUD_TOP_CKTST_CON0 0x2098
+#define MT6357_AUD_TOP_RST_CON0 0x209a
+#define MT6357_AUD_TOP_RST_CON0_SET 0x209c
+#define MT6357_AUD_TOP_RST_CON0_CLR 0x209e
+#define MT6357_AUD_TOP_RST_BANK_CON0 0x20a0
+#define MT6357_AUD_TOP_INT_CON0 0x20a2
+#define MT6357_AUD_TOP_INT_CON0_SET 0x20a4
+#define MT6357_AUD_TOP_INT_CON0_CLR 0x20a6
+#define MT6357_AUD_TOP_INT_MASK_CON0 0x20a8
+#define MT6357_AUD_TOP_INT_MASK_CON0_SET 0x20aa
+#define MT6357_AUD_TOP_INT_MASK_CON0_CLR 0x20ac
+#define MT6357_AUD_TOP_INT_STATUS0 0x20ae
+#define MT6357_AUD_TOP_INT_RAW_STATUS0 0x20b0
+#define MT6357_AUD_TOP_INT_MISC_CON0 0x20b2
+#define MT6357_AUDNCP_CLKDIV_CON0 0x20b4
+#define MT6357_AUDNCP_CLKDIV_CON1 0x20b6
+#define MT6357_AUDNCP_CLKDIV_CON2 0x20b8
+#define MT6357_AUDNCP_CLKDIV_CON3 0x20ba
+#define MT6357_AUDNCP_CLKDIV_CON4 0x20bc
+#define MT6357_AUD_TOP_MON_CON0 0x20be
+#define MT6357_AUDIO_DIG_DSN_ID 0x2100
+#define MT6357_AUDIO_DIG_DSN_REV0 0x2102
+#define MT6357_AUDIO_DIG_DSN_DBI 0x2104
+#define MT6357_AUDIO_DIG_DSN_DXI 0x2106
+#define MT6357_AFE_UL_DL_CON0 0x2108
+#define MT6357_AFE_DL_SRC2_CON0_L 0x210a
+#define MT6357_AFE_UL_SRC_CON0_H 0x210c
+#define MT6357_AFE_UL_SRC_CON0_L 0x210e
+#define MT6357_AFE_TOP_CON0 0x2110
+#define MT6357_AUDIO_TOP_CON0 0x2112
+#define MT6357_AFE_MON_DEBUG0 0x2114
+#define MT6357_AFUNC_AUD_CON0 0x2116
+#define MT6357_AFUNC_AUD_CON1 0x2118
+#define MT6357_AFUNC_AUD_CON2 0x211a
+#define MT6357_AFUNC_AUD_CON3 0x211c
+#define MT6357_AFUNC_AUD_CON4 0x211e
+#define MT6357_AFUNC_AUD_CON5 0x2120
+#define MT6357_AFUNC_AUD_CON6 0x2122
+#define MT6357_AFUNC_AUD_MON0 0x2124
+#define MT6357_AUDRC_TUNE_MON0 0x2126
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_CFG0 0x2128
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x212a
+#define MT6357_AFE_ADDA_MTKAIF_MON0 0x212c
+#define MT6357_AFE_ADDA_MTKAIF_MON1 0x212e
+#define MT6357_AFE_ADDA_MTKAIF_MON2 0x2130
+#define MT6357_AFE_ADDA_MTKAIF_MON3 0x2132
+#define MT6357_AFE_ADDA_MTKAIF_CFG0 0x2134
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG0 0x2136
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG1 0x2138
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG2 0x213a
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG3 0x213c
+#define MT6357_AFE_ADDA_MTKAIF_TX_CFG1 0x213e
+#define MT6357_AFE_SGEN_CFG0 0x2140
+#define MT6357_AFE_SGEN_CFG1 0x2142
+#define MT6357_AFE_ADC_ASYNC_FIFO_CFG 0x2144
+#define MT6357_AFE_DCCLK_CFG0 0x2146
+#define MT6357_AFE_DCCLK_CFG1 0x2148
+#define MT6357_AUDIO_DIG_CFG 0x214a
+#define MT6357_AFE_AUD_PAD_TOP 0x214c
+#define MT6357_AFE_AUD_PAD_TOP_MON 0x214e
+#define MT6357_AFE_AUD_PAD_TOP_MON1 0x2150
+#define MT6357_AUDENC_DSN_ID 0x2180
+#define MT6357_AUDENC_DSN_REV0 0x2182
+#define MT6357_AUDENC_DSN_DBI 0x2184
+#define MT6357_AUDENC_DSN_FPI 0x2186
+#define MT6357_AUDENC_ANA_CON0 0x2188
+#define MT6357_AUDENC_ANA_CON1 0x218a
+#define MT6357_AUDENC_ANA_CON2 0x218c
+#define MT6357_AUDENC_ANA_CON3 0x218e
+#define MT6357_AUDENC_ANA_CON4 0x2190
+#define MT6357_AUDENC_ANA_CON5 0x2192
+#define MT6357_AUDENC_ANA_CON6 0x2194
+#define MT6357_AUDENC_ANA_CON7 0x2196
+#define MT6357_AUDENC_ANA_CON8 0x2198
+#define MT6357_AUDENC_ANA_CON9 0x219a
+#define MT6357_AUDENC_ANA_CON10 0x219c
+#define MT6357_AUDENC_ANA_CON11 0x219e
+#define MT6357_AUDDEC_DSN_ID 0x2200
+#define MT6357_AUDDEC_DSN_REV0 0x2202
+#define MT6357_AUDDEC_DSN_DBI 0x2204
+#define MT6357_AUDDEC_DSN_FPI 0x2206
+#define MT6357_AUDDEC_ANA_CON0 0x2208
+#define MT6357_AUDDEC_ANA_CON1 0x220a
+#define MT6357_AUDDEC_ANA_CON2 0x220c
+#define MT6357_AUDDEC_ANA_CON3 0x220e
+#define MT6357_AUDDEC_ANA_CON4 0x2210
+#define MT6357_AUDDEC_ANA_CON5 0x2212
+#define MT6357_AUDDEC_ANA_CON6 0x2214
+#define MT6357_AUDDEC_ANA_CON7 0x2216
+#define MT6357_AUDDEC_ANA_CON8 0x2218
+#define MT6357_AUDDEC_ANA_CON9 0x221a
+#define MT6357_AUDDEC_ANA_CON10 0x221c
+#define MT6357_AUDDEC_ANA_CON11 0x221e
+#define MT6357_AUDDEC_ANA_CON12 0x2220
+#define MT6357_AUDDEC_ANA_CON13 0x2222
+#define MT6357_AUDDEC_ELR_NUM 0x2224
+#define MT6357_AUDDEC_ELR_0 0x2226
+#define MT6357_AUDZCD_DSN_ID 0x2280
+#define MT6357_AUDZCD_DSN_REV0 0x2282
+#define MT6357_AUDZCD_DSN_DBI 0x2284
+#define MT6357_AUDZCD_DSN_FPI 0x2286
+#define MT6357_ZCD_CON0 0x2288
+#define MT6357_ZCD_CON1 0x228a
+#define MT6357_ZCD_CON2 0x228c
+#define MT6357_ZCD_CON3 0x228e
+#define MT6357_ZCD_CON4 0x2290
+#define MT6357_ZCD_CON5 0x2292
+#define MT6357_ACCDET_DSN_DIG_ID 0x2300
+#define MT6357_ACCDET_DSN_DIG_REV0 0x2302
+#define MT6357_ACCDET_DSN_DBI 0x2304
+#define MT6357_ACCDET_DSN_FPI 0x2306
+#define MT6357_ACCDET_CON0 0x2308
+#define MT6357_ACCDET_CON1 0x230a
+#define MT6357_ACCDET_CON2 0x230c
+#define MT6357_ACCDET_CON3 0x230e
+#define MT6357_ACCDET_CON4 0x2310
+#define MT6357_ACCDET_CON5 0x2312
+#define MT6357_ACCDET_CON6 0x2314
+#define MT6357_ACCDET_CON7 0x2316
+#define MT6357_ACCDET_CON8 0x2318
+#define MT6357_ACCDET_CON9 0x231a
+#define MT6357_ACCDET_CON10 0x231c
+#define MT6357_ACCDET_CON11 0x231e
+#define MT6357_ACCDET_CON12 0x2320
+#define MT6357_ACCDET_CON13 0x2322
+#define MT6357_ACCDET_CON14 0x2324
+#define MT6357_ACCDET_CON15 0x2326
+#define MT6357_ACCDET_CON16 0x2328
+#define MT6357_ACCDET_CON17 0x232a
+#define MT6357_ACCDET_CON18 0x232c
+#define MT6357_ACCDET_CON19 0x232e
+#define MT6357_ACCDET_CON20 0x2330
+#define MT6357_ACCDET_CON21 0x2332
+#define MT6357_ACCDET_CON22 0x2334
+#define MT6357_ACCDET_CON23 0x2336
+#define MT6357_ACCDET_CON24 0x2338
+#define MT6357_ACCDET_CON25 0x233a
+#define MT6357_ACCDET_CON26 0x233c
+#define MT6357_ACCDET_CON27 0x233e
+#define MT6357_ACCDET_CON28 0x2340
+
+#endif /* __MFD_MT6357_REGISTERS_H__ */
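
Many of the MT6357 control registers above come in CON/CON_SET/CON_CLR triplets (for example MT6357_AUXADC_RQST0 and its _SET/_CLR companions, or the BUCK_*_OP_EN groups). By the usual convention for such register pairs, writing a mask to the _SET or _CLR address sets or clears exactly those bits in the base register without a read-modify-write cycle; the exact semantics are defined by the PMIC datasheet, which this header does not restate. Below is a minimal sketch of how a driver might use one such pair through regmap — the regmap handle is assumed to come from the parent MFD device, and EXAMPLE_AUXADC_RQST_BIT is a hypothetical bit position, not taken from this header:

#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/mfd/mt6357/registers.h>

/* hypothetical request bit; the real bit layout is not in this header */
#define EXAMPLE_AUXADC_RQST_BIT		BIT(0)

static int mt6357_adc_request_start(struct regmap *regmap)
{
	/* set only this bit via the companion _SET register */
	return regmap_write(regmap, MT6357_AUXADC_RQST0_SET,
			    EXAMPLE_AUXADC_RQST_BIT);
}

static int mt6357_adc_request_cancel(struct regmap *regmap)
{
	/* clear the same bit via the companion _CLR register */
	return regmap_write(regmap, MT6357_AUXADC_RQST0_CLR,
			    EXAMPLE_AUXADC_RQST_BIT);
}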
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
new file mode 100644
index 000000000000..68578e2019b0
--- /dev/null
+++ b/include/linux/mfd/mt6358/core.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_CORE_H__
+#define __MFD_MT6358_CORE_H__
+
+struct irq_top_t {
+ int hwirq_base;
+ unsigned int num_int_regs;
+ unsigned int en_reg;
+ unsigned int en_reg_shift;
+ unsigned int sta_reg;
+ unsigned int sta_reg_shift;
+ unsigned int top_offset;
+};
+
+struct pmic_irq_data {
+ unsigned int num_top;
+ unsigned int num_pmic_irqs;
+ unsigned short top_int_status_reg;
+ bool *enable_hwirq;
+ bool *cache_hwirq;
+ const struct irq_top_t *pmic_ints;
+};
+
+enum mt6358_irq_top_status_shift {
+ MT6358_BUCK_TOP = 0,
+ MT6358_LDO_TOP,
+ MT6358_PSC_TOP,
+ MT6358_SCK_TOP,
+ MT6358_BM_TOP,
+ MT6358_HK_TOP,
+ MT6358_AUD_TOP,
+ MT6358_MISC_TOP,
+};
+
+enum mt6358_irq_numbers {
+ MT6358_IRQ_VPROC11_OC = 0,
+ MT6358_IRQ_VPROC12_OC,
+ MT6358_IRQ_VCORE_OC,
+ MT6358_IRQ_VGPU_OC,
+ MT6358_IRQ_VMODEM_OC,
+ MT6358_IRQ_VDRAM1_OC,
+ MT6358_IRQ_VS1_OC,
+ MT6358_IRQ_VS2_OC,
+ MT6358_IRQ_VPA_OC,
+ MT6358_IRQ_VCORE_PREOC,
+ MT6358_IRQ_VFE28_OC = 16,
+ MT6358_IRQ_VXO22_OC,
+ MT6358_IRQ_VRF18_OC,
+ MT6358_IRQ_VRF12_OC,
+ MT6358_IRQ_VEFUSE_OC,
+ MT6358_IRQ_VCN33_OC,
+ MT6358_IRQ_VCN28_OC,
+ MT6358_IRQ_VCN18_OC,
+ MT6358_IRQ_VCAMA1_OC,
+ MT6358_IRQ_VCAMA2_OC,
+ MT6358_IRQ_VCAMD_OC,
+ MT6358_IRQ_VCAMIO_OC,
+ MT6358_IRQ_VLDO28_OC,
+ MT6358_IRQ_VA12_OC,
+ MT6358_IRQ_VAUX18_OC,
+ MT6358_IRQ_VAUD28_OC,
+ MT6358_IRQ_VIO28_OC,
+ MT6358_IRQ_VIO18_OC,
+ MT6358_IRQ_VSRAM_PROC11_OC,
+ MT6358_IRQ_VSRAM_PROC12_OC,
+ MT6358_IRQ_VSRAM_OTHERS_OC,
+ MT6358_IRQ_VSRAM_GPU_OC,
+ MT6358_IRQ_VDRAM2_OC,
+ MT6358_IRQ_VMC_OC,
+ MT6358_IRQ_VMCH_OC,
+ MT6358_IRQ_VEMC_OC,
+ MT6358_IRQ_VSIM1_OC,
+ MT6358_IRQ_VSIM2_OC,
+ MT6358_IRQ_VIBR_OC,
+ MT6358_IRQ_VUSB_OC,
+ MT6358_IRQ_VBIF28_OC,
+ MT6358_IRQ_PWRKEY = 48,
+ MT6358_IRQ_HOMEKEY,
+ MT6358_IRQ_PWRKEY_R,
+ MT6358_IRQ_HOMEKEY_R,
+ MT6358_IRQ_NI_LBAT_INT,
+ MT6358_IRQ_CHRDET,
+ MT6358_IRQ_CHRDET_EDGE,
+ MT6358_IRQ_VCDT_HV_DET,
+ MT6358_IRQ_RTC = 64,
+ MT6358_IRQ_FG_BAT0_H = 80,
+ MT6358_IRQ_FG_BAT0_L,
+ MT6358_IRQ_FG_CUR_H,
+ MT6358_IRQ_FG_CUR_L,
+ MT6358_IRQ_FG_ZCV,
+ MT6358_IRQ_FG_BAT1_H,
+ MT6358_IRQ_FG_BAT1_L,
+ MT6358_IRQ_FG_N_CHARGE_L,
+ MT6358_IRQ_FG_IAVG_H,
+ MT6358_IRQ_FG_IAVG_L,
+ MT6358_IRQ_FG_TIME_H,
+ MT6358_IRQ_FG_DISCHARGE,
+ MT6358_IRQ_FG_CHARGE,
+ MT6358_IRQ_BATON_LV = 96,
+ MT6358_IRQ_BATON_HT,
+ MT6358_IRQ_BATON_BAT_IN,
+ MT6358_IRQ_BATON_BAT_OUT,
+ MT6358_IRQ_BIF,
+ MT6358_IRQ_BAT_H = 112,
+ MT6358_IRQ_BAT_L,
+ MT6358_IRQ_BAT2_H,
+ MT6358_IRQ_BAT2_L,
+ MT6358_IRQ_BAT_TEMP_H,
+ MT6358_IRQ_BAT_TEMP_L,
+ MT6358_IRQ_AUXADC_IMP,
+ MT6358_IRQ_NAG_C_DLTV,
+ MT6358_IRQ_AUDIO = 128,
+ MT6358_IRQ_ACCDET = 133,
+ MT6358_IRQ_ACCDET_EINT0,
+ MT6358_IRQ_ACCDET_EINT1,
+ MT6358_IRQ_SPI_CMD_ALERT = 144,
+ MT6358_IRQ_NR,
+};
+
+#define MT6358_IRQ_BUCK_BASE MT6358_IRQ_VPROC11_OC
+#define MT6358_IRQ_LDO_BASE MT6358_IRQ_VFE28_OC
+#define MT6358_IRQ_PSC_BASE MT6358_IRQ_PWRKEY
+#define MT6358_IRQ_SCK_BASE MT6358_IRQ_RTC
+#define MT6358_IRQ_BM_BASE MT6358_IRQ_FG_BAT0_H
+#define MT6358_IRQ_HK_BASE MT6358_IRQ_BAT_H
+#define MT6358_IRQ_AUD_BASE MT6358_IRQ_AUDIO
+#define MT6358_IRQ_MISC_BASE MT6358_IRQ_SPI_CMD_ALERT
+
+#define MT6358_IRQ_BUCK_BITS (MT6358_IRQ_VCORE_PREOC - MT6358_IRQ_BUCK_BASE + 1)
+#define MT6358_IRQ_LDO_BITS (MT6358_IRQ_VBIF28_OC - MT6358_IRQ_LDO_BASE + 1)
+#define MT6358_IRQ_PSC_BITS (MT6358_IRQ_VCDT_HV_DET - MT6358_IRQ_PSC_BASE + 1)
+#define MT6358_IRQ_SCK_BITS (MT6358_IRQ_RTC - MT6358_IRQ_SCK_BASE + 1)
+#define MT6358_IRQ_BM_BITS (MT6358_IRQ_BIF - MT6358_IRQ_BM_BASE + 1)
+#define MT6358_IRQ_HK_BITS (MT6358_IRQ_NAG_C_DLTV - MT6358_IRQ_HK_BASE + 1)
+#define MT6358_IRQ_AUD_BITS (MT6358_IRQ_ACCDET_EINT1 - MT6358_IRQ_AUD_BASE + 1)
+#define MT6358_IRQ_MISC_BITS \
+ (MT6358_IRQ_SPI_CMD_ALERT - MT6358_IRQ_MISC_BASE + 1)
+
+#define MT6358_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6358_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6358_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6358_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6358_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6358_CORE_H__ */
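
The MT6358_TOP_GEN() helper above stamps out one struct irq_top_t per interrupt bank ("top"): num_int_regs is derived from the bank's hwirq span and the PMIC register width, and the 0x6/0x2 values match the address strides visible in the register map that follows, where consecutive INT_CON registers sit 0x6 apart (each CON has _SET and _CLR companions in between) and consecutive INT_STATUS registers sit 0x2 apart (compare MT6358_LDO_TOP_INT_CON0/CON1 at 0x1a50/0x1a56 and STATUS0/STATUS1 at 0x1a68/0x1a6a). A sketch of how an interrupt driver might build its per-top table from this macro — assuming MTK_PMIC_REG_WIDTH is 16 for these PMICs and that the MT6358_*_TOP_INT_* macros come from <linux/mfd/mt6358/registers.h> (the AUD bank's registers fall outside the portion of that header shown here):

#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6358/registers.h>

#define MTK_PMIC_REG_WIDTH	16	/* assumed: 16-bit PMIC registers */

/*
 * One entry per top, ordered to match mt6358_irq_top_status_shift.
 * E.g. BUCK spans 10 hwirqs -> 1 status register; LDO spans 31
 * hwirqs -> 2 status registers, both computed by MT6358_TOP_GEN().
 */
static const struct irq_top_t mt6358_ints[] = {
	MT6358_TOP_GEN(BUCK),
	MT6358_TOP_GEN(LDO),
	MT6358_TOP_GEN(PSC),
	MT6358_TOP_GEN(SCK),
	MT6358_TOP_GEN(BM),
	MT6358_TOP_GEN(HK),
	MT6358_TOP_GEN(AUD),
	MT6358_TOP_GEN(MISC),
};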
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
new file mode 100644
index 000000000000..d83e87298ac4
--- /dev/null
+++ b/include/linux/mfd/mt6358/registers.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_REGISTERS_H__
+#define __MFD_MT6358_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6358_SWCID 0xa
+#define MT6358_TOPSTATUS 0x28
+#define MT6358_TOP_RST_MISC 0x14c
+#define MT6358_MISC_TOP_INT_CON0 0x188
+#define MT6358_MISC_TOP_INT_STATUS0 0x194
+#define MT6358_TOP_INT_STATUS0 0x19e
+#define MT6358_SCK_TOP_INT_CON0 0x52e
+#define MT6358_SCK_TOP_INT_STATUS0 0x53a
+#define MT6358_EOSC_CALI_CON0 0x540
+#define MT6358_EOSC_CALI_CON1 0x542
+#define MT6358_RTC_MIX_CON0 0x544
+#define MT6358_RTC_MIX_CON1 0x546
+#define MT6358_RTC_MIX_CON2 0x548
+#define MT6358_RTC_DSN_ID 0x580
+#define MT6358_RTC_DSN_REV0 0x582
+#define MT6358_RTC_DBI 0x584
+#define MT6358_RTC_DXI 0x586
+#define MT6358_RTC_BBPU 0x588
+#define MT6358_RTC_IRQ_STA 0x58a
+#define MT6358_RTC_IRQ_EN 0x58c
+#define MT6358_RTC_CII_EN 0x58e
+#define MT6358_RTC_AL_MASK 0x590
+#define MT6358_RTC_TC_SEC 0x592
+#define MT6358_RTC_TC_MIN 0x594
+#define MT6358_RTC_TC_HOU 0x596
+#define MT6358_RTC_TC_DOM 0x598
+#define MT6358_RTC_TC_DOW 0x59a
+#define MT6358_RTC_TC_MTH 0x59c
+#define MT6358_RTC_TC_YEA 0x59e
+#define MT6358_RTC_AL_SEC 0x5a0
+#define MT6358_RTC_AL_MIN 0x5a2
+#define MT6358_RTC_AL_HOU 0x5a4
+#define MT6358_RTC_AL_DOM 0x5a6
+#define MT6358_RTC_AL_DOW 0x5a8
+#define MT6358_RTC_AL_MTH 0x5aa
+#define MT6358_RTC_AL_YEA 0x5ac
+#define MT6358_RTC_OSC32CON 0x5ae
+#define MT6358_RTC_POWERKEY1 0x5b0
+#define MT6358_RTC_POWERKEY2 0x5b2
+#define MT6358_RTC_PDN1 0x5b4
+#define MT6358_RTC_PDN2 0x5b6
+#define MT6358_RTC_SPAR0 0x5b8
+#define MT6358_RTC_SPAR1 0x5ba
+#define MT6358_RTC_PROT 0x5bc
+#define MT6358_RTC_DIFF 0x5be
+#define MT6358_RTC_CALI 0x5c0
+#define MT6358_RTC_WRTGR 0x5c2
+#define MT6358_RTC_CON 0x5c4
+#define MT6358_RTC_SEC_CTRL 0x5c6
+#define MT6358_RTC_INT_CNT 0x5c8
+#define MT6358_RTC_SEC_DAT0 0x5ca
+#define MT6358_RTC_SEC_DAT1 0x5cc
+#define MT6358_RTC_SEC_DAT2 0x5ce
+#define MT6358_RTC_SEC_DSN_ID 0x600
+#define MT6358_RTC_SEC_DSN_REV0 0x602
+#define MT6358_RTC_SEC_DBI 0x604
+#define MT6358_RTC_SEC_DXI 0x606
+#define MT6358_RTC_TC_SEC_SEC 0x608
+#define MT6358_RTC_TC_MIN_SEC 0x60a
+#define MT6358_RTC_TC_HOU_SEC 0x60c
+#define MT6358_RTC_TC_DOM_SEC 0x60e
+#define MT6358_RTC_TC_DOW_SEC 0x610
+#define MT6358_RTC_TC_MTH_SEC 0x612
+#define MT6358_RTC_TC_YEA_SEC 0x614
+#define MT6358_RTC_SEC_CK_PDN 0x616
+#define MT6358_RTC_SEC_WRTGR 0x618
+#define MT6358_PSC_TOP_INT_CON0 0x910
+#define MT6358_PSC_TOP_INT_STATUS0 0x91c
+#define MT6358_BM_TOP_INT_CON0 0xc32
+#define MT6358_BM_TOP_INT_CON1 0xc38
+#define MT6358_BM_TOP_INT_STATUS0 0xc4a
+#define MT6358_BM_TOP_INT_STATUS1 0xc4c
+#define MT6358_HK_TOP_INT_CON0 0xf92
+#define MT6358_HK_TOP_INT_STATUS0 0xf9e
+#define MT6358_BUCK_TOP_INT_CON0 0x1318
+#define MT6358_BUCK_TOP_INT_STATUS0 0x1324
+#define MT6358_BUCK_VPROC11_CON0 0x1388
+#define MT6358_BUCK_VPROC11_DBG0 0x139e
+#define MT6358_BUCK_VPROC11_DBG1 0x13a0
+#define MT6358_BUCK_VPROC11_ELR0 0x13a6
+#define MT6358_BUCK_VPROC12_CON0 0x1408
+#define MT6358_BUCK_VPROC12_DBG0 0x141e
+#define MT6358_BUCK_VPROC12_DBG1 0x1420
+#define MT6358_BUCK_VPROC12_ELR0 0x1426
+#define MT6358_BUCK_VCORE_CON0 0x1488
+#define MT6358_BUCK_VCORE_DBG0 0x149e
+#define MT6358_BUCK_VCORE_DBG1 0x14a0
+#define MT6358_BUCK_VCORE_SSHUB_CON0 0x14a4
+#define MT6358_BUCK_VCORE_SSHUB_CON1 0x14a6
+#define MT6358_BUCK_VCORE_SSHUB_ELR0 MT6358_BUCK_VCORE_SSHUB_CON1
+#define MT6358_BUCK_VCORE_SSHUB_DBG1 MT6358_BUCK_VCORE_DBG1
+#define MT6358_BUCK_VCORE_ELR0 0x14aa
+#define MT6358_BUCK_VGPU_CON0 0x1508
+#define MT6358_BUCK_VGPU_DBG0 0x151e
+#define MT6358_BUCK_VGPU_DBG1 0x1520
+#define MT6358_BUCK_VGPU_ELR0 0x1526
+#define MT6358_BUCK_VMODEM_CON0 0x1588
+#define MT6358_BUCK_VMODEM_DBG0 0x159e
+#define MT6358_BUCK_VMODEM_DBG1 0x15a0
+#define MT6358_BUCK_VMODEM_ELR0 0x15a6
+#define MT6358_BUCK_VDRAM1_CON0 0x1608
+#define MT6358_BUCK_VDRAM1_DBG0 0x161e
+#define MT6358_BUCK_VDRAM1_DBG1 0x1620
+#define MT6358_BUCK_VDRAM1_ELR0 0x1626
+#define MT6358_BUCK_VS1_CON0 0x1688
+#define MT6358_BUCK_VS1_DBG0 0x169e
+#define MT6358_BUCK_VS1_DBG1 0x16a0
+#define MT6358_BUCK_VS1_ELR0 0x16ae
+#define MT6358_BUCK_VS2_CON0 0x1708
+#define MT6358_BUCK_VS2_DBG0 0x171e
+#define MT6358_BUCK_VS2_DBG1 0x1720
+#define MT6358_BUCK_VS2_ELR0 0x172e
+#define MT6358_BUCK_VPA_CON0 0x1788
+#define MT6358_BUCK_VPA_CON1 0x178a
+#define MT6358_BUCK_VPA_ELR0 MT6358_BUCK_VPA_CON1
+#define MT6358_BUCK_VPA_DBG0 0x1792
+#define MT6358_BUCK_VPA_DBG1 0x1794
+#define MT6358_VPROC_ANA_CON0 0x180c
+#define MT6358_VCORE_VGPU_ANA_CON0 0x1828
+#define MT6358_VMODEM_ANA_CON0 0x1888
+#define MT6358_VDRAM1_ANA_CON0 0x1896
+#define MT6358_VS1_ANA_CON0 0x18a2
+#define MT6358_VS2_ANA_CON0 0x18ae
+#define MT6358_VPA_ANA_CON0 0x18ba
+#define MT6358_LDO_TOP_INT_CON0 0x1a50
+#define MT6358_LDO_TOP_INT_CON1 0x1a56
+#define MT6358_LDO_TOP_INT_STATUS0 0x1a68
+#define MT6358_LDO_TOP_INT_STATUS1 0x1a6a
+#define MT6358_LDO_VXO22_CON0 0x1a88
+#define MT6358_LDO_VXO22_CON1 0x1a96
+#define MT6358_LDO_VA12_CON0 0x1a9c
+#define MT6358_LDO_VA12_CON1 0x1aaa
+#define MT6358_LDO_VAUX18_CON0 0x1ab0
+#define MT6358_LDO_VAUX18_CON1 0x1abe
+#define MT6358_LDO_VAUD28_CON0 0x1ac4
+#define MT6358_LDO_VAUD28_CON1 0x1ad2
+#define MT6358_LDO_VIO28_CON0 0x1ad8
+#define MT6358_LDO_VIO28_CON1 0x1ae6
+#define MT6358_LDO_VIO18_CON0 0x1aec
+#define MT6358_LDO_VIO18_CON1 0x1afa
+#define MT6358_LDO_VDRAM2_CON0 0x1b08
+#define MT6358_LDO_VDRAM2_CON1 0x1b16
+#define MT6358_LDO_VEMC_CON0 0x1b1c
+#define MT6358_LDO_VEMC_CON1 0x1b2a
+#define MT6358_LDO_VUSB_CON0_0 0x1b30
+#define MT6358_LDO_VUSB_CON1 0x1b40
+#define MT6358_LDO_VSRAM_PROC11_CON0 0x1b46
+#define MT6358_LDO_VSRAM_PROC11_DBG0 0x1b60
+#define MT6358_LDO_VSRAM_PROC11_DBG1 0x1b62
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON0 0x1b64
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON1 0x1b66
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON2 0x1b68
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON3 0x1b6a
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON0 0x1b6c
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON1 0x1b6e
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON2 0x1b70
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON3 0x1b72
+#define MT6358_LDO_VSRAM_WAKEUP_CON0 0x1b74
+#define MT6358_LDO_GON1_ELR_NUM 0x1b76
+#define MT6358_LDO_VDRAM2_ELR0 0x1b78
+#define MT6358_LDO_VSRAM_PROC12_CON0 0x1b88
+#define MT6358_LDO_VSRAM_PROC12_DBG0 0x1ba2
+#define MT6358_LDO_VSRAM_PROC12_DBG1 0x1ba4
+#define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6
+#define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0
+#define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON0 0x1bc4
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1 0x1bc6
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_DBG1 MT6358_LDO_VSRAM_OTHERS_DBG1
+#define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8
+#define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2
+#define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4
+#define MT6358_LDO_VSRAM_CON0 0x1bee
+#define MT6358_LDO_VSRAM_CON1 0x1bf0
+#define MT6358_LDO_VSRAM_CON2 0x1bf2
+#define MT6358_LDO_VSRAM_CON3 0x1bf4
+#define MT6358_LDO_VFE28_CON0 0x1c08
+#define MT6358_LDO_VFE28_CON1 0x1c16
+#define MT6358_LDO_VFE28_CON2 0x1c18
+#define MT6358_LDO_VFE28_CON3 0x1c1a
+#define MT6358_LDO_VRF18_CON0 0x1c1c
+#define MT6358_LDO_VRF18_CON1 0x1c2a
+#define MT6358_LDO_VRF18_CON2 0x1c2c
+#define MT6358_LDO_VRF18_CON3 0x1c2e
+#define MT6358_LDO_VRF12_CON0 0x1c30
+#define MT6358_LDO_VRF12_CON1 0x1c3e
+#define MT6358_LDO_VRF12_CON2 0x1c40
+#define MT6358_LDO_VRF12_CON3 0x1c42
+#define MT6358_LDO_VEFUSE_CON0 0x1c44
+#define MT6358_LDO_VEFUSE_CON1 0x1c52
+#define MT6358_LDO_VEFUSE_CON2 0x1c54
+#define MT6358_LDO_VEFUSE_CON3 0x1c56
+#define MT6358_LDO_VCN18_CON0 0x1c58
+#define MT6358_LDO_VCN18_CON1 0x1c66
+#define MT6358_LDO_VCN18_CON2 0x1c68
+#define MT6358_LDO_VCN18_CON3 0x1c6a
+#define MT6358_LDO_VCAMA1_CON0 0x1c6c
+#define MT6358_LDO_VCAMA1_CON1 0x1c7a
+#define MT6358_LDO_VCAMA1_CON2 0x1c7c
+#define MT6358_LDO_VCAMA1_CON3 0x1c7e
+#define MT6358_LDO_VCAMA2_CON0 0x1c88
+#define MT6358_LDO_VCAMA2_CON1 0x1c96
+#define MT6358_LDO_VCAMA2_CON2 0x1c98
+#define MT6358_LDO_VCAMA2_CON3 0x1c9a
+#define MT6358_LDO_VCAMD_CON0 0x1c9c
+#define MT6358_LDO_VCAMD_CON1 0x1caa
+#define MT6358_LDO_VCAMD_CON2 0x1cac
+#define MT6358_LDO_VCAMD_CON3 0x1cae
+#define MT6358_LDO_VCAMIO_CON0 0x1cb0
+#define MT6358_LDO_VCAMIO_CON1 0x1cbe
+#define MT6358_LDO_VCAMIO_CON2 0x1cc0
+#define MT6358_LDO_VCAMIO_CON3 0x1cc2
+#define MT6358_LDO_VMC_CON0 0x1cc4
+#define MT6358_LDO_VMC_CON1 0x1cd2
+#define MT6358_LDO_VMC_CON2 0x1cd4
+#define MT6358_LDO_VMC_CON3 0x1cd6
+#define MT6358_LDO_VMCH_CON0 0x1cd8
+#define MT6358_LDO_VMCH_CON1 0x1ce6
+#define MT6358_LDO_VMCH_CON2 0x1ce8
+#define MT6358_LDO_VMCH_CON3 0x1cea
+#define MT6358_LDO_VIBR_CON0 0x1d08
+#define MT6358_LDO_VIBR_CON1 0x1d16
+#define MT6358_LDO_VIBR_CON2 0x1d18
+#define MT6358_LDO_VIBR_CON3 0x1d1a
+#define MT6358_LDO_VCN33_CON0_0 0x1d1c
+#define MT6358_LDO_VCN33_CON0_1 0x1d2a
+#define MT6358_LDO_VCN33_CON1 0x1d2c
+#define MT6358_LDO_VCN33_BT_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_WIFI_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_CON2 0x1d2e
+#define MT6358_LDO_VCN33_CON3 0x1d30
+#define MT6358_LDO_VLDO28_CON0_0 0x1d32
+#define MT6358_LDO_VLDO28_CON0_1 0x1d40
+#define MT6358_LDO_VLDO28_CON1 0x1d42
+#define MT6358_LDO_VLDO28_CON2 0x1d44
+#define MT6358_LDO_VLDO28_CON3 0x1d46
+#define MT6358_LDO_VSIM1_CON0 0x1d48
+#define MT6358_LDO_VSIM1_CON1 0x1d56
+#define MT6358_LDO_VSIM1_CON2 0x1d58
+#define MT6358_LDO_VSIM1_CON3 0x1d5a
+#define MT6358_LDO_VSIM2_CON0 0x1d5c
+#define MT6358_LDO_VSIM2_CON1 0x1d6a
+#define MT6358_LDO_VSIM2_CON2 0x1d6c
+#define MT6358_LDO_VSIM2_CON3 0x1d6e
+#define MT6358_LDO_VCN28_CON0 0x1d88
+#define MT6358_LDO_VCN28_CON1 0x1d96
+#define MT6358_LDO_VCN28_CON2 0x1d98
+#define MT6358_LDO_VCN28_CON3 0x1d9a
+#define MT6358_VRTC28_CON0 0x1d9c
+#define MT6358_LDO_VBIF28_CON0 0x1d9e
+#define MT6358_LDO_VBIF28_CON1 0x1dac
+#define MT6358_LDO_VBIF28_CON2 0x1dae
+#define MT6358_LDO_VBIF28_CON3 0x1db0
+#define MT6358_VCAMA1_ANA_CON0 0x1e08
+#define MT6358_VCAMA2_ANA_CON0 0x1e0c
+#define MT6358_VFE28_ANA_CON0 0x1e10
+#define MT6358_VCN28_ANA_CON0 0x1e14
+#define MT6358_VBIF28_ANA_CON0 0x1e18
+#define MT6358_VAUD28_ANA_CON0 0x1e1c
+#define MT6358_VAUX18_ANA_CON0 0x1e20
+#define MT6358_VXO22_ANA_CON0 0x1e24
+#define MT6358_VCN33_ANA_CON0 0x1e28
+#define MT6358_VSIM1_ANA_CON0 0x1e2c
+#define MT6358_VSIM2_ANA_CON0 0x1e30
+#define MT6358_VUSB_ANA_CON0 0x1e34
+#define MT6358_VEMC_ANA_CON0 0x1e38
+#define MT6358_VLDO28_ANA_CON0 0x1e3c
+#define MT6358_VIO28_ANA_CON0 0x1e40
+#define MT6358_VIBR_ANA_CON0 0x1e44
+#define MT6358_VMCH_ANA_CON0 0x1e48
+#define MT6358_VMC_ANA_CON0 0x1e4c
+#define MT6358_VRF18_ANA_CON0 0x1e88
+#define MT6358_VCN18_ANA_CON0 0x1e8c
+#define MT6358_VCAMIO_ANA_CON0 0x1e90
+#define MT6358_VIO18_ANA_CON0 0x1e94
+#define MT6358_VEFUSE_ANA_CON0 0x1e98
+#define MT6358_VRF12_ANA_CON0 0x1e9c
+#define MT6358_VSRAM_PROC11_ANA_CON0 0x1ea0
+#define MT6358_VSRAM_PROC12_ANA_CON0 0x1ea4
+#define MT6358_VSRAM_OTHERS_ANA_CON0 0x1ea6
+#define MT6358_VSRAM_GPU_ANA_CON0 0x1ea8
+#define MT6358_VDRAM2_ANA_CON0 0x1eaa
+#define MT6358_VCAMD_ANA_CON0 0x1eae
+#define MT6358_VA12_ANA_CON0 0x1eb2
+#define MT6358_AUD_TOP_INT_CON0 0x2228
+#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+
+/*
+ * MT6366 has no VCAM*, but has other regulators in its place. The names
+ * keep the MT6358 prefix for ease of use in the regulator driver.
+ */
+#define MT6358_LDO_VSRAM_CON5 0x1bf8
+#define MT6358_LDO_VM18_CON0 MT6358_LDO_VCAMA1_CON0
+#define MT6358_LDO_VM18_CON1 MT6358_LDO_VCAMA1_CON1
+#define MT6358_LDO_VM18_CON2 MT6358_LDO_VCAMA1_CON2
+#define MT6358_LDO_VMDDR_CON0 MT6358_LDO_VCAMA2_CON0
+#define MT6358_LDO_VMDDR_CON1 MT6358_LDO_VCAMA2_CON1
+#define MT6358_LDO_VMDDR_CON2 MT6358_LDO_VCAMA2_CON2
+#define MT6358_LDO_VSRAM_CORE_CON0 MT6358_LDO_VCAMD_CON0
+#define MT6358_LDO_VSRAM_CORE_DBG0 0x1cb6
+#define MT6358_LDO_VSRAM_CORE_DBG1 0x1cb8
+#define MT6358_VM18_ANA_CON0 MT6358_VCAMA1_ANA_CON0
+#define MT6358_VMDDR_ANA_CON0 MT6358_VCAMD_ANA_CON0
+
+#endif /* __MFD_MT6358_REGISTERS_H__ */
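
The MT6366 aliases above are what allow a single regulator driver table to
serve both chips: a name such as MT6358_LDO_VM18_CON0 simply resolves to the
reused MT6358 VCAMA1 address. A minimal sketch of enabling VM18 through the
aliases, assuming a regmap handle from the parent MFD and that the enable bit
sits at bit 0 of CON0, as in the mt6358 regulator driver:

        /* On MT6366, MT6358_LDO_VM18_CON0 is the reused MT6358_LDO_VCAMA1_CON0 */
        regmap_update_bits(chip->regmap, MT6358_LDO_VM18_CON0, BIT(0), BIT(0));
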
diff --git a/include/linux/mfd/mt6359/core.h b/include/linux/mfd/mt6359/core.h
new file mode 100644
index 000000000000..8d298868126d
--- /dev/null
+++ b/include/linux/mfd/mt6359/core.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_CORE_H__
+#define __MFD_MT6359_CORE_H__
+
+enum mt6359_irq_top_status_shift {
+ MT6359_BUCK_TOP = 0,
+ MT6359_LDO_TOP,
+ MT6359_PSC_TOP,
+ MT6359_SCK_TOP,
+ MT6359_BM_TOP,
+ MT6359_HK_TOP,
+ MT6359_AUD_TOP = 7,
+ MT6359_MISC_TOP,
+};
+
+enum mt6359_irq_numbers {
+ MT6359_IRQ_VCORE_OC = 1,
+ MT6359_IRQ_VGPU11_OC,
+ MT6359_IRQ_VGPU12_OC,
+ MT6359_IRQ_VMODEM_OC,
+ MT6359_IRQ_VPROC1_OC,
+ MT6359_IRQ_VPROC2_OC,
+ MT6359_IRQ_VS1_OC,
+ MT6359_IRQ_VS2_OC,
+ MT6359_IRQ_VPA_OC = 9,
+ MT6359_IRQ_VFE28_OC = 16,
+ MT6359_IRQ_VXO22_OC,
+ MT6359_IRQ_VRF18_OC,
+ MT6359_IRQ_VRF12_OC,
+ MT6359_IRQ_VEFUSE_OC,
+ MT6359_IRQ_VCN33_1_OC,
+ MT6359_IRQ_VCN33_2_OC,
+ MT6359_IRQ_VCN13_OC,
+ MT6359_IRQ_VCN18_OC,
+ MT6359_IRQ_VA09_OC,
+ MT6359_IRQ_VCAMIO_OC,
+ MT6359_IRQ_VA12_OC,
+ MT6359_IRQ_VAUX18_OC,
+ MT6359_IRQ_VAUD18_OC,
+ MT6359_IRQ_VIO18_OC,
+ MT6359_IRQ_VSRAM_PROC1_OC,
+ MT6359_IRQ_VSRAM_PROC2_OC,
+ MT6359_IRQ_VSRAM_OTHERS_OC,
+ MT6359_IRQ_VSRAM_MD_OC,
+ MT6359_IRQ_VEMC_OC,
+ MT6359_IRQ_VSIM1_OC,
+ MT6359_IRQ_VSIM2_OC,
+ MT6359_IRQ_VUSB_OC,
+ MT6359_IRQ_VRFCK_OC,
+ MT6359_IRQ_VBBCK_OC,
+ MT6359_IRQ_VBIF28_OC,
+ MT6359_IRQ_VIBR_OC,
+ MT6359_IRQ_VIO28_OC,
+ MT6359_IRQ_VM18_OC,
+ MT6359_IRQ_VUFS_OC = 45,
+ MT6359_IRQ_PWRKEY = 48,
+ MT6359_IRQ_HOMEKEY,
+ MT6359_IRQ_PWRKEY_R,
+ MT6359_IRQ_HOMEKEY_R,
+ MT6359_IRQ_NI_LBAT_INT,
+ MT6359_IRQ_CHRDET_EDGE = 53,
+ MT6359_IRQ_RTC = 64,
+ MT6359_IRQ_FG_BAT_H = 80,
+ MT6359_IRQ_FG_BAT_L,
+ MT6359_IRQ_FG_CUR_H,
+ MT6359_IRQ_FG_CUR_L,
+ MT6359_IRQ_FG_ZCV = 84,
+ MT6359_IRQ_FG_N_CHARGE_L = 87,
+ MT6359_IRQ_FG_IAVG_H,
+ MT6359_IRQ_FG_IAVG_L = 89,
+ MT6359_IRQ_FG_DISCHARGE = 91,
+ MT6359_IRQ_FG_CHARGE,
+ MT6359_IRQ_BATON_LV = 96,
+ MT6359_IRQ_BATON_BAT_IN = 98,
+ MT6359_IRQ_BATON_BAT_OU,
+ MT6359_IRQ_BIF = 100,
+ MT6359_IRQ_BAT_H = 112,
+ MT6359_IRQ_BAT_L,
+ MT6359_IRQ_BAT2_H,
+ MT6359_IRQ_BAT2_L,
+ MT6359_IRQ_BAT_TEMP_H,
+ MT6359_IRQ_BAT_TEMP_L,
+ MT6359_IRQ_THR_H,
+ MT6359_IRQ_THR_L,
+ MT6359_IRQ_AUXADC_IMP,
+ MT6359_IRQ_NAG_C_DLTV = 121,
+ MT6359_IRQ_AUDIO = 128,
+ MT6359_IRQ_ACCDET = 133,
+ MT6359_IRQ_ACCDET_EINT0,
+ MT6359_IRQ_ACCDET_EINT1,
+ MT6359_IRQ_SPI_CMD_ALERT = 144,
+ MT6359_IRQ_NR,
+};
+
+#define MT6359_IRQ_BUCK_BASE MT6359_IRQ_VCORE_OC
+#define MT6359_IRQ_LDO_BASE MT6359_IRQ_VFE28_OC
+#define MT6359_IRQ_PSC_BASE MT6359_IRQ_PWRKEY
+#define MT6359_IRQ_SCK_BASE MT6359_IRQ_RTC
+#define MT6359_IRQ_BM_BASE MT6359_IRQ_FG_BAT_H
+#define MT6359_IRQ_HK_BASE MT6359_IRQ_BAT_H
+#define MT6359_IRQ_AUD_BASE MT6359_IRQ_AUDIO
+#define MT6359_IRQ_MISC_BASE MT6359_IRQ_SPI_CMD_ALERT
+
+#define MT6359_IRQ_BUCK_BITS (MT6359_IRQ_VPA_OC - MT6359_IRQ_BUCK_BASE + 1)
+#define MT6359_IRQ_LDO_BITS (MT6359_IRQ_VUFS_OC - MT6359_IRQ_LDO_BASE + 1)
+#define MT6359_IRQ_PSC_BITS \
+ (MT6359_IRQ_CHRDET_EDGE - MT6359_IRQ_PSC_BASE + 1)
+#define MT6359_IRQ_SCK_BITS (MT6359_IRQ_RTC - MT6359_IRQ_SCK_BASE + 1)
+#define MT6359_IRQ_BM_BITS (MT6359_IRQ_BIF - MT6359_IRQ_BM_BASE + 1)
+#define MT6359_IRQ_HK_BITS (MT6359_IRQ_NAG_C_DLTV - MT6359_IRQ_HK_BASE + 1)
+#define MT6359_IRQ_AUD_BITS \
+ (MT6359_IRQ_ACCDET_EINT1 - MT6359_IRQ_AUD_BASE + 1)
+#define MT6359_IRQ_MISC_BITS \
+ (MT6359_IRQ_SPI_CMD_ALERT - MT6359_IRQ_MISC_BASE + 1)
+
+#define MT6359_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6359_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6359_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6359_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6359_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6359_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6359_CORE_H__ */
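
MT6359_TOP_GEN() packs everything the shared MTK PMIC IRQ code needs to know
about one interrupt "top" into a designated initializer. Expanded by hand for
the BUCK group, as a sketch (MTK_PMIC_REG_WIDTH is 16, taken from
mt6358/core.h):

        /* MT6359_TOP_GEN(BUCK) expands to roughly: */
        {
                .hwirq_base    = MT6359_IRQ_VCORE_OC,         /* hwirq 1 */
                .num_int_regs  = ((9 - 1) / 16) + 1,          /* 9 BUCK bits -> 1 register */
                .en_reg        = MT6359_BUCK_TOP_INT_CON0,
                .en_reg_shift  = 0x6,
                .sta_reg       = MT6359_BUCK_TOP_INT_STATUS0,
                .sta_reg_shift = 0x2,
                .top_offset    = MT6359_BUCK_TOP,
        }
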
diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h
new file mode 100644
index 000000000000..2a4394a27b1c
--- /dev/null
+++ b/include/linux/mfd/mt6359/registers.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_REGISTERS_H__
+#define __MFD_MT6359_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6359_SWCID 0xa
+#define MT6359_TOPSTATUS 0x2a
+#define MT6359_TOP_RST_MISC 0x14c
+#define MT6359_MISC_TOP_INT_CON0 0x188
+#define MT6359_MISC_TOP_INT_STATUS0 0x194
+#define MT6359_TOP_INT_STATUS0 0x19e
+#define MT6359_SCK_TOP_INT_CON0 0x528
+#define MT6359_SCK_TOP_INT_STATUS0 0x534
+#define MT6359_EOSC_CALI_CON0 0x53a
+#define MT6359_EOSC_CALI_CON1 0x53c
+#define MT6359_RTC_MIX_CON0 0x53e
+#define MT6359_RTC_MIX_CON1 0x540
+#define MT6359_RTC_MIX_CON2 0x542
+#define MT6359_RTC_DSN_ID 0x580
+#define MT6359_RTC_DSN_REV0 0x582
+#define MT6359_RTC_DBI 0x584
+#define MT6359_RTC_DXI 0x586
+#define MT6359_RTC_BBPU 0x588
+#define MT6359_RTC_IRQ_STA 0x58a
+#define MT6359_RTC_IRQ_EN 0x58c
+#define MT6359_RTC_CII_EN 0x58e
+#define MT6359_RTC_AL_MASK 0x590
+#define MT6359_RTC_TC_SEC 0x592
+#define MT6359_RTC_TC_MIN 0x594
+#define MT6359_RTC_TC_HOU 0x596
+#define MT6359_RTC_TC_DOM 0x598
+#define MT6359_RTC_TC_DOW 0x59a
+#define MT6359_RTC_TC_MTH 0x59c
+#define MT6359_RTC_TC_YEA 0x59e
+#define MT6359_RTC_AL_SEC 0x5a0
+#define MT6359_RTC_AL_MIN 0x5a2
+#define MT6359_RTC_AL_HOU 0x5a4
+#define MT6359_RTC_AL_DOM 0x5a6
+#define MT6359_RTC_AL_DOW 0x5a8
+#define MT6359_RTC_AL_MTH 0x5aa
+#define MT6359_RTC_AL_YEA 0x5ac
+#define MT6359_RTC_OSC32CON 0x5ae
+#define MT6359_RTC_POWERKEY1 0x5b0
+#define MT6359_RTC_POWERKEY2 0x5b2
+#define MT6359_RTC_PDN1 0x5b4
+#define MT6359_RTC_PDN2 0x5b6
+#define MT6359_RTC_SPAR0 0x5b8
+#define MT6359_RTC_SPAR1 0x5ba
+#define MT6359_RTC_PROT 0x5bc
+#define MT6359_RTC_DIFF 0x5be
+#define MT6359_RTC_CALI 0x5c0
+#define MT6359_RTC_WRTGR 0x5c2
+#define MT6359_RTC_CON 0x5c4
+#define MT6359_RTC_SEC_CTRL 0x5c6
+#define MT6359_RTC_INT_CNT 0x5c8
+#define MT6359_RTC_SEC_DAT0 0x5ca
+#define MT6359_RTC_SEC_DAT1 0x5cc
+#define MT6359_RTC_SEC_DAT2 0x5ce
+#define MT6359_RTC_SEC_DSN_ID 0x600
+#define MT6359_RTC_SEC_DSN_REV0 0x602
+#define MT6359_RTC_SEC_DBI 0x604
+#define MT6359_RTC_SEC_DXI 0x606
+#define MT6359_RTC_TC_SEC_SEC 0x608
+#define MT6359_RTC_TC_MIN_SEC 0x60a
+#define MT6359_RTC_TC_HOU_SEC 0x60c
+#define MT6359_RTC_TC_DOM_SEC 0x60e
+#define MT6359_RTC_TC_DOW_SEC 0x610
+#define MT6359_RTC_TC_MTH_SEC 0x612
+#define MT6359_RTC_TC_YEA_SEC 0x614
+#define MT6359_RTC_SEC_CK_PDN 0x616
+#define MT6359_RTC_SEC_WRTGR 0x618
+#define MT6359_PSC_TOP_INT_CON0 0x910
+#define MT6359_PSC_TOP_INT_STATUS0 0x91c
+#define MT6359_BM_TOP_INT_CON0 0xc32
+#define MT6359_BM_TOP_INT_CON1 0xc38
+#define MT6359_BM_TOP_INT_STATUS0 0xc4a
+#define MT6359_BM_TOP_INT_STATUS1 0xc4c
+#define MT6359_HK_TOP_INT_CON0 0xf92
+#define MT6359_HK_TOP_INT_STATUS0 0xf9e
+#define MT6359_BUCK_TOP_INT_CON0 0x1418
+#define MT6359_BUCK_TOP_INT_STATUS0 0x1424
+#define MT6359_BUCK_VPU_CON0 0x1488
+#define MT6359_BUCK_VPU_DBG0 0x14a6
+#define MT6359_BUCK_VPU_DBG1 0x14a8
+#define MT6359_BUCK_VPU_ELR0 0x14ac
+#define MT6359_BUCK_VCORE_CON0 0x1508
+#define MT6359_BUCK_VCORE_DBG0 0x1526
+#define MT6359_BUCK_VCORE_DBG1 0x1528
+#define MT6359_BUCK_VCORE_SSHUB_CON0 0x152a
+#define MT6359_BUCK_VCORE_ELR0 0x1534
+#define MT6359_BUCK_VGPU11_CON0 0x1588
+#define MT6359_BUCK_VGPU11_DBG0 0x15a6
+#define MT6359_BUCK_VGPU11_DBG1 0x15a8
+#define MT6359_BUCK_VGPU11_ELR0 0x15ac
+#define MT6359_BUCK_VMODEM_CON0 0x1688
+#define MT6359_BUCK_VMODEM_DBG0 0x16a6
+#define MT6359_BUCK_VMODEM_DBG1 0x16a8
+#define MT6359_BUCK_VMODEM_ELR0 0x16ae
+#define MT6359_BUCK_VPROC1_CON0 0x1708
+#define MT6359_BUCK_VPROC1_DBG0 0x1726
+#define MT6359_BUCK_VPROC1_DBG1 0x1728
+#define MT6359_BUCK_VPROC1_ELR0 0x172e
+#define MT6359_BUCK_VPROC2_CON0 0x1788
+#define MT6359_BUCK_VPROC2_DBG0 0x17a6
+#define MT6359_BUCK_VPROC2_DBG1 0x17a8
+#define MT6359_BUCK_VPROC2_ELR0 0x17b2
+#define MT6359_BUCK_VS1_CON0 0x1808
+#define MT6359_BUCK_VS1_DBG0 0x1826
+#define MT6359_BUCK_VS1_DBG1 0x1828
+#define MT6359_BUCK_VS1_ELR0 0x1834
+#define MT6359_BUCK_VS2_CON0 0x1888
+#define MT6359_BUCK_VS2_DBG0 0x18a6
+#define MT6359_BUCK_VS2_DBG1 0x18a8
+#define MT6359_BUCK_VS2_ELR0 0x18b4
+#define MT6359_BUCK_VPA_CON0 0x1908
+#define MT6359_BUCK_VPA_CON1 0x190e
+#define MT6359_BUCK_VPA_CFG0 0x1910
+#define MT6359_BUCK_VPA_CFG1 0x1912
+#define MT6359_BUCK_VPA_DBG0 0x1914
+#define MT6359_BUCK_VPA_DBG1 0x1916
+#define MT6359_VGPUVCORE_ANA_CON2 0x198e
+#define MT6359_VGPUVCORE_ANA_CON13 0x19a4
+#define MT6359_VPROC1_ANA_CON3 0x19b2
+#define MT6359_VPROC2_ANA_CON3 0x1a0e
+#define MT6359_VMODEM_ANA_CON3 0x1a1a
+#define MT6359_VPU_ANA_CON3 0x1a26
+#define MT6359_VS1_ANA_CON0 0x1a2c
+#define MT6359_VS2_ANA_CON0 0x1a34
+#define MT6359_VPA_ANA_CON0 0x1a3c
+#define MT6359_LDO_TOP_INT_CON0 0x1b14
+#define MT6359_LDO_TOP_INT_CON1 0x1b1a
+#define MT6359_LDO_TOP_INT_STATUS0 0x1b28
+#define MT6359_LDO_TOP_INT_STATUS1 0x1b2a
+#define MT6359_LDO_VSRAM_PROC1_ELR 0x1b40
+#define MT6359_LDO_VSRAM_PROC2_ELR 0x1b42
+#define MT6359_LDO_VSRAM_OTHERS_ELR 0x1b44
+#define MT6359_LDO_VSRAM_MD_ELR 0x1b46
+#define MT6359_LDO_VFE28_CON0 0x1b88
+#define MT6359_LDO_VFE28_MON 0x1b8a
+#define MT6359_LDO_VXO22_CON0 0x1b98
+#define MT6359_LDO_VXO22_MON 0x1b9a
+#define MT6359_LDO_VRF18_CON0 0x1ba8
+#define MT6359_LDO_VRF18_MON 0x1baa
+#define MT6359_LDO_VRF12_CON0 0x1bb8
+#define MT6359_LDO_VRF12_MON 0x1bba
+#define MT6359_LDO_VEFUSE_CON0 0x1bc8
+#define MT6359_LDO_VEFUSE_MON 0x1bca
+#define MT6359_LDO_VCN33_1_CON0 0x1bd8
+#define MT6359_LDO_VCN33_1_MON 0x1bda
+#define MT6359_LDO_VCN33_1_MULTI_SW 0x1be8
+#define MT6359_LDO_VCN33_2_CON0 0x1c08
+#define MT6359_LDO_VCN33_2_MON 0x1c0a
+#define MT6359_LDO_VCN33_2_MULTI_SW 0x1c18
+#define MT6359_LDO_VCN13_CON0 0x1c1a
+#define MT6359_LDO_VCN13_MON 0x1c1c
+#define MT6359_LDO_VCN18_CON0 0x1c2a
+#define MT6359_LDO_VCN18_MON 0x1c2c
+#define MT6359_LDO_VA09_CON0 0x1c3a
+#define MT6359_LDO_VA09_MON 0x1c3c
+#define MT6359_LDO_VCAMIO_CON0 0x1c4a
+#define MT6359_LDO_VCAMIO_MON 0x1c4c
+#define MT6359_LDO_VA12_CON0 0x1c5a
+#define MT6359_LDO_VA12_MON 0x1c5c
+#define MT6359_LDO_VAUX18_CON0 0x1c88
+#define MT6359_LDO_VAUX18_MON 0x1c8a
+#define MT6359_LDO_VAUD18_CON0 0x1c98
+#define MT6359_LDO_VAUD18_MON 0x1c9a
+#define MT6359_LDO_VIO18_CON0 0x1ca8
+#define MT6359_LDO_VIO18_MON 0x1caa
+#define MT6359_LDO_VEMC_CON0 0x1cb8
+#define MT6359_LDO_VEMC_MON 0x1cba
+#define MT6359_LDO_VSIM1_CON0 0x1cc8
+#define MT6359_LDO_VSIM1_MON 0x1cca
+#define MT6359_LDO_VSIM2_CON0 0x1cd8
+#define MT6359_LDO_VSIM2_MON 0x1cda
+#define MT6359_LDO_VUSB_CON0 0x1d08
+#define MT6359_LDO_VUSB_MON 0x1d0a
+#define MT6359_LDO_VUSB_MULTI_SW 0x1d18
+#define MT6359_LDO_VRFCK_CON0 0x1d1a
+#define MT6359_LDO_VRFCK_MON 0x1d1c
+#define MT6359_LDO_VBBCK_CON0 0x1d2a
+#define MT6359_LDO_VBBCK_MON 0x1d2c
+#define MT6359_LDO_VBIF28_CON0 0x1d3a
+#define MT6359_LDO_VBIF28_MON 0x1d3c
+#define MT6359_LDO_VIBR_CON0 0x1d4a
+#define MT6359_LDO_VIBR_MON 0x1d4c
+#define MT6359_LDO_VIO28_CON0 0x1d5a
+#define MT6359_LDO_VIO28_MON 0x1d5c
+#define MT6359_LDO_VM18_CON0 0x1d88
+#define MT6359_LDO_VM18_MON 0x1d8a
+#define MT6359_LDO_VUFS_CON0 0x1d98
+#define MT6359_LDO_VUFS_MON 0x1d9a
+#define MT6359_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359_LDO_VSRAM_PROC1_MON 0x1e8a
+#define MT6359_LDO_VSRAM_PROC1_VOSEL1 0x1e8e
+#define MT6359_LDO_VSRAM_PROC2_CON0 0x1ea6
+#define MT6359_LDO_VSRAM_PROC2_MON 0x1ea8
+#define MT6359_LDO_VSRAM_PROC2_VOSEL1 0x1eac
+#define MT6359_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359_LDO_VSRAM_OTHERS_MON 0x1f0a
+#define MT6359_LDO_VSRAM_OTHERS_VOSEL1 0x1f0e
+#define MT6359_LDO_VSRAM_OTHERS_SSHUB 0x1f26
+#define MT6359_LDO_VSRAM_MD_CON0 0x1f2c
+#define MT6359_LDO_VSRAM_MD_MON 0x1f2e
+#define MT6359_LDO_VSRAM_MD_VOSEL1 0x1f32
+#define MT6359_VFE28_ANA_CON0 0x1f88
+#define MT6359_VAUX18_ANA_CON0 0x1f8c
+#define MT6359_VUSB_ANA_CON0 0x1f90
+#define MT6359_VBIF28_ANA_CON0 0x1f94
+#define MT6359_VCN33_1_ANA_CON0 0x1f98
+#define MT6359_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359_VEMC_ANA_CON0 0x1fa0
+#define MT6359_VSIM1_ANA_CON0 0x1fa4
+#define MT6359_VSIM2_ANA_CON0 0x1fa8
+#define MT6359_VIO28_ANA_CON0 0x1fac
+#define MT6359_VIBR_ANA_CON0 0x1fb0
+#define MT6359_VRF18_ANA_CON0 0x2008
+#define MT6359_VEFUSE_ANA_CON0 0x200c
+#define MT6359_VCN18_ANA_CON0 0x2010
+#define MT6359_VCAMIO_ANA_CON0 0x2014
+#define MT6359_VAUD18_ANA_CON0 0x2018
+#define MT6359_VIO18_ANA_CON0 0x201c
+#define MT6359_VM18_ANA_CON0 0x2020
+#define MT6359_VUFS_ANA_CON0 0x2024
+#define MT6359_VRF12_ANA_CON0 0x202a
+#define MT6359_VCN13_ANA_CON0 0x202e
+#define MT6359_VA09_ANA_CON0 0x2032
+#define MT6359_VA12_ANA_CON0 0x2036
+#define MT6359_VXO22_ANA_CON0 0x2088
+#define MT6359_VRFCK_ANA_CON0 0x208c
+#define MT6359_VBBCK_ANA_CON0 0x2094
+#define MT6359_AUD_TOP_INT_CON0 0x2328
+#define MT6359_AUD_TOP_INT_STATUS0 0x2334
+
+#define MT6359_RG_BUCK_VPU_EN_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_SHIFT 1
+#define MT6359_DA_VPU_VOSEL_ADDR MT6359_BUCK_VPU_DBG0
+#define MT6359_DA_VPU_VOSEL_MASK 0x7F
+#define MT6359_DA_VPU_VOSEL_SHIFT 0
+#define MT6359_DA_VPU_EN_ADDR MT6359_BUCK_VPU_DBG1
+#define MT6359_RG_BUCK_VPU_VOSEL_ADDR MT6359_BUCK_VPU_ELR0
+#define MT6359_RG_BUCK_VPU_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPU_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VCORE_EN_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_SHIFT 1
+#define MT6359_DA_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_DBG0
+#define MT6359_DA_VCORE_VOSEL_MASK 0x7F
+#define MT6359_DA_VCORE_VOSEL_SHIFT 0
+#define MT6359_DA_VCORE_EN_ADDR MT6359_BUCK_VCORE_DBG1
+#define MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT 4
+#define MT6359_RG_BUCK_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_ELR0
+#define MT6359_RG_BUCK_VCORE_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_SHIFT 1
+#define MT6359_DA_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_DBG0
+#define MT6359_DA_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_DA_VGPU11_VOSEL_SHIFT 0
+#define MT6359_DA_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_DBG1
+#define MT6359_RG_BUCK_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_ELR0
+#define MT6359_RG_BUCK_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_SHIFT 1
+#define MT6359_DA_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_DBG0
+#define MT6359_DA_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_DA_VMODEM_VOSEL_SHIFT 0
+#define MT6359_DA_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_DBG1
+#define MT6359_RG_BUCK_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_ELR0
+#define MT6359_RG_BUCK_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_SHIFT 1
+#define MT6359_DA_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_DBG0
+#define MT6359_DA_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC1_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_DBG1
+#define MT6359_RG_BUCK_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_ELR0
+#define MT6359_RG_BUCK_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_SHIFT 1
+#define MT6359_DA_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_DBG0
+#define MT6359_DA_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC2_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_DBG1
+#define MT6359_RG_BUCK_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_ELR0
+#define MT6359_RG_BUCK_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS1_EN_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_SHIFT 1
+#define MT6359_DA_VS1_VOSEL_ADDR MT6359_BUCK_VS1_DBG0
+#define MT6359_DA_VS1_VOSEL_MASK 0x7F
+#define MT6359_DA_VS1_VOSEL_SHIFT 0
+#define MT6359_DA_VS1_EN_ADDR MT6359_BUCK_VS1_DBG1
+#define MT6359_RG_BUCK_VS1_VOSEL_ADDR MT6359_BUCK_VS1_ELR0
+#define MT6359_RG_BUCK_VS1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS2_EN_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_SHIFT 1
+#define MT6359_DA_VS2_VOSEL_ADDR MT6359_BUCK_VS2_DBG0
+#define MT6359_DA_VS2_VOSEL_MASK 0x7F
+#define MT6359_DA_VS2_VOSEL_SHIFT 0
+#define MT6359_DA_VS2_EN_ADDR MT6359_BUCK_VS2_DBG1
+#define MT6359_RG_BUCK_VS2_VOSEL_ADDR MT6359_BUCK_VS2_ELR0
+#define MT6359_RG_BUCK_VS2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPA_EN_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_SHIFT 1
+#define MT6359_RG_BUCK_VPA_VOSEL_ADDR MT6359_BUCK_VPA_CON1
+#define MT6359_RG_BUCK_VPA_VOSEL_MASK 0x3F
+#define MT6359_RG_BUCK_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_VOSEL_ADDR MT6359_BUCK_VPA_DBG0
+#define MT6359_DA_VPA_VOSEL_MASK 0x3F
+#define MT6359_DA_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_EN_ADDR MT6359_BUCK_VPA_DBG1
+#define MT6359_RG_VGPU11_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON2
+#define MT6359_RG_VGPU11_FCCM_SHIFT 9
+#define MT6359_RG_VCORE_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON13
+#define MT6359_RG_VCORE_FCCM_SHIFT 5
+#define MT6359_RG_VPROC1_FCCM_ADDR MT6359_VPROC1_ANA_CON3
+#define MT6359_RG_VPROC1_FCCM_SHIFT 1
+#define MT6359_RG_VPROC2_FCCM_ADDR MT6359_VPROC2_ANA_CON3
+#define MT6359_RG_VPROC2_FCCM_SHIFT 1
+#define MT6359_RG_VMODEM_FCCM_ADDR MT6359_VMODEM_ANA_CON3
+#define MT6359_RG_VMODEM_FCCM_SHIFT 1
+#define MT6359_RG_VPU_FCCM_ADDR MT6359_VPU_ANA_CON3
+#define MT6359_RG_VPU_FCCM_SHIFT 1
+#define MT6359_RG_VS1_FPWM_ADDR MT6359_VS1_ANA_CON0
+#define MT6359_RG_VS1_FPWM_SHIFT 3
+#define MT6359_RG_VS2_FPWM_ADDR MT6359_VS2_ANA_CON0
+#define MT6359_RG_VS2_FPWM_SHIFT 3
+#define MT6359_RG_VPA_MODESET_ADDR MT6359_VPA_ANA_CON0
+#define MT6359_RG_VPA_MODESET_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_ELR
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_ELR
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_ELR
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_ELR
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VFE28_EN_ADDR MT6359_LDO_VFE28_CON0
+#define MT6359_DA_VFE28_B_EN_ADDR MT6359_LDO_VFE28_MON
+#define MT6359_RG_LDO_VXO22_EN_ADDR MT6359_LDO_VXO22_CON0
+#define MT6359_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359_DA_VXO22_B_EN_ADDR MT6359_LDO_VXO22_MON
+#define MT6359_RG_LDO_VRF18_EN_ADDR MT6359_LDO_VRF18_CON0
+#define MT6359_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359_DA_VRF18_B_EN_ADDR MT6359_LDO_VRF18_MON
+#define MT6359_RG_LDO_VRF12_EN_ADDR MT6359_LDO_VRF12_CON0
+#define MT6359_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359_DA_VRF12_B_EN_ADDR MT6359_LDO_VRF12_MON
+#define MT6359_RG_LDO_VEFUSE_EN_ADDR MT6359_LDO_VEFUSE_CON0
+#define MT6359_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359_DA_VEFUSE_B_EN_ADDR MT6359_LDO_VEFUSE_MON
+#define MT6359_RG_LDO_VCN33_1_EN_0_ADDR MT6359_LDO_VCN33_1_CON0
+#define MT6359_RG_LDO_VCN33_1_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VCN33_1_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_1_B_EN_ADDR MT6359_LDO_VCN33_1_MON
+#define MT6359_RG_LDO_VCN33_1_EN_1_ADDR MT6359_LDO_VCN33_1_MULTI_SW
+#define MT6359_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN33_2_EN_0_ADDR MT6359_LDO_VCN33_2_CON0
+#define MT6359_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_2_B_EN_ADDR MT6359_LDO_VCN33_2_MON
+#define MT6359_RG_LDO_VCN33_2_EN_1_ADDR MT6359_LDO_VCN33_2_MULTI_SW
+#define MT6359_RG_LDO_VCN33_2_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VCN33_2_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN13_EN_ADDR MT6359_LDO_VCN13_CON0
+#define MT6359_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359_DA_VCN13_B_EN_ADDR MT6359_LDO_VCN13_MON
+#define MT6359_RG_LDO_VCN18_EN_ADDR MT6359_LDO_VCN18_CON0
+#define MT6359_DA_VCN18_B_EN_ADDR MT6359_LDO_VCN18_MON
+#define MT6359_RG_LDO_VA09_EN_ADDR MT6359_LDO_VA09_CON0
+#define MT6359_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359_DA_VA09_B_EN_ADDR MT6359_LDO_VA09_MON
+#define MT6359_RG_LDO_VCAMIO_EN_ADDR MT6359_LDO_VCAMIO_CON0
+#define MT6359_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359_DA_VCAMIO_B_EN_ADDR MT6359_LDO_VCAMIO_MON
+#define MT6359_RG_LDO_VA12_EN_ADDR MT6359_LDO_VA12_CON0
+#define MT6359_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359_DA_VA12_B_EN_ADDR MT6359_LDO_VA12_MON
+#define MT6359_RG_LDO_VAUX18_EN_ADDR MT6359_LDO_VAUX18_CON0
+#define MT6359_DA_VAUX18_B_EN_ADDR MT6359_LDO_VAUX18_MON
+#define MT6359_RG_LDO_VAUD18_EN_ADDR MT6359_LDO_VAUD18_CON0
+#define MT6359_DA_VAUD18_B_EN_ADDR MT6359_LDO_VAUD18_MON
+#define MT6359_RG_LDO_VIO18_EN_ADDR MT6359_LDO_VIO18_CON0
+#define MT6359_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359_DA_VIO18_B_EN_ADDR MT6359_LDO_VIO18_MON
+#define MT6359_RG_LDO_VEMC_EN_ADDR MT6359_LDO_VEMC_CON0
+#define MT6359_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359_DA_VEMC_B_EN_ADDR MT6359_LDO_VEMC_MON
+#define MT6359_RG_LDO_VSIM1_EN_ADDR MT6359_LDO_VSIM1_CON0
+#define MT6359_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359_DA_VSIM1_B_EN_ADDR MT6359_LDO_VSIM1_MON
+#define MT6359_RG_LDO_VSIM2_EN_ADDR MT6359_LDO_VSIM2_CON0
+#define MT6359_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359_DA_VSIM2_B_EN_ADDR MT6359_LDO_VSIM2_MON
+#define MT6359_RG_LDO_VUSB_EN_0_ADDR MT6359_LDO_VUSB_CON0
+#define MT6359_RG_LDO_VUSB_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_0_SHIFT 0
+#define MT6359_DA_VUSB_B_EN_ADDR MT6359_LDO_VUSB_MON
+#define MT6359_RG_LDO_VUSB_EN_1_ADDR MT6359_LDO_VUSB_MULTI_SW
+#define MT6359_RG_LDO_VUSB_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VRFCK_EN_ADDR MT6359_LDO_VRFCK_CON0
+#define MT6359_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359_DA_VRFCK_B_EN_ADDR MT6359_LDO_VRFCK_MON
+#define MT6359_RG_LDO_VBBCK_EN_ADDR MT6359_LDO_VBBCK_CON0
+#define MT6359_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359_DA_VBBCK_B_EN_ADDR MT6359_LDO_VBBCK_MON
+#define MT6359_RG_LDO_VBIF28_EN_ADDR MT6359_LDO_VBIF28_CON0
+#define MT6359_DA_VBIF28_B_EN_ADDR MT6359_LDO_VBIF28_MON
+#define MT6359_RG_LDO_VIBR_EN_ADDR MT6359_LDO_VIBR_CON0
+#define MT6359_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359_DA_VIBR_B_EN_ADDR MT6359_LDO_VIBR_MON
+#define MT6359_RG_LDO_VIO28_EN_ADDR MT6359_LDO_VIO28_CON0
+#define MT6359_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359_DA_VIO28_B_EN_ADDR MT6359_LDO_VIO28_MON
+#define MT6359_RG_LDO_VM18_EN_ADDR MT6359_LDO_VM18_CON0
+#define MT6359_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359_DA_VM18_B_EN_ADDR MT6359_LDO_VM18_MON
+#define MT6359_RG_LDO_VUFS_EN_ADDR MT6359_LDO_VUFS_CON0
+#define MT6359_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359_DA_VUFS_B_EN_ADDR MT6359_LDO_VUFS_MON
+#define MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359_LDO_VSRAM_PROC1_CON0
+#define MT6359_DA_VSRAM_PROC1_B_EN_ADDR MT6359_LDO_VSRAM_PROC1_MON
+#define MT6359_DA_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359_DA_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC1_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359_LDO_VSRAM_PROC2_CON0
+#define MT6359_DA_VSRAM_PROC2_B_EN_ADDR MT6359_LDO_VSRAM_PROC2_MON
+#define MT6359_DA_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359_DA_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC2_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359_LDO_VSRAM_OTHERS_CON0
+#define MT6359_DA_VSRAM_OTHERS_B_EN_ADDR MT6359_LDO_VSRAM_OTHERS_MON
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_MD_EN_ADDR MT6359_LDO_VSRAM_MD_CON0
+#define MT6359_DA_VSRAM_MD_B_EN_ADDR MT6359_LDO_VSRAM_MD_MON
+#define MT6359_DA_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_VOSEL1
+#define MT6359_DA_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_MD_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_1_VOSEL_ADDR MT6359_VCN33_1_ANA_CON0
+#define MT6359_RG_VCN33_1_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_1_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_2_VOSEL_ADDR MT6359_VCN33_2_ANA_CON0
+#define MT6359_RG_VCN33_2_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_2_VOSEL_SHIFT 8
+#define MT6359_RG_VEMC_VOSEL_ADDR MT6359_VEMC_ANA_CON0
+#define MT6359_RG_VEMC_VOSEL_MASK 0xF
+#define MT6359_RG_VEMC_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM1_VOSEL_ADDR MT6359_VSIM1_ANA_CON0
+#define MT6359_RG_VSIM1_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM1_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM2_VOSEL_ADDR MT6359_VSIM2_ANA_CON0
+#define MT6359_RG_VSIM2_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM2_VOSEL_SHIFT 8
+#define MT6359_RG_VIO28_VOSEL_ADDR MT6359_VIO28_ANA_CON0
+#define MT6359_RG_VIO28_VOSEL_MASK 0xF
+#define MT6359_RG_VIO28_VOSEL_SHIFT 8
+#define MT6359_RG_VIBR_VOSEL_ADDR MT6359_VIBR_ANA_CON0
+#define MT6359_RG_VIBR_VOSEL_MASK 0xF
+#define MT6359_RG_VIBR_VOSEL_SHIFT 8
+#define MT6359_RG_VRF18_VOSEL_ADDR MT6359_VRF18_ANA_CON0
+#define MT6359_RG_VRF18_VOSEL_MASK 0xF
+#define MT6359_RG_VRF18_VOSEL_SHIFT 8
+#define MT6359_RG_VEFUSE_VOSEL_ADDR MT6359_VEFUSE_ANA_CON0
+#define MT6359_RG_VEFUSE_VOSEL_MASK 0xF
+#define MT6359_RG_VEFUSE_VOSEL_SHIFT 8
+#define MT6359_RG_VCAMIO_VOSEL_ADDR MT6359_VCAMIO_ANA_CON0
+#define MT6359_RG_VCAMIO_VOSEL_MASK 0xF
+#define MT6359_RG_VCAMIO_VOSEL_SHIFT 8
+#define MT6359_RG_VIO18_VOSEL_ADDR MT6359_VIO18_ANA_CON0
+#define MT6359_RG_VIO18_VOSEL_MASK 0xF
+#define MT6359_RG_VIO18_VOSEL_SHIFT 8
+#define MT6359_RG_VM18_VOSEL_ADDR MT6359_VM18_ANA_CON0
+#define MT6359_RG_VM18_VOSEL_MASK 0xF
+#define MT6359_RG_VM18_VOSEL_SHIFT 8
+#define MT6359_RG_VUFS_VOSEL_ADDR MT6359_VUFS_ANA_CON0
+#define MT6359_RG_VUFS_VOSEL_MASK 0xF
+#define MT6359_RG_VUFS_VOSEL_SHIFT 8
+#define MT6359_RG_VRF12_VOSEL_ADDR MT6359_VRF12_ANA_CON0
+#define MT6359_RG_VRF12_VOSEL_MASK 0xF
+#define MT6359_RG_VRF12_VOSEL_SHIFT 8
+#define MT6359_RG_VCN13_VOSEL_ADDR MT6359_VCN13_ANA_CON0
+#define MT6359_RG_VCN13_VOSEL_MASK 0xF
+#define MT6359_RG_VCN13_VOSEL_SHIFT 8
+#define MT6359_RG_VA09_VOSEL_ADDR MT6359_VA09_ANA_CON0
+#define MT6359_RG_VA09_VOSEL_MASK 0xF
+#define MT6359_RG_VA09_VOSEL_SHIFT 8
+#define MT6359_RG_VA12_VOSEL_ADDR MT6359_VA12_ANA_CON0
+#define MT6359_RG_VA12_VOSEL_MASK 0xF
+#define MT6359_RG_VA12_VOSEL_SHIFT 8
+#define MT6359_RG_VXO22_VOSEL_ADDR MT6359_VXO22_ANA_CON0
+#define MT6359_RG_VXO22_VOSEL_MASK 0xF
+#define MT6359_RG_VXO22_VOSEL_SHIFT 8
+#define MT6359_RG_VRFCK_VOSEL_ADDR MT6359_VRFCK_ANA_CON0
+#define MT6359_RG_VRFCK_VOSEL_MASK 0xF
+#define MT6359_RG_VRFCK_VOSEL_SHIFT 8
+#define MT6359_RG_VBBCK_VOSEL_ADDR MT6359_VBBCK_ANA_CON0
+#define MT6359_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359_RG_VBBCK_VOSEL_SHIFT 8
+
+#endif /* __MFD_MT6359_REGISTERS_H__ */
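
Each control above is published as an _ADDR/_MASK/_SHIFT triplet, so reading a
field reduces to a shift-and-mask over a regmap read. A minimal sketch,
assuming a regmap handle obtained from the parent MFD device:

        unsigned int val, vosel;

        regmap_read(regmap, MT6359_DA_VPU_VOSEL_ADDR, &val);
        vosel = (val >> MT6359_DA_VPU_VOSEL_SHIFT) & MT6359_DA_VPU_VOSEL_MASK;
        /* vosel is the 7-bit VPU buck selector reported on the DA (monitor) path */
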
diff --git a/include/linux/mfd/mt6359p/registers.h b/include/linux/mfd/mt6359p/registers.h
new file mode 100644
index 000000000000..3d97c1885171
--- /dev/null
+++ b/include/linux/mfd/mt6359p/registers.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359P_REGISTERS_H__
+#define __MFD_MT6359P_REGISTERS_H__
+
+#define MT6359P_CHIP_VER 0x5930
+
+/* PMIC Registers */
+#define MT6359P_HWCID 0x8
+#define MT6359P_TOP_TRAP 0x50
+#define MT6359P_TOP_TMA_KEY 0x3a8
+#define MT6359P_BUCK_VCORE_ELR_NUM 0x152a
+#define MT6359P_BUCK_VCORE_ELR0 0x152c
+#define MT6359P_BUCK_VGPU11_SSHUB_CON0 0x15aa
+#define MT6359P_BUCK_VGPU11_ELR0 0x15b4
+#define MT6359P_LDO_VSRAM_PROC1_ELR 0x1b44
+#define MT6359P_LDO_VSRAM_PROC2_ELR 0x1b46
+#define MT6359P_LDO_VSRAM_OTHERS_ELR 0x1b48
+#define MT6359P_LDO_VSRAM_MD_ELR 0x1b4a
+#define MT6359P_LDO_VEMC_ELR_0 0x1b4c
+#define MT6359P_LDO_VFE28_CON0 0x1b88
+#define MT6359P_LDO_VFE28_MON 0x1b8c
+#define MT6359P_LDO_VXO22_CON0 0x1b9a
+#define MT6359P_LDO_VXO22_MON 0x1b9e
+#define MT6359P_LDO_VRF18_CON0 0x1bac
+#define MT6359P_LDO_VRF18_MON 0x1bb0
+#define MT6359P_LDO_VRF12_CON0 0x1bbe
+#define MT6359P_LDO_VRF12_MON 0x1bc2
+#define MT6359P_LDO_VEFUSE_CON0 0x1bd0
+#define MT6359P_LDO_VEFUSE_MON 0x1bd4
+#define MT6359P_LDO_VCN33_1_CON0 0x1be2
+#define MT6359P_LDO_VCN33_1_MON 0x1be6
+#define MT6359P_LDO_VCN33_1_MULTI_SW 0x1bf4
+#define MT6359P_LDO_VCN33_2_CON0 0x1c08
+#define MT6359P_LDO_VCN33_2_MON 0x1c0c
+#define MT6359P_LDO_VCN33_2_MULTI_SW 0x1c1a
+#define MT6359P_LDO_VCN13_CON0 0x1c1c
+#define MT6359P_LDO_VCN13_MON 0x1c20
+#define MT6359P_LDO_VCN18_CON0 0x1c2e
+#define MT6359P_LDO_VCN18_MON 0x1c32
+#define MT6359P_LDO_VA09_CON0 0x1c40
+#define MT6359P_LDO_VA09_MON 0x1c44
+#define MT6359P_LDO_VCAMIO_CON0 0x1c52
+#define MT6359P_LDO_VCAMIO_MON 0x1c56
+#define MT6359P_LDO_VA12_CON0 0x1c64
+#define MT6359P_LDO_VA12_MON 0x1c68
+#define MT6359P_LDO_VAUX18_CON0 0x1c88
+#define MT6359P_LDO_VAUX18_MON 0x1c8c
+#define MT6359P_LDO_VAUD18_CON0 0x1c9a
+#define MT6359P_LDO_VAUD18_MON 0x1c9e
+#define MT6359P_LDO_VIO18_CON0 0x1cac
+#define MT6359P_LDO_VIO18_MON 0x1cb0
+#define MT6359P_LDO_VEMC_CON0 0x1cbe
+#define MT6359P_LDO_VEMC_MON 0x1cc2
+#define MT6359P_LDO_VSIM1_CON0 0x1cd0
+#define MT6359P_LDO_VSIM1_MON 0x1cd4
+#define MT6359P_LDO_VSIM2_CON0 0x1ce2
+#define MT6359P_LDO_VSIM2_MON 0x1ce6
+#define MT6359P_LDO_VUSB_CON0 0x1d08
+#define MT6359P_LDO_VUSB_MON 0x1d0c
+#define MT6359P_LDO_VUSB_MULTI_SW 0x1d1a
+#define MT6359P_LDO_VRFCK_CON0 0x1d1c
+#define MT6359P_LDO_VRFCK_MON 0x1d20
+#define MT6359P_LDO_VBBCK_CON0 0x1d2e
+#define MT6359P_LDO_VBBCK_MON 0x1d32
+#define MT6359P_LDO_VBIF28_CON0 0x1d40
+#define MT6359P_LDO_VBIF28_MON 0x1d44
+#define MT6359P_LDO_VIBR_CON0 0x1d52
+#define MT6359P_LDO_VIBR_MON 0x1d56
+#define MT6359P_LDO_VIO28_CON0 0x1d64
+#define MT6359P_LDO_VIO28_MON 0x1d68
+#define MT6359P_LDO_VM18_CON0 0x1d88
+#define MT6359P_LDO_VM18_MON 0x1d8c
+#define MT6359P_LDO_VUFS_CON0 0x1d9a
+#define MT6359P_LDO_VUFS_MON 0x1d9e
+#define MT6359P_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359P_LDO_VSRAM_PROC1_MON 0x1e8c
+#define MT6359P_LDO_VSRAM_PROC1_VOSEL1 0x1e90
+#define MT6359P_LDO_VSRAM_PROC2_CON0 0x1ea8
+#define MT6359P_LDO_VSRAM_PROC2_MON 0x1eac
+#define MT6359P_LDO_VSRAM_PROC2_VOSEL1 0x1eb0
+#define MT6359P_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359P_LDO_VSRAM_OTHERS_MON 0x1f0c
+#define MT6359P_LDO_VSRAM_OTHERS_VOSEL1 0x1f10
+#define MT6359P_LDO_VSRAM_OTHERS_SSHUB 0x1f28
+#define MT6359P_LDO_VSRAM_MD_CON0 0x1f2e
+#define MT6359P_LDO_VSRAM_MD_MON 0x1f32
+#define MT6359P_LDO_VSRAM_MD_VOSEL1 0x1f36
+#define MT6359P_VFE28_ANA_CON0 0x1f88
+#define MT6359P_VAUX18_ANA_CON0 0x1f8c
+#define MT6359P_VUSB_ANA_CON0 0x1f90
+#define MT6359P_VBIF28_ANA_CON0 0x1f94
+#define MT6359P_VCN33_1_ANA_CON0 0x1f98
+#define MT6359P_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359P_VEMC_ANA_CON0 0x1fa0
+#define MT6359P_VSIM1_ANA_CON0 0x1fa2
+#define MT6359P_VSIM2_ANA_CON0 0x1fa6
+#define MT6359P_VIO28_ANA_CON0 0x1faa
+#define MT6359P_VIBR_ANA_CON0 0x1fae
+#define MT6359P_VFE28_ELR_4 0x1fc0
+#define MT6359P_VRF18_ANA_CON0 0x2008
+#define MT6359P_VEFUSE_ANA_CON0 0x200c
+#define MT6359P_VCN18_ANA_CON0 0x2010
+#define MT6359P_VCAMIO_ANA_CON0 0x2014
+#define MT6359P_VAUD18_ANA_CON0 0x2018
+#define MT6359P_VIO18_ANA_CON0 0x201c
+#define MT6359P_VM18_ANA_CON0 0x2020
+#define MT6359P_VUFS_ANA_CON0 0x2024
+#define MT6359P_VRF12_ANA_CON0 0x202a
+#define MT6359P_VCN13_ANA_CON0 0x202e
+#define MT6359P_VA09_ANA_CON0 0x2032
+#define MT6359P_VRF18_ELR_3 0x204e
+#define MT6359P_VXO22_ANA_CON0 0x2088
+#define MT6359P_VRFCK_ANA_CON0 0x208c
+#define MT6359P_VBBCK_ANA_CON0 0x2096
+
+#define MT6359P_RG_BUCK_VCORE_VOSEL_ADDR MT6359P_BUCK_VCORE_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR MT6359P_BUCK_VGPU11_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK 0x7F
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT 4
+#define MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_ELR
+#define MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_ELR
+#define MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_ELR
+#define MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_ELR
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR MT6359P_LDO_VEMC_ELR_0
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_MASK 0xF
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT 0
+#define MT6359P_RG_LDO_VFE28_EN_ADDR MT6359P_LDO_VFE28_CON0
+#define MT6359P_DA_VFE28_B_EN_ADDR MT6359P_LDO_VFE28_MON
+#define MT6359P_RG_LDO_VXO22_EN_ADDR MT6359P_LDO_VXO22_CON0
+#define MT6359P_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359P_DA_VXO22_B_EN_ADDR MT6359P_LDO_VXO22_MON
+#define MT6359P_RG_LDO_VRF18_EN_ADDR MT6359P_LDO_VRF18_CON0
+#define MT6359P_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359P_DA_VRF18_B_EN_ADDR MT6359P_LDO_VRF18_MON
+#define MT6359P_RG_LDO_VRF12_EN_ADDR MT6359P_LDO_VRF12_CON0
+#define MT6359P_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359P_DA_VRF12_B_EN_ADDR MT6359P_LDO_VRF12_MON
+#define MT6359P_RG_LDO_VEFUSE_EN_ADDR MT6359P_LDO_VEFUSE_CON0
+#define MT6359P_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359P_DA_VEFUSE_B_EN_ADDR MT6359P_LDO_VEFUSE_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_0_ADDR MT6359P_LDO_VCN33_1_CON0
+#define MT6359P_DA_VCN33_1_B_EN_ADDR MT6359P_LDO_VCN33_1_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_1_ADDR MT6359P_LDO_VCN33_1_MULTI_SW
+#define MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359P_RG_LDO_VCN33_2_EN_0_ADDR MT6359P_LDO_VCN33_2_CON0
+#define MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359P_DA_VCN33_2_B_EN_ADDR MT6359P_LDO_VCN33_2_MON
+#define MT6359P_RG_LDO_VCN33_2_EN_1_ADDR MT6359P_LDO_VCN33_2_MULTI_SW
+#define MT6359P_RG_LDO_VCN13_EN_ADDR MT6359P_LDO_VCN13_CON0
+#define MT6359P_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359P_DA_VCN13_B_EN_ADDR MT6359P_LDO_VCN13_MON
+#define MT6359P_RG_LDO_VCN18_EN_ADDR MT6359P_LDO_VCN18_CON0
+#define MT6359P_DA_VCN18_B_EN_ADDR MT6359P_LDO_VCN18_MON
+#define MT6359P_RG_LDO_VA09_EN_ADDR MT6359P_LDO_VA09_CON0
+#define MT6359P_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359P_DA_VA09_B_EN_ADDR MT6359P_LDO_VA09_MON
+#define MT6359P_RG_LDO_VCAMIO_EN_ADDR MT6359P_LDO_VCAMIO_CON0
+#define MT6359P_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359P_DA_VCAMIO_B_EN_ADDR MT6359P_LDO_VCAMIO_MON
+#define MT6359P_RG_LDO_VA12_EN_ADDR MT6359P_LDO_VA12_CON0
+#define MT6359P_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359P_DA_VA12_B_EN_ADDR MT6359P_LDO_VA12_MON
+#define MT6359P_RG_LDO_VAUX18_EN_ADDR MT6359P_LDO_VAUX18_CON0
+#define MT6359P_DA_VAUX18_B_EN_ADDR MT6359P_LDO_VAUX18_MON
+#define MT6359P_RG_LDO_VAUD18_EN_ADDR MT6359P_LDO_VAUD18_CON0
+#define MT6359P_DA_VAUD18_B_EN_ADDR MT6359P_LDO_VAUD18_MON
+#define MT6359P_RG_LDO_VIO18_EN_ADDR MT6359P_LDO_VIO18_CON0
+#define MT6359P_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359P_DA_VIO18_B_EN_ADDR MT6359P_LDO_VIO18_MON
+#define MT6359P_RG_LDO_VEMC_EN_ADDR MT6359P_LDO_VEMC_CON0
+#define MT6359P_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359P_DA_VEMC_B_EN_ADDR MT6359P_LDO_VEMC_MON
+#define MT6359P_RG_LDO_VSIM1_EN_ADDR MT6359P_LDO_VSIM1_CON0
+#define MT6359P_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359P_DA_VSIM1_B_EN_ADDR MT6359P_LDO_VSIM1_MON
+#define MT6359P_RG_LDO_VSIM2_EN_ADDR MT6359P_LDO_VSIM2_CON0
+#define MT6359P_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359P_DA_VSIM2_B_EN_ADDR MT6359P_LDO_VSIM2_MON
+#define MT6359P_RG_LDO_VUSB_EN_0_ADDR MT6359P_LDO_VUSB_CON0
+#define MT6359P_DA_VUSB_B_EN_ADDR MT6359P_LDO_VUSB_MON
+#define MT6359P_RG_LDO_VUSB_EN_1_ADDR MT6359P_LDO_VUSB_MULTI_SW
+#define MT6359P_RG_LDO_VRFCK_EN_ADDR MT6359P_LDO_VRFCK_CON0
+#define MT6359P_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359P_DA_VRFCK_B_EN_ADDR MT6359P_LDO_VRFCK_MON
+#define MT6359P_RG_LDO_VBBCK_EN_ADDR MT6359P_LDO_VBBCK_CON0
+#define MT6359P_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359P_DA_VBBCK_B_EN_ADDR MT6359P_LDO_VBBCK_MON
+#define MT6359P_RG_LDO_VBIF28_EN_ADDR MT6359P_LDO_VBIF28_CON0
+#define MT6359P_DA_VBIF28_B_EN_ADDR MT6359P_LDO_VBIF28_MON
+#define MT6359P_RG_LDO_VIBR_EN_ADDR MT6359P_LDO_VIBR_CON0
+#define MT6359P_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359P_DA_VIBR_B_EN_ADDR MT6359P_LDO_VIBR_MON
+#define MT6359P_RG_LDO_VIO28_EN_ADDR MT6359P_LDO_VIO28_CON0
+#define MT6359P_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359P_DA_VIO28_B_EN_ADDR MT6359P_LDO_VIO28_MON
+#define MT6359P_RG_LDO_VM18_EN_ADDR MT6359P_LDO_VM18_CON0
+#define MT6359P_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359P_DA_VM18_B_EN_ADDR MT6359P_LDO_VM18_MON
+#define MT6359P_RG_LDO_VUFS_EN_ADDR MT6359P_LDO_VUFS_CON0
+#define MT6359P_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359P_DA_VUFS_B_EN_ADDR MT6359P_LDO_VUFS_MON
+#define MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359P_LDO_VSRAM_PROC1_CON0
+#define MT6359P_DA_VSRAM_PROC1_B_EN_ADDR MT6359P_LDO_VSRAM_PROC1_MON
+#define MT6359P_DA_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359P_LDO_VSRAM_PROC2_CON0
+#define MT6359P_DA_VSRAM_PROC2_B_EN_ADDR MT6359P_LDO_VSRAM_PROC2_MON
+#define MT6359P_DA_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_CON0
+#define MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_MON
+#define MT6359P_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_MD_EN_ADDR MT6359P_LDO_VSRAM_MD_CON0
+#define MT6359P_DA_VSRAM_MD_B_EN_ADDR MT6359P_LDO_VSRAM_MD_MON
+#define MT6359P_DA_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_VOSEL1
+#define MT6359P_RG_VCN33_1_VOSEL_ADDR MT6359P_VCN33_1_ANA_CON0
+#define MT6359P_RG_VCN33_2_VOSEL_ADDR MT6359P_VCN33_2_ANA_CON0
+#define MT6359P_RG_VEMC_VOSEL_ADDR MT6359P_VEMC_ANA_CON0
+#define MT6359P_RG_VSIM1_VOSEL_ADDR MT6359P_VSIM1_ANA_CON0
+#define MT6359P_RG_VSIM2_VOSEL_ADDR MT6359P_VSIM2_ANA_CON0
+#define MT6359P_RG_VIO28_VOSEL_ADDR MT6359P_VIO28_ANA_CON0
+#define MT6359P_RG_VIBR_VOSEL_ADDR MT6359P_VIBR_ANA_CON0
+#define MT6359P_RG_VRF18_VOSEL_ADDR MT6359P_VRF18_ANA_CON0
+#define MT6359P_RG_VEFUSE_VOSEL_ADDR MT6359P_VEFUSE_ANA_CON0
+#define MT6359P_RG_VCAMIO_VOSEL_ADDR MT6359P_VCAMIO_ANA_CON0
+#define MT6359P_RG_VIO18_VOSEL_ADDR MT6359P_VIO18_ANA_CON0
+#define MT6359P_RG_VM18_VOSEL_ADDR MT6359P_VM18_ANA_CON0
+#define MT6359P_RG_VUFS_VOSEL_ADDR MT6359P_VUFS_ANA_CON0
+#define MT6359P_RG_VRF12_VOSEL_ADDR MT6359P_VRF12_ANA_CON0
+#define MT6359P_RG_VCN13_VOSEL_ADDR MT6359P_VCN13_ANA_CON0
+#define MT6359P_RG_VA09_VOSEL_ADDR MT6359P_VRF18_ELR_3
+#define MT6359P_RG_VA12_VOSEL_ADDR MT6359P_VFE28_ELR_4
+#define MT6359P_RG_VXO22_VOSEL_ADDR MT6359P_VXO22_ANA_CON0
+#define MT6359P_RG_VRFCK_VOSEL_ADDR MT6359P_VRFCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_ADDR MT6359P_VBBCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359P_RG_VBBCK_VOSEL_SHIFT 4
+#define MT6359P_VM_MODE_ADDR MT6359P_TOP_TRAP
+#define MT6359P_TMA_KEY_ADDR MT6359P_TOP_TMA_KEY
+
+#define TMA_KEY 0x9CA6
+
+#endif /* __MFD_MT6359P_REGISTERS_H__ */
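
Some MT6359P registers sit behind the test-mode-access key, so writes to them
must be bracketed by TMA_KEY. A hedged sketch of the sequence (the value
written to VM_MODE is a placeholder, not a documented setting):

        regmap_write(regmap, MT6359P_TMA_KEY_ADDR, TMA_KEY);    /* unlock */
        regmap_write(regmap, MT6359P_VM_MODE_ADDR, mode);       /* placeholder value */
        regmap_write(regmap, MT6359P_TMA_KEY_ADDR, 0);          /* re-lock */
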
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index d678f526e498..b774c3a4bb62 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -1,20 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Flora Fu, MediaTek
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MFD_MT6397_CORE_H__
#define __MFD_MT6397_CORE_H__
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+
+enum chip_id {
+ MT6323_CHIP_ID = 0x23,
+ MT6328_CHIP_ID = 0x30,
+ MT6331_CHIP_ID = 0x20,
+ MT6332_CHIP_ID = 0x20,
+ MT6357_CHIP_ID = 0x57,
+ MT6358_CHIP_ID = 0x58,
+ MT6359_CHIP_ID = 0x59,
+ MT6366_CHIP_ID = 0x66,
+ MT6391_CHIP_ID = 0x91,
+ MT6397_CHIP_ID = 0x97,
+};
+
enum mt6397_irq_numbers {
MT6397_IRQ_SPKL_AB = 0,
MT6397_IRQ_SPKR_AB,
@@ -54,14 +62,20 @@ enum mt6397_irq_numbers {
struct mt6397_chip {
struct device *dev;
struct regmap *regmap;
+ struct notifier_block pm_nb;
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
- u16 wake_mask[2];
- u16 irq_masks_cur[2];
- u16 irq_masks_cache[2];
- u16 int_con[2];
- u16 int_status[2];
+ u16 wake_mask[3];
+ u16 irq_masks_cur[3];
+ u16 irq_masks_cache[3];
+ u16 int_con[3];
+ u16 int_status[3];
+ u16 chip_id;
+ void *irq_data;
};
+int mt6358_irq_init(struct mt6397_chip *chip);
+int mt6397_irq_init(struct mt6397_chip *chip);
+
#endif /* __MFD_MT6397_CORE_H__ */
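
With chip_id recorded in struct mt6397_chip, the core driver can route to the
matching IRQ implementation at probe time. A simplified sketch of that
dispatch (paraphrasing, not quoting, drivers/mfd/mt6397-core.c):

        switch (chip->chip_id) {
        case MT6358_CHIP_ID:
                ret = mt6358_irq_init(chip);    /* newer layout, three mask words */
                break;
        default:
                ret = mt6397_irq_init(chip);    /* legacy two-register layout */
                break;
        }
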
diff --git a/include/linux/mfd/mt6397/registers.h b/include/linux/mfd/mt6397/registers.h
index f23a0a60a877..34d140627a27 100644
--- a/include/linux/mfd/mt6397/registers.h
+++ b/include/linux/mfd/mt6397/registers.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Flora Fu, MediaTek
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MFD_MT6397_REGISTERS_H__
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
new file mode 100644
index 000000000000..27883af44f87
--- /dev/null
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2019 MediaTek Inc.
+ *
+ * Author: Tianping.Fang <tianping.fang@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#ifndef _LINUX_MFD_MT6397_RTC_H_
+#define _LINUX_MFD_MT6397_RTC_H_
+
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define RTC_BBPU 0x0000
+#define RTC_BBPU_CBUSY BIT(6)
+#define RTC_BBPU_KEY (0x43 << 8)
+
+#define RTC_WRTGR_MT6358 0x003a
+#define RTC_WRTGR_MT6397 0x003c
+#define RTC_WRTGR_MT6323 RTC_WRTGR_MT6397
+
+#define RTC_IRQ_STA 0x0002
+#define RTC_IRQ_STA_AL BIT(0)
+#define RTC_IRQ_STA_LP BIT(3)
+
+#define RTC_IRQ_EN 0x0004
+#define RTC_IRQ_EN_AL BIT(0)
+#define RTC_IRQ_EN_ONESHOT BIT(2)
+#define RTC_IRQ_EN_LP BIT(3)
+#define RTC_IRQ_EN_ONESHOT_AL (RTC_IRQ_EN_ONESHOT | RTC_IRQ_EN_AL)
+
+#define RTC_AL_MASK 0x0008
+#define RTC_AL_MASK_DOW BIT(4)
+
+#define RTC_TC_SEC 0x000a
+#define RTC_TC_MTH_MASK 0x000f
+/* Min, Hour, Dom... register offsets, relative to RTC_TC_SEC */
+#define RTC_OFFSET_SEC 0
+#define RTC_OFFSET_MIN 1
+#define RTC_OFFSET_HOUR 2
+#define RTC_OFFSET_DOM 3
+#define RTC_OFFSET_DOW 4
+#define RTC_OFFSET_MTH 5
+#define RTC_OFFSET_YEAR 6
+#define RTC_OFFSET_COUNT 7
+
+#define RTC_AL_SEC 0x0018
+
+#define RTC_AL_SEC_MASK 0x003f
+#define RTC_AL_MIN_MASK 0x003f
+#define RTC_AL_HOU_MASK 0x001f
+#define RTC_AL_DOM_MASK 0x001f
+#define RTC_AL_DOW_MASK 0x0007
+#define RTC_AL_MTH_MASK 0x000f
+#define RTC_AL_YEA_MASK 0x007f
+
+#define RTC_PDN2 0x002e
+#define RTC_PDN2_PWRON_ALARM BIT(4)
+
+#define MTK_RTC_POLL_DELAY_US 10
+#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+
+struct mtk_rtc_data {
+ u32 wrtgr;
+};
+
+struct mt6397_rtc {
+ struct rtc_device *rtc_dev;
+
+ /* Protect register access from multiple tasks */
+ struct mutex lock;
+ struct regmap *regmap;
+ int irq;
+ u32 addr_base;
+ const struct mtk_rtc_data *data;
+};
+
+#endif /* _LINUX_MFD_MT6397_RTC_H_ */
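
RTC writes on these PMICs only commit once the per-chip WRTGR register is
poked and CBUSY clears; the poll constants above bound that wait. A sketch of
the trigger sequence using the standard regmap helpers:

        unsigned int val;

        regmap_write(rtc->regmap, rtc->addr_base + rtc->data->wrtgr, 1);
        regmap_read_poll_timeout(rtc->regmap, rtc->addr_base + RTC_BBPU, val,
                                 !(val & RTC_BBPU_CBUSY),
                                 MTK_RTC_POLL_DELAY_US, MTK_RTC_POLL_TIMEOUT);
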
diff --git a/include/linux/mfd/mxs-lradc.h b/include/linux/mfd/mxs-lradc.h
index 661a4521f723..ada3d81ee277 100644
--- a/include/linux/mfd/mxs-lradc.h
+++ b/include/linux/mfd/mxs-lradc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Freescale MXS Low Resolution Analog-to-Digital Converter driver
*
@@ -5,16 +6,6 @@
* Copyright (c) 2016 Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
*
* Author: Marek Vasut <marex@denx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MFD_MXS_LRADC_H
diff --git a/include/linux/mfd/nct6694.h b/include/linux/mfd/nct6694.h
new file mode 100644
index 000000000000..6eb9be2cd4a0
--- /dev/null
+++ b/include/linux/mfd/nct6694.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ *
+ * Nuvoton NCT6694 USB transaction and data structure.
+ */
+
+#ifndef __MFD_NCT6694_H
+#define __MFD_NCT6694_H
+
+#define NCT6694_VENDOR_ID 0x0416
+#define NCT6694_PRODUCT_ID 0x200B
+#define NCT6694_INT_IN_EP 0x81
+#define NCT6694_BULK_IN_EP 0x02
+#define NCT6694_BULK_OUT_EP 0x03
+
+#define NCT6694_HCTRL_SET 0x40
+#define NCT6694_HCTRL_GET 0x80
+
+#define NCT6694_URB_TIMEOUT 1000
+
+enum nct6694_irq_id {
+ NCT6694_IRQ_GPIO0 = 0,
+ NCT6694_IRQ_GPIO1,
+ NCT6694_IRQ_GPIO2,
+ NCT6694_IRQ_GPIO3,
+ NCT6694_IRQ_GPIO4,
+ NCT6694_IRQ_GPIO5,
+ NCT6694_IRQ_GPIO6,
+ NCT6694_IRQ_GPIO7,
+ NCT6694_IRQ_GPIO8,
+ NCT6694_IRQ_GPIO9,
+ NCT6694_IRQ_GPIOA,
+ NCT6694_IRQ_GPIOB,
+ NCT6694_IRQ_GPIOC,
+ NCT6694_IRQ_GPIOD,
+ NCT6694_IRQ_GPIOE,
+ NCT6694_IRQ_GPIOF,
+ NCT6694_IRQ_CAN0,
+ NCT6694_IRQ_CAN1,
+ NCT6694_IRQ_RTC,
+ NCT6694_NR_IRQS,
+};
+
+enum nct6694_response_err_status {
+ NCT6694_NO_ERROR = 0,
+ NCT6694_FORMAT_ERROR,
+ NCT6694_RESERVED1,
+ NCT6694_RESERVED2,
+ NCT6694_NOT_SUPPORT_ERROR,
+ NCT6694_NO_RESPONSE_ERROR,
+ NCT6694_TIMEOUT_ERROR,
+ NCT6694_PENDING,
+};
+
+struct __packed nct6694_cmd_header {
+ u8 rsv1;
+ u8 mod;
+ union __packed {
+ __le16 offset;
+ struct __packed {
+ u8 cmd;
+ u8 sel;
+ };
+ };
+ u8 hctrl;
+ u8 rsv2;
+ __le16 len;
+};
+
+struct __packed nct6694_response_header {
+ u8 sequence_id;
+ u8 sts;
+ u8 reserved[4];
+ __le16 len;
+};
+
+union __packed nct6694_usb_msg {
+ struct nct6694_cmd_header cmd_header;
+ struct nct6694_response_header response_header;
+};
+
+struct nct6694 {
+ struct device *dev;
+ struct ida gpio_ida;
+ struct ida i2c_ida;
+ struct ida canfd_ida;
+ struct ida wdt_ida;
+ struct irq_domain *domain;
+ struct mutex access_lock;
+ spinlock_t irq_lock;
+ struct urb *int_in_urb;
+ struct usb_device *udev;
+ union nct6694_usb_msg *usb_msg;
+ __le32 *int_buffer;
+ unsigned int irq_enable;
+};
+
+int nct6694_read_msg(struct nct6694 *nct6694, const struct nct6694_cmd_header *cmd_hd, void *buf);
+int nct6694_write_msg(struct nct6694 *nct6694, const struct nct6694_cmd_header *cmd_hd, void *buf);
+
+#endif
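
Child drivers talk to the NCT6694 by filling a command header and handing it,
together with a payload buffer, to the read/write helpers declared above. A
hypothetical GET transaction; the mod and offset values are placeholders, not
real module IDs:

        u8 buf[16];
        struct nct6694_cmd_header cmd = {
                .mod    = 0x01,                         /* placeholder module ID */
                .offset = cpu_to_le16(0x0000),          /* placeholder offset */
                .hctrl  = NCT6694_HCTRL_GET,
                .len    = cpu_to_le16(sizeof(buf)),
        };
        int err = nct6694_read_msg(nct6694, &cmd, buf);
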
diff --git a/include/linux/mfd/ntxec.h b/include/linux/mfd/ntxec.h
new file mode 100644
index 000000000000..e5880c346da9
--- /dev/null
+++ b/include/linux/mfd/ntxec.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Jonathan Neuschäfer
+ *
+ * Register access and version information for the Netronix embedded
+ * controller.
+ */
+
+#ifndef NTXEC_H
+#define NTXEC_H
+
+#include <linux/types.h>
+
+struct device;
+struct regmap;
+
+struct ntxec {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+/*
+ * Some registers, such as the battery status register (0x41), are in
+ * big-endian, but others only have eight significant bits, which are in the
+ * first byte transmitted over I2C (the MSB of the big-endian value).
+ * This convenience function converts an 8-bit value to 16-bit for use in the
+ * second kind of register.
+ */
+static inline u16 ntxec_reg8(u8 value)
+{
+ return value << 8;
+}
+
+/* Known firmware versions */
+#define NTXEC_VERSION_KOBO_AURA 0xd726 /* found in Kobo Aura */
+#define NTXEC_VERSION_TOLINO_SHINE2 0xf110 /* found in Tolino Shine 2 HD */
+#define NTXEC_VERSION_TOLINO_VISION 0xe135 /* found in Tolino Vision, contains RTC, ADC, PWM, home pad */
+#endif
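
For the 8-bit-style registers the significant byte must travel first on the
wire, which is exactly what ntxec_reg8() arranges. A hypothetical write
(register number 0x2a is invented for illustration):

        /* Place the 8 significant bits in the first (MSB) byte on the wire */
        int err = regmap_write(ec->regmap, 0x2a, ntxec_reg8(0x40));
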
diff --git a/include/linux/mfd/ocelot.h b/include/linux/mfd/ocelot.h
new file mode 100644
index 000000000000..dd72073d2d4f
--- /dev/null
+++ b/include/linux/mfd/ocelot.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2022 Innovative Advantage Inc. */
+
+#ifndef _LINUX_MFD_OCELOT_H
+#define _LINUX_MFD_OCELOT_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct resource;
+
+static inline struct regmap *
+ocelot_regmap_from_resource_optional(struct platform_device *pdev,
+ unsigned int index,
+ const struct regmap_config *config)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *regs;
+
+ /*
+ * Don't use _get_and_ioremap_resource() here, since it prints an
+ * "invalid resource" error that would only add confusion.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (res) {
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+ return devm_regmap_init_mmio(dev, regs, config);
+ }
+
+ /*
+ * Fall back to using REG and getting the resource from the parent
+ * device, which is possible in an MFD configuration.
+ */
+ if (dev->parent) {
+ res = platform_get_resource(pdev, IORESOURCE_REG, index);
+ if (!res)
+ return NULL;
+
+ return dev_get_regmap(dev->parent, res->name);
+ }
+
+ return NULL;
+}
+
+static inline struct regmap *
+ocelot_regmap_from_resource(struct platform_device *pdev, unsigned int index,
+ const struct regmap_config *config)
+{
+ struct regmap *map;
+
+ map = ocelot_regmap_from_resource_optional(pdev, index, config);
+ return map ?: ERR_PTR(-ENOENT);
+}
+
+#endif
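
Typical probe-time use is a one-liner plus an error check; a sketch, assuming
the caller supplies its own regmap_config:

        struct regmap *map;

        map = ocelot_regmap_from_resource(pdev, 0, &config);
        if (IS_ERR(map))
                return PTR_ERR(map);
        /* A missing resource surfaces as -ENOENT here, never as NULL */
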
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 6dec43826303..dabcc0dea802 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* TI Palmas
*
@@ -5,12 +6,6 @@
*
* Author: Graeme Gregory <gg@slimlogic.co.uk>
* Author: Ian Lartey <ian@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_PALMAS_H
@@ -20,8 +15,7 @@
#include <linux/leds.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
-#include <linux/extcon.h>
-#include <linux/of_gpio.h>
+#include <linux/extcon-provider.h>
#include <linux/usb/phy_companion.h>
#define PALMAS_NUM_CLIENTS 3
@@ -104,8 +98,8 @@ struct palmas_sleep_requestor_info {
};
struct palmas_regs_info {
- char *name;
- char *sname;
+ const char *name;
+ const char *sname;
u8 vsel_addr;
u8 ctrl_addr;
u8 tstep_addr;
@@ -134,12 +128,6 @@ struct palmas_pmic_driver_data {
struct regulator_config config);
};
-struct palmas_adc_wakeup_property {
- int adc_channel_number;
- int adc_high_threshold;
- int adc_low_threshold;
-};
-
struct palmas_gpadc_platform_data {
/* Channel 3 current source is only enabled during conversion */
int ch3_current; /* 0: off; 1: 10uA; 2: 400uA; 3: 800 uA */
@@ -158,8 +146,6 @@ struct palmas_gpadc_platform_data {
int start_polarity;
int auto_conversion_period_ms;
- struct palmas_adc_wakeup_property *adc_wakeup1_data;
- struct palmas_adc_wakeup_property *adc_wakeup2_data;
};
struct palmas_reg_init {
@@ -553,7 +539,6 @@ struct palmas_pmic {
struct palmas *palmas;
struct device *dev;
struct regulator_desc desc[PALMAS_NUM_REGS];
- struct regulator_dev *rdev[PALMAS_NUM_REGS];
struct mutex mutex;
int smps123;
@@ -3733,6 +3718,9 @@ enum usb_irq_events {
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
+#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
+
/* Registers for function RESOURCE */
#define TPS65917_REGEN1_CTRL 0x2
#define TPS65917_PLLEN_CTRL 0x3
diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h
deleted file mode 100644
index b35e62801ffa..000000000000
--- a/include/linux/mfd/pcf50633/adc.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * adc.h -- Driver for NXP PCF50633 ADC
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __LINUX_MFD_PCF50633_ADC_H
-#define __LINUX_MFD_PCF50633_ADC_H
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/platform_device.h>
-
-/* ADC Registers */
-#define PCF50633_REG_ADCC3 0x52
-#define PCF50633_REG_ADCC2 0x53
-#define PCF50633_REG_ADCC1 0x54
-#define PCF50633_REG_ADCS1 0x55
-#define PCF50633_REG_ADCS2 0x56
-#define PCF50633_REG_ADCS3 0x57
-
-#define PCF50633_ADCC1_ADCSTART 0x01
-#define PCF50633_ADCC1_RES_8BIT 0x02
-#define PCF50633_ADCC1_RES_10BIT 0x00
-#define PCF50633_ADCC1_AVERAGE_NO 0x00
-#define PCF50633_ADCC1_AVERAGE_4 0x04
-#define PCF50633_ADCC1_AVERAGE_8 0x08
-#define PCF50633_ADCC1_AVERAGE_16 0x0c
-#define PCF50633_ADCC1_MUX_BATSNS_RES 0x00
-#define PCF50633_ADCC1_MUX_BATSNS_SUBTR 0x10
-#define PCF50633_ADCC1_MUX_ADCIN2_RES 0x20
-#define PCF50633_ADCC1_MUX_ADCIN2_SUBTR 0x30
-#define PCF50633_ADCC1_MUX_BATTEMP 0x60
-#define PCF50633_ADCC1_MUX_ADCIN1 0x70
-#define PCF50633_ADCC1_AVERAGE_MASK 0x0c
-#define PCF50633_ADCC1_ADCMUX_MASK 0xf0
-
-#define PCF50633_ADCC2_RATIO_NONE 0x00
-#define PCF50633_ADCC2_RATIO_BATTEMP 0x01
-#define PCF50633_ADCC2_RATIO_ADCIN1 0x02
-#define PCF50633_ADCC2_RATIO_BOTH 0x03
-#define PCF50633_ADCC2_RATIOSETTL_100US 0x04
-
-#define PCF50633_ADCC3_ACCSW_EN 0x01
-#define PCF50633_ADCC3_NTCSW_EN 0x04
-#define PCF50633_ADCC3_RES_DIV_TWO 0x10
-#define PCF50633_ADCC3_RES_DIV_THREE 0x00
-
-#define PCF50633_ADCS3_REF_NTCSW 0x00
-#define PCF50633_ADCS3_REF_ACCSW 0x10
-#define PCF50633_ADCS3_REF_2V0 0x20
-#define PCF50633_ADCS3_REF_VISA 0x30
-#define PCF50633_ADCS3_REF_2V0_2 0x70
-#define PCF50633_ADCS3_ADCRDY 0x80
-
-#define PCF50633_ADCS3_ADCDAT1L_MASK 0x03
-#define PCF50633_ADCS3_ADCDAT2L_MASK 0x0c
-#define PCF50633_ADCS3_ADCDAT2L_SHIFT 2
-#define PCF50633_ASCS3_REF_MASK 0x70
-
-extern int
-pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
- void (*callback)(struct pcf50633 *, void *, int),
- void *callback_param);
-extern int
-pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg);
-
-#endif /* __LINUX_PCF50633_ADC_H */
diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h
deleted file mode 100644
index 83747e217b27..000000000000
--- a/include/linux/mfd/pcf50633/backlight.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * PCF50633 backlight device driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#ifndef __LINUX_MFD_PCF50633_BACKLIGHT
-#define __LINUX_MFD_PCF50633_BACKLIGHT
-
-/*
-* @default_brightness: Backlight brightness is initialized to this value
-*
-* Brightness to be used after the driver has been probed.
-* Valid range 0-63.
-*
-* @default_brightness_limit: The actual brightness is limited by this value
-*
-* Brightness limit to be used after the driver has been probed. This is useful
-* when it is not known how much power is available for the backlight during
-* probe.
-* Valid range 0-63. Can be changed later with pcf50633_bl_set_brightness_limit.
-*
-* @ramp_time: Display ramp time when changing brightness
-*
-* When changing the backlights brightness the change is not instant, instead
-* it fades smooth from one state to another. This value specifies how long
-* the fade should take. The lower the value the higher the fade time.
-* Valid range 0-255
-*/
-struct pcf50633_bl_platform_data {
- unsigned int default_brightness;
- unsigned int default_brightness_limit;
- uint8_t ramp_time;
-};
-
-
-struct pcf50633;
-
-int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit);
-
-#endif
-
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
deleted file mode 100644
index a80840752b4c..000000000000
--- a/include/linux/mfd/pcf50633/core.h
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * core.h -- Core driver for NXP PCF50633
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __LINUX_MFD_PCF50633_CORE_H
-#define __LINUX_MFD_PCF50633_CORE_H
-
-#include <linux/i2c.h>
-#include <linux/workqueue.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/power_supply.h>
-#include <linux/mfd/pcf50633/backlight.h>
-
-struct pcf50633;
-struct regmap;
-
-#define PCF50633_NUM_REGULATORS 11
-
-struct pcf50633_platform_data {
- struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS];
-
- char **batteries;
- int num_batteries;
-
- /*
- * Should be set accordingly to the reference resistor used, see
- * I_{ch(ref)} charger reference current in the pcf50633 User
- * Manual.
- */
- int charger_reference_current_ma;
-
- /* Callbacks */
- void (*probe_done)(struct pcf50633 *);
- void (*mbc_event_callback)(struct pcf50633 *, int);
- void (*regulator_registered)(struct pcf50633 *, int);
- void (*force_shutdown)(struct pcf50633 *);
-
- u8 resumers[5];
-
- struct pcf50633_bl_platform_data *backlight_data;
-};
-
-struct pcf50633_irq {
- void (*handler) (int, void *);
- void *data;
-};
-
-int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
- void (*handler) (int, void *), void *data);
-int pcf50633_free_irq(struct pcf50633 *pcf, int irq);
-
-int pcf50633_irq_mask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq);
-
-int pcf50633_read_block(struct pcf50633 *, u8 reg,
- int nr_regs, u8 *data);
-int pcf50633_write_block(struct pcf50633 *pcf, u8 reg,
- int nr_regs, u8 *data);
-u8 pcf50633_reg_read(struct pcf50633 *, u8 reg);
-int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val);
-
-int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val);
-int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits);
-
-/* Interrupt registers */
-
-#define PCF50633_REG_INT1 0x02
-#define PCF50633_REG_INT2 0x03
-#define PCF50633_REG_INT3 0x04
-#define PCF50633_REG_INT4 0x05
-#define PCF50633_REG_INT5 0x06
-
-#define PCF50633_REG_INT1M 0x07
-#define PCF50633_REG_INT2M 0x08
-#define PCF50633_REG_INT3M 0x09
-#define PCF50633_REG_INT4M 0x0a
-#define PCF50633_REG_INT5M 0x0b
-
-enum {
- /* Chip IRQs */
- PCF50633_IRQ_ADPINS,
- PCF50633_IRQ_ADPREM,
- PCF50633_IRQ_USBINS,
- PCF50633_IRQ_USBREM,
- PCF50633_IRQ_RESERVED1,
- PCF50633_IRQ_RESERVED2,
- PCF50633_IRQ_ALARM,
- PCF50633_IRQ_SECOND,
- PCF50633_IRQ_ONKEYR,
- PCF50633_IRQ_ONKEYF,
- PCF50633_IRQ_EXTON1R,
- PCF50633_IRQ_EXTON1F,
- PCF50633_IRQ_EXTON2R,
- PCF50633_IRQ_EXTON2F,
- PCF50633_IRQ_EXTON3R,
- PCF50633_IRQ_EXTON3F,
- PCF50633_IRQ_BATFULL,
- PCF50633_IRQ_CHGHALT,
- PCF50633_IRQ_THLIMON,
- PCF50633_IRQ_THLIMOFF,
- PCF50633_IRQ_USBLIMON,
- PCF50633_IRQ_USBLIMOFF,
- PCF50633_IRQ_ADCRDY,
- PCF50633_IRQ_ONKEY1S,
- PCF50633_IRQ_LOWSYS,
- PCF50633_IRQ_LOWBAT,
- PCF50633_IRQ_HIGHTMP,
- PCF50633_IRQ_AUTOPWRFAIL,
- PCF50633_IRQ_DWN1PWRFAIL,
- PCF50633_IRQ_DWN2PWRFAIL,
- PCF50633_IRQ_LEDPWRFAIL,
- PCF50633_IRQ_LEDOVP,
- PCF50633_IRQ_LDO1PWRFAIL,
- PCF50633_IRQ_LDO2PWRFAIL,
- PCF50633_IRQ_LDO3PWRFAIL,
- PCF50633_IRQ_LDO4PWRFAIL,
- PCF50633_IRQ_LDO5PWRFAIL,
- PCF50633_IRQ_LDO6PWRFAIL,
- PCF50633_IRQ_HCLDOPWRFAIL,
- PCF50633_IRQ_HCLDOOVL,
-
- /* Always last */
- PCF50633_NUM_IRQ,
-};
-
-struct pcf50633 {
- struct device *dev;
- struct regmap *regmap;
-
- struct pcf50633_platform_data *pdata;
- int irq;
- struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ];
- struct work_struct irq_work;
- struct workqueue_struct *work_queue;
- struct mutex lock;
-
- u8 mask_regs[5];
-
- u8 suspend_irq_masks[5];
- u8 resume_reason[5];
- int is_suspended;
-
- int onkey1s_held;
-
- struct platform_device *rtc_pdev;
- struct platform_device *mbc_pdev;
- struct platform_device *adc_pdev;
- struct platform_device *input_pdev;
- struct platform_device *bl_pdev;
- struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS];
-};
-
-enum pcf50633_reg_int1 {
- PCF50633_INT1_ADPINS = 0x01, /* Adapter inserted */
- PCF50633_INT1_ADPREM = 0x02, /* Adapter removed */
- PCF50633_INT1_USBINS = 0x04, /* USB inserted */
- PCF50633_INT1_USBREM = 0x08, /* USB removed */
- /* reserved */
- PCF50633_INT1_ALARM = 0x40, /* RTC alarm time is reached */
- PCF50633_INT1_SECOND = 0x80, /* RTC periodic second interrupt */
-};
-
-enum pcf50633_reg_int2 {
- PCF50633_INT2_ONKEYR = 0x01, /* ONKEY rising edge */
- PCF50633_INT2_ONKEYF = 0x02, /* ONKEY falling edge */
- PCF50633_INT2_EXTON1R = 0x04, /* EXTON1 rising edge */
- PCF50633_INT2_EXTON1F = 0x08, /* EXTON1 falling edge */
- PCF50633_INT2_EXTON2R = 0x10, /* EXTON2 rising edge */
- PCF50633_INT2_EXTON2F = 0x20, /* EXTON2 falling edge */
- PCF50633_INT2_EXTON3R = 0x40, /* EXTON3 rising edge */
- PCF50633_INT2_EXTON3F = 0x80, /* EXTON3 falling edge */
-};
-
-enum pcf50633_reg_int3 {
- PCF50633_INT3_BATFULL = 0x01, /* Battery full */
- PCF50633_INT3_CHGHALT = 0x02, /* Charger halt */
- PCF50633_INT3_THLIMON = 0x04,
- PCF50633_INT3_THLIMOFF = 0x08,
- PCF50633_INT3_USBLIMON = 0x10,
- PCF50633_INT3_USBLIMOFF = 0x20,
- PCF50633_INT3_ADCRDY = 0x40, /* ADC result ready */
- PCF50633_INT3_ONKEY1S = 0x80, /* ONKEY pressed 1 second */
-};
-
-enum pcf50633_reg_int4 {
- PCF50633_INT4_LOWSYS = 0x01,
- PCF50633_INT4_LOWBAT = 0x02,
- PCF50633_INT4_HIGHTMP = 0x04,
- PCF50633_INT4_AUTOPWRFAIL = 0x08,
- PCF50633_INT4_DWN1PWRFAIL = 0x10,
- PCF50633_INT4_DWN2PWRFAIL = 0x20,
- PCF50633_INT4_LEDPWRFAIL = 0x40,
- PCF50633_INT4_LEDOVP = 0x80,
-};
-
-enum pcf50633_reg_int5 {
- PCF50633_INT5_LDO1PWRFAIL = 0x01,
- PCF50633_INT5_LDO2PWRFAIL = 0x02,
- PCF50633_INT5_LDO3PWRFAIL = 0x04,
- PCF50633_INT5_LDO4PWRFAIL = 0x08,
- PCF50633_INT5_LDO5PWRFAIL = 0x10,
- PCF50633_INT5_LDO6PWRFAIL = 0x20,
- PCF50633_INT5_HCLDOPWRFAIL = 0x40,
- PCF50633_INT5_HCLDOOVL = 0x80,
-};
-
-/* misc. registers */
-#define PCF50633_REG_OOCSHDWN 0x0c
-
-/* LED registers */
-#define PCF50633_REG_LEDOUT 0x28
-#define PCF50633_REG_LEDENA 0x29
-#define PCF50633_REG_LEDCTL 0x2a
-#define PCF50633_REG_LEDDIM 0x2b
-
-static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
-{
- return dev_get_drvdata(dev);
-}
-
-int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
-void pcf50633_irq_free(struct pcf50633 *pcf);
-#ifdef CONFIG_PM
-int pcf50633_irq_suspend(struct pcf50633 *pcf);
-int pcf50633_irq_resume(struct pcf50633 *pcf);
-#endif
-
-#endif
diff --git a/include/linux/mfd/pcf50633/gpio.h b/include/linux/mfd/pcf50633/gpio.h
deleted file mode 100644
index a42b845efc54..000000000000
--- a/include/linux/mfd/pcf50633/gpio.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * gpio.h -- GPIO driver for NXP PCF50633
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __LINUX_MFD_PCF50633_GPIO_H
-#define __LINUX_MFD_PCF50633_GPIO_H
-
-#include <linux/mfd/pcf50633/core.h>
-
-#define PCF50633_GPIO1 1
-#define PCF50633_GPIO2 2
-#define PCF50633_GPIO3 3
-#define PCF50633_GPO 4
-
-#define PCF50633_REG_GPIO1CFG 0x14
-#define PCF50633_REG_GPIO2CFG 0x15
-#define PCF50633_REG_GPIO3CFG 0x16
-#define PCF50633_REG_GPOCFG 0x17
-
-#define PCF50633_GPOCFG_GPOSEL_MASK 0x07
-
-enum pcf50633_reg_gpocfg {
- PCF50633_GPOCFG_GPOSEL_0 = 0x00,
- PCF50633_GPOCFG_GPOSEL_LED_NFET = 0x01,
- PCF50633_GPOCFG_GPOSEL_SYSxOK = 0x02,
- PCF50633_GPOCFG_GPOSEL_CLK32K = 0x03,
- PCF50633_GPOCFG_GPOSEL_ADAPUSB = 0x04,
- PCF50633_GPOCFG_GPOSEL_USBxOK = 0x05,
- PCF50633_GPOCFG_GPOSEL_ACTPH4 = 0x06,
- PCF50633_GPOCFG_GPOSEL_1 = 0x07,
- PCF50633_GPOCFG_GPOSEL_INVERSE = 0x08,
-};
-
-int pcf50633_gpio_set(struct pcf50633 *pcf, int gpio, u8 val);
-u8 pcf50633_gpio_get(struct pcf50633 *pcf, int gpio);
-
-int pcf50633_gpio_invert_set(struct pcf50633 *, int gpio, int invert);
-int pcf50633_gpio_invert_get(struct pcf50633 *pcf, int gpio);
-
-int pcf50633_gpio_power_supply_set(struct pcf50633 *,
- int gpio, int regulator, int on);
-#endif /* __LINUX_MFD_PCF50633_GPIO_H */
-
-
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
deleted file mode 100644
index df4f5fa88de3..000000000000
--- a/include/linux/mfd/pcf50633/mbc.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * mbc.h -- Driver for NXP PCF50633 Main Battery Charger
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __LINUX_MFD_PCF50633_MBC_H
-#define __LINUX_MFD_PCF50633_MBC_H
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/platform_device.h>
-
-#define PCF50633_REG_MBCC1 0x43
-#define PCF50633_REG_MBCC2 0x44
-#define PCF50633_REG_MBCC3 0x45
-#define PCF50633_REG_MBCC4 0x46
-#define PCF50633_REG_MBCC5 0x47
-#define PCF50633_REG_MBCC6 0x48
-#define PCF50633_REG_MBCC7 0x49
-#define PCF50633_REG_MBCC8 0x4a
-#define PCF50633_REG_MBCS1 0x4b
-#define PCF50633_REG_MBCS2 0x4c
-#define PCF50633_REG_MBCS3 0x4d
-
-enum pcf50633_reg_mbcc1 {
- PCF50633_MBCC1_CHGENA = 0x01, /* Charger enable */
- PCF50633_MBCC1_AUTOSTOP = 0x02,
- PCF50633_MBCC1_AUTORES = 0x04, /* automatic resume */
- PCF50633_MBCC1_RESUME = 0x08, /* explicit resume cmd */
- PCF50633_MBCC1_RESTART = 0x10, /* restart charging */
- PCF50633_MBCC1_PREWDTIME_60M = 0x20, /* max. precharging time */
- PCF50633_MBCC1_WDTIME_1H = 0x00,
- PCF50633_MBCC1_WDTIME_2H = 0x40,
- PCF50633_MBCC1_WDTIME_4H = 0x80,
- PCF50633_MBCC1_WDTIME_6H = 0xc0,
-};
-#define PCF50633_MBCC1_WDTIME_MASK 0xc0
-
-enum pcf50633_reg_mbcc2 {
- PCF50633_MBCC2_VBATCOND_2V7 = 0x00,
- PCF50633_MBCC2_VBATCOND_2V85 = 0x01,
- PCF50633_MBCC2_VBATCOND_3V0 = 0x02,
- PCF50633_MBCC2_VBATCOND_3V15 = 0x03,
- PCF50633_MBCC2_VMAX_4V = 0x00,
- PCF50633_MBCC2_VMAX_4V20 = 0x28,
- PCF50633_MBCC2_VRESDEBTIME_64S = 0x80, /* debounce time (32/64sec) */
-};
-
-enum pcf50633_reg_mbcc7 {
- PCF50633_MBCC7_USB_100mA = 0x00,
- PCF50633_MBCC7_USB_500mA = 0x01,
- PCF50633_MBCC7_USB_1000mA = 0x02,
- PCF50633_MBCC7_USB_SUSPEND = 0x03,
- PCF50633_MBCC7_BATTEMP_EN = 0x04,
- PCF50633_MBCC7_BATSYSIMAX_1A6 = 0x00,
- PCF50633_MBCC7_BATSYSIMAX_1A8 = 0x40,
- PCF50633_MBCC7_BATSYSIMAX_2A0 = 0x80,
- PCF50633_MBCC7_BATSYSIMAX_2A2 = 0xc0,
-};
-#define PCF50633_MBCC7_USB_MASK 0x03
-
-enum pcf50633_reg_mbcc8 {
- PCF50633_MBCC8_USBENASUS = 0x10,
-};
-
-enum pcf50633_reg_mbcs1 {
- PCF50633_MBCS1_USBPRES = 0x01,
- PCF50633_MBCS1_USBOK = 0x02,
- PCF50633_MBCS1_ADAPTPRES = 0x04,
- PCF50633_MBCS1_ADAPTOK = 0x08,
- PCF50633_MBCS1_TBAT_OK = 0x00,
- PCF50633_MBCS1_TBAT_ABOVE = 0x10,
- PCF50633_MBCS1_TBAT_BELOW = 0x20,
- PCF50633_MBCS1_TBAT_UNDEF = 0x30,
- PCF50633_MBCS1_PREWDTEXP = 0x40,
- PCF50633_MBCS1_WDTEXP = 0x80,
-};
-
-enum pcf50633_reg_mbcs2_mbcmod {
- PCF50633_MBCS2_MBC_PLAY = 0x00,
- PCF50633_MBCS2_MBC_USB_PRE = 0x01,
- PCF50633_MBCS2_MBC_USB_PRE_WAIT = 0x02,
- PCF50633_MBCS2_MBC_USB_FAST = 0x03,
- PCF50633_MBCS2_MBC_USB_FAST_WAIT = 0x04,
- PCF50633_MBCS2_MBC_USB_SUSPEND = 0x05,
- PCF50633_MBCS2_MBC_ADP_PRE = 0x06,
- PCF50633_MBCS2_MBC_ADP_PRE_WAIT = 0x07,
- PCF50633_MBCS2_MBC_ADP_FAST = 0x08,
- PCF50633_MBCS2_MBC_ADP_FAST_WAIT = 0x09,
- PCF50633_MBCS2_MBC_BAT_FULL = 0x0a,
- PCF50633_MBCS2_MBC_HALT = 0x0b,
-};
-#define PCF50633_MBCS2_MBC_MASK 0x0f
-enum pcf50633_reg_mbcs2_chgstat {
- PCF50633_MBCS2_CHGS_NONE = 0x00,
- PCF50633_MBCS2_CHGS_ADAPTER = 0x10,
- PCF50633_MBCS2_CHGS_USB = 0x20,
- PCF50633_MBCS2_CHGS_BOTH = 0x30,
-};
-#define PCF50633_MBCS2_RESSTAT_AUTO 0x40
-
-enum pcf50633_reg_mbcs3 {
- PCF50633_MBCS3_USBLIM_PLAY = 0x01,
- PCF50633_MBCS3_USBLIM_CGH = 0x02,
- PCF50633_MBCS3_TLIM_PLAY = 0x04,
- PCF50633_MBCS3_TLIM_CHG = 0x08,
- PCF50633_MBCS3_ILIM = 0x10, /* 1: Ibat > Icutoff */
- PCF50633_MBCS3_VLIM = 0x20, /* 1: Vbat == Vmax */
- PCF50633_MBCS3_VBATSTAT = 0x40, /* 1: Vbat > Vbatcond */
- PCF50633_MBCS3_VRES = 0x80, /* 1: Vbat > Vth(RES) */
-};
-
-#define PCF50633_MBCC2_VBATCOND_MASK 0x03
-#define PCF50633_MBCC2_VMAX_MASK 0x3c
-
-/* Charger status */
-#define PCF50633_MBC_USB_ONLINE 0x01
-#define PCF50633_MBC_USB_ACTIVE 0x02
-#define PCF50633_MBC_ADAPTER_ONLINE 0x04
-#define PCF50633_MBC_ADAPTER_ACTIVE 0x08
-
-int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
-
-int pcf50633_mbc_get_status(struct pcf50633 *);
-int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
-
-#endif
-
diff --git a/include/linux/mfd/pcf50633/pmic.h b/include/linux/mfd/pcf50633/pmic.h
deleted file mode 100644
index 2d3dbe53b235..000000000000
--- a/include/linux/mfd/pcf50633/pmic.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef __LINUX_MFD_PCF50633_PMIC_H
-#define __LINUX_MFD_PCF50633_PMIC_H
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/platform_device.h>
-
-#define PCF50633_REG_AUTOOUT 0x1a
-#define PCF50633_REG_AUTOENA 0x1b
-#define PCF50633_REG_AUTOCTL 0x1c
-#define PCF50633_REG_AUTOMXC 0x1d
-#define PCF50633_REG_DOWN1OUT 0x1e
-#define PCF50633_REG_DOWN1ENA 0x1f
-#define PCF50633_REG_DOWN1CTL 0x20
-#define PCF50633_REG_DOWN1MXC 0x21
-#define PCF50633_REG_DOWN2OUT 0x22
-#define PCF50633_REG_DOWN2ENA 0x23
-#define PCF50633_REG_DOWN2CTL 0x24
-#define PCF50633_REG_DOWN2MXC 0x25
-#define PCF50633_REG_MEMLDOOUT 0x26
-#define PCF50633_REG_MEMLDOENA 0x27
-#define PCF50633_REG_LDO1OUT 0x2d
-#define PCF50633_REG_LDO1ENA 0x2e
-#define PCF50633_REG_LDO2OUT 0x2f
-#define PCF50633_REG_LDO2ENA 0x30
-#define PCF50633_REG_LDO3OUT 0x31
-#define PCF50633_REG_LDO3ENA 0x32
-#define PCF50633_REG_LDO4OUT 0x33
-#define PCF50633_REG_LDO4ENA 0x34
-#define PCF50633_REG_LDO5OUT 0x35
-#define PCF50633_REG_LDO5ENA 0x36
-#define PCF50633_REG_LDO6OUT 0x37
-#define PCF50633_REG_LDO6ENA 0x38
-#define PCF50633_REG_HCLDOOUT 0x39
-#define PCF50633_REG_HCLDOENA 0x3a
-#define PCF50633_REG_HCLDOOVL 0x40
-
-enum pcf50633_regulator_enable {
- PCF50633_REGULATOR_ON = 0x01,
- PCF50633_REGULATOR_ON_GPIO1 = 0x02,
- PCF50633_REGULATOR_ON_GPIO2 = 0x04,
- PCF50633_REGULATOR_ON_GPIO3 = 0x08,
-};
-#define PCF50633_REGULATOR_ON_MASK 0x0f
-
-enum pcf50633_regulator_phase {
- PCF50633_REGULATOR_ACTPH1 = 0x00,
- PCF50633_REGULATOR_ACTPH2 = 0x10,
- PCF50633_REGULATOR_ACTPH3 = 0x20,
- PCF50633_REGULATOR_ACTPH4 = 0x30,
-};
-#define PCF50633_REGULATOR_ACTPH_MASK 0x30
-
-enum pcf50633_regulator_id {
- PCF50633_REGULATOR_AUTO,
- PCF50633_REGULATOR_DOWN1,
- PCF50633_REGULATOR_DOWN2,
- PCF50633_REGULATOR_LDO1,
- PCF50633_REGULATOR_LDO2,
- PCF50633_REGULATOR_LDO3,
- PCF50633_REGULATOR_LDO4,
- PCF50633_REGULATOR_LDO5,
- PCF50633_REGULATOR_LDO6,
- PCF50633_REGULATOR_HCLDO,
- PCF50633_REGULATOR_MEMLDO,
-};
-#endif
-
diff --git a/include/linux/mfd/pf1550.h b/include/linux/mfd/pf1550.h
new file mode 100644
index 000000000000..7cb2340ff2bd
--- /dev/null
+++ b/include/linux/mfd/pf1550.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Declarations for the PF1550 PMIC
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Robin Gong <yibin.gong@freescale.com>
+ *
+ * Portions Copyright (c) 2025 Savoir-faire Linux Inc.
+ * Samuel Kayode <samuel.kayode@savoirfairelinux.com>
+ */
+
+#ifndef __LINUX_MFD_PF1550_H
+#define __LINUX_MFD_PF1550_H
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+enum pf1550_pmic_reg {
+ /* PMIC regulator part */
+ PF1550_PMIC_REG_DEVICE_ID = 0x00,
+ PF1550_PMIC_REG_OTP_FLAVOR = 0x01,
+ PF1550_PMIC_REG_SILICON_REV = 0x02,
+
+ PF1550_PMIC_REG_INT_CATEGORY = 0x06,
+ PF1550_PMIC_REG_SW_INT_STAT0 = 0x08,
+ PF1550_PMIC_REG_SW_INT_MASK0 = 0x09,
+ PF1550_PMIC_REG_SW_INT_SENSE0 = 0x0a,
+ PF1550_PMIC_REG_SW_INT_STAT1 = 0x0b,
+ PF1550_PMIC_REG_SW_INT_MASK1 = 0x0c,
+ PF1550_PMIC_REG_SW_INT_SENSE1 = 0x0d,
+ PF1550_PMIC_REG_SW_INT_STAT2 = 0x0e,
+ PF1550_PMIC_REG_SW_INT_MASK2 = 0x0f,
+ PF1550_PMIC_REG_SW_INT_SENSE2 = 0x10,
+ PF1550_PMIC_REG_LDO_INT_STAT0 = 0x18,
+ PF1550_PMIC_REG_LDO_INT_MASK0 = 0x19,
+ PF1550_PMIC_REG_LDO_INT_SENSE0 = 0x1a,
+ PF1550_PMIC_REG_TEMP_INT_STAT0 = 0x20,
+ PF1550_PMIC_REG_TEMP_INT_MASK0 = 0x21,
+ PF1550_PMIC_REG_TEMP_INT_SENSE0 = 0x22,
+ PF1550_PMIC_REG_ONKEY_INT_STAT0 = 0x24,
+ PF1550_PMIC_REG_ONKEY_INT_MASK0 = 0x25,
+ PF1550_PMIC_REG_ONKEY_INT_SENSE0 = 0x26,
+ PF1550_PMIC_REG_MISC_INT_STAT0 = 0x28,
+ PF1550_PMIC_REG_MISC_INT_MASK0 = 0x29,
+ PF1550_PMIC_REG_MISC_INT_SENSE0 = 0x2a,
+
+ PF1550_PMIC_REG_COINCELL_CONTROL = 0x30,
+
+ PF1550_PMIC_REG_SW1_VOLT = 0x32,
+ PF1550_PMIC_REG_SW1_STBY_VOLT = 0x33,
+ PF1550_PMIC_REG_SW1_SLP_VOLT = 0x34,
+ PF1550_PMIC_REG_SW1_CTRL = 0x35,
+ PF1550_PMIC_REG_SW1_CTRL1 = 0x36,
+ PF1550_PMIC_REG_SW2_VOLT = 0x38,
+ PF1550_PMIC_REG_SW2_STBY_VOLT = 0x39,
+ PF1550_PMIC_REG_SW2_SLP_VOLT = 0x3a,
+ PF1550_PMIC_REG_SW2_CTRL = 0x3b,
+ PF1550_PMIC_REG_SW2_CTRL1 = 0x3c,
+ PF1550_PMIC_REG_SW3_VOLT = 0x3e,
+ PF1550_PMIC_REG_SW3_STBY_VOLT = 0x3f,
+ PF1550_PMIC_REG_SW3_SLP_VOLT = 0x40,
+ PF1550_PMIC_REG_SW3_CTRL = 0x41,
+ PF1550_PMIC_REG_SW3_CTRL1 = 0x42,
+ PF1550_PMIC_REG_VSNVS_CTRL = 0x48,
+ PF1550_PMIC_REG_VREFDDR_CTRL = 0x4a,
+ PF1550_PMIC_REG_LDO1_VOLT = 0x4c,
+ PF1550_PMIC_REG_LDO1_CTRL = 0x4d,
+ PF1550_PMIC_REG_LDO2_VOLT = 0x4f,
+ PF1550_PMIC_REG_LDO2_CTRL = 0x50,
+ PF1550_PMIC_REG_LDO3_VOLT = 0x52,
+ PF1550_PMIC_REG_LDO3_CTRL = 0x53,
+ PF1550_PMIC_REG_PWRCTRL0 = 0x58,
+ PF1550_PMIC_REG_PWRCTRL1 = 0x59,
+ PF1550_PMIC_REG_PWRCTRL2 = 0x5a,
+ PF1550_PMIC_REG_PWRCTRL3 = 0x5b,
+ PF1550_PMIC_REG_SW1_PWRDN_SEQ = 0x5f,
+ PF1550_PMIC_REG_SW2_PWRDN_SEQ = 0x60,
+ PF1550_PMIC_REG_SW3_PWRDN_SEQ = 0x61,
+ PF1550_PMIC_REG_LDO1_PWRDN_SEQ = 0x62,
+ PF1550_PMIC_REG_LDO2_PWRDN_SEQ = 0x63,
+ PF1550_PMIC_REG_LDO3_PWRDN_SEQ = 0x64,
+ PF1550_PMIC_REG_VREFDDR_PWRDN_SEQ = 0x65,
+
+ PF1550_PMIC_REG_STATE_INFO = 0x67,
+ PF1550_PMIC_REG_I2C_ADDR = 0x68,
+ PF1550_PMIC_REG_IO_DRV0 = 0x69,
+ PF1550_PMIC_REG_IO_DRV1 = 0x6a,
+ PF1550_PMIC_REG_RC_16MHZ = 0x6b,
+ PF1550_PMIC_REG_KEY = 0x6f,
+
+ /* Charger part */
+ PF1550_CHARG_REG_CHG_INT = 0x80,
+ PF1550_CHARG_REG_CHG_INT_MASK = 0x82,
+ PF1550_CHARG_REG_CHG_INT_OK = 0x84,
+ PF1550_CHARG_REG_VBUS_SNS = 0x86,
+ PF1550_CHARG_REG_CHG_SNS = 0x87,
+ PF1550_CHARG_REG_BATT_SNS = 0x88,
+ PF1550_CHARG_REG_CHG_OPER = 0x89,
+ PF1550_CHARG_REG_CHG_TMR = 0x8a,
+ PF1550_CHARG_REG_CHG_EOC_CNFG = 0x8d,
+ PF1550_CHARG_REG_CHG_CURR_CNFG = 0x8e,
+ PF1550_CHARG_REG_BATT_REG = 0x8f,
+ PF1550_CHARG_REG_BATFET_CNFG = 0x91,
+ PF1550_CHARG_REG_THM_REG_CNFG = 0x92,
+ PF1550_CHARG_REG_VBUS_INLIM_CNFG = 0x94,
+ PF1550_CHARG_REG_VBUS_LIN_DPM = 0x95,
+ PF1550_CHARG_REG_USB_PHY_LDO_CNFG = 0x96,
+ PF1550_CHARG_REG_DBNC_DELAY_TIME = 0x98,
+ PF1550_CHARG_REG_CHG_INT_CNFG = 0x99,
+ PF1550_CHARG_REG_THM_ADJ_SETTING = 0x9a,
+ PF1550_CHARG_REG_VBUS2SYS_CNFG = 0x9b,
+ PF1550_CHARG_REG_LED_PWM = 0x9c,
+ PF1550_CHARG_REG_FAULT_BATFET_CNFG = 0x9d,
+ PF1550_CHARG_REG_LED_CNFG = 0x9e,
+ PF1550_CHARG_REG_CHGR_KEY2 = 0x9f,
+
+ PF1550_TEST_REG_FMRADDR = 0xc4,
+ PF1550_TEST_REG_FMRDATA = 0xc5,
+ PF1550_TEST_REG_KEY3 = 0xdf,
+
+ PF1550_PMIC_REG_END = 0xff,
+};
+
+/* One-Time Programmable (OTP) memory */
+enum pf1550_otp_reg {
+ PF1550_OTP_SW1_SW2 = 0x1e,
+ PF1550_OTP_SW2_SW3 = 0x1f,
+};
+
+#define PF1550_DEVICE_ID 0x7c
+
+/* Keys for reading OTP */
+#define PF1550_OTP_PMIC_KEY 0x15
+#define PF1550_OTP_CHGR_KEY 0x50
+#define PF1550_OTP_TEST_KEY 0xab
+
+/* Supported charger modes */
+#define PF1550_CHG_BAT_OFF 1
+#define PF1550_CHG_BAT_ON 2
+
+#define PF1550_CHG_PRECHARGE 0
+#define PF1550_CHG_CONSTANT_CURRENT 1
+#define PF1550_CHG_CONSTANT_VOL 2
+#define PF1550_CHG_EOC 3
+#define PF1550_CHG_DONE 4
+#define PF1550_CHG_TIMER_FAULT 6
+#define PF1550_CHG_SUSPEND 7
+#define PF1550_CHG_OFF_INV 8
+#define PF1550_CHG_BAT_OVER 9
+#define PF1550_CHG_OFF_TEMP 10
+#define PF1550_CHG_LINEAR_ONLY 12
+#define PF1550_CHG_SNS_MASK 0xf
+#define PF1550_CHG_INT_MASK 0x51
+
+#define PF1550_BAT_NO_VBUS 0
+#define PF1550_BAT_LOW_THAN_PRECHARG 1
+#define PF1550_BAT_CHARG_FAIL 2
+#define PF1550_BAT_HIGH_THAN_PRECHARG 4
+#define PF1550_BAT_OVER_VOL 5
+#define PF1550_BAT_NO_DETECT 6
+#define PF1550_BAT_SNS_MASK 0x7
+
+#define PF1550_VBUS_UVLO BIT(2)
+#define PF1550_VBUS_IN2SYS BIT(3)
+#define PF1550_VBUS_OVLO BIT(4)
+#define PF1550_VBUS_VALID BIT(5)
+
+#define PF1550_CHARG_REG_BATT_REG_CHGCV_MASK 0x3f
+#define PF1550_CHARG_REG_BATT_REG_VMINSYS_SHIFT 6
+#define PF1550_CHARG_REG_BATT_REG_VMINSYS_MASK GENMASK(7, 6)
+#define PF1550_CHARG_REG_THM_REG_CNFG_REGTEMP_SHIFT 2
+#define PF1550_CHARG_REG_THM_REG_CNFG_REGTEMP_MASK GENMASK(3, 2)
+
+#define PF1550_ONKEY_RST_EN BIT(7)
+
+/* DVS enable masks */
+#define OTP_SW1_DVS_ENB BIT(1)
+#define OTP_SW2_DVS_ENB BIT(3)
+
+/* Top level interrupt masks */
+#define IRQ_REGULATOR (BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(6))
+#define IRQ_ONKEY BIT(5)
+#define IRQ_CHG BIT(0)
+
+/* Regulator interrupt masks */
+#define PMIC_IRQ_SW1_LS BIT(0)
+#define PMIC_IRQ_SW2_LS BIT(1)
+#define PMIC_IRQ_SW3_LS BIT(2)
+#define PMIC_IRQ_SW1_HS BIT(0)
+#define PMIC_IRQ_SW2_HS BIT(1)
+#define PMIC_IRQ_SW3_HS BIT(2)
+#define PMIC_IRQ_LDO1_FAULT BIT(0)
+#define PMIC_IRQ_LDO2_FAULT BIT(1)
+#define PMIC_IRQ_LDO3_FAULT BIT(2)
+#define PMIC_IRQ_TEMP_110 BIT(0)
+#define PMIC_IRQ_TEMP_125 BIT(1)
+
+/* Onkey interrupt masks */
+#define ONKEY_IRQ_PUSHI BIT(0)
+#define ONKEY_IRQ_1SI BIT(1)
+#define ONKEY_IRQ_2SI BIT(2)
+#define ONKEY_IRQ_3SI BIT(3)
+#define ONKEY_IRQ_4SI BIT(4)
+#define ONKEY_IRQ_8SI BIT(5)
+
+/* Charger interrupt masks */
+#define CHARG_IRQ_BAT2SOCI BIT(1)
+#define CHARG_IRQ_BATI BIT(2)
+#define CHARG_IRQ_CHGI BIT(3)
+#define CHARG_IRQ_VBUSI BIT(5)
+#define CHARG_IRQ_DPMI BIT(6)
+#define CHARG_IRQ_THMI BIT(7)
+
+enum pf1550_irq {
+ PF1550_IRQ_CHG,
+ PF1550_IRQ_REGULATOR,
+ PF1550_IRQ_ONKEY,
+};
+
+enum pf1550_pmic_irq {
+ PF1550_PMIC_IRQ_SW1_LS,
+ PF1550_PMIC_IRQ_SW2_LS,
+ PF1550_PMIC_IRQ_SW3_LS,
+ PF1550_PMIC_IRQ_SW1_HS,
+ PF1550_PMIC_IRQ_SW2_HS,
+ PF1550_PMIC_IRQ_SW3_HS,
+ PF1550_PMIC_IRQ_LDO1_FAULT,
+ PF1550_PMIC_IRQ_LDO2_FAULT,
+ PF1550_PMIC_IRQ_LDO3_FAULT,
+ PF1550_PMIC_IRQ_TEMP_110,
+ PF1550_PMIC_IRQ_TEMP_125,
+};
+
+enum pf1550_onkey_irq {
+ PF1550_ONKEY_IRQ_PUSHI,
+ PF1550_ONKEY_IRQ_1SI,
+ PF1550_ONKEY_IRQ_2SI,
+ PF1550_ONKEY_IRQ_3SI,
+ PF1550_ONKEY_IRQ_4SI,
+ PF1550_ONKEY_IRQ_8SI,
+};
+
+enum pf1550_charg_irq {
+ PF1550_CHARG_IRQ_BAT2SOCI,
+ PF1550_CHARG_IRQ_BATI,
+ PF1550_CHARG_IRQ_CHGI,
+ PF1550_CHARG_IRQ_VBUSI,
+ PF1550_CHARG_IRQ_THMI,
+};
+
+enum pf1550_regulators {
+ PF1550_SW1,
+ PF1550_SW2,
+ PF1550_SW3,
+ PF1550_VREFDDR,
+ PF1550_LDO1,
+ PF1550_LDO2,
+ PF1550_LDO3,
+};
+
+struct pf1550_ddata {
+ struct regmap_irq_chip_data *irq_data_regulator;
+ struct regmap_irq_chip_data *irq_data_charger;
+ struct regmap_irq_chip_data *irq_data_onkey;
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+ struct device *dev;
+ bool dvs1_enable;
+ bool dvs2_enable;
+ int irq;
+};
+
+#endif /* __LINUX_MFD_PF1550_H */
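
As a quick sanity check of the register map above, a chip-identification
sketch; the function name and probe context are illustrative assumptions:

	static int example_check_id(struct pf1550_ddata *ddata)
	{
		unsigned int id;
		int ret;

		/* DEVICE_ID should read back as PF1550_DEVICE_ID (0x7c) */
		ret = regmap_read(ddata->regmap, PF1550_PMIC_REG_DEVICE_ID,
				  &id);
		if (ret)
			return ret;

		return id == PF1550_DEVICE_ID ? 0 : -ENODEV;
	}
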
diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h
index 742ebf1b76ca..4b6b644f1108 100644
--- a/include/linux/mfd/qcom_rpm.h
+++ b/include/linux/mfd/qcom_rpm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __QCOM_RPM_H__
#define __QCOM_RPM_H__
diff --git a/include/linux/mfd/qnap-mcu.h b/include/linux/mfd/qnap-mcu.h
new file mode 100644
index 000000000000..42bf523f9a5b
--- /dev/null
+++ b/include/linux/mfd/qnap-mcu.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core definitions for QNAP MCU MFD driver.
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#ifndef _LINUX_QNAP_MCU_H_
+#define _LINUX_QNAP_MCU_H_
+
+#include <linux/types.h>
+
+struct qnap_mcu;
+
+struct qnap_mcu_variant {
+ u32 baud_rate;
+ int num_drives;
+ int fan_pwm_min;
+ int fan_pwm_max;
+ bool usb_led;
+};
+
+int qnap_mcu_exec(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size,
+ u8 *reply_data, size_t reply_data_size);
+int qnap_mcu_exec_with_ack(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size);
+
+#endif /* _LINUX_QNAP_MCU_H_ */
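
A usage sketch for the exec API above; the opcode bytes are invented
placeholders, since the MCU protocol itself is not defined in this header:

	static int example_query(struct qnap_mcu *mcu)
	{
		const u8 cmd[] = { 0x40, 0x56 };	/* hypothetical opcode */
		u8 reply[4];

		/* Send cmd and collect a fixed-size reply */
		return qnap_mcu_exec(mcu, cmd, sizeof(cmd),
				     reply, sizeof(reply));
	}
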
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
new file mode 100644
index 000000000000..11eef77ef976
--- /dev/null
+++ b/include/linux/mfd/rave-sp.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Core definitions for RAVE SP MFD driver.
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#ifndef _LINUX_RAVE_SP_H_
+#define _LINUX_RAVE_SP_H_
+
+#include <linux/notifier.h>
+
+enum rave_sp_command {
+ RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20,
+ RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21,
+ RAVE_SP_CMD_BOOT_SOURCE = 0x26,
+ RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B,
+ RAVE_SP_CMD_GET_GPIO_STATE = 0x2F,
+
+ RAVE_SP_CMD_STATUS = 0xA0,
+ RAVE_SP_CMD_SW_WDT = 0xA1,
+ RAVE_SP_CMD_PET_WDT = 0xA2,
+ RAVE_SP_CMD_RMB_EEPROM = 0xA4,
+ RAVE_SP_CMD_SET_BACKLIGHT = 0xA6,
+ RAVE_SP_CMD_RESET = 0xA7,
+ RAVE_SP_CMD_RESET_REASON = 0xA8,
+
+ RAVE_SP_CMD_REQ_COPPER_REV = 0xB6,
+ RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA,
+ RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9,
+ RAVE_SP_CMD_CONTROL_EVENTS = 0xBB,
+
+ RAVE_SP_EVNT_BASE = 0xE0,
+};
+
+struct rave_sp;
+
+static inline unsigned long rave_sp_action_pack(u8 event, u8 value)
+{
+ return ((unsigned long)value << 8) | event;
+}
+
+static inline u8 rave_sp_action_unpack_event(unsigned long action)
+{
+ return action;
+}
+
+static inline u8 rave_sp_action_unpack_value(unsigned long action)
+{
+ return action >> 8;
+}
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size);
+
+struct device;
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb);
+
+#endif /* _LINUX_RAVE_SP_H_ */
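
The pack/unpack helpers above define how events travel through the notifier
chain. A sketch of a client callback; the NOTIFY_* handling shown is an
assumption, not part of this header:

	static int example_event(struct notifier_block *nb,
				 unsigned long action, void *data)
	{
		u8 event = rave_sp_action_unpack_event(action);
		u8 value = rave_sp_action_unpack_value(action);

		if (event < RAVE_SP_EVNT_BASE)
			return NOTIFY_DONE;

		pr_info("rave-sp event %#x value %#x\n", event, value);
		return NOTIFY_OK;
	}

Registration would then go through devm_rave_sp_register_event_notifier()
with this callback in the notifier_block.
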
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
index 8d0a392e0a7f..4f220146cc02 100644
--- a/include/linux/mfd/rc5t583.h
+++ b/include/linux/mfd/rc5t583.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Core driver interface to access RICOH_RC5T583 power management chip.
*
@@ -6,19 +7,6 @@
*
* Based on code
* Copyright (C) 2011 RICOH COMPANY,LTD
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
*/
#ifndef __LINUX_MFD_RC5T583_H
diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h
index 442743a8f915..697933b2227b 100644
--- a/include/linux/mfd/rdc321x.h
+++ b/include/linux/mfd/rdc321x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RDC321X_MFD_H
#define __RDC321X_MFD_H
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 83701ef7d3c7..28170ee08898 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Register definitions for Rockchip's RK808/RK818 PMIC
*
@@ -9,15 +10,6 @@
* Copyright (C) 2016 PHYTEC Messtechnik GmbH
*
* Author: Wadim Egorov <w.egorov@phytec.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __LINUX_REGULATOR_RK808_H
@@ -121,6 +113,148 @@ enum rk808_reg {
#define RK808_INT_STS_MSK_REG2 0x4f
#define RK808_IO_POL_REG 0x50
+/* RK816 */
+enum rk816_reg {
+ RK816_ID_DCDC1,
+ RK816_ID_DCDC2,
+ RK816_ID_DCDC3,
+ RK816_ID_DCDC4,
+ RK816_ID_LDO1,
+ RK816_ID_LDO2,
+ RK816_ID_LDO3,
+ RK816_ID_LDO4,
+ RK816_ID_LDO5,
+ RK816_ID_LDO6,
+ RK816_ID_BOOST,
+ RK816_ID_OTG_SW,
+};
+
+enum rk816_irqs {
+ /* INT_STS_REG1 */
+ RK816_IRQ_PWRON_FALL,
+ RK816_IRQ_PWRON_RISE,
+
+ /* INT_STS_REG2 */
+ RK816_IRQ_VB_LOW,
+ RK816_IRQ_PWRON,
+ RK816_IRQ_PWRON_LP,
+ RK816_IRQ_HOTDIE,
+ RK816_IRQ_RTC_ALARM,
+ RK816_IRQ_RTC_PERIOD,
+ RK816_IRQ_USB_OV,
+
+ /* INT_STS_REG3 */
+ RK816_IRQ_PLUG_IN,
+ RK816_IRQ_PLUG_OUT,
+ RK816_IRQ_CHG_OK,
+ RK816_IRQ_CHG_TE,
+ RK816_IRQ_CHG_TS,
+ RK816_IRQ_CHG_CVTLIM,
+ RK816_IRQ_DISCHG_ILIM,
+};
+
+/* power channel registers */
+#define RK816_DCDC_EN_REG1 0x23
+
+#define RK816_DCDC_EN_REG2 0x24
+#define RK816_BOOST_EN BIT(1)
+#define RK816_OTG_EN BIT(2)
+#define RK816_BOOST_EN_MSK BIT(5)
+#define RK816_OTG_EN_MSK BIT(6)
+#define RK816_BUCK_DVS_CONFIRM BIT(7)
+
+#define RK816_LDO_EN_REG1 0x27
+
+#define RK816_LDO_EN_REG2 0x28
+
+/* interrupt registers and irq definitions */
+#define RK816_INT_STS_REG1 0x49
+#define RK816_INT_STS_MSK_REG1 0x4a
+#define RK816_INT_STS_PWRON_FALL BIT(5)
+#define RK816_INT_STS_PWRON_RISE BIT(6)
+
+#define RK816_INT_STS_REG2 0x4c
+#define RK816_INT_STS_MSK_REG2 0x4d
+#define RK816_INT_STS_VB_LOW BIT(1)
+#define RK816_INT_STS_PWRON BIT(2)
+#define RK816_INT_STS_PWRON_LP BIT(3)
+#define RK816_INT_STS_HOTDIE BIT(4)
+#define RK816_INT_STS_RTC_ALARM BIT(5)
+#define RK816_INT_STS_RTC_PERIOD BIT(6)
+#define RK816_INT_STS_USB_OV BIT(7)
+
+#define RK816_INT_STS_REG3 0x4e
+#define RK816_INT_STS_MSK_REG3 0x4f
+#define RK816_INT_STS_PLUG_IN BIT(0)
+#define RK816_INT_STS_PLUG_OUT BIT(1)
+#define RK816_INT_STS_CHG_OK BIT(2)
+#define RK816_INT_STS_CHG_TE BIT(3)
+#define RK816_INT_STS_CHG_TS BIT(4)
+#define RK816_INT_STS_CHG_CVTLIM BIT(6)
+#define RK816_INT_STS_DISCHG_ILIM BIT(7)
+
+#define RK816_IRQ_STS_OFFSET(x) ((x) - RK816_INT_STS_REG1)
+#define RK816_IRQ_MSK_OFFSET(x) ((x) - RK816_INT_STS_MSK_REG1)
+
+/* charger, boost and OTG registers */
+#define RK816_OTG_BUCK_LDO_CONFIG_REG 0x2a
+#define RK816_CHRG_CONFIG_REG 0x2b
+#define RK816_BOOST_ON_VESL_REG 0x54
+#define RK816_BOOST_SLP_VSEL_REG 0x55
+#define RK816_CHRG_BOOST_CONFIG_REG 0x9a
+#define RK816_SUP_STS_REG 0xa0
+#define RK816_USB_CTRL_REG 0xa1
+#define RK816_CHRG_CTRL(x) (0xa3 + (x))
+#define RK816_BAT_CTRL_REG 0xa6
+#define RK816_BAT_HTS_TS_REG 0xa8
+#define RK816_BAT_LTS_TS_REG 0xa9
+
+/* adc and fuel gauge registers */
+#define RK816_TS_CTRL_REG 0xac
+#define RK816_ADC_CTRL_REG 0xad
+#define RK816_GGCON_REG 0xb0
+#define RK816_GGSTS_REG 0xb1
+#define RK816_ZERO_CUR_ADC_REGH 0xb2
+#define RK816_ZERO_CUR_ADC_REGL 0xb3
+#define RK816_GASCNT_CAL_REG(x) (0xb7 - (x))
+#define RK816_GASCNT_REG(x) (0xbb - (x))
+#define RK816_BAT_CUR_AVG_REGH 0xbc
+#define RK816_BAT_CUR_AVG_REGL 0xbd
+#define RK816_TS_ADC_REGH 0xbe
+#define RK816_TS_ADC_REGL 0xbf
+#define RK816_USB_ADC_REGH 0xc0
+#define RK816_USB_ADC_REGL 0xc1
+#define RK816_BAT_OCV_REGH 0xc2
+#define RK816_BAT_OCV_REGL 0xc3
+#define RK816_BAT_VOL_REGH 0xc4
+#define RK816_BAT_VOL_REGL 0xc5
+#define RK816_RELAX_ENTRY_THRES_REGH 0xc6
+#define RK816_RELAX_ENTRY_THRES_REGL 0xc7
+#define RK816_RELAX_EXIT_THRES_REGH 0xc8
+#define RK816_RELAX_EXIT_THRES_REGL 0xc9
+#define RK816_RELAX_VOL1_REGH 0xca
+#define RK816_RELAX_VOL1_REGL 0xcb
+#define RK816_RELAX_VOL2_REGH 0xcc
+#define RK816_RELAX_VOL2_REGL 0xcd
+#define RK816_RELAX_CUR1_REGH 0xce
+#define RK816_RELAX_CUR1_REGL 0xcf
+#define RK816_RELAX_CUR2_REGH 0xd0
+#define RK816_RELAX_CUR2_REGL 0xd1
+#define RK816_CAL_OFFSET_REGH 0xd2
+#define RK816_CAL_OFFSET_REGL 0xd3
+#define RK816_NON_ACT_TIMER_CNT_REG 0xd4
+#define RK816_VCALIB0_REGH 0xd5
+#define RK816_VCALIB0_REGL 0xd6
+#define RK816_VCALIB1_REGH 0xd7
+#define RK816_VCALIB1_REGL 0xd8
+#define RK816_FCC_GASCNT_REG(x) (0xdc - (x))
+#define RK816_IOFFSET_REGH 0xdd
+#define RK816_IOFFSET_REGL 0xde
+#define RK816_SLEEP_CON_SAMP_CUR_REG 0xdf
+
+/* general purpose data registers 0xe0 ~ 0xf2 */
+#define RK816_DATA_REG(x) (0xe0 + (x))
+
/* RK818 */
#define RK818_DCDC1 0
#define RK818_LDO1 4
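
The RK816 offset macros above turn a register address into an index relative
to the first status/mask register, which is the shape a regmap_irq_chip
expects for .status_base/.mask_base. A sketch, where the table entry is an
assumption following that pattern:

	static const struct regmap_irq example_rk816_irqs[] = {
		[RK816_IRQ_VB_LOW] = {
			.mask		= RK816_INT_STS_VB_LOW,
			.reg_offset	= RK816_IRQ_STS_OFFSET(RK816_INT_STS_REG2),
		},
	};
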
@@ -206,6 +340,507 @@ enum rk818_reg {
#define RK818_USB_ILMIN_2000MA 0x7
#define RK818_USB_CHG_SD_VSEL_MASK 0x70
+/* RK805 */
+enum rk805_reg {
+ RK805_ID_DCDC1,
+ RK805_ID_DCDC2,
+ RK805_ID_DCDC3,
+ RK805_ID_DCDC4,
+ RK805_ID_LDO1,
+ RK805_ID_LDO2,
+ RK805_ID_LDO3,
+};
+
+/* CONFIG REGISTER */
+#define RK805_VB_MON_REG 0x21
+#define RK805_THERMAL_REG 0x22
+
+/* POWER CHANNELS ENABLE REGISTER */
+#define RK805_DCDC_EN_REG 0x23
+#define RK805_SLP_DCDC_EN_REG 0x25
+#define RK805_SLP_LDO_EN_REG 0x26
+#define RK805_LDO_EN_REG 0x27
+
+/* BUCK AND LDO CONFIG REGISTER */
+#define RK805_BUCK_LDO_SLP_LP_EN_REG 0x2A
+#define RK805_BUCK1_CONFIG_REG 0x2E
+#define RK805_BUCK1_ON_VSEL_REG 0x2F
+#define RK805_BUCK1_SLP_VSEL_REG 0x30
+#define RK805_BUCK2_CONFIG_REG 0x32
+#define RK805_BUCK2_ON_VSEL_REG 0x33
+#define RK805_BUCK2_SLP_VSEL_REG 0x34
+#define RK805_BUCK3_CONFIG_REG 0x36
+#define RK805_BUCK4_CONFIG_REG 0x37
+#define RK805_BUCK4_ON_VSEL_REG 0x38
+#define RK805_BUCK4_SLP_VSEL_REG 0x39
+#define RK805_LDO1_ON_VSEL_REG 0x3B
+#define RK805_LDO1_SLP_VSEL_REG 0x3C
+#define RK805_LDO2_ON_VSEL_REG 0x3D
+#define RK805_LDO2_SLP_VSEL_REG 0x3E
+#define RK805_LDO3_ON_VSEL_REG 0x3F
+#define RK805_LDO3_SLP_VSEL_REG 0x40
+
+/* INTERRUPT REGISTER */
+#define RK805_PWRON_LP_INT_TIME_REG 0x47
+#define RK805_PWRON_DB_REG 0x48
+#define RK805_DEV_CTRL_REG 0x4B
+#define RK805_INT_STS_REG 0x4C
+#define RK805_INT_STS_MSK_REG 0x4D
+#define RK805_GPIO_IO_POL_REG 0x50
+#define RK805_OUT_REG 0x52
+#define RK805_ON_SOURCE_REG 0xAE
+#define RK805_OFF_SOURCE_REG 0xAF
+
+#define RK805_NUM_REGULATORS 7
+
+#define RK805_PWRON_FALL_RISE_INT_EN 0x0
+#define RK805_PWRON_FALL_RISE_INT_MSK 0x81
+
+/* RK805 IRQ Definitions */
+#define RK805_IRQ_PWRON_RISE 0
+#define RK805_IRQ_VB_LOW 1
+#define RK805_IRQ_PWRON 2
+#define RK805_IRQ_PWRON_LP 3
+#define RK805_IRQ_HOTDIE 4
+#define RK805_IRQ_RTC_ALARM 5
+#define RK805_IRQ_RTC_PERIOD 6
+#define RK805_IRQ_PWRON_FALL 7
+
+#define RK805_IRQ_PWRON_RISE_MSK BIT(0)
+#define RK805_IRQ_VB_LOW_MSK BIT(1)
+#define RK805_IRQ_PWRON_MSK BIT(2)
+#define RK805_IRQ_PWRON_LP_MSK BIT(3)
+#define RK805_IRQ_HOTDIE_MSK BIT(4)
+#define RK805_IRQ_RTC_ALARM_MSK BIT(5)
+#define RK805_IRQ_RTC_PERIOD_MSK BIT(6)
+#define RK805_IRQ_PWRON_FALL_MSK BIT(7)
+
+#define RK805_PWR_RISE_INT_STATUS BIT(0)
+#define RK805_VB_LOW_INT_STATUS BIT(1)
+#define RK805_PWRON_INT_STATUS BIT(2)
+#define RK805_PWRON_LP_INT_STATUS BIT(3)
+#define RK805_HOTDIE_INT_STATUS BIT(4)
+#define RK805_ALARM_INT_STATUS BIT(5)
+#define RK805_PERIOD_INT_STATUS BIT(6)
+#define RK805_PWR_FALL_INT_STATUS BIT(7)
+
+#define RK805_BUCK1_2_ILMAX_MASK (3 << 6)
+#define RK805_BUCK3_4_ILMAX_MASK (3 << 3)
+#define RK805_RTC_PERIOD_INT_MASK (1 << 6)
+#define RK805_RTC_ALARM_INT_MASK (1 << 5)
+#define RK805_INT_ALARM_EN (1 << 3)
+#define RK805_INT_TIMER_EN (1 << 2)
+
+/* RK806 */
+#define RK806_POWER_EN0 0x0
+#define RK806_POWER_EN1 0x1
+#define RK806_POWER_EN2 0x2
+#define RK806_POWER_EN3 0x3
+#define RK806_POWER_EN4 0x4
+#define RK806_POWER_EN5 0x5
+#define RK806_POWER_SLP_EN0 0x6
+#define RK806_POWER_SLP_EN1 0x7
+#define RK806_POWER_SLP_EN2 0x8
+#define RK806_POWER_DISCHRG_EN0 0x9
+#define RK806_POWER_DISCHRG_EN1 0xA
+#define RK806_POWER_DISCHRG_EN2 0xB
+#define RK806_BUCK_FB_CONFIG 0xC
+#define RK806_SLP_LP_CONFIG 0xD
+#define RK806_POWER_FPWM_EN0 0xE
+#define RK806_POWER_FPWM_EN1 0xF
+#define RK806_BUCK1_CONFIG 0x10
+#define RK806_BUCK2_CONFIG 0x11
+#define RK806_BUCK3_CONFIG 0x12
+#define RK806_BUCK4_CONFIG 0x13
+#define RK806_BUCK5_CONFIG 0x14
+#define RK806_BUCK6_CONFIG 0x15
+#define RK806_BUCK7_CONFIG 0x16
+#define RK806_BUCK8_CONFIG 0x17
+#define RK806_BUCK9_CONFIG 0x18
+#define RK806_BUCK10_CONFIG 0x19
+#define RK806_BUCK1_ON_VSEL 0x1A
+#define RK806_BUCK2_ON_VSEL 0x1B
+#define RK806_BUCK3_ON_VSEL 0x1C
+#define RK806_BUCK4_ON_VSEL 0x1D
+#define RK806_BUCK5_ON_VSEL 0x1E
+#define RK806_BUCK6_ON_VSEL 0x1F
+#define RK806_BUCK7_ON_VSEL 0x20
+#define RK806_BUCK8_ON_VSEL 0x21
+#define RK806_BUCK9_ON_VSEL 0x22
+#define RK806_BUCK10_ON_VSEL 0x23
+#define RK806_BUCK1_SLP_VSEL 0x24
+#define RK806_BUCK2_SLP_VSEL 0x25
+#define RK806_BUCK3_SLP_VSEL 0x26
+#define RK806_BUCK4_SLP_VSEL 0x27
+#define RK806_BUCK5_SLP_VSEL 0x28
+#define RK806_BUCK6_SLP_VSEL 0x29
+#define RK806_BUCK7_SLP_VSEL 0x2A
+#define RK806_BUCK8_SLP_VSEL 0x2B
+#define RK806_BUCK9_SLP_VSEL 0x2D
+#define RK806_BUCK10_SLP_VSEL 0x2E
+#define RK806_BUCK_DEBUG1 0x30
+#define RK806_BUCK_DEBUG2 0x31
+#define RK806_BUCK_DEBUG3 0x32
+#define RK806_BUCK_DEBUG4 0x33
+#define RK806_BUCK_DEBUG5 0x34
+#define RK806_BUCK_DEBUG6 0x35
+#define RK806_BUCK_DEBUG7 0x36
+#define RK806_BUCK_DEBUG8 0x37
+#define RK806_BUCK_DEBUG9 0x38
+#define RK806_BUCK_DEBUG10 0x39
+#define RK806_BUCK_DEBUG11 0x3A
+#define RK806_BUCK_DEBUG12 0x3B
+#define RK806_BUCK_DEBUG13 0x3C
+#define RK806_BUCK_DEBUG14 0x3D
+#define RK806_BUCK_DEBUG15 0x3E
+#define RK806_BUCK_DEBUG16 0x3F
+#define RK806_BUCK_DEBUG17 0x40
+#define RK806_BUCK_DEBUG18 0x41
+#define RK806_NLDO_IMAX 0x42
+#define RK806_NLDO1_ON_VSEL 0x43
+#define RK806_NLDO2_ON_VSEL 0x44
+#define RK806_NLDO3_ON_VSEL 0x45
+#define RK806_NLDO4_ON_VSEL 0x46
+#define RK806_NLDO5_ON_VSEL 0x47
+#define RK806_NLDO1_SLP_VSEL 0x48
+#define RK806_NLDO2_SLP_VSEL 0x49
+#define RK806_NLDO3_SLP_VSEL 0x4A
+#define RK806_NLDO4_SLP_VSEL 0x4B
+#define RK806_NLDO5_SLP_VSEL 0x4C
+#define RK806_PLDO_IMAX 0x4D
+#define RK806_PLDO1_ON_VSEL 0x4E
+#define RK806_PLDO2_ON_VSEL 0x4F
+#define RK806_PLDO3_ON_VSEL 0x50
+#define RK806_PLDO4_ON_VSEL 0x51
+#define RK806_PLDO5_ON_VSEL 0x52
+#define RK806_PLDO6_ON_VSEL 0x53
+#define RK806_PLDO1_SLP_VSEL 0x54
+#define RK806_PLDO2_SLP_VSEL 0x55
+#define RK806_PLDO3_SLP_VSEL 0x56
+#define RK806_PLDO4_SLP_VSEL 0x57
+#define RK806_PLDO5_SLP_VSEL 0x58
+#define RK806_PLDO6_SLP_VSEL 0x59
+#define RK806_CHIP_NAME 0x5A
+#define RK806_CHIP_VER 0x5B
+#define RK806_OTP_VER 0x5C
+#define RK806_SYS_STS 0x5D
+#define RK806_SYS_CFG0 0x5E
+#define RK806_SYS_CFG1 0x5F
+#define RK806_SYS_OPTION 0x61
+#define RK806_SLEEP_CONFIG0 0x62
+#define RK806_SLEEP_CONFIG1 0x63
+#define RK806_SLEEP_CTR_SEL0 0x64
+#define RK806_SLEEP_CTR_SEL1 0x65
+#define RK806_SLEEP_CTR_SEL2 0x66
+#define RK806_SLEEP_CTR_SEL3 0x67
+#define RK806_SLEEP_CTR_SEL4 0x68
+#define RK806_SLEEP_CTR_SEL5 0x69
+#define RK806_DVS_CTRL_SEL0 0x6A
+#define RK806_DVS_CTRL_SEL1 0x6B
+#define RK806_DVS_CTRL_SEL2 0x6C
+#define RK806_DVS_CTRL_SEL3 0x6D
+#define RK806_DVS_CTRL_SEL4 0x6E
+#define RK806_DVS_CTRL_SEL5 0x6F
+#define RK806_DVS_START_CTRL 0x70
+#define RK806_SLEEP_GPIO 0x71
+#define RK806_SYS_CFG3 0x72
+#define RK806_ON_SOURCE 0x74
+#define RK806_OFF_SOURCE 0x75
+#define RK806_PWRON_KEY 0x76
+#define RK806_INT_STS0 0x77
+#define RK806_INT_MSK0 0x78
+#define RK806_INT_STS1 0x79
+#define RK806_INT_MSK1 0x7A
+#define RK806_GPIO_INT_CONFIG 0x7B
+#define RK806_DATA_REG0 0x7C
+#define RK806_DATA_REG1 0x7D
+#define RK806_DATA_REG2 0x7E
+#define RK806_DATA_REG3 0x7F
+#define RK806_DATA_REG4 0x80
+#define RK806_DATA_REG5 0x81
+#define RK806_DATA_REG6 0x82
+#define RK806_DATA_REG7 0x83
+#define RK806_DATA_REG8 0x84
+#define RK806_DATA_REG9 0x85
+#define RK806_DATA_REG10 0x86
+#define RK806_DATA_REG11 0x87
+#define RK806_DATA_REG12 0x88
+#define RK806_DATA_REG13 0x89
+#define RK806_DATA_REG14 0x8A
+#define RK806_DATA_REG15 0x8B
+#define RK806_TM_REG 0x8C
+#define RK806_OTP_EN_REG 0x8D
+#define RK806_FUNC_OTP_EN_REG 0x8E
+#define RK806_TEST_REG1 0x8F
+#define RK806_TEST_REG2 0x90
+#define RK806_TEST_REG3 0x91
+#define RK806_TEST_REG4 0x92
+#define RK806_TEST_REG5 0x93
+#define RK806_BUCK_VSEL_OTP_REG0 0x94
+#define RK806_BUCK_VSEL_OTP_REG1 0x95
+#define RK806_BUCK_VSEL_OTP_REG2 0x96
+#define RK806_BUCK_VSEL_OTP_REG3 0x97
+#define RK806_BUCK_VSEL_OTP_REG4 0x98
+#define RK806_BUCK_VSEL_OTP_REG5 0x99
+#define RK806_BUCK_VSEL_OTP_REG6 0x9A
+#define RK806_BUCK_VSEL_OTP_REG7 0x9B
+#define RK806_BUCK_VSEL_OTP_REG8 0x9C
+#define RK806_BUCK_VSEL_OTP_REG9 0x9D
+#define RK806_NLDO1_VSEL_OTP_REG0 0x9E
+#define RK806_NLDO1_VSEL_OTP_REG1 0x9F
+#define RK806_NLDO1_VSEL_OTP_REG2 0xA0
+#define RK806_NLDO1_VSEL_OTP_REG3 0xA1
+#define RK806_NLDO1_VSEL_OTP_REG4 0xA2
+#define RK806_PLDO_VSEL_OTP_REG0 0xA3
+#define RK806_PLDO_VSEL_OTP_REG1 0xA4
+#define RK806_PLDO_VSEL_OTP_REG2 0xA5
+#define RK806_PLDO_VSEL_OTP_REG3 0xA6
+#define RK806_PLDO_VSEL_OTP_REG4 0xA7
+#define RK806_PLDO_VSEL_OTP_REG5 0xA8
+#define RK806_BUCK_EN_OTP_REG1 0xA9
+#define RK806_NLDO_EN_OTP_REG1 0xAA
+#define RK806_PLDO_EN_OTP_REG1 0xAB
+#define RK806_BUCK_FB_RES_OTP_REG1 0xAC
+#define RK806_OTP_RESEV_REG0 0xAD
+#define RK806_OTP_RESEV_REG1 0xAE
+#define RK806_OTP_RESEV_REG2 0xAF
+#define RK806_OTP_RESEV_REG3 0xB0
+#define RK806_OTP_RESEV_REG4 0xB1
+#define RK806_BUCK_SEQ_REG0 0xB2
+#define RK806_BUCK_SEQ_REG1 0xB3
+#define RK806_BUCK_SEQ_REG2 0xB4
+#define RK806_BUCK_SEQ_REG3 0xB5
+#define RK806_BUCK_SEQ_REG4 0xB6
+#define RK806_BUCK_SEQ_REG5 0xB7
+#define RK806_BUCK_SEQ_REG6 0xB8
+#define RK806_BUCK_SEQ_REG7 0xB9
+#define RK806_BUCK_SEQ_REG8 0xBA
+#define RK806_BUCK_SEQ_REG9 0xBB
+#define RK806_BUCK_SEQ_REG10 0xBC
+#define RK806_BUCK_SEQ_REG11 0xBD
+#define RK806_BUCK_SEQ_REG12 0xBE
+#define RK806_BUCK_SEQ_REG13 0xBF
+#define RK806_BUCK_SEQ_REG14 0xC0
+#define RK806_BUCK_SEQ_REG15 0xC1
+#define RK806_BUCK_SEQ_REG16 0xC2
+#define RK806_BUCK_SEQ_REG17 0xC3
+#define RK806_HK_TRIM_REG1 0xC4
+#define RK806_HK_TRIM_REG2 0xC5
+#define RK806_BUCK_REF_TRIM_REG1 0xC6
+#define RK806_BUCK_REF_TRIM_REG2 0xC7
+#define RK806_BUCK_REF_TRIM_REG3 0xC8
+#define RK806_BUCK_REF_TRIM_REG4 0xC9
+#define RK806_BUCK_REF_TRIM_REG5 0xCA
+#define RK806_BUCK_OSC_TRIM_REG1 0xCB
+#define RK806_BUCK_OSC_TRIM_REG2 0xCC
+#define RK806_BUCK_OSC_TRIM_REG3 0xCD
+#define RK806_BUCK_OSC_TRIM_REG4 0xCE
+#define RK806_BUCK_OSC_TRIM_REG5 0xCF
+#define RK806_BUCK_TRIM_ZCDIOS_REG1 0xD0
+#define RK806_BUCK_TRIM_ZCDIOS_REG2 0xD1
+#define RK806_NLDO_TRIM_REG1 0xD2
+#define RK806_NLDO_TRIM_REG2 0xD3
+#define RK806_NLDO_TRIM_REG3 0xD4
+#define RK806_PLDO_TRIM_REG1 0xD5
+#define RK806_PLDO_TRIM_REG2 0xD6
+#define RK806_PLDO_TRIM_REG3 0xD7
+#define RK806_TRIM_ICOMP_REG1 0xD8
+#define RK806_TRIM_ICOMP_REG2 0xD9
+#define RK806_EFUSE_CONTROL_REGH 0xDA
+#define RK806_FUSE_PROG_REG 0xDB
+#define RK806_MAIN_FSM_STS_REG 0xDD
+#define RK806_FSM_REG 0xDE
+#define RK806_TOP_RESEV_OFFR 0xEC
+#define RK806_TOP_RESEV_POR 0xED
+#define RK806_BUCK_VRSN_REG1 0xEE
+#define RK806_BUCK_VRSN_REG2 0xEF
+#define RK806_NLDO_RLOAD_SEL_REG1 0xF0
+#define RK806_PLDO_RLOAD_SEL_REG1 0xF1
+#define RK806_PLDO_RLOAD_SEL_REG2 0xF2
+#define RK806_BUCK_CMIN_MX_REG1 0xF3
+#define RK806_BUCK_CMIN_MX_REG2 0xF4
+#define RK806_BUCK_FREQ_SET_REG1 0xF5
+#define RK806_BUCK_FREQ_SET_REG2 0xF6
+#define RK806_BUCK_RS_MEABS_REG1 0xF7
+#define RK806_BUCK_RS_MEABS_REG2 0xF8
+#define RK806_BUCK_RS_ZDLEB_REG1 0xF9
+#define RK806_BUCK_RS_ZDLEB_REG2 0xFA
+#define RK806_BUCK_RSERVE_REG1 0xFB
+#define RK806_BUCK_RSERVE_REG2 0xFC
+#define RK806_BUCK_RSERVE_REG3 0xFD
+#define RK806_BUCK_RSERVE_REG4 0xFE
+#define RK806_BUCK_RSERVE_REG5 0xFF
+
+/* INT_STS Register field definitions */
+#define RK806_INT_STS_PWRON_FALL BIT(0)
+#define RK806_INT_STS_PWRON_RISE BIT(1)
+#define RK806_INT_STS_PWRON BIT(2)
+#define RK806_INT_STS_PWRON_LP BIT(3)
+#define RK806_INT_STS_HOTDIE BIT(4)
+#define RK806_INT_STS_VDC_RISE BIT(5)
+#define RK806_INT_STS_VDC_FALL BIT(6)
+#define RK806_INT_STS_VB_LO BIT(7)
+#define RK806_INT_STS_REV0 BIT(0)
+#define RK806_INT_STS_REV1 BIT(1)
+#define RK806_INT_STS_REV2 BIT(2)
+#define RK806_INT_STS_CRC_ERROR BIT(3)
+#define RK806_INT_STS_SLP3_GPIO BIT(4)
+#define RK806_INT_STS_SLP2_GPIO BIT(5)
+#define RK806_INT_STS_SLP1_GPIO BIT(6)
+#define RK806_INT_STS_WDT BIT(7)
+
+/* SPI command */
+#define RK806_CMD_READ 0
+#define RK806_CMD_WRITE BIT(7)
+#define RK806_CMD_CRC_EN BIT(6)
+#define RK806_CMD_CRC_DIS 0
+#define RK806_CMD_LEN_MSK 0x0f
+#define RK806_REG_H 0x00
+
+#define VERSION_AB 0x01
+
+enum rk806_reg_id {
+ RK806_ID_DCDC1 = 0,
+ RK806_ID_DCDC2,
+ RK806_ID_DCDC3,
+ RK806_ID_DCDC4,
+ RK806_ID_DCDC5,
+ RK806_ID_DCDC6,
+ RK806_ID_DCDC7,
+ RK806_ID_DCDC8,
+ RK806_ID_DCDC9,
+ RK806_ID_DCDC10,
+
+ RK806_ID_NLDO1,
+ RK806_ID_NLDO2,
+ RK806_ID_NLDO3,
+ RK806_ID_NLDO4,
+ RK806_ID_NLDO5,
+
+ RK806_ID_PLDO1,
+ RK806_ID_PLDO2,
+ RK806_ID_PLDO3,
+ RK806_ID_PLDO4,
+ RK806_ID_PLDO5,
+ RK806_ID_PLDO6,
+ RK806_ID_END,
+};
+
+/* Define the RK806 IRQ numbers */
+enum rk806_irqs {
+ /* INT_STS0 registers */
+ RK806_IRQ_PWRON_FALL,
+ RK806_IRQ_PWRON_RISE,
+ RK806_IRQ_PWRON,
+ RK806_IRQ_PWRON_LP,
+ RK806_IRQ_HOTDIE,
+ RK806_IRQ_VDC_RISE,
+ RK806_IRQ_VDC_FALL,
+ RK806_IRQ_VB_LO,
+
+	/* INT_STS1 registers */
+ RK806_IRQ_REV0,
+ RK806_IRQ_REV1,
+ RK806_IRQ_REV2,
+ RK806_IRQ_CRC_ERROR,
+ RK806_IRQ_SLP3_GPIO,
+ RK806_IRQ_SLP2_GPIO,
+ RK806_IRQ_SLP1_GPIO,
+ RK806_IRQ_WDT,
+};
+
+/* VCC1 Low Voltage Threshold */
+enum rk806_lv_sel {
+ VB_LO_SEL_2800,
+ VB_LO_SEL_2900,
+ VB_LO_SEL_3000,
+ VB_LO_SEL_3100,
+ VB_LO_SEL_3200,
+ VB_LO_SEL_3300,
+ VB_LO_SEL_3400,
+ VB_LO_SEL_3500,
+};
+
+/* System Shutdown Voltage Select */
+enum rk806_uv_sel {
+ VB_UV_SEL_2700,
+ VB_UV_SEL_2800,
+ VB_UV_SEL_2900,
+ VB_UV_SEL_3000,
+ VB_UV_SEL_3100,
+ VB_UV_SEL_3200,
+ VB_UV_SEL_3300,
+ VB_UV_SEL_3400,
+};
+
+/* Pin Function */
+enum rk806_pwrctrl_fun {
+ PWRCTRL_NULL_FUN,
+ PWRCTRL_SLP_FUN,
+ PWRCTRL_POWOFF_FUN,
+ PWRCTRL_RST_FUN,
+ PWRCTRL_DVS_FUN,
+ PWRCTRL_GPIO_FUN,
+};
+
+/* Pin Polarity */
+enum rk806_pin_level {
+ POL_LOW,
+ POL_HIGH,
+};
+
+enum rk806_vsel_ctr_sel {
+ CTR_BY_NO_EFFECT,
+ CTR_BY_PWRCTRL1,
+ CTR_BY_PWRCTRL2,
+ CTR_BY_PWRCTRL3,
+};
+
+enum rk806_dvs_ctr_sel {
+ CTR_SEL_NO_EFFECT,
+ CTR_SEL_DVS_START1,
+ CTR_SEL_DVS_START2,
+ CTR_SEL_DVS_START3,
+};
+
+enum rk806_pin_dr_sel {
+ RK806_PIN_INPUT,
+ RK806_PIN_OUTPUT,
+};
+
+#define RK806_INT_POL_MSK BIT(1)
+#define RK806_INT_POL_H BIT(1)
+#define RK806_INT_POL_L 0
+
+/* SYS_CFG3 */
+#define RK806_RST_FUN_MSK GENMASK(7, 6)
+#define RK806_SLAVE_RESTART_FUN_MSK BIT(1)
+#define RK806_SLAVE_RESTART_FUN_EN BIT(1)
+#define RK806_SLAVE_RESTART_FUN_OFF 0
+
+#define RK806_SYS_ENB2_2M_MSK BIT(1)
+#define RK806_SYS_ENB2_2M_EN BIT(1)
+#define RK806_SYS_ENB2_2M_OFF 0
+
+enum rk806_int_fun {
+ RK806_INT_ONLY,
+ RK806_INT_ADN_WKUP,
+};
+
+enum rk806_dvs_mode {
+ RK806_DVS_NOT_SUPPORT,
+ RK806_DVS_START1,
+ RK806_DVS_START2,
+ RK806_DVS_START3,
+ RK806_DVS_PWRCTRL1,
+ RK806_DVS_PWRCTRL2,
+ RK806_DVS_PWRCTRL3,
+ RK806_DVS_START_PWRCTR1,
+ RK806_DVS_START_PWRCTR2,
+ RK806_DVS_START_PWRCTR3,
+ RK806_DVS_END,
+};
+
/* RK808 IRQ Definitions */
#define RK808_IRQ_VOUT_LO 0
#define RK808_IRQ_VB_LO 1
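
The RK806_CMD_* bits above describe the first byte of an SPI transfer. A
command-builder sketch follows, where the three-byte frame layout
{cmd, reg, RK806_REG_H} is an assumption for illustration:

	static void example_build_read(u8 *frame, u8 reg, u8 len)
	{
		/* length field encodes (len - 1); CRC disabled */
		frame[0] = RK806_CMD_READ | RK806_CMD_CRC_DIS |
			   ((len - 1) & RK806_CMD_LEN_MSK);
		frame[1] = reg;
		frame[2] = RK806_REG_H;
	}
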
@@ -290,7 +925,9 @@ enum rk818_reg {
#define SWITCH2_EN BIT(6)
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
+#define DEV_RST BIT(2)
#define DEV_OFF BIT(0)
+#define RTC_STOP BIT(0)
#define VB_LO_ACT BIT(4)
#define VB_LO_SEL_3500MV (7 << 0)
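
DEV_OFF and DEV_RST above live in the device-control register. A shutdown
sketch using the RK805 address defined earlier in this patch; the
surrounding pm_power_off wiring is assumed:

	static int example_poweroff(struct regmap *regmap)
	{
		/* Request a PMIC power-off by setting DEV_OFF */
		return regmap_update_bits(regmap, RK805_DEV_CTRL_REG,
					  DEV_OFF, DEV_OFF);
	}
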
@@ -298,6 +935,359 @@ enum rk818_reg {
#define VOUT_LO_INT BIT(0)
#define CLK32KOUT2_EN BIT(0)
+#define TEMP105C 0x08
+#define TEMP115C 0x0c
+#define TEMP_HOTDIE_MSK 0x0c
+#define SLP_SD_MSK (0x3 << 2)
+#define SHUTDOWN_FUN (0x2 << 2)
+#define SLEEP_FUN (0x1 << 2)
+#define RK8XX_ID_MSK 0xfff0
+#define PWM_MODE_MSK BIT(7)
+#define FPWM_MODE BIT(7)
+#define AUTO_PWM_MODE 0
+
+enum rk817_reg_id {
+ RK817_ID_DCDC1 = 0,
+ RK817_ID_DCDC2,
+ RK817_ID_DCDC3,
+ RK817_ID_DCDC4,
+ RK817_ID_LDO1,
+ RK817_ID_LDO2,
+ RK817_ID_LDO3,
+ RK817_ID_LDO4,
+ RK817_ID_LDO5,
+ RK817_ID_LDO6,
+ RK817_ID_LDO7,
+ RK817_ID_LDO8,
+ RK817_ID_LDO9,
+ RK817_ID_BOOST,
+ RK817_ID_BOOST_OTG_SW,
+ RK817_NUM_REGULATORS
+};
+
+enum rk809_reg_id {
+ RK809_ID_DCDC5 = RK817_ID_BOOST,
+ RK809_ID_SW1,
+ RK809_ID_SW2,
+ RK809_NUM_REGULATORS
+};
+
+#define RK817_SECONDS_REG 0x00
+#define RK817_MINUTES_REG 0x01
+#define RK817_HOURS_REG 0x02
+#define RK817_DAYS_REG 0x03
+#define RK817_MONTHS_REG 0x04
+#define RK817_YEARS_REG 0x05
+#define RK817_WEEKS_REG 0x06
+#define RK817_ALARM_SECONDS_REG 0x07
+#define RK817_ALARM_MINUTES_REG 0x08
+#define RK817_ALARM_HOURS_REG 0x09
+#define RK817_ALARM_DAYS_REG 0x0a
+#define RK817_ALARM_MONTHS_REG 0x0b
+#define RK817_ALARM_YEARS_REG 0x0c
+#define RK817_RTC_CTRL_REG 0xd
+#define RK817_RTC_STATUS_REG 0xe
+#define RK817_RTC_INT_REG 0xf
+#define RK817_RTC_COMP_LSB_REG 0x10
+#define RK817_RTC_COMP_MSB_REG 0x11
+
+/* RK817 Codec Registers */
+#define RK817_CODEC_DTOP_VUCTL 0x12
+#define RK817_CODEC_DTOP_VUCTIME 0x13
+#define RK817_CODEC_DTOP_LPT_SRST 0x14
+#define RK817_CODEC_DTOP_DIGEN_CLKE 0x15
+#define RK817_CODEC_AREF_RTCFG0 0x16
+#define RK817_CODEC_AREF_RTCFG1 0x17
+#define RK817_CODEC_AADC_CFG0 0x18
+#define RK817_CODEC_AADC_CFG1 0x19
+#define RK817_CODEC_DADC_VOLL 0x1a
+#define RK817_CODEC_DADC_VOLR 0x1b
+#define RK817_CODEC_DADC_SR_ACL0 0x1e
+#define RK817_CODEC_DADC_ALC1 0x1f
+#define RK817_CODEC_DADC_ALC2 0x20
+#define RK817_CODEC_DADC_NG 0x21
+#define RK817_CODEC_DADC_HPF 0x22
+#define RK817_CODEC_DADC_RVOLL 0x23
+#define RK817_CODEC_DADC_RVOLR 0x24
+#define RK817_CODEC_AMIC_CFG0 0x27
+#define RK817_CODEC_AMIC_CFG1 0x28
+#define RK817_CODEC_DMIC_PGA_GAIN 0x29
+#define RK817_CODEC_DMIC_LMT1 0x2a
+#define RK817_CODEC_DMIC_LMT2 0x2b
+#define RK817_CODEC_DMIC_NG1 0x2c
+#define RK817_CODEC_DMIC_NG2 0x2d
+#define RK817_CODEC_ADAC_CFG0 0x2e
+#define RK817_CODEC_ADAC_CFG1 0x2f
+#define RK817_CODEC_DDAC_POPD_DACST 0x30
+#define RK817_CODEC_DDAC_VOLL 0x31
+#define RK817_CODEC_DDAC_VOLR 0x32
+#define RK817_CODEC_DDAC_SR_LMT0 0x35
+#define RK817_CODEC_DDAC_LMT1 0x36
+#define RK817_CODEC_DDAC_LMT2 0x37
+#define RK817_CODEC_DDAC_MUTE_MIXCTL 0x38
+#define RK817_CODEC_DDAC_RVOLL 0x39
+#define RK817_CODEC_DDAC_RVOLR 0x3a
+#define RK817_CODEC_AHP_ANTI0 0x3b
+#define RK817_CODEC_AHP_ANTI1 0x3c
+#define RK817_CODEC_AHP_CFG0 0x3d
+#define RK817_CODEC_AHP_CFG1 0x3e
+#define RK817_CODEC_AHP_CP 0x3f
+#define RK817_CODEC_ACLASSD_CFG1 0x40
+#define RK817_CODEC_ACLASSD_CFG2 0x41
+#define RK817_CODEC_APLL_CFG0 0x42
+#define RK817_CODEC_APLL_CFG1 0x43
+#define RK817_CODEC_APLL_CFG2 0x44
+#define RK817_CODEC_APLL_CFG3 0x45
+#define RK817_CODEC_APLL_CFG4 0x46
+#define RK817_CODEC_APLL_CFG5 0x47
+#define RK817_CODEC_DI2S_CKM 0x48
+#define RK817_CODEC_DI2S_RSD 0x49
+#define RK817_CODEC_DI2S_RXCR1 0x4a
+#define RK817_CODEC_DI2S_RXCR2 0x4b
+#define RK817_CODEC_DI2S_RXCMD_TSD 0x4c
+#define RK817_CODEC_DI2S_TXCR1 0x4d
+#define RK817_CODEC_DI2S_TXCR2 0x4e
+#define RK817_CODEC_DI2S_TXCR3_TXCMD 0x4f
+
+/* RK817_CODEC_DI2S_CKM */
+#define RK817_I2S_MODE_MASK (0x1 << 0)
+#define RK817_I2S_MODE_MST (0x1 << 0)
+#define RK817_I2S_MODE_SLV (0x0 << 0)
+
+/* RK817_CODEC_DDAC_MUTE_MIXCTL */
+#define DACMT_MASK (0x1 << 0)
+#define DACMT_ENABLE (0x1 << 0)
+#define DACMT_DISABLE (0x0 << 0)
+
+/* RK817_CODEC_DI2S_RXCR2 */
+#define VDW_RX_24BITS (0x17)
+#define VDW_RX_16BITS (0x0f)
+
+/* RK817_CODEC_DI2S_TXCR2 */
+#define VDW_TX_24BITS (0x17)
+#define VDW_TX_16BITS (0x0f)
+
+/* RK817_CODEC_AMIC_CFG0 */
+#define MIC_DIFF_MASK (0x1 << 7)
+#define MIC_DIFF_DIS (0x0 << 7)
+#define MIC_DIFF_EN (0x1 << 7)
+
+/* RK817 Battery Registers */
+#define RK817_GAS_GAUGE_ADC_CONFIG0 0x50
+#define RK817_GG_EN (0x1 << 7)
+#define RK817_SYS_VOL_ADC_EN (0x1 << 6)
+#define RK817_TS_ADC_EN (0x1 << 5)
+#define RK817_USB_VOL_ADC_EN (0x1 << 4)
+#define RK817_BAT_VOL_ADC_EN (0x1 << 3)
+#define RK817_BAT_CUR_ADC_EN (0x1 << 2)
+
+#define RK817_GAS_GAUGE_ADC_CONFIG1 0x55
+
+#define RK817_VOL_CUR_CALIB_UPD BIT(7)
+
+#define RK817_GAS_GAUGE_GG_CON 0x56
+#define RK817_GAS_GAUGE_GG_STS 0x57
+
+#define RK817_BAT_CON (0x1 << 4)
+#define RK817_RELAX_VOL_UPD (0x3 << 2)
+#define RK817_RELAX_STS (0x1 << 1)
+
+#define RK817_GAS_GAUGE_RELAX_THRE_H 0x58
+#define RK817_GAS_GAUGE_RELAX_THRE_L 0x59
+#define RK817_GAS_GAUGE_OCV_THRE_VOL 0x62
+#define RK817_GAS_GAUGE_OCV_VOL_H 0x63
+#define RK817_GAS_GAUGE_OCV_VOL_L 0x64
+#define RK817_GAS_GAUGE_PWRON_VOL_H 0x6b
+#define RK817_GAS_GAUGE_PWRON_VOL_L 0x6c
+#define RK817_GAS_GAUGE_PWRON_CUR_H 0x6d
+#define RK817_GAS_GAUGE_PWRON_CUR_L 0x6e
+#define RK817_GAS_GAUGE_OFF_CNT 0x6f
+#define RK817_GAS_GAUGE_Q_INIT_H3 0x70
+#define RK817_GAS_GAUGE_Q_INIT_H2 0x71
+#define RK817_GAS_GAUGE_Q_INIT_L1 0x72
+#define RK817_GAS_GAUGE_Q_INIT_L0 0x73
+#define RK817_GAS_GAUGE_Q_PRES_H3 0x74
+#define RK817_GAS_GAUGE_Q_PRES_H2 0x75
+#define RK817_GAS_GAUGE_Q_PRES_L1 0x76
+#define RK817_GAS_GAUGE_Q_PRES_L0 0x77
+#define RK817_GAS_GAUGE_BAT_VOL_H 0x78
+#define RK817_GAS_GAUGE_BAT_VOL_L 0x79
+#define RK817_GAS_GAUGE_BAT_CUR_H 0x7a
+#define RK817_GAS_GAUGE_BAT_CUR_L 0x7b
+#define RK817_GAS_GAUGE_USB_VOL_H 0x7e
+#define RK817_GAS_GAUGE_USB_VOL_L 0x7f
+#define RK817_GAS_GAUGE_SYS_VOL_H 0x80
+#define RK817_GAS_GAUGE_SYS_VOL_L 0x81
+#define RK817_GAS_GAUGE_Q_MAX_H3 0x82
+#define RK817_GAS_GAUGE_Q_MAX_H2 0x83
+#define RK817_GAS_GAUGE_Q_MAX_L1 0x84
+#define RK817_GAS_GAUGE_Q_MAX_L0 0x85
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_H 0x8f
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_L 0x90
+#define RK817_GAS_GAUGE_CAL_OFFSET_H 0x91
+#define RK817_GAS_GAUGE_CAL_OFFSET_L 0x92
+#define RK817_GAS_GAUGE_VCALIB0_H 0x93
+#define RK817_GAS_GAUGE_VCALIB0_L 0x94
+#define RK817_GAS_GAUGE_VCALIB1_H 0x95
+#define RK817_GAS_GAUGE_VCALIB1_L 0x96
+#define RK817_GAS_GAUGE_IOFFSET_H 0x97
+#define RK817_GAS_GAUGE_IOFFSET_L 0x98
+#define RK817_GAS_GAUGE_BAT_R1 0x9a
+#define RK817_GAS_GAUGE_BAT_R2 0x9b
+#define RK817_GAS_GAUGE_BAT_R3 0x9c
+#define RK817_GAS_GAUGE_DATA0 0x9d
+#define RK817_GAS_GAUGE_DATA1 0x9e
+#define RK817_GAS_GAUGE_DATA2 0x9f
+#define RK817_GAS_GAUGE_DATA3 0xa0
+#define RK817_GAS_GAUGE_DATA4 0xa1
+#define RK817_GAS_GAUGE_DATA5 0xa2
+#define RK817_GAS_GAUGE_CUR_ADC_K0 0xb0
+
+#define RK817_POWER_EN_REG(i) (0xb1 + (i))
+#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i))
+
+#define RK817_POWER_CONFIG (0xb9)
+
+#define RK817_BUCK_CONFIG_REG(i) (0xba + (i) * 3)
+
+#define RK817_BUCK1_ON_VSEL_REG 0xBB
+#define RK817_BUCK1_SLP_VSEL_REG 0xBC
+
+#define RK817_BUCK2_CONFIG_REG 0xBD
+#define RK817_BUCK2_ON_VSEL_REG 0xBE
+#define RK817_BUCK2_SLP_VSEL_REG 0xBF
+
+#define RK817_BUCK3_CONFIG_REG 0xC0
+#define RK817_BUCK3_ON_VSEL_REG 0xC1
+#define RK817_BUCK3_SLP_VSEL_REG 0xC2
+
+#define RK817_BUCK4_CONFIG_REG 0xC3
+#define RK817_BUCK4_ON_VSEL_REG 0xC4
+#define RK817_BUCK4_SLP_VSEL_REG 0xC5
+
+#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2)
+#define RK817_BOOST_OTG_CFG (0xde)
+
+#define RK817_PMIC_CHRG_OUT 0xe4
+#define RK817_CHRG_VOL_SEL (0x07 << 4)
+#define RK817_CHRG_CUR_SEL (0x07 << 0)
+
+#define RK817_PMIC_CHRG_IN 0xe5
+#define RK817_USB_VLIM_EN (0x01 << 7)
+#define RK817_USB_VLIM_SEL (0x07 << 4)
+#define RK817_USB_ILIM_EN (0x01 << 3)
+#define RK817_USB_ILIM_SEL (0x07 << 0)
+#define RK817_PMIC_CHRG_TERM 0xe6
+#define RK817_CHRG_TERM_ANA_DIG (0x01 << 2)
+#define RK817_CHRG_TERM_ANA_SEL (0x03 << 0)
+#define RK817_CHRG_EN (0x01 << 6)
+
+#define RK817_PMIC_CHRG_STS 0xeb
+#define RK817_BAT_EXS BIT(7)
+#define RK817_CHG_STS (0x07 << 4)
+
+#define RK817_ID_MSB 0xed
+#define RK817_ID_LSB 0xee
+
+#define RK817_SYS_STS 0xf0
+#define RK817_PLUG_IN_STS (0x1 << 6)
+
+#define RK817_SYS_CFG(i) (0xf1 + (i))
+
+#define RK817_ON_SOURCE_REG 0xf5
+#define RK817_OFF_SOURCE_REG 0xf6
+
+/* INTERRUPT REGISTER */
+#define RK817_INT_STS_REG0 0xf8
+#define RK817_INT_STS_MSK_REG0 0xf9
+#define RK817_INT_STS_REG1 0xfa
+#define RK817_INT_STS_MSK_REG1 0xfb
+#define RK817_INT_STS_REG2 0xfc
+#define RK817_INT_STS_MSK_REG2 0xfd
+#define RK817_GPIO_INT_CFG 0xfe
+
+/* IRQ Definitions */
+#define RK817_IRQ_PWRON_FALL 0
+#define RK817_IRQ_PWRON_RISE 1
+#define RK817_IRQ_PWRON 2
+#define RK817_IRQ_PWMON_LP 3
+#define RK817_IRQ_HOTDIE 4
+#define RK817_IRQ_RTC_ALARM 5
+#define RK817_IRQ_RTC_PERIOD 6
+#define RK817_IRQ_VB_LO 7
+#define RK817_IRQ_PLUG_IN 8
+#define RK817_IRQ_PLUG_OUT 9
+#define RK817_IRQ_CHRG_TERM 10
+#define RK817_IRQ_CHRG_TIME 11
+#define RK817_IRQ_CHRG_TS 12
+#define RK817_IRQ_USB_OV 13
+#define RK817_IRQ_CHRG_IN_CLMP 14
+#define RK817_IRQ_BAT_DIS_ILIM 15
+#define RK817_IRQ_GATE_GPIO 16
+#define RK817_IRQ_TS_GPIO 17
+#define RK817_IRQ_CODEC_PD 18
+#define RK817_IRQ_CODEC_PO 19
+#define RK817_IRQ_CLASSD_MUTE_DONE 20
+#define RK817_IRQ_CLASSD_OCP 21
+#define RK817_IRQ_BAT_OVP 22
+#define RK817_IRQ_CHRG_BAT_HI 23
+#define RK817_IRQ_END (RK817_IRQ_CHRG_BAT_HI + 1)
+
+/*
+ * rtc_ctrl 0xd
+ * same as the RK808, except bit 4
+ */
+#define RK817_RTC_CTRL_RSV4 BIT(4)
+
+/* power config 0xb9 */
+#define RK817_BUCK3_FB_RES_MSK BIT(6)
+#define RK817_BUCK3_FB_RES_INTER BIT(6)
+#define RK817_BUCK3_FB_RES_EXT 0
+
+/* buck config 0xba */
+#define RK817_RAMP_RATE_OFFSET 6
+#define RK817_RAMP_RATE_MASK (0x3 << RK817_RAMP_RATE_OFFSET)
+#define RK817_RAMP_RATE_3MV_PER_US (0x0 << RK817_RAMP_RATE_OFFSET)
+#define RK817_RAMP_RATE_6_3MV_PER_US (0x1 << RK817_RAMP_RATE_OFFSET)
+#define RK817_RAMP_RATE_12_5MV_PER_US (0x2 << RK817_RAMP_RATE_OFFSET)
+#define RK817_RAMP_RATE_25MV_PER_US (0x3 << RK817_RAMP_RATE_OFFSET)
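(Editorial note, not part of the patch: the ramp-rate field above is a plain read-modify-write target. A minimal sketch, assuming a live regmap such as the one held in the struct rk808 declared later in this header; the helper name is made up.)

	#include <linux/regmap.h>

	/* Sketch: select the 12.5 mV/us ramp for BUCK1 (index 0) on an RK817. */
	static int rk817_set_buck1_ramp(struct regmap *regmap)
	{
		return regmap_update_bits(regmap, RK817_BUCK_CONFIG_REG(0),
					  RK817_RAMP_RATE_MASK,
					  RK817_RAMP_RATE_12_5MV_PER_US);
	}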
+
+/* sys_cfg1 0xf2 */
+#define RK817_HOTDIE_TEMP_MSK (0x3 << 4)
+#define RK817_HOTDIE_85 (0x0 << 4)
+#define RK817_HOTDIE_95 (0x1 << 4)
+#define RK817_HOTDIE_105 (0x2 << 4)
+#define RK817_HOTDIE_115 (0x3 << 4)
+
+#define RK817_TSD_TEMP_MSK BIT(6)
+#define RK817_TSD_140 0
+#define RK817_TSD_160 BIT(6)
+
+#define RK817_CLK32KOUT2_EN BIT(7)
+
+/* sys_cfg3 0xf4 */
+#define RK817_SLPPIN_FUNC_MSK (0x3 << 3)
+#define SLPPIN_NULL_FUN (0x0 << 3)
+#define SLPPIN_SLP_FUN (0x1 << 3)
+#define SLPPIN_DN_FUN (0x2 << 3)
+#define SLPPIN_RST_FUN (0x3 << 3)
+
+#define RK817_RST_FUNC_MSK (0x3 << 6)
+#define RK817_RST_FUNC_SFT (6)
+#define RK817_RST_FUNC_CNT (3)
+#define RK817_RST_FUNC_DEV (0) /* reset the dev */
+#define RK817_RST_FUNC_REG (0x1 << 6) /* reset the reg only */
+
+#define RK817_SLPPOL_MSK BIT(5)
+#define RK817_SLPPOL_H BIT(5)
+#define RK817_SLPPOL_L (0)
+
+/* gpio&int 0xfe */
+#define RK817_INT_POL_MSK BIT(1)
+#define RK817_INT_POL_H BIT(1)
+#define RK817_INT_POL_L 0
+#define RK809_BUCK5_CONFIG(i) (RK817_BOOST_OTG_CFG + (i) * 1)
+
enum {
BUCK_ILMIN_50MA,
BUCK_ILMIN_100MA,
@@ -321,16 +1311,48 @@ enum {
};
enum {
+ RK805_BUCK1_2_ILMAX_2500MA,
+ RK805_BUCK1_2_ILMAX_3000MA,
+ RK805_BUCK1_2_ILMAX_3500MA,
+ RK805_BUCK1_2_ILMAX_4000MA,
+};
+
+enum {
+ RK805_BUCK3_ILMAX_1500MA,
+ RK805_BUCK3_ILMAX_2000MA,
+ RK805_BUCK3_ILMAX_2500MA,
+ RK805_BUCK3_ILMAX_3000MA,
+};
+
+enum {
+ RK805_BUCK4_ILMAX_2000MA,
+ RK805_BUCK4_ILMAX_2500MA,
+ RK805_BUCK4_ILMAX_3000MA,
+ RK805_BUCK4_ILMAX_3500MA,
+};
+
+enum {
+ RK805_ID = 0x8050,
+ RK806_ID = 0x8060,
RK808_ID = 0x0000,
- RK818_ID = 0x8181,
+ RK809_ID = 0x8090,
+ RK816_ID = 0x8160,
+ RK817_ID = 0x8170,
+ RK818_ID = 0x8180,
};
struct rk808 {
- struct i2c_client *i2c;
+ struct device *dev;
struct regmap_irq_chip_data *irq_data;
struct regmap *regmap;
long variant;
const struct regmap_config *regmap_cfg;
const struct regmap_irq_chip *regmap_irq_chip;
};
+
+void rk8xx_shutdown(struct device *dev);
+int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap *regmap);
+int rk8xx_suspend(struct device *dev);
+int rk8xx_resume(struct device *dev);
+
#endif /* __LINUX_REGULATOR_RK808_H */
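(Editorial note: the rk8xx_probe/suspend/resume/shutdown declarations above, together with the switch from an i2c_client to a plain struct device, define the contract between the shared rk8xx core and its bus shims. A minimal sketch of an I2C shim follows; the regmap config name is an assumption, and the variant would normally come from device match data rather than being hard-coded.)

	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/regmap.h>

	/* Hypothetical I2C front-end for the rk8xx core; sketch only. */
	static int rk8xx_i2c_probe(struct i2c_client *client)
	{
		struct regmap *regmap;

		/* rk8xx_i2c_regmap_cfg is assumed, not defined in this header */
		regmap = devm_regmap_init_i2c(client, &rk8xx_i2c_regmap_cfg);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		/* The variant would normally come from of_device_get_match_data() */
		return rk8xx_probe(&client->dev, RK809_ID, client->irq, regmap);
	}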
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index d61bc58aba8a..aacb6d51e99c 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -1,14 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD core driver for Ricoh RN5T618 PMIC
*
* Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_MFD_RN5T618_H
@@ -145,6 +139,17 @@
#define RN5T618_INTPOL 0x9c
#define RN5T618_INTEN 0x9d
#define RN5T618_INTMON 0x9e
+
+#define RN5T618_RTC_SECONDS 0xA0
+#define RN5T618_RTC_MDAY 0xA4
+#define RN5T618_RTC_MONTH 0xA5
+#define RN5T618_RTC_YEAR 0xA6
+#define RN5T618_RTC_ADJUST 0xA7
+#define RN5T618_RTC_ALARM_Y_SEC 0xA8
+#define RN5T618_RTC_DAL_MONTH 0xAC
+#define RN5T618_RTC_CTRL1 0xAE
+#define RN5T618_RTC_CTRL2 0xAF
+
#define RN5T618_PREVINDAC 0xb0
#define RN5T618_BATDAC 0xb1
#define RN5T618_CHGCTL1 0xb3
@@ -183,6 +188,7 @@
#define RN5T618_CHGOSCSCORESET3 0xd7
#define RN5T618_CHGOSCFREQSET1 0xd8
#define RN5T618_CHGOSCFREQSET2 0xd9
+#define RN5T618_GCHGDET 0xda
#define RN5T618_CONTROL 0xe0
#define RN5T618_SOC 0xe1
#define RN5T618_RE_CAP_H 0xe2
@@ -221,6 +227,15 @@
#define RN5T618_WATCHDOG_WDOGTIM_S 0
#define RN5T618_PWRIRQ_IR_WDOG BIT(6)
+#define RN5T618_POFFHIS_PWRON BIT(0)
+#define RN5T618_POFFHIS_TSHUT BIT(1)
+#define RN5T618_POFFHIS_VINDET BIT(2)
+#define RN5T618_POFFHIS_IODET BIT(3)
+#define RN5T618_POFFHIS_CPU BIT(4)
+#define RN5T618_POFFHIS_WDG BIT(5)
+#define RN5T618_POFFHIS_DCLIM BIT(6)
+#define RN5T618_POFFHIS_N_OE BIT(7)
+
enum {
RN5T618_DCDC1,
RN5T618_DCDC2,
@@ -248,9 +263,24 @@ enum {
RC5T619,
};
+/* RN5T618 IRQ definitions */
+enum {
+ RN5T618_IRQ_SYS = 0,
+ RN5T618_IRQ_DCDC,
+ RN5T618_IRQ_RTC,
+ RN5T618_IRQ_ADC,
+ RN5T618_IRQ_GPIO,
+ RN5T618_IRQ_CHG,
+ RN5T618_NR_IRQS,
+};
+
struct rn5t618 {
struct regmap *regmap;
+ struct device *dev;
long variant;
+
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
};
#endif /* __LINUX_MFD_RN5T618_H */
diff --git a/include/linux/mfd/rohm-bd71815.h b/include/linux/mfd/rohm-bd71815.h
new file mode 100644
index 000000000000..ec6d9612bebe
--- /dev/null
+++ b/include/linux/mfd/rohm-bd71815.h
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2021 ROHM Semiconductors.
+ *
+ * Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+ *
+ * Copyright 2014 Embest Technology Co. Ltd. Inc.
+ *
+ * Author: yanglsh@embest-tech.com
+ */
+
+#ifndef _MFD_BD71815_H
+#define _MFD_BD71815_H
+
+#include <linux/regmap.h>
+
+enum {
+ BD71815_BUCK1 = 0,
+ BD71815_BUCK2,
+ BD71815_BUCK3,
+ BD71815_BUCK4,
+ BD71815_BUCK5,
+ /* General Purpose */
+ BD71815_LDO1,
+ BD71815_LDO2,
+ BD71815_LDO3,
+ /* LDOs for SD Card and SD Card Interface */
+ BD71815_LDO4,
+ BD71815_LDO5,
+ /* LDO for DDR Reference Voltage */
+ BD71815_LDODVREF,
+ /* LDO for Low-Power State Retention */
+ BD71815_LDOLPSR,
+ BD71815_WLED,
+ BD71815_REGULATOR_CNT,
+};
+
+#define BD71815_SUPPLY_STATE_ENABLED 0x1
+
+enum {
+ BD71815_REG_DEVICE = 0,
+ BD71815_REG_PWRCTRL,
+ BD71815_REG_BUCK1_MODE,
+ BD71815_REG_BUCK2_MODE,
+ BD71815_REG_BUCK3_MODE,
+ BD71815_REG_BUCK4_MODE,
+ BD71815_REG_BUCK5_MODE,
+ BD71815_REG_BUCK1_VOLT_H,
+ BD71815_REG_BUCK1_VOLT_L,
+ BD71815_REG_BUCK2_VOLT_H,
+ BD71815_REG_BUCK2_VOLT_L,
+ BD71815_REG_BUCK3_VOLT,
+ BD71815_REG_BUCK4_VOLT,
+ BD71815_REG_BUCK5_VOLT,
+ BD71815_REG_LED_CTRL,
+ BD71815_REG_LED_DIMM,
+ BD71815_REG_LDO_MODE1,
+ BD71815_REG_LDO_MODE2,
+ BD71815_REG_LDO_MODE3,
+ BD71815_REG_LDO_MODE4,
+ BD71815_REG_LDO1_VOLT,
+ BD71815_REG_LDO2_VOLT,
+ BD71815_REG_LDO3_VOLT,
+ BD71815_REG_LDO4_VOLT,
+ BD71815_REG_LDO5_VOLT_H,
+ BD71815_REG_LDO5_VOLT_L,
+ BD71815_REG_BUCK_PD_DIS,
+ BD71815_REG_LDO_PD_DIS,
+ BD71815_REG_GPO,
+ BD71815_REG_OUT32K,
+ BD71815_REG_SEC,
+ BD71815_REG_MIN,
+ BD71815_REG_HOUR,
+ BD71815_REG_WEEK,
+ BD71815_REG_DAY,
+ BD71815_REG_MONTH,
+ BD71815_REG_YEAR,
+ BD71815_REG_ALM0_SEC,
+
+ BD71815_REG_ALM1_SEC = 0x2C,
+
+ BD71815_REG_ALM0_MASK = 0x33,
+ BD71815_REG_ALM1_MASK,
+ BD71815_REG_ALM2,
+ BD71815_REG_TRIM,
+ BD71815_REG_CONF,
+ BD71815_REG_SYS_INIT,
+ BD71815_REG_CHG_STATE,
+ BD71815_REG_CHG_LAST_STATE,
+ BD71815_REG_BAT_STAT,
+ BD71815_REG_DCIN_STAT,
+ BD71815_REG_VSYS_STAT,
+ BD71815_REG_CHG_STAT,
+ BD71815_REG_CHG_WDT_STAT,
+ BD71815_REG_BAT_TEMP,
+ BD71815_REG_IGNORE_0,
+ BD71815_REG_INHIBIT_0,
+ BD71815_REG_DCIN_CLPS,
+ BD71815_REG_VSYS_REG,
+ BD71815_REG_VSYS_MAX,
+ BD71815_REG_VSYS_MIN,
+ BD71815_REG_CHG_SET1,
+ BD71815_REG_CHG_SET2,
+ BD71815_REG_CHG_WDT_PRE,
+ BD71815_REG_CHG_WDT_FST,
+ BD71815_REG_CHG_IPRE,
+ BD71815_REG_CHG_IFST,
+ BD71815_REG_CHG_IFST_TERM,
+ BD71815_REG_CHG_VPRE,
+ BD71815_REG_CHG_VBAT_1,
+ BD71815_REG_CHG_VBAT_2,
+ BD71815_REG_CHG_VBAT_3,
+ BD71815_REG_CHG_LED_1,
+ BD71815_REG_VF_TH,
+ BD71815_REG_BAT_SET_1,
+ BD71815_REG_BAT_SET_2,
+ BD71815_REG_BAT_SET_3,
+ BD71815_REG_ALM_VBAT_TH_U,
+ BD71815_REG_ALM_VBAT_TH_L,
+ BD71815_REG_ALM_DCIN_TH,
+ BD71815_REG_ALM_VSYS_TH,
+ BD71815_REG_VM_IBAT_U,
+ BD71815_REG_VM_IBAT_L,
+ BD71815_REG_VM_VBAT_U,
+ BD71815_REG_VM_VBAT_L,
+ BD71815_REG_VM_BTMP,
+ BD71815_REG_VM_VTH,
+ BD71815_REG_VM_DCIN_U,
+ BD71815_REG_VM_DCIN_L,
+ BD71815_REG_VM_VSYS,
+ BD71815_REG_VM_VF,
+ BD71815_REG_VM_OCI_PRE_U,
+ BD71815_REG_VM_OCI_PRE_L,
+ BD71815_REG_VM_OCV_PRE_U,
+ BD71815_REG_VM_OCV_PRE_L,
+ BD71815_REG_VM_OCI_PST_U,
+ BD71815_REG_VM_OCI_PST_L,
+ BD71815_REG_VM_OCV_PST_U,
+ BD71815_REG_VM_OCV_PST_L,
+ BD71815_REG_VM_SA_VBAT_U,
+ BD71815_REG_VM_SA_VBAT_L,
+ BD71815_REG_VM_SA_IBAT_U,
+ BD71815_REG_VM_SA_IBAT_L,
+ BD71815_REG_CC_CTRL,
+ BD71815_REG_CC_BATCAP1_TH_U,
+ BD71815_REG_CC_BATCAP1_TH_L,
+ BD71815_REG_CC_BATCAP2_TH_U,
+ BD71815_REG_CC_BATCAP2_TH_L,
+ BD71815_REG_CC_BATCAP3_TH_U,
+ BD71815_REG_CC_BATCAP3_TH_L,
+ BD71815_REG_CC_STAT,
+ BD71815_REG_CC_CCNTD_3,
+ BD71815_REG_CC_CCNTD_2,
+ BD71815_REG_CC_CCNTD_1,
+ BD71815_REG_CC_CCNTD_0,
+ BD71815_REG_CC_CURCD_U,
+ BD71815_REG_CC_CURCD_L,
+ BD71815_REG_VM_OCUR_THR_1,
+ BD71815_REG_VM_OCUR_DUR_1,
+ BD71815_REG_VM_OCUR_THR_2,
+ BD71815_REG_VM_OCUR_DUR_2,
+ BD71815_REG_VM_OCUR_THR_3,
+ BD71815_REG_VM_OCUR_DUR_3,
+ BD71815_REG_VM_OCUR_MON,
+ BD71815_REG_VM_BTMP_OV_THR,
+ BD71815_REG_VM_BTMP_OV_DUR,
+ BD71815_REG_VM_BTMP_LO_THR,
+ BD71815_REG_VM_BTMP_LO_DUR,
+ BD71815_REG_VM_BTMP_MON,
+ BD71815_REG_INT_EN_01,
+
+ BD71815_REG_INT_EN_11 = 0x95,
+ BD71815_REG_INT_EN_12,
+ BD71815_REG_INT_STAT,
+ BD71815_REG_INT_STAT_01,
+ BD71815_REG_INT_STAT_02,
+ BD71815_REG_INT_STAT_03,
+ BD71815_REG_INT_STAT_04,
+ BD71815_REG_INT_STAT_05,
+ BD71815_REG_INT_STAT_06,
+ BD71815_REG_INT_STAT_07,
+ BD71815_REG_INT_STAT_08,
+ BD71815_REG_INT_STAT_09,
+ BD71815_REG_INT_STAT_10,
+ BD71815_REG_INT_STAT_11,
+ BD71815_REG_INT_STAT_12,
+ BD71815_REG_INT_UPDATE,
+
+ BD71815_REG_VM_VSYS_U = 0xC0,
+ BD71815_REG_VM_VSYS_L,
+ BD71815_REG_VM_SA_VSYS_U,
+ BD71815_REG_VM_SA_VSYS_L,
+
+ BD71815_REG_VM_SA_IBAT_MIN_U = 0xD0,
+ BD71815_REG_VM_SA_IBAT_MIN_L,
+ BD71815_REG_VM_SA_IBAT_MAX_U,
+ BD71815_REG_VM_SA_IBAT_MAX_L,
+ BD71815_REG_VM_SA_VBAT_MIN_U,
+ BD71815_REG_VM_SA_VBAT_MIN_L,
+ BD71815_REG_VM_SA_VBAT_MAX_U,
+ BD71815_REG_VM_SA_VBAT_MAX_L,
+ BD71815_REG_VM_SA_VSYS_MIN_U,
+ BD71815_REG_VM_SA_VSYS_MIN_L,
+ BD71815_REG_VM_SA_VSYS_MAX_U,
+ BD71815_REG_VM_SA_VSYS_MAX_L,
+ BD71815_REG_VM_SA_MINMAX_CLR,
+
+ BD71815_REG_REX_CCNTD_3 = 0xE0,
+ BD71815_REG_REX_CCNTD_2,
+ BD71815_REG_REX_CCNTD_1,
+ BD71815_REG_REX_CCNTD_0,
+ BD71815_REG_REX_SA_VBAT_U,
+ BD71815_REG_REX_SA_VBAT_L,
+ BD71815_REG_REX_CTRL_1,
+ BD71815_REG_REX_CTRL_2,
+ BD71815_REG_FULL_CCNTD_3,
+ BD71815_REG_FULL_CCNTD_2,
+ BD71815_REG_FULL_CCNTD_1,
+ BD71815_REG_FULL_CCNTD_0,
+ BD71815_REG_FULL_CTRL,
+
+ BD71815_REG_CCNTD_CHG_3 = 0xF0,
+ BD71815_REG_CCNTD_CHG_2,
+
+ BD71815_REG_TEST_MODE = 0xFE,
+ BD71815_MAX_REGISTER,
+};
+
+/* BD71815_REG_BUCK1_MODE bits */
+#define BD71815_BUCK_RAMPRATE_MASK 0xC0
+#define BD71815_BUCK_RAMPRATE_10P00MV 0x0
+#define BD71815_BUCK_RAMPRATE_5P00MV 0x01
+#define BD71815_BUCK_RAMPRATE_2P50MV 0x02
+#define BD71815_BUCK_RAMPRATE_1P25MV 0x03
+
+#define BD71815_BUCK_PWM_FIXED BIT(4)
+#define BD71815_BUCK_SNVS_ON BIT(3)
+#define BD71815_BUCK_RUN_ON BIT(2)
+#define BD71815_BUCK_LPSR_ON BIT(1)
+#define BD71815_BUCK_SUSP_ON BIT(0)
+
+/* BD71815_REG_BUCK1_VOLT_H bits */
+#define BD71815_BUCK_DVSSEL BIT(7)
+#define BD71815_BUCK_STBY_DVS BIT(6)
+#define BD71815_VOLT_MASK 0x3F
+#define BD71815_BUCK1_H_DEFAULT 0x14
+#define BD71815_BUCK1_L_DEFAULT 0x14
+
+/* BD71815_REG_BUCK2_VOLT_H bits */
+#define BD71815_BUCK2_H_DEFAULT 0x14
+#define BD71815_BUCK2_L_DEFAULT 0x14
+
+/* WLED output */
+/* current register mask */
+#define LED_DIMM_MASK 0x3f
+/* LED enable bits at LED_CTRL reg */
+#define LED_CHGDONE_EN BIT(4)
+#define LED_RUN_ON BIT(2)
+#define LED_LPSR_ON BIT(1)
+#define LED_SUSP_ON BIT(0)
+
+/* BD71815_REG_LDO1_CTRL bits */
+#define LDO1_EN BIT(0)
+#define LDO2_EN BIT(1)
+#define LDO3_EN BIT(2)
+#define DVREF_EN BIT(3)
+#define VOSNVS_SW_EN BIT(4)
+
+/* LDO_MODE1_register */
+#define LDO1_SNVS_ON BIT(7)
+#define LDO1_RUN_ON BIT(6)
+#define LDO1_LPSR_ON BIT(5)
+#define LDO1_SUSP_ON BIT(4)
+/* set => register control, unset => GPIO control */
+#define LDO4_MODE_MASK BIT(3)
+#define LDO4_MODE_I2C BIT(3)
+#define LDO4_MODE_GPIO 0
+/* set => register control, unset => start when DCIN connected */
+#define LDO3_MODE_MASK BIT(2)
+#define LDO3_MODE_I2C BIT(2)
+#define LDO3_MODE_DCIN 0
+
+/* LDO_MODE2 register */
+#define LDO3_SNVS_ON BIT(7)
+#define LDO3_RUN_ON BIT(6)
+#define LDO3_LPSR_ON BIT(5)
+#define LDO3_SUSP_ON BIT(4)
+#define LDO2_SNVS_ON BIT(3)
+#define LDO2_RUN_ON BIT(2)
+#define LDO2_LPSR_ON BIT(1)
+#define LDO2_SUSP_ON BIT(0)
+
+/* LDO_MODE3 register */
+#define LDO5_SNVS_ON BIT(7)
+#define LDO5_RUN_ON BIT(6)
+#define LDO5_LPSR_ON BIT(5)
+#define LDO5_SUSP_ON BIT(4)
+#define LDO4_SNVS_ON BIT(3)
+#define LDO4_RUN_ON BIT(2)
+#define LDO4_LPSR_ON BIT(1)
+#define LDO4_SUSP_ON BIT(0)
+
+/* LDO_MODE4 register */
+#define DVREF_SNVS_ON BIT(7)
+#define DVREF_RUN_ON BIT(6)
+#define DVREF_LPSR_ON BIT(5)
+#define DVREF_SUSP_ON BIT(4)
+#define LDO_LPSR_SNVS_ON BIT(3)
+#define LDO_LPSR_RUN_ON BIT(2)
+#define LDO_LPSR_LPSR_ON BIT(1)
+#define LDO_LPSR_SUSP_ON BIT(0)
+
+/* BD71815_REG_OUT32K bits */
+#define OUT32K_EN BIT(0)
+#define OUT32K_MODE BIT(1)
+#define OUT32K_MODE_CMOS BIT(1)
+#define OUT32K_MODE_OPEN_DRAIN 0
+
+/* BD71815_REG_BAT_STAT bits */
+#define BAT_DET BIT(5)
+#define BAT_DET_OFFSET 5
+#define BAT_DET_DONE BIT(4)
+#define VBAT_OV BIT(3)
+#define DBAT_DET BIT(0)
+
+/* BD71815_REG_VBUS_STAT bits */
+#define VBUS_DET BIT(0)
+
+#define BD71815_REG_RTC_START BD71815_REG_SEC
+#define BD71815_REG_RTC_ALM_START BD71815_REG_ALM0_SEC
+
+/* BD71815_REG_ALM0_MASK bits */
+#define A0_ONESEC BIT(7)
+
+/* BD71815_REG_INT_EN_00 bits */
+#define ALMALE BIT(0)
+
+/* BD71815_REG_INT_STAT_03 bits */
+#define DCIN_MON_DET BIT(1)
+#define DCIN_MON_RES BIT(0)
+#define POWERON_LONG BIT(2)
+#define POWERON_MID BIT(3)
+#define POWERON_SHORT BIT(4)
+#define POWERON_PRESS BIT(5)
+
+/* BD71815_REG_INT_STAT_08 bits */
+#define VBAT_MON_DET BIT(1)
+#define VBAT_MON_RES BIT(0)
+
+/* BD71815_REG_INT_STAT_11 bits */
+#define INT_STAT_11_VF_DET BIT(7)
+#define INT_STAT_11_VF_RES BIT(6)
+#define INT_STAT_11_VF125_DET BIT(5)
+#define INT_STAT_11_VF125_RES BIT(4)
+#define INT_STAT_11_OVTMP_DET BIT(3)
+#define INT_STAT_11_OVTMP_RES BIT(2)
+#define INT_STAT_11_LOTMP_DET BIT(1)
+#define INT_STAT_11_LOTMP_RES BIT(0)
+
+#define VBAT_MON_DET BIT(1)
+#define VBAT_MON_RES BIT(0)
+
+/* BD71815_REG_PWRCTRL bits */
+#define RESTARTEN BIT(0)
+
+/* BD71815_REG_GPO bits */
+#define READY_FORCE_LOW BIT(2)
+#define BD71815_GPIO_DRIVE_MASK BIT(4)
+#define BD71815_GPIO_OPEN_DRAIN 0
+#define BD71815_GPIO_CMOS BIT(4)
+
+/* BD71815 interrupt masks */
+enum {
+ BD71815_INT_EN_01_BUCKAST_MASK = 0x0F,
+ BD71815_INT_EN_02_DCINAST_MASK = 0x3E,
+ BD71815_INT_EN_03_DCINAST_MASK = 0x3F,
+ BD71815_INT_EN_04_VSYSAST_MASK = 0xCF,
+ BD71815_INT_EN_05_CHGAST_MASK = 0xFC,
+ BD71815_INT_EN_06_BATAST_MASK = 0xF3,
+ BD71815_INT_EN_07_BMONAST_MASK = 0xFE,
+ BD71815_INT_EN_08_BMONAST_MASK = 0x03,
+ BD71815_INT_EN_09_BMONAST_MASK = 0x07,
+ BD71815_INT_EN_10_BMONAST_MASK = 0x3F,
+ BD71815_INT_EN_11_TMPAST_MASK = 0xFF,
+ BD71815_INT_EN_12_ALMAST_MASK = 0x07,
+};
+/* BD71815 interrupt irqs */
+enum {
+ /* BUCK reg interrupts */
+ BD71815_INT_BUCK1_OCP,
+ BD71815_INT_BUCK2_OCP,
+ BD71815_INT_BUCK3_OCP,
+ BD71815_INT_BUCK4_OCP,
+ BD71815_INT_BUCK5_OCP,
+ BD71815_INT_LED_OVP,
+ BD71815_INT_LED_OCP,
+ BD71815_INT_LED_SCP,
+ /* DCIN1 interrupts */
+ BD71815_INT_DCIN_RMV,
+ BD71815_INT_CLPS_OUT,
+ BD71815_INT_CLPS_IN,
+ BD71815_INT_DCIN_OVP_RES,
+ BD71815_INT_DCIN_OVP_DET,
+ /* DCIN2 interrupts */
+ BD71815_INT_DCIN_MON_RES,
+ BD71815_INT_DCIN_MON_DET,
+ BD71815_INT_WDOG,
+ /* Vsys INT_STAT_04 */
+ BD71815_INT_VSYS_UV_RES,
+ BD71815_INT_VSYS_UV_DET,
+ BD71815_INT_VSYS_LOW_RES,
+ BD71815_INT_VSYS_LOW_DET,
+ BD71815_INT_VSYS_MON_RES,
+ BD71815_INT_VSYS_MON_DET,
+ /* Charger INT_STAT_05 */
+ BD71815_INT_CHG_WDG_TEMP,
+ BD71815_INT_CHG_WDG_TIME,
+ BD71815_INT_CHG_RECHARGE_RES,
+ BD71815_INT_CHG_RECHARGE_DET,
+ BD71815_INT_CHG_RANGED_TEMP_TRANSITION,
+ BD71815_INT_CHG_STATE_TRANSITION,
+ /* Battery INT_STAT_06 */
+ BD71815_INT_BAT_TEMP_NORMAL,
+ BD71815_INT_BAT_TEMP_ERANGE,
+ BD71815_INT_BAT_REMOVED,
+ BD71815_INT_BAT_DETECTED,
+ BD71815_INT_THERM_REMOVED,
+ BD71815_INT_THERM_DETECTED,
+ /* Battery Mon 1 INT_STAT_07 */
+ BD71815_INT_BAT_DEAD,
+ BD71815_INT_BAT_SHORTC_RES,
+ BD71815_INT_BAT_SHORTC_DET,
+ BD71815_INT_BAT_LOW_VOLT_RES,
+ BD71815_INT_BAT_LOW_VOLT_DET,
+ BD71815_INT_BAT_OVER_VOLT_RES,
+ BD71815_INT_BAT_OVER_VOLT_DET,
+ /* Battery Mon 2 INT_STAT_08 */
+ BD71815_INT_BAT_MON_RES,
+ BD71815_INT_BAT_MON_DET,
+ /* Battery Mon 3 (Coulomb counter) INT_STAT_09 */
+ BD71815_INT_BAT_CC_MON1,
+ BD71815_INT_BAT_CC_MON2,
+ BD71815_INT_BAT_CC_MON3,
+ /* Battery Mon 4 INT_STAT_10 */
+ BD71815_INT_BAT_OVER_CURR_1_RES,
+ BD71815_INT_BAT_OVER_CURR_1_DET,
+ BD71815_INT_BAT_OVER_CURR_2_RES,
+ BD71815_INT_BAT_OVER_CURR_2_DET,
+ BD71815_INT_BAT_OVER_CURR_3_RES,
+ BD71815_INT_BAT_OVER_CURR_3_DET,
+ /* Temperature INT_STAT_11 */
+ BD71815_INT_TEMP_BAT_LOW_RES,
+ BD71815_INT_TEMP_BAT_LOW_DET,
+ BD71815_INT_TEMP_BAT_HI_RES,
+ BD71815_INT_TEMP_BAT_HI_DET,
+ BD71815_INT_TEMP_CHIP_OVER_125_RES,
+ BD71815_INT_TEMP_CHIP_OVER_125_DET,
+ BD71815_INT_TEMP_CHIP_OVER_VF_RES,
+ BD71815_INT_TEMP_CHIP_OVER_VF_DET,
+ /* RTC Alarm INT_STAT_12 */
+ BD71815_INT_RTC0,
+ BD71815_INT_RTC1,
+ BD71815_INT_RTC2,
+};
+
+#define BD71815_INT_BUCK1_OCP_MASK BIT(0)
+#define BD71815_INT_BUCK2_OCP_MASK BIT(1)
+#define BD71815_INT_BUCK3_OCP_MASK BIT(2)
+#define BD71815_INT_BUCK4_OCP_MASK BIT(3)
+#define BD71815_INT_BUCK5_OCP_MASK BIT(4)
+#define BD71815_INT_LED_OVP_MASK BIT(5)
+#define BD71815_INT_LED_OCP_MASK BIT(6)
+#define BD71815_INT_LED_SCP_MASK BIT(7)
+
+#define BD71815_INT_DCIN_RMV_MASK BIT(1)
+#define BD71815_INT_CLPS_OUT_MASK BIT(2)
+#define BD71815_INT_CLPS_IN_MASK BIT(3)
+#define BD71815_INT_DCIN_OVP_RES_MASK BIT(4)
+#define BD71815_INT_DCIN_OVP_DET_MASK BIT(5)
+
+#define BD71815_INT_DCIN_MON_RES_MASK BIT(0)
+#define BD71815_INT_DCIN_MON_DET_MASK BIT(1)
+#define BD71815_INT_WDOG_MASK BIT(6)
+
+#define BD71815_INT_VSYS_UV_RES_MASK BIT(0)
+#define BD71815_INT_VSYS_UV_DET_MASK BIT(1)
+#define BD71815_INT_VSYS_LOW_RES_MASK BIT(2)
+#define BD71815_INT_VSYS_LOW_DET_MASK BIT(3)
+#define BD71815_INT_VSYS_MON_RES_MASK BIT(6)
+#define BD71815_INT_VSYS_MON_DET_MASK BIT(7)
+
+#define BD71815_INT_CHG_WDG_TEMP_MASK BIT(2)
+#define BD71815_INT_CHG_WDG_TIME_MASK BIT(3)
+#define BD71815_INT_CHG_RECHARGE_RES_MASK BIT(4)
+#define BD71815_INT_CHG_RECHARGE_DET_MASK BIT(5)
+#define BD71815_INT_CHG_RANGED_TEMP_TRANSITION_MASK BIT(6)
+#define BD71815_INT_CHG_STATE_TRANSITION_MASK BIT(7)
+
+#define BD71815_INT_BAT_TEMP_NORMAL_MASK BIT(0)
+#define BD71815_INT_BAT_TEMP_ERANGE_MASK BIT(1)
+#define BD71815_INT_BAT_REMOVED_MASK BIT(4)
+#define BD71815_INT_BAT_DETECTED_MASK BIT(5)
+#define BD71815_INT_THERM_REMOVED_MASK BIT(6)
+#define BD71815_INT_THERM_DETECTED_MASK BIT(7)
+
+#define BD71815_INT_BAT_DEAD_MASK BIT(1)
+#define BD71815_INT_BAT_SHORTC_RES_MASK BIT(2)
+#define BD71815_INT_BAT_SHORTC_DET_MASK BIT(3)
+#define BD71815_INT_BAT_LOW_VOLT_RES_MASK BIT(4)
+#define BD71815_INT_BAT_LOW_VOLT_DET_MASK BIT(5)
+#define BD71815_INT_BAT_OVER_VOLT_RES_MASK BIT(6)
+#define BD71815_INT_BAT_OVER_VOLT_DET_MASK BIT(7)
+
+#define BD71815_INT_BAT_MON_RES_MASK BIT(0)
+#define BD71815_INT_BAT_MON_DET_MASK BIT(1)
+
+#define BD71815_INT_BAT_CC_MON1_MASK BIT(0)
+#define BD71815_INT_BAT_CC_MON2_MASK BIT(1)
+#define BD71815_INT_BAT_CC_MON3_MASK BIT(2)
+
+#define BD71815_INT_BAT_OVER_CURR_1_RES_MASK BIT(0)
+#define BD71815_INT_BAT_OVER_CURR_1_DET_MASK BIT(1)
+#define BD71815_INT_BAT_OVER_CURR_2_RES_MASK BIT(2)
+#define BD71815_INT_BAT_OVER_CURR_2_DET_MASK BIT(3)
+#define BD71815_INT_BAT_OVER_CURR_3_RES_MASK BIT(4)
+#define BD71815_INT_BAT_OVER_CURR_3_DET_MASK BIT(5)
+
+#define BD71815_INT_TEMP_BAT_LOW_RES_MASK BIT(0)
+#define BD71815_INT_TEMP_BAT_LOW_DET_MASK BIT(1)
+#define BD71815_INT_TEMP_BAT_HI_RES_MASK BIT(2)
+#define BD71815_INT_TEMP_BAT_HI_DET_MASK BIT(3)
+#define BD71815_INT_TEMP_CHIP_OVER_125_RES_MASK BIT(4)
+#define BD71815_INT_TEMP_CHIP_OVER_125_DET_MASK BIT(5)
+#define BD71815_INT_TEMP_CHIP_OVER_VF_RES_MASK BIT(6)
+#define BD71815_INT_TEMP_CHIP_OVER_VF_DET_MASK BIT(7)
+
+#define BD71815_INT_RTC0_MASK BIT(0)
+#define BD71815_INT_RTC1_MASK BIT(1)
+#define BD71815_INT_RTC2_MASK BIT(2)
+
+/* BD71815_REG_CC_CTRL bits */
+#define CCNTRST 0x80
+#define CCNTENB 0x40
+#define CCCALIB 0x20
+
+/* BD71815_REG_CC_CURCD */
+#define CURDIR_Discharging 0x8000
+
+/* BD71815_REG_VM_SA_IBAT */
+#define IBAT_SA_DIR_Discharging 0x8000
+
+/* BD71815_REG_REX_CTRL_1 bits */
+#define REX_CLR BIT(4)
+
+/* BD71815_REG_REX_CTRL_1 bits */
+#define REX_PMU_STATE_MASK BIT(2)
+
+/* BD71815_REG_LED_CTRL bits */
+#define CHGDONE_LED_EN BIT(4)
+
+#endif /* __LINUX_MFD_BD71815_H */
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
new file mode 100644
index 000000000000..73a71ef69152
--- /dev/null
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -0,0 +1,490 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2019 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD71828_H__
+#define __LINUX_MFD_BD71828_H__
+
+#include <linux/bits.h>
+#include <linux/mfd/rohm-generic.h>
+#include <linux/mfd/rohm-shared.h>
+
+/* Regulator IDs */
+enum {
+ BD71828_BUCK1,
+ BD71828_BUCK2,
+ BD71828_BUCK3,
+ BD71828_BUCK4,
+ BD71828_BUCK5,
+ BD71828_BUCK6,
+ BD71828_BUCK7,
+ BD71828_LDO1,
+ BD71828_LDO2,
+ BD71828_LDO3,
+ BD71828_LDO4,
+ BD71828_LDO5,
+ BD71828_LDO6,
+ BD71828_LDO_SNVS,
+ BD71828_REGULATOR_AMOUNT,
+};
+
+#define BD71828_BUCK1267_VOLTS 0x100
+#define BD71828_BUCK3_VOLTS 0x20
+#define BD71828_BUCK4_VOLTS 0x40
+#define BD71828_BUCK5_VOLTS 0x20
+#define BD71828_LDO_VOLTS 0x40
+/* LDO6 has a fixed 1.8V output voltage */
+#define BD71828_LDO_6_VOLTAGE 1800000
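(Editorial note: in the regulator framework a fixed rail like LDO6 is typically described with n_voltages = 1 and fixed_uV. A sketch under that assumption; the name string and the ops object are illustrative, not taken from this header.)

	#include <linux/module.h>
	#include <linux/regulator/driver.h>

	/* Placeholder ops for the sketch; a real driver supplies enable ops etc. */
	static const struct regulator_ops bd71828_ldo6_ops;

	static const struct regulator_desc bd71828_ldo6_sketch = {
		.name		= "ldo6",
		.id		= BD71828_LDO6,
		.type		= REGULATOR_VOLTAGE,
		.owner		= THIS_MODULE,
		.n_voltages	= 1,
		.fixed_uV	= BD71828_LDO_6_VOLTAGE,	/* 1.8 V, see above */
		.enable_reg	= BD71828_REG_LDO6_EN,
		.enable_mask	= BD71828_MASK_RUN_EN,
		.ops		= &bd71828_ldo6_ops,
	};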
+
+/* Registers and masks */
+
+/* MODE control */
+#define BD71828_REG_PS_CTRL_1 0x04
+#define BD71828_REG_PS_CTRL_2 0x05
+#define BD71828_REG_PS_CTRL_3 0x06
+
+#define BD71828_MASK_STATE_HBNT BIT(1)
+
+#define BD71828_MASK_RUN_LVL_CTRL 0x30
+
+/* Regulator control masks */
+
+#define BD71828_MASK_RAMP_DELAY 0x6
+
+#define BD71828_MASK_RUN_EN 0x08
+#define BD71828_MASK_SUSP_EN 0x04
+#define BD71828_MASK_IDLE_EN 0x02
+#define BD71828_MASK_LPSR_EN 0x01
+
+#define BD71828_MASK_RUN0_EN 0x01
+#define BD71828_MASK_RUN1_EN 0x02
+#define BD71828_MASK_RUN2_EN 0x04
+#define BD71828_MASK_RUN3_EN 0x08
+
+#define BD71828_MASK_DVS_BUCK1_CTRL 0x10
+#define BD71828_DVS_BUCK1_CTRL_I2C 0
+#define BD71828_DVS_BUCK1_USE_RUNLVL 0x10
+
+#define BD71828_MASK_DVS_BUCK2_CTRL 0x20
+#define BD71828_DVS_BUCK2_CTRL_I2C 0
+#define BD71828_DVS_BUCK2_USE_RUNLVL 0x20
+
+#define BD71828_MASK_DVS_BUCK6_CTRL 0x40
+#define BD71828_DVS_BUCK6_CTRL_I2C 0
+#define BD71828_DVS_BUCK6_USE_RUNLVL 0x40
+
+#define BD71828_MASK_DVS_BUCK7_CTRL 0x80
+#define BD71828_DVS_BUCK7_CTRL_I2C 0
+#define BD71828_DVS_BUCK7_USE_RUNLVL 0x80
+
+#define BD71828_MASK_BUCK1267_VOLT 0xff
+#define BD71828_MASK_BUCK3_VOLT 0x1f
+#define BD71828_MASK_BUCK4_VOLT 0x3f
+#define BD71828_MASK_BUCK5_VOLT 0x1f
+#define BD71828_MASK_LDO_VOLT 0x3f
+
+/* Regulator control regs */
+#define BD71828_REG_BUCK1_EN 0x08
+#define BD71828_REG_BUCK1_CTRL 0x09
+#define BD71828_REG_BUCK1_MODE 0x0a
+#define BD71828_REG_BUCK1_IDLE_VOLT 0x0b
+#define BD71828_REG_BUCK1_SUSP_VOLT 0x0c
+#define BD71828_REG_BUCK1_VOLT 0x0d
+
+#define BD71828_REG_BUCK2_EN 0x12
+#define BD71828_REG_BUCK2_CTRL 0x13
+#define BD71828_REG_BUCK2_MODE 0x14
+#define BD71828_REG_BUCK2_IDLE_VOLT 0x15
+#define BD71828_REG_BUCK2_SUSP_VOLT 0x16
+#define BD71828_REG_BUCK2_VOLT 0x17
+
+#define BD71828_REG_BUCK3_EN 0x1c
+#define BD71828_REG_BUCK3_MODE 0x1d
+#define BD71828_REG_BUCK3_VOLT 0x1e
+
+#define BD71828_REG_BUCK4_EN 0x1f
+#define BD71828_REG_BUCK4_MODE 0x20
+#define BD71828_REG_BUCK4_VOLT 0x21
+
+#define BD71828_REG_BUCK5_EN 0x22
+#define BD71828_REG_BUCK5_MODE 0x23
+#define BD71828_REG_BUCK5_VOLT 0x24
+
+#define BD71828_REG_BUCK6_EN 0x25
+#define BD71828_REG_BUCK6_CTRL 0x26
+#define BD71828_REG_BUCK6_MODE 0x27
+#define BD71828_REG_BUCK6_IDLE_VOLT 0x28
+#define BD71828_REG_BUCK6_SUSP_VOLT 0x29
+#define BD71828_REG_BUCK6_VOLT 0x2a
+
+#define BD71828_REG_BUCK7_EN 0x2f
+#define BD71828_REG_BUCK7_CTRL 0x30
+#define BD71828_REG_BUCK7_MODE 0x31
+#define BD71828_REG_BUCK7_IDLE_VOLT 0x32
+#define BD71828_REG_BUCK7_SUSP_VOLT 0x33
+#define BD71828_REG_BUCK7_VOLT 0x34
+
+#define BD71828_REG_LDO1_EN 0x39
+#define BD71828_REG_LDO1_VOLT 0x3a
+#define BD71828_REG_LDO2_EN 0x3b
+#define BD71828_REG_LDO2_VOLT 0x3c
+#define BD71828_REG_LDO3_EN 0x3d
+#define BD71828_REG_LDO3_VOLT 0x3e
+#define BD71828_REG_LDO4_EN 0x3f
+#define BD71828_REG_LDO4_VOLT 0x40
+#define BD71828_REG_LDO5_EN 0x41
+#define BD71828_REG_LDO5_VOLT 0x43
+#define BD71828_REG_LDO5_VOLT_OPT 0x42
+#define BD71828_REG_LDO6_EN 0x44
+#define BD71828_REG_LDO7_EN 0x45
+#define BD71828_REG_LDO7_VOLT 0x46
+
+/* GPIO */
+
+#define BD71828_GPIO_DRIVE_MASK 0x2
+#define BD71828_GPIO_OPEN_DRAIN 0x0
+#define BD71828_GPIO_PUSH_PULL 0x2
+#define BD71828_GPIO_OUT_HI 0x1
+#define BD71828_GPIO_OUT_LO 0x0
+#define BD71828_GPIO_OUT_MASK 0x1
+
+#define BD71828_REG_GPIO_CTRL1 0x47
+#define BD71828_REG_GPIO_CTRL2 0x48
+#define BD71828_REG_GPIO_CTRL3 0x49
+#define BD71828_REG_IO_STAT 0xed
+
+/* clk */
+#define BD71828_REG_OUT32K 0x4b
+
+/* RTC */
+#define BD71828_REG_RTC_SEC 0x4c
+#define BD71828_REG_RTC_MINUTE 0x4d
+#define BD71828_REG_RTC_HOUR 0x4e
+#define BD71828_REG_RTC_WEEK 0x4f
+#define BD71828_REG_RTC_DAY 0x50
+#define BD71828_REG_RTC_MONTH 0x51
+#define BD71828_REG_RTC_YEAR 0x52
+
+#define BD71828_REG_RTC_ALM0_SEC 0x53
+#define BD71828_REG_RTC_ALM_START BD71828_REG_RTC_ALM0_SEC
+#define BD71828_REG_RTC_ALM0_MINUTE 0x54
+#define BD71828_REG_RTC_ALM0_HOUR 0x55
+#define BD71828_REG_RTC_ALM0_WEEK 0x56
+#define BD71828_REG_RTC_ALM0_DAY 0x57
+#define BD71828_REG_RTC_ALM0_MONTH 0x58
+#define BD71828_REG_RTC_ALM0_YEAR 0x59
+#define BD71828_REG_RTC_ALM0_MASK 0x61
+
+#define BD71828_REG_RTC_ALM1_SEC 0x5a
+#define BD71828_REG_RTC_ALM1_MINUTE 0x5b
+#define BD71828_REG_RTC_ALM1_HOUR 0x5c
+#define BD71828_REG_RTC_ALM1_WEEK 0x5d
+#define BD71828_REG_RTC_ALM1_DAY 0x5e
+#define BD71828_REG_RTC_ALM1_MONTH 0x5f
+#define BD71828_REG_RTC_ALM1_YEAR 0x60
+#define BD71828_REG_RTC_ALM1_MASK 0x62
+
+#define BD71828_REG_RTC_ALM2 0x63
+#define BD71828_REG_RTC_START BD71828_REG_RTC_SEC
+
+/* Charger/Battery */
+#define BD71828_REG_CHG_STATE 0x65
+#define BD71828_REG_CHG_FULL 0xd2
+#define BD71828_REG_CHG_EN 0x6F
+#define BD71828_REG_DCIN_STAT 0x68
+#define BD71828_MASK_DCIN_DET 0x01
+#define BD71828_REG_VDCIN_U 0x9c
+#define BD71828_MASK_CHG_EN 0x01
+#define BD71828_CHG_MASK_DCIN_U 0x0f
+#define BD71828_REG_BAT_STAT 0x67
+#define BD71828_REG_BAT_TEMP 0x6c
+#define BD71828_MASK_BAT_TEMP 0x07
+#define BD71828_BAT_TEMP_OPEN 0x07
+#define BD71828_MASK_BAT_DET 0x20
+#define BD71828_MASK_BAT_DET_DONE 0x10
+#define BD71828_REG_CHG_STATE 0x65
+#define BD71828_REG_VBAT_U 0x8c
+#define BD71828_MASK_VBAT_U 0x0f
+#define BD71828_REG_VBAT_REX_AVG_U 0x92
+
+#define BD71828_REG_OCV_PWRON_U 0x8A
+
+#define BD71828_REG_VBAT_MIN_AVG_U 0x8e
+#define BD71828_REG_VBAT_MIN_AVG_L 0x8f
+
+#define BD71828_REG_CC_CNT3 0xb5
+#define BD71828_REG_CC_CNT2 0xb6
+#define BD71828_REG_CC_CNT1 0xb7
+#define BD71828_REG_CC_CNT0 0xb8
+#define BD71828_REG_CC_CURCD_AVG_U 0xb2
+#define BD71828_MASK_CC_CURCD_AVG_U 0x3f
+#define BD71828_MASK_CC_CUR_DIR 0x80
+#define BD71828_REG_VM_BTMP_U 0xa1
+#define BD71828_REG_VM_BTMP_L 0xa2
+#define BD71828_MASK_VM_BTMP_U 0x0f
+#define BD71828_REG_COULOMB_CTRL 0xc4
+#define BD71828_REG_COULOMB_CTRL2 0xd2
+#define BD71828_MASK_REX_CC_CLR 0x01
+#define BD71828_MASK_FULL_CC_CLR 0x10
+#define BD71828_REG_CC_CNT_FULL3 0xbd
+#define BD71828_REG_CC_CNT_CHG3 0xc1
+
+#define BD71828_REG_VBAT_INITIAL1_U 0x86
+#define BD71828_REG_VBAT_INITIAL1_L 0x87
+
+#define BD71828_REG_VBAT_INITIAL2_U 0x88
+#define BD71828_REG_VBAT_INITIAL2_L 0x89
+
+#define BD71828_REG_IBAT_U 0xb0
+#define BD71828_REG_IBAT_L 0xb1
+
+#define BD71828_REG_IBAT_AVG_U 0xb2
+#define BD71828_REG_IBAT_AVG_L 0xb3
+
+#define BD71828_REG_VSYS_AVG_U 0x96
+#define BD71828_REG_VSYS_AVG_L 0x97
+#define BD71828_REG_VSYS_MIN_AVG_U 0x98
+#define BD71828_REG_VSYS_MIN_AVG_L 0x99
+#define BD71828_REG_CHG_SET1 0x75
+#define BD71828_REG_ALM_VBAT_LIMIT_U 0xaa
+#define BD71828_REG_BATCAP_MON_LIMIT_U 0xcc
+#define BD71828_REG_CONF 0x64
+
+#define BD71828_REG_DCIN_CLPS 0x71
+
+#define BD71828_REG_MEAS_CLEAR 0xaf
+
+/* LEDs */
+#define BD71828_REG_LED_CTRL 0x4A
+#define BD71828_MASK_LED_AMBER 0x80
+#define BD71828_MASK_LED_GREEN 0x40
+#define BD71828_LED_ON 0xff
+#define BD71828_LED_OFF 0x0
+
+/* IRQ registers */
+#define BD71828_REG_INT_MASK_BUCK 0xd3
+#define BD71828_REG_INT_MASK_DCIN1 0xd4
+#define BD71828_REG_INT_MASK_DCIN2 0xd5
+#define BD71828_REG_INT_MASK_VSYS 0xd6
+#define BD71828_REG_INT_MASK_CHG 0xd7
+#define BD71828_REG_INT_MASK_BAT 0xd8
+#define BD71828_REG_INT_MASK_BAT_MON1 0xd9
+#define BD71828_REG_INT_MASK_BAT_MON2 0xda
+#define BD71828_REG_INT_MASK_BAT_MON3 0xdb
+#define BD71828_REG_INT_MASK_BAT_MON4 0xdc
+#define BD71828_REG_INT_MASK_TEMP 0xdd
+#define BD71828_REG_INT_MASK_RTC 0xde
+
+#define BD71828_REG_INT_MAIN 0xdf
+#define BD71828_REG_INT_BUCK 0xe0
+#define BD71828_REG_INT_DCIN1 0xe1
+#define BD71828_REG_INT_DCIN2 0xe2
+#define BD71828_REG_INT_VSYS 0xe3
+#define BD71828_REG_INT_CHG 0xe4
+#define BD71828_REG_INT_BAT 0xe5
+#define BD71828_REG_INT_BAT_MON1 0xe6
+#define BD71828_REG_INT_BAT_MON2 0xe7
+#define BD71828_REG_INT_BAT_MON3 0xe8
+#define BD71828_REG_INT_BAT_MON4 0xe9
+#define BD71828_REG_INT_TEMP 0xea
+#define BD71828_REG_INT_RTC 0xeb
+#define BD71828_REG_INT_UPDATE 0xec
+
+#define BD71828_MAX_REGISTER BD71828_REG_IO_STAT
+
+/* Masks for main IRQ register bits */
+enum {
+ BD71828_INT_BUCK,
+#define BD71828_INT_BUCK_MASK BIT(BD71828_INT_BUCK)
+ BD71828_INT_DCIN,
+#define BD71828_INT_DCIN_MASK BIT(BD71828_INT_DCIN)
+ BD71828_INT_VSYS,
+#define BD71828_INT_VSYS_MASK BIT(BD71828_INT_VSYS)
+ BD71828_INT_CHG,
+#define BD71828_INT_CHG_MASK BIT(BD71828_INT_CHG)
+ BD71828_INT_BAT,
+#define BD71828_INT_BAT_MASK BIT(BD71828_INT_BAT)
+ BD71828_INT_BAT_MON,
+#define BD71828_INT_BAT_MON_MASK BIT(BD71828_INT_BAT_MON)
+ BD71828_INT_TEMP,
+#define BD71828_INT_TEMP_MASK BIT(BD71828_INT_TEMP)
+ BD71828_INT_RTC,
+#define BD71828_INT_RTC_MASK BIT(BD71828_INT_RTC)
+};
+
+/* Interrupts */
+enum {
+ /* BUCK reg interrupts */
+ BD71828_INT_BUCK1_OCP,
+ BD71828_INT_BUCK2_OCP,
+ BD71828_INT_BUCK3_OCP,
+ BD71828_INT_BUCK4_OCP,
+ BD71828_INT_BUCK5_OCP,
+ BD71828_INT_BUCK6_OCP,
+ BD71828_INT_BUCK7_OCP,
+ BD71828_INT_PGFAULT,
+ /* DCIN1 interrupts */
+ BD71828_INT_DCIN_DET,
+ BD71828_INT_DCIN_RMV,
+ BD71828_INT_CLPS_OUT,
+ BD71828_INT_CLPS_IN,
+ /* DCIN2 interrupts */
+ BD71828_INT_DCIN_MON_RES,
+ BD71828_INT_DCIN_MON_DET,
+ BD71828_INT_LONGPUSH,
+ BD71828_INT_MIDPUSH,
+ BD71828_INT_SHORTPUSH,
+ BD71828_INT_PUSH,
+ BD71828_INT_WDOG,
+ BD71828_INT_SWRESET,
+ /* Vsys */
+ BD71828_INT_VSYS_UV_RES,
+ BD71828_INT_VSYS_UV_DET,
+ BD71828_INT_VSYS_LOW_RES,
+ BD71828_INT_VSYS_LOW_DET,
+ BD71828_INT_VSYS_HALL_IN,
+ BD71828_INT_VSYS_HALL_TOGGLE,
+ BD71828_INT_VSYS_MON_RES,
+ BD71828_INT_VSYS_MON_DET,
+ /* Charger */
+ BD71828_INT_CHG_DCIN_ILIM,
+ BD71828_INT_CHG_TOPOFF_TO_DONE,
+ BD71828_INT_CHG_WDG_TEMP,
+ BD71828_INT_CHG_WDG_TIME,
+ BD71828_INT_CHG_RECHARGE_RES,
+ BD71828_INT_CHG_RECHARGE_DET,
+ BD71828_INT_CHG_RANGED_TEMP_TRANSITION,
+ BD71828_INT_CHG_STATE_TRANSITION,
+ /* Battery */
+ BD71828_INT_BAT_TEMP_NORMAL,
+ BD71828_INT_BAT_TEMP_ERANGE,
+ BD71828_INT_BAT_TEMP_WARN,
+ BD71828_INT_BAT_REMOVED,
+ BD71828_INT_BAT_DETECTED,
+ BD71828_INT_THERM_REMOVED,
+ BD71828_INT_THERM_DETECTED,
+ /* Battery Mon 1 */
+ BD71828_INT_BAT_DEAD,
+ BD71828_INT_BAT_SHORTC_RES,
+ BD71828_INT_BAT_SHORTC_DET,
+ BD71828_INT_BAT_LOW_VOLT_RES,
+ BD71828_INT_BAT_LOW_VOLT_DET,
+ BD71828_INT_BAT_OVER_VOLT_RES,
+ BD71828_INT_BAT_OVER_VOLT_DET,
+ /* Battery Mon 2 */
+ BD71828_INT_BAT_MON_RES,
+ BD71828_INT_BAT_MON_DET,
+ /* Battery Mon 3 (Coulomb counter) */
+ BD71828_INT_BAT_CC_MON1,
+ BD71828_INT_BAT_CC_MON2,
+ BD71828_INT_BAT_CC_MON3,
+ /* Battery Mon 4 */
+ BD71828_INT_BAT_OVER_CURR_1_RES,
+ BD71828_INT_BAT_OVER_CURR_1_DET,
+ BD71828_INT_BAT_OVER_CURR_2_RES,
+ BD71828_INT_BAT_OVER_CURR_2_DET,
+ BD71828_INT_BAT_OVER_CURR_3_RES,
+ BD71828_INT_BAT_OVER_CURR_3_DET,
+ /* Temperature */
+ BD71828_INT_TEMP_BAT_LOW_RES,
+ BD71828_INT_TEMP_BAT_LOW_DET,
+ BD71828_INT_TEMP_BAT_HI_RES,
+ BD71828_INT_TEMP_BAT_HI_DET,
+ BD71828_INT_TEMP_CHIP_OVER_125_RES,
+ BD71828_INT_TEMP_CHIP_OVER_125_DET,
+ BD71828_INT_TEMP_CHIP_OVER_VF_DET,
+ BD71828_INT_TEMP_CHIP_OVER_VF_RES,
+ /* RTC Alarm */
+ BD71828_INT_RTC0,
+ BD71828_INT_RTC1,
+ BD71828_INT_RTC2,
+};
+
+#define BD71828_INT_BUCK1_OCP_MASK 0x1
+#define BD71828_INT_BUCK2_OCP_MASK 0x2
+#define BD71828_INT_BUCK3_OCP_MASK 0x4
+#define BD71828_INT_BUCK4_OCP_MASK 0x8
+#define BD71828_INT_BUCK5_OCP_MASK 0x10
+#define BD71828_INT_BUCK6_OCP_MASK 0x20
+#define BD71828_INT_BUCK7_OCP_MASK 0x40
+#define BD71828_INT_PGFAULT_MASK 0x80
+
+#define BD71828_INT_DCIN_DET_MASK 0x1
+#define BD71828_INT_DCIN_RMV_MASK 0x2
+#define BD71828_INT_CLPS_OUT_MASK 0x4
+#define BD71828_INT_CLPS_IN_MASK 0x8
+ /* DCIN2 interrupts */
+#define BD71828_INT_DCIN_MON_RES_MASK 0x1
+#define BD71828_INT_DCIN_MON_DET_MASK 0x2
+#define BD71828_INT_LONGPUSH_MASK 0x4
+#define BD71828_INT_MIDPUSH_MASK 0x8
+#define BD71828_INT_SHORTPUSH_MASK 0x10
+#define BD71828_INT_PUSH_MASK 0x20
+#define BD71828_INT_WDOG_MASK 0x40
+#define BD71828_INT_SWRESET_MASK 0x80
+ /* Vsys */
+#define BD71828_INT_VSYS_UV_RES_MASK 0x1
+#define BD71828_INT_VSYS_UV_DET_MASK 0x2
+#define BD71828_INT_VSYS_LOW_RES_MASK 0x4
+#define BD71828_INT_VSYS_LOW_DET_MASK 0x8
+#define BD71828_INT_VSYS_HALL_IN_MASK 0x10
+#define BD71828_INT_VSYS_HALL_TOGGLE_MASK 0x20
+#define BD71828_INT_VSYS_MON_RES_MASK 0x40
+#define BD71828_INT_VSYS_MON_DET_MASK 0x80
+ /* Charger */
+#define BD71828_INT_CHG_DCIN_ILIM_MASK 0x1
+#define BD71828_INT_CHG_TOPOFF_TO_DONE_MASK 0x2
+#define BD71828_INT_CHG_WDG_TEMP_MASK 0x4
+#define BD71828_INT_CHG_WDG_TIME_MASK 0x8
+#define BD71828_INT_CHG_RECHARGE_RES_MASK 0x10
+#define BD71828_INT_CHG_RECHARGE_DET_MASK 0x20
+#define BD71828_INT_CHG_RANGED_TEMP_TRANSITION_MASK 0x40
+#define BD71828_INT_CHG_STATE_TRANSITION_MASK 0x80
+ /* Battery */
+#define BD71828_INT_BAT_TEMP_NORMAL_MASK 0x1
+#define BD71828_INT_BAT_TEMP_ERANGE_MASK 0x2
+#define BD71828_INT_BAT_TEMP_WARN_MASK 0x4
+#define BD71828_INT_BAT_REMOVED_MASK 0x10
+#define BD71828_INT_BAT_DETECTED_MASK 0x20
+#define BD71828_INT_THERM_REMOVED_MASK 0x40
+#define BD71828_INT_THERM_DETECTED_MASK 0x80
+ /* Battery Mon 1 */
+#define BD71828_INT_BAT_DEAD_MASK 0x2
+#define BD71828_INT_BAT_SHORTC_RES_MASK 0x4
+#define BD71828_INT_BAT_SHORTC_DET_MASK 0x8
+#define BD71828_INT_BAT_LOW_VOLT_RES_MASK 0x10
+#define BD71828_INT_BAT_LOW_VOLT_DET_MASK 0x20
+#define BD71828_INT_BAT_OVER_VOLT_RES_MASK 0x40
+#define BD71828_INT_BAT_OVER_VOLT_DET_MASK 0x80
+ /* Battery Mon 2 */
+#define BD71828_INT_BAT_MON_RES_MASK 0x1
+#define BD71828_INT_BAT_MON_DET_MASK 0x2
+ /* Battery Mon 3 (Coulomb counter) */
+#define BD71828_INT_BAT_CC_MON1_MASK 0x1
+#define BD71828_INT_BAT_CC_MON2_MASK 0x2
+#define BD71828_INT_BAT_CC_MON3_MASK 0x4
+ /* Battery Mon 4 */
+#define BD71828_INT_BAT_OVER_CURR_1_RES_MASK 0x1
+#define BD71828_INT_BAT_OVER_CURR_1_DET_MASK 0x2
+#define BD71828_INT_BAT_OVER_CURR_2_RES_MASK 0x4
+#define BD71828_INT_BAT_OVER_CURR_2_DET_MASK 0x8
+#define BD71828_INT_BAT_OVER_CURR_3_RES_MASK 0x10
+#define BD71828_INT_BAT_OVER_CURR_3_DET_MASK 0x20
+ /* Temperature */
+#define BD71828_INT_TEMP_BAT_LOW_RES_MASK 0x1
+#define BD71828_INT_TEMP_BAT_LOW_DET_MASK 0x2
+#define BD71828_INT_TEMP_BAT_HI_RES_MASK 0x4
+#define BD71828_INT_TEMP_BAT_HI_DET_MASK 0x8
+#define BD71828_INT_TEMP_CHIP_OVER_125_RES_MASK 0x10
+#define BD71828_INT_TEMP_CHIP_OVER_125_DET_MASK 0x20
+#define BD71828_INT_TEMP_CHIP_OVER_VF_RES_MASK 0x40
+#define BD71828_INT_TEMP_CHIP_OVER_VF_DET_MASK 0x80
+ /* RTC Alarm */
+#define BD71828_INT_RTC0_MASK 0x1
+#define BD71828_INT_RTC1_MASK 0x2
+#define BD71828_INT_RTC2_MASK 0x4
+
+#define BD71828_OUT_TYPE_MASK 0x2
+#define BD71828_OUT_TYPE_OPEN_DRAIN 0x0
+#define BD71828_OUT_TYPE_CMOS 0x2
+
+#endif /* __LINUX_MFD_BD71828_H__ */
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
new file mode 100644
index 000000000000..df2918198d37
--- /dev/null
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2018 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD718XX_H__
+#define __LINUX_MFD_BD718XX_H__
+
+#include <linux/mfd/rohm-generic.h>
+#include <linux/regmap.h>
+
+enum {
+ BD718XX_BUCK1 = 0,
+ BD718XX_BUCK2,
+ BD718XX_BUCK3,
+ BD718XX_BUCK4,
+ BD718XX_BUCK5,
+ BD718XX_BUCK6,
+ BD718XX_BUCK7,
+ BD718XX_BUCK8,
+ BD718XX_LDO1,
+ BD718XX_LDO2,
+ BD718XX_LDO3,
+ BD718XX_LDO4,
+ BD718XX_LDO5,
+ BD718XX_LDO6,
+ BD718XX_LDO7,
+ BD718XX_REGULATOR_AMOUNT,
+};
+
+/* Common voltage configurations */
+#define BD718XX_DVS_BUCK_VOLTAGE_NUM 0x3D
+#define BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM 0x3D
+
+#define BD718XX_LDO1_VOLTAGE_NUM 0x08
+#define BD718XX_LDO2_VOLTAGE_NUM 0x02
+#define BD718XX_LDO3_VOLTAGE_NUM 0x10
+#define BD718XX_LDO4_VOLTAGE_NUM 0x0A
+#define BD718XX_LDO6_VOLTAGE_NUM 0x0A
+
+/* BD71837 specific voltage configurations */
+#define BD71837_BUCK5_VOLTAGE_NUM 0x10
+#define BD71837_BUCK6_VOLTAGE_NUM 0x04
+#define BD71837_BUCK7_VOLTAGE_NUM 0x08
+#define BD71837_LDO5_VOLTAGE_NUM 0x10
+#define BD71837_LDO7_VOLTAGE_NUM 0x10
+
+/* BD71847 specific voltage configurations */
+#define BD71847_BUCK3_VOLTAGE_NUM 0x18
+#define BD71847_BUCK4_VOLTAGE_NUM 0x08
+#define BD71847_LDO5_VOLTAGE_NUM 0x20
+
+/* Registers specific to BD71837 */
+enum {
+ BD71837_REG_BUCK3_CTRL = 0x07,
+ BD71837_REG_BUCK4_CTRL = 0x08,
+ BD71837_REG_BUCK3_VOLT_RUN = 0x12,
+ BD71837_REG_BUCK4_VOLT_RUN = 0x13,
+ BD71837_REG_LDO7_VOLT = 0x1E,
+};
+
+/* Registers common for BD71837 and BD71847 */
+enum {
+ BD718XX_REG_REV = 0x00,
+ BD718XX_REG_SWRESET = 0x01,
+ BD718XX_REG_I2C_DEV = 0x02,
+ BD718XX_REG_PWRCTRL0 = 0x03,
+ BD718XX_REG_PWRCTRL1 = 0x04,
+ BD718XX_REG_BUCK1_CTRL = 0x05,
+ BD718XX_REG_BUCK2_CTRL = 0x06,
+ BD718XX_REG_1ST_NODVS_BUCK_CTRL = 0x09,
+ BD718XX_REG_2ND_NODVS_BUCK_CTRL = 0x0A,
+ BD718XX_REG_3RD_NODVS_BUCK_CTRL = 0x0B,
+ BD718XX_REG_4TH_NODVS_BUCK_CTRL = 0x0C,
+ BD718XX_REG_BUCK1_VOLT_RUN = 0x0D,
+ BD718XX_REG_BUCK1_VOLT_IDLE = 0x0E,
+ BD718XX_REG_BUCK1_VOLT_SUSP = 0x0F,
+ BD718XX_REG_BUCK2_VOLT_RUN = 0x10,
+ BD718XX_REG_BUCK2_VOLT_IDLE = 0x11,
+ BD718XX_REG_1ST_NODVS_BUCK_VOLT = 0x14,
+ BD718XX_REG_2ND_NODVS_BUCK_VOLT = 0x15,
+ BD718XX_REG_3RD_NODVS_BUCK_VOLT = 0x16,
+ BD718XX_REG_4TH_NODVS_BUCK_VOLT = 0x17,
+ BD718XX_REG_LDO1_VOLT = 0x18,
+ BD718XX_REG_LDO2_VOLT = 0x19,
+ BD718XX_REG_LDO3_VOLT = 0x1A,
+ BD718XX_REG_LDO4_VOLT = 0x1B,
+ BD718XX_REG_LDO5_VOLT = 0x1C,
+ BD718XX_REG_LDO6_VOLT = 0x1D,
+ BD718XX_REG_TRANS_COND0 = 0x1F,
+ BD718XX_REG_TRANS_COND1 = 0x20,
+ BD718XX_REG_VRFAULTEN = 0x21,
+ BD718XX_REG_MVRFLTMASK0 = 0x22,
+ BD718XX_REG_MVRFLTMASK1 = 0x23,
+ BD718XX_REG_MVRFLTMASK2 = 0x24,
+ BD718XX_REG_RCVCFG = 0x25,
+ BD718XX_REG_RCVNUM = 0x26,
+ BD718XX_REG_PWRONCONFIG0 = 0x27,
+ BD718XX_REG_PWRONCONFIG1 = 0x28,
+ BD718XX_REG_RESETSRC = 0x29,
+ BD718XX_REG_MIRQ = 0x2A,
+ BD718XX_REG_IRQ = 0x2B,
+ BD718XX_REG_IN_MON = 0x2C,
+ BD718XX_REG_POW_STATE = 0x2D,
+ BD718XX_REG_OUT32K = 0x2E,
+ BD718XX_REG_REGLOCK = 0x2F,
+ BD718XX_REG_OTPVER = 0xFF,
+ BD718XX_MAX_REGISTER = 0x100,
+};
+
+#define REGLOCK_PWRSEQ 0x1
+#define REGLOCK_VREG 0x10
+
+/* Generic BUCK control masks */
+#define BD718XX_BUCK_SEL 0x02
+#define BD718XX_BUCK_EN 0x01
+#define BD718XX_BUCK_RUN_ON 0x04
+
+/* Generic LDO masks */
+#define BD718XX_LDO_SEL 0x80
+#define BD718XX_LDO_EN 0x40
+
+/* BD71837 BUCK ramp rate CTRL reg bits */
+#define BUCK_RAMPRATE_MASK 0xC0
+#define BUCK_RAMPRATE_10P00MV 0x0
+#define BUCK_RAMPRATE_5P00MV 0x1
+#define BUCK_RAMPRATE_2P50MV 0x2
+#define BUCK_RAMPRATE_1P25MV 0x3
+
+#define DVS_BUCK_RUN_MASK 0x3F
+#define DVS_BUCK_SUSP_MASK 0x3F
+#define DVS_BUCK_IDLE_MASK 0x3F
+
+#define BD718XX_1ST_NODVS_BUCK_MASK 0x07
+#define BD718XX_3RD_NODVS_BUCK_MASK 0x07
+#define BD718XX_4TH_NODVS_BUCK_MASK 0x3F
+
+#define BD71847_BUCK3_MASK 0x07
+#define BD71847_BUCK3_RANGE_MASK 0xC0
+#define BD71847_BUCK4_MASK 0x03
+#define BD71847_BUCK4_RANGE_MASK 0x40
+
+#define BD71837_BUCK5_MASK 0x07
+#define BD71837_BUCK5_RANGE_MASK 0x80
+#define BD71837_BUCK6_MASK 0x03
+
+#define BD718XX_LDO1_MASK 0x03
+#define BD718XX_LDO1_RANGE_MASK 0x20
+#define BD718XX_LDO2_MASK 0x20
+#define BD718XX_LDO3_MASK 0x0F
+#define BD718XX_LDO4_MASK 0x0F
+#define BD718XX_LDO6_MASK 0x0F
+
+#define BD71837_LDO5_MASK 0x0F
+#define BD71847_LDO5_MASK 0x0F
+#define BD71847_LDO5_RANGE_MASK 0x20
+
+#define BD71837_LDO7_MASK 0x0F
+
+/* BD718XX Voltage monitoring masks */
+#define BD718XX_BUCK1_VRMON80 0x1
+#define BD718XX_BUCK1_VRMON130 0x2
+#define BD718XX_BUCK2_VRMON80 0x4
+#define BD718XX_BUCK2_VRMON130 0x8
+#define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1
+#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
+#define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4
+#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
+#define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10
+#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
+#define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40
+#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
+#define BD718XX_LDO1_VRMON80 0x1
+#define BD718XX_LDO2_VRMON80 0x2
+#define BD718XX_LDO3_VRMON80 0x4
+#define BD718XX_LDO4_VRMON80 0x8
+#define BD718XX_LDO5_VRMON80 0x10
+#define BD718XX_LDO6_VRMON80 0x20
+
+/* BD71837 specific voltage monitoring masks */
+#define BD71837_BUCK3_VRMON80 0x10
+#define BD71837_BUCK3_VRMON130 0x20
+#define BD71837_BUCK4_VRMON80 0x40
+#define BD71837_BUCK4_VRMON130 0x80
+#define BD71837_LDO7_VRMON80 0x40
+
+/* BD718XX_REG_IRQ bits */
+#define IRQ_SWRST 0x40
+#define IRQ_PWRON_S 0x20
+#define IRQ_PWRON_L 0x10
+#define IRQ_PWRON 0x08
+#define IRQ_WDOG 0x04
+#define IRQ_ON_REQ 0x02
+#define IRQ_STBY_REQ 0x01
+
+/* ROHM BD718XX irqs */
+enum {
+ BD718XX_INT_STBY_REQ,
+ BD718XX_INT_ON_REQ,
+ BD718XX_INT_WDOG,
+ BD718XX_INT_PWRBTN,
+ BD718XX_INT_PWRBTN_L,
+ BD718XX_INT_PWRBTN_S,
+ BD718XX_INT_SWRST
+};
+
+/* ROHM BD718XX interrupt masks */
+#define BD718XX_INT_SWRST_MASK 0x40
+#define BD718XX_INT_PWRBTN_S_MASK 0x20
+#define BD718XX_INT_PWRBTN_L_MASK 0x10
+#define BD718XX_INT_PWRBTN_MASK 0x8
+#define BD718XX_INT_WDOG_MASK 0x4
+#define BD718XX_INT_ON_REQ_MASK 0x2
+#define BD718XX_INT_STBY_REQ_MASK 0x1
+
+/* Register write induced reset settings */
+
+/*
+ * Even though bit zero is not a SWRESET type bit, we still want to write
+ * zero to it when changing the type. Bit zero is the 'SWRESET' trigger bit
+ * and writing 1 to it triggers the action. So always write 0 to it when
+ * changing the SWRESET action - no matter what we read from it.
+ */
+#define BD718XX_SWRESET_TYPE_MASK 7
+#define BD718XX_SWRESET_TYPE_DISABLED 0
+#define BD718XX_SWRESET_TYPE_COLD 4
+#define BD718XX_SWRESET_TYPE_WARM 6
+
+#define BD718XX_SWRESET_RESET_MASK 1
+#define BD718XX_SWRESET_RESET 1
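(Editorial note: a worked example of what the comment above means in practice. Because BD718XX_SWRESET_TYPE_MASK (7) deliberately covers bit zero, a single masked update changes the type and simultaneously writes the trigger bit back as zero. Sketch only; the helper name is made up.)

	#include <linux/regmap.h>

	/* Sketch: set the SWRESET type to COLD without pulling the trigger bit. */
	static int bd718xx_set_cold_swreset(struct regmap *regmap)
	{
		return regmap_update_bits(regmap, BD718XX_REG_SWRESET,
					  BD718XX_SWRESET_TYPE_MASK,
					  BD718XX_SWRESET_TYPE_COLD);
	}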
+
+/* Poweroff state transition conditions */
+
+#define BD718XX_ON_REQ_POWEROFF_MASK 1
+#define BD718XX_SWRESET_POWEROFF_MASK 2
+#define BD718XX_WDOG_POWEROFF_MASK 4
+#define BD718XX_KEY_L_POWEROFF_MASK 8
+
+#define BD718XX_POWOFF_TO_SNVS 0
+#define BD718XX_POWOFF_TO_RDY 0xF
+
+#define BD718XX_POWOFF_TIME_MASK 0xF0
+enum {
+ BD718XX_POWOFF_TIME_5MS = 0,
+ BD718XX_POWOFF_TIME_10MS,
+ BD718XX_POWOFF_TIME_15MS,
+ BD718XX_POWOFF_TIME_20MS,
+ BD718XX_POWOFF_TIME_25MS,
+ BD718XX_POWOFF_TIME_30MS,
+ BD718XX_POWOFF_TIME_35MS,
+ BD718XX_POWOFF_TIME_40MS,
+ BD718XX_POWOFF_TIME_45MS,
+ BD718XX_POWOFF_TIME_50MS,
+ BD718XX_POWOFF_TIME_75MS,
+ BD718XX_POWOFF_TIME_100MS,
+ BD718XX_POWOFF_TIME_250MS,
+ BD718XX_POWOFF_TIME_500MS,
+ BD718XX_POWOFF_TIME_750MS,
+ BD718XX_POWOFF_TIME_1500MS
+};
+
+/* Poweron sequence state transition conditions */
+#define BD718XX_RDY_TO_SNVS_MASK 0xF
+#define BD718XX_SNVS_TO_RUN_MASK 0xF0
+
+#define BD718XX_PWR_TRIG_KEY_L 1
+#define BD718XX_PWR_TRIG_KEY_S 2
+#define BD718XX_PWR_TRIG_PMIC_ON 4
+#define BD718XX_PWR_TRIG_VSYS_UVLO 8
+#define BD718XX_RDY_TO_SNVS_SIFT 0
+#define BD718XX_SNVS_TO_RUN_SIFT 4
+
+#define BD718XX_PWRBTN_PRESS_DURATION_MASK 0xF
+
+/* Timeout value for detecting short press */
+enum {
+ BD718XX_PWRBTN_SHORT_PRESS_10MS = 0,
+ BD718XX_PWRBTN_SHORT_PRESS_500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_1000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_1500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_2000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_2500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_3000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_3500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_4000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_4500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_5000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_5500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_6000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_6500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_7000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_7500MS
+};
+
+/* Timeout value for detecting LONG press */
+enum {
+ BD718XX_PWRBTN_LONG_PRESS_10MS = 0,
+ BD718XX_PWRBTN_LONG_PRESS_1S,
+ BD718XX_PWRBTN_LONG_PRESS_2S,
+ BD718XX_PWRBTN_LONG_PRESS_3S,
+ BD718XX_PWRBTN_LONG_PRESS_4S,
+ BD718XX_PWRBTN_LONG_PRESS_5S,
+ BD718XX_PWRBTN_LONG_PRESS_6S,
+ BD718XX_PWRBTN_LONG_PRESS_7S,
+ BD718XX_PWRBTN_LONG_PRESS_8S,
+ BD718XX_PWRBTN_LONG_PRESS_9S,
+ BD718XX_PWRBTN_LONG_PRESS_10S,
+ BD718XX_PWRBTN_LONG_PRESS_11S,
+ BD718XX_PWRBTN_LONG_PRESS_12S,
+ BD718XX_PWRBTN_LONG_PRESS_13S,
+ BD718XX_PWRBTN_LONG_PRESS_14S,
+ BD718XX_PWRBTN_LONG_PRESS_15S
+};
+
+#endif /* __LINUX_MFD_BD718XX_H__ */
diff --git a/include/linux/mfd/rohm-bd957x.h b/include/linux/mfd/rohm-bd957x.h
new file mode 100644
index 000000000000..acc920b64f75
--- /dev/null
+++ b/include/linux/mfd/rohm-bd957x.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2021 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD957X_H__
+#define __LINUX_MFD_BD957X_H__
+
+enum {
+ BD957X_VD50,
+ BD957X_VD18,
+ BD957X_VDDDR,
+ BD957X_VD10,
+ BD957X_VOUTL1,
+ BD957X_VOUTS1,
+};
+
+/*
+ * The BD9576 has its own IRQ 'blocks' for:
+ * - I2C/thermal
+ * - Over voltage protection
+ * - Short-circuit protection
+ * - Over current protection
+ * - Over voltage detection
+ * - Under voltage detection
+ * - Under voltage protection
+ * - 'system interrupt'.
+ *
+ * Each of the blocks has a status register giving more accurate IRQ source
+ * information - for example, which of the regulators has an over-voltage.
+ *
+ * On top of this, there is a "main IRQ" status register where each bit
+ * indicates which of the sub-blocks have active IRQs. Fine. That would fit
+ * regmap-irq main status handling. Except that:
+ * - Only some sub-IRQs can be masked.
+ * - The IRQ informs us about a fault condition, not about a change of the
+ *   fault state. The IRQ line is kept asserted until the detected condition
+ *   is acked AND cleared in HW. This is annoying for IRQs like the one
+ *   signalling high temperature, because if the IRQ is not disabled it keeps
+ *   the CPU in an IRQ handling loop.
+ *
+ * For now we just use the main-IRQ register as the source for our IRQ
+ * information and bind the regmap-irq to it. We leave fine-grained sub-IRQ
+ * register handling to handlers in the sub-devices. The regulator driver
+ * shall read which regulators are the source of the problem - or whether the
+ * detected error is a regulator temperature error. The sub-drivers also
+ * handle masking of "sub-IRQs" where this is supported/needed.
+ *
+ * To overcome the problem of HW keeping the IRQ asserted, we call
+ * disable_irq_nosync() from the sub-device handler and schedule a delayed
+ * work to re-enable the IRQ roughly 1 second later. This should keep our CPU
+ * out of a busy-loop.
+ */
+#define IRQS_SILENT_MS 1000
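(Editorial note: a minimal sketch of the silencing scheme the comment above describes. Only disable_irq_nosync(), the delayed work, and IRQS_SILENT_MS come from the text; the context struct and function names are assumptions.)

	#include <linux/interrupt.h>
	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	struct bd9576_irq_ctx {
		int irq;
		struct delayed_work reenable;
	};

	static void bd9576_reenable_irq(struct work_struct *work)
	{
		struct bd9576_irq_ctx *ctx =
			container_of(work, struct bd9576_irq_ctx, reenable.work);

		enable_irq(ctx->irq);
	}

	static irqreturn_t bd9576_sub_irq(int irq, void *data)
	{
		struct bd9576_irq_ctx *ctx = data;

		/* ... read/ack the fine-grained sub-status register here ... */

		/* HW keeps the line asserted; silence it for roughly a second */
		disable_irq_nosync(irq);
		schedule_delayed_work(&ctx->reenable,
				      msecs_to_jiffies(IRQS_SILENT_MS));

		return IRQ_HANDLED;
	}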
+
+enum {
+ BD9576_INT_THERM,
+ BD9576_INT_OVP,
+ BD9576_INT_SCP,
+ BD9576_INT_OCP,
+ BD9576_INT_OVD,
+ BD9576_INT_UVD,
+ BD9576_INT_UVP,
+ BD9576_INT_SYS,
+};
+
+#define BD957X_REG_SMRB_ASSERT 0x15
+#define BD957X_REG_PMIC_INTERNAL_STAT 0x20
+#define BD957X_REG_INT_THERM_STAT 0x23
+#define BD957X_REG_INT_THERM_MASK 0x24
+#define BD957X_REG_INT_OVP_STAT 0x25
+#define BD957X_REG_INT_SCP_STAT 0x26
+#define BD957X_REG_INT_OCP_STAT 0x27
+#define BD957X_REG_INT_OVD_STAT 0x28
+#define BD957X_REG_INT_UVD_STAT 0x29
+#define BD957X_REG_INT_UVP_STAT 0x2a
+#define BD957X_REG_INT_SYS_STAT 0x2b
+#define BD957X_REG_INT_SYS_MASK 0x2c
+#define BD957X_REG_INT_MAIN_STAT 0x30
+#define BD957X_REG_INT_MAIN_MASK 0x31
+
+#define UVD_IRQ_VALID_MASK 0x6F
+#define OVD_IRQ_VALID_MASK 0x2F
+
+#define BD957X_MASK_INT_MAIN_THERM BIT(0)
+#define BD957X_MASK_INT_MAIN_OVP BIT(1)
+#define BD957X_MASK_INT_MAIN_SCP BIT(2)
+#define BD957X_MASK_INT_MAIN_OCP BIT(3)
+#define BD957X_MASK_INT_MAIN_OVD BIT(4)
+#define BD957X_MASK_INT_MAIN_UVD BIT(5)
+#define BD957X_MASK_INT_MAIN_UVP BIT(6)
+#define BD957X_MASK_INT_MAIN_SYS BIT(7)
+#define BD957X_MASK_INT_ALL 0xff
+
+#define BD957X_REG_WDT_CONF 0x16
+
+#define BD957X_REG_POW_TRIGGER1 0x41
+#define BD957X_REG_POW_TRIGGER2 0x42
+#define BD957X_REG_POW_TRIGGER3 0x43
+#define BD957X_REG_POW_TRIGGER4 0x44
+#define BD957X_REG_POW_TRIGGERL1 0x45
+#define BD957X_REG_POW_TRIGGERS1 0x46
+
+#define BD957X_REGULATOR_EN_MASK 0xff
+#define BD957X_REGULATOR_DIS_VAL 0xff
+
+#define BD957X_VSEL_REG_MASK 0xff
+
+#define BD957X_MASK_VOUT1_TUNE 0x87
+#define BD957X_MASK_VOUT2_TUNE 0x87
+#define BD957X_MASK_VOUT3_TUNE 0x1f
+#define BD957X_MASK_VOUT4_TUNE 0x1f
+#define BD957X_MASK_VOUTL1_TUNE 0x87
+
+#define BD957X_REG_VOUT1_TUNE 0x50
+#define BD957X_REG_VOUT2_TUNE 0x53
+#define BD957X_REG_VOUT3_TUNE 0x56
+#define BD957X_REG_VOUT4_TUNE 0x59
+#define BD957X_REG_VOUTL1_TUNE 0x5c
+
+#define BD9576_REG_VOUT1_OVD 0x51
+#define BD9576_REG_VOUT1_UVD 0x52
+#define BD9576_REG_VOUT2_OVD 0x54
+#define BD9576_REG_VOUT2_UVD 0x55
+#define BD9576_REG_VOUT3_OVD 0x57
+#define BD9576_REG_VOUT3_UVD 0x58
+#define BD9576_REG_VOUT4_OVD 0x5a
+#define BD9576_REG_VOUT4_UVD 0x5b
+#define BD9576_REG_VOUTL1_OVD 0x5d
+#define BD9576_REG_VOUTL1_UVD 0x5e
+
+#define BD9576_MASK_XVD 0x7f
+
+#define BD9576_REG_VOUT1S_OCW 0x5f
+#define BD9576_REG_VOUT1S_OCP 0x60
+
+#define BD9576_MASK_VOUT1S_OCW 0x3f
+#define BD9576_MASK_VOUT1S_OCP 0x3f
+
+#define BD957X_MAX_REGISTER 0x61
+
+#endif
diff --git a/include/linux/mfd/rohm-bd96801.h b/include/linux/mfd/rohm-bd96801.h
new file mode 100644
index 000000000000..68c8ac8ad409
--- /dev/null
+++ b/include/linux/mfd/rohm-bd96801.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2024 ROHM Semiconductors */
+
+#ifndef __MFD_BD96801_H__
+#define __MFD_BD96801_H__
+
+#define BD96801_REG_SSCG_CTRL 0x09
+#define BD96801_REG_SHD_INTB 0x20
+#define BD96801_LDO5_VOL_LVL_REG 0x2c
+#define BD96801_LDO6_VOL_LVL_REG 0x2d
+#define BD96801_LDO7_VOL_LVL_REG 0x2e
+#define BD96801_REG_BUCK_OVP 0x30
+#define BD96801_REG_BUCK_OVD 0x35
+#define BD96801_REG_LDO_OVP 0x31
+#define BD96801_REG_LDO_OVD 0x36
+#define BD96801_REG_BOOT_OVERTIME 0x3a
+#define BD96801_REG_WD_TMO 0x40
+#define BD96801_REG_WD_CONF 0x41
+#define BD96801_REG_WD_FEED 0x42
+#define BD96801_REG_WD_FAILCOUNT 0x43
+#define BD96801_REG_WD_ASK 0x46
+#define BD96801_REG_WD_STATUS 0x4a
+#define BD96801_REG_PMIC_STATE 0x4f
+#define BD96801_REG_EXT_STATE 0x50
+
+#define BD96801_STATE_STBY 0x09
+
+#define BD96801_LOCK_REG 0x04
+#define BD96801_UNLOCK 0x9d
+#define BD96801_LOCK 0x00
+
+/* IRQ register area */
+#define BD96801_REG_INT_MAIN 0x51
+
+/*
+ * The BD96801 has two physical IRQ lines, INTB and ERRB.
+ *
+ * The 'main status register' is located at 0x51.
+ * The ERRB status registers are located at 0x52 ... 0x5B
+ * INTB status registers are at range 0x5c ... 0x63
+ */
+#define BD96801_REG_INT_SYS_ERRB1 0x52
+#define BD96801_REG_INT_BUCK2_ERRB 0x56
+#define BD96801_REG_INT_SYS_INTB 0x5c
+#define BD96801_REG_INT_BUCK2_INTB 0x5e
+#define BD96801_REG_INT_LDO7_INTB 0x63
+
+/* MASK registers */
+#define BD96801_REG_MASK_SYS_INTB 0x73
+#define BD96801_REG_MASK_SYS_ERRB 0x69
+
+#define BD96801_MAX_REGISTER 0x7a
+
+#define BD96801_OTP_ERR_MASK BIT(0)
+#define BD96801_DBIST_ERR_MASK BIT(1)
+#define BD96801_EEP_ERR_MASK BIT(2)
+#define BD96801_ABIST_ERR_MASK BIT(3)
+#define BD96801_PRSTB_ERR_MASK BIT(4)
+#define BD96801_DRMOS1_ERR_MASK BIT(5)
+#define BD96801_DRMOS2_ERR_MASK BIT(6)
+#define BD96801_SLAVE_ERR_MASK BIT(7)
+#define BD96801_VREF_ERR_MASK BIT(0)
+#define BD96801_TSD_ERR_MASK BIT(1)
+#define BD96801_UVLO_ERR_MASK BIT(2)
+#define BD96801_OVLO_ERR_MASK BIT(3)
+#define BD96801_OSC_ERR_MASK BIT(4)
+#define BD96801_PON_ERR_MASK BIT(5)
+#define BD96801_POFF_ERR_MASK BIT(6)
+#define BD96801_CMD_SHDN_ERR_MASK BIT(7)
+#define BD96801_INT_PRSTB_WDT_ERR_MASK BIT(0)
+#define BD96801_INT_CHIP_IF_ERR_MASK BIT(3)
+#define BD96801_INT_SHDN_ERR_MASK BIT(7)
+#define BD96801_OUT_PVIN_ERR_MASK BIT(0)
+#define BD96801_OUT_OVP_ERR_MASK BIT(1)
+#define BD96801_OUT_UVP_ERR_MASK BIT(2)
+#define BD96801_OUT_SHDN_ERR_MASK BIT(7)
+
+/* ERRB IRQs */
+enum {
+ /* Reg 0x52, 0x53, 0x54 - ERRB system IRQs */
+ BD96801_OTP_ERR_STAT,
+ BD96801_DBIST_ERR_STAT,
+ BD96801_EEP_ERR_STAT,
+ BD96801_ABIST_ERR_STAT,
+ BD96801_PRSTB_ERR_STAT,
+ BD96801_DRMOS1_ERR_STAT,
+ BD96801_DRMOS2_ERR_STAT,
+ BD96801_SLAVE_ERR_STAT,
+ BD96801_VREF_ERR_STAT,
+ BD96801_TSD_ERR_STAT,
+ BD96801_UVLO_ERR_STAT,
+ BD96801_OVLO_ERR_STAT,
+ BD96801_OSC_ERR_STAT,
+ BD96801_PON_ERR_STAT,
+ BD96801_POFF_ERR_STAT,
+ BD96801_CMD_SHDN_ERR_STAT,
+ BD96801_INT_PRSTB_WDT_ERR,
+ BD96801_INT_CHIP_IF_ERR,
+ BD96801_INT_SHDN_ERR_STAT,
+
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ BD96801_BUCK1_PVIN_ERR_STAT,
+ BD96801_BUCK1_OVP_ERR_STAT,
+ BD96801_BUCK1_UVP_ERR_STAT,
+ BD96801_BUCK1_SHDN_ERR_STAT,
+
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ BD96801_BUCK2_PVIN_ERR_STAT,
+ BD96801_BUCK2_OVP_ERR_STAT,
+ BD96801_BUCK2_UVP_ERR_STAT,
+ BD96801_BUCK2_SHDN_ERR_STAT,
+
+ /* Reg 0x57 BUCK3 ERR IRQs */
+ BD96801_BUCK3_PVIN_ERR_STAT,
+ BD96801_BUCK3_OVP_ERR_STAT,
+ BD96801_BUCK3_UVP_ERR_STAT,
+ BD96801_BUCK3_SHDN_ERR_STAT,
+
+ /* Reg 0x58 BUCK4 ERR IRQs */
+ BD96801_BUCK4_PVIN_ERR_STAT,
+ BD96801_BUCK4_OVP_ERR_STAT,
+ BD96801_BUCK4_UVP_ERR_STAT,
+ BD96801_BUCK4_SHDN_ERR_STAT,
+
+ /* Reg 0x59 LDO5 ERR IRQs */
+ BD96801_LDO5_PVIN_ERR_STAT,
+ BD96801_LDO5_OVP_ERR_STAT,
+ BD96801_LDO5_UVP_ERR_STAT,
+ BD96801_LDO5_SHDN_ERR_STAT,
+
+ /* Reg 0x5a LDO6 ERR IRQs */
+ BD96801_LDO6_PVIN_ERR_STAT,
+ BD96801_LDO6_OVP_ERR_STAT,
+ BD96801_LDO6_UVP_ERR_STAT,
+ BD96801_LDO6_SHDN_ERR_STAT,
+
+ /* Reg 0x5b LDO7 ERR IRQs */
+ BD96801_LDO7_PVIN_ERR_STAT,
+ BD96801_LDO7_OVP_ERR_STAT,
+ BD96801_LDO7_UVP_ERR_STAT,
+ BD96801_LDO7_SHDN_ERR_STAT,
+};
+
+/* INTB IRQs */
+enum {
+ /* Reg 0x5c (System INTB) */
+ BD96801_TW_STAT,
+ BD96801_WDT_ERR_STAT,
+ BD96801_I2C_ERR_STAT,
+ BD96801_CHIP_IF_ERR_STAT,
+
+ /* Reg 0x5d (BUCK1 INTB) */
+ BD96801_BUCK1_OCPH_STAT,
+ BD96801_BUCK1_OCPL_STAT,
+ BD96801_BUCK1_OCPN_STAT,
+ BD96801_BUCK1_OVD_STAT,
+ BD96801_BUCK1_UVD_STAT,
+ BD96801_BUCK1_TW_CH_STAT,
+
+ /* Reg 0x5e (BUCK2 INTB) */
+ BD96801_BUCK2_OCPH_STAT,
+ BD96801_BUCK2_OCPL_STAT,
+ BD96801_BUCK2_OCPN_STAT,
+ BD96801_BUCK2_OVD_STAT,
+ BD96801_BUCK2_UVD_STAT,
+ BD96801_BUCK2_TW_CH_STAT,
+
+ /* Reg 0x5f (BUCK3 INTB)*/
+ BD96801_BUCK3_OCPH_STAT,
+ BD96801_BUCK3_OCPL_STAT,
+ BD96801_BUCK3_OCPN_STAT,
+ BD96801_BUCK3_OVD_STAT,
+ BD96801_BUCK3_UVD_STAT,
+ BD96801_BUCK3_TW_CH_STAT,
+
+ /* Reg 0x60 (BUCK4 INTB)*/
+ BD96801_BUCK4_OCPH_STAT,
+ BD96801_BUCK4_OCPL_STAT,
+ BD96801_BUCK4_OCPN_STAT,
+ BD96801_BUCK4_OVD_STAT,
+ BD96801_BUCK4_UVD_STAT,
+ BD96801_BUCK4_TW_CH_STAT,
+
+ /* Reg 0x61 (LDO5 INTB) */
+ BD96801_LDO5_OCPH_STAT, /* bit [0] */
+ BD96801_LDO5_OVD_STAT, /* bit [3] */
+ BD96801_LDO5_UVD_STAT, /* bit [4] */
+
+ /* Reg 0x62 (LDO6 INTB) */
+ BD96801_LDO6_OCPH_STAT, /* bit [0] */
+ BD96801_LDO6_OVD_STAT, /* bit [3] */
+ BD96801_LDO6_UVD_STAT, /* bit [4] */
+
+ /* Reg 0x63 (LDO7 INTB) */
+ BD96801_LDO7_OCPH_STAT, /* bit [0] */
+ BD96801_LDO7_OVD_STAT, /* bit [3] */
+ BD96801_LDO7_UVD_STAT, /* bit [4] */
+};
+
+/* IRQ MASKs */
+#define BD96801_TW_STAT_MASK BIT(0)
+#define BD96801_WDT_ERR_STAT_MASK BIT(1)
+#define BD96801_I2C_ERR_STAT_MASK BIT(2)
+#define BD96801_CHIP_IF_ERR_STAT_MASK BIT(3)
+
+#define BD96801_BUCK_OCPH_STAT_MASK BIT(0)
+#define BD96801_BUCK_OCPL_STAT_MASK BIT(1)
+#define BD96801_BUCK_OCPN_STAT_MASK BIT(2)
+#define BD96801_BUCK_OVD_STAT_MASK BIT(3)
+#define BD96801_BUCK_UVD_STAT_MASK BIT(4)
+#define BD96801_BUCK_TW_CH_STAT_MASK BIT(5)
+
+#define BD96801_LDO_OCPH_STAT_MASK BIT(0)
+#define BD96801_LDO_OVD_STAT_MASK BIT(3)
+#define BD96801_LDO_UVD_STAT_MASK BIT(4)
+
+#endif
diff --git a/include/linux/mfd/rohm-bd96802.h b/include/linux/mfd/rohm-bd96802.h
new file mode 100644
index 000000000000..bf4b77944edf
--- /dev/null
+++ b/include/linux/mfd/rohm-bd96802.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025 ROHM Semiconductors
+ *
+ * The digital interface of the BD96802 PMIC is a reduced version of the
+ * BD96801. Hence the BD96801 definitions are used for registers and masks
+ * while this header only holds the IRQ definitions - mainly to avoid gaps in
+ * IRQ numbers caused by the lack of some BUCKs / LDOs and their respective
+ * IRQs.
+ */
+
+#ifndef __LINUX_MFD_BD96802_H__
+#define __LINUX_MFD_BD96802_H__
+
+/* ERRB IRQs */
+enum {
+ /* Reg 0x52, 0x53, 0x54 - ERRB system IRQs */
+ BD96802_OTP_ERR_STAT,
+ BD96802_DBIST_ERR_STAT,
+ BD96802_EEP_ERR_STAT,
+ BD96802_ABIST_ERR_STAT,
+ BD96802_PRSTB_ERR_STAT,
+ BD96802_DRMOS1_ERR_STAT,
+ BD96802_DRMOS2_ERR_STAT,
+ BD96802_SLAVE_ERR_STAT,
+ BD96802_VREF_ERR_STAT,
+ BD96802_TSD_ERR_STAT,
+ BD96802_UVLO_ERR_STAT,
+ BD96802_OVLO_ERR_STAT,
+ BD96802_OSC_ERR_STAT,
+ BD96802_PON_ERR_STAT,
+ BD96802_POFF_ERR_STAT,
+ BD96802_CMD_SHDN_ERR_STAT,
+ BD96802_INT_SHDN_ERR_STAT,
+
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ BD96802_BUCK1_PVIN_ERR_STAT,
+ BD96802_BUCK1_OVP_ERR_STAT,
+ BD96802_BUCK1_UVP_ERR_STAT,
+ BD96802_BUCK1_SHDN_ERR_STAT,
+
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ BD96802_BUCK2_PVIN_ERR_STAT,
+ BD96802_BUCK2_OVP_ERR_STAT,
+ BD96802_BUCK2_UVP_ERR_STAT,
+ BD96802_BUCK2_SHDN_ERR_STAT,
+};
+
+/* INTB IRQs */
+enum {
+ /* Reg 0x5c (System INTB) */
+ BD96802_TW_STAT,
+ BD96802_WDT_ERR_STAT,
+ BD96802_I2C_ERR_STAT,
+ BD96802_CHIP_IF_ERR_STAT,
+
+ /* Reg 0x5d (BUCK1 INTB) */
+ BD96802_BUCK1_OCPH_STAT,
+ BD96802_BUCK1_OCPL_STAT,
+ BD96802_BUCK1_OCPN_STAT,
+ BD96802_BUCK1_OVD_STAT,
+ BD96802_BUCK1_UVD_STAT,
+ BD96802_BUCK1_TW_CH_STAT,
+
+ /* Reg 0x5e (BUCK2 INTB) */
+ BD96802_BUCK2_OCPH_STAT,
+ BD96802_BUCK2_OCPL_STAT,
+ BD96802_BUCK2_OCPN_STAT,
+ BD96802_BUCK2_OVD_STAT,
+ BD96802_BUCK2_UVD_STAT,
+ BD96802_BUCK2_TW_CH_STAT,
+};
+
+#endif
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
new file mode 100644
index 000000000000..579e8dcfcca4
--- /dev/null
+++ b/include/linux/mfd/rohm-generic.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2018 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_ROHM_H__
+#define __LINUX_MFD_ROHM_H__
+
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum rohm_chip_type {
+ ROHM_CHIP_TYPE_BD9571,
+ ROHM_CHIP_TYPE_BD9573,
+ ROHM_CHIP_TYPE_BD9574,
+ ROHM_CHIP_TYPE_BD9576,
+ ROHM_CHIP_TYPE_BD71815,
+ ROHM_CHIP_TYPE_BD71828,
+ ROHM_CHIP_TYPE_BD71837,
+ ROHM_CHIP_TYPE_BD71847,
+ ROHM_CHIP_TYPE_BD96801,
+ ROHM_CHIP_TYPE_BD96802,
+ ROHM_CHIP_TYPE_BD96805,
+ ROHM_CHIP_TYPE_BD96806,
+ ROHM_CHIP_TYPE_AMOUNT
+};
+
+struct rohm_regmap_dev {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#define ROHM_DVS_LEVEL_RUN BIT(0)
+#define ROHM_DVS_LEVEL_IDLE BIT(1)
+#define ROHM_DVS_LEVEL_SUSPEND BIT(2)
+#define ROHM_DVS_LEVEL_LPSR BIT(3)
+#define ROHM_DVS_LEVEL_SNVS BIT(4)
+#define ROHM_DVS_LEVEL_VALID_AMOUNT 5
+#define ROHM_DVS_LEVEL_UNKNOWN 0
+
+/**
+ * struct rohm_dvs_config - dynamic voltage scaling register descriptions
+ *
+ * @level_map: bitmap representing supported run-levels for this
+ * regulator
+ * @run_reg: register address for regulator config at 'run' state
+ * @run_mask: value mask for regulator voltages at 'run' state
+ * @run_on_mask: enable mask for regulator at 'run' state
+ * @idle_reg: register address for regulator config at 'idle' state
+ * @idle_mask: value mask for regulator voltages at 'idle' state
+ * @idle_on_mask: enable mask for regulator at 'idle' state
+ * @suspend_reg: register address for regulator config at 'suspend' state
+ * @suspend_mask: value mask for regulator voltages at 'suspend' state
+ * @suspend_on_mask: enable mask for regulator at 'suspend' state
+ * @lpsr_reg: register address for regulator config at 'lpsr' state
+ * @lpsr_mask: value mask for regulator voltages at 'lpsr' state
+ * @lpsr_on_mask:	enable mask for regulator at 'lpsr' state
+ * @snvs_reg:		register address for regulator config at 'snvs' state
+ * @snvs_mask:		value mask for regulator voltages at 'snvs' state
+ * @snvs_on_mask:	enable mask for regulator at 'snvs' state
+ *
+ * Description of ROHM PMICs voltage configuration registers for different
+ * system states. This is used to correctly configure the PMIC at startup
+ * based on values read from DT.
+ */
+struct rohm_dvs_config {
+ uint64_t level_map;
+ unsigned int run_reg;
+ unsigned int run_mask;
+ unsigned int run_on_mask;
+ unsigned int idle_reg;
+ unsigned int idle_mask;
+ unsigned int idle_on_mask;
+ unsigned int suspend_reg;
+ unsigned int suspend_mask;
+ unsigned int suspend_on_mask;
+ unsigned int lpsr_reg;
+ unsigned int lpsr_mask;
+ unsigned int lpsr_on_mask;
+ unsigned int snvs_reg;
+ unsigned int snvs_mask;
+ unsigned int snvs_on_mask;
+};
+
+#if IS_ENABLED(CONFIG_REGULATOR_ROHM)
+int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+ struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regmap *regmap);
+
+int rohm_regulator_set_voltage_sel_restricted(struct regulator_dev *rdev,
+ unsigned int sel);
+#endif
+
+#endif
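
A minimal sketch of how a regulator driver might use rohm_dvs_config and
rohm_regulator_set_dvs_levels() from the header above; the register addresses
and the of_parse callback are hypothetical placeholders:

#include <linux/regulator/driver.h>

static const struct rohm_dvs_config buck1_dvs = {
	.level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE,
	.run_reg = 0x21,	/* hypothetical register address */
	.run_mask = 0x3f,
	.idle_reg = 0x22,	/* hypothetical register address */
	.idle_mask = 0x3f,
};

/* Called by the regulator core for each DT node, via desc->of_parse_cb */
static int buck1_of_parse_cb(struct device_node *np,
			     const struct regulator_desc *desc,
			     struct regulator_config *cfg)
{
	return rohm_regulator_set_dvs_levels(&buck1_dvs, np, desc,
					     cfg->regmap);
}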
diff --git a/include/linux/mfd/rohm-shared.h b/include/linux/mfd/rohm-shared.h
new file mode 100644
index 000000000000..53dd7f638bfd
--- /dev/null
+++ b/include/linux/mfd/rohm-shared.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2020 ROHM Semiconductors */
+
+
+#ifndef __LINUX_MFD_ROHM_SHARED_H__
+#define __LINUX_MFD_ROHM_SHARED_H__
+
+/* RTC definitions shared between BD70528 and BD71828 */
+
+#define BD70528_MASK_RTC_SEC 0x7f
+#define BD70528_MASK_RTC_MINUTE 0x7f
+#define BD70528_MASK_RTC_HOUR_24H 0x80
+#define BD70528_MASK_RTC_HOUR_PM 0x20
+#define BD70528_MASK_RTC_HOUR 0x3f
+#define BD70528_MASK_RTC_DAY 0x3f
+#define BD70528_MASK_RTC_WEEK 0x07
+#define BD70528_MASK_RTC_MONTH 0x1f
+#define BD70528_MASK_RTC_YEAR 0xff
+#define BD70528_MASK_ALM_EN 0x7
+
+#endif /* __LINUX_MFD_ROHM_SHARED_H__ */
diff --git a/include/linux/mfd/rsmu.h b/include/linux/mfd/rsmu.h
new file mode 100644
index 000000000000..0379aa207428
--- /dev/null
+++ b/include/linux/mfd/rsmu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core interface for Renesas Synchronization Management Unit (SMU) devices.
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#ifndef __LINUX_MFD_RSMU_H
+#define __LINUX_MFD_RSMU_H
+
+#define RSMU_MAX_WRITE_COUNT (255)
+#define RSMU_MAX_READ_COUNT (255)
+
+/* The supported devices are ClockMatrix, Sabre and SnowLotus */
+enum rsmu_type {
+ RSMU_CM = 0x34000,
+ RSMU_SABRE = 0x33810,
+ RSMU_SL = 0x19850,
+};
+
+/**
+ * struct rsmu_ddata - device data structure for sub devices.
+ *
+ * @dev: i2c/spi device.
+ * @regmap: i2c/spi bus access.
+ * @lock: mutex used by sub devices to make sure a series of
+ * bus access requests are not interrupted.
+ * @type: RSMU device type.
+ * @page: i2c/spi bus driver internal use only.
+ */
+struct rsmu_ddata {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ enum rsmu_type type;
+ u32 page;
+};
+#endif /* __LINUX_MFD_RSMU_H */
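
A minimal sketch of a sub-device using the shared lock so that a sequence of
bus accesses is not interleaved with another sub-device's traffic; the
register offsets are hypothetical:

static int rsmu_read_word(struct rsmu_ddata *rsmu, u16 *out)
{
	unsigned int lo, hi;
	int ret;

	mutex_lock(&rsmu->lock);
	ret = regmap_read(rsmu->regmap, 0x10, &lo);	/* hypothetical reg */
	if (!ret)
		ret = regmap_read(rsmu->regmap, 0x11, &hi);
	mutex_unlock(&rsmu->lock);

	if (!ret)
		*out = (hi << 8) | lo;
	return ret;
}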
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
index 1b63fc2f42d1..0221f806d139 100644
--- a/include/linux/mfd/rt5033-private.h
+++ b/include/linux/mfd/rt5033-private.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD core driver for Richtek RT5033
*
* Copyright (C) 2014 Samsung Electronics, Co., Ltd.
* Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published bythe Free Software Foundation.
*/
#ifndef __RT5033_PRIVATE_H__
@@ -58,21 +55,28 @@ enum rt5033_reg {
};
/* RT5033 Charger state register */
-#define RT5033_CHG_STAT_MASK 0x20
+#define RT5033_CHG_STAT_TYPE_MASK 0x60
+#define RT5033_CHG_STAT_TYPE_PRE 0x20
+#define RT5033_CHG_STAT_TYPE_FAST 0x60
+#define RT5033_CHG_STAT_MASK 0x30
#define RT5033_CHG_STAT_DISCHARGING 0x00
#define RT5033_CHG_STAT_FULL 0x10
#define RT5033_CHG_STAT_CHARGING 0x20
#define RT5033_CHG_STAT_NOT_CHARGING 0x30
-#define RT5033_CHG_STAT_TYPE_MASK 0x60
-#define RT5033_CHG_STAT_TYPE_PRE 0x20
-#define RT5033_CHG_STAT_TYPE_FAST 0x60
/* RT5033 CHGCTRL1 register */
#define RT5033_CHGCTRL1_IAICR_MASK 0xe0
+#define RT5033_CHGCTRL1_TE_EN_MASK 0x08
+#define RT5033_CHGCTRL1_HZ_MASK 0x02
#define RT5033_CHGCTRL1_MODE_MASK 0x01
/* RT5033 CHGCTRL2 register */
#define RT5033_CHGCTRL2_CV_MASK 0xfc
+#define RT5033_CHGCTRL2_CV_SHIFT 0x02
+
+/* RT5033 DEVICE_ID register */
+#define RT5033_VENDOR_ID_MASK 0xf0
+#define RT5033_CHIP_REV_MASK 0x0f
/* RT5033 CHGCTRL3 register */
#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40
@@ -80,28 +84,28 @@ enum rt5033_reg {
#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01
/* RT5033 CHGCTRL4 register */
-#define RT5033_CHGCTRL4_EOC_MASK 0x07
+#define RT5033_CHGCTRL4_MIVR_MASK 0xe0
#define RT5033_CHGCTRL4_IPREC_MASK 0x18
+#define RT5033_CHGCTRL4_IPREC_SHIFT 0x03
+#define RT5033_CHGCTRL4_EOC_MASK 0x07
/* RT5033 CHGCTRL5 register */
-#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
#define RT5033_CHGCTRL5_ICHG_MASK 0xf0
#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04
-#define RT5033_CHG_MAX_CURRENT 0x0d
+#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
/* RT5033 RT CTRL1 register */
#define RT5033_RT_CTRL1_UUG_MASK 0x02
-#define RT5033_RT_HZ_MASK 0x01
/* RT5033 control register */
-#define RT5033_CTRL_FCCM_BUCK_MASK 0x00
-#define RT5033_CTRL_BUCKOMS_MASK 0x01
-#define RT5033_CTRL_LDOOMS_MASK 0x02
-#define RT5033_CTRL_SLDOOMS_MASK 0x03
-#define RT5033_CTRL_EN_BUCK_MASK 0x04
-#define RT5033_CTRL_EN_LDO_MASK 0x05
-#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06
-#define RT5033_CTRL_LDO_SLEEP_MASK 0x07
+#define RT5033_CTRL_FCCM_BUCK_MASK BIT(0)
+#define RT5033_CTRL_BUCKOMS_MASK BIT(1)
+#define RT5033_CTRL_LDOOMS_MASK BIT(2)
+#define RT5033_CTRL_SLDOOMS_MASK BIT(3)
+#define RT5033_CTRL_EN_BUCK_MASK BIT(4)
+#define RT5033_CTRL_EN_LDO_MASK BIT(5)
+#define RT5033_CTRL_EN_SAFE_LDO_MASK BIT(6)
+#define RT5033_CTRL_LDO_SLEEP_MASK BIT(7)
/* RT5033 BUCK control register */
#define RT5033_BUCK_CTRL_MASK 0x1f
@@ -110,65 +114,77 @@ enum rt5033_reg {
#define RT5033_LDO_CTRL_MASK 0x1f
/* RT5033 charger property - model, manufacturer */
-
#define RT5033_CHARGER_MODEL "RT5033WSC Charger"
#define RT5033_MANUFACTURER "Richtek Technology Corporation"
/*
- * RT5033 charger fast-charge current lmits (as in CHGCTRL1 register),
- * AICR mode limits the input current for example,
- * the AIRC 100 mode limits the input current to 100 mA.
+ * While RT5033 charger can limit the fast-charge current (as in CHGCTRL1
+ * register), AICR mode limits the input current. For example, the AICR 100
+ * mode limits the input current to 100 mA.
*/
+#define RT5033_AICR_DISABLE 0x00
#define RT5033_AICR_100_MODE 0x20
#define RT5033_AICR_500_MODE 0x40
#define RT5033_AICR_700_MODE 0x60
#define RT5033_AICR_900_MODE 0x80
+#define RT5033_AICR_1000_MODE 0xa0
#define RT5033_AICR_1500_MODE 0xc0
#define RT5033_AICR_2000_MODE 0xe0
-#define RT5033_AICR_MODE_MASK 0xe0
-/* RT5033 use internal timer need to set time */
-#define RT5033_FAST_CHARGE_TIMER4 0x00
-#define RT5033_FAST_CHARGE_TIMER6 0x01
-#define RT5033_FAST_CHARGE_TIMER8 0x02
-#define RT5033_FAST_CHARGE_TIMER9 0x03
-#define RT5033_FAST_CHARGE_TIMER12 0x04
-#define RT5033_FAST_CHARGE_TIMER14 0x05
-#define RT5033_FAST_CHARGE_TIMER16 0x06
+/* RT5033 charger minimum input voltage regulation */
+#define RT5033_CHARGER_MIVR_DISABLE 0x00
+#define RT5033_CHARGER_MIVR_4200MV 0x20
+#define RT5033_CHARGER_MIVR_4300MV 0x40
+#define RT5033_CHARGER_MIVR_4400MV 0x60
+#define RT5033_CHARGER_MIVR_4500MV 0x80
+#define RT5033_CHARGER_MIVR_4600MV 0xa0
+#define RT5033_CHARGER_MIVR_4700MV 0xc0
+#define RT5033_CHARGER_MIVR_4800MV 0xe0
+/* RT5033 fast-charge timer settings (uses the internal timer) */
+#define RT5033_FAST_CHARGE_TIMER4 0x00 /* 4 hrs */
+#define RT5033_FAST_CHARGE_TIMER6 0x08 /* 6 hrs */
+#define RT5033_FAST_CHARGE_TIMER8 0x10 /* 8 hrs */
+#define RT5033_FAST_CHARGE_TIMER10 0x18 /* 10 hrs */
+#define RT5033_FAST_CHARGE_TIMER12 0x20 /* 12 hrs */
+#define RT5033_FAST_CHARGE_TIMER14 0x28 /* 14 hrs */
+#define RT5033_FAST_CHARGE_TIMER16 0x30 /* 16 hrs */
+
+#define RT5033_INT_TIMER_DISABLE 0x00
#define RT5033_INT_TIMER_ENABLE 0x01
-/* RT5033 charger termination enable mask */
-#define RT5033_TE_ENABLE_MASK 0x08
-
/*
- * RT5033 charger opa mode. RT50300 have two opa mode charger mode
- * and boost mode for OTG
+ * RT5033 charger opa mode. RT5033 has two opa modes for OTG: charger mode
+ * and boost mode.
*/
-
#define RT5033_CHARGER_MODE 0x00
#define RT5033_BOOST_MODE 0x01
/* RT5033 charger termination enable */
+#define RT5033_TE_DISABLE 0x00
#define RT5033_TE_ENABLE 0x08
/* RT5033 charger CFO enable */
+#define RT5033_CFO_DISABLE 0x00
#define RT5033_CFO_ENABLE 0x40
/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U
#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U
+#define RT5033_CV_MAX_VOLTAGE 0x1e
/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U
#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U
+#define RT5033_CHG_MAX_PRE_CURRENT 0x03
/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */
#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U
#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U
+#define RT5033_CHG_MAX_CURRENT 0x0d
/*
* RT5033 charger const-charge end of charger current (
@@ -184,20 +200,20 @@ enum rt5033_reg {
* RT5033 charger pre-charge threshold volt limits
* (as in CHGCTRL5 register), uV
*/
-
#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U
#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM 100000U
#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U
/*
- * RT5033 charger enable UUG, If UUG enable MOS auto control by H/W charger
+ * RT5033 charger UUG. It enables MOS auto control by H/W charger
* circuit.
*/
+#define RT5033_CHARGER_UUG_DISABLE 0x00
#define RT5033_CHARGER_UUG_ENABLE 0x02
-/* RT5033 charger High impedance mode */
+/* RT5033 charger high impedance mode */
#define RT5033_CHARGER_HZ_DISABLE 0x00
-#define RT5033_CHARGER_HZ_ENABLE 0x01
+#define RT5033_CHARGER_HZ_ENABLE 0x02
/* RT5033 regulator BUCK output voltage uV */
#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U
@@ -250,11 +266,11 @@ enum rt5033_fuel_reg {
#define RT5033_FUEL_BAT_PRESENT 0x02
/* RT5033 PMIC interrupts */
-#define RT5033_PMIC_IRQ_BUCKOCP 2
-#define RT5033_PMIC_IRQ_BUCKLV 3
-#define RT5033_PMIC_IRQ_SAFELDOLV 4
-#define RT5033_PMIC_IRQ_LDOLV 5
-#define RT5033_PMIC_IRQ_OT 6
-#define RT5033_PMIC_IRQ_VDDA_UV 7
+#define RT5033_PMIC_IRQ_BUCKOCP BIT(2)
+#define RT5033_PMIC_IRQ_BUCKLV BIT(3)
+#define RT5033_PMIC_IRQ_SAFELDOLV BIT(4)
+#define RT5033_PMIC_IRQ_LDOLV BIT(5)
+#define RT5033_PMIC_IRQ_OT BIT(6)
+#define RT5033_PMIC_IRQ_VDDA_UV BIT(7)
#endif /* __RT5033_PRIVATE_H__ */
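
A minimal sketch of selecting an input-current (AICR) limit with the masks
above; RT5033_REG_CHG_CTRL1 stands in for the CHGCTRL1 entry of the
rt5033_reg enum, which this hunk does not show:

static int rt5033_set_input_limit_500ma(struct regmap *regmap)
{
	/* The IAICR field occupies the top bits of CHGCTRL1 */
	return regmap_update_bits(regmap, RT5033_REG_CHG_CTRL1,
				  RT5033_CHGCTRL1_IAICR_MASK,
				  RT5033_AICR_500_MODE);
}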
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
index 6cff5cf458d2..bb3d18945d21 100644
--- a/include/linux/mfd/rt5033.h
+++ b/include/linux/mfd/rt5033.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD core driver for the RT5033
*
* Copyright (C) 2014 Samsung Electronics
* Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published bythe Free Software Foundation.
*/
#ifndef __RT5033_H__
@@ -15,7 +12,6 @@
#include <linux/regulator/consumer.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
-#include <linux/power_supply.h>
/* RT5033 regulator IDs */
enum rt5033_regulators {
@@ -35,28 +31,4 @@ struct rt5033_dev {
bool wakeup;
};
-struct rt5033_battery {
- struct i2c_client *client;
- struct rt5033_dev *rt5033;
- struct regmap *regmap;
- struct power_supply *psy;
-};
-
-/* RT5033 charger platform data */
-struct rt5033_charger_data {
- unsigned int pre_uamp;
- unsigned int pre_uvolt;
- unsigned int const_uvolt;
- unsigned int eoc_uamp;
- unsigned int fast_uamp;
-};
-
-struct rt5033_charger {
- struct device *dev;
- struct rt5033_dev *rt5033;
- struct power_supply psy;
-
- struct rt5033_charger_data *chg;
-};
-
#endif /* __RT5033_H__ */
diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h
new file mode 100644
index 000000000000..8421d49500bf
--- /dev/null
+++ b/include/linux/mfd/rz-mtu3.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+#ifndef __MFD_RZ_MTU3_H__
+#define __MFD_RZ_MTU3_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+/* 8-bit shared register offsets macros */
+#define RZ_MTU3_TSTRA 0x080 /* Timer start register A */
+#define RZ_MTU3_TSTRB 0x880 /* Timer start register B */
+
+/* 16-bit shared register offset macros */
+#define RZ_MTU3_TDDRA 0x016 /* Timer dead time data register A */
+#define RZ_MTU3_TDDRB 0x816 /* Timer dead time data register B */
+#define RZ_MTU3_TCDRA 0x014 /* Timer cycle data register A */
+#define RZ_MTU3_TCDRB 0x814 /* Timer cycle data register B */
+#define RZ_MTU3_TCBRA 0x022 /* Timer cycle buffer register A */
+#define RZ_MTU3_TCBRB 0x822 /* Timer cycle buffer register B */
+#define RZ_MTU3_TCNTSA 0x020 /* Timer subcounter A */
+#define RZ_MTU3_TCNTSB 0x820 /* Timer subcounter B */
+
+/*
+ * MTU5 contains 3 timer counter registers and is totally different
+ * from the other channels, so its register offsets are defined separately.
+ */
+
+/* 8-bit register offset macros of MTU3 channels except MTU5 */
+#define RZ_MTU3_TIER 0 /* Timer interrupt register */
+#define RZ_MTU3_NFCR 1 /* Noise filter control register */
+#define RZ_MTU3_TSR 2 /* Timer status register */
+#define RZ_MTU3_TCR 3 /* Timer control register */
+#define RZ_MTU3_TCR2 4 /* Timer control register 2 */
+
+/* Timer mode register 1 */
+#define RZ_MTU3_TMDR1 5
+#define RZ_MTU3_TMDR1_MD GENMASK(3, 0)
+#define RZ_MTU3_TMDR1_MD_NORMAL FIELD_PREP(RZ_MTU3_TMDR1_MD, 0)
+#define RZ_MTU3_TMDR1_MD_PWMMODE1 FIELD_PREP(RZ_MTU3_TMDR1_MD, 2)
+
+#define RZ_MTU3_TIOR 6 /* Timer I/O control register */
+#define RZ_MTU3_TIORH 6 /* Timer I/O control register H */
+#define RZ_MTU3_TIORL 7 /* Timer I/O control register L */
+/* Only MTU3/4/6/7 have TBTM registers */
+#define RZ_MTU3_TBTM 8 /* Timer buffer operation transfer mode register */
+
+/* 8-bit MTU5 register offset macros */
+#define RZ_MTU3_TSTR 2 /* MTU5 Timer start register */
+#define RZ_MTU3_TCNTCMPCLR 3 /* MTU5 Timer compare match clear register */
+#define RZ_MTU3_TCRU 4 /* Timer control register U */
+#define RZ_MTU3_TCR2U 5 /* Timer control register 2U */
+#define RZ_MTU3_TIORU 6 /* Timer I/O control register U */
+#define RZ_MTU3_TCRV 7 /* Timer control register V */
+#define RZ_MTU3_TCR2V 8 /* Timer control register 2V */
+#define RZ_MTU3_TIORV 9 /* Timer I/O control register V */
+#define RZ_MTU3_TCRW 10 /* Timer control register W */
+#define RZ_MTU3_TCR2W 11 /* Timer control register 2W */
+#define RZ_MTU3_TIORW 12 /* Timer I/O control register W */
+
+/* 16-bit register offset macros of MTU3 channels except MTU5 */
+#define RZ_MTU3_TCNT 0 /* Timer counter */
+#define RZ_MTU3_TGRA 1 /* Timer general register A */
+#define RZ_MTU3_TGRB 2 /* Timer general register B */
+#define RZ_MTU3_TGRC 3 /* Timer general register C */
+#define RZ_MTU3_TGRD 4 /* Timer general register D */
+#define RZ_MTU3_TGRE 5 /* Timer general register E */
+#define RZ_MTU3_TGRF 6 /* Timer general register F */
+/* Timer A/D converter start request registers */
+#define RZ_MTU3_TADCR 7 /* control register */
+#define RZ_MTU3_TADCORA 8 /* cycle set register A */
+#define RZ_MTU3_TADCORB 9 /* cycle set register B */
+#define RZ_MTU3_TADCOBRA 10 /* cycle set buffer register A */
+#define RZ_MTU3_TADCOBRB 11 /* cycle set buffer register B */
+
+/* 16-bit MTU5 register offset macros */
+#define RZ_MTU3_TCNTU 0 /* MTU5 Timer counter U */
+#define RZ_MTU3_TGRU 1 /* MTU5 Timer general register U */
+#define RZ_MTU3_TCNTV 2 /* MTU5 Timer counter V */
+#define RZ_MTU3_TGRV 3 /* MTU5 Timer general register V */
+#define RZ_MTU3_TCNTW 4 /* MTU5 Timer counter W */
+#define RZ_MTU3_TGRW 5 /* MTU5 Timer general register W */
+
+/* 32-bit register offset */
+#define RZ_MTU3_TCNTLW 0 /* Timer longword counter */
+#define RZ_MTU3_TGRALW 1 /* Timer longword general register A */
+#define RZ_MTU3_TGRBLW 2 /* Timer longword general register B */
+
+#define RZ_MTU3_TMDR3 0x191 /* MTU1 Timer Mode Register 3 */
+
+/* Macros for setting registers */
+#define RZ_MTU3_TCR_CCLR GENMASK(7, 5)
+#define RZ_MTU3_TCR_CKEG GENMASK(4, 3)
+#define RZ_MTU3_TCR_TPCS GENMASK(2, 0)
+#define RZ_MTU3_TCR_CCLR_TGRA BIT(5)
+#define RZ_MTU3_TCR_CCLR_TGRC FIELD_PREP(RZ_MTU3_TCR_CCLR, 5)
+#define RZ_MTU3_TCR_CKEG_RISING FIELD_PREP(RZ_MTU3_TCR_CKEG, 0)
+
+#define RZ_MTU3_TIOR_IOB GENMASK(7, 4)
+#define RZ_MTU3_TIOR_IOA GENMASK(3, 0)
+#define RZ_MTU3_TIOR_OC_RETAIN 0
+#define RZ_MTU3_TIOR_OC_INIT_OUT_LO_HI_OUT 2
+#define RZ_MTU3_TIOR_OC_INIT_OUT_HI_TOGGLE_OUT 7
+
+#define RZ_MTU3_TIOR_OC_IOA_H_COMP_MATCH \
+ FIELD_PREP(RZ_MTU3_TIOR_IOA, RZ_MTU3_TIOR_OC_INIT_OUT_LO_HI_OUT)
+#define RZ_MTU3_TIOR_OC_IOB_TOGGLE \
+ FIELD_PREP(RZ_MTU3_TIOR_IOB, RZ_MTU3_TIOR_OC_INIT_OUT_HI_TOGGLE_OUT)
+
+enum rz_mtu3_channels {
+ RZ_MTU3_CHAN_0,
+ RZ_MTU3_CHAN_1,
+ RZ_MTU3_CHAN_2,
+ RZ_MTU3_CHAN_3,
+ RZ_MTU3_CHAN_4,
+ RZ_MTU3_CHAN_5,
+ RZ_MTU3_CHAN_6,
+ RZ_MTU3_CHAN_7,
+ RZ_MTU3_CHAN_8,
+ RZ_MTU_NUM_CHANNELS
+};
+
+/**
+ * struct rz_mtu3_channel - MTU3 channel private data
+ *
+ * @dev: device handle
+ * @channel_number: channel number
+ * @lock: Lock to protect channel state
+ * @is_busy: channel state
+ */
+struct rz_mtu3_channel {
+ struct device *dev;
+ unsigned int channel_number;
+ struct mutex lock;
+ bool is_busy;
+};
+
+/**
+ * struct rz_mtu3 - MTU3 core private data
+ *
+ * @clk: MTU3 module clock
+ * @channels: HW channels
+ * @priv_data: MTU3 core driver private data
+ */
+struct rz_mtu3 {
+ struct clk *clk;
+ struct rz_mtu3_channel channels[RZ_MTU_NUM_CHANNELS];
+
+ void *priv_data;
+};
+
+static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
+{
+ mutex_lock(&ch->lock);
+ if (ch->is_busy) {
+ mutex_unlock(&ch->lock);
+ return false;
+ }
+
+ ch->is_busy = true;
+ mutex_unlock(&ch->lock);
+
+ return true;
+}
+
+static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
+{
+ mutex_lock(&ch->lock);
+ ch->is_busy = false;
+ mutex_unlock(&ch->lock);
+}
+
+bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch);
+void rz_mtu3_disable(struct rz_mtu3_channel *ch);
+int rz_mtu3_enable(struct rz_mtu3_channel *ch);
+
+u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off);
+
+void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val);
+void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
+void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val);
+void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
+void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off,
+ u16 pos, u8 val);
+
+#endif /* __MFD_RZ_MTU3_H__ */
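
A minimal sketch of a client driver claiming an MTU3 channel and programming
it through the accessors declared above; the TGRA period value is a
hypothetical placeholder:

static int mtu3_client_start(struct rz_mtu3_channel *ch)
{
	if (!rz_mtu3_request_channel(ch))
		return -EBUSY;

	/* Clear the counter on TGRA compare match, count on rising edge */
	rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR,
			      RZ_MTU3_TCR_CCLR_TGRA | RZ_MTU3_TCR_CKEG_RISING);
	rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TGRA, 0x1234);	/* hypothetical */
	rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TCNT, 0);

	return rz_mtu3_enable(ch);
}

static void mtu3_client_stop(struct rz_mtu3_channel *ch)
{
	rz_mtu3_disable(ch);
	rz_mtu3_release_channel(ch);
}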
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 5a23dd4df432..d785e101fe79 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * core.h
- *
- * copyright (c) 2011 Samsung Electronics Co., Ltd
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_CORE_H
@@ -27,6 +20,7 @@
#define MIN_850_MV 850000
#define MIN_800_MV 800000
#define MIN_750_MV 750000
+#define MIN_650_MV 650000
#define MIN_600_MV 600000
#define MIN_500_MV 500000
@@ -39,16 +33,19 @@
#define STEP_12_5_MV 12500
#define STEP_6_25_MV 6250
+struct gpio_desc;
+
enum sec_device_type {
- S5M8751X,
- S5M8763X,
S5M8767X,
+ S2DOS05,
S2MPA01,
+ S2MPG10,
S2MPS11X,
S2MPS13X,
S2MPS14X,
S2MPS15X,
S2MPU02,
+ S2MPU05,
};
/**
@@ -70,30 +67,16 @@ struct sec_pmic_dev {
struct regmap *regmap_pmic;
struct i2c_client *i2c;
- unsigned long device_type;
- int irq_base;
+ int device_type;
int irq;
struct regmap_irq_chip_data *irq_data;
-
- bool wakeup;
};
-int sec_irq_init(struct sec_pmic_dev *sec_pmic);
-void sec_irq_exit(struct sec_pmic_dev *sec_pmic);
-int sec_irq_resume(struct sec_pmic_dev *sec_pmic);
-
struct sec_platform_data {
struct sec_regulator_data *regulators;
struct sec_opmode_data *opmode;
- int device_type;
int num_regulators;
- int irq_base;
- int (*cfg_pmic_irq)(void);
-
- bool wakeup;
- bool buck_voltage_lock;
-
int buck_gpios[3];
int buck_ds[3];
unsigned int buck2_voltage[8];
@@ -103,35 +86,12 @@ struct sec_platform_data {
unsigned int buck4_voltage[8];
bool buck4_gpiodvs;
- int buck_set1;
- int buck_set2;
- int buck_set3;
- int buck2_enable;
- int buck3_enable;
- int buck4_enable;
int buck_default_idx;
- int buck2_default_idx;
- int buck3_default_idx;
- int buck4_default_idx;
-
int buck_ramp_delay;
- int buck2_ramp_delay;
- int buck34_ramp_delay;
- int buck5_ramp_delay;
- int buck16_ramp_delay;
- int buck7810_ramp_delay;
- int buck9_ramp_delay;
- int buck24_ramp_delay;
- int buck3_ramp_delay;
- int buck7_ramp_delay;
- int buck8910_ramp_delay;
-
- bool buck1_ramp_enable;
bool buck2_ramp_enable;
bool buck3_ramp_enable;
bool buck4_ramp_enable;
- bool buck6_ramp_enable;
int buck2_init;
int buck3_init;
@@ -151,7 +111,7 @@ struct sec_regulator_data {
int id;
struct regulator_init_data *initdata;
struct device_node *reg_node;
- int ext_control_gpio;
+ struct gpio_desc *ext_control_gpiod;
};
/*
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 667aa40486dd..8402a5f8e18a 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -1,13 +1,7 @@
-/* irq.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_IRQ_H
@@ -63,6 +57,115 @@ enum s2mpa01_irq {
#define S2MPA01_IRQ_B24_TSD_MASK (1 << 4)
#define S2MPA01_IRQ_B35_TSD_MASK (1 << 5)
+enum s2mpg10_common_irq {
+ /* Top-level (common) block */
+ S2MPG10_COMMON_IRQ_PMIC,
+ S2MPG10_COMMON_IRQ_UNUSED,
+};
+
+enum s2mpg10_irq {
+ /* PMIC */
+ S2MPG10_IRQ_PWRONF,
+ S2MPG10_IRQ_PWRONR,
+ S2MPG10_IRQ_JIGONBF,
+ S2MPG10_IRQ_JIGONBR,
+ S2MPG10_IRQ_ACOKBF,
+ S2MPG10_IRQ_ACOKBR,
+ S2MPG10_IRQ_PWRON1S,
+ S2MPG10_IRQ_MRB,
+#define S2MPG10_IRQ_PWRONF_MASK BIT(0)
+#define S2MPG10_IRQ_PWRONR_MASK BIT(1)
+#define S2MPG10_IRQ_JIGONBF_MASK BIT(2)
+#define S2MPG10_IRQ_JIGONBR_MASK BIT(3)
+#define S2MPG10_IRQ_ACOKBF_MASK BIT(4)
+#define S2MPG10_IRQ_ACOKBR_MASK BIT(5)
+#define S2MPG10_IRQ_PWRON1S_MASK BIT(6)
+#define S2MPG10_IRQ_MRB_MASK BIT(7)
+
+ S2MPG10_IRQ_RTC60S,
+ S2MPG10_IRQ_RTCA1,
+ S2MPG10_IRQ_RTCA0,
+ S2MPG10_IRQ_RTC1S,
+ S2MPG10_IRQ_WTSR_COLDRST,
+ S2MPG10_IRQ_WTSR,
+ S2MPG10_IRQ_WRST,
+ S2MPG10_IRQ_SMPL,
+#define S2MPG10_IRQ_RTC60S_MASK BIT(0)
+#define S2MPG10_IRQ_RTCA1_MASK BIT(1)
+#define S2MPG10_IRQ_RTCA0_MASK BIT(2)
+#define S2MPG10_IRQ_RTC1S_MASK BIT(3)
+#define S2MPG10_IRQ_WTSR_COLDRST_MASK BIT(4)
+#define S2MPG10_IRQ_WTSR_MASK BIT(5)
+#define S2MPG10_IRQ_WRST_MASK BIT(6)
+#define S2MPG10_IRQ_SMPL_MASK BIT(7)
+
+ S2MPG10_IRQ_120C,
+ S2MPG10_IRQ_140C,
+ S2MPG10_IRQ_TSD,
+ S2MPG10_IRQ_PIF_TIMEOUT1,
+ S2MPG10_IRQ_PIF_TIMEOUT2,
+ S2MPG10_IRQ_SPD_PARITY_ERR,
+ S2MPG10_IRQ_SPD_ABNORMAL_STOP,
+ S2MPG10_IRQ_PMETER_OVERF,
+#define S2MPG10_IRQ_INT120C_MASK BIT(0)
+#define S2MPG10_IRQ_INT140C_MASK BIT(1)
+#define S2MPG10_IRQ_TSD_MASK BIT(2)
+#define S2MPG10_IRQ_PIF_TIMEOUT1_MASK BIT(3)
+#define S2MPG10_IRQ_PIF_TIMEOUT2_MASK BIT(4)
+#define S2MPG10_IRQ_SPD_PARITY_ERR_MASK BIT(5)
+#define S2MPG10_IRQ_SPD_ABNORMAL_STOP_MASK BIT(6)
+#define S2MPG10_IRQ_PMETER_OVERF_MASK BIT(7)
+
+ S2MPG10_IRQ_OCP_B1M,
+ S2MPG10_IRQ_OCP_B2M,
+ S2MPG10_IRQ_OCP_B3M,
+ S2MPG10_IRQ_OCP_B4M,
+ S2MPG10_IRQ_OCP_B5M,
+ S2MPG10_IRQ_OCP_B6M,
+ S2MPG10_IRQ_OCP_B7M,
+ S2MPG10_IRQ_OCP_B8M,
+#define S2MPG10_IRQ_OCP_B1M_MASK BIT(0)
+#define S2MPG10_IRQ_OCP_B2M_MASK BIT(1)
+#define S2MPG10_IRQ_OCP_B3M_MASK BIT(2)
+#define S2MPG10_IRQ_OCP_B4M_MASK BIT(3)
+#define S2MPG10_IRQ_OCP_B5M_MASK BIT(4)
+#define S2MPG10_IRQ_OCP_B6M_MASK BIT(5)
+#define S2MPG10_IRQ_OCP_B7M_MASK BIT(6)
+#define S2MPG10_IRQ_OCP_B8M_MASK BIT(7)
+
+ S2MPG10_IRQ_OCP_B9M,
+ S2MPG10_IRQ_OCP_B10M,
+ S2MPG10_IRQ_WLWP_ACC,
+ S2MPG10_IRQ_SMPL_TIMEOUT,
+ S2MPG10_IRQ_WTSR_TIMEOUT,
+ S2MPG10_IRQ_SPD_SRP_PKT_RST,
+#define S2MPG10_IRQ_OCP_B9M_MASK BIT(0)
+#define S2MPG10_IRQ_OCP_B10M_MASK BIT(1)
+#define S2MPG10_IRQ_WLWP_ACC_MASK BIT(2)
+#define S2MPG10_IRQ_SMPL_TIMEOUT_MASK BIT(5)
+#define S2MPG10_IRQ_WTSR_TIMEOUT_MASK BIT(6)
+#define S2MPG10_IRQ_SPD_SRP_PKT_RST_MASK BIT(7)
+
+ S2MPG10_IRQ_PWR_WARN_CH0,
+ S2MPG10_IRQ_PWR_WARN_CH1,
+ S2MPG10_IRQ_PWR_WARN_CH2,
+ S2MPG10_IRQ_PWR_WARN_CH3,
+ S2MPG10_IRQ_PWR_WARN_CH4,
+ S2MPG10_IRQ_PWR_WARN_CH5,
+ S2MPG10_IRQ_PWR_WARN_CH6,
+ S2MPG10_IRQ_PWR_WARN_CH7,
+#define S2MPG10_IRQ_PWR_WARN_CH0_MASK BIT(0)
+#define S2MPG10_IRQ_PWR_WARN_CH1_MASK BIT(1)
+#define S2MPG10_IRQ_PWR_WARN_CH2_MASK BIT(2)
+#define S2MPG10_IRQ_PWR_WARN_CH3_MASK BIT(3)
+#define S2MPG10_IRQ_PWR_WARN_CH4_MASK BIT(4)
+#define S2MPG10_IRQ_PWR_WARN_CH5_MASK BIT(5)
+#define S2MPG10_IRQ_PWR_WARN_CH6_MASK BIT(6)
+#define S2MPG10_IRQ_PWR_WARN_CH7_MASK BIT(7)
+
+ S2MPG10_IRQ_NR,
+};
+
enum s2mps11_irq {
S2MPS11_IRQ_PWRONF,
S2MPS11_IRQ_PWRONR,
@@ -156,6 +259,50 @@ enum s2mpu02_irq {
/* Masks for interrupts are the same as in s2mps11 */
#define S2MPS14_IRQ_TSD_MASK (1 << 2)
+enum s2mpu05_irq {
+ S2MPU05_IRQ_PWRONF,
+ S2MPU05_IRQ_PWRONR,
+ S2MPU05_IRQ_JIGONBF,
+ S2MPU05_IRQ_JIGONBR,
+ S2MPU05_IRQ_ACOKF,
+ S2MPU05_IRQ_ACOKR,
+ S2MPU05_IRQ_PWRON1S,
+ S2MPU05_IRQ_MRB,
+
+ S2MPU05_IRQ_RTC60S,
+ S2MPU05_IRQ_RTCA1,
+ S2MPU05_IRQ_RTCA0,
+ S2MPU05_IRQ_SMPL,
+ S2MPU05_IRQ_RTC1S,
+ S2MPU05_IRQ_WTSR,
+
+ S2MPU05_IRQ_INT120C,
+ S2MPU05_IRQ_INT140C,
+ S2MPU05_IRQ_TSD,
+
+ S2MPU05_IRQ_NR,
+};
+
+#define S2MPU05_IRQ_PWRONF_MASK BIT(0)
+#define S2MPU05_IRQ_PWRONR_MASK BIT(1)
+#define S2MPU05_IRQ_JIGONBF_MASK BIT(2)
+#define S2MPU05_IRQ_JIGONBR_MASK BIT(3)
+#define S2MPU05_IRQ_ACOKF_MASK BIT(4)
+#define S2MPU05_IRQ_ACOKR_MASK BIT(5)
+#define S2MPU05_IRQ_PWRON1S_MASK BIT(6)
+#define S2MPU05_IRQ_MRB_MASK BIT(7)
+
+#define S2MPU05_IRQ_RTC60S_MASK BIT(0)
+#define S2MPU05_IRQ_RTCA1_MASK BIT(1)
+#define S2MPU05_IRQ_RTCA0_MASK BIT(2)
+#define S2MPU05_IRQ_SMPL_MASK BIT(3)
+#define S2MPU05_IRQ_RTC1S_MASK BIT(4)
+#define S2MPU05_IRQ_WTSR_MASK BIT(5)
+
+#define S2MPU05_IRQ_INT120C_MASK BIT(0)
+#define S2MPU05_IRQ_INT140C_MASK BIT(1)
+#define S2MPU05_IRQ_TSD_MASK BIT(2)
+
enum s5m8767_irq {
S5M8767_IRQ_PWRR,
S5M8767_IRQ_PWRF,
@@ -200,54 +347,4 @@ enum s5m8767_irq {
#define S5M8767_IRQ_RTC1S_MASK (1 << 4)
#define S5M8767_IRQ_WTSR_MASK (1 << 5)
-enum s5m8763_irq {
- S5M8763_IRQ_DCINF,
- S5M8763_IRQ_DCINR,
- S5M8763_IRQ_JIGF,
- S5M8763_IRQ_JIGR,
- S5M8763_IRQ_PWRONF,
- S5M8763_IRQ_PWRONR,
-
- S5M8763_IRQ_WTSREVNT,
- S5M8763_IRQ_SMPLEVNT,
- S5M8763_IRQ_ALARM1,
- S5M8763_IRQ_ALARM0,
-
- S5M8763_IRQ_ONKEY1S,
- S5M8763_IRQ_TOPOFFR,
- S5M8763_IRQ_DCINOVPR,
- S5M8763_IRQ_CHGRSTF,
- S5M8763_IRQ_DONER,
- S5M8763_IRQ_CHGFAULT,
-
- S5M8763_IRQ_LOBAT1,
- S5M8763_IRQ_LOBAT2,
-
- S5M8763_IRQ_NR,
-};
-
-#define S5M8763_IRQ_DCINF_MASK (1 << 2)
-#define S5M8763_IRQ_DCINR_MASK (1 << 3)
-#define S5M8763_IRQ_JIGF_MASK (1 << 4)
-#define S5M8763_IRQ_JIGR_MASK (1 << 5)
-#define S5M8763_IRQ_PWRONF_MASK (1 << 6)
-#define S5M8763_IRQ_PWRONR_MASK (1 << 7)
-
-#define S5M8763_IRQ_WTSREVNT_MASK (1 << 0)
-#define S5M8763_IRQ_SMPLEVNT_MASK (1 << 1)
-#define S5M8763_IRQ_ALARM1_MASK (1 << 2)
-#define S5M8763_IRQ_ALARM0_MASK (1 << 3)
-
-#define S5M8763_IRQ_ONKEY1S_MASK (1 << 0)
-#define S5M8763_IRQ_TOPOFFR_MASK (1 << 2)
-#define S5M8763_IRQ_DCINOVPR_MASK (1 << 3)
-#define S5M8763_IRQ_CHGRSTF_MASK (1 << 4)
-#define S5M8763_IRQ_DONER_MASK (1 << 5)
-#define S5M8763_IRQ_CHGFAULT_MASK (1 << 7)
-
-#define S5M8763_IRQ_LOBAT1_MASK (1 << 0)
-#define S5M8763_IRQ_LOBAT2_MASK (1 << 1)
-
-#define S5M8763_ENRAMP (1 << 4)
-
#endif /* __LINUX_MFD_SEC_IRQ_H */
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 48c3c5be7eb1..51c4239a1fa6 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -1,18 +1,7 @@
-/* rtc.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_SEC_RTC_H
@@ -83,6 +72,37 @@ enum s2mps_rtc_reg {
S2MPS_RTC_REG_MAX,
};
+enum s2mpg10_rtc_reg {
+ S2MPG10_RTC_CTRL,
+ S2MPG10_RTC_UPDATE,
+ S2MPG10_RTC_SMPL,
+ S2MPG10_RTC_WTSR,
+ S2MPG10_RTC_CAP_SEL,
+ S2MPG10_RTC_MSEC,
+ S2MPG10_RTC_SEC,
+ S2MPG10_RTC_MIN,
+ S2MPG10_RTC_HOUR,
+ S2MPG10_RTC_WEEK,
+ S2MPG10_RTC_DAY,
+ S2MPG10_RTC_MON,
+ S2MPG10_RTC_YEAR,
+ S2MPG10_RTC_A0SEC,
+ S2MPG10_RTC_A0MIN,
+ S2MPG10_RTC_A0HOUR,
+ S2MPG10_RTC_A0WEEK,
+ S2MPG10_RTC_A0DAY,
+ S2MPG10_RTC_A0MON,
+ S2MPG10_RTC_A0YEAR,
+ S2MPG10_RTC_A1SEC,
+ S2MPG10_RTC_A1MIN,
+ S2MPG10_RTC_A1HOUR,
+ S2MPG10_RTC_A1WEEK,
+ S2MPG10_RTC_A1DAY,
+ S2MPG10_RTC_A1MON,
+ S2MPG10_RTC_A1YEAR,
+ S2MPG10_RTC_OSC_CTRL,
+};
+
#define RTC_I2C_ADDR (0x0C >> 1)
#define HOUR_12 (1 << 7)
@@ -135,21 +155,16 @@ enum s2mps_rtc_reg {
#define ALARM_ENABLE_SHIFT 7
#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+/* WTSR & SMPL registers */
#define SMPL_ENABLE_SHIFT 7
#define SMPL_ENABLE_MASK (1 << SMPL_ENABLE_SHIFT)
#define WTSR_ENABLE_SHIFT 6
#define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT)
-enum {
- RTC_SEC = 0,
- RTC_MIN,
- RTC_HOUR,
- RTC_WEEKDAY,
- RTC_DATE,
- RTC_MONTH,
- RTC_YEAR1,
- RTC_YEAR2,
-};
+#define S2MPG10_WTSR_COLDTIMER GENMASK(6, 5)
+#define S2MPG10_WTSR_COLDRST BIT(4)
+#define S2MPG10_WTSR_WTSRT GENMASK(3, 1)
+#define S2MPG10_WTSR_WTSR_EN BIT(0)
#endif /* __LINUX_MFD_SEC_RTC_H */
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
index 2766108bca2f..0762e9de6f2f 100644
--- a/include/linux/mfd/samsung/s2mpa01.h
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPA01_H
diff --git a/include/linux/mfd/samsung/s2mpg10.h b/include/linux/mfd/samsung/s2mpg10.h
new file mode 100644
index 000000000000..9f5919b89a3c
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpg10.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015 Samsung Electronics
+ * Copyright 2020 Google Inc
+ * Copyright 2025 Linaro Ltd.
+ */
+
+#ifndef __LINUX_MFD_S2MPG10_H
+#define __LINUX_MFD_S2MPG10_H
+
+/* Common registers (type 0x000) */
+enum s2mpg10_common_reg {
+ S2MPG10_COMMON_CHIPID,
+ S2MPG10_COMMON_INT,
+ S2MPG10_COMMON_INT_MASK,
+ S2MPG10_COMMON_SPD_CTRL1 = 0x0a,
+ S2MPG10_COMMON_SPD_CTRL2,
+ S2MPG10_COMMON_SPD_CTRL3,
+ S2MPG10_COMMON_MON1SEL = 0x1a,
+ S2MPG10_COMMON_MON2SEL,
+ S2MPG10_COMMON_MONR,
+ S2MPG10_COMMON_DEBUG_CTRL1,
+ S2MPG10_COMMON_DEBUG_CTRL2,
+ S2MPG10_COMMON_DEBUG_CTRL3,
+ S2MPG10_COMMON_DEBUG_CTRL4,
+ S2MPG10_COMMON_DEBUG_CTRL5,
+ S2MPG10_COMMON_DEBUG_CTRL6,
+ S2MPG10_COMMON_DEBUG_CTRL7,
+ S2MPG10_COMMON_DEBUG_CTRL8,
+ S2MPG10_COMMON_TEST_MODE1,
+ S2MPG10_COMMON_TEST_MODE2,
+ S2MPG10_COMMON_SPD_DEBUG1,
+ S2MPG10_COMMON_SPD_DEBUG2,
+ S2MPG10_COMMON_SPD_DEBUG3,
+ S2MPG10_COMMON_SPD_DEBUG4,
+};
+
+/* For S2MPG10_COMMON_INT and S2MPG10_COMMON_INT_MASK */
+#define S2MPG10_COMMON_INT_SRC GENMASK(7, 0)
+#define S2MPG10_COMMON_INT_SRC_PMIC BIT(0)
+
+/* PMIC registers (type 0x100) */
+enum s2mpg10_pmic_reg {
+ S2MPG10_PMIC_INT1,
+ S2MPG10_PMIC_INT2,
+ S2MPG10_PMIC_INT3,
+ S2MPG10_PMIC_INT4,
+ S2MPG10_PMIC_INT5,
+ S2MPG10_PMIC_INT6,
+ S2MPG10_PMIC_INT1M,
+ S2MPG10_PMIC_INT2M,
+ S2MPG10_PMIC_INT3M,
+ S2MPG10_PMIC_INT4M,
+ S2MPG10_PMIC_INT5M,
+ S2MPG10_PMIC_INT6M,
+ S2MPG10_PMIC_STATUS1,
+ S2MPG10_PMIC_STATUS2,
+ S2MPG10_PMIC_PWRONSRC,
+ S2MPG10_PMIC_OFFSRC,
+ S2MPG10_PMIC_BU_CHG,
+ S2MPG10_PMIC_RTCBUF,
+ S2MPG10_PMIC_COMMON_CTRL1,
+ S2MPG10_PMIC_COMMON_CTRL2,
+ S2MPG10_PMIC_COMMON_CTRL3,
+ S2MPG10_PMIC_COMMON_CTRL4,
+ S2MPG10_PMIC_SMPL_WARN_CTRL,
+ S2MPG10_PMIC_MIMICKING_CTRL,
+ S2MPG10_PMIC_B1M_CTRL,
+ S2MPG10_PMIC_B1M_OUT1,
+ S2MPG10_PMIC_B1M_OUT2,
+ S2MPG10_PMIC_B2M_CTRL,
+ S2MPG10_PMIC_B2M_OUT1,
+ S2MPG10_PMIC_B2M_OUT2,
+ S2MPG10_PMIC_B3M_CTRL,
+ S2MPG10_PMIC_B3M_OUT1,
+ S2MPG10_PMIC_B3M_OUT2,
+ S2MPG10_PMIC_B4M_CTRL,
+ S2MPG10_PMIC_B4M_OUT1,
+ S2MPG10_PMIC_B4M_OUT2,
+ S2MPG10_PMIC_B5M_CTRL,
+ S2MPG10_PMIC_B5M_OUT1,
+ S2MPG10_PMIC_B5M_OUT2,
+ S2MPG10_PMIC_B6M_CTRL,
+ S2MPG10_PMIC_B6M_OUT1,
+ S2MPG10_PMIC_B6M_OUT2,
+ S2MPG10_PMIC_B7M_CTRL,
+ S2MPG10_PMIC_B7M_OUT1,
+ S2MPG10_PMIC_B7M_OUT2,
+ S2MPG10_PMIC_B8M_CTRL,
+ S2MPG10_PMIC_B8M_OUT1,
+ S2MPG10_PMIC_B8M_OUT2,
+ S2MPG10_PMIC_B9M_CTRL,
+ S2MPG10_PMIC_B9M_OUT1,
+ S2MPG10_PMIC_B9M_OUT2,
+ S2MPG10_PMIC_B10M_CTRL,
+ S2MPG10_PMIC_B10M_OUT1,
+ S2MPG10_PMIC_B10M_OUT2,
+ S2MPG10_PMIC_BUCK1M_USONIC,
+ S2MPG10_PMIC_BUCK2M_USONIC,
+ S2MPG10_PMIC_BUCK3M_USONIC,
+ S2MPG10_PMIC_BUCK4M_USONIC,
+ S2MPG10_PMIC_BUCK5M_USONIC,
+ S2MPG10_PMIC_BUCK6M_USONIC,
+ S2MPG10_PMIC_BUCK7M_USONIC,
+ S2MPG10_PMIC_BUCK8M_USONIC,
+ S2MPG10_PMIC_BUCK9M_USONIC,
+ S2MPG10_PMIC_BUCK10M_USONIC,
+ S2MPG10_PMIC_L1M_CTRL,
+ S2MPG10_PMIC_L2M_CTRL,
+ S2MPG10_PMIC_L3M_CTRL,
+ S2MPG10_PMIC_L4M_CTRL,
+ S2MPG10_PMIC_L5M_CTRL,
+ S2MPG10_PMIC_L6M_CTRL,
+ S2MPG10_PMIC_L7M_CTRL,
+ S2MPG10_PMIC_L8M_CTRL,
+ S2MPG10_PMIC_L9M_CTRL,
+ S2MPG10_PMIC_L10M_CTRL,
+ S2MPG10_PMIC_L11M_CTRL1,
+ S2MPG10_PMIC_L11M_CTRL2,
+ S2MPG10_PMIC_L12M_CTRL1,
+ S2MPG10_PMIC_L12M_CTRL2,
+ S2MPG10_PMIC_L13M_CTRL1,
+ S2MPG10_PMIC_L13M_CTRL2,
+ S2MPG10_PMIC_L14M_CTRL,
+ S2MPG10_PMIC_L15M_CTRL1,
+ S2MPG10_PMIC_L15M_CTRL2,
+ S2MPG10_PMIC_L16M_CTRL,
+ S2MPG10_PMIC_L17M_CTRL,
+ S2MPG10_PMIC_L18M_CTRL,
+ S2MPG10_PMIC_L19M_CTRL,
+ S2MPG10_PMIC_L20M_CTRL,
+ S2MPG10_PMIC_L21M_CTRL,
+ S2MPG10_PMIC_L22M_CTRL,
+ S2MPG10_PMIC_L23M_CTRL,
+ S2MPG10_PMIC_L24M_CTRL,
+ S2MPG10_PMIC_L25M_CTRL,
+ S2MPG10_PMIC_L26M_CTRL,
+ S2MPG10_PMIC_L27M_CTRL,
+ S2MPG10_PMIC_L28M_CTRL,
+ S2MPG10_PMIC_L29M_CTRL,
+ S2MPG10_PMIC_L30M_CTRL,
+ S2MPG10_PMIC_L31M_CTRL,
+ S2MPG10_PMIC_LDO_CTRL1,
+ S2MPG10_PMIC_LDO_CTRL2,
+ S2MPG10_PMIC_LDO_DSCH1,
+ S2MPG10_PMIC_LDO_DSCH2,
+ S2MPG10_PMIC_LDO_DSCH3,
+ S2MPG10_PMIC_LDO_DSCH4,
+ S2MPG10_PMIC_LDO_BUCK7M_HLIMIT,
+ S2MPG10_PMIC_LDO_BUCK7M_LLIMIT,
+ S2MPG10_PMIC_LDO_LDO21M_HLIMIT,
+ S2MPG10_PMIC_LDO_LDO21M_LLIMIT,
+ S2MPG10_PMIC_LDO_LDO11M_HLIMIT,
+ S2MPG10_PMIC_DVS_RAMP1,
+ S2MPG10_PMIC_DVS_RAMP2,
+ S2MPG10_PMIC_DVS_RAMP3,
+ S2MPG10_PMIC_DVS_RAMP4,
+ S2MPG10_PMIC_DVS_RAMP5,
+ S2MPG10_PMIC_DVS_RAMP6,
+ S2MPG10_PMIC_DVS_SYNC_CTRL1,
+ S2MPG10_PMIC_DVS_SYNC_CTRL2,
+ S2MPG10_PMIC_DVS_SYNC_CTRL3,
+ S2MPG10_PMIC_DVS_SYNC_CTRL4,
+ S2MPG10_PMIC_DVS_SYNC_CTRL5,
+ S2MPG10_PMIC_DVS_SYNC_CTRL6,
+ S2MPG10_PMIC_OFF_CTRL1,
+ S2MPG10_PMIC_OFF_CTRL2,
+ S2MPG10_PMIC_OFF_CTRL3,
+ S2MPG10_PMIC_OFF_CTRL4,
+ S2MPG10_PMIC_SEQ_CTRL1,
+ S2MPG10_PMIC_SEQ_CTRL2,
+ S2MPG10_PMIC_SEQ_CTRL3,
+ S2MPG10_PMIC_SEQ_CTRL4,
+ S2MPG10_PMIC_SEQ_CTRL5,
+ S2MPG10_PMIC_SEQ_CTRL6,
+ S2MPG10_PMIC_SEQ_CTRL7,
+ S2MPG10_PMIC_SEQ_CTRL8,
+ S2MPG10_PMIC_SEQ_CTRL9,
+ S2MPG10_PMIC_SEQ_CTRL10,
+ S2MPG10_PMIC_SEQ_CTRL11,
+ S2MPG10_PMIC_SEQ_CTRL12,
+ S2MPG10_PMIC_SEQ_CTRL13,
+ S2MPG10_PMIC_SEQ_CTRL14,
+ S2MPG10_PMIC_SEQ_CTRL15,
+ S2MPG10_PMIC_SEQ_CTRL16,
+ S2MPG10_PMIC_SEQ_CTRL17,
+ S2MPG10_PMIC_SEQ_CTRL18,
+ S2MPG10_PMIC_SEQ_CTRL19,
+ S2MPG10_PMIC_SEQ_CTRL20,
+ S2MPG10_PMIC_SEQ_CTRL21,
+ S2MPG10_PMIC_SEQ_CTRL22,
+ S2MPG10_PMIC_SEQ_CTRL23,
+ S2MPG10_PMIC_SEQ_CTRL24,
+ S2MPG10_PMIC_SEQ_CTRL25,
+ S2MPG10_PMIC_SEQ_CTRL26,
+ S2MPG10_PMIC_SEQ_CTRL27,
+ S2MPG10_PMIC_SEQ_CTRL28,
+ S2MPG10_PMIC_SEQ_CTRL29,
+ S2MPG10_PMIC_SEQ_CTRL30,
+ S2MPG10_PMIC_SEQ_CTRL31,
+ S2MPG10_PMIC_SEQ_CTRL32,
+ S2MPG10_PMIC_SEQ_CTRL33,
+ S2MPG10_PMIC_SEQ_CTRL34,
+ S2MPG10_PMIC_SEQ_CTRL35,
+ S2MPG10_PMIC_OFF_SEQ_CTRL1,
+ S2MPG10_PMIC_OFF_SEQ_CTRL2,
+ S2MPG10_PMIC_OFF_SEQ_CTRL3,
+ S2MPG10_PMIC_OFF_SEQ_CTRL4,
+ S2MPG10_PMIC_OFF_SEQ_CTRL5,
+ S2MPG10_PMIC_OFF_SEQ_CTRL6,
+ S2MPG10_PMIC_OFF_SEQ_CTRL7,
+ S2MPG10_PMIC_OFF_SEQ_CTRL8,
+ S2MPG10_PMIC_OFF_SEQ_CTRL9,
+ S2MPG10_PMIC_OFF_SEQ_CTRL10,
+ S2MPG10_PMIC_OFF_SEQ_CTRL11,
+ S2MPG10_PMIC_OFF_SEQ_CTRL12,
+ S2MPG10_PMIC_OFF_SEQ_CTRL13,
+ S2MPG10_PMIC_OFF_SEQ_CTRL14,
+ S2MPG10_PMIC_OFF_SEQ_CTRL15,
+ S2MPG10_PMIC_OFF_SEQ_CTRL16,
+ S2MPG10_PMIC_OFF_SEQ_CTRL17,
+ S2MPG10_PMIC_OFF_SEQ_CTRL18,
+ S2MPG10_PMIC_PCTRLSEL1,
+ S2MPG10_PMIC_PCTRLSEL2,
+ S2MPG10_PMIC_PCTRLSEL3,
+ S2MPG10_PMIC_PCTRLSEL4,
+ S2MPG10_PMIC_PCTRLSEL5,
+ S2MPG10_PMIC_PCTRLSEL6,
+ S2MPG10_PMIC_PCTRLSEL7,
+ S2MPG10_PMIC_PCTRLSEL8,
+ S2MPG10_PMIC_PCTRLSEL9,
+ S2MPG10_PMIC_PCTRLSEL10,
+ S2MPG10_PMIC_PCTRLSEL11,
+ S2MPG10_PMIC_PCTRLSEL12,
+ S2MPG10_PMIC_PCTRLSEL13,
+ S2MPG10_PMIC_DCTRLSEL1,
+ S2MPG10_PMIC_DCTRLSEL2,
+ S2MPG10_PMIC_DCTRLSEL3,
+ S2MPG10_PMIC_DCTRLSEL4,
+ S2MPG10_PMIC_DCTRLSEL5,
+ S2MPG10_PMIC_DCTRLSEL6,
+ S2MPG10_PMIC_DCTRLSEL7,
+ S2MPG10_PMIC_GPIO_CTRL1,
+ S2MPG10_PMIC_GPIO_CTRL2,
+ S2MPG10_PMIC_GPIO_CTRL3,
+ S2MPG10_PMIC_GPIO_CTRL4,
+ S2MPG10_PMIC_GPIO_CTRL5,
+ S2MPG10_PMIC_GPIO_CTRL6,
+ S2MPG10_PMIC_GPIO_CTRL7,
+ S2MPG10_PMIC_B2M_OCP_WARN,
+ S2MPG10_PMIC_B2M_OCP_WARN_X,
+ S2MPG10_PMIC_B2M_OCP_WARN_Y,
+ S2MPG10_PMIC_B2M_OCP_WARN_Z,
+ S2MPG10_PMIC_B3M_OCP_WARN,
+ S2MPG10_PMIC_B3M_OCP_WARN_X,
+ S2MPG10_PMIC_B3M_OCP_WARN_Y,
+ S2MPG10_PMIC_B3M_OCP_WARN_Z,
+ S2MPG10_PMIC_B10M_OCP_WARN,
+ S2MPG10_PMIC_B10M_OCP_WARN_X,
+ S2MPG10_PMIC_B10M_OCP_WARN_Y,
+ S2MPG10_PMIC_B10M_OCP_WARN_Z,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN_X,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN_Y,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN_Z,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN_X,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN_Y,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN_Z,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN_X,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN_Y,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN_Z,
+ S2MPG10_PMIC_BUCK_OCP_EN1,
+ S2MPG10_PMIC_BUCK_OCP_EN2,
+ S2MPG10_PMIC_BUCK_OCP_PD_EN1,
+ S2MPG10_PMIC_BUCK_OCP_PD_EN2,
+ S2MPG10_PMIC_BUCK_OCP_CTRL1,
+ S2MPG10_PMIC_BUCK_OCP_CTRL2,
+ S2MPG10_PMIC_BUCK_OCP_CTRL3,
+ S2MPG10_PMIC_BUCK_OCP_CTRL4,
+ S2MPG10_PMIC_BUCK_OCP_CTRL5,
+ S2MPG10_PMIC_PIF_CTRL,
+ S2MPG10_PMIC_BUCK_HR_MODE1,
+ S2MPG10_PMIC_BUCK_HR_MODE2,
+ S2MPG10_PMIC_FAULTOUT_CTRL,
+ S2MPG10_PMIC_LDO_SENSE1,
+ S2MPG10_PMIC_LDO_SENSE2,
+ S2MPG10_PMIC_LDO_SENSE3,
+ S2MPG10_PMIC_LDO_SENSE4,
+};
+
+/* Meter registers (type 0xa00) */
+enum s2mpg10_meter_reg {
+ S2MPG10_METER_CTRL1,
+ S2MPG10_METER_CTRL2,
+ S2MPG10_METER_CTRL3,
+ S2MPG10_METER_CTRL4,
+ S2MPG10_METER_BUCKEN1,
+ S2MPG10_METER_BUCKEN2,
+ S2MPG10_METER_MUXSEL0,
+ S2MPG10_METER_MUXSEL1,
+ S2MPG10_METER_MUXSEL2,
+ S2MPG10_METER_MUXSEL3,
+ S2MPG10_METER_MUXSEL4,
+ S2MPG10_METER_MUXSEL5,
+ S2MPG10_METER_MUXSEL6,
+ S2MPG10_METER_MUXSEL7,
+ S2MPG10_METER_LPF_C0_0,
+ S2MPG10_METER_LPF_C0_1,
+ S2MPG10_METER_LPF_C0_2,
+ S2MPG10_METER_LPF_C0_3,
+ S2MPG10_METER_LPF_C0_4,
+ S2MPG10_METER_LPF_C0_5,
+ S2MPG10_METER_LPF_C0_6,
+ S2MPG10_METER_LPF_C0_7,
+ S2MPG10_METER_PWR_WARN0,
+ S2MPG10_METER_PWR_WARN1,
+ S2MPG10_METER_PWR_WARN2,
+ S2MPG10_METER_PWR_WARN3,
+ S2MPG10_METER_PWR_WARN4,
+ S2MPG10_METER_PWR_WARN5,
+ S2MPG10_METER_PWR_WARN6,
+ S2MPG10_METER_PWR_WARN7,
+ S2MPG10_METER_PWR_HYS1,
+ S2MPG10_METER_PWR_HYS2,
+ S2MPG10_METER_PWR_HYS3,
+ S2MPG10_METER_PWR_HYS4,
+ S2MPG10_METER_ACC_DATA_CH0_1 = 0x40,
+ S2MPG10_METER_ACC_DATA_CH0_2,
+ S2MPG10_METER_ACC_DATA_CH0_3,
+ S2MPG10_METER_ACC_DATA_CH0_4,
+ S2MPG10_METER_ACC_DATA_CH0_5,
+ S2MPG10_METER_ACC_DATA_CH0_6,
+ S2MPG10_METER_ACC_DATA_CH1_1,
+ S2MPG10_METER_ACC_DATA_CH1_2,
+ S2MPG10_METER_ACC_DATA_CH1_3,
+ S2MPG10_METER_ACC_DATA_CH1_4,
+ S2MPG10_METER_ACC_DATA_CH1_5,
+ S2MPG10_METER_ACC_DATA_CH1_6,
+ S2MPG10_METER_ACC_DATA_CH2_1,
+ S2MPG10_METER_ACC_DATA_CH2_2,
+ S2MPG10_METER_ACC_DATA_CH2_3,
+ S2MPG10_METER_ACC_DATA_CH2_4,
+ S2MPG10_METER_ACC_DATA_CH2_5,
+ S2MPG10_METER_ACC_DATA_CH2_6,
+ S2MPG10_METER_ACC_DATA_CH3_1,
+ S2MPG10_METER_ACC_DATA_CH3_2,
+ S2MPG10_METER_ACC_DATA_CH3_3,
+ S2MPG10_METER_ACC_DATA_CH3_4,
+ S2MPG10_METER_ACC_DATA_CH3_5,
+ S2MPG10_METER_ACC_DATA_CH3_6,
+ S2MPG10_METER_ACC_DATA_CH4_1,
+ S2MPG10_METER_ACC_DATA_CH4_2,
+ S2MPG10_METER_ACC_DATA_CH4_3,
+ S2MPG10_METER_ACC_DATA_CH4_4,
+ S2MPG10_METER_ACC_DATA_CH4_5,
+ S2MPG10_METER_ACC_DATA_CH4_6,
+ S2MPG10_METER_ACC_DATA_CH5_1,
+ S2MPG10_METER_ACC_DATA_CH5_2,
+ S2MPG10_METER_ACC_DATA_CH5_3,
+ S2MPG10_METER_ACC_DATA_CH5_4,
+ S2MPG10_METER_ACC_DATA_CH5_5,
+ S2MPG10_METER_ACC_DATA_CH5_6,
+ S2MPG10_METER_ACC_DATA_CH6_1,
+ S2MPG10_METER_ACC_DATA_CH6_2,
+ S2MPG10_METER_ACC_DATA_CH6_3,
+ S2MPG10_METER_ACC_DATA_CH6_4,
+ S2MPG10_METER_ACC_DATA_CH6_5,
+ S2MPG10_METER_ACC_DATA_CH6_6,
+ S2MPG10_METER_ACC_DATA_CH7_1,
+ S2MPG10_METER_ACC_DATA_CH7_2,
+ S2MPG10_METER_ACC_DATA_CH7_3,
+ S2MPG10_METER_ACC_DATA_CH7_4,
+ S2MPG10_METER_ACC_DATA_CH7_5,
+ S2MPG10_METER_ACC_DATA_CH7_6,
+ S2MPG10_METER_ACC_COUNT_1,
+ S2MPG10_METER_ACC_COUNT_2,
+ S2MPG10_METER_ACC_COUNT_3,
+ S2MPG10_METER_LPF_DATA_CH0_1,
+ S2MPG10_METER_LPF_DATA_CH0_2,
+ S2MPG10_METER_LPF_DATA_CH0_3,
+ S2MPG10_METER_LPF_DATA_CH1_1,
+ S2MPG10_METER_LPF_DATA_CH1_2,
+ S2MPG10_METER_LPF_DATA_CH1_3,
+ S2MPG10_METER_LPF_DATA_CH2_1,
+ S2MPG10_METER_LPF_DATA_CH2_2,
+ S2MPG10_METER_LPF_DATA_CH2_3,
+ S2MPG10_METER_LPF_DATA_CH3_1,
+ S2MPG10_METER_LPF_DATA_CH3_2,
+ S2MPG10_METER_LPF_DATA_CH3_3,
+ S2MPG10_METER_LPF_DATA_CH4_1,
+ S2MPG10_METER_LPF_DATA_CH4_2,
+ S2MPG10_METER_LPF_DATA_CH4_3,
+ S2MPG10_METER_LPF_DATA_CH5_1,
+ S2MPG10_METER_LPF_DATA_CH5_2,
+ S2MPG10_METER_LPF_DATA_CH5_3,
+ S2MPG10_METER_LPF_DATA_CH6_1,
+ S2MPG10_METER_LPF_DATA_CH6_2,
+ S2MPG10_METER_LPF_DATA_CH6_3,
+ S2MPG10_METER_LPF_DATA_CH7_1,
+ S2MPG10_METER_LPF_DATA_CH7_2,
+ S2MPG10_METER_LPF_DATA_CH7_3,
+ S2MPG10_METER_DSM_TRIM_OFFSET = 0xee,
+ S2MPG10_METER_BUCK_METER_TRIM3 = 0xf1,
+};
+
+/* S2MPG10 regulator IDs */
+enum s2mpg10_regulators {
+ S2MPG10_LDO1,
+ S2MPG10_LDO2,
+ S2MPG10_LDO3,
+ S2MPG10_LDO4,
+ S2MPG10_LDO5,
+ S2MPG10_LDO6,
+ S2MPG10_LDO7,
+ S2MPG10_LDO8,
+ S2MPG10_LDO9,
+ S2MPG10_LDO10,
+ S2MPG10_LDO11,
+ S2MPG10_LDO12,
+ S2MPG10_LDO13,
+ S2MPG10_LDO14,
+ S2MPG10_LDO15,
+ S2MPG10_LDO16,
+ S2MPG10_LDO17,
+ S2MPG10_LDO18,
+ S2MPG10_LDO19,
+ S2MPG10_LDO20,
+ S2MPG10_LDO21,
+ S2MPG10_LDO22,
+ S2MPG10_LDO23,
+ S2MPG10_LDO24,
+ S2MPG10_LDO25,
+ S2MPG10_LDO26,
+ S2MPG10_LDO27,
+ S2MPG10_LDO28,
+ S2MPG10_LDO29,
+ S2MPG10_LDO30,
+ S2MPG10_LDO31,
+ S2MPG10_BUCK1,
+ S2MPG10_BUCK2,
+ S2MPG10_BUCK3,
+ S2MPG10_BUCK4,
+ S2MPG10_BUCK5,
+ S2MPG10_BUCK6,
+ S2MPG10_BUCK7,
+ S2MPG10_BUCK8,
+ S2MPG10_BUCK9,
+ S2MPG10_BUCK10,
+ S2MPG10_REGULATOR_MAX,
+};
+
+#endif /* __LINUX_MFD_S2MPG10_H */
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index 2c14eeca46f0..4805c90609c4 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps11.h
- *
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPS11_H
@@ -177,7 +170,9 @@ enum s2mps11_regulators {
#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
#define S2MPS11_ENABLE_SHIFT 0x06
#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
-#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK12346_N_VOLTAGES 153
+#define S2MPS11_BUCK5_N_VOLTAGES 216
+#define S2MPS11_BUCK7810_N_VOLTAGES 225
#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
@@ -195,4 +190,9 @@ enum s2mps11_regulators {
#define S2MPS11_BUCK6_RAMP_EN_SHIFT 0
#define S2MPS11_PMIC_EN_SHIFT 6
+/*
+ * Bits for "enable suspend" (On/Off controlled by PWREN)
+ * are the same as in S2MPS14: S2MPS14_ENABLE_SUSPEND
+ */
+
#endif /* __LINUX_MFD_S2MPS11_H */
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index 239e977ba45d..b96d8a11dcd3 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps13.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS13_H
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
index c92f4782afb5..f4afa0cfc24f 100644
--- a/include/linux/mfd/samsung/s2mps14.h
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps14.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS14_H
diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h
index 36d35287c3c0..eac6bf74b72e 100644
--- a/include/linux/mfd/samsung/s2mps15.h
+++ b/include/linux/mfd/samsung/s2mps15.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_S2MPS15_H
diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h
index 47ae9bc583a7..76cd5380cf0f 100644
--- a/include/linux/mfd/samsung/s2mpu02.h
+++ b/include/linux/mfd/samsung/s2mpu02.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mpu02.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPU02_H
diff --git a/include/linux/mfd/samsung/s2mpu05.h b/include/linux/mfd/samsung/s2mpu05.h
new file mode 100644
index 000000000000..fcdb6c8adb03
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpu05.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd
+ * Copyright (c) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+#ifndef __LINUX_MFD_S2MPU05_H
+#define __LINUX_MFD_S2MPU05_H
+
+/* S2MPU05 registers */
+enum S2MPU05_reg {
+ S2MPU05_REG_ID,
+ S2MPU05_REG_INT1,
+ S2MPU05_REG_INT2,
+ S2MPU05_REG_INT3,
+ S2MPU05_REG_INT1M,
+ S2MPU05_REG_INT2M,
+ S2MPU05_REG_INT3M,
+ S2MPU05_REG_ST1,
+ S2MPU05_REG_ST2,
+ S2MPU05_REG_PWRONSRC,
+ S2MPU05_REG_OFFSRC,
+ S2MPU05_REG_BU_CHG,
+ S2MPU05_REG_RTC_BUF,
+ S2MPU05_REG_CTRL1,
+ S2MPU05_REG_CTRL2,
+ S2MPU05_REG_ETC_TEST,
+ S2MPU05_REG_OTP_ADRL,
+ S2MPU05_REG_OTP_ADRH,
+ S2MPU05_REG_OTP_DATA,
+ S2MPU05_REG_MON1SEL,
+ S2MPU05_REG_MON2SEL,
+ S2MPU05_REG_CTRL3,
+ S2MPU05_REG_ETC_OTP,
+ S2MPU05_REG_UVLO,
+ S2MPU05_REG_TIME_CTRL1,
+ S2MPU05_REG_TIME_CTRL2,
+ S2MPU05_REG_B1CTRL1,
+ S2MPU05_REG_B1CTRL2,
+ S2MPU05_REG_B2CTRL1,
+ S2MPU05_REG_B2CTRL2,
+ S2MPU05_REG_B2CTRL3,
+ S2MPU05_REG_B2CTRL4,
+ S2MPU05_REG_B3CTRL1,
+ S2MPU05_REG_B3CTRL2,
+ S2MPU05_REG_B3CTRL3,
+ S2MPU05_REG_B4CTRL1,
+ S2MPU05_REG_B4CTRL2,
+ S2MPU05_REG_B5CTRL1,
+ S2MPU05_REG_B5CTRL2,
+ S2MPU05_REG_BUCK_RAMP,
+ S2MPU05_REG_LDO_DVS1,
+ S2MPU05_REG_LDO_DVS9,
+ S2MPU05_REG_LDO_DVS10,
+ S2MPU05_REG_L1CTRL,
+ S2MPU05_REG_L2CTRL,
+ S2MPU05_REG_L3CTRL,
+ S2MPU05_REG_L4CTRL,
+ S2MPU05_REG_L5CTRL,
+ S2MPU05_REG_L6CTRL,
+ S2MPU05_REG_L7CTRL,
+ S2MPU05_REG_L8CTRL,
+ S2MPU05_REG_L9CTRL1,
+ S2MPU05_REG_L9CTRL2,
+ S2MPU05_REG_L10CTRL,
+ S2MPU05_REG_L11CTRL1,
+ S2MPU05_REG_L11CTRL2,
+ S2MPU05_REG_L12CTRL,
+ S2MPU05_REG_L13CTRL,
+ S2MPU05_REG_L14CTRL,
+ S2MPU05_REG_L15CTRL,
+ S2MPU05_REG_L16CTRL,
+ S2MPU05_REG_L17CTRL1,
+ S2MPU05_REG_L17CTRL2,
+ S2MPU05_REG_L18CTRL1,
+ S2MPU05_REG_L18CTRL2,
+ S2MPU05_REG_L19CTRL,
+ S2MPU05_REG_L20CTRL,
+ S2MPU05_REG_L21CTRL,
+ S2MPU05_REG_L22CTRL,
+ S2MPU05_REG_L23CTRL,
+ S2MPU05_REG_L24CTRL,
+ S2MPU05_REG_L25CTRL,
+ S2MPU05_REG_L26CTRL,
+ S2MPU05_REG_L27CTRL,
+ S2MPU05_REG_L28CTRL,
+ S2MPU05_REG_L29CTRL,
+ S2MPU05_REG_L30CTRL,
+ S2MPU05_REG_L31CTRL,
+ S2MPU05_REG_L32CTRL,
+ S2MPU05_REG_L33CTRL,
+ S2MPU05_REG_L34CTRL,
+ S2MPU05_REG_L35CTRL,
+ S2MPU05_REG_LDO_DSCH1,
+ S2MPU05_REG_LDO_DSCH2,
+ S2MPU05_REG_LDO_DSCH3,
+ S2MPU05_REG_LDO_DSCH4,
+ S2MPU05_REG_LDO_DSCH5,
+ S2MPU05_REG_LDO_CTRL1,
+ S2MPU05_REG_LDO_CTRL2,
+ S2MPU05_REG_TCXO_CTRL,
+ S2MPU05_REG_SELMIF,
+};
+
+/* S2MPU05 regulator IDs */
+enum S2MPU05_regulators {
+ S2MPU05_LDO1,
+ S2MPU05_LDO2,
+ S2MPU05_LDO3,
+ S2MPU05_LDO4,
+ S2MPU05_LDO5,
+ S2MPU05_LDO6,
+ S2MPU05_LDO7,
+ S2MPU05_LDO8,
+ S2MPU05_LDO9,
+ S2MPU05_LDO10,
+ S2MPU05_LDO11,
+ S2MPU05_LDO12,
+ S2MPU05_LDO13,
+ S2MPU05_LDO14,
+ S2MPU05_LDO15,
+ S2MPU05_LDO16,
+ S2MPU05_LDO17,
+ S2MPU05_LDO18,
+ S2MPU05_LDO19,
+ S2MPU05_LDO20,
+ S2MPU05_LDO21,
+ S2MPU05_LDO22,
+ S2MPU05_LDO23,
+ S2MPU05_LDO24,
+ S2MPU05_LDO25,
+ S2MPU05_LDO26,
+ S2MPU05_LDO27,
+ S2MPU05_LDO28,
+ S2MPU05_LDO29,
+ S2MPU05_LDO30,
+ S2MPU05_LDO31,
+ S2MPU05_LDO32,
+ S2MPU05_LDO33,
+ S2MPU05_LDO34,
+ S2MPU05_LDO35,
+ S2MPU05_BUCK1,
+ S2MPU05_BUCK2,
+ S2MPU05_BUCK3,
+ S2MPU05_BUCK4,
+ S2MPU05_BUCK5,
+
+ S2MPU05_REGULATOR_MAX,
+};
+
+#define S2MPU05_SW_ENABLE_MASK 0x03
+
+#define S2MPU05_ENABLE_TIME_LDO 128
+#define S2MPU05_ENABLE_TIME_BUCK1 110
+#define S2MPU05_ENABLE_TIME_BUCK2 110
+#define S2MPU05_ENABLE_TIME_BUCK3 110
+#define S2MPU05_ENABLE_TIME_BUCK4 150
+#define S2MPU05_ENABLE_TIME_BUCK5 150
+
+#define S2MPU05_LDO_MIN1 800000
+#define S2MPU05_LDO_MIN2 1800000
+#define S2MPU05_LDO_MIN3 400000
+#define S2MPU05_LDO_STEP1 12500
+#define S2MPU05_LDO_STEP2 25000
+
+#define S2MPU05_BUCK_MIN1 400000
+#define S2MPU05_BUCK_MIN2 600000
+#define S2MPU05_BUCK_STEP1 6250
+#define S2MPU05_BUCK_STEP2 12500
+
+#define S2MPU05_RAMP_DELAY 12000 /* uV/us */
+
+#define S2MPU05_ENABLE_SHIFT 6
+#define S2MPU05_ENABLE_MASK (0x03 << S2MPU05_ENABLE_SHIFT)
+
+#define S2MPU05_LDO_VSEL_MASK 0x3F
+#define S2MPU05_BUCK_VSEL_MASK 0xFF
+#define S2MPU05_LDO_N_VOLTAGES (S2MPU05_LDO_VSEL_MASK + 1)
+#define S2MPU05_BUCK_N_VOLTAGES (S2MPU05_BUCK_VSEL_MASK + 1)
+
+#define S2MPU05_PMIC_EN_SHIFT 6
+
+#endif /* __LINUX_MFD_S2MPU05_H */
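
Reviewer note: the MIN/STEP pairs above define linear voltage maps; which regulator uses which pair is a hardware property not spelled out in this header. A minimal sketch of the selector-to-microvolts arithmetic, assuming an LDO on the MIN1/STEP1 range:

#include <linux/mfd/samsung/s2mpu05.h>

/* Sketch: selector 0x00 -> 800000 uV, each step adds 12500 uV. */
static int s2mpu05_ldo_sel_to_uV_example(unsigned int sel)
{
	sel &= S2MPU05_LDO_VSEL_MASK;
	return S2MPU05_LDO_MIN1 + sel * S2MPU05_LDO_STEP1;
}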
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
deleted file mode 100644
index e025418e5589..000000000000
--- a/include/linux/mfd/samsung/s5m8763.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* s5m8763.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __LINUX_MFD_S5M8763_H
-#define __LINUX_MFD_S5M8763_H
-
-/* S5M8763 registers */
-enum s5m8763_reg {
- S5M8763_REG_IRQ1,
- S5M8763_REG_IRQ2,
- S5M8763_REG_IRQ3,
- S5M8763_REG_IRQ4,
- S5M8763_REG_IRQM1,
- S5M8763_REG_IRQM2,
- S5M8763_REG_IRQM3,
- S5M8763_REG_IRQM4,
- S5M8763_REG_STATUS1,
- S5M8763_REG_STATUS2,
- S5M8763_REG_STATUSM1,
- S5M8763_REG_STATUSM2,
- S5M8763_REG_CHGR1,
- S5M8763_REG_CHGR2,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE1,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE2,
- S5M8763_REG_BUCK_ACTIVE_DISCHARGE3,
- S5M8763_REG_ONOFF1,
- S5M8763_REG_ONOFF2,
- S5M8763_REG_ONOFF3,
- S5M8763_REG_ONOFF4,
- S5M8763_REG_BUCK1_VOLTAGE1,
- S5M8763_REG_BUCK1_VOLTAGE2,
- S5M8763_REG_BUCK1_VOLTAGE3,
- S5M8763_REG_BUCK1_VOLTAGE4,
- S5M8763_REG_BUCK2_VOLTAGE1,
- S5M8763_REG_BUCK2_VOLTAGE2,
- S5M8763_REG_BUCK3,
- S5M8763_REG_BUCK4,
- S5M8763_REG_LDO1_LDO2,
- S5M8763_REG_LDO3,
- S5M8763_REG_LDO4,
- S5M8763_REG_LDO5,
- S5M8763_REG_LDO6,
- S5M8763_REG_LDO7,
- S5M8763_REG_LDO7_LDO8,
- S5M8763_REG_LDO9_LDO10,
- S5M8763_REG_LDO11,
- S5M8763_REG_LDO12,
- S5M8763_REG_LDO13,
- S5M8763_REG_LDO14,
- S5M8763_REG_LDO15,
- S5M8763_REG_LDO16,
- S5M8763_REG_BKCHR,
- S5M8763_REG_LBCNFG1,
- S5M8763_REG_LBCNFG2,
-};
-
-/* S5M8763 regulator ids */
-enum s5m8763_regulators {
- S5M8763_LDO1,
- S5M8763_LDO2,
- S5M8763_LDO3,
- S5M8763_LDO4,
- S5M8763_LDO5,
- S5M8763_LDO6,
- S5M8763_LDO7,
- S5M8763_LDO8,
- S5M8763_LDO9,
- S5M8763_LDO10,
- S5M8763_LDO11,
- S5M8763_LDO12,
- S5M8763_LDO13,
- S5M8763_LDO14,
- S5M8763_LDO15,
- S5M8763_LDO16,
- S5M8763_BUCK1,
- S5M8763_BUCK2,
- S5M8763_BUCK3,
- S5M8763_BUCK4,
- S5M8763_AP_EN32KHZ,
- S5M8763_CP_EN32KHZ,
- S5M8763_ENCHGVI,
- S5M8763_ESAFEUSB1,
- S5M8763_ESAFEUSB2,
-};
-
-#define S5M8763_ENRAMP (1 << 4)
-#endif /* __LINUX_MFD_S5M8763_H */
diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h
index 243b58fec33d..704f8d80e96e 100644
--- a/include/linux/mfd/samsung/s5m8767.h
+++ b/include/linux/mfd/samsung/s5m8767.h
@@ -1,13 +1,7 @@
-/* s5m8767.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S5M8767_H
diff --git a/include/linux/mfd/sc27xx-pmic.h b/include/linux/mfd/sc27xx-pmic.h
new file mode 100644
index 000000000000..57e45c0b3ae2
--- /dev/null
+++ b/include/linux/mfd/sc27xx-pmic.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MFD_SC27XX_PMIC_H
+#define __LINUX_MFD_SC27XX_PMIC_H
+
+extern enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev);
+
+#endif /* __LINUX_MFD_SC27XX_PMIC_H */
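
Reviewer note: a hedged usage sketch for the single helper this new header exports. The device pointer must refer to the SC27xx PMIC, and enum usb_charger_type (DCP_TYPE etc.) comes from <linux/usb/charger.h>:

#include <linux/device.h>
#include <linux/usb/charger.h>
#include <linux/mfd/sc27xx-pmic.h>

static bool sc27xx_charger_is_dcp_example(struct device *pmic_dev)
{
	/* Returns true only for a dedicated charging port */
	return sprd_pmic_detect_charger_type(pmic_dev) == DCP_TYPE;
}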
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h
index 674b45d5a757..dd95c37ca134 100644
--- a/include/linux/mfd/si476x-core.h
+++ b/include/linux/mfd/si476x-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/media/si476x-core.h -- Common definitions for si476x core
* device
@@ -6,16 +7,6 @@
* Copyright (C) 2013 Andrey Smirnov
*
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#ifndef SI476X_CORE_H
@@ -66,7 +57,7 @@ enum si476x_mfd_cells {
* @SI476X_POWER_DOWN: In this state all regulators are turned off
* and the reset line is pulled low. The device is completely
* inactive.
- * @SI476X_POWER_UP_FULL: In this state all the power regualtors are
+ * @SI476X_POWER_UP_FULL: In this state all the power regulators are
* turned on, reset line pulled high, IRQ line is enabled(polling is
* active for polling use scenario) and device is turned on with
* POWER_UP command. The device is ready to be used.
diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h
index 88bb93b7a9d5..cb99e16ca947 100644
--- a/include/linux/mfd/si476x-platform.h
+++ b/include/linux/mfd/si476x-platform.h
@@ -1,25 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/media/si476x-platform.h -- Platform data specific definitions
*
* Copyright (C) 2013 Andrey Smirnov
*
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#ifndef __SI476X_PLATFORM_H__
#define __SI476X_PLATFORM_H__
-/* It is possible to select one of the four adresses using pins A0
+/* It is possible to select one of the four addresses using pins A0
* and A1 on SI476x */
#define SI476X_I2C_ADDR_1 0x60
#define SI476X_I2C_ADDR_2 0x61
diff --git a/include/linux/mfd/si476x-reports.h b/include/linux/mfd/si476x-reports.h
index e0b9455a79c0..93b34184699d 100644
--- a/include/linux/mfd/si476x-reports.h
+++ b/include/linux/mfd/si476x-reports.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/media/si476x-platform.h -- Definitions of the data formats
* returned by debugfs hooks
@@ -5,16 +6,6 @@
* Copyright (C) 2013 Andrey Smirnov
*
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#ifndef __SI476X_REPORTS_H__
diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h
index b0925fa3e9ef..b08570ff34df 100644
--- a/include/linux/mfd/sky81452.h
+++ b/include/linux/mfd/sky81452.h
@@ -1,30 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* sky81452.h SKY81452 MFD driver
*
* Copyright 2014 Skyworks Solutions Inc.
* Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SKY81452_H
#define _SKY81452_H
-#include <linux/platform_data/sky81452-backlight.h>
#include <linux/regulator/machine.h>
struct sky81452_platform_data {
- struct sky81452_bl_platform_data *bl_pdata;
struct regulator_init_data *regulator_init_data;
};
diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h
deleted file mode 100644
index 9747b29f356f..000000000000
--- a/include/linux/mfd/smsc.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * SMSC ECE1099
- *
- * Copyright 2012 Texas Instruments Inc.
- *
- * Author: Sourav Poddar <sourav.poddar@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __LINUX_MFD_SMSC_H
-#define __LINUX_MFD_SMSC_H
-
-#include <linux/regmap.h>
-
-#define SMSC_ID_ECE1099 1
-#define SMSC_NUM_CLIENTS 2
-
-#define SMSC_BASE_ADDR 0x38
-#define OMAP_GPIO_SMSC_IRQ 151
-
-#define SMSC_MAXGPIO 32
-#define SMSC_BANK(offs) ((offs) >> 3)
-#define SMSC_BIT(offs) (1u << ((offs) & 0x7))
-
-struct smsc {
- struct device *dev;
- struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS];
- struct regmap *regmap;
- int clk;
- /* Stored chip id */
- int id;
-};
-
-struct smsc_gpio;
-struct smsc_keypad;
-
-static inline int smsc_read(struct device *child, unsigned int reg,
- unsigned int *dest)
-{
- struct smsc *smsc = dev_get_drvdata(child->parent);
-
- return regmap_read(smsc->regmap, reg, dest);
-}
-
-static inline int smsc_write(struct device *child, unsigned int reg,
- unsigned int value)
-{
- struct smsc *smsc = dev_get_drvdata(child->parent);
-
- return regmap_write(smsc->regmap, reg, value);
-}
-
-/* Registers for SMSC */
-#define SMSC_RESET 0xF5
-#define SMSC_GRP_INT 0xF9
-#define SMSC_CLK_CTRL 0xFA
-#define SMSC_WKUP_CTRL 0xFB
-#define SMSC_DEV_ID 0xFC
-#define SMSC_DEV_REV 0xFD
-#define SMSC_VEN_ID_L 0xFE
-#define SMSC_VEN_ID_H 0xFF
-
-/* CLK VALUE */
-#define SMSC_CLK_VALUE 0x13
-
-/* Registers for function GPIO INPUT */
-#define SMSC_GPIO_DATA_IN_START 0x00
-
-/* Registers for function GPIO OUPUT */
-#define SMSC_GPIO_DATA_OUT_START 0x05
-
-/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/
-#define SMSC_GPIO_INPUT_LOW 0x01
-#define SMSC_GPIO_INPUT_RISING 0x09
-#define SMSC_GPIO_INPUT_FALLING 0x11
-#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19
-#define SMSC_GPIO_OUTPUT_PP 0x21
-#define SMSC_GPIO_OUTPUT_OP 0x31
-
-#define GRP_INT_STAT 0xf9
-#define SMSC_GPI_INT 0x0f
-#define SMSC_CFG_START 0x0A
-
-/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/
-#define SMSC_GPIO_INT_STAT_START 0x32
-
-/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/
-#define SMSC_GPIO_INT_MASK_START 0x37
-
-/* Registers for SMSC function KEYPAD*/
-#define SMSC_KP_OUT 0x40
-#define SMSC_KP_IN 0x41
-#define SMSC_KP_INT_STAT 0x42
-#define SMSC_KP_INT_MASK 0x43
-
-/* Definitions for keypad */
-#define SMSC_KP_KSO 0x70
-#define SMSC_KP_KSI 0x51
-#define SMSC_KSO_ALL_LOW 0x20
-#define SMSC_KP_SET_LOW_PWR 0x0B
-#define SMSC_KP_SET_HIGH 0xFF
-#define SMSC_KSO_EVAL 0x00
-
-#endif /* __LINUX_MFD_SMSC_H */
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
deleted file mode 100644
index 9a855ac11cbf..000000000000
--- a/include/linux/mfd/sta2x11-mfd.h
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * Copyright (c) 2009-2011 Wind River Systems, Inc.
- * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
- * functions in one PCI endpoint functions. This driver simply
- * registers the platform devices in this iomemregion and exports a few
- * functions to access common registers
- */
-
-#ifndef __STA2X11_MFD_H
-#define __STA2X11_MFD_H
-#include <linux/types.h>
-#include <linux/pci.h>
-
-enum sta2x11_mfd_plat_dev {
- sta2x11_sctl = 0,
- sta2x11_gpio,
- sta2x11_scr,
- sta2x11_time,
- sta2x11_apbreg,
- sta2x11_apb_soc_regs,
- sta2x11_vic,
- sta2x11_n_mfd_plat_devs,
-};
-
-#define STA2X11_MFD_SCTL_NAME "sta2x11-sctl"
-#define STA2X11_MFD_GPIO_NAME "sta2x11-gpio"
-#define STA2X11_MFD_SCR_NAME "sta2x11-scr"
-#define STA2X11_MFD_TIME_NAME "sta2x11-time"
-#define STA2X11_MFD_APBREG_NAME "sta2x11-apbreg"
-#define STA2X11_MFD_APB_SOC_REGS_NAME "sta2x11-apb-soc-regs"
-#define STA2X11_MFD_VIC_NAME "sta2x11-vic"
-
-extern u32
-__sta2x11_mfd_mask(struct pci_dev *, u32, u32, u32, enum sta2x11_mfd_plat_dev);
-
-/*
- * The MFD PCI block includes the GPIO peripherals and other register blocks.
- * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
- */
-#define GSTA_GPIO_PER_BLOCK 32
-#define GSTA_NR_BLOCKS 4
-#define GSTA_NR_GPIO (GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS)
-
-/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */
-struct sta2x11_gpio_pdata {
- unsigned pinconfig[GSTA_NR_GPIO];
-};
-
-/* Macros below lifted from sh_pfc.h, with minor differences */
-#define PINMUX_TYPE_NONE 0
-#define PINMUX_TYPE_FUNCTION 1
-#define PINMUX_TYPE_OUTPUT_LOW 2
-#define PINMUX_TYPE_OUTPUT_HIGH 3
-#define PINMUX_TYPE_INPUT 4
-#define PINMUX_TYPE_INPUT_PULLUP 5
-#define PINMUX_TYPE_INPUT_PULLDOWN 6
-
-/* Give names to GPIO pins, like PXA does, taken from the manual */
-#define STA2X11_GPIO0 0
-#define STA2X11_GPIO1 1
-#define STA2X11_GPIO2 2
-#define STA2X11_GPIO3 3
-#define STA2X11_GPIO4 4
-#define STA2X11_GPIO5 5
-#define STA2X11_GPIO6 6
-#define STA2X11_GPIO7 7
-#define STA2X11_GPIO8_RGBOUT_RED7 8
-#define STA2X11_GPIO9_RGBOUT_RED6 9
-#define STA2X11_GPIO10_RGBOUT_RED5 10
-#define STA2X11_GPIO11_RGBOUT_RED4 11
-#define STA2X11_GPIO12_RGBOUT_RED3 12
-#define STA2X11_GPIO13_RGBOUT_RED2 13
-#define STA2X11_GPIO14_RGBOUT_RED1 14
-#define STA2X11_GPIO15_RGBOUT_RED0 15
-#define STA2X11_GPIO16_RGBOUT_GREEN7 16
-#define STA2X11_GPIO17_RGBOUT_GREEN6 17
-#define STA2X11_GPIO18_RGBOUT_GREEN5 18
-#define STA2X11_GPIO19_RGBOUT_GREEN4 19
-#define STA2X11_GPIO20_RGBOUT_GREEN3 20
-#define STA2X11_GPIO21_RGBOUT_GREEN2 21
-#define STA2X11_GPIO22_RGBOUT_GREEN1 22
-#define STA2X11_GPIO23_RGBOUT_GREEN0 23
-#define STA2X11_GPIO24_RGBOUT_BLUE7 24
-#define STA2X11_GPIO25_RGBOUT_BLUE6 25
-#define STA2X11_GPIO26_RGBOUT_BLUE5 26
-#define STA2X11_GPIO27_RGBOUT_BLUE4 27
-#define STA2X11_GPIO28_RGBOUT_BLUE3 28
-#define STA2X11_GPIO29_RGBOUT_BLUE2 29
-#define STA2X11_GPIO30_RGBOUT_BLUE1 30
-#define STA2X11_GPIO31_RGBOUT_BLUE0 31
-#define STA2X11_GPIO32_RGBOUT_VSYNCH 32
-#define STA2X11_GPIO33_RGBOUT_HSYNCH 33
-#define STA2X11_GPIO34_RGBOUT_DEN 34
-#define STA2X11_GPIO35_ETH_CRS_DV 35
-#define STA2X11_GPIO36_ETH_TXD1 36
-#define STA2X11_GPIO37_ETH_TXD0 37
-#define STA2X11_GPIO38_ETH_TX_EN 38
-#define STA2X11_GPIO39_MDIO 39
-#define STA2X11_GPIO40_ETH_REF_CLK 40
-#define STA2X11_GPIO41_ETH_RXD1 41
-#define STA2X11_GPIO42_ETH_RXD0 42
-#define STA2X11_GPIO43_MDC 43
-#define STA2X11_GPIO44_CAN_TX 44
-#define STA2X11_GPIO45_CAN_RX 45
-#define STA2X11_GPIO46_MLB_DAT 46
-#define STA2X11_GPIO47_MLB_SIG 47
-#define STA2X11_GPIO48_SPI0_CLK 48
-#define STA2X11_GPIO49_SPI0_TXD 49
-#define STA2X11_GPIO50_SPI0_RXD 50
-#define STA2X11_GPIO51_SPI0_FRM 51
-#define STA2X11_GPIO52_SPI1_CLK 52
-#define STA2X11_GPIO53_SPI1_TXD 53
-#define STA2X11_GPIO54_SPI1_RXD 54
-#define STA2X11_GPIO55_SPI1_FRM 55
-#define STA2X11_GPIO56_SPI2_CLK 56
-#define STA2X11_GPIO57_SPI2_TXD 57
-#define STA2X11_GPIO58_SPI2_RXD 58
-#define STA2X11_GPIO59_SPI2_FRM 59
-#define STA2X11_GPIO60_I2C0_SCL 60
-#define STA2X11_GPIO61_I2C0_SDA 61
-#define STA2X11_GPIO62_I2C1_SCL 62
-#define STA2X11_GPIO63_I2C1_SDA 63
-#define STA2X11_GPIO64_I2C2_SCL 64
-#define STA2X11_GPIO65_I2C2_SDA 65
-#define STA2X11_GPIO66_I2C3_SCL 66
-#define STA2X11_GPIO67_I2C3_SDA 67
-#define STA2X11_GPIO68_MSP0_RCK 68
-#define STA2X11_GPIO69_MSP0_RXD 69
-#define STA2X11_GPIO70_MSP0_RFS 70
-#define STA2X11_GPIO71_MSP0_TCK 71
-#define STA2X11_GPIO72_MSP0_TXD 72
-#define STA2X11_GPIO73_MSP0_TFS 73
-#define STA2X11_GPIO74_MSP0_SCK 74
-#define STA2X11_GPIO75_MSP1_CK 75
-#define STA2X11_GPIO76_MSP1_RXD 76
-#define STA2X11_GPIO77_MSP1_FS 77
-#define STA2X11_GPIO78_MSP1_TXD 78
-#define STA2X11_GPIO79_MSP2_CK 79
-#define STA2X11_GPIO80_MSP2_RXD 80
-#define STA2X11_GPIO81_MSP2_FS 81
-#define STA2X11_GPIO82_MSP2_TXD 82
-#define STA2X11_GPIO83_MSP3_CK 83
-#define STA2X11_GPIO84_MSP3_RXD 84
-#define STA2X11_GPIO85_MSP3_FS 85
-#define STA2X11_GPIO86_MSP3_TXD 86
-#define STA2X11_GPIO87_MSP4_CK 87
-#define STA2X11_GPIO88_MSP4_RXD 88
-#define STA2X11_GPIO89_MSP4_FS 89
-#define STA2X11_GPIO90_MSP4_TXD 90
-#define STA2X11_GPIO91_MSP5_CK 91
-#define STA2X11_GPIO92_MSP5_RXD 92
-#define STA2X11_GPIO93_MSP5_FS 93
-#define STA2X11_GPIO94_MSP5_TXD 94
-#define STA2X11_GPIO95_SDIO3_DAT3 95
-#define STA2X11_GPIO96_SDIO3_DAT2 96
-#define STA2X11_GPIO97_SDIO3_DAT1 97
-#define STA2X11_GPIO98_SDIO3_DAT0 98
-#define STA2X11_GPIO99_SDIO3_CLK 99
-#define STA2X11_GPIO100_SDIO3_CMD 100
-#define STA2X11_GPIO101 101
-#define STA2X11_GPIO102 102
-#define STA2X11_GPIO103 103
-#define STA2X11_GPIO104 104
-#define STA2X11_GPIO105_SDIO2_DAT3 105
-#define STA2X11_GPIO106_SDIO2_DAT2 106
-#define STA2X11_GPIO107_SDIO2_DAT1 107
-#define STA2X11_GPIO108_SDIO2_DAT0 108
-#define STA2X11_GPIO109_SDIO2_CLK 109
-#define STA2X11_GPIO110_SDIO2_CMD 110
-#define STA2X11_GPIO111 111
-#define STA2X11_GPIO112 112
-#define STA2X11_GPIO113 113
-#define STA2X11_GPIO114 114
-#define STA2X11_GPIO115_SDIO1_DAT3 115
-#define STA2X11_GPIO116_SDIO1_DAT2 116
-#define STA2X11_GPIO117_SDIO1_DAT1 117
-#define STA2X11_GPIO118_SDIO1_DAT0 118
-#define STA2X11_GPIO119_SDIO1_CLK 119
-#define STA2X11_GPIO120_SDIO1_CMD 120
-#define STA2X11_GPIO121 121
-#define STA2X11_GPIO122 122
-#define STA2X11_GPIO123 123
-#define STA2X11_GPIO124 124
-#define STA2X11_GPIO125_UART2_TXD 125
-#define STA2X11_GPIO126_UART2_RXD 126
-#define STA2X11_GPIO127_UART3_TXD 127
-
-/*
- * The APB bridge has its own registers, needed by our users as well.
- * They are accessed with the following read/mask/write function.
- */
-static inline u32
-sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
-{
- return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apbreg);
-}
-
-/* CAN and MLB */
-#define APBREG_BSR 0x00 /* Bridge Status Reg */
-#define APBREG_PAER 0x08 /* Peripherals Address Error Reg */
-#define APBREG_PWAC 0x20 /* Peripheral Write Access Control reg */
-#define APBREG_PRAC 0x40 /* Peripheral Read Access Control reg */
-#define APBREG_PCG 0x60 /* Peripheral Clock Gating Reg */
-#define APBREG_PUR 0x80 /* Peripheral Under Reset Reg */
-#define APBREG_EMU_PCG 0xA0 /* Emulator Peripheral Clock Gating Reg */
-
-#define APBREG_CAN (1 << 1)
-#define APBREG_MLB (1 << 3)
-
-/* SARAC */
-#define APBREG_BSR_SARAC 0x100 /* Bridge Status Reg */
-#define APBREG_PAER_SARAC 0x108 /* Peripherals Address Error Reg */
-#define APBREG_PWAC_SARAC 0x120 /* Peripheral Write Access Control reg */
-#define APBREG_PRAC_SARAC 0x140 /* Peripheral Read Access Control reg */
-#define APBREG_PCG_SARAC 0x160 /* Peripheral Clock Gating Reg */
-#define APBREG_PUR_SARAC 0x180 /* Peripheral Under Reset Reg */
-#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */
-
-#define APBREG_SARAC (1 << 2)
-
-/*
- * The system controller has its own registers. Some of these are accessed
- * by out users as well, using the following read/mask/write/function
- */
-static inline
-u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
-{
- return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_sctl);
-}
-
-#define SCTL_SCCTL 0x00 /* System controller control register */
-#define SCTL_ARMCFG 0x04 /* ARM configuration register */
-#define SCTL_SCPLLCTL 0x08 /* PLL control status register */
-
-#define SCTL_SCPLLCTL_AUDIO_PLL_PD BIT(1)
-#define SCTL_SCPLLCTL_FRAC_CONTROL BIT(3)
-#define SCTL_SCPLLCTL_STRB_BYPASS BIT(6)
-#define SCTL_SCPLLCTL_STRB_INPUT BIT(8)
-
-#define SCTL_SCPLLFCTRL 0x0c /* PLL frequency control register */
-
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_MASK 0xff
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_SHIFT 10
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_MASK 7
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_SHIFT 21
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_MASK 7
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_SHIFT 18
-#define SCTL_SCPLLFCTRL_DITHER_DISABLE_MASK 0x03
-#define SCTL_SCPLLFCTRL_DITHER_DISABLE_SHIFT 4
-
-
-#define SCTL_SCRESFRACT 0x10 /* PLL fractional input register */
-
-#define SCTL_SCRESFRACT_MASK 0x0000ffff
-
-
-#define SCTL_SCRESCTRL1 0x14 /* Peripheral reset control 1 */
-#define SCTL_SCRESXTRL2 0x18 /* Peripheral reset control 2 */
-#define SCTL_SCPEREN0 0x1c /* Peripheral clock enable register 0 */
-#define SCTL_SCPEREN1 0x20 /* Peripheral clock enable register 1 */
-#define SCTL_SCPEREN2 0x24 /* Peripheral clock enable register 2 */
-#define SCTL_SCGRST 0x28 /* Peripheral global reset */
-#define SCTL_SCPCIECSBRST 0x2c /* PCIe PAB CSB reset status register */
-#define SCTL_SCPCIPMCR1 0x30 /* PCI power management control 1 */
-#define SCTL_SCPCIPMCR2 0x34 /* PCI power management control 2 */
-#define SCTL_SCPCIPMSR1 0x38 /* PCI power management status 1 */
-#define SCTL_SCPCIPMSR2 0x3c /* PCI power management status 2 */
-#define SCTL_SCPCIPMSR3 0x40 /* PCI power management status 3 */
-#define SCTL_SCINTREN 0x44 /* Interrupt enable */
-#define SCTL_SCRISR 0x48 /* RAW interrupt status */
-#define SCTL_SCCLKSTAT0 0x4c /* Peripheral clocks status 0 */
-#define SCTL_SCCLKSTAT1 0x50 /* Peripheral clocks status 1 */
-#define SCTL_SCCLKSTAT2 0x54 /* Peripheral clocks status 2 */
-#define SCTL_SCRSTSTA 0x58 /* Reset status register */
-
-#define SCTL_SCRESCTRL1_USB_PHY_POR (1 << 0)
-#define SCTL_SCRESCTRL1_USB_OTG (1 << 1)
-#define SCTL_SCRESCTRL1_USB_HRST (1 << 2)
-#define SCTL_SCRESCTRL1_USB_PHY_HOST (1 << 3)
-#define SCTL_SCRESCTRL1_SATAII (1 << 4)
-#define SCTL_SCRESCTRL1_VIP (1 << 5)
-#define SCTL_SCRESCTRL1_PER_MMC0 (1 << 6)
-#define SCTL_SCRESCTRL1_PER_MMC1 (1 << 7)
-#define SCTL_SCRESCTRL1_PER_GPIO0 (1 << 8)
-#define SCTL_SCRESCTRL1_PER_GPIO1 (1 << 9)
-#define SCTL_SCRESCTRL1_PER_GPIO2 (1 << 10)
-#define SCTL_SCRESCTRL1_PER_GPIO3 (1 << 11)
-#define SCTL_SCRESCTRL1_PER_MTU0 (1 << 12)
-#define SCTL_SCRESCTRL1_KER_SPI0 (1 << 13)
-#define SCTL_SCRESCTRL1_KER_SPI1 (1 << 14)
-#define SCTL_SCRESCTRL1_KER_SPI2 (1 << 15)
-#define SCTL_SCRESCTRL1_KER_MCI0 (1 << 16)
-#define SCTL_SCRESCTRL1_KER_MCI1 (1 << 17)
-#define SCTL_SCRESCTRL1_PRE_HSI2C0 (1 << 18)
-#define SCTL_SCRESCTRL1_PER_HSI2C1 (1 << 19)
-#define SCTL_SCRESCTRL1_PER_HSI2C2 (1 << 20)
-#define SCTL_SCRESCTRL1_PER_HSI2C3 (1 << 21)
-#define SCTL_SCRESCTRL1_PER_MSP0 (1 << 22)
-#define SCTL_SCRESCTRL1_PER_MSP1 (1 << 23)
-#define SCTL_SCRESCTRL1_PER_MSP2 (1 << 24)
-#define SCTL_SCRESCTRL1_PER_MSP3 (1 << 25)
-#define SCTL_SCRESCTRL1_PER_MSP4 (1 << 26)
-#define SCTL_SCRESCTRL1_PER_MSP5 (1 << 27)
-#define SCTL_SCRESCTRL1_PER_MMC (1 << 28)
-#define SCTL_SCRESCTRL1_KER_MSP0 (1 << 29)
-#define SCTL_SCRESCTRL1_KER_MSP1 (1 << 30)
-#define SCTL_SCRESCTRL1_KER_MSP2 (1 << 31)
-
-#define SCTL_SCPEREN0_UART0 (1 << 0)
-#define SCTL_SCPEREN0_UART1 (1 << 1)
-#define SCTL_SCPEREN0_UART2 (1 << 2)
-#define SCTL_SCPEREN0_UART3 (1 << 3)
-#define SCTL_SCPEREN0_MSP0 (1 << 4)
-#define SCTL_SCPEREN0_MSP1 (1 << 5)
-#define SCTL_SCPEREN0_MSP2 (1 << 6)
-#define SCTL_SCPEREN0_MSP3 (1 << 7)
-#define SCTL_SCPEREN0_MSP4 (1 << 8)
-#define SCTL_SCPEREN0_MSP5 (1 << 9)
-#define SCTL_SCPEREN0_SPI0 (1 << 10)
-#define SCTL_SCPEREN0_SPI1 (1 << 11)
-#define SCTL_SCPEREN0_SPI2 (1 << 12)
-#define SCTL_SCPEREN0_I2C0 (1 << 13)
-#define SCTL_SCPEREN0_I2C1 (1 << 14)
-#define SCTL_SCPEREN0_I2C2 (1 << 15)
-#define SCTL_SCPEREN0_I2C3 (1 << 16)
-#define SCTL_SCPEREN0_SVDO_LVDS (1 << 17)
-#define SCTL_SCPEREN0_USB_HOST (1 << 18)
-#define SCTL_SCPEREN0_USB_OTG (1 << 19)
-#define SCTL_SCPEREN0_MCI0 (1 << 20)
-#define SCTL_SCPEREN0_MCI1 (1 << 21)
-#define SCTL_SCPEREN0_MCI2 (1 << 22)
-#define SCTL_SCPEREN0_MCI3 (1 << 23)
-#define SCTL_SCPEREN0_SATA (1 << 24)
-#define SCTL_SCPEREN0_ETHERNET (1 << 25)
-#define SCTL_SCPEREN0_VIC (1 << 26)
-#define SCTL_SCPEREN0_DMA_AUDIO (1 << 27)
-#define SCTL_SCPEREN0_DMA_SOC (1 << 28)
-#define SCTL_SCPEREN0_RAM (1 << 29)
-#define SCTL_SCPEREN0_VIP (1 << 30)
-#define SCTL_SCPEREN0_ARM (1 << 31)
-
-#define SCTL_SCPEREN1_UART0 (1 << 0)
-#define SCTL_SCPEREN1_UART1 (1 << 1)
-#define SCTL_SCPEREN1_UART2 (1 << 2)
-#define SCTL_SCPEREN1_UART3 (1 << 3)
-#define SCTL_SCPEREN1_MSP0 (1 << 4)
-#define SCTL_SCPEREN1_MSP1 (1 << 5)
-#define SCTL_SCPEREN1_MSP2 (1 << 6)
-#define SCTL_SCPEREN1_MSP3 (1 << 7)
-#define SCTL_SCPEREN1_MSP4 (1 << 8)
-#define SCTL_SCPEREN1_MSP5 (1 << 9)
-#define SCTL_SCPEREN1_SPI0 (1 << 10)
-#define SCTL_SCPEREN1_SPI1 (1 << 11)
-#define SCTL_SCPEREN1_SPI2 (1 << 12)
-#define SCTL_SCPEREN1_I2C0 (1 << 13)
-#define SCTL_SCPEREN1_I2C1 (1 << 14)
-#define SCTL_SCPEREN1_I2C2 (1 << 15)
-#define SCTL_SCPEREN1_I2C3 (1 << 16)
-#define SCTL_SCPEREN1_USB_PHY (1 << 17)
-
-/*
- * APB-SOC registers
- */
-static inline
-u32 sta2x11_apb_soc_regs_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
-{
- return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apb_soc_regs);
-}
-
-#define PCIE_EP1_FUNC3_0_INTR_REG 0x000
-#define PCIE_EP1_FUNC7_4_INTR_REG 0x004
-#define PCIE_EP2_FUNC3_0_INTR_REG 0x008
-#define PCIE_EP2_FUNC7_4_INTR_REG 0x00c
-#define PCIE_EP3_FUNC3_0_INTR_REG 0x010
-#define PCIE_EP3_FUNC7_4_INTR_REG 0x014
-#define PCIE_EP4_FUNC3_0_INTR_REG 0x018
-#define PCIE_EP4_FUNC7_4_INTR_REG 0x01c
-#define PCIE_INTR_ENABLE0_REG 0x020
-#define PCIE_INTR_ENABLE1_REG 0x024
-#define PCIE_EP1_FUNC_TC_REG 0x028
-#define PCIE_EP2_FUNC_TC_REG 0x02c
-#define PCIE_EP3_FUNC_TC_REG 0x030
-#define PCIE_EP4_FUNC_TC_REG 0x034
-#define PCIE_EP1_FUNC_F_REG 0x038
-#define PCIE_EP2_FUNC_F_REG 0x03c
-#define PCIE_EP3_FUNC_F_REG 0x040
-#define PCIE_EP4_FUNC_F_REG 0x044
-#define PCIE_PAB_AMBA_SW_RST_REG 0x048
-#define PCIE_PM_STATUS_0_PORT_0_4 0x04c
-#define PCIE_PM_STATUS_7_0_EP1 0x050
-#define PCIE_PM_STATUS_7_0_EP2 0x054
-#define PCIE_PM_STATUS_7_0_EP3 0x058
-#define PCIE_PM_STATUS_7_0_EP4 0x05c
-#define PCIE_DEV_ID_0_EP1_REG 0x060
-#define PCIE_CC_REV_ID_0_EP1_REG 0x064
-#define PCIE_DEV_ID_1_EP1_REG 0x068
-#define PCIE_CC_REV_ID_1_EP1_REG 0x06c
-#define PCIE_DEV_ID_2_EP1_REG 0x070
-#define PCIE_CC_REV_ID_2_EP1_REG 0x074
-#define PCIE_DEV_ID_3_EP1_REG 0x078
-#define PCIE_CC_REV_ID_3_EP1_REG 0x07c
-#define PCIE_DEV_ID_4_EP1_REG 0x080
-#define PCIE_CC_REV_ID_4_EP1_REG 0x084
-#define PCIE_DEV_ID_5_EP1_REG 0x088
-#define PCIE_CC_REV_ID_5_EP1_REG 0x08c
-#define PCIE_DEV_ID_6_EP1_REG 0x090
-#define PCIE_CC_REV_ID_6_EP1_REG 0x094
-#define PCIE_DEV_ID_7_EP1_REG 0x098
-#define PCIE_CC_REV_ID_7_EP1_REG 0x09c
-#define PCIE_DEV_ID_0_EP2_REG 0x0a0
-#define PCIE_CC_REV_ID_0_EP2_REG 0x0a4
-#define PCIE_DEV_ID_1_EP2_REG 0x0a8
-#define PCIE_CC_REV_ID_1_EP2_REG 0x0ac
-#define PCIE_DEV_ID_2_EP2_REG 0x0b0
-#define PCIE_CC_REV_ID_2_EP2_REG 0x0b4
-#define PCIE_DEV_ID_3_EP2_REG 0x0b8
-#define PCIE_CC_REV_ID_3_EP2_REG 0x0bc
-#define PCIE_DEV_ID_4_EP2_REG 0x0c0
-#define PCIE_CC_REV_ID_4_EP2_REG 0x0c4
-#define PCIE_DEV_ID_5_EP2_REG 0x0c8
-#define PCIE_CC_REV_ID_5_EP2_REG 0x0cc
-#define PCIE_DEV_ID_6_EP2_REG 0x0d0
-#define PCIE_CC_REV_ID_6_EP2_REG 0x0d4
-#define PCIE_DEV_ID_7_EP2_REG 0x0d8
-#define PCIE_CC_REV_ID_7_EP2_REG 0x0dC
-#define PCIE_DEV_ID_0_EP3_REG 0x0e0
-#define PCIE_CC_REV_ID_0_EP3_REG 0x0e4
-#define PCIE_DEV_ID_1_EP3_REG 0x0e8
-#define PCIE_CC_REV_ID_1_EP3_REG 0x0ec
-#define PCIE_DEV_ID_2_EP3_REG 0x0f0
-#define PCIE_CC_REV_ID_2_EP3_REG 0x0f4
-#define PCIE_DEV_ID_3_EP3_REG 0x0f8
-#define PCIE_CC_REV_ID_3_EP3_REG 0x0fc
-#define PCIE_DEV_ID_4_EP3_REG 0x100
-#define PCIE_CC_REV_ID_4_EP3_REG 0x104
-#define PCIE_DEV_ID_5_EP3_REG 0x108
-#define PCIE_CC_REV_ID_5_EP3_REG 0x10c
-#define PCIE_DEV_ID_6_EP3_REG 0x110
-#define PCIE_CC_REV_ID_6_EP3_REG 0x114
-#define PCIE_DEV_ID_7_EP3_REG 0x118
-#define PCIE_CC_REV_ID_7_EP3_REG 0x11c
-#define PCIE_DEV_ID_0_EP4_REG 0x120
-#define PCIE_CC_REV_ID_0_EP4_REG 0x124
-#define PCIE_DEV_ID_1_EP4_REG 0x128
-#define PCIE_CC_REV_ID_1_EP4_REG 0x12c
-#define PCIE_DEV_ID_2_EP4_REG 0x130
-#define PCIE_CC_REV_ID_2_EP4_REG 0x134
-#define PCIE_DEV_ID_3_EP4_REG 0x138
-#define PCIE_CC_REV_ID_3_EP4_REG 0x13c
-#define PCIE_DEV_ID_4_EP4_REG 0x140
-#define PCIE_CC_REV_ID_4_EP4_REG 0x144
-#define PCIE_DEV_ID_5_EP4_REG 0x148
-#define PCIE_CC_REV_ID_5_EP4_REG 0x14c
-#define PCIE_DEV_ID_6_EP4_REG 0x150
-#define PCIE_CC_REV_ID_6_EP4_REG 0x154
-#define PCIE_DEV_ID_7_EP4_REG 0x158
-#define PCIE_CC_REV_ID_7_EP4_REG 0x15c
-#define PCIE_SUBSYS_VEN_ID_REG 0x160
-#define PCIE_COMMON_CLOCK_CONFIG_0_4_0 0x164
-#define PCIE_MIPHYP_SSC_EN_REG 0x168
-#define PCIE_MIPHYP_ADDR_REG 0x16c
-#define PCIE_L1_ASPM_READY_REG 0x170
-#define PCIE_EXT_CFG_RDY_REG 0x174
-#define PCIE_SoC_INT_ROUTER_STATUS0_REG 0x178
-#define PCIE_SoC_INT_ROUTER_STATUS1_REG 0x17c
-#define PCIE_SoC_INT_ROUTER_STATUS2_REG 0x180
-#define PCIE_SoC_INT_ROUTER_STATUS3_REG 0x184
-#define DMA_IP_CTRL_REG 0x324
-#define DISP_BRIDGE_PU_PD_CTRL_REG 0x328
-#define VIP_PU_PD_CTRL_REG 0x32c
-#define USB_MLB_PU_PD_CTRL_REG 0x330
-#define SDIO_PU_PD_MISCFUNC_CTRL_REG1 0x334
-#define SDIO_PU_PD_MISCFUNC_CTRL_REG2 0x338
-#define UART_PU_PD_CTRL_REG 0x33c
-#define ARM_Lock 0x340
-#define SYS_IO_CHAR_REG1 0x344
-#define SYS_IO_CHAR_REG2 0x348
-#define SATA_CORE_ID_REG 0x34c
-#define SATA_CTRL_REG 0x350
-#define I2C_HSFIX_MISC_REG 0x354
-#define SPARE2_RESERVED 0x358
-#define SPARE3_RESERVED 0x35c
-#define MASTER_LOCK_REG 0x368
-#define SYSTEM_CONFIG_STATUS_REG 0x36c
-#define MSP_CLK_CTRL_REG 0x39c
-#define COMPENSATION_REG1 0x3c4
-#define COMPENSATION_REG2 0x3c8
-#define COMPENSATION_REG3 0x3cc
-#define TEST_CTL_REG 0x3d0
-
-/*
- * SECR (OTP) registers
- */
-#define STA2X11_SECR_CR 0x00
-#define STA2X11_SECR_FVR0 0x10
-#define STA2X11_SECR_FVR1 0x14
-
-extern int sta2x11_mfd_get_regs_data(struct platform_device *pdev,
- enum sta2x11_mfd_plat_dev index,
- void __iomem **regs,
- spinlock_t **lock);
-
-#endif /* __STA2X11_MFD_H */
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
new file mode 100644
index 000000000000..a592c8dc716d
--- /dev/null
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * STM32 Low-Power Timer parent driver.
+ * Copyright (C) STMicroelectronics 2017
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
+ * Inspired by Benjamin Gaignard's stm32-timers driver
+ */
+
+#ifndef _LINUX_STM32_LPTIMER_H_
+#define _LINUX_STM32_LPTIMER_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#define STM32_LPTIM_ISR 0x00 /* Interrupt and Status Reg */
+#define STM32_LPTIM_ICR 0x04 /* Interrupt Clear Reg */
+#define STM32_LPTIM_IER 0x08 /* Interrupt Enable Reg */
+#define STM32_LPTIM_CFGR 0x0C /* Configuration Reg */
+#define STM32_LPTIM_CR 0x10 /* Control Reg */
+#define STM32_LPTIM_CMP 0x14 /* Compare Reg (MP25 CCR1) */
+#define STM32_LPTIM_ARR 0x18 /* Autoreload Reg */
+#define STM32_LPTIM_CNT 0x1C /* Counter Reg */
+#define STM32_LPTIM_CCMR1 0x2C /* Capture/Compare Mode MP25 */
+#define STM32_LPTIM_CCR2 0x34 /* Compare Reg2 MP25 */
+
+#define STM32_LPTIM_HWCFGR2 0x3EC /* Hardware configuration register 2 - MP25 */
+#define STM32_LPTIM_HWCFGR1 0x3F0 /* Hardware configuration register 1 - MP15 */
+#define STM32_LPTIM_VERR 0x3F4 /* Version identification register - MP15 */
+
+/* STM32_LPTIM_ISR - bit fields */
+#define STM32_LPTIM_DIEROK_ARROK (BIT(24) | BIT(4)) /* MP25 */
+#define STM32_LPTIM_CMP2_ARROK (BIT(19) | BIT(4))
+#define STM32_LPTIM_CMPOK_ARROK GENMASK(4, 3)
+#define STM32_LPTIM_ARROK BIT(4)
+#define STM32_LPTIM_CMPOK BIT(3)
+
+/* STM32_LPTIM_ICR - bit fields */
+#define STM32_LPTIM_DIEROKCF_ARROKCF (BIT(24) | BIT(4)) /* MP25 */
+#define STM32_LPTIM_CMP2OKCF_ARROKCF (BIT(19) | BIT(4))
+#define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3)
+#define STM32_LPTIM_ARRMCF BIT(1)
+
+/* STM32_LPTIM_IER - bit fields */
+#define STM32_LPTIM_ARRMIE BIT(1)
+
+/* STM32_LPTIM_CR - bit fields */
+#define STM32_LPTIM_CNTSTRT BIT(2)
+#define STM32_LPTIM_SNGSTRT BIT(1)
+#define STM32_LPTIM_ENABLE BIT(0)
+
+/* STM32_LPTIM_CFGR - bit fields */
+#define STM32_LPTIM_ENC BIT(24)
+#define STM32_LPTIM_COUNTMODE BIT(23)
+#define STM32_LPTIM_WAVPOL BIT(21)
+#define STM32_LPTIM_PRESC GENMASK(11, 9)
+#define STM32_LPTIM_CKPOL GENMASK(2, 1)
+
+/* STM32_LPTIM_CKPOL */
+#define STM32_LPTIM_CKPOL_RISING_EDGE 0
+#define STM32_LPTIM_CKPOL_FALLING_EDGE 1
+#define STM32_LPTIM_CKPOL_BOTH_EDGES 2
+
+/* STM32_LPTIM_ARR */
+#define STM32_LPTIM_MAX_ARR 0xFFFF
+
+/* STM32_LPTIM_CCMR1 */
+#define STM32_LPTIM_CC2P GENMASK(19, 18)
+#define STM32_LPTIM_CC2E BIT(17)
+#define STM32_LPTIM_CC2SEL BIT(16)
+#define STM32_LPTIM_CC1P GENMASK(3, 2)
+#define STM32_LPTIM_CC1E BIT(1)
+#define STM32_LPTIM_CC1SEL BIT(0)
+
+/* STM32_LPTIM_HWCFGR1 */
+#define STM32_LPTIM_HWCFGR1_ENCODER BIT(16)
+
+/* STM32_LPTIM_HWCFGR2 */
+#define STM32_LPTIM_HWCFGR2_CHAN_NUM GENMASK(3, 0)
+
+/* STM32_LPTIM_VERR */
+#define STM32_LPTIM_VERR_23 0x23 /* STM32MP25 */
+
+/**
+ * struct stm32_lptimer - STM32 Low-Power Timer data assigned by parent device
+ * @clk: clock reference for this instance
+ * @regmap: register map reference for this instance
+ * @has_encoder: indicates this Low-Power Timer supports encoder mode
+ * @num_cc_chans: indicates the number of capture/compare channels
+ * @version: indicates the major and minor revision of the controller
+ */
+struct stm32_lptimer {
+ struct clk *clk;
+ struct regmap *regmap;
+ bool has_encoder;
+ unsigned int num_cc_chans;
+ u32 version;
+};
+
+#endif
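
Reviewer note: a sketch of how the hardware-configuration registers above can populate struct stm32_lptimer, assuming the parent driver probes features at init; error handling is omitted and the register availability (MP15/MP25) is as per the comments above:

#include <linux/bitfield.h>
#include <linux/mfd/stm32-lptimer.h>

static void stm32_lptim_detect_example(struct stm32_lptimer *ddata)
{
	u32 val = 0;

	/* HWCFGR1 advertises encoder support */
	regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR1, &val);
	ddata->has_encoder = !!(val & STM32_LPTIM_HWCFGR1_ENCODER);

	/* HWCFGR2 advertises the capture/compare channel count */
	regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR2, &val);
	ddata->num_cc_chans = FIELD_GET(STM32_LPTIM_HWCFGR2_CHAN_NUM, val);
}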
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index ce7346e7f77a..23b0cae4a9f8 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -1,75 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_GPTIMER_H_
#define _LINUX_STM32_GPTIMER_H_
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/regmap.h>
-#define TIM_CR1 0x00 /* Control Register 1 */
-#define TIM_CR2 0x04 /* Control Register 2 */
-#define TIM_SMCR 0x08 /* Slave mode control reg */
-#define TIM_DIER 0x0C /* DMA/interrupt register */
-#define TIM_SR 0x10 /* Status register */
-#define TIM_EGR 0x14 /* Event Generation Reg */
-#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
-#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
-#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
-#define TIM_CNT 0x24 /* Counter */
-#define TIM_PSC 0x28 /* Prescaler */
-#define TIM_ARR 0x2c /* Auto-Reload Register */
-#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
-#define TIM_CCR2 0x38 /* Capt/Comp Register 2 */
-#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */
-#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */
-#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
+#define TIM_CR1 0x00 /* Control Register 1 */
+#define TIM_CR2 0x04 /* Control Register 2 */
+#define TIM_SMCR 0x08 /* Slave mode control reg */
+#define TIM_DIER 0x0C /* DMA/interrupt register */
+#define TIM_SR 0x10 /* Status register */
+#define TIM_EGR 0x14 /* Event Generation Reg */
+#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
+#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
+#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
+#define TIM_CNT 0x24 /* Counter */
+#define TIM_PSC 0x28 /* Prescaler */
+#define TIM_ARR 0x2c /* Auto-Reload Register */
+#define TIM_CCRx(x) (0x34 + 4 * ((x) - 1)) /* Capt/Comp Register x (x ∈ {1, .. 4}) */
+#define TIM_CCR1 TIM_CCRx(1) /* Capt/Comp Register 1 */
+#define TIM_CCR2 TIM_CCRx(2) /* Capt/Comp Register 2 */
+#define TIM_CCR3 TIM_CCRx(3) /* Capt/Comp Register 3 */
+#define TIM_CCR4 TIM_CCRx(4) /* Capt/Comp Register 4 */
+#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
+#define TIM_DCR 0x48 /* DMA control register */
+#define TIM_DMAR 0x4C /* DMA register for transfer */
+#define TIM_TISEL 0x68 /* Input Selection */
+#define TIM_HWCFGR2 0x3EC /* hardware configuration 2 Reg (MP25) */
+#define TIM_HWCFGR1 0x3F0 /* hardware configuration 1 Reg (MP25) */
+#define TIM_IPIDR 0x3F8 /* IP identification Reg (MP25) */
-#define TIM_CR1_CEN BIT(0) /* Counter Enable */
-#define TIM_CR1_DIR BIT(4) /* Counter Direction */
-#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
-#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
-#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */
-#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
-#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
-#define TIM_DIER_UIE BIT(0) /* Update interrupt */
-#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
-#define TIM_EGR_UG BIT(0) /* Update Generation */
-#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
-#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
-#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */
-#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */
-#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */
-#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */
-#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */
-#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */
-#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */
-#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
-#define TIM_BDTR_BKE BIT(12) /* Break input enable */
-#define TIM_BDTR_BKP BIT(13) /* Break input polarity */
-#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
-#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
-#define TIM_BDTR_BKF (BIT(16) | BIT(17) | BIT(18) | BIT(19))
-#define TIM_BDTR_BK2F (BIT(20) | BIT(21) | BIT(22) | BIT(23))
-#define TIM_BDTR_BK2E BIT(24) /* Break 2 input enable */
-#define TIM_BDTR_BK2P BIT(25) /* Break 2 input polarity */
+#define TIM_CR1_CEN BIT(0) /* Counter Enable */
+#define TIM_CR1_DIR BIT(4) /* Counter Direction */
+#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
+#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
+#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */
+#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
+#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
+#define TIM_DIER_UIE BIT(0) /* Update interrupt */
+#define TIM_DIER_CCxIE(x) BIT(1 + ((x) - 1)) /* CCx Interrupt Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1IE TIM_DIER_CCxIE(1) /* CC1 Interrupt Enable */
+#define TIM_DIER_CC2IE TIM_DIER_CCxIE(2) /* CC2 Interrupt Enable */
+#define TIM_DIER_CC3IE TIM_DIER_CCxIE(3) /* CC3 Interrupt Enable */
+#define TIM_DIER_CC4IE TIM_DIER_CCxIE(4) /* CC4 Interrupt Enable */
+#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
+#define TIM_DIER_CCxDE(x) BIT(9 + ((x) - 1)) /* CCx DMA request Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1DE TIM_DIER_CCxDE(1) /* CC1 DMA request Enable */
+#define TIM_DIER_CC2DE TIM_DIER_CCxDE(2) /* CC2 DMA request Enable */
+#define TIM_DIER_CC3DE TIM_DIER_CCxDE(3) /* CC3 DMA request Enable */
+#define TIM_DIER_CC4DE TIM_DIER_CCxDE(4) /* CC4 DMA request Enable */
+#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
+#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
+#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
+#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */
+#define TIM_EGR_UG BIT(0) /* Update Generation */
+#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
+#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
+#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
+#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
+#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
+#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
+#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
+#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
+#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */
+#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */
+#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */
+#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */
+#define TIM_CCER_CCxE(x) BIT(0 + 4 * ((x) - 1)) /* Capt/Comp x out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxP(x) BIT(1 + 4 * ((x) - 1)) /* Capt/Comp x Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNE(x) BIT(2 + 4 * ((x) - 1)) /* Capt/Comp xN out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNP(x) BIT(3 + 4 * ((x) - 1)) /* Capt/Comp xN Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CC1E TIM_CCER_CCxE(1) /* Capt/Comp 1 out Ena */
+#define TIM_CCER_CC1P TIM_CCER_CCxP(1) /* Capt/Comp 1 Polarity */
+#define TIM_CCER_CC1NE TIM_CCER_CCxNE(1) /* Capt/Comp 1N out Ena */
+#define TIM_CCER_CC1NP TIM_CCER_CCxNP(1) /* Capt/Comp 1N Polarity */
+#define TIM_CCER_CC2E TIM_CCER_CCxE(2) /* Capt/Comp 2 out Ena */
+#define TIM_CCER_CC2P TIM_CCER_CCxP(2) /* Capt/Comp 2 Polarity */
+#define TIM_CCER_CC2NE TIM_CCER_CCxNE(2) /* Capt/Comp 2N out Ena */
+#define TIM_CCER_CC2NP TIM_CCER_CCxNP(2) /* Capt/Comp 2N Polarity */
+#define TIM_CCER_CC3E TIM_CCER_CCxE(3) /* Capt/Comp 3 out Ena */
+#define TIM_CCER_CC3P TIM_CCER_CCxP(3) /* Capt/Comp 3 Polarity */
+#define TIM_CCER_CC3NE TIM_CCER_CCxNE(3) /* Capt/Comp 3N out Ena */
+#define TIM_CCER_CC3NP TIM_CCER_CCxNP(3) /* Capt/Comp 3N Polarity */
+#define TIM_CCER_CC4E TIM_CCER_CCxE(4) /* Capt/Comp 4 out Ena */
+#define TIM_CCER_CC4P TIM_CCER_CCxP(4) /* Capt/Comp 4 Polarity */
+#define TIM_CCER_CC4NE TIM_CCER_CCxNE(4) /* Capt/Comp 4N out Ena */
+#define TIM_CCER_CC4NP TIM_CCER_CCxNP(4) /* Capt/Comp 4N Polarity */
+#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
+#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
+#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
+#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
+#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
+#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
+#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
+#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
+#define TIM_HWCFGR1_NB_OF_CC GENMASK(3, 0) /* Capture/compare channels */
+#define TIM_HWCFGR1_NB_OF_DT GENMASK(7, 4) /* Complementary outputs & dead-time generators */
+#define TIM_HWCFGR2_CNT_WIDTH GENMASK(15, 8) /* Counter width */
-#define MAX_TIM_PSC 0xFFFF
-#define TIM_CR2_MMS_SHIFT 4
-#define TIM_CR2_MMS2_SHIFT 20
-#define TIM_SMCR_TS_SHIFT 4
-#define TIM_BDTR_BKF_MASK 0xF
-#define TIM_BDTR_BKF_SHIFT 16
-#define TIM_BDTR_BK2F_SHIFT 20
+#define MAX_TIM_PSC 0xFFFF
+#define MAX_TIM_ICPSC 0x3
+#define TIM_CR2_MMS_SHIFT 4
+#define TIM_CR2_MMS2_SHIFT 20
+#define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */
+#define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */
+#define TIM_SMCR_TS_SHIFT 4
+#define TIM_BDTR_BKF_MASK 0xF
+#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
+
+#define STM32MP25_TIM_IPIDR 0x00120002
+
+enum stm32_timers_dmas {
+ STM32_TIMERS_DMA_CH1,
+ STM32_TIMERS_DMA_CH2,
+ STM32_TIMERS_DMA_CH3,
+ STM32_TIMERS_DMA_CH4,
+ STM32_TIMERS_DMA_UP,
+ STM32_TIMERS_DMA_TRIG,
+ STM32_TIMERS_DMA_COM,
+ STM32_TIMERS_MAX_DMAS,
+};
+
+/* STM32 Timer may have either a single global interrupt or 4 dedicated interrupt lines */
+enum stm32_timers_irqs {
+ STM32_TIMERS_IRQ_GLOBAL_BRK, /* global or brk IRQ */
+ STM32_TIMERS_IRQ_UP,
+ STM32_TIMERS_IRQ_TRG_COM,
+ STM32_TIMERS_IRQ_CC,
+ STM32_TIMERS_MAX_IRQS,
+};
+
+/**
+ * struct stm32_timers_dma - STM32 timer DMA handling.
+ * @completion: end of DMA transfer completion
+ * @phys_base: control registers physical base address
+ * @lock: protect DMA access
+ * @chan: DMA channel in use
+ * @chans: DMA channels available for this timer instance
+ */
+struct stm32_timers_dma {
+ struct completion completion;
+ phys_addr_t phys_base;
+ struct mutex lock;
+ struct dma_chan *chan;
+ struct dma_chan *chans[STM32_TIMERS_MAX_DMAS];
+};
struct stm32_timers {
struct clk *clk;
+ u32 ipidr;
struct regmap *regmap;
u32 max_arr;
+ struct stm32_timers_dma dma; /* Only to be used by the parent */
+ unsigned int nr_irqs;
+ int irq[STM32_TIMERS_MAX_IRQS];
};
+
+#if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS)
+int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
+ enum stm32_timers_dmas id, u32 reg,
+ unsigned int num_reg, unsigned int bursts,
+ unsigned long tmo_ms);
+#else
+static inline int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
+ enum stm32_timers_dmas id,
+ u32 reg,
+ unsigned int num_reg,
+ unsigned int bursts,
+ unsigned long tmo_ms)
+{
+ return -ENODEV;
+}
+#endif
#endif
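
Reviewer note: a hedged usage sketch for stm32_timers_dma_burst_read() as declared above — one burst of the four capture/compare registers, triggered on the update DMA request. The buffer sizing and timeout are illustrative, and "mfd_parent" must be the MFD parent device per the API:

#include <linux/mfd/stm32-timers.h>

static int stm32_timers_read_ccrs_example(struct device *mfd_parent, u32 *buf)
{
	/* 1 burst of 4 registers starting at TIM_CCR1, 1000 ms timeout */
	return stm32_timers_dma_burst_read(mfd_parent, buf,
					   STM32_TIMERS_DMA_UP, TIM_CCR1,
					   4, 1, 1000);
}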
diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h
new file mode 100644
index 000000000000..967a2e486800
--- /dev/null
+++ b/include/linux/mfd/stmfx.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 STMicroelectronics
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>.
+ */
+
+#ifndef MFD_STMFX_H
+#define MFD_STMFX_H
+
+#include <linux/regmap.h>
+
+/* General */
+#define STMFX_REG_CHIP_ID 0x00 /* R */
+#define STMFX_REG_FW_VERSION_MSB 0x01 /* R */
+#define STMFX_REG_FW_VERSION_LSB 0x02 /* R */
+#define STMFX_REG_SYS_CTRL 0x40 /* RW */
+/* IRQ output management */
+#define STMFX_REG_IRQ_OUT_PIN 0x41 /* RW */
+#define STMFX_REG_IRQ_SRC_EN 0x42 /* RW */
+#define STMFX_REG_IRQ_PENDING 0x08 /* R */
+#define STMFX_REG_IRQ_ACK 0x44 /* RW */
+/* GPIO management */
+#define STMFX_REG_IRQ_GPI_PENDING1 0x0C /* R */
+#define STMFX_REG_IRQ_GPI_PENDING2 0x0D /* R */
+#define STMFX_REG_IRQ_GPI_PENDING3 0x0E /* R */
+#define STMFX_REG_GPIO_STATE1 0x10 /* R */
+#define STMFX_REG_GPIO_STATE2 0x11 /* R */
+#define STMFX_REG_GPIO_STATE3 0x12 /* R */
+#define STMFX_REG_IRQ_GPI_SRC1 0x48 /* RW */
+#define STMFX_REG_IRQ_GPI_SRC2 0x49 /* RW */
+#define STMFX_REG_IRQ_GPI_SRC3 0x4A /* RW */
+#define STMFX_REG_IRQ_GPI_EVT1 0x4C /* RW */
+#define STMFX_REG_IRQ_GPI_EVT2 0x4D /* RW */
+#define STMFX_REG_IRQ_GPI_EVT3 0x4E /* RW */
+#define STMFX_REG_IRQ_GPI_TYPE1 0x50 /* RW */
+#define STMFX_REG_IRQ_GPI_TYPE2 0x51 /* RW */
+#define STMFX_REG_IRQ_GPI_TYPE3 0x52 /* RW */
+#define STMFX_REG_IRQ_GPI_ACK1 0x54 /* RW */
+#define STMFX_REG_IRQ_GPI_ACK2 0x55 /* RW */
+#define STMFX_REG_IRQ_GPI_ACK3 0x56 /* RW */
+#define STMFX_REG_GPIO_DIR1 0x60 /* RW */
+#define STMFX_REG_GPIO_DIR2 0x61 /* RW */
+#define STMFX_REG_GPIO_DIR3 0x62 /* RW */
+#define STMFX_REG_GPIO_TYPE1 0x64 /* RW */
+#define STMFX_REG_GPIO_TYPE2 0x65 /* RW */
+#define STMFX_REG_GPIO_TYPE3 0x66 /* RW */
+#define STMFX_REG_GPIO_PUPD1 0x68 /* RW */
+#define STMFX_REG_GPIO_PUPD2 0x69 /* RW */
+#define STMFX_REG_GPIO_PUPD3 0x6A /* RW */
+#define STMFX_REG_GPO_SET1 0x6C /* RW */
+#define STMFX_REG_GPO_SET2 0x6D /* RW */
+#define STMFX_REG_GPO_SET3 0x6E /* RW */
+#define STMFX_REG_GPO_CLR1 0x70 /* RW */
+#define STMFX_REG_GPO_CLR2 0x71 /* RW */
+#define STMFX_REG_GPO_CLR3 0x72 /* RW */
+
+#define STMFX_REG_MAX 0xB0
+
+/* MFX boot time is around 10ms, so wait at least this delay after a reset */
+#define STMFX_BOOT_TIME_MS 10
+
+/* STMFX_REG_CHIP_ID bitfields */
+#define STMFX_REG_CHIP_ID_MASK GENMASK(7, 0)
+
+/* STMFX_REG_SYS_CTRL bitfields */
+#define STMFX_REG_SYS_CTRL_GPIO_EN BIT(0)
+#define STMFX_REG_SYS_CTRL_TS_EN BIT(1)
+#define STMFX_REG_SYS_CTRL_IDD_EN BIT(2)
+#define STMFX_REG_SYS_CTRL_ALTGPIO_EN BIT(3)
+#define STMFX_REG_SYS_CTRL_SWRST BIT(7)
+
+/* STMFX_REG_IRQ_OUT_PIN bitfields */
+#define STMFX_REG_IRQ_OUT_PIN_TYPE BIT(0) /* 0-OD 1-PP */
+#define STMFX_REG_IRQ_OUT_PIN_POL BIT(1) /* 0-active LOW 1-active HIGH */
+
+/* STMFX_REG_IRQ_(SRC_EN/PENDING/ACK) bit shift */
+enum stmfx_irqs {
+ STMFX_REG_IRQ_SRC_EN_GPIO = 0,
+ STMFX_REG_IRQ_SRC_EN_IDD,
+ STMFX_REG_IRQ_SRC_EN_ERROR,
+ STMFX_REG_IRQ_SRC_EN_TS_DET,
+ STMFX_REG_IRQ_SRC_EN_TS_NE,
+ STMFX_REG_IRQ_SRC_EN_TS_TH,
+ STMFX_REG_IRQ_SRC_EN_TS_FULL,
+ STMFX_REG_IRQ_SRC_EN_TS_OVF,
+ STMFX_REG_IRQ_SRC_MAX,
+};
+
+enum stmfx_functions {
+ STMFX_FUNC_GPIO = BIT(0), /* GPIO[15:0] */
+ STMFX_FUNC_ALTGPIO_LOW = BIT(1), /* aGPIO[3:0] */
+ STMFX_FUNC_ALTGPIO_HIGH = BIT(2), /* aGPIO[7:4] */
+ STMFX_FUNC_TS = BIT(3),
+ STMFX_FUNC_IDD = BIT(4),
+};
+
+/**
+ * struct stmfx - STMFX MFD structure
+ * @dev: device reference used for logs
+ * @map: register map
+ * @vdd: STMFX power supply
+ * @irq: interrupt line from the STMFX
+ * @irq_domain: IRQ domain
+ * @lock: IRQ bus lock
+ * @irq_src: cache of IRQ_SRC_EN register for bus_lock
+ * @bkp_sysctrl: backup of SYS_CTRL register for suspend/resume
+ * @bkp_irqoutpin: backup of IRQ_OUT_PIN register for suspend/resume
+ */
+struct stmfx {
+ struct device *dev;
+ struct regmap *map;
+ struct regulator *vdd;
+ int irq;
+ struct irq_domain *irq_domain;
+ struct mutex lock; /* IRQ bus lock */
+ u8 irq_src;
+ u8 bkp_sysctrl;
+ u8 bkp_irqoutpin;
+};
+
+int stmfx_function_enable(struct stmfx *stmfx, u32 func);
+int stmfx_function_disable(struct stmfx *stmfx, u32 func);
+#endif
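
For context, the two helpers exported above are how STMFX sub-drivers gate their function block on and off. A minimal sketch of a child probe, assuming the MFD core has registered the cell as a child of the STMFX device (the function name is illustrative, not part of this patch):

#include <linux/mfd/stmfx.h>
#include <linux/platform_device.h>

static int stmfx_child_probe(struct platform_device *pdev)
{
	/* The parent device's drvdata is the shared struct stmfx */
	struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
	int ret;

	/* Set SYS_CTRL.GPIO_EN before touching any GPIO register */
	ret = stmfx_function_enable(stmfx, STMFX_FUNC_GPIO);
	if (ret)
		return ret;

	/* ... register the gpiochip, request the IRQ, etc. ... */
	return 0;
}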
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 4a827af17e59..87e29d561e22 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
*
- * License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
@@ -10,6 +10,20 @@
#include <linux/mutex.h>
+#define STMPE_SAMPLE_TIME(x) ((x & 0xf) << 4)
+#define STMPE_MOD_12B(x) ((x & 0x1) << 3)
+#define STMPE_REF_SEL(x) ((x & 0x1) << 1)
+#define STMPE_ADC_FREQ(x) (x & 0x3)
+#define STMPE_AVE_CTRL(x) ((x & 0x3) << 6)
+#define STMPE_DET_DELAY(x) ((x & 0x7) << 3)
+#define STMPE_SETTLING(x) (x & 0x7)
+#define STMPE_FRACTION_Z(x) (x & 0x7)
+#define STMPE_I_DRIVE(x) (x & 0x1)
+#define STMPE_OP_MODE(x) ((x & 0x7) << 1)
+
+#define STMPE811_REG_ADC_CTRL1 0x20
+#define STMPE811_REG_ADC_CTRL2 0x21
+
struct device;
struct regulator;
@@ -123,6 +137,12 @@ struct stmpe {
u8 ier[2];
u8 oldier[2];
struct stmpe_platform_data *pdata;
+
+ /* For devices that use an ADC */
+ u8 sample_time;
+ u8 mod_12b;
+ u8 ref_sel;
+ u8 adc_freq;
};
extern int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 data);
@@ -136,6 +156,7 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins,
enum stmpe_block block);
extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks);
extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
+extern int stmpe811_adc_common_init(struct stmpe *stmpe);
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
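
The new STMPE_* helpers above pack ADC configuration fields into register values; a sketch of composing and writing the two ADC control registers (the field values are arbitrary examples, not recommended settings, and the function name is hypothetical):

#include <linux/mfd/stmpe.h>

static int stmpe811_adc_setup(struct stmpe *stmpe)
{
	u8 ctrl1, ctrl2;
	int ret;

	/* sample time field 6, 12-bit mode, internal reference */
	ctrl1 = STMPE_SAMPLE_TIME(6) | STMPE_MOD_12B(1) | STMPE_REF_SEL(0);
	/* ADC clock field 1 */
	ctrl2 = STMPE_ADC_FREQ(1);

	ret = stmpe_reg_write(stmpe, STMPE811_REG_ADC_CTRL1, ctrl1);
	if (ret)
		return ret;

	return stmpe_reg_write(stmpe, STMPE811_REG_ADC_CTRL2, ctrl2);
}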
diff --git a/include/linux/mfd/stpmic1.h b/include/linux/mfd/stpmic1.h
new file mode 100644
index 000000000000..dc00bac24f5a
--- /dev/null
+++ b/include/linux/mfd/stpmic1.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Philippe Peurichard <philippe.peurichard@st.com>,
+ * Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
+ */
+
+#ifndef __LINUX_MFD_STPMIC1_H
+#define __LINUX_MFD_STPMIC1_H
+
+#define TURN_ON_SR 0x1
+#define TURN_OFF_SR 0x2
+#define ICC_LDO_TURN_OFF_SR 0x3
+#define ICC_BUCK_TURN_OFF_SR 0x4
+#define RREQ_STATE_SR 0x5
+#define VERSION_SR 0x6
+
+#define MAIN_CR 0x10
+#define PADS_PULL_CR 0x11
+#define BUCKS_PD_CR 0x12
+#define LDO14_PD_CR 0x13
+#define LDO56_VREF_PD_CR 0x14
+#define VBUS_DET_VIN_CR 0x15
+#define PKEY_TURNOFF_CR 0x16
+#define BUCKS_MASK_RANK_CR 0x17
+#define BUCKS_MASK_RESET_CR 0x18
+#define LDOS_MASK_RANK_CR 0x19
+#define LDOS_MASK_RESET_CR 0x1A
+#define WCHDG_CR 0x1B
+#define WCHDG_TIMER_CR 0x1C
+#define BUCKS_ICCTO_CR 0x1D
+#define LDOS_ICCTO_CR 0x1E
+
+#define BUCK1_ACTIVE_CR 0x20
+#define BUCK2_ACTIVE_CR 0x21
+#define BUCK3_ACTIVE_CR 0x22
+#define BUCK4_ACTIVE_CR 0x23
+#define VREF_DDR_ACTIVE_CR 0x24
+#define LDO1_ACTIVE_CR 0x25
+#define LDO2_ACTIVE_CR 0x26
+#define LDO3_ACTIVE_CR 0x27
+#define LDO4_ACTIVE_CR 0x28
+#define LDO5_ACTIVE_CR 0x29
+#define LDO6_ACTIVE_CR 0x2A
+
+#define BUCK1_STDBY_CR 0x30
+#define BUCK2_STDBY_CR 0x31
+#define BUCK3_STDBY_CR 0x32
+#define BUCK4_STDBY_CR 0x33
+#define VREF_DDR_STDBY_CR 0x34
+#define LDO1_STDBY_CR 0x35
+#define LDO2_STDBY_CR 0x36
+#define LDO3_STDBY_CR 0x37
+#define LDO4_STDBY_CR 0x38
+#define LDO5_STDBY_CR 0x39
+#define LDO6_STDBY_CR 0x3A
+
+#define BST_SW_CR 0x40
+
+#define INT_PENDING_R1 0x50
+#define INT_PENDING_R2 0x51
+#define INT_PENDING_R3 0x52
+#define INT_PENDING_R4 0x53
+
+#define INT_DBG_LATCH_R1 0x60
+#define INT_DBG_LATCH_R2 0x61
+#define INT_DBG_LATCH_R3 0x62
+#define INT_DBG_LATCH_R4 0x63
+
+#define INT_CLEAR_R1 0x70
+#define INT_CLEAR_R2 0x71
+#define INT_CLEAR_R3 0x72
+#define INT_CLEAR_R4 0x73
+
+#define INT_MASK_R1 0x80
+#define INT_MASK_R2 0x81
+#define INT_MASK_R3 0x82
+#define INT_MASK_R4 0x83
+
+#define INT_SET_MASK_R1 0x90
+#define INT_SET_MASK_R2 0x91
+#define INT_SET_MASK_R3 0x92
+#define INT_SET_MASK_R4 0x93
+
+#define INT_CLEAR_MASK_R1 0xA0
+#define INT_CLEAR_MASK_R2 0xA1
+#define INT_CLEAR_MASK_R3 0xA2
+#define INT_CLEAR_MASK_R4 0xA3
+
+#define INT_SRC_R1 0xB0
+#define INT_SRC_R2 0xB1
+#define INT_SRC_R3 0xB2
+#define INT_SRC_R4 0xB3
+
+#define PMIC_MAX_REGISTER_ADDRESS INT_SRC_R4
+
+#define STPMIC1_PMIC_NUM_IRQ_REGS 4
+
+#define TURN_OFF_SR_ICC_EVENT 0x08
+
+#define LDO_VOLTAGE_MASK GENMASK(6, 2)
+#define BUCK_VOLTAGE_MASK GENMASK(7, 2)
+#define LDO_BUCK_VOLTAGE_SHIFT 2
+
+#define LDO_ENABLE_MASK BIT(0)
+#define BUCK_ENABLE_MASK BIT(0)
+
+#define BUCK_HPLP_ENABLE_MASK BIT(1)
+#define BUCK_HPLP_SHIFT 1
+
+#define STDBY_ENABLE_MASK BIT(0)
+
+#define BUCKS_PD_CR_REG_MASK GENMASK(7, 0)
+#define BUCK_MASK_RANK_REGISTER_MASK GENMASK(3, 0)
+#define BUCK_MASK_RESET_REGISTER_MASK GENMASK(3, 0)
+#define LDO1234_PULL_DOWN_REGISTER_MASK GENMASK(7, 0)
+#define LDO56_VREF_PD_CR_REG_MASK GENMASK(5, 0)
+#define LDO_MASK_RANK_REGISTER_MASK GENMASK(5, 0)
+#define LDO_MASK_RESET_REGISTER_MASK GENMASK(5, 0)
+
+#define BUCK1_PULL_DOWN_REG BUCKS_PD_CR
+#define BUCK1_PULL_DOWN_MASK BIT(0)
+#define BUCK2_PULL_DOWN_REG BUCKS_PD_CR
+#define BUCK2_PULL_DOWN_MASK BIT(2)
+#define BUCK3_PULL_DOWN_REG BUCKS_PD_CR
+#define BUCK3_PULL_DOWN_MASK BIT(4)
+#define BUCK4_PULL_DOWN_REG BUCKS_PD_CR
+#define BUCK4_PULL_DOWN_MASK BIT(6)
+
+#define LDO1_PULL_DOWN_REG LDO14_PD_CR
+#define LDO1_PULL_DOWN_MASK BIT(0)
+#define LDO2_PULL_DOWN_REG LDO14_PD_CR
+#define LDO2_PULL_DOWN_MASK BIT(2)
+#define LDO3_PULL_DOWN_REG LDO14_PD_CR
+#define LDO3_PULL_DOWN_MASK BIT(4)
+#define LDO4_PULL_DOWN_REG LDO14_PD_CR
+#define LDO4_PULL_DOWN_MASK BIT(6)
+#define LDO5_PULL_DOWN_REG LDO56_VREF_PD_CR
+#define LDO5_PULL_DOWN_MASK BIT(0)
+#define LDO6_PULL_DOWN_REG LDO56_VREF_PD_CR
+#define LDO6_PULL_DOWN_MASK BIT(2)
+#define VREF_DDR_PULL_DOWN_REG LDO56_VREF_PD_CR
+#define VREF_DDR_PULL_DOWN_MASK BIT(4)
+
+#define BUCKS_ICCTO_CR_REG_MASK GENMASK(6, 0)
+#define LDOS_ICCTO_CR_REG_MASK GENMASK(5, 0)
+
+#define LDO_BYPASS_MASK BIT(7)
+
+/* Main PMIC Control Register
+ * MAIN_CR
+ * Address : 0x10
+ */
+#define OCP_OFF_DBG BIT(4)
+#define PWRCTRL_POLARITY_HIGH BIT(3)
+#define PWRCTRL_ENABLE BIT(2)
+#define RESTART_REQUEST_ENABLE BIT(1)
+#define SOFTWARE_SWITCH_OFF BIT(0)
+
+/* Main PMIC PADS Control Register
+ * PADS_PULL_CR
+ * Address : 0x11
+ */
+#define WAKEUP_DETECTOR_DISABLED BIT(4)
+#define PWRCTRL_PD_ACTIVE BIT(3)
+#define PWRCTRL_PU_ACTIVE BIT(2)
+#define WAKEUP_PD_ACTIVE BIT(1)
+#define PONKEY_PU_INACTIVE BIT(0)
+
+/* Main PMIC VINLOW Control Register
+ * VBUS_DET_VIN_CR
+ * Address : 0x15
+ */
+#define SWIN_DETECTOR_ENABLED BIT(7)
+#define SWOUT_DETECTOR_ENABLED BIT(6)
+#define VINLOW_ENABLED BIT(0)
+#define VINLOW_CTRL_REG_MASK GENMASK(7, 0)
+
+/* USB Control Register
+ * Address : 0x40
+ */
+#define BOOST_OVP_DISABLED BIT(7)
+#define VBUS_OTG_DETECTION_DISABLED BIT(6)
+#define SW_OUT_DISCHARGE BIT(5)
+#define VBUS_OTG_DISCHARGE BIT(4)
+#define OCP_LIMIT_HIGH BIT(3)
+#define SWIN_SWOUT_ENABLED BIT(2)
+#define USBSW_OTG_SWITCH_ENABLED BIT(1)
+#define BOOST_ENABLED BIT(0)
+
+/* PKEY_TURNOFF_CR
+ * Address : 0x16
+ */
+#define PONKEY_PWR_OFF BIT(7)
+#define PONKEY_CC_FLAG_CLEAR BIT(6)
+#define PONKEY_TURNOFF_TIMER_MASK GENMASK(3, 0)
+#define PONKEY_TURNOFF_MASK GENMASK(7, 0)
+
+/**
+ * struct stpmic1 - stpmic1 master device for sub-drivers
+ * @dev: master device of the chip (can be used to access platform data)
+ * @regmap: register map
+ * @irq: main IRQ number
+ * @irq_data: regmap IRQ chip data
+ */
+struct stpmic1 {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif /* __LINUX_MFD_STPMIC1_H */
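
The voltage bitfields above are shared by all BUCKx/LDOx control registers, so a regulator sub-driver can program a raw selector generically. A sketch under that assumption (the helper name is made up for illustration; 'sel' is the datasheet selector, not a voltage in microvolts):

#include <linux/mfd/stpmic1.h>
#include <linux/regmap.h>

static int stpmic1_set_ldo_sel(struct stpmic1 *pmic, unsigned int reg,
			       unsigned int sel)
{
	/* Only touch the voltage field; leave enable/bypass bits alone */
	return regmap_update_bits(pmic->regmap, reg, LDO_VOLTAGE_MASK,
				  sel << LDO_BUCK_VOLTAGE_SHIFT);
}

/* e.g. stpmic1_set_ldo_sel(pmic, LDO1_ACTIVE_CR, sel); */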
diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h
index 833074b766bd..5312804666b3 100644
--- a/include/linux/mfd/stw481x.h
+++ b/include/linux/mfd/stw481x.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 ST-Ericsson SA
* Written on behalf of Linaro for ST-Ericsson
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef MFD_STW481X_H
#define MFD_STW481X_H
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h
index 139872c2e0fe..021f820f9d52 100644
--- a/include/linux/mfd/sun4i-gpadc.h
+++ b/include/linux/mfd/sun4i-gpadc.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Header of ADC MFD core driver for sunxi platforms
*
* Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
*/
#ifndef __SUN4I_GPADC__H__
@@ -84,8 +81,8 @@
#define SUN4I_GPADC_TEMP_DATA 0x20
#define SUN4I_GPADC_DATA 0x24
-#define SUN4I_GPADC_IRQ_FIFO_DATA 0
-#define SUN4I_GPADC_IRQ_TEMP_DATA 1
+#define SUN4I_GPADC_IRQ_FIFO_DATA 1
+#define SUN4I_GPADC_IRQ_TEMP_DATA 2
/* 10s delay before suspending the IP */
#define SUN4I_GPADC_AUTOSUSPEND_DELAY 10000
diff --git a/include/linux/mfd/sy7636a.h b/include/linux/mfd/sy7636a.h
new file mode 100644
index 000000000000..22f03b2f851e
--- /dev/null
+++ b/include/linux/mfd/sy7636a.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Functions to access SY7636A power management chip.
+ *
+ * Copyright (C) 2021 reMarkable AS - http://www.remarkable.com/
+ */
+
+#ifndef __MFD_SY7636A_H
+#define __MFD_SY7636A_H
+
+#define SY7636A_REG_OPERATION_MODE_CRL 0x00
+/* Set when a GPIO is used to control the regulator */
+#define SY7636A_OPERATION_MODE_CRL_VCOMCTL BIT(6)
+#define SY7636A_OPERATION_MODE_CRL_ONOFF BIT(7)
+#define SY7636A_REG_VCOM_ADJUST_CTRL_L 0x01
+#define SY7636A_REG_VCOM_ADJUST_CTRL_H 0x02
+#define SY7636A_REG_VCOM_ADJUST_CTRL_MASK 0x01ff
+#define SY7636A_REG_VLDO_VOLTAGE_ADJULST_CTRL 0x03
+#define SY7636A_REG_POWER_ON_DELAY_TIME 0x06
+#define SY7636A_REG_FAULT_FLAG 0x07
+#define SY7636A_FAULT_FLAG_PG BIT(0)
+#define SY7636A_REG_TERMISTOR_READOUT 0x08
+
+#define SY7636A_REG_MAX 0x08
+
+#define VCOM_ADJUST_CTRL_MASK 0x1ff
+// Used to shift the high byte
+#define VCOM_ADJUST_CTRL_SHIFT 8
+// Used to scale from VCOM_ADJUST_CTRL to mV
+#define VCOM_ADJUST_CTRL_SCAL 10000
+
+#define FAULT_FLAG_SHIFT 1
+
+#endif /* __MFD_SY7636A_H */
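
The 9-bit VCOM adjust value is split across the _L and _H registers, with bit 8 carried by the high register. A hedged sketch of reassembling the raw value (how it scales to a voltage via VCOM_ADJUST_CTRL_SCAL is left to the datasheet; the helper name is illustrative):

#include <linux/mfd/sy7636a.h>
#include <linux/regmap.h>

static int sy7636a_get_vcom_raw(struct regmap *regmap, unsigned int *vcom)
{
	unsigned int lo, hi;
	int ret;

	ret = regmap_read(regmap, SY7636A_REG_VCOM_ADJUST_CTRL_L, &lo);
	if (ret)
		return ret;
	ret = regmap_read(regmap, SY7636A_REG_VCOM_ADJUST_CTRL_H, &hi);
	if (ret)
		return ret;

	/* Bit 8 of the value lives in the high register */
	*vcom = (lo | (hi << VCOM_ADJUST_CTRL_SHIFT)) & VCOM_ADJUST_CTRL_MASK;
	return 0;
}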
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 40a76b97b7ab..aad9c6b50463 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* System Control Driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 2012 Linaro Ltd.
*
* Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MFD_SYSCON_H__
@@ -21,24 +17,31 @@
struct device_node;
#ifdef CONFIG_MFD_SYSCON
-extern struct regmap *syscon_node_to_regmap(struct device_node *np);
-extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
-extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s);
-extern struct regmap *syscon_regmap_lookup_by_phandle(
- struct device_node *np,
- const char *property);
+struct regmap *device_node_to_regmap(struct device_node *np);
+struct regmap *syscon_node_to_regmap(struct device_node *np);
+struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
+struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property);
+struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args);
+struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np,
+ const char *property);
+int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap);
#else
-static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
+static inline struct regmap *device_node_to_regmap(struct device_node *np)
{
return ERR_PTR(-ENOTSUPP);
}
-static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
+static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
{
return ERR_PTR(-ENOTSUPP);
}
-static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
+static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
{
return ERR_PTR(-ENOTSUPP);
}
@@ -49,6 +52,29 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle(
{
return ERR_PTR(-ENOTSUPP);
}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle_args(
+ struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle_optional(
+ struct device_node *np,
+ const char *property)
+{
+ return NULL;
+}
+
+static inline int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
#endif /* __LINUX_MFD_SYSCON_H__ */
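
Typical consumer usage of the syscon lookup API, for reference: a driver resolves a shared regmap from a phandle and then uses ordinary regmap accessors. The property name and register offset below are made up for illustration; note that the new _optional variant returns NULL rather than an error pointer when the property is absent.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int example_probe(struct device *dev)
{
	struct regmap *map;

	/* "vendor,syscon" is a hypothetical phandle property */
	map = syscon_regmap_lookup_by_phandle(dev->of_node, "vendor,syscon");
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Poke a bit in the shared system controller */
	return regmap_update_bits(map, 0x10, BIT(0), BIT(0));
}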
diff --git a/include/linux/mfd/syscon/atmel-matrix.h b/include/linux/mfd/syscon/atmel-matrix.h
index 8293c3e2a82a..20c25665216a 100644
--- a/include/linux/mfd/syscon/atmel-matrix.h
+++ b/include/linux/mfd/syscon/atmel-matrix.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2014 Atmel Corporation.
*
* Memory Controllers (MATRIX, EBI) - System peripherals registers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_MATRIX_H
@@ -110,7 +106,6 @@
#define AT91_MATRIX_DDR_IOSR BIT(18)
#define AT91_MATRIX_NFD0_SELECT BIT(24)
#define AT91_MATRIX_DDR_MP_EN BIT(25)
-#define AT91_MATRIX_EBI_NUM_CS 8
#define AT91_MATRIX_USBPUCR_PUON BIT(30)
diff --git a/include/linux/mfd/syscon/atmel-mc.h b/include/linux/mfd/syscon/atmel-mc.h
index afd9b8f1e363..99c56205c410 100644
--- a/include/linux/mfd/syscon/atmel-mc.h
+++ b/include/linux/mfd/syscon/atmel-mc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2005 Ivan Kokshaysky
* Copyright (C) SAN People
@@ -5,11 +6,6 @@
* Memory Controllers (MC, EBI, SMC, SDRAMC, BFC) - System peripherals
* registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_MC_H_
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index afa266169800..9b9119c742a2 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Atmel SMC (Static Memory Controller) register offsets and bit definitions.
*
@@ -5,31 +6,34 @@
* Copyright (C) 2014 Free Electrons
*
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
-#include <linux/kernel.h>
-#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct device_node;
+struct regmap;
#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10))
-#define ATMEL_HSMC_SETUP(cs) (0x600 + ((cs) * 0x14))
+#define ATMEL_HSMC_SETUP(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14))
#define ATMEL_SMC_PULSE(cs) (((cs) * 0x10) + 0x4)
-#define ATMEL_HSMC_PULSE(cs) (0x600 + ((cs) * 0x14) + 0x4)
+#define ATMEL_HSMC_PULSE(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x4)
#define ATMEL_SMC_CYCLE(cs) (((cs) * 0x10) + 0x8)
-#define ATMEL_HSMC_CYCLE(cs) (0x600 + ((cs) * 0x14) + 0x8)
+#define ATMEL_HSMC_CYCLE(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x8)
#define ATMEL_SMC_NWE_SHIFT 0
#define ATMEL_SMC_NCS_WR_SHIFT 8
#define ATMEL_SMC_NRD_SHIFT 16
#define ATMEL_SMC_NCS_RD_SHIFT 24
#define ATMEL_SMC_MODE(cs) (((cs) * 0x10) + 0xc)
-#define ATMEL_HSMC_MODE(cs) (0x600 + ((cs) * 0x14) + 0x10)
+#define ATMEL_HSMC_MODE(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x10)
#define ATMEL_SMC_MODE_READMODE_MASK BIT(0)
#define ATMEL_SMC_MODE_READMODE_NCS (0 << 0)
#define ATMEL_SMC_MODE_READMODE_NRD (1 << 0)
@@ -59,7 +63,8 @@
#define ATMEL_SMC_MODE_PS_16 (2 << 28)
#define ATMEL_SMC_MODE_PS_32 (3 << 28)
-#define ATMEL_HSMC_TIMINGS(cs) (0x600 + ((cs) * 0x14) + 0xc)
+#define ATMEL_HSMC_TIMINGS(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0xc)
#define ATMEL_HSMC_TIMINGS_OCMS BIT(12)
#define ATMEL_HSMC_TIMINGS_RBNSEL(x) ((x) << 28)
#define ATMEL_HSMC_TIMINGS_NFSEL BIT(31)
@@ -69,6 +74,10 @@
#define ATMEL_HSMC_TIMINGS_TRR_SHIFT 16
#define ATMEL_HSMC_TIMINGS_TWB_SHIFT 24
+struct atmel_hsmc_reg_layout {
+ unsigned int timing_regs_offset;
+};
+
/**
* struct atmel_smc_cs_conf - SMC CS config as described in the datasheet.
* @setup: NCS/NWE/NRD setup timings (not applicable to at91rm9200)
@@ -98,11 +107,15 @@ int atmel_smc_cs_conf_set_cycle(struct atmel_smc_cs_conf *conf,
unsigned int shift, unsigned int ncycles);
void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs,
const struct atmel_smc_cs_conf *conf);
-void atmel_hsmc_cs_conf_apply(struct regmap *regmap, int cs,
- const struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_apply(struct regmap *regmap,
+ const struct atmel_hsmc_reg_layout *reglayout,
+ int cs, const struct atmel_smc_cs_conf *conf);
void atmel_smc_cs_conf_get(struct regmap *regmap, int cs,
struct atmel_smc_cs_conf *conf);
-void atmel_hsmc_cs_conf_get(struct regmap *regmap, int cs,
- struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_get(struct regmap *regmap,
+ const struct atmel_hsmc_reg_layout *reglayout,
+ int cs, struct atmel_smc_cs_conf *conf);
+const struct atmel_hsmc_reg_layout *
+atmel_hsmc_get_reg_layout(struct device_node *np);
#endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */
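
With this change, HSMC callers must first resolve the register layout from the controller node and pass it to every accessor, instead of relying on the old hardcoded 0x600 offset. A minimal sketch of the new call sequence (error handling deliberately thin; the function name is illustrative):

#include <linux/err.h>
#include <linux/mfd/syscon/atmel-smc.h>

static int apply_hsmc_timings(struct device_node *np, struct regmap *regmap,
			      int cs, const struct atmel_smc_cs_conf *conf)
{
	const struct atmel_hsmc_reg_layout *layout;

	/* The timing_regs_offset now depends on the matched compatible */
	layout = atmel_hsmc_get_reg_layout(np);
	if (IS_ERR_OR_NULL(layout))
		return -EINVAL;

	atmel_hsmc_cs_conf_apply(regmap, layout, cs, conf);
	return 0;
}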
diff --git a/include/linux/mfd/syscon/atmel-st.h b/include/linux/mfd/syscon/atmel-st.h
index 8acf1ec1fa32..5b6013d0c440 100644
--- a/include/linux/mfd/syscon/atmel-st.h
+++ b/include/linux/mfd/syscon/atmel-st.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2005 Ivan Kokshaysky
* Copyright (C) SAN People
*
* System Timer (ST) - System peripherals registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_ST_H
diff --git a/include/linux/mfd/syscon/clps711x.h b/include/linux/mfd/syscon/clps711x.h
index 26355abae515..4c12850dec89 100644
--- a/include/linux/mfd/syscon/clps711x.h
+++ b/include/linux/mfd/syscon/clps711x.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* CLPS711X system register bits definitions
*
* Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_CLPS711X_H_
diff --git a/include/linux/mfd/syscon/exynos4-pmu.h b/include/linux/mfd/syscon/exynos4-pmu.h
deleted file mode 100644
index 278b1b1549e9..000000000000
--- a/include/linux/mfd/syscon/exynos4-pmu.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2015 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
-#define _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
-
-/* Exynos4 PMU register definitions */
-
-/* MIPI_PHYn_CONTROL register offset: n = 0..1 */
-#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x710 + (n) * 4)
-#define EXYNOS4_MIPI_PHY_ENABLE (1 << 0)
-#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
-#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
-#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
-
-#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ */
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
deleted file mode 100644
index b4942a32b81d..000000000000
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Exynos5 SoC series Power Management Unit (PMU) register offsets
- * and bit definitions.
- *
- * Copyright (C) 2014 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
-#define _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
-
-#define EXYNOS5_PHY_ENABLE BIT(0)
-#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
-#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2)
-
-#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index c8e0164c5423..09c6b3184bb0 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_IMX6Q_IOMUXC_GPR_H
@@ -243,6 +240,8 @@
#define IMX6Q_GPR4_IPU_RD_CACHE_CTL BIT(0)
#define IMX6Q_GPR5_L2_CLK_STOP BIT(8)
+#define IMX6Q_GPR5_SATA_SW_PD BIT(10)
+#define IMX6Q_GPR5_SATA_SW_RST BIT(11)
#define IMX6Q_GPR6_IPU1_ID00_WR_QOS_MASK (0xf << 0)
#define IMX6Q_GPR6_IPU1_ID01_WR_QOS_MASK (0xf << 4)
@@ -408,6 +407,15 @@
#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17)
#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13)
+#define IMX6SX_GPR2_MQS_OVERSAMPLE_MASK (0x1 << 26)
+#define IMX6SX_GPR2_MQS_OVERSAMPLE_SHIFT (26)
+#define IMX6SX_GPR2_MQS_EN_MASK (0x1 << 25)
+#define IMX6SX_GPR2_MQS_EN_SHIFT (25)
+#define IMX6SX_GPR2_MQS_SW_RST_MASK (0x1 << 24)
+#define IMX6SX_GPR2_MQS_SW_RST_SHIFT (24)
+#define IMX6SX_GPR2_MQS_CLK_DIV_MASK (0xFF << 16)
+#define IMX6SX_GPR2_MQS_CLK_DIV_SHIFT (16)
+
#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
@@ -438,12 +446,15 @@
#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
#define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30)
+#define IMX6SX_GPR12_PCIE_PM_TURN_OFF BIT(16)
#define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0)
#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)
/* For imx6ul iomux gpr register field define */
-#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
-#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
+#define IMX6UL_GPR1_ENET2_TX_CLK_DIR BIT(18)
+#define IMX6UL_GPR1_ENET1_TX_CLK_DIR BIT(17)
+#define IMX6UL_GPR1_ENET2_CLK_SEL BIT(14)
+#define IMX6UL_GPR1_ENET1_CLK_SEL BIT(13)
#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
@@ -455,4 +466,7 @@
#define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? \
IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR)
+/* For imx6sll iomux gpr register field define */
+#define IMX6SLL_GPR5_AFCG_X_BYPASS_MASK (0x1f << 11)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
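
The new MQS bits are intended to be driven through the IOMUXC GPR syscon. A hedged sketch, assuming the IOMUXC_GPR2 offset defined earlier in this header and an arbitrary example divider (the helper name is hypothetical):

#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/regmap.h>

static int imx6sx_mqs_enable(struct regmap *gpr, unsigned int div)
{
	int ret;

	/* Program the clock divider, then enable the MQS block */
	ret = regmap_update_bits(gpr, IOMUXC_GPR2,
				 IMX6SX_GPR2_MQS_CLK_DIV_MASK,
				 div << IMX6SX_GPR2_MQS_CLK_DIV_SHIFT);
	if (ret)
		return ret;

	return regmap_update_bits(gpr, IOMUXC_GPR2, IMX6SX_GPR2_MQS_EN_MASK,
				  1 << IMX6SX_GPR2_MQS_EN_SHIFT);
}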
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
index abbd52466573..3d46907bab89 100644
--- a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_IMX7_IOMUXC_GPR_H
diff --git a/include/linux/mfd/syscon/xlnx-vcu.h b/include/linux/mfd/syscon/xlnx-vcu.h
new file mode 100644
index 000000000000..ff7bc3656f6e
--- /dev/null
+++ b/include/linux/mfd/syscon/xlnx-vcu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ */
+
+#ifndef __XLNX_VCU_H
+#define __XLNX_VCU_H
+
+#define VCU_ECODER_ENABLE 0x00
+#define VCU_DECODER_ENABLE 0x04
+#define VCU_MEMORY_DEPTH 0x08
+#define VCU_ENC_COLOR_DEPTH 0x0c
+#define VCU_ENC_VERTICAL_RANGE 0x10
+#define VCU_ENC_FRAME_SIZE_X 0x14
+#define VCU_ENC_FRAME_SIZE_Y 0x18
+#define VCU_ENC_COLOR_FORMAT 0x1c
+#define VCU_ENC_FPS 0x20
+#define VCU_MCU_CLK 0x24
+#define VCU_CORE_CLK 0x28
+#define VCU_PLL_BYPASS 0x2c
+#define VCU_ENC_CLK 0x30
+#define VCU_PLL_CLK 0x34
+#define VCU_ENC_VIDEO_STANDARD 0x38
+#define VCU_STATUS 0x3c
+#define VCU_AXI_ENC_CLK 0x40
+#define VCU_AXI_DEC_CLK 0x44
+#define VCU_AXI_MCU_CLK 0x48
+#define VCU_DEC_VIDEO_STANDARD 0x4c
+#define VCU_DEC_FRAME_SIZE_X 0x50
+#define VCU_DEC_FRAME_SIZE_Y 0x54
+#define VCU_DEC_FPS 0x58
+#define VCU_BUFFER_B_FRAME 0x5c
+#define VCU_WPP_EN 0x60
+#define VCU_PLL_CLK_DEC 0x64
+#define VCU_NUM_CORE 0x6c
+#define VCU_GASKET_INIT 0x74
+#define VCU_GASKET_VALUE 0x03
+
+#endif /* __XLNX_VCU_H */
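
These offsets live under mfd/syscon/, which suggests the VCU logicoreIP registers are reached through a syscon regmap; under that assumption, a read-back is a one-liner (the helper name is illustrative):

#include <linux/mfd/syscon/xlnx-vcu.h>
#include <linux/regmap.h>

static int xvcu_get_pll_clk(struct regmap *map, unsigned int *val)
{
	/* Reads the raw VCU_PLL_CLK register value */
	return regmap_read(map, VCU_PLL_CLK, val);
}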
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
deleted file mode 100644
index b4629818aea5..000000000000
--- a/include/linux/mfd/t7l66xb.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * This file contains the definitions for the T7L66XB
- *
- * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#ifndef MFD_T7L66XB_H
-#define MFD_T7L66XB_H
-
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-
-struct t7l66xb_platform_data {
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-
- int irq_base; /* The base for subdevice irqs */
-
- struct tmio_nand_data *nand_data;
-};
-
-
-#define IRQ_T7L66XB_MMC (1)
-#define IRQ_T7L66XB_NAND (3)
-
-#define T7L66XB_NR_IRQS 8
-
-#endif
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index 468c31a27fcf..b84955410e03 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License, version 2
*/
#ifndef __LINUX_MFD_TC3589x_H
@@ -20,6 +19,9 @@ enum tx3589x_block {
#define TC3589x_RSTCTRL_KBDRST (1 << 1)
#define TC3589x_RSTCTRL_GPIRST (1 << 0)
+#define TC3589x_DKBDMSK_ELINT (1 << 1)
+#define TC3589x_DKBDMSK_EINT (1 << 0)
+
/* Keyboard Configuration Registers */
#define TC3589x_KBDSETTLE_REG 0x01
#define TC3589x_KBDBOUNCE 0x02
@@ -102,6 +104,9 @@ enum tx3589x_block {
#define TC3589x_GPIOODM2 0xE4
#define TC3589x_GPIOODE2 0xE5
+#define TC3589x_DIRECT0 0xEC
+#define TC3589x_DKBDMSK 0xF3
+
#define TC3589x_INT_GPIIRQ 0
#define TC3589x_INT_TI0IRQ 1
#define TC3589x_INT_TI1IRQ 2
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
deleted file mode 100644
index b4888209494a..000000000000
--- a/include/linux/mfd/tc6387xb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This file contains the definitions for the TC6387XB
- *
- * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- */
-#ifndef MFD_TC6387XB_H
-#define MFD_TC6387XB_H
-
-struct tc6387xb_platform_data {
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-};
-
-#endif
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
deleted file mode 100644
index 626e448205c5..000000000000
--- a/include/linux/mfd/tc6393xb.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Toshiba TC6393XB SoC support
- *
- * Copyright(c) 2005-2006 Chris Humbert
- * Copyright(c) 2005 Dirk Opfer
- * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
- * Copyright(c) 2007 Dmitry Baryshkov
- *
- * Based on code written by Sharp/Lineo for 2.4 kernels
- * Based on locomo.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef MFD_TC6393XB_H
-#define MFD_TC6393XB_H
-
-#include <linux/fb.h>
-
-/* Also one should provide the CK3P6MI clock */
-struct tc6393xb_platform_data {
- u16 scr_pll2cr; /* PLL2 Control */
- u16 scr_gper; /* GP Enable */
-
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-
- int irq_base; /* base for subdevice irqs */
- int gpio_base;
- int (*setup)(struct platform_device *dev);
- void (*teardown)(struct platform_device *dev);
-
- struct tmio_nand_data *nand_data;
- struct tmio_fb_data *fb_data;
-
- unsigned resume_restore : 1; /* make special actions
- to preserve the state
- on suspend/resume */
-};
-
-extern int tc6393xb_lcd_mode(struct platform_device *fb,
- const struct fb_videomode *mode);
-extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on);
-
-/*
- * Relative to irq_base
- */
-#define IRQ_TC6393_NAND 0
-#define IRQ_TC6393_MMC 1
-#define IRQ_TC6393_OHCI 2
-#define IRQ_TC6393_FB 4
-
-#define TC6393XB_NR_IRQS 8
-
-#endif
diff --git a/include/linux/mfd/ti-lmu-register.h b/include/linux/mfd/ti-lmu-register.h
index 2125c7c02818..116a749e0302 100644
--- a/include/linux/mfd/ti-lmu-register.h
+++ b/include/linux/mfd/ti-lmu-register.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI LMU (Lighting Management Unit) Device Register Map
*
* Copyright 2017 Texas Instruments
*
* Author: Milo Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MFD_TI_LMU_REGISTER_H__
@@ -15,50 +12,6 @@
#include <linux/bitops.h>
-/* LM3532 */
-#define LM3532_REG_OUTPUT_CFG 0x10
-#define LM3532_ILED1_CFG_MASK 0x03
-#define LM3532_ILED2_CFG_MASK 0x0C
-#define LM3532_ILED3_CFG_MASK 0x30
-#define LM3532_ILED1_CFG_SHIFT 0
-#define LM3532_ILED2_CFG_SHIFT 2
-#define LM3532_ILED3_CFG_SHIFT 4
-
-#define LM3532_REG_RAMPUP 0x12
-#define LM3532_REG_RAMPDN LM3532_REG_RAMPUP
-#define LM3532_RAMPUP_MASK 0x07
-#define LM3532_RAMPUP_SHIFT 0
-#define LM3532_RAMPDN_MASK 0x38
-#define LM3532_RAMPDN_SHIFT 3
-
-#define LM3532_REG_ENABLE 0x1D
-
-#define LM3532_REG_PWM_A_CFG 0x13
-#define LM3532_PWM_A_MASK 0x05 /* zone 0 */
-#define LM3532_PWM_ZONE_0 BIT(2)
-
-#define LM3532_REG_PWM_B_CFG 0x14
-#define LM3532_PWM_B_MASK 0x09 /* zone 1 */
-#define LM3532_PWM_ZONE_1 BIT(3)
-
-#define LM3532_REG_PWM_C_CFG 0x15
-#define LM3532_PWM_C_MASK 0x11 /* zone 2 */
-#define LM3532_PWM_ZONE_2 BIT(4)
-
-#define LM3532_REG_ZONE_CFG_A 0x16
-#define LM3532_REG_ZONE_CFG_B 0x18
-#define LM3532_REG_ZONE_CFG_C 0x1A
-#define LM3532_ZONE_MASK (BIT(2) | BIT(3) | BIT(4))
-#define LM3532_ZONE_0 0
-#define LM3532_ZONE_1 BIT(2)
-#define LM3532_ZONE_2 BIT(3)
-
-#define LM3532_REG_BRT_A 0x70 /* zone 0 */
-#define LM3532_REG_BRT_B 0x76 /* zone 1 */
-#define LM3532_REG_BRT_C 0x7C /* zone 2 */
-
-#define LM3532_MAX_REG 0x7E
-
/* LM3631 */
#define LM3631_REG_DEVCTRL 0x00
#define LM3631_LCD_EN_MASK BIT(1)
@@ -234,47 +187,26 @@
#define LM3695_MAX_REG 0x14
-/* LM3697 */
-#define LM3697_REG_HVLED_OUTPUT_CFG 0x10
-#define LM3697_HVLED1_CFG_MASK BIT(0)
-#define LM3697_HVLED2_CFG_MASK BIT(1)
-#define LM3697_HVLED3_CFG_MASK BIT(2)
-#define LM3697_HVLED1_CFG_SHIFT 0
-#define LM3697_HVLED2_CFG_SHIFT 1
-#define LM3697_HVLED3_CFG_SHIFT 2
-
-#define LM3697_REG_BL0_RAMP 0x11
-#define LM3697_REG_BL1_RAMP 0x12
-#define LM3697_RAMPUP_MASK 0xF0
-#define LM3697_RAMPUP_SHIFT 4
-#define LM3697_RAMPDN_MASK 0x0F
-#define LM3697_RAMPDN_SHIFT 0
-
-#define LM3697_REG_RAMP_CONF 0x14
-#define LM3697_RAMP_MASK 0x0F
-#define LM3697_RAMP_EACH 0x05
-
-#define LM3697_REG_PWM_CFG 0x1C
-#define LM3697_PWM_A_MASK BIT(0)
-#define LM3697_PWM_B_MASK BIT(1)
-
-#define LM3697_REG_IMAX_A 0x17
-#define LM3697_REG_IMAX_B 0x18
-
-#define LM3697_REG_FEEDBACK_ENABLE 0x19
-
-#define LM3697_REG_BRT_A_LSB 0x20
-#define LM3697_REG_BRT_A_MSB 0x21
-#define LM3697_REG_BRT_B_LSB 0x22
-#define LM3697_REG_BRT_B_MSB 0x23
-
-#define LM3697_REG_ENABLE 0x24
+/* LM36274 */
+#define LM36274_REG_REV 0x01
+#define LM36274_REG_BL_CFG_1 0x02
+#define LM36274_REG_BL_CFG_2 0x03
+#define LM36274_REG_BRT_LSB 0x04
+#define LM36274_REG_BRT_MSB 0x05
+#define LM36274_REG_BL_EN 0x08
-#define LM3697_REG_OPEN_FAULT_STATUS 0xB0
+#define LM36274_REG_BIAS_CONFIG_1 0x09
+#define LM36274_EXT_EN_MASK BIT(0)
+#define LM36274_EN_VNEG_MASK BIT(1)
+#define LM36274_EN_VPOS_MASK BIT(2)
-#define LM3697_REG_SHORT_FAULT_STATUS 0xB2
+#define LM36274_REG_BIAS_CONFIG_2 0x0a
+#define LM36274_REG_BIAS_CONFIG_3 0x0b
+#define LM36274_REG_VOUT_BOOST 0x0c
+#define LM36274_REG_VOUT_POS 0x0d
+#define LM36274_REG_VOUT_NEG 0x0e
+#define LM36274_VOUT_MASK 0x3F
-#define LM3697_REG_MONITOR_ENABLE 0xB4
+#define LM36274_MAX_REG 0x13
-#define LM3697_MAX_REG 0xB4
#endif
diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h
index 09d5f30384e5..0bc0e8199798 100644
--- a/include/linux/mfd/ti-lmu.h
+++ b/include/linux/mfd/ti-lmu.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI LMU (Lighting Management Unit) Devices
*
* Copyright 2017 Texas Instruments
*
* Author: Milo Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MFD_TI_LMU_H__
@@ -16,17 +13,17 @@
#include <linux/gpio.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
/* Notifier event */
#define LMU_EVENT_MONITOR_DONE 0x01
enum ti_lmu_id {
- LM3532,
LM3631,
LM3632,
LM3633,
LM3695,
- LM3697,
+ LM36274,
LMU_MAX_ID,
};
@@ -68,6 +65,9 @@ enum lm363x_regulator_id {
LM3632_BOOST, /* Boost output */
LM3632_LDO_POS, /* Positive display bias output */
LM3632_LDO_NEG, /* Negative display bias output */
+ LM36274_BOOST, /* Boost output */
+ LM36274_LDO_POS, /* Positive display bias output */
+ LM36274_LDO_NEG, /* Negative display bias output */
};
/**
@@ -81,7 +81,7 @@ enum lm363x_regulator_id {
struct ti_lmu {
struct device *dev;
struct regmap *regmap;
- int en_gpio;
+ struct gpio_desc *en_gpio;
struct blocking_notifier_head notifier;
};
#endif
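
Since en_gpio is now a struct gpio_desc pointer, consumers switch from the legacy gpio_set_value(number, ...) calls to the descriptor API, which also folds the active level from the firmware description into the call. A sketch (the helper name is illustrative):

#include <linux/gpio/consumer.h>
#include <linux/mfd/ti-lmu.h>

static void ti_lmu_hw_enable(struct ti_lmu *lmu, bool enable)
{
	/* The descriptor may be absent when HWEN is hardwired */
	if (lmu->en_gpio)
		gpiod_set_value(lmu->en_gpio, enable);
}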
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index b9a53e013bff..4063b0614d90 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -1,22 +1,16 @@
-#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
-#define __LINUX_TI_AM335X_TSCADC_MFD_H
-
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Touch Screen / ADC MFD driver
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
+#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
+#define __LINUX_TI_AM335X_TSCADC_MFD_H
+
+#include <linux/bitfield.h>
#include <linux/mfd/core.h>
+#include <linux/units.h>
#define REG_RAWIRQSTATUS 0x024
#define REG_IRQSTATUS 0x028
@@ -46,13 +40,6 @@
/* IRQ wakeup enable */
#define IRQWKUP_ENB BIT(0)
-/* Step Enable */
-#define STEPENB_MASK (0x1FFFF << 0)
-#define STEPENB(val) ((val) << 0)
-#define ENB(val) (1 << (val))
-#define STPENB_STEPENB STEPENB(0x1FFFF)
-#define STPENB_STEPENB_TC STEPENB(0x1FFF)
-
/* IRQ enable */
#define IRQENB_HW_PEN BIT(0)
#define IRQENB_EOS BIT(1)
@@ -65,12 +52,10 @@
#define IRQENB_PENUP BIT(9)
/* Step Configuration */
-#define STEPCONFIG_MODE_MASK (3 << 0)
-#define STEPCONFIG_MODE(val) ((val) << 0)
+#define STEPCONFIG_MODE(val) FIELD_PREP(GENMASK(1, 0), (val))
#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1)
#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
-#define STEPCONFIG_AVG_MASK (7 << 2)
-#define STEPCONFIG_AVG(val) ((val) << 2)
+#define STEPCONFIG_AVG(val) FIELD_PREP(GENMASK(4, 2), (val))
#define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4)
#define STEPCONFIG_XPP BIT(5)
#define STEPCONFIG_XNN BIT(6)
@@ -78,66 +63,67 @@
#define STEPCONFIG_YNN BIT(8)
#define STEPCONFIG_XNP BIT(9)
#define STEPCONFIG_YPN BIT(10)
-#define STEPCONFIG_INM_MASK (0xF << 15)
-#define STEPCONFIG_INM(val) ((val) << 15)
+#define STEPCONFIG_RFP(val) FIELD_PREP(GENMASK(13, 12), (val))
+#define STEPCONFIG_RFP_VREFP STEPCONFIG_RFP(3)
+#define STEPCONFIG_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
-#define STEPCONFIG_INP_MASK (0xF << 19)
-#define STEPCONFIG_INP(val) ((val) << 19)
+#define STEPCONFIG_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
#define STEPCONFIG_FIFO1 BIT(26)
+#define STEPCONFIG_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
+#define STEPCONFIG_RFM_VREFN STEPCONFIG_RFM(3)
/* Delay register */
-#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
-#define STEPDELAY_OPEN(val) ((val) << 0)
+#define STEPDELAY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098)
-#define STEPDELAY_SAMPLE_MASK (0xFF << 24)
-#define STEPDELAY_SAMPLE(val) ((val) << 24)
+#define STEPCONFIG_MAX_OPENDLY GENMASK(17, 0)
+#define STEPDELAY_SAMPLE(val) FIELD_PREP(GENMASK(31, 24), (val))
#define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0)
+#define STEPCONFIG_MAX_SAMPLE GENMASK(7, 0)
/* Charge Config */
-#define STEPCHARGE_RFP_MASK (7 << 12)
-#define STEPCHARGE_RFP(val) ((val) << 12)
+#define STEPCHARGE_RFP(val) FIELD_PREP(GENMASK(14, 12), (val))
#define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1)
-#define STEPCHARGE_INM_MASK (0xF << 15)
-#define STEPCHARGE_INM(val) ((val) << 15)
+#define STEPCHARGE_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1)
-#define STEPCHARGE_INP_MASK (0xF << 19)
-#define STEPCHARGE_INP(val) ((val) << 19)
-#define STEPCHARGE_RFM_MASK (3 << 23)
-#define STEPCHARGE_RFM(val) ((val) << 23)
+#define STEPCHARGE_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
+#define STEPCHARGE_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
#define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1)
/* Charge delay */
-#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
-#define CHARGEDLY_OPEN(val) ((val) << 0)
+#define CHARGEDLY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
/* Control register */
-#define CNTRLREG_TSCSSENB BIT(0)
+#define CNTRLREG_SSENB BIT(0)
#define CNTRLREG_STEPID BIT(1)
-#define CNTRLREG_STEPCONFIGWRT BIT(2)
+#define CNTRLREG_TSC_STEPCONFIGWRT BIT(2)
#define CNTRLREG_POWERDOWN BIT(4)
-#define CNTRLREG_AFE_CTRL_MASK (3 << 5)
-#define CNTRLREG_AFE_CTRL(val) ((val) << 5)
-#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1)
-#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2)
-#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
-#define CNTRLREG_TSCENB BIT(7)
+#define CNTRLREG_TSC_AFE_CTRL(val) FIELD_PREP(GENMASK(6, 5), (val))
+#define CNTRLREG_TSC_4WIRE CNTRLREG_TSC_AFE_CTRL(1)
+#define CNTRLREG_TSC_5WIRE CNTRLREG_TSC_AFE_CTRL(2)
+#define CNTRLREG_TSC_ENB BIT(7)
+
+/* Control register bitfields for the MAGADC IP */
+#define CNTRLREG_MAGADCENB BIT(0)
+#define CNTRLREG_MAG_PREAMP_PWRDOWN BIT(5)
+#define CNTRLREG_MAG_PREAMP_BYPASS BIT(6)
/* FIFO READ Register */
-#define FIFOREAD_DATA_MASK (0xfff << 0)
-#define FIFOREAD_CHNLID_MASK (0xf << 16)
+#define FIFOREAD_DATA_MASK GENMASK(11, 0)
+#define FIFOREAD_CHNLID_MASK GENMASK(19, 16)
/* DMA ENABLE/CLEAR Register */
#define DMA_FIFO0 BIT(0)
#define DMA_FIFO1 BIT(1)
/* Sequencer Status */
-#define SEQ_STATUS BIT(5)
+#define SEQ_STATUS BIT(5)
#define CHARGE_STEP 0x11
-#define ADC_CLK 3000000
+#define TSC_ADC_CLK (3 * HZ_PER_MHZ)
+#define MAG_ADC_CLK (13 * HZ_PER_MHZ)
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
#define FIFO1_THRESHOLD 19
@@ -154,21 +140,27 @@
*
* max processing time: 266431 * 308ns = 83ms(approx)
*/
-#define IDLE_TIMEOUT 83 /* milliseconds */
+#define IDLE_TIMEOUT_MS 83 /* milliseconds */
#define TSCADC_CELLS 2
+struct ti_tscadc_data {
+ char *adc_feature_name;
+ char *adc_feature_compatible;
+ char *secondary_feature_name;
+ char *secondary_feature_compatible;
+ unsigned int target_clk_rate;
+};
+
struct ti_tscadc_dev {
struct device *dev;
struct regmap *regmap;
void __iomem *tscadc_base;
phys_addr_t tscadc_phys_base;
+ const struct ti_tscadc_data *data;
int irq;
- int used_cells; /* 1-2 */
- int tsc_wires;
- int tsc_cell; /* -1 if not used */
- int adc_cell; /* -1 if not used */
struct mfd_cell cells[TSCADC_CELLS];
+ u32 ctrl;
u32 reg_se_cache;
bool adc_waiting;
bool adc_in_use;
@@ -190,6 +182,12 @@ static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p)
return *tscadc_dev;
}
+static inline bool ti_adc_with_touchscreen(struct ti_tscadc_dev *tscadc)
+{
+ return of_device_is_compatible(tscadc->dev->of_node,
+ "ti,am3359-tscadc");
+}
+
void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val);
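
The FIELD_PREP-based macros above still compose step configurations the same way at the call site. For example, a hypothetical software-triggered step with 16-sample averaging on input AN4, routed to FIFO1 (channel and routing choices are illustrative only):

#include <linux/mfd/ti_am335x_tscadc.h>

static u32 example_stepconfig(void)
{
	/* One-shot software step, average of 16 samples, AN4 -> FIFO1 */
	return STEPCONFIG_MODE_SWCNT | STEPCONFIG_AVG_16 |
	       STEPCONFIG_INP_AN4 | STEPCONFIG_FIFO1;
}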
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
deleted file mode 100644
index 26e8f8c0a6db..000000000000
--- a/include/linux/mfd/tmio.h
+++ /dev/null
@@ -1,160 +0,0 @@
-#ifndef MFD_TMIO_H
-#define MFD_TMIO_H
-
-#include <linux/device.h>
-#include <linux/fb.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/mmc/card.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#define tmio_ioread8(addr) readb(addr)
-#define tmio_ioread16(addr) readw(addr)
-#define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
-#define tmio_ioread32(addr) \
- (((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16))
-
-#define tmio_iowrite8(val, addr) writeb((val), (addr))
-#define tmio_iowrite16(val, addr) writew((val), (addr))
-#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l)
-#define tmio_iowrite32(val, addr) \
- do { \
- writew((val), (addr)); \
- writew((val) >> 16, (addr) + 2); \
- } while (0)
-
-#define CNF_CMD 0x04
-#define CNF_CTL_BASE 0x10
-#define CNF_INT_PIN 0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
-#define sd_config_write8(base, shift, reg, val) \
- tmio_iowrite8((val), (base) + ((reg) << (shift)))
-#define sd_config_write16(base, shift, reg, val) \
- tmio_iowrite16((val), (base) + ((reg) << (shift)))
-#define sd_config_write32(base, shift, reg, val) \
- do { \
- tmio_iowrite16((val), (base) + ((reg) << (shift))); \
- tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
- } while (0)
-
-/* tmio MMC platform flags */
-#define TMIO_MMC_WRPROTECT_DISABLE BIT(0)
-/*
- * Some controllers can support a 2-byte block size when the bus width
- * is configured in 4-bit mode.
- */
-#define TMIO_MMC_BLKSZ_2BYTES BIT(1)
-/*
- * Some controllers can support SDIO IRQ signalling.
- */
-#define TMIO_MMC_SDIO_IRQ BIT(2)
-
-/* Some features are only available or tested on R-Car Gen2 or later */
-#define TMIO_MMC_MIN_RCAR2 BIT(3)
-
-/*
- * Some controllers require waiting for the SD bus to become
- * idle before writing to some registers.
- */
-#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
-/*
- * A GPIO is used for card hotplug detection. We need an extra flag for this,
- * because 0 is a valid GPIO number too, and requiring users to specify
- * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility.
- */
-#define TMIO_MMC_USE_GPIO_CD BIT(5)
-
-/*
- * Some controllers doesn't have over 0x100 register.
- * it is used to checking accessibility of
- * CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL
- */
-#define TMIO_MMC_HAVE_HIGH_REG BIT(6)
-
-/*
- * Some controllers have CMD12 automatically
- * issue/non-issue register
- */
-#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7)
-
-/* Controller has some SDIO status bits which must be 1 */
-#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8)
-
-/*
- * Some controllers have a 32-bit wide data port register
- */
-#define TMIO_MMC_32BIT_DATA_PORT BIT(9)
-
-/*
- * Some controllers allows to set SDx actual clock
- */
-#define TMIO_MMC_CLK_ACTUAL BIT(10)
-
-int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
-int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
-void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
-void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
-
-struct dma_chan;
-
-/*
- * data for the MMC controller
- */
-struct tmio_mmc_data {
- void *chan_priv_tx;
- void *chan_priv_rx;
- unsigned int hclk;
- unsigned long capabilities;
- unsigned long capabilities2;
- unsigned long flags;
- u32 ocr_mask; /* available voltages */
- unsigned int cd_gpio;
- int alignment_shift;
- dma_addr_t dma_rx_offset;
- void (*set_pwr)(struct platform_device *host, int state);
- void (*set_clk_div)(struct platform_device *host, int state);
-};
-
-/*
- * data for the NAND controller
- */
-struct tmio_nand_data {
- struct nand_bbt_descr *badblock_pattern;
- struct mtd_partition *partition;
- unsigned int num_partitions;
-};
-
-#define FBIO_TMIO_ACC_WRITE 0x7C639300
-#define FBIO_TMIO_ACC_SYNC 0x7C639301
-
-struct tmio_fb_data {
- int (*lcd_set_power)(struct platform_device *fb_dev,
- bool on);
- int (*lcd_mode)(struct platform_device *fb_dev,
- const struct fb_videomode *mode);
- int num_modes;
- struct fb_videomode *modes;
-
- /* in mm: size of screen */
- int height;
- int width;
-};
-
-#endif
diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h
index 8bc51180800a..b1313411ef09 100644
--- a/include/linux/mfd/tps6105x.h
+++ b/include/linux/mfd/tps6105x.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 ST-Ericsson SA
* Written on behalf of Linaro for ST-Ericsson
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef MFD_TPS6105X_H
#define MFD_TPS6105X_H
diff --git a/include/linux/i2c/tps65010.h b/include/linux/mfd/tps65010.h
index 08aa92278d71..5edf1aef1118 100644
--- a/include/linux/i2c/tps65010.h
+++ b/include/linux/mfd/tps65010.h
@@ -1,4 +1,4 @@
-/* linux/i2c/tps65010.h
+/* linux/mfd/tps65010.h
*
* Functions to access TPS65010 power management device.
*
@@ -28,6 +28,8 @@
#ifndef __LINUX_I2C_TPS65010_H
#define __LINUX_I2C_TPS65010_H
+struct gpio_chip;
+
/*
* ----------------------------------------------------------------------------
* Registers, all 8 bits
@@ -176,12 +178,10 @@ struct i2c_client;
/**
* struct tps65010_board - packages GPIO and LED lines
- * @base: the GPIO number to assign to GPIO-1
* @outmask: bit (N-1) is set to allow GPIO-N to be used as an
* (open drain) output
* @setup: optional callback issued once the GPIOs are valid
* @teardown: optional callback issued before the GPIOs are invalidated
- * @context: optional parameter passed to setup() and teardown()
*
* Board data may be used to package the GPIO (and LED) lines for use
* in by the generic GPIO and LED frameworks. The first four GPIOs
@@ -193,12 +193,9 @@ struct i2c_client;
* devices in their initial states using these GPIOs.
*/
struct tps65010_board {
- int base;
unsigned outmask;
-
- int (*setup)(struct i2c_client *client, void *context);
- int (*teardown)(struct i2c_client *client, void *context);
- void *context;
+ int (*setup)(struct i2c_client *client, struct gpio_chip *gc);
+ void (*teardown)(struct i2c_client *client, struct gpio_chip *gc);
};
#endif /* __LINUX_I2C_TPS65010_H */
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
index a228ae4c88d9..9185b5cd8371 100644
--- a/include/linux/mfd/tps65086.h
+++ b/include/linux/mfd/tps65086.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65912 driver
*/
@@ -21,8 +13,9 @@
#include <linux/regmap.h>
/* List of registers for TPS65086 */
-#define TPS65086_DEVICEID 0x01
-#define TPS65086_IRQ 0x02
+#define TPS65086_DEVICEID1 0x00
+#define TPS65086_DEVICEID2 0x01
+#define TPS65086_IRQ 0x02
#define TPS65086_IRQ_MASK 0x03
#define TPS65086_PMICSTAT 0x04
#define TPS65086_SHUTDNSRC 0x05
@@ -83,10 +76,16 @@
#define TPS65086_IRQ_SHUTDN_MASK BIT(3)
#define TPS65086_IRQ_FAULT_MASK BIT(7)
-/* DEVICEID Register field definitions */
-#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0)
-#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4)
-#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6)
+/* DEVICEID1 Register field definitions */
+#define TPS6508640_ID 0x00
+#define TPS65086401_ID 0x01
+#define TPS6508641_ID 0x10
+#define TPS65086470_ID 0x70
+
+/* DEVICEID2 Register field definitions */
+#define TPS65086_DEVICEID2_PART_MASK GENMASK(3, 0)
+#define TPS65086_DEVICEID2_OTP_MASK GENMASK(5, 4)
+#define TPS65086_DEVICEID2_REV_MASK GENMASK(7, 6)
/* VID Masks */
#define BUCK_VID_MASK GENMASK(7, 1)
@@ -100,6 +99,8 @@ enum tps65086_irqs {
TPS65086_IRQ_FAULT,
};
+struct tps65086_regulator_config;
+
/**
* struct tps65086 - state holder for the tps65086 driver
*
@@ -108,6 +109,8 @@ enum tps65086_irqs {
struct tps65086 {
struct device *dev;
struct regmap *regmap;
+ unsigned int chip_id;
+ const struct tps65086_regulator_config *reg_config;
/* IRQ Data */
int irq;
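
A sketch of how a probe might use the new DEVICEID1 part IDs to populate the chip_id field added to struct tps65086 (the helper is hypothetical; the real driver's matching logic may differ):

#include <linux/mfd/tps65086.h>
#include <linux/regmap.h>

static int tps65086_check_id(struct tps65086 *tps)
{
	unsigned int id;
	int ret;

	ret = regmap_read(tps->regmap, TPS65086_DEVICEID1, &id);
	if (ret)
		return ret;

	/* Only accept the parts this header knows about */
	switch (id) {
	case TPS6508640_ID:
	case TPS65086401_ID:
	case TPS6508641_ID:
	case TPS65086470_ID:
		tps->chip_id = id;
		return 0;
	default:
		return -ENODEV;
	}
}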
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 67d144b3b8f9..44ebcc4d8f01 100644
--- a/include/linux/mfd/tps65090.h
+++ b/include/linux/mfd/tps65090.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Core driver interface for TI TPS65090 PMIC family
*
* Copyright (C) 2012 NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
*/
#ifndef __LINUX_MFD_TPS65090_H
@@ -83,6 +69,8 @@ enum {
#define TPS65090_MAX_REG TPS65090_REG_AD_OUT2
#define TPS65090_NUM_REGS (TPS65090_MAX_REG + 1)
+struct gpio_desc;
+
struct tps65090 {
struct device *dev;
struct regmap *rmap;
@@ -95,8 +83,8 @@ struct tps65090 {
* @reg_init_data: The regulator init data.
* @enable_ext_control: Enable extrenal control or not. Only available for
* DCDC1, DCDC2 and DCDC3.
- * @gpio: Gpio number if external control is enabled and controlled through
- * gpio.
+ * @gpiod: Gpio descriptor if external control is enabled and controlled through
+ * gpio
* @overcurrent_wait_valid: True if the overcurrent_wait should be applied.
* @overcurrent_wait: Value to set as the overcurrent wait time. This is the
* actual bitfield value, not a time in ms (valid value are 0 - 3).
@@ -104,7 +92,7 @@ struct tps65090 {
struct tps65090_regulator_plat_data {
struct regulator_init_data *reg_init_data;
bool enable_ext_control;
- int gpio;
+ struct gpio_desc *gpiod;
bool overcurrent_wait_valid;
int overcurrent_wait;
};
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index eac285756b37..877d9c41c53d 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65217.h
*
* Functions to access TPS65217 power management chip.
*
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_TPS65217_H
@@ -263,7 +255,6 @@ struct tps65217_board {
struct tps65217 {
struct device *dev;
struct tps65217_board *pdata;
- unsigned long id;
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
u8 *strobes;
@@ -278,11 +269,6 @@ static inline struct tps65217 *dev_to_tps65217(struct device *dev)
return dev_get_drvdata(dev);
}
-static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217)
-{
- return tps65217->id;
-}
-
int tps65217_reg_read(struct tps65217 *tps, unsigned int reg,
unsigned int *val);
int tps65217_reg_write(struct tps65217 *tps, unsigned int reg,
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index bccd2d68b1e3..2946be2f15f3 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65218.h
*
- * Functions to access TPS65219 power management chip.
+ * Functions to access TPS65218 power management chip.
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_TPS65218_H
@@ -137,6 +129,10 @@
#define TPS65218_CONFIG1_PGDLY_MASK 0x18
#define TPS65218_CONFIG1_STRICT BIT(2)
#define TPS65218_CONFIG1_UVLO_MASK 0x3
+#define TPS65218_CONFIG1_UVLO_2750000 0x0
+#define TPS65218_CONFIG1_UVLO_2950000 0x1
+#define TPS65218_CONFIG1_UVLO_3250000 0x2
+#define TPS65218_CONFIG1_UVLO_3350000 0x3
#define TPS65218_CONFIG2_DC12_RST BIT(7)
#define TPS65218_CONFIG2_UVLOHYS BIT(6)
@@ -205,10 +201,11 @@ enum tps65218_regulator_id {
TPS65218_DCDC_4,
TPS65218_DCDC_5,
TPS65218_DCDC_6,
- /* LS's */
- TPS65218_LS_3,
/* LDOs */
TPS65218_LDO_1,
+ /* LS's */
+ TPS65218_LS_2,
+ TPS65218_LS_3,
};
#define TPS65218_MAX_REG_ID TPS65218_LDO_1
@@ -218,7 +215,7 @@ enum tps65218_regulator_id {
/* Number of LDO voltage regulators available */
#define TPS65218_NUM_LDO 1
/* Number of total LS current regulators available */
-#define TPS65218_NUM_LS 1
+#define TPS65218_NUM_LS 2
/* Number of total regulators available */
#define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \
+ TPS65218_NUM_LS)
@@ -246,24 +243,6 @@ enum tps65218_irqs {
};
/**
- * struct tps_info - packages regulator constraints
- * @id: Id of the regulator
- * @name: Voltage regulator name
- * @min_uV: minimum micro volts
- * @max_uV: minimum micro volts
- * @strobe: sequencing strobe value for the regulator
- *
- * This data is used to check the regualtor voltage limits while setting.
- */
-struct tps_info {
- int id;
- const char *name;
- int min_uV;
- int max_uV;
- int strobe;
-};
-
-/**
* struct tps65218 - tps65218 sub-driver chip access routines
*
* Device data may be used to access the TPS65218 chip
@@ -280,7 +259,6 @@ struct tps65218 {
u32 irq_mask;
struct regmap_irq_chip_data *irq_data;
struct regulator_desc desc[TPS65218_NUM_REGULATOR];
- struct tps_info *info[TPS65218_NUM_REGULATOR];
struct regmap *regmap;
u8 *strobes;
};
diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h
new file mode 100644
index 000000000000..55234e771ba7
--- /dev/null
+++ b/include/linux/mfd/tps65219.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions to access TPS65215/TPS65219 Power Management Integrated Chips
+ *
+ * Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef MFD_TPS65219_H
+#define MFD_TPS65219_H
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+/* Chip id list */
+enum pmic_id {
+ TPS65214,
+ TPS65215,
+ TPS65219,
+};
+
+/* I2C ID for TPS65219 part */
+#define TPS65219_I2C_ID 0x24
+
+/* All register addresses */
+#define TPS65219_REG_TI_DEV_ID 0x00
+#define TPS65219_REG_NVM_ID 0x01
+#define TPS65219_REG_ENABLE_CTRL 0x02
+#define TPS65219_REG_BUCKS_CONFIG 0x03
+#define TPS65214_REG_LOCK 0x03
+#define TPS65219_REG_LDO4_VOUT 0x04
+#define TPS65214_REG_LDO1_VOUT_STBY 0x04
+#define TPS65219_REG_LDO3_VOUT 0x05
+#define TPS65215_REG_LDO2_VOUT 0x05
+#define TPS65214_REG_LDO1_VOUT 0x05
+#define TPS65219_REG_LDO2_VOUT 0x06
+#define TPS65214_REG_LDO2_VOUT 0x06
+#define TPS65219_REG_LDO1_VOUT 0x07
+#define TPS65214_REG_LDO2_VOUT_STBY 0x07
+#define TPS65219_REG_BUCK3_VOUT 0x8
+#define TPS65219_REG_BUCK2_VOUT 0x9
+#define TPS65219_REG_BUCK1_VOUT 0xA
+#define TPS65219_REG_LDO4_SEQUENCE_SLOT 0xB
+#define TPS65219_REG_LDO3_SEQUENCE_SLOT 0xC
+#define TPS65215_REG_LDO2_SEQUENCE_SLOT 0xC
+#define TPS65214_REG_LDO1_SEQUENCE_SLOT 0xC
+#define TPS65219_REG_LDO2_SEQUENCE_SLOT 0xD
+#define TPS65219_REG_LDO1_SEQUENCE_SLOT 0xE
+#define TPS65219_REG_BUCK3_SEQUENCE_SLOT 0xF
+#define TPS65219_REG_BUCK2_SEQUENCE_SLOT 0x10
+#define TPS65219_REG_BUCK1_SEQUENCE_SLOT 0x11
+#define TPS65219_REG_nRST_SEQUENCE_SLOT 0x12
+#define TPS65219_REG_GPIO_SEQUENCE_SLOT 0x13
+#define TPS65219_REG_GPO2_SEQUENCE_SLOT 0x14
+#define TPS65214_REG_GPIO_GPI_SEQUENCE_SLOT 0x14
+#define TPS65219_REG_GPO1_SEQUENCE_SLOT 0x15
+#define TPS65214_REG_GPO_SEQUENCE_SLOT 0x15
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_1 0x16
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_2 0x17
+/* _SLOT_DURATION_3 doesn't apply to TPS65215 */
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_3 0x18
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_4 0x19
+#define TPS65214_REG_BUCK3_VOUT_STBY 0x19
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_1 0x1A
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_2 0x1B
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_3 0x1C
+#define TPS65214_REG_BUCK2_VOUT_STBY 0x1C
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_4 0x1D
+#define TPS65214_REG_BUCK1_VOUT_STBY 0x1D
+#define TPS65219_REG_GENERAL_CONFIG 0x1E
+#define TPS65219_REG_MFP_1_CONFIG 0x1F
+#define TPS65219_REG_MFP_2_CONFIG 0x20
+#define TPS65219_REG_STBY_1_CONFIG 0x21
+#define TPS65219_REG_STBY_2_CONFIG 0x22
+#define TPS65219_REG_OC_DEGL_CONFIG 0x23
+/* 'sub irq' MASK registers */
+#define TPS65219_REG_INT_MASK_UV 0x24
+#define TPS65219_REG_MASK_CONFIG 0x25
+
+#define TPS65219_REG_I2C_ADDRESS_REG 0x26
+#define TPS65219_REG_USER_GENERAL_NVM_STORAGE 0x27
+#define TPS65219_REG_MANUFACTURING_VER 0x28
+#define TPS65219_REG_MFP_CTRL 0x29
+#define TPS65219_REG_DISCHARGE_CONFIG 0x2A
+/* main irq registers */
+#define TPS65219_REG_INT_SOURCE 0x2B
+
+/* TPS65219 'sub irq' registers */
+#define TPS65219_REG_INT_LDO_3_4 0x2C
+#define TPS65219_REG_INT_LDO_1_2 0x2D
+
+/* TPS65215 specific 'sub irq' registers */
+#define TPS65215_REG_INT_LDO_2 0x2C
+#define TPS65215_REG_INT_LDO_1 0x2D
+
+/* TPS65214 specific 'sub irq' register */
+#define TPS65214_REG_INT_LDO_1_2 0x2D
+
+/* Common TPS65215 & TPS65219 'sub irq' registers */
+#define TPS65219_REG_INT_BUCK_3 0x2E
+#define TPS65219_REG_INT_BUCK_1_2 0x2F
+#define TPS65219_REG_INT_SYSTEM 0x30
+#define TPS65219_REG_INT_RV 0x31
+#define TPS65219_REG_INT_TIMEOUT_RV_SD 0x32
+#define TPS65219_REG_INT_PB 0x33
+
+#define TPS65219_REG_INT_LDO_3_4_POS 0
+#define TPS65219_REG_INT_LDO_1_2_POS 1
+#define TPS65219_REG_INT_BUCK_3_POS 2
+#define TPS65219_REG_INT_BUCK_1_2_POS 3
+#define TPS65219_REG_INT_SYS_POS 4
+#define TPS65219_REG_INT_RV_POS 5
+#define TPS65219_REG_INT_TO_RV_POS 6
+#define TPS65219_REG_INT_PB_POS 7
+
+#define TPS65215_REG_INT_LDO_2_POS 0
+#define TPS65215_REG_INT_LDO_1_POS 1
+
+#define TPS65214_REG_INT_LDO_1_2_POS 0
+#define TPS65214_REG_INT_BUCK_3_POS 1
+#define TPS65214_REG_INT_BUCK_1_2_POS 2
+#define TPS65214_REG_INT_SYS_POS 3
+#define TPS65214_REG_INT_RV_POS 4
+#define TPS65214_REG_INT_TO_RV_POS 5
+#define TPS65214_REG_INT_PB_POS 6
+
+#define TPS65219_REG_USER_NVM_CMD 0x34
+#define TPS65219_REG_POWER_UP_STATUS 0x35
+#define TPS65219_REG_SPARE_2 0x36
+#define TPS65219_REG_SPARE_3 0x37
+#define TPS65219_REG_FACTORY_CONFIG_2 0x41
+
+/* Register field definitions */
+#define TPS65219_DEVID_REV_MASK GENMASK(7, 0)
+#define TPS65219_BUCKS_LDOS_VOUT_VSET_MASK GENMASK(5, 0)
+#define TPS65219_BUCKS_UV_THR_SEL_MASK BIT(6)
+#define TPS65219_BUCKS_BW_SEL_MASK BIT(7)
+#define LDO_BYP_SHIFT 6
+#define TPS65219_LDOS_BYP_CONFIG_MASK BIT(LDO_BYP_SHIFT)
+#define TPS65219_LDOS_LSW_CONFIG_MASK BIT(7)
+/* Regulators enable control */
+#define TPS65219_ENABLE_BUCK1_EN_MASK BIT(0)
+#define TPS65219_ENABLE_BUCK2_EN_MASK BIT(1)
+#define TPS65219_ENABLE_BUCK3_EN_MASK BIT(2)
+#define TPS65219_ENABLE_LDO1_EN_MASK BIT(3)
+#define TPS65219_ENABLE_LDO2_EN_MASK BIT(4)
+#define TPS65219_ENABLE_LDO3_EN_MASK BIT(5)
+#define TPS65215_ENABLE_LDO2_EN_MASK BIT(5)
+#define TPS65214_ENABLE_LDO1_EN_MASK BIT(5)
+#define TPS65219_ENABLE_LDO4_EN_MASK BIT(6)
+/* power ON-OFF sequence slot */
+#define TPS65219_BUCKS_LDOS_SEQUENCE_OFF_SLOT_MASK GENMASK(3, 0)
+#define TPS65219_BUCKS_LDOS_SEQUENCE_ON_SLOT_MASK GENMASK(7, 4)
+/* TODO: Not needed, same mapping as TPS65219_ENABLE_REGNAME_EN, factorize */
+#define TPS65219_STBY1_BUCK1_STBY_EN_MASK BIT(0)
+#define TPS65219_STBY1_BUCK2_STBY_EN_MASK BIT(1)
+#define TPS65219_STBY1_BUCK3_STBY_EN_MASK BIT(2)
+#define TPS65219_STBY1_LDO1_STBY_EN_MASK BIT(3)
+#define TPS65219_STBY1_LDO2_STBY_EN_MASK BIT(4)
+#define TPS65219_STBY1_LDO3_STBY_EN_MASK BIT(5)
+#define TPS65219_STBY1_LDO4_STBY_EN_MASK BIT(6)
+/* STBY_2 config */
+#define TPS65219_STBY2_GPO1_STBY_EN_MASK BIT(0)
+#define TPS65219_STBY2_GPO2_STBY_EN_MASK BIT(1)
+#define TPS65219_STBY2_GPIO_STBY_EN_MASK BIT(2)
+/* MFP Control */
+#define TPS65219_MFP_I2C_OFF_REQ_MASK BIT(0)
+#define TPS65219_MFP_STBY_I2C_CTRL_MASK BIT(1)
+#define TPS65219_MFP_COLD_RESET_I2C_CTRL_MASK BIT(2)
+#define TPS65219_MFP_WARM_RESET_I2C_CTRL_MASK BIT(3)
+#define TPS65219_MFP_GPIO_STATUS_MASK BIT(4)
+/* MFP_1 Config */
+#define TPS65219_MFP_1_VSEL_DDR_SEL_MASK BIT(0)
+#define TPS65219_MFP_1_VSEL_SD_POL_MASK BIT(1)
+#define TPS65219_MFP_1_VSEL_RAIL_MASK BIT(2)
+/* MFP_2 Config */
+#define TPS65219_MFP_2_MODE_STBY_MASK GENMASK(1, 0)
+#define TPS65219_MFP_2_MODE_RESET_MASK BIT(2)
+#define TPS65219_MFP_2_EN_PB_VSENSE_DEGL_MASK BIT(3)
+#define TPS65219_MFP_2_EN_PB_VSENSE_MASK GENMASK(5, 4)
+#define TPS65219_MFP_2_WARM_COLD_RESET_MASK BIT(6)
+#define TPS65219_MFP_2_PU_ON_FSD_MASK BIT(7)
+#define TPS65219_MFP_2_EN 0
+#define TPS65219_MFP_2_PB BIT(4)
+#define TPS65219_MFP_2_VSENSE BIT(5)
+/* MASK_UV Config */
+#define TPS65219_REG_MASK_UV_LDO1_UV_MASK BIT(0)
+#define TPS65219_REG_MASK_UV_LDO2_UV_MASK BIT(1)
+#define TPS65219_REG_MASK_UV_LDO3_UV_MASK BIT(2)
+#define TPS65219_REG_MASK_UV_LDO4_UV_MASK BIT(3)
+#define TPS65219_REG_MASK_UV_BUCK1_UV_MASK BIT(4)
+#define TPS65219_REG_MASK_UV_BUCK2_UV_MASK BIT(5)
+#define TPS65219_REG_MASK_UV_BUCK3_UV_MASK BIT(6)
+#define TPS65219_REG_MASK_UV_RETRY_MASK BIT(7)
+/* MASK Config */
+// SENSOR_N_WARM_MASK already defined in Thermal
+#define TPS65219_REG_MASK_INT_FOR_RV_MASK BIT(4)
+#define TPS65219_REG_MASK_EFFECT_MASK GENMASK(2, 1)
+#define TPS65219_REG_MASK_INT_FOR_PB_MASK BIT(7)
+/* UnderVoltage - Short to GND - OverCurrent */
+/* LDO3-4: only for TPS65219 */
+#define TPS65219_INT_LDO3_SCG_MASK BIT(0)
+#define TPS65219_INT_LDO3_OC_MASK BIT(1)
+#define TPS65219_INT_LDO3_UV_MASK BIT(2)
+#define TPS65219_INT_LDO4_SCG_MASK BIT(3)
+#define TPS65219_INT_LDO4_OC_MASK BIT(4)
+#define TPS65219_INT_LDO4_UV_MASK BIT(5)
+/* LDO1-2: TPS65214 & TPS65219 */
+#define TPS65219_INT_LDO1_SCG_MASK BIT(0)
+#define TPS65219_INT_LDO1_OC_MASK BIT(1)
+#define TPS65219_INT_LDO1_UV_MASK BIT(2)
+#define TPS65219_INT_LDO2_SCG_MASK BIT(3)
+#define TPS65219_INT_LDO2_OC_MASK BIT(4)
+#define TPS65219_INT_LDO2_UV_MASK BIT(5)
+/* TPS65215 LDO1-2 */
+#define TPS65215_INT_LDO1_SCG_MASK BIT(0)
+#define TPS65215_INT_LDO1_OC_MASK BIT(1)
+#define TPS65215_INT_LDO1_UV_MASK BIT(2)
+#define TPS65215_INT_LDO2_SCG_MASK BIT(0)
+#define TPS65215_INT_LDO2_OC_MASK BIT(1)
+#define TPS65215_INT_LDO2_UV_MASK BIT(2)
+/* BUCK3 */
+#define TPS65219_INT_BUCK3_SCG_MASK BIT(0)
+#define TPS65219_INT_BUCK3_OC_MASK BIT(1)
+#define TPS65219_INT_BUCK3_NEG_OC_MASK BIT(2)
+#define TPS65219_INT_BUCK3_UV_MASK BIT(3)
+/* BUCK1-2 */
+#define TPS65219_INT_BUCK1_SCG_MASK BIT(0)
+#define TPS65219_INT_BUCK1_OC_MASK BIT(1)
+#define TPS65219_INT_BUCK1_NEG_OC_MASK BIT(2)
+#define TPS65219_INT_BUCK1_UV_MASK BIT(3)
+#define TPS65219_INT_BUCK2_SCG_MASK BIT(4)
+#define TPS65219_INT_BUCK2_OC_MASK BIT(5)
+#define TPS65219_INT_BUCK2_NEG_OC_MASK BIT(6)
+#define TPS65219_INT_BUCK2_UV_MASK BIT(7)
+/* Thermal Sensor: TPS65219/TPS65215 */
+#define TPS65219_INT_SENSOR_3_WARM_MASK BIT(0)
+#define TPS65219_INT_SENSOR_3_HOT_MASK BIT(4)
+/* Thermal Sensor: TPS65219/TPS65215/TPS65214 */
+#define TPS65219_INT_SENSOR_2_WARM_MASK BIT(1)
+#define TPS65219_INT_SENSOR_1_WARM_MASK BIT(2)
+#define TPS65219_INT_SENSOR_0_WARM_MASK BIT(3)
+#define TPS65219_INT_SENSOR_2_HOT_MASK BIT(5)
+#define TPS65219_INT_SENSOR_1_HOT_MASK BIT(6)
+#define TPS65219_INT_SENSOR_0_HOT_MASK BIT(7)
+/* Residual Voltage */
+#define TPS65219_INT_BUCK1_RV_MASK BIT(0)
+#define TPS65219_INT_BUCK2_RV_MASK BIT(1)
+#define TPS65219_INT_BUCK3_RV_MASK BIT(2)
+#define TPS65219_INT_LDO1_RV_MASK BIT(3)
+#define TPS65219_INT_LDO2_RV_MASK BIT(4)
+#define TPS65219_INT_LDO3_RV_MASK BIT(5)
+#define TPS65215_INT_LDO2_RV_MASK BIT(5)
+#define TPS65214_INT_LDO2_RV_MASK BIT(5)
+#define TPS65219_INT_LDO4_RV_MASK BIT(6)
+/* Residual Voltage ShutDown */
+#define TPS65219_INT_BUCK1_RV_SD_MASK BIT(0)
+#define TPS65219_INT_BUCK2_RV_SD_MASK BIT(1)
+#define TPS65219_INT_BUCK3_RV_SD_MASK BIT(2)
+#define TPS65219_INT_LDO1_RV_SD_MASK BIT(3)
+#define TPS65219_INT_LDO2_RV_SD_MASK BIT(4)
+#define TPS65219_INT_LDO3_RV_SD_MASK BIT(5)
+#define TPS65215_INT_LDO2_RV_SD_MASK BIT(5)
+#define TPS65214_INT_LDO1_RV_SD_MASK BIT(5)
+#define TPS65219_INT_LDO4_RV_SD_MASK BIT(6)
+#define TPS65219_INT_TIMEOUT_MASK BIT(7)
+/* Power Button */
+#define TPS65219_INT_PB_FALLING_EDGE_DETECT_MASK BIT(0)
+#define TPS65219_INT_PB_RISING_EDGE_DETECT_MASK BIT(1)
+#define TPS65219_INT_PB_REAL_TIME_STATUS_MASK BIT(2)
+
+#define TPS65219_PB_POS 7
+#define TPS65219_TO_RV_POS 6
+#define TPS65219_RV_POS 5
+#define TPS65219_SYS_POS 4
+#define TPS65219_BUCK_1_2_POS 3
+#define TPS65219_BUCK_3_POS 2
+#define TPS65219_LDO_1_2_POS 1
+#define TPS65219_LDO_3_4_POS 0
+
+/* IRQs */
+enum {
+ /* LDO3-4 register IRQs */
+ TPS65219_INT_LDO3_SCG,
+ TPS65219_INT_LDO3_OC,
+ TPS65219_INT_LDO3_UV,
+ TPS65219_INT_LDO4_SCG,
+ TPS65219_INT_LDO4_OC,
+ TPS65219_INT_LDO4_UV,
+ /* TPS65215 LDO1 */
+ TPS65215_INT_LDO1_SCG,
+ TPS65215_INT_LDO1_OC,
+ TPS65215_INT_LDO1_UV,
+ /* TPS65215 LDO2 */
+ TPS65215_INT_LDO2_SCG,
+ TPS65215_INT_LDO2_OC,
+ TPS65215_INT_LDO2_UV,
+ /* LDO1-2: TPS65219/TPS65214 */
+ TPS65219_INT_LDO1_SCG,
+ TPS65219_INT_LDO1_OC,
+ TPS65219_INT_LDO1_UV,
+ TPS65219_INT_LDO2_SCG,
+ TPS65219_INT_LDO2_OC,
+ TPS65219_INT_LDO2_UV,
+ /* BUCK3 */
+ TPS65219_INT_BUCK3_SCG,
+ TPS65219_INT_BUCK3_OC,
+ TPS65219_INT_BUCK3_NEG_OC,
+ TPS65219_INT_BUCK3_UV,
+ /* BUCK1-2 */
+ TPS65219_INT_BUCK1_SCG,
+ TPS65219_INT_BUCK1_OC,
+ TPS65219_INT_BUCK1_NEG_OC,
+ TPS65219_INT_BUCK1_UV,
+ TPS65219_INT_BUCK2_SCG,
+ TPS65219_INT_BUCK2_OC,
+ TPS65219_INT_BUCK2_NEG_OC,
+ TPS65219_INT_BUCK2_UV,
+ /* Thermal Sensor */
+ TPS65219_INT_SENSOR_3_WARM,
+ TPS65219_INT_SENSOR_2_WARM,
+ TPS65219_INT_SENSOR_1_WARM,
+ TPS65219_INT_SENSOR_0_WARM,
+ TPS65219_INT_SENSOR_3_HOT,
+ TPS65219_INT_SENSOR_2_HOT,
+ TPS65219_INT_SENSOR_1_HOT,
+ TPS65219_INT_SENSOR_0_HOT,
+ /* Residual Voltage */
+ TPS65219_INT_BUCK1_RV,
+ TPS65219_INT_BUCK2_RV,
+ TPS65219_INT_BUCK3_RV,
+ TPS65219_INT_LDO1_RV,
+ TPS65219_INT_LDO2_RV,
+ TPS65215_INT_LDO2_RV,
+ TPS65214_INT_LDO2_RV,
+ TPS65219_INT_LDO3_RV,
+ TPS65219_INT_LDO4_RV,
+ /* Residual Voltage ShutDown */
+ TPS65219_INT_BUCK1_RV_SD,
+ TPS65219_INT_BUCK2_RV_SD,
+ TPS65219_INT_BUCK3_RV_SD,
+ TPS65219_INT_LDO1_RV_SD,
+ TPS65214_INT_LDO1_RV_SD,
+ TPS65215_INT_LDO2_RV_SD,
+ TPS65219_INT_LDO2_RV_SD,
+ TPS65219_INT_LDO3_RV_SD,
+ TPS65219_INT_LDO4_RV_SD,
+ TPS65219_INT_TIMEOUT,
+ /* Power Button */
+ TPS65219_INT_PB_FALLING_EDGE_DETECT,
+ TPS65219_INT_PB_RISING_EDGE_DETECT,
+};
+
+enum tps65214_regulator_id {
+ /*
+ * DCDCs same as TPS65219
+ * LDO1 maps to TPS65219's LDO3
+ * LDO2 is the same as TPS65219
+ */
+ TPS65214_LDO_1 = 3,
+ TPS65214_LDO_2 = 4,
+};
+
+enum tps65215_regulator_id {
+ /* DCDCs same as TPS65219 */
+ /* LDO1 is the same as TPS65219 */
+ TPS65215_LDO_2 = 4,
+};
+
+enum tps65219_regulator_id {
+ /* DCDCs */
+ TPS65219_BUCK_1,
+ TPS65219_BUCK_2,
+ TPS65219_BUCK_3,
+ /* LDOs */
+ TPS65219_LDO_1,
+ TPS65219_LDO_2,
+ TPS65219_LDO_3,
+ TPS65219_LDO_4,
+};
+
+/* Number of step-down converters available */
+#define TPS6521X_NUM_BUCKS 3
+/* Number of LDO voltage regulators available */
+#define TPS65219_NUM_LDO 4
+#define TPS65215_NUM_LDO 2
+#define TPS65214_NUM_LDO 2
+/* Number of total regulators available */
+#define TPS65219_NUM_REGULATOR (TPS6521X_NUM_BUCKS + TPS65219_NUM_LDO)
+#define TPS65215_NUM_REGULATOR (TPS6521X_NUM_BUCKS + TPS65215_NUM_LDO)
+#define TPS65214_NUM_REGULATOR (TPS6521X_NUM_BUCKS + TPS65214_NUM_LDO)
+
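
One way to read these counts, as a sketch: a probe routine could key the regulator count off enum pmic_id above (the real drivers may instead use per-chip match data):

static unsigned int tps6521x_num_regulators(enum pmic_id id)
{
	switch (id) {
	case TPS65214:
		return TPS65214_NUM_REGULATOR;	/* 3 bucks + 2 LDOs */
	case TPS65215:
		return TPS65215_NUM_REGULATOR;	/* 3 bucks + 2 LDOs */
	default:
		return TPS65219_NUM_REGULATOR;	/* 3 bucks + 4 LDOs */
	}
}
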
+/* Define the TPS65214 IRQ numbers */
+enum tps65214_irqs {
+ /* INT source registers */
+ TPS65214_TO_RV_SD_SET_IRQ,
+ TPS65214_RV_SET_IRQ,
+ TPS65214_SYS_SET_IRQ,
+ TPS65214_BUCK_1_2_SET_IRQ,
+ TPS65214_BUCK_3_SET_IRQ,
+ TPS65214_LDO_1_2_SET_IRQ,
+ TPS65214_PB_SET_IRQ = 7,
+};
+
+/* Define the TPS65215 IRQ numbers */
+enum tps65215_irqs {
+ /* INT source registers */
+ TPS65215_TO_RV_SD_SET_IRQ,
+ TPS65215_RV_SET_IRQ,
+ TPS65215_SYS_SET_IRQ,
+ TPS65215_BUCK_1_2_SET_IRQ,
+ TPS65215_BUCK_3_SET_IRQ,
+ TPS65215_LDO_1_SET_IRQ,
+ TPS65215_LDO_2_SET_IRQ,
+ TPS65215_PB_SET_IRQ,
+};
+
+/* Define the TPS65219 IRQ numbers */
+enum tps65219_irqs {
+ /* INT source registers */
+ TPS65219_TO_RV_SD_SET_IRQ,
+ TPS65219_RV_SET_IRQ,
+ TPS65219_SYS_SET_IRQ,
+ TPS65219_BUCK_1_2_SET_IRQ,
+ TPS65219_BUCK_3_SET_IRQ,
+ TPS65219_LDO_1_2_SET_IRQ,
+ TPS65219_LDO_3_4_SET_IRQ,
+ TPS65219_PB_SET_IRQ,
+};
+
+/**
+ * struct tps65219 - tps65219 sub-driver chip access routines
+ *
+ * Device data may be used to access the TPS65219 chip
+ *
+ * @dev: MFD device
+ * @regmap: Regmap for accessing the device registers
+ * @irq_data: Regmap irq data used for the irq chip
+ */
+struct tps65219 {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif /* MFD_TPS65219_H */
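
A function-driver sketch for the struct above: the regmap IRQ data is what lets MFD children translate the IRQ enum values earlier in this header into Linux interrupt numbers.

#include <linux/regmap.h>

/* Sketch: resolve the power-button falling-edge IRQ for request_irq(). */
static int example_pb_virq(struct tps65219 *tps)
{
	return regmap_irq_get_virq(tps->irq_data,
				   TPS65219_INT_PB_FALLING_EDGE_DETECT);
}
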
diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h
index 96187ed9f9bb..b19c2801a30e 100644
--- a/include/linux/mfd/tps6586x.h
+++ b/include/linux/mfd/tps6586x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MFD_TPS6586X_H
#define __LINUX_MFD_TPS6586X_H
@@ -17,6 +18,7 @@
#define TPS658621A 0x15
#define TPS658621CD 0x2c
#define TPS658623 0x1b
+#define TPS658624 0x0a
#define TPS658640 0x01
#define TPS658640v2 0x02
#define TPS658643 0x03
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index deffdcd0236f..f67ef0a4e041 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* tps65910.h -- TI TPS6591x
*
@@ -6,12 +7,6 @@
* Author: Graeme Gregory <gg@slimlogic.co.uk>
* Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
* Author: Arnaud Deconinck <a-deconinck@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_TPS65910_H
@@ -754,7 +749,7 @@
#define VDDCTRL_ST_SHIFT 0
-/*Register VDDCTRL_OP (0x28) bit definitios */
+/* Register VDDCTRL_OP (0x28) bit definitions */
#define VDDCTRL_OP_CMD_MASK 0x80
#define VDDCTRL_OP_CMD_SHIFT 7
#define VDDCTRL_OP_SEL_MASK 0x7F
@@ -895,11 +890,6 @@ struct tps65910 {
struct regmap *regmap;
unsigned long id;
- /* Client devices */
- struct tps65910_pmic *pmic;
- struct tps65910_rtc *rtc;
- struct tps65910_power *power;
-
/* Device node parsed board data */
struct tps65910_board *of_plat_data;
@@ -918,39 +908,4 @@ static inline int tps65910_chip_id(struct tps65910 *tps65910)
return tps65910->id;
}
-static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
- unsigned int *val)
-{
- return regmap_read(tps65910->regmap, reg, val);
-}
-
-static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
- unsigned int val)
-{
- return regmap_write(tps65910->regmap, reg, val);
-}
-
-static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, mask);
-}
-
-static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, 0);
-}
-
-static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask, u8 val)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, val);
-}
-
-static inline int tps65910_irq_get_virq(struct tps65910 *tps65910, int irq)
-{
- return regmap_irq_get_virq(tps65910->irq_data, irq);
-}
-
#endif /* __LINUX_MFD_TPS65910_H */
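
With the inline wrappers removed, callers operate on the regmap handle directly; the equivalents of the deleted helpers are simply:

/* tps65910_reg_set_bits(tps65910, reg, mask) becomes: */
regmap_update_bits(tps65910->regmap, reg, mask, mask);
/* tps65910_reg_clear_bits(tps65910, reg, mask) becomes: */
regmap_update_bits(tps65910->regmap, reg, mask, 0);
/* tps65910_irq_get_virq(tps65910, irq) becomes: */
regmap_irq_get_virq(tps65910->irq_data, irq);
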
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index b25d0297ba88..e5373c302722 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
@@ -322,6 +314,5 @@ struct tps65912 {
extern const struct regmap_config tps65912_regmap_config;
int tps65912_device_init(struct tps65912 *tps);
-int tps65912_device_exit(struct tps65912 *tps);
#endif /* __LINUX_MFD_TPS65912_H */
diff --git a/include/linux/mfd/tps6594.h b/include/linux/mfd/tps6594.h
new file mode 100644
index 000000000000..021db8875963
--- /dev/null
+++ b/include/linux/mfd/tps6594.h
@@ -0,0 +1,1346 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions to access TPS6594 Power Management IC
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#ifndef __LINUX_MFD_TPS6594_H
+#define __LINUX_MFD_TPS6594_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+struct regmap_irq_chip_data;
+
+/* Chip id list */
+enum pmic_id {
+ TPS6594,
+ TPS6593,
+ LP8764,
+ TPS65224,
+ TPS652G1,
+};
+
+/* Macro to get page index from register address */
+#define TPS6594_REG_TO_PAGE(reg) ((reg) >> 8)
+
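
The page macro simply takes the high byte of the extended address, so for example the page-4 watchdog registers near the end of this file decode as:

/* TPS6594_REG_WD_ANSWER_REG  = 0x401 -> page 4, offset 0x01 */
/* TPS6594_REG_VCCA_VMON_CTRL = 0x2b  -> page 0, offset 0x2b */
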
+/* Registers for page 0 */
+#define TPS6594_REG_DEV_REV 0x01
+
+#define TPS6594_REG_NVM_CODE_1 0x02
+#define TPS6594_REG_NVM_CODE_2 0x03
+
+#define TPS6594_REG_BUCKX_CTRL(buck_inst) (0x04 + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_CONF(buck_inst) (0x05 + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_VOUT_1(buck_inst) (0x0e + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_VOUT_2(buck_inst) (0x0f + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_PG_WINDOW(buck_inst) (0x18 + (buck_inst))
+
+#define TPS6594_REG_LDOX_CTRL(ldo_inst) (0x1d + (ldo_inst))
+#define TPS6594_REG_LDORTC_CTRL 0x22
+#define TPS6594_REG_LDOX_VOUT(ldo_inst) (0x23 + (ldo_inst))
+#define TPS6594_REG_LDOX_PG_WINDOW(ldo_inst) (0x27 + (ldo_inst))
+
+#define TPS6594_REG_VCCA_VMON_CTRL 0x2b
+#define TPS6594_REG_VCCA_PG_WINDOW 0x2c
+#define TPS6594_REG_VMON1_PG_WINDOW 0x2d
+#define TPS6594_REG_VMON1_PG_LEVEL 0x2e
+#define TPS6594_REG_VMON2_PG_WINDOW 0x2f
+#define TPS6594_REG_VMON2_PG_LEVEL 0x30
+
+#define TPS6594_REG_GPIOX_CONF(gpio_inst) (0x31 + (gpio_inst))
+#define TPS6594_REG_NPWRON_CONF 0x3c
+#define TPS6594_REG_GPIO_OUT_1 0x3d
+#define TPS6594_REG_GPIO_OUT_2 0x3e
+#define TPS6594_REG_GPIO_IN_1 0x3f
+#define TPS6594_REG_GPIO_IN_2 0x40
+#define TPS6594_REG_GPIOX_OUT(gpio_inst) (TPS6594_REG_GPIO_OUT_1 + (gpio_inst) / 8)
+#define TPS6594_REG_GPIOX_IN(gpio_inst) (TPS6594_REG_GPIO_IN_1 + (gpio_inst) / 8)
+
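
The two helper macros above pick the OUT/IN register for a given instance (eight lines per register); paired with TPS6594_BIT_GPIOX_OUT() further down in this header, a set-value sketch looks like:

#include <linux/regmap.h>

/* Sketch: drive GPIO instance gpio_inst (0-based) to `value`. */
static int example_gpio_set(struct regmap *map, unsigned int gpio_inst,
			    bool value)
{
	return regmap_update_bits(map, TPS6594_REG_GPIOX_OUT(gpio_inst),
				  TPS6594_BIT_GPIOX_OUT(gpio_inst),
				  value ? TPS6594_BIT_GPIOX_OUT(gpio_inst) : 0);
}
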
+#define TPS6594_REG_RAIL_SEL_1 0x41
+#define TPS6594_REG_RAIL_SEL_2 0x42
+#define TPS6594_REG_RAIL_SEL_3 0x43
+
+#define TPS6594_REG_FSM_TRIG_SEL_1 0x44
+#define TPS6594_REG_FSM_TRIG_SEL_2 0x45
+#define TPS6594_REG_FSM_TRIG_MASK_1 0x46
+#define TPS6594_REG_FSM_TRIG_MASK_2 0x47
+#define TPS6594_REG_FSM_TRIG_MASK_3 0x48
+
+#define TPS6594_REG_MASK_BUCK1_2 0x49
+#define TPS65224_REG_MASK_BUCKS 0x49
+#define TPS6594_REG_MASK_BUCK3_4 0x4a
+#define TPS6594_REG_MASK_BUCK5 0x4b
+#define TPS6594_REG_MASK_LDO1_2 0x4c
+#define TPS65224_REG_MASK_LDOS 0x4c
+#define TPS6594_REG_MASK_LDO3_4 0x4d
+#define TPS6594_REG_MASK_VMON 0x4e
+#define TPS6594_REG_MASK_GPIO_FALL 0x4f
+#define TPS6594_REG_MASK_GPIO_RISE 0x50
+#define TPS6594_REG_MASK_GPIO9_11 0x51
+#define TPS6594_REG_MASK_STARTUP 0x52
+#define TPS6594_REG_MASK_MISC 0x53
+#define TPS6594_REG_MASK_MODERATE_ERR 0x54
+#define TPS6594_REG_MASK_FSM_ERR 0x56
+#define TPS6594_REG_MASK_COMM_ERR 0x57
+#define TPS6594_REG_MASK_READBACK_ERR 0x58
+#define TPS6594_REG_MASK_ESM 0x59
+
+#define TPS6594_REG_INT_TOP 0x5a
+#define TPS6594_REG_INT_BUCK 0x5b
+#define TPS6594_REG_INT_BUCK1_2 0x5c
+#define TPS6594_REG_INT_BUCK3_4 0x5d
+#define TPS6594_REG_INT_BUCK5 0x5e
+#define TPS6594_REG_INT_LDO_VMON 0x5f
+#define TPS6594_REG_INT_LDO1_2 0x60
+#define TPS6594_REG_INT_LDO3_4 0x61
+#define TPS6594_REG_INT_VMON 0x62
+#define TPS6594_REG_INT_GPIO 0x63
+#define TPS6594_REG_INT_GPIO1_8 0x64
+#define TPS6594_REG_INT_STARTUP 0x65
+#define TPS6594_REG_INT_MISC 0x66
+#define TPS6594_REG_INT_MODERATE_ERR 0x67
+#define TPS6594_REG_INT_SEVERE_ERR 0x68
+#define TPS6594_REG_INT_FSM_ERR 0x69
+#define TPS6594_REG_INT_COMM_ERR 0x6a
+#define TPS6594_REG_INT_READBACK_ERR 0x6b
+#define TPS6594_REG_INT_ESM 0x6c
+
+#define TPS6594_REG_STAT_BUCK1_2 0x6d
+#define TPS6594_REG_STAT_BUCK3_4 0x6e
+#define TPS6594_REG_STAT_BUCK5 0x6f
+#define TPS6594_REG_STAT_LDO1_2 0x70
+#define TPS6594_REG_STAT_LDO3_4 0x71
+#define TPS6594_REG_STAT_VMON 0x72
+#define TPS6594_REG_STAT_STARTUP 0x73
+#define TPS6594_REG_STAT_MISC 0x74
+#define TPS6594_REG_STAT_MODERATE_ERR 0x75
+#define TPS6594_REG_STAT_SEVERE_ERR 0x76
+#define TPS6594_REG_STAT_READBACK_ERR 0x77
+
+#define TPS6594_REG_PGOOD_SEL_1 0x78
+#define TPS6594_REG_PGOOD_SEL_2 0x79
+#define TPS6594_REG_PGOOD_SEL_3 0x7a
+#define TPS6594_REG_PGOOD_SEL_4 0x7b
+
+#define TPS6594_REG_PLL_CTRL 0x7c
+
+#define TPS6594_REG_CONFIG_1 0x7d
+#define TPS6594_REG_CONFIG_2 0x7e
+
+#define TPS6594_REG_ENABLE_DRV_REG 0x80
+
+#define TPS6594_REG_MISC_CTRL 0x81
+
+#define TPS6594_REG_ENABLE_DRV_STAT 0x82
+
+#define TPS6594_REG_RECOV_CNT_REG_1 0x83
+#define TPS6594_REG_RECOV_CNT_REG_2 0x84
+
+#define TPS6594_REG_FSM_I2C_TRIGGERS 0x85
+#define TPS6594_REG_FSM_NSLEEP_TRIGGERS 0x86
+
+#define TPS6594_REG_BUCK_RESET_REG 0x87
+
+#define TPS6594_REG_SPREAD_SPECTRUM_1 0x88
+
+#define TPS6594_REG_FREQ_SEL 0x8a
+
+#define TPS6594_REG_FSM_STEP_SIZE 0x8b
+
+#define TPS6594_REG_LDO_RV_TIMEOUT_REG_1 0x8c
+#define TPS6594_REG_LDO_RV_TIMEOUT_REG_2 0x8d
+
+#define TPS6594_REG_USER_SPARE_REGS 0x8e
+
+#define TPS6594_REG_ESM_MCU_START_REG 0x8f
+#define TPS6594_REG_ESM_MCU_DELAY1_REG 0x90
+#define TPS6594_REG_ESM_MCU_DELAY2_REG 0x91
+#define TPS6594_REG_ESM_MCU_MODE_CFG 0x92
+#define TPS6594_REG_ESM_MCU_HMAX_REG 0x93
+#define TPS6594_REG_ESM_MCU_HMIN_REG 0x94
+#define TPS6594_REG_ESM_MCU_LMAX_REG 0x95
+#define TPS6594_REG_ESM_MCU_LMIN_REG 0x96
+#define TPS6594_REG_ESM_MCU_ERR_CNT_REG 0x97
+#define TPS6594_REG_ESM_SOC_START_REG 0x98
+#define TPS6594_REG_ESM_SOC_DELAY1_REG 0x99
+#define TPS6594_REG_ESM_SOC_DELAY2_REG 0x9a
+#define TPS6594_REG_ESM_SOC_MODE_CFG 0x9b
+#define TPS6594_REG_ESM_SOC_HMAX_REG 0x9c
+#define TPS6594_REG_ESM_SOC_HMIN_REG 0x9d
+#define TPS6594_REG_ESM_SOC_LMAX_REG 0x9e
+#define TPS6594_REG_ESM_SOC_LMIN_REG 0x9f
+#define TPS6594_REG_ESM_SOC_ERR_CNT_REG 0xa0
+
+#define TPS6594_REG_REGISTER_LOCK 0xa1
+
+#define TPS65224_REG_SRAM_ACCESS_1 0xa2
+#define TPS65224_REG_SRAM_ACCESS_2 0xa3
+#define TPS65224_REG_SRAM_ADDR_CTRL 0xa4
+#define TPS65224_REG_RECOV_CNT_PFSM_INCR 0xa5
+#define TPS6594_REG_MANUFACTURING_VER 0xa6
+
+#define TPS6594_REG_CUSTOMER_NVM_ID_REG 0xa7
+
+#define TPS6594_REG_VMON_CONF_REG 0xa8
+
+#define TPS6594_REG_SOFT_REBOOT_REG 0xab
+
+#define TPS65224_REG_ADC_CTRL 0xac
+#define TPS65224_REG_ADC_RESULT_REG_1 0xad
+#define TPS65224_REG_ADC_RESULT_REG_2 0xae
+#define TPS6594_REG_RTC_SECONDS 0xb5
+#define TPS6594_REG_RTC_MINUTES 0xb6
+#define TPS6594_REG_RTC_HOURS 0xb7
+#define TPS6594_REG_RTC_DAYS 0xb8
+#define TPS6594_REG_RTC_MONTHS 0xb9
+#define TPS6594_REG_RTC_YEARS 0xba
+#define TPS6594_REG_RTC_WEEKS 0xbb
+
+#define TPS6594_REG_ALARM_SECONDS 0xbc
+#define TPS6594_REG_ALARM_MINUTES 0xbd
+#define TPS6594_REG_ALARM_HOURS 0xbe
+#define TPS6594_REG_ALARM_DAYS 0xbf
+#define TPS6594_REG_ALARM_MONTHS 0xc0
+#define TPS6594_REG_ALARM_YEARS 0xc1
+
+#define TPS6594_REG_RTC_CTRL_1 0xc2
+#define TPS6594_REG_RTC_CTRL_2 0xc3
+#define TPS65224_REG_STARTUP_CTRL 0xc3
+#define TPS6594_REG_RTC_STATUS 0xc4
+#define TPS6594_REG_RTC_INTERRUPTS 0xc5
+#define TPS6594_REG_RTC_COMP_LSB 0xc6
+#define TPS6594_REG_RTC_COMP_MSB 0xc7
+#define TPS6594_REG_RTC_RESET_STATUS 0xc8
+
+#define TPS6594_REG_SCRATCH_PAD_REG_1 0xc9
+#define TPS6594_REG_SCRATCH_PAD_REG_2 0xca
+#define TPS6594_REG_SCRATCH_PAD_REG_3 0xcb
+#define TPS6594_REG_SCRATCH_PAD_REG_4 0xcc
+
+#define TPS6594_REG_PFSM_DELAY_REG_1 0xcd
+#define TPS6594_REG_PFSM_DELAY_REG_2 0xce
+#define TPS6594_REG_PFSM_DELAY_REG_3 0xcf
+#define TPS6594_REG_PFSM_DELAY_REG_4 0xd0
+#define TPS65224_REG_ADC_GAIN_COMP_REG 0xd0
+#define TPS65224_REG_CRC_CALC_CONTROL 0xef
+#define TPS65224_REG_REGMAP_USER_CRC_LOW 0xf0
+#define TPS65224_REG_REGMAP_USER_CRC_HIGH 0xf1
+
+/* Registers for page 1 */
+#define TPS6594_REG_SERIAL_IF_CONFIG 0x11a
+#define TPS6594_REG_I2C1_ID 0x122
+#define TPS6594_REG_I2C2_ID 0x123
+
+/* Registers for page 4 */
+#define TPS6594_REG_WD_ANSWER_REG 0x401
+#define TPS6594_REG_WD_QUESTION_ANSW_CNT 0x402
+#define TPS6594_REG_WD_WIN1_CFG 0x403
+#define TPS6594_REG_WD_WIN2_CFG 0x404
+#define TPS6594_REG_WD_LONGWIN_CFG 0x405
+#define TPS6594_REG_WD_MODE_REG 0x406
+#define TPS6594_REG_WD_QA_CFG 0x407
+#define TPS6594_REG_WD_ERR_STATUS 0x408
+#define TPS6594_REG_WD_THR_CFG 0x409
+#define TPS6594_REG_DWD_FAIL_CNT_REG 0x40a
+
+/* BUCKX_CTRL register field definition */
+#define TPS6594_BIT_BUCK_EN BIT(0)
+#define TPS6594_BIT_BUCK_FPWM BIT(1)
+#define TPS6594_BIT_BUCK_FPWM_MP BIT(2)
+#define TPS6594_BIT_BUCK_VSEL BIT(3)
+#define TPS6594_BIT_BUCK_VMON_EN BIT(4)
+#define TPS6594_BIT_BUCK_PLDN BIT(5)
+#define TPS6594_BIT_BUCK_RV_SEL BIT(7)
+
+/* TPS6594 BUCKX_CONF register field definition */
+#define TPS6594_MASK_BUCK_SLEW_RATE GENMASK(2, 0)
+#define TPS6594_MASK_BUCK_ILIM GENMASK(5, 3)
+
+/* TPS65224 BUCKX_CONF register field definition */
+#define TPS65224_MASK_BUCK_SLEW_RATE GENMASK(1, 0)
+
+/* TPS6594 BUCKX_PG_WINDOW register field definition */
+#define TPS6594_MASK_BUCK_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_BUCK_UV_THR GENMASK(5, 3)
+
+/* TPS65224 BUCKX_PG_WINDOW register field definition */
+#define TPS65224_MASK_BUCK_VMON_THR GENMASK(1, 0)
+
+/* TPS6594 BUCKX_VOUT register field definition */
+#define TPS6594_MASK_BUCKS_VSET GENMASK(7, 0)
+
+/* TPS65224 BUCKX_VOUT register field definition */
+#define TPS65224_MASK_BUCK1_VSET GENMASK(7, 0)
+#define TPS65224_MASK_BUCKS_VSET GENMASK(6, 0)
+
+/* LDOX_CTRL register field definition */
+#define TPS6594_BIT_LDO_EN BIT(0)
+#define TPS6594_BIT_LDO_SLOW_RAMP BIT(1)
+#define TPS6594_BIT_LDO_VMON_EN BIT(4)
+#define TPS6594_MASK_LDO_PLDN GENMASK(6, 5)
+#define TPS6594_BIT_LDO_RV_SEL BIT(7)
+#define TPS65224_BIT_LDO_DISCHARGE_EN BIT(5)
+
+/* LDORTC_CTRL register field definition */
+#define TPS6594_BIT_LDORTC_DIS BIT(0)
+
+/* LDOX_VOUT register field definition */
+#define TPS6594_MASK_LDO123_VSET GENMASK(6, 1)
+#define TPS6594_MASK_LDO4_VSET GENMASK(6, 0)
+#define TPS6594_BIT_LDO_BYPASS BIT(7)
+
+/* LDOX_PG_WINDOW register field definition */
+#define TPS6594_MASK_LDO_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_LDO_UV_THR GENMASK(5, 3)
+
+/* LDOX_PG_WINDOW register field definition */
+#define TPS65224_MASK_LDO_VMON_THR GENMASK(1, 0)
+
+/* VCCA_VMON_CTRL register field definition */
+#define TPS6594_BIT_VMON_EN BIT(0)
+#define TPS6594_BIT_VMON1_EN BIT(1)
+#define TPS6594_BIT_VMON1_RV_SEL BIT(2)
+#define TPS6594_BIT_VMON2_EN BIT(3)
+#define TPS6594_BIT_VMON2_RV_SEL BIT(4)
+#define TPS6594_BIT_VMON_DEGLITCH_SEL BIT(5)
+#define TPS65224_BIT_VMON_DEGLITCH_SEL GENMASK(7, 5)
+
+/* VCCA_PG_WINDOW register field definition */
+#define TPS6594_MASK_VCCA_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_VCCA_UV_THR GENMASK(5, 3)
+#define TPS65224_MASK_VCCA_VMON_THR GENMASK(1, 0)
+#define TPS6594_BIT_VCCA_PG_SET BIT(6)
+
+/* VMONX_PG_WINDOW register field definition */
+#define TPS6594_MASK_VMONX_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_VMONX_UV_THR GENMASK(5, 3)
+#define TPS6594_BIT_VMONX_RANGE BIT(6)
+
+/* VMONX_PG_WINDOW register field definition */
+#define TPS65224_MASK_VMONX_THR GENMASK(1, 0)
+
+/* GPIOX_CONF register field definition */
+#define TPS6594_BIT_GPIO_DIR BIT(0)
+#define TPS6594_BIT_GPIO_OD BIT(1)
+#define TPS6594_BIT_GPIO_PU_SEL BIT(2)
+#define TPS6594_BIT_GPIO_PU_PD_EN BIT(3)
+#define TPS6594_BIT_GPIO_DEGLITCH_EN BIT(4)
+#define TPS6594_MASK_GPIO_SEL GENMASK(7, 5)
+#define TPS65224_MASK_GPIO_SEL GENMASK(6, 5)
+#define TPS65224_MASK_GPIO_SEL_GPIO6 GENMASK(7, 5)
+
+/* NPWRON_CONF register field definition */
+#define TPS6594_BIT_NRSTOUT_OD BIT(0)
+#define TPS6594_BIT_ENABLE_PU_SEL BIT(2)
+#define TPS6594_BIT_ENABLE_PU_PD_EN BIT(3)
+#define TPS6594_BIT_ENABLE_DEGLITCH_EN BIT(4)
+#define TPS6594_BIT_ENABLE_POL BIT(5)
+#define TPS6594_MASK_NPWRON_SEL GENMASK(7, 6)
+
+/* POWER_ON_CONFIG register field definition */
+#define TPS65224_BIT_NINT_ENDRV_PU_SEL BIT(0)
+#define TPS65224_BIT_NINT_ENDRV_SEL BIT(1)
+#define TPS65224_BIT_EN_PB_DEGL BIT(5)
+#define TPS65224_MASK_EN_PB_VSENSE_CONFIG GENMASK(7, 6)
+
+/* GPIO_OUT_X register field definition */
+#define TPS6594_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst) % 8)
+
+/* GPIO_IN_X register field definition */
+#define TPS6594_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst) % 8)
+#define TPS6594_BIT_NPWRON_IN BIT(3)
+
+/* GPIO_OUT_X register field definition */
+#define TPS65224_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst))
+
+/* GPIO_IN_X register field definition */
+#define TPS65224_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst))
+
+/* RAIL_SEL_1 register field definition */
+#define TPS6594_MASK_BUCK1_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_BUCK2_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_BUCK3_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_BUCK4_GRP_SEL GENMASK(7, 6)
+
+/* RAIL_SEL_2 register field definition */
+#define TPS6594_MASK_BUCK5_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_LDO1_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_LDO2_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_LDO3_GRP_SEL GENMASK(7, 6)
+
+/* RAIL_SEL_3 register field definition */
+#define TPS6594_MASK_LDO4_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_VCCA_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_VMON1_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_VMON2_GRP_SEL GENMASK(7, 6)
+
+/* FSM_TRIG_SEL_1 register field definition */
+#define TPS6594_MASK_MCU_RAIL_TRIG GENMASK(1, 0)
+#define TPS6594_MASK_SOC_RAIL_TRIG GENMASK(3, 2)
+#define TPS6594_MASK_OTHER_RAIL_TRIG GENMASK(5, 4)
+#define TPS6594_MASK_SEVERE_ERR_TRIG GENMASK(7, 6)
+
+/* FSM_TRIG_SEL_2 register field definition */
+#define TPS6594_MASK_MODERATE_ERR_TRIG GENMASK(1, 0)
+
+/* FSM_TRIG_MASK_X register field definition */
+#define TPS6594_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 8)
+#define TPS6594_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 8 + 1)
+
+#define TPS65224_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 6)
+#define TPS65224_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 6 + 1)
+
+/* MASK_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_ILIM_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* MASK_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_ILIM_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* MASK_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_MASK BIT(0)
+#define TPS6594_BIT_VCCA_UV_MASK BIT(1)
+#define TPS6594_BIT_VMON1_OV_MASK BIT(2)
+#define TPS6594_BIT_VMON1_UV_MASK BIT(3)
+#define TPS6594_BIT_VMON2_OV_MASK BIT(5)
+#define TPS6594_BIT_VMON2_UV_MASK BIT(6)
+
+/* MASK_BUCK Register field definition */
+#define TPS65224_BIT_BUCK1_UVOV_MASK BIT(0)
+#define TPS65224_BIT_BUCK2_UVOV_MASK BIT(1)
+#define TPS65224_BIT_BUCK3_UVOV_MASK BIT(2)
+#define TPS65224_BIT_BUCK4_UVOV_MASK BIT(4)
+
+/* MASK_LDO_VMON register field definition */
+#define TPS65224_BIT_LDO1_UVOV_MASK BIT(0)
+#define TPS65224_BIT_LDO2_UVOV_MASK BIT(1)
+#define TPS65224_BIT_LDO3_UVOV_MASK BIT(2)
+#define TPS65224_BIT_VCCA_UVOV_MASK BIT(4)
+#define TPS65224_BIT_VMON1_UVOV_MASK BIT(5)
+#define TPS65224_BIT_VMON2_UVOV_MASK BIT(6)
+
+/* MASK_GPIOX register field definition */
+#define TPS6594_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \
+ (gpio_inst) : (gpio_inst) % 8)
+#define TPS6594_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \
+ (gpio_inst) : (gpio_inst) % 8 + 3)
+/* MASK_GPIOX register field definition */
+#define TPS65224_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst))
+#define TPS65224_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst))
+
+/* MASK_STARTUP register field definition */
+#define TPS6594_BIT_NPWRON_START_MASK BIT(0)
+#define TPS6594_BIT_ENABLE_MASK BIT(1)
+#define TPS6594_BIT_FSD_MASK BIT(4)
+#define TPS6594_BIT_SOFT_REBOOT_MASK BIT(5)
+#define TPS65224_BIT_VSENSE_MASK BIT(0)
+#define TPS65224_BIT_PB_SHORT_MASK BIT(2)
+
+/* MASK_MISC register field definition */
+#define TPS6594_BIT_BIST_PASS_MASK BIT(0)
+#define TPS6594_BIT_EXT_CLK_MASK BIT(1)
+#define TPS65224_BIT_REG_UNLOCK_MASK BIT(2)
+#define TPS6594_BIT_TWARN_MASK BIT(3)
+#define TPS65224_BIT_PB_LONG_MASK BIT(4)
+#define TPS65224_BIT_PB_FALL_MASK BIT(5)
+#define TPS65224_BIT_PB_RISE_MASK BIT(6)
+#define TPS65224_BIT_ADC_CONV_READY_MASK BIT(7)
+
+/* MASK_MODERATE_ERR register field definition */
+#define TPS6594_BIT_BIST_FAIL_MASK BIT(1)
+#define TPS6594_BIT_REG_CRC_ERR_MASK BIT(2)
+#define TPS6594_BIT_SPMI_ERR_MASK BIT(4)
+#define TPS6594_BIT_NPWRON_LONG_MASK BIT(5)
+#define TPS6594_BIT_NINT_READBACK_MASK BIT(6)
+#define TPS6594_BIT_NRSTOUT_READBACK_MASK BIT(7)
+
+/* MASK_FSM_ERR register field definition */
+#define TPS6594_BIT_IMM_SHUTDOWN_MASK BIT(0)
+#define TPS6594_BIT_ORD_SHUTDOWN_MASK BIT(1)
+#define TPS6594_BIT_MCU_PWR_ERR_MASK BIT(2)
+#define TPS6594_BIT_SOC_PWR_ERR_MASK BIT(3)
+#define TPS65224_BIT_COMM_ERR_MASK BIT(4)
+#define TPS65224_BIT_I2C2_ERR_MASK BIT(5)
+
+/* MASK_COMM_ERR register field definition */
+#define TPS6594_BIT_COMM_FRM_ERR_MASK BIT(0)
+#define TPS6594_BIT_COMM_CRC_ERR_MASK BIT(1)
+#define TPS6594_BIT_COMM_ADR_ERR_MASK BIT(3)
+#define TPS6594_BIT_I2C2_CRC_ERR_MASK BIT(5)
+#define TPS6594_BIT_I2C2_ADR_ERR_MASK BIT(7)
+
+/* MASK_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_MASK BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_MASK BIT(3)
+
+/* MASK_ESM register field definition */
+#define TPS6594_BIT_ESM_SOC_PIN_MASK BIT(0)
+#define TPS6594_BIT_ESM_SOC_FAIL_MASK BIT(1)
+#define TPS6594_BIT_ESM_SOC_RST_MASK BIT(2)
+#define TPS6594_BIT_ESM_MCU_PIN_MASK BIT(3)
+#define TPS6594_BIT_ESM_MCU_FAIL_MASK BIT(4)
+#define TPS6594_BIT_ESM_MCU_RST_MASK BIT(5)
+
+/* INT_TOP register field definition */
+#define TPS6594_BIT_BUCK_INT BIT(0)
+#define TPS6594_BIT_LDO_VMON_INT BIT(1)
+#define TPS6594_BIT_GPIO_INT BIT(2)
+#define TPS6594_BIT_STARTUP_INT BIT(3)
+#define TPS6594_BIT_MISC_INT BIT(4)
+#define TPS6594_BIT_MODERATE_ERR_INT BIT(5)
+#define TPS6594_BIT_SEVERE_ERR_INT BIT(6)
+#define TPS6594_BIT_FSM_ERR_INT BIT(7)
+
+/* INT_BUCK register field definition */
+#define TPS6594_BIT_BUCK1_2_INT BIT(0)
+#define TPS6594_BIT_BUCK3_4_INT BIT(1)
+#define TPS6594_BIT_BUCK5_INT BIT(2)
+
+/* INT_BUCK register field definition */
+#define TPS65224_BIT_BUCK1_UVOV_INT BIT(0)
+#define TPS65224_BIT_BUCK2_UVOV_INT BIT(1)
+#define TPS65224_BIT_BUCK3_UVOV_INT BIT(2)
+#define TPS65224_BIT_BUCK4_UVOV_INT BIT(3)
+
+/* INT_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_INT(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_SC_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 2)
+#define TPS6594_BIT_BUCKX_ILIM_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
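
Worked example for the shifted-bit arithmetic above (an observation, not a macro from this header): the INT_BUCK1_2/3_4/5 registers are consecutive and hold two bucks each, so for buck_inst 0..4:

/* reg = TPS6594_REG_INT_BUCK1_2 + (buck_inst >> 1);
 * bit = TPS6594_BIT_BUCKX_UV_INT(buck_inst);
 * e.g. buck_inst = 3: ((3 << 2) % 8) + 1 = 5 -> BIT(5) in reg 0x5d. */
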
+/* INT_LDO_VMON register field definition */
+#define TPS6594_BIT_LDO1_2_INT BIT(0)
+#define TPS6594_BIT_LDO3_4_INT BIT(1)
+#define TPS6594_BIT_VCCA_INT BIT(4)
+
+/* INT_LDO_VMON register field definition */
+#define TPS65224_BIT_LDO1_UVOV_INT BIT(0)
+#define TPS65224_BIT_LDO2_UVOV_INT BIT(1)
+#define TPS65224_BIT_LDO3_UVOV_INT BIT(2)
+#define TPS65224_BIT_VCCA_UVOV_INT BIT(4)
+#define TPS65224_BIT_VMON1_UVOV_INT BIT(5)
+#define TPS65224_BIT_VMON2_UVOV_INT BIT(6)
+
+/* INT_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_SC_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 2)
+#define TPS6594_BIT_LDOX_ILIM_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* INT_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_INT BIT(0)
+#define TPS6594_BIT_VCCA_UV_INT BIT(1)
+#define TPS6594_BIT_VMON1_OV_INT BIT(2)
+#define TPS6594_BIT_VMON1_UV_INT BIT(3)
+#define TPS6594_BIT_VMON1_RV_INT BIT(4)
+#define TPS6594_BIT_VMON2_OV_INT BIT(5)
+#define TPS6594_BIT_VMON2_UV_INT BIT(6)
+#define TPS6594_BIT_VMON2_RV_INT BIT(7)
+
+/* INT_GPIO register field definition */
+#define TPS6594_BIT_GPIO9_INT BIT(0)
+#define TPS6594_BIT_GPIO10_INT BIT(1)
+#define TPS6594_BIT_GPIO11_INT BIT(2)
+#define TPS6594_BIT_GPIO1_8_INT BIT(3)
+
+/* INT_GPIOX register field definition */
+#define TPS6594_BIT_GPIOX_INT(gpio_inst) BIT(gpio_inst)
+
+/* INT_GPIO register field definition */
+#define TPS65224_BIT_GPIO1_INT BIT(0)
+#define TPS65224_BIT_GPIO2_INT BIT(1)
+#define TPS65224_BIT_GPIO3_INT BIT(2)
+#define TPS65224_BIT_GPIO4_INT BIT(3)
+#define TPS65224_BIT_GPIO5_INT BIT(4)
+#define TPS65224_BIT_GPIO6_INT BIT(5)
+
+/* INT_STARTUP register field definition */
+#define TPS6594_BIT_NPWRON_START_INT BIT(0)
+#define TPS65224_BIT_VSENSE_INT BIT(0)
+#define TPS6594_BIT_ENABLE_INT BIT(1)
+#define TPS6594_BIT_RTC_INT BIT(2)
+#define TPS65224_BIT_PB_SHORT_INT BIT(2)
+#define TPS6594_BIT_FSD_INT BIT(4)
+#define TPS6594_BIT_SOFT_REBOOT_INT BIT(5)
+
+/* INT_MISC register field definition */
+#define TPS6594_BIT_BIST_PASS_INT BIT(0)
+#define TPS6594_BIT_EXT_CLK_INT BIT(1)
+#define TPS65224_BIT_REG_UNLOCK_INT BIT(2)
+#define TPS6594_BIT_TWARN_INT BIT(3)
+#define TPS65224_BIT_PB_LONG_INT BIT(4)
+#define TPS65224_BIT_PB_FALL_INT BIT(5)
+#define TPS65224_BIT_PB_RISE_INT BIT(6)
+#define TPS65224_BIT_ADC_CONV_READY_INT BIT(7)
+
+/* INT_MODERATE_ERR register field definition */
+#define TPS6594_BIT_TSD_ORD_INT BIT(0)
+#define TPS6594_BIT_BIST_FAIL_INT BIT(1)
+#define TPS6594_BIT_REG_CRC_ERR_INT BIT(2)
+#define TPS6594_BIT_RECOV_CNT_INT BIT(3)
+#define TPS6594_BIT_SPMI_ERR_INT BIT(4)
+#define TPS6594_BIT_NPWRON_LONG_INT BIT(5)
+#define TPS6594_BIT_NINT_READBACK_INT BIT(6)
+#define TPS6594_BIT_NRSTOUT_READBACK_INT BIT(7)
+
+/* INT_SEVERE_ERR register field definition */
+#define TPS6594_BIT_TSD_IMM_INT BIT(0)
+#define TPS6594_BIT_VCCA_OVP_INT BIT(1)
+#define TPS6594_BIT_PFSM_ERR_INT BIT(2)
+#define TPS65224_BIT_BG_XMON_INT BIT(3)
+
+/* INT_FSM_ERR register field definition */
+#define TPS6594_BIT_IMM_SHUTDOWN_INT BIT(0)
+#define TPS6594_BIT_ORD_SHUTDOWN_INT BIT(1)
+#define TPS6594_BIT_MCU_PWR_ERR_INT BIT(2)
+#define TPS6594_BIT_SOC_PWR_ERR_INT BIT(3)
+#define TPS6594_BIT_COMM_ERR_INT BIT(4)
+#define TPS6594_BIT_READBACK_ERR_INT BIT(5)
+#define TPS65224_BIT_I2C2_ERR_INT BIT(5)
+#define TPS6594_BIT_ESM_INT BIT(6)
+#define TPS6594_BIT_WD_INT BIT(7)
+
+/* INT_COMM_ERR register field definition */
+#define TPS6594_BIT_COMM_FRM_ERR_INT BIT(0)
+#define TPS6594_BIT_COMM_CRC_ERR_INT BIT(1)
+#define TPS6594_BIT_COMM_ADR_ERR_INT BIT(3)
+#define TPS6594_BIT_I2C2_CRC_ERR_INT BIT(5)
+#define TPS6594_BIT_I2C2_ADR_ERR_INT BIT(7)
+
+/* INT_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_INT BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_INT BIT(3)
+
+/* INT_ESM register field definition */
+#define TPS6594_BIT_ESM_SOC_PIN_INT BIT(0)
+#define TPS6594_BIT_ESM_SOC_FAIL_INT BIT(1)
+#define TPS6594_BIT_ESM_SOC_RST_INT BIT(2)
+#define TPS6594_BIT_ESM_MCU_PIN_INT BIT(3)
+#define TPS6594_BIT_ESM_MCU_FAIL_INT BIT(4)
+#define TPS6594_BIT_ESM_MCU_RST_INT BIT(5)
+
+/* STAT_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_STAT(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_STAT(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_ILIM_STAT(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* STAT_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_ILIM_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* STAT_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_STAT BIT(0)
+#define TPS6594_BIT_VCCA_UV_STAT BIT(1)
+#define TPS6594_BIT_VMON1_OV_STAT BIT(2)
+#define TPS6594_BIT_VMON1_UV_STAT BIT(3)
+#define TPS6594_BIT_VMON2_OV_STAT BIT(5)
+#define TPS6594_BIT_VMON2_UV_STAT BIT(6)
+
+/* STAT_LDO_VMON register field definition */
+#define TPS65224_BIT_LDO1_UVOV_STAT BIT(0)
+#define TPS65224_BIT_LDO2_UVOV_STAT BIT(1)
+#define TPS65224_BIT_LDO3_UVOV_STAT BIT(2)
+#define TPS65224_BIT_VCCA_UVOV_STAT BIT(4)
+#define TPS65224_BIT_VMON1_UVOV_STAT BIT(5)
+#define TPS65224_BIT_VMON2_UVOV_STAT BIT(6)
+
+/* STAT_STARTUP register field definition */
+#define TPS65224_BIT_VSENSE_STAT BIT(0)
+#define TPS6594_BIT_ENABLE_STAT BIT(1)
+#define TPS65224_BIT_PB_LEVEL_STAT BIT(2)
+
+/* STAT_MISC register field definition */
+#define TPS6594_BIT_EXT_CLK_STAT BIT(1)
+#define TPS6594_BIT_TWARN_STAT BIT(3)
+
+/* STAT_MODERATE_ERR register field definition */
+#define TPS6594_BIT_TSD_ORD_STAT BIT(0)
+
+/* STAT_SEVERE_ERR register field definition */
+#define TPS6594_BIT_TSD_IMM_STAT BIT(0)
+#define TPS6594_BIT_VCCA_OVP_STAT BIT(1)
+#define TPS65224_BIT_BG_XMON_STAT BIT(3)
+
+/* STAT_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_STAT BIT(0)
+#define TPS6594_BIT_NINT_READBACK_STAT BIT(1)
+#define TPS6594_BIT_NRSTOUT_READBACK_STAT BIT(2)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_STAT BIT(3)
+
+/* PGOOD_SEL_1 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_BUCK1 GENMASK(1, 0)
+#define TPS6594_MASK_PGOOD_SEL_BUCK2 GENMASK(3, 2)
+#define TPS6594_MASK_PGOOD_SEL_BUCK3 GENMASK(5, 4)
+#define TPS6594_MASK_PGOOD_SEL_BUCK4 GENMASK(7, 6)
+
+/* PGOOD_SEL_2 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_BUCK5 GENMASK(1, 0)
+
+/* PGOOD_SEL_3 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_LDO1 GENMASK(1, 0)
+#define TPS6594_MASK_PGOOD_SEL_LDO2 GENMASK(3, 2)
+#define TPS6594_MASK_PGOOD_SEL_LDO3 GENMASK(5, 4)
+#define TPS6594_MASK_PGOOD_SEL_LDO4 GENMASK(7, 6)
+
+/* PGOOD_SEL_4 register field definition */
+#define TPS6594_BIT_PGOOD_SEL_VCCA BIT(0)
+#define TPS6594_BIT_PGOOD_SEL_VMON1 BIT(1)
+#define TPS6594_BIT_PGOOD_SEL_VMON2 BIT(2)
+#define TPS6594_BIT_PGOOD_SEL_TDIE_WARN BIT(3)
+#define TPS6594_BIT_PGOOD_SEL_NRSTOUT BIT(4)
+#define TPS6594_BIT_PGOOD_SEL_NRSTOUT_SOC BIT(5)
+#define TPS6594_BIT_PGOOD_POL BIT(6)
+#define TPS6594_BIT_PGOOD_WINDOW BIT(7)
+
+/* PLL_CTRL register field definition */
+#define TPS6594_MASK_EXT_CLK_FREQ GENMASK(1, 0)
+
+/* CONFIG_1 register field definition */
+#define TPS6594_BIT_TWARN_LEVEL BIT(0)
+#define TPS6594_BIT_TSD_ORD_LEVEL BIT(1)
+#define TPS6594_BIT_I2C1_HS BIT(3)
+#define TPS6594_BIT_I2C2_HS BIT(4)
+#define TPS6594_BIT_EN_ILIM_FSM_CTRL BIT(5)
+#define TPS6594_BIT_NSLEEP1_MASK BIT(6)
+#define TPS6594_BIT_NSLEEP2_MASK BIT(7)
+
+/* CONFIG_2 register field definition */
+#define TPS6594_BIT_BB_CHARGER_EN BIT(0)
+#define TPS6594_BIT_BB_ICHR BIT(1)
+#define TPS6594_MASK_BB_VEOC GENMASK(3, 2)
+#define TPS65224_BIT_I2C1_SPI_CRC_EN BIT(4)
+#define TPS65224_BIT_I2C2_CRC_EN BIT(5)
+#define TPS6594_BB_EOC_RDY BIT(7)
+
+/* ENABLE_DRV_REG register field definition */
+#define TPS6594_BIT_ENABLE_DRV BIT(0)
+
+/* MISC_CTRL register field definition */
+#define TPS6594_BIT_NRSTOUT BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC BIT(1)
+#define TPS6594_BIT_LPM_EN BIT(2)
+#define TPS6594_BIT_CLKMON_EN BIT(3)
+#define TPS6594_BIT_AMUXOUT_EN BIT(4)
+#define TPS6594_BIT_SEL_EXT_CLK BIT(5)
+#define TPS6594_MASK_SYNCCLKOUT_FREQ_SEL GENMASK(7, 6)
+
+/* ENABLE_DRV_STAT register field definition */
+#define TPS6594_BIT_EN_DRV_IN BIT(0)
+#define TPS6594_BIT_NRSTOUT_IN BIT(1)
+#define TPS6594_BIT_NRSTOUT_SOC_IN BIT(2)
+#define TPS6594_BIT_FORCE_EN_DRV_LOW BIT(3)
+#define TPS6594_BIT_SPMI_LPM_EN BIT(4)
+#define TPS65224_BIT_TSD_DISABLE BIT(5)
+
+/* RECOV_CNT_REG_1 register field definition */
+#define TPS6594_MASK_RECOV_CNT GENMASK(3, 0)
+
+/* RECOV_CNT_REG_2 register field definition */
+#define TPS6594_MASK_RECOV_CNT_THR GENMASK(3, 0)
+#define TPS6594_BIT_RECOV_CNT_CLR BIT(4)
+
+/* FSM_I2C_TRIGGERS register field definition */
+#define TPS6594_BIT_TRIGGER_I2C(bit) BIT(bit)
+
+/* FSM_NSLEEP_TRIGGERS register field definition */
+#define TPS6594_BIT_NSLEEP1B BIT(0)
+#define TPS6594_BIT_NSLEEP2B BIT(1)
+
+/* BUCK_RESET_REG register field definition */
+#define TPS6594_BIT_BUCKX_RESET(buck_inst) BIT(buck_inst)
+
+/* SPREAD_SPECTRUM_1 register field definition */
+#define TPS6594_MASK_SS_DEPTH GENMASK(1, 0)
+#define TPS6594_BIT_SS_EN BIT(2)
+
+/* FREQ_SEL register field definition */
+#define TPS6594_BIT_BUCKX_FREQ_SEL(buck_inst) BIT(buck_inst)
+
+/* FSM_STEP_SIZE register field definition */
+#define TPS6594_MASK_PFSM_DELAY_STEP GENMASK(4, 0)
+
+/* LDO_RV_TIMEOUT_REG_1 register field definition */
+#define TPS6594_MASK_LDO1_RV_TIMEOUT GENMASK(3, 0)
+#define TPS6594_MASK_LDO2_RV_TIMEOUT GENMASK(7, 4)
+
+/* LDO_RV_TIMEOUT_REG_2 register field definition */
+#define TPS6594_MASK_LDO3_RV_TIMEOUT GENMASK(3, 0)
+#define TPS6594_MASK_LDO4_RV_TIMEOUT GENMASK(7, 4)
+
+/* USER_SPARE_REGS register field definition */
+#define TPS6594_BIT_USER_SPARE(bit) BIT(bit)
+
+/* ESM_MCU_START_REG register field definition */
+#define TPS6594_BIT_ESM_MCU_START BIT(0)
+
+/* ESM_MCU_MODE_CFG register field definition */
+#define TPS6594_MASK_ESM_MCU_ERR_CNT_TH GENMASK(3, 0)
+#define TPS6594_BIT_ESM_MCU_ENDRV BIT(5)
+#define TPS6594_BIT_ESM_MCU_EN BIT(6)
+#define TPS6594_BIT_ESM_MCU_MODE BIT(7)
+
+/* ESM_MCU_ERR_CNT_REG register field definition */
+#define TPS6594_MASK_ESM_MCU_ERR_CNT GENMASK(4, 0)
+
+/* ESM_SOC_START_REG register field definition */
+#define TPS6594_BIT_ESM_SOC_START BIT(0)
+
+/* ESM_MCU_START_REG register field definition */
+#define TPS65224_BIT_ESM_MCU_START BIT(0)
+
+/* ESM_SOC_MODE_CFG register field definition */
+#define TPS6594_MASK_ESM_SOC_ERR_CNT_TH GENMASK(3, 0)
+#define TPS6594_BIT_ESM_SOC_ENDRV BIT(5)
+#define TPS6594_BIT_ESM_SOC_EN BIT(6)
+#define TPS6594_BIT_ESM_SOC_MODE BIT(7)
+
+/* ESM_MCU_MODE_CFG register field definition */
+#define TPS65224_MASK_ESM_MCU_ERR_CNT_TH GENMASK(3, 0)
+#define TPS65224_BIT_ESM_MCU_ENDRV BIT(5)
+#define TPS65224_BIT_ESM_MCU_EN BIT(6)
+#define TPS65224_BIT_ESM_MCU_MODE BIT(7)
+
+/* ESM_SOC_ERR_CNT_REG register field definition */
+#define TPS6594_MASK_ESM_SOC_ERR_CNT GENMASK(4, 0)
+
+/* ESM_MCU_ERR_CNT_REG register field definition */
+#define TPS6594_MASK_ESM_MCU_ERR_CNT GENMASK(4, 0)
+
+/* REGISTER_LOCK register field definition */
+#define TPS6594_BIT_REGISTER_LOCK_STATUS BIT(0)
+
+/* VMON_CONF register field definition */
+#define TPS6594_MASK_VMON1_SLEW_RATE GENMASK(2, 0)
+#define TPS6594_MASK_VMON2_SLEW_RATE GENMASK(5, 3)
+
+/* SRAM_ACCESS_1 Register field definition */
+#define TPS65224_MASk_SRAM_UNLOCK_SEQ GENMASK(7, 0)
+
+/* SRAM_ACCESS_2 Register field definition */
+#define TPS65224_BIT_SRAM_WRITE_MODE BIT(0)
+#define TPS65224_BIT_OTP_PROG_USER BIT(1)
+#define TPS65224_BIT_OTP_PROG_PFSM BIT(2)
+#define TPS65224_BIT_OTP_PROG_STATUS BIT(3)
+#define TPS65224_BIT_SRAM_UNLOCKED BIT(6)
+#define TPS65224_USER_PROG_ALLOWED BIT(7)
+
+/* SRAM_ADDR_CTRL Register field definition */
+#define TPS65224_MASk_SRAM_SEL GENMASK(1, 0)
+
+/* RECOV_CNT_PFSM_INCR Register field definition */
+#define TPS65224_BIT_INCREMENT_RECOV_CNT BIT(0)
+
+/* MANUFACTURING_VER Register field definition */
+#define TPS65224_MASK_SILICON_REV GENMASK(7, 0)
+
+/* CUSTOMER_NVM_ID_REG Register field definition */
+#define TPS65224_MASK_CUSTOMER_NVM_ID GENMASK(7, 0)
+
+/* SOFT_REBOOT_REG register field definition */
+#define TPS6594_BIT_SOFT_REBOOT BIT(0)
+
+/* RTC_SECONDS & ALARM_SECONDS register field definition */
+#define TPS6594_MASK_SECOND_0 GENMASK(3, 0)
+#define TPS6594_MASK_SECOND_1 GENMASK(6, 4)
+
+/* RTC_MINUTES & ALARM_MINUTES register field definition */
+#define TPS6594_MASK_MINUTE_0 GENMASK(3, 0)
+#define TPS6594_MASK_MINUTE_1 GENMASK(6, 4)
+
+/* RTC_HOURS & ALARM_HOURS register field definition */
+#define TPS6594_MASK_HOUR_0 GENMASK(3, 0)
+#define TPS6594_MASK_HOUR_1 GENMASK(5, 4)
+#define TPS6594_BIT_PM_NAM BIT(7)
+
+/* RTC_DAYS & ALARM_DAYS register field definition */
+#define TPS6594_MASK_DAY_0 GENMASK(3, 0)
+#define TPS6594_MASK_DAY_1 GENMASK(5, 4)
+
+/* RTC_MONTHS & ALARM_MONTHS register field definition */
+#define TPS6594_MASK_MONTH_0 GENMASK(3, 0)
+#define TPS6594_BIT_MONTH_1 BIT(4)
+
+/* RTC_YEARS & ALARM_YEARS register field definition */
+#define TPS6594_MASK_YEAR_0 GENMASK(3, 0)
+#define TPS6594_MASK_YEAR_1 GENMASK(7, 4)
+
+/* RTC_WEEKS register field definition */
+#define TPS6594_MASK_WEEK GENMASK(2, 0)
+
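
The _0/_1 mask pairs above reflect BCD-coded time; a decode sketch with FIELD_GET() (the kernel's bcd2bin() from <linux/bcd.h> is the usual shortcut):

#include <linux/bitfield.h>

/* Sketch: tens digit * 10 + units digit; the same pattern applies to
 * minutes, hours, days, months and years. */
static unsigned int example_decode_seconds(unsigned int reg_val)
{
	return FIELD_GET(TPS6594_MASK_SECOND_1, reg_val) * 10 +
	       FIELD_GET(TPS6594_MASK_SECOND_0, reg_val);
}
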
+/* RTC_CTRL_1 register field definition */
+#define TPS6594_BIT_STOP_RTC BIT(0)
+#define TPS6594_BIT_ROUND_30S BIT(1)
+#define TPS6594_BIT_AUTO_COMP BIT(2)
+#define TPS6594_BIT_MODE_12_24 BIT(3)
+#define TPS6594_BIT_SET_32_COUNTER BIT(5)
+#define TPS6594_BIT_GET_TIME BIT(6)
+#define TPS6594_BIT_RTC_V_OPT BIT(7)
+
+/* RTC_CTRL_2 register field definition */
+#define TPS6594_BIT_XTAL_EN BIT(0)
+#define TPS6594_MASK_XTAL_SEL GENMASK(2, 1)
+#define TPS6594_BIT_LP_STANDBY_SEL BIT(3)
+#define TPS6594_BIT_FAST_BIST BIT(4)
+#define TPS6594_MASK_STARTUP_DEST GENMASK(6, 5)
+#define TPS6594_BIT_FIRST_STARTUP_DONE BIT(7)
+
+/* RTC_STATUS register field definition */
+#define TPS6594_BIT_RUN BIT(1)
+#define TPS6594_BIT_TIMER BIT(5)
+#define TPS6594_BIT_ALARM BIT(6)
+#define TPS6594_BIT_POWER_UP BIT(7)
+
+/* RTC_INTERRUPTS register field definition */
+#define TPS6594_MASK_EVERY GENMASK(1, 0)
+#define TPS6594_BIT_IT_TIMER BIT(2)
+#define TPS6594_BIT_IT_ALARM BIT(3)
+
+/* RTC_RESET_STATUS register field definition */
+#define TPS6594_BIT_RESET_STATUS_RTC BIT(0)
+
+/* SERIAL_IF_CONFIG register field definition */
+#define TPS6594_BIT_I2C_SPI_SEL BIT(0)
+#define TPS6594_BIT_I2C1_SPI_CRC_EN BIT(1)
+#define TPS6594_BIT_I2C2_CRC_EN BIT(2)
+#define TPS6594_MASK_T_CRC GENMASK(7, 3)
+
+/* ADC_CTRL Register field definition */
+#define TPS65224_BIT_ADC_START BIT(0)
+#define TPS65224_BIT_ADC_CONT_CONV BIT(1)
+#define TPS65224_BIT_ADC_THERMAL_SEL BIT(2)
+#define TPS65224_BIT_ADC_RDIV_EN BIT(3)
+#define TPS65224_BIT_ADC_STATUS BIT(7)
+
+/* ADC_RESULT_REG_1 Register field definition */
+#define TPS65224_MASK_ADC_RESULT_11_4 GENMASK(7, 0)
+
+/* ADC_RESULT_REG_2 Register field definition */
+#define TPS65224_MASK_ADC_RESULT_3_0 GENMASK(7, 4)
+
+/* STARTUP_CTRL Register field definition */
+#define TPS65224_MASK_STARTUP_DEST GENMASK(6, 5)
+#define TPS65224_BIT_FIRST_STARTUP_DONE BIT(7)
+
+/* SCRATCH_PAD_REG_1 register field definition */
+#define TPS6594_MASK_SCRATCH_PAD_1 GENMASK(7, 0)
+
+/* SCRATCH_PAD_REG_2 register field definition */
+#define TPS6594_MASK_SCRATCH_PAD_2 GENMASK(7, 0)
+
+/* SCRATCH_PAD_REG_3 register field definition */
+#define TPS6594_MASK_SCRATCH_PAD_3 GENMASK(7, 0)
+
+/* SCRATCH_PAD_REG_4 register field definition */
+#define TPS6594_MASK_SCRATCH_PAD_4 GENMASK(7, 0)
+
+/* PFSM_DELAY_REG_1 register field definition */
+#define TPS6594_MASK_PFSM_DELAY1 GENMASK(7, 0)
+
+/* PFSM_DELAY_REG_2 register field definition */
+#define TPS6594_MASK_PFSM_DELAY2 GENMASK(7, 0)
+
+/* PFSM_DELAY_REG_3 register field definition */
+#define TPS6594_MASK_PFSM_DELAY3 GENMASK(7, 0)
+
+/* PFSM_DELAY_REG_4 register field definition */
+#define TPS6594_MASK_PFSM_DELAY4 GENMASK(7, 0)
+
+/* CRC_CALC_CONTROL register field definition */
+#define TPS65224_BIT_RUN_CRC_BIST BIT(0)
+#define TPS65224_BIT_RUN_CRC_UPDATE BIT(1)
+
+/* ADC_GAIN_COMP_REG register field definition */
+#define TPS65224_MASK_ADC_GAIN_COMP GENMASK(7, 0)
+
+/* REGMAP_USER_CRC_LOW register field definition */
+#define TPS65224_MASK_REGMAP_USER_CRC16_LOW GENMASK(7, 0)
+
+/* REGMAP_USER_CRC_HIGH register field definition */
+#define TPS65224_MASK_REGMAP_USER_CRC16_HIGH GENMASK(7, 0)
+
+/* WD_ANSWER_REG register field definition */
+#define TPS6594_MASK_WD_ANSWER GENMASK(7, 0)
+
+/* WD_QUESTION_ANSW_CNT register field definition */
+#define TPS6594_MASK_WD_QUESTION GENMASK(3, 0)
+#define TPS6594_MASK_WD_ANSW_CNT GENMASK(5, 4)
+#define TPS65224_BIT_INT_TOP_STATUS BIT(7)
+
+/* WD WIN1_CFG register field definition */
+#define TPS6594_MASK_WD_WIN1_CFG GENMASK(6, 0)
+
+/* WD WIN2_CFG register field definition */
+#define TPS6594_MASK_WD_WIN2_CFG GENMASK(6, 0)
+
+/* WD LongWin register field definition */
+#define TPS6594_MASK_WD_LONGWIN_CFG GENMASK(7, 0)
+
+/* WD_MODE_REG register field definition */
+#define TPS6594_BIT_WD_RETURN_LONGWIN BIT(0)
+#define TPS6594_BIT_WD_MODE_SELECT BIT(1)
+#define TPS6594_BIT_WD_PWRHOLD BIT(2)
+#define TPS65224_BIT_WD_ENDRV_SEL BIT(6)
+#define TPS65224_BIT_WD_CNT_SEL BIT(7)
+
+/* WD_QA_CFG register field definition */
+#define TPS6594_MASK_WD_QUESTION_SEED GENMASK(3, 0)
+#define TPS6594_MASK_WD_QA_LFSR GENMASK(5, 4)
+#define TPS6594_MASK_WD_QA_FDBK GENMASK(7, 6)
+
+/* WD_ERR_STATUS register field definition */
+#define TPS6594_BIT_WD_LONGWIN_TIMEOUT_INT BIT(0)
+#define TPS6594_BIT_WD_TIMEOUT BIT(1)
+#define TPS6594_BIT_WD_TRIG_EARLY BIT(2)
+#define TPS6594_BIT_WD_ANSW_EARLY BIT(3)
+#define TPS6594_BIT_WD_SEQ_ERR BIT(4)
+#define TPS6594_BIT_WD_ANSW_ERR BIT(5)
+#define TPS6594_BIT_WD_FAIL_INT BIT(6)
+#define TPS6594_BIT_WD_RST_INT BIT(7)
+
+/* WD_THR_CFG register field definition */
+#define TPS6594_MASK_WD_RST_TH GENMASK(2, 0)
+#define TPS6594_MASK_WD_FAIL_TH GENMASK(5, 3)
+#define TPS6594_BIT_WD_EN BIT(6)
+#define TPS6594_BIT_WD_RST_EN BIT(7)
+
+/* WD_FAIL_CNT_REG register field definition */
+#define TPS6594_MASK_WD_FAIL_CNT GENMASK(3, 0)
+#define TPS6594_BIT_WD_FIRST_OK BIT(5)
+#define TPS6594_BIT_WD_BAD_EVENT BIT(6)
+
+/* CRC8 polynomial for I2C & SPI protocols */
+#define TPS6594_CRC8_POLYNOMIAL 0x07
+
+/* IRQs */
+enum tps6594_irqs {
+ /* INT_BUCK1_2 register */
+ TPS6594_IRQ_BUCK1_OV,
+ TPS6594_IRQ_BUCK1_UV,
+ TPS6594_IRQ_BUCK1_SC,
+ TPS6594_IRQ_BUCK1_ILIM,
+ TPS6594_IRQ_BUCK2_OV,
+ TPS6594_IRQ_BUCK2_UV,
+ TPS6594_IRQ_BUCK2_SC,
+ TPS6594_IRQ_BUCK2_ILIM,
+ /* INT_BUCK3_4 register */
+ TPS6594_IRQ_BUCK3_OV,
+ TPS6594_IRQ_BUCK3_UV,
+ TPS6594_IRQ_BUCK3_SC,
+ TPS6594_IRQ_BUCK3_ILIM,
+ TPS6594_IRQ_BUCK4_OV,
+ TPS6594_IRQ_BUCK4_UV,
+ TPS6594_IRQ_BUCK4_SC,
+ TPS6594_IRQ_BUCK4_ILIM,
+ /* INT_BUCK5 register */
+ TPS6594_IRQ_BUCK5_OV,
+ TPS6594_IRQ_BUCK5_UV,
+ TPS6594_IRQ_BUCK5_SC,
+ TPS6594_IRQ_BUCK5_ILIM,
+ /* INT_LDO1_2 register */
+ TPS6594_IRQ_LDO1_OV,
+ TPS6594_IRQ_LDO1_UV,
+ TPS6594_IRQ_LDO1_SC,
+ TPS6594_IRQ_LDO1_ILIM,
+ TPS6594_IRQ_LDO2_OV,
+ TPS6594_IRQ_LDO2_UV,
+ TPS6594_IRQ_LDO2_SC,
+ TPS6594_IRQ_LDO2_ILIM,
+ /* INT_LDO3_4 register */
+ TPS6594_IRQ_LDO3_OV,
+ TPS6594_IRQ_LDO3_UV,
+ TPS6594_IRQ_LDO3_SC,
+ TPS6594_IRQ_LDO3_ILIM,
+ TPS6594_IRQ_LDO4_OV,
+ TPS6594_IRQ_LDO4_UV,
+ TPS6594_IRQ_LDO4_SC,
+ TPS6594_IRQ_LDO4_ILIM,
+ /* INT_VMON register */
+ TPS6594_IRQ_VCCA_OV,
+ TPS6594_IRQ_VCCA_UV,
+ TPS6594_IRQ_VMON1_OV,
+ TPS6594_IRQ_VMON1_UV,
+ TPS6594_IRQ_VMON1_RV,
+ TPS6594_IRQ_VMON2_OV,
+ TPS6594_IRQ_VMON2_UV,
+ TPS6594_IRQ_VMON2_RV,
+ /* INT_GPIO register */
+ TPS6594_IRQ_GPIO9,
+ TPS6594_IRQ_GPIO10,
+ TPS6594_IRQ_GPIO11,
+ /* INT_GPIO1_8 register */
+ TPS6594_IRQ_GPIO1,
+ TPS6594_IRQ_GPIO2,
+ TPS6594_IRQ_GPIO3,
+ TPS6594_IRQ_GPIO4,
+ TPS6594_IRQ_GPIO5,
+ TPS6594_IRQ_GPIO6,
+ TPS6594_IRQ_GPIO7,
+ TPS6594_IRQ_GPIO8,
+ /* INT_STARTUP register */
+ TPS6594_IRQ_NPWRON_START,
+ TPS6594_IRQ_ENABLE,
+ TPS6594_IRQ_FSD,
+ TPS6594_IRQ_SOFT_REBOOT,
+ /* INT_MISC register */
+ TPS6594_IRQ_BIST_PASS,
+ TPS6594_IRQ_EXT_CLK,
+ TPS6594_IRQ_TWARN,
+ /* INT_MODERATE_ERR register */
+ TPS6594_IRQ_TSD_ORD,
+ TPS6594_IRQ_BIST_FAIL,
+ TPS6594_IRQ_REG_CRC_ERR,
+ TPS6594_IRQ_RECOV_CNT,
+ TPS6594_IRQ_SPMI_ERR,
+ TPS6594_IRQ_NPWRON_LONG,
+ TPS6594_IRQ_NINT_READBACK,
+ TPS6594_IRQ_NRSTOUT_READBACK,
+ /* INT_SEVERE_ERR register */
+ TPS6594_IRQ_TSD_IMM,
+ TPS6594_IRQ_VCCA_OVP,
+ TPS6594_IRQ_PFSM_ERR,
+ /* INT_FSM_ERR register */
+ TPS6594_IRQ_IMM_SHUTDOWN,
+ TPS6594_IRQ_ORD_SHUTDOWN,
+ TPS6594_IRQ_MCU_PWR_ERR,
+ TPS6594_IRQ_SOC_PWR_ERR,
+ /* INT_COMM_ERR register */
+ TPS6594_IRQ_COMM_FRM_ERR,
+ TPS6594_IRQ_COMM_CRC_ERR,
+ TPS6594_IRQ_COMM_ADR_ERR,
+ TPS6594_IRQ_I2C2_CRC_ERR,
+ TPS6594_IRQ_I2C2_ADR_ERR,
+ /* INT_READBACK_ERR register */
+ TPS6594_IRQ_EN_DRV_READBACK,
+ TPS6594_IRQ_NRSTOUT_SOC_READBACK,
+ /* INT_ESM register */
+ TPS6594_IRQ_ESM_SOC_PIN,
+ TPS6594_IRQ_ESM_SOC_FAIL,
+ TPS6594_IRQ_ESM_SOC_RST,
+ /* RTC_STATUS register */
+ TPS6594_IRQ_TIMER,
+ TPS6594_IRQ_ALARM,
+ TPS6594_IRQ_POWER_UP,
+};
+
+#define TPS6594_IRQ_NAME_BUCK1_OV "buck1_ov"
+#define TPS6594_IRQ_NAME_BUCK1_UV "buck1_uv"
+#define TPS6594_IRQ_NAME_BUCK1_SC "buck1_sc"
+#define TPS6594_IRQ_NAME_BUCK1_ILIM "buck1_ilim"
+#define TPS6594_IRQ_NAME_BUCK2_OV "buck2_ov"
+#define TPS6594_IRQ_NAME_BUCK2_UV "buck2_uv"
+#define TPS6594_IRQ_NAME_BUCK2_SC "buck2_sc"
+#define TPS6594_IRQ_NAME_BUCK2_ILIM "buck2_ilim"
+#define TPS6594_IRQ_NAME_BUCK3_OV "buck3_ov"
+#define TPS6594_IRQ_NAME_BUCK3_UV "buck3_uv"
+#define TPS6594_IRQ_NAME_BUCK3_SC "buck3_sc"
+#define TPS6594_IRQ_NAME_BUCK3_ILIM "buck3_ilim"
+#define TPS6594_IRQ_NAME_BUCK4_OV "buck4_ov"
+#define TPS6594_IRQ_NAME_BUCK4_UV "buck4_uv"
+#define TPS6594_IRQ_NAME_BUCK4_SC "buck4_sc"
+#define TPS6594_IRQ_NAME_BUCK4_ILIM "buck4_ilim"
+#define TPS6594_IRQ_NAME_BUCK5_OV "buck5_ov"
+#define TPS6594_IRQ_NAME_BUCK5_UV "buck5_uv"
+#define TPS6594_IRQ_NAME_BUCK5_SC "buck5_sc"
+#define TPS6594_IRQ_NAME_BUCK5_ILIM "buck5_ilim"
+#define TPS6594_IRQ_NAME_LDO1_OV "ldo1_ov"
+#define TPS6594_IRQ_NAME_LDO1_UV "ldo1_uv"
+#define TPS6594_IRQ_NAME_LDO1_SC "ldo1_sc"
+#define TPS6594_IRQ_NAME_LDO1_ILIM "ldo1_ilim"
+#define TPS6594_IRQ_NAME_LDO2_OV "ldo2_ov"
+#define TPS6594_IRQ_NAME_LDO2_UV "ldo2_uv"
+#define TPS6594_IRQ_NAME_LDO2_SC "ldo2_sc"
+#define TPS6594_IRQ_NAME_LDO2_ILIM "ldo2_ilim"
+#define TPS6594_IRQ_NAME_LDO3_OV "ldo3_ov"
+#define TPS6594_IRQ_NAME_LDO3_UV "ldo3_uv"
+#define TPS6594_IRQ_NAME_LDO3_SC "ldo3_sc"
+#define TPS6594_IRQ_NAME_LDO3_ILIM "ldo3_ilim"
+#define TPS6594_IRQ_NAME_LDO4_OV "ldo4_ov"
+#define TPS6594_IRQ_NAME_LDO4_UV "ldo4_uv"
+#define TPS6594_IRQ_NAME_LDO4_SC "ldo4_sc"
+#define TPS6594_IRQ_NAME_LDO4_ILIM "ldo4_ilim"
+#define TPS6594_IRQ_NAME_VCCA_OV "vcca_ov"
+#define TPS6594_IRQ_NAME_VCCA_UV "vcca_uv"
+#define TPS6594_IRQ_NAME_VMON1_OV "vmon1_ov"
+#define TPS6594_IRQ_NAME_VMON1_UV "vmon1_uv"
+#define TPS6594_IRQ_NAME_VMON1_RV "vmon1_rv"
+#define TPS6594_IRQ_NAME_VMON2_OV "vmon2_ov"
+#define TPS6594_IRQ_NAME_VMON2_UV "vmon2_uv"
+#define TPS6594_IRQ_NAME_VMON2_RV "vmon2_rv"
+#define TPS6594_IRQ_NAME_GPIO9 "gpio9"
+#define TPS6594_IRQ_NAME_GPIO10 "gpio10"
+#define TPS6594_IRQ_NAME_GPIO11 "gpio11"
+#define TPS6594_IRQ_NAME_GPIO1 "gpio1"
+#define TPS6594_IRQ_NAME_GPIO2 "gpio2"
+#define TPS6594_IRQ_NAME_GPIO3 "gpio3"
+#define TPS6594_IRQ_NAME_GPIO4 "gpio4"
+#define TPS6594_IRQ_NAME_GPIO5 "gpio5"
+#define TPS6594_IRQ_NAME_GPIO6 "gpio6"
+#define TPS6594_IRQ_NAME_GPIO7 "gpio7"
+#define TPS6594_IRQ_NAME_GPIO8 "gpio8"
+#define TPS6594_IRQ_NAME_NPWRON_START "npwron_start"
+#define TPS6594_IRQ_NAME_ENABLE "enable"
+#define TPS6594_IRQ_NAME_FSD "fsd"
+#define TPS6594_IRQ_NAME_SOFT_REBOOT "soft_reboot"
+#define TPS6594_IRQ_NAME_BIST_PASS "bist_pass"
+#define TPS6594_IRQ_NAME_EXT_CLK "ext_clk"
+#define TPS6594_IRQ_NAME_TWARN "twarn"
+#define TPS6594_IRQ_NAME_TSD_ORD "tsd_ord"
+#define TPS6594_IRQ_NAME_BIST_FAIL "bist_fail"
+#define TPS6594_IRQ_NAME_REG_CRC_ERR "reg_crc_err"
+#define TPS6594_IRQ_NAME_RECOV_CNT "recov_cnt"
+#define TPS6594_IRQ_NAME_SPMI_ERR "spmi_err"
+#define TPS6594_IRQ_NAME_NPWRON_LONG "npwron_long"
+#define TPS6594_IRQ_NAME_NINT_READBACK "nint_readback"
+#define TPS6594_IRQ_NAME_NRSTOUT_READBACK "nrstout_readback"
+#define TPS6594_IRQ_NAME_TSD_IMM "tsd_imm"
+#define TPS6594_IRQ_NAME_VCCA_OVP "vcca_ovp"
+#define TPS6594_IRQ_NAME_PFSM_ERR "pfsm_err"
+#define TPS6594_IRQ_NAME_IMM_SHUTDOWN "imm_shutdown"
+#define TPS6594_IRQ_NAME_ORD_SHUTDOWN "ord_shutdown"
+#define TPS6594_IRQ_NAME_MCU_PWR_ERR "mcu_pwr_err"
+#define TPS6594_IRQ_NAME_SOC_PWR_ERR "soc_pwr_err"
+#define TPS6594_IRQ_NAME_COMM_FRM_ERR "comm_frm_err"
+#define TPS6594_IRQ_NAME_COMM_CRC_ERR "comm_crc_err"
+#define TPS6594_IRQ_NAME_COMM_ADR_ERR "comm_adr_err"
+#define TPS6594_IRQ_NAME_EN_DRV_READBACK "en_drv_readback"
+#define TPS6594_IRQ_NAME_NRSTOUT_SOC_READBACK "nrstout_soc_readback"
+#define TPS6594_IRQ_NAME_ESM_SOC_PIN "esm_soc_pin"
+#define TPS6594_IRQ_NAME_ESM_SOC_FAIL "esm_soc_fail"
+#define TPS6594_IRQ_NAME_ESM_SOC_RST "esm_soc_rst"
+#define TPS6594_IRQ_NAME_TIMER "timer"
+#define TPS6594_IRQ_NAME_ALARM "alarm"
+#define TPS6594_IRQ_NAME_POWERUP "powerup"
+
+/* IRQs */
+enum tps65224_irqs {
+ /* INT_BUCK register */
+ TPS65224_IRQ_BUCK1_UVOV,
+ TPS65224_IRQ_BUCK2_UVOV,
+ TPS65224_IRQ_BUCK3_UVOV,
+ TPS65224_IRQ_BUCK4_UVOV,
+ /* INT_LDO_VMON register */
+ TPS65224_IRQ_LDO1_UVOV,
+ TPS65224_IRQ_LDO2_UVOV,
+ TPS65224_IRQ_LDO3_UVOV,
+ TPS65224_IRQ_VCCA_UVOV,
+ TPS65224_IRQ_VMON1_UVOV,
+ TPS65224_IRQ_VMON2_UVOV,
+ /* INT_GPIO register */
+ TPS65224_IRQ_GPIO1,
+ TPS65224_IRQ_GPIO2,
+ TPS65224_IRQ_GPIO3,
+ TPS65224_IRQ_GPIO4,
+ TPS65224_IRQ_GPIO5,
+ TPS65224_IRQ_GPIO6,
+ /* INT_STARTUP register */
+ TPS65224_IRQ_VSENSE,
+ TPS65224_IRQ_ENABLE,
+ TPS65224_IRQ_PB_SHORT,
+ TPS65224_IRQ_FSD,
+ TPS65224_IRQ_SOFT_REBOOT,
+ /* INT_MISC register */
+ TPS65224_IRQ_BIST_PASS,
+ TPS65224_IRQ_EXT_CLK,
+ TPS65224_IRQ_REG_UNLOCK,
+ TPS65224_IRQ_TWARN,
+ TPS65224_IRQ_PB_LONG,
+ TPS65224_IRQ_PB_FALL,
+ TPS65224_IRQ_PB_RISE,
+ TPS65224_IRQ_ADC_CONV_READY,
+ /* INT_MODERATE_ERR register */
+ TPS65224_IRQ_TSD_ORD,
+ TPS65224_IRQ_BIST_FAIL,
+ TPS65224_IRQ_REG_CRC_ERR,
+ TPS65224_IRQ_RECOV_CNT,
+ /* INT_SEVERE_ERR register */
+ TPS65224_IRQ_TSD_IMM,
+ TPS65224_IRQ_VCCA_OVP,
+ TPS65224_IRQ_PFSM_ERR,
+ TPS65224_IRQ_BG_XMON,
+ /* INT_FSM_ERR register */
+ TPS65224_IRQ_IMM_SHUTDOWN,
+ TPS65224_IRQ_ORD_SHUTDOWN,
+ TPS65224_IRQ_MCU_PWR_ERR,
+ TPS65224_IRQ_SOC_PWR_ERR,
+ TPS65224_IRQ_COMM_ERR,
+ TPS65224_IRQ_I2C2_ERR,
+};
+
+#define TPS65224_IRQ_NAME_BUCK1_UVOV "buck1_uvov"
+#define TPS65224_IRQ_NAME_BUCK2_UVOV "buck2_uvov"
+#define TPS65224_IRQ_NAME_BUCK3_UVOV "buck3_uvov"
+#define TPS65224_IRQ_NAME_BUCK4_UVOV "buck4_uvov"
+#define TPS65224_IRQ_NAME_LDO1_UVOV "ldo1_uvov"
+#define TPS65224_IRQ_NAME_LDO2_UVOV "ldo2_uvov"
+#define TPS65224_IRQ_NAME_LDO3_UVOV "ldo3_uvov"
+#define TPS65224_IRQ_NAME_VCCA_UVOV "vcca_uvov"
+#define TPS65224_IRQ_NAME_VMON1_UVOV "vmon1_uvov"
+#define TPS65224_IRQ_NAME_VMON2_UVOV "vmon2_uvov"
+#define TPS65224_IRQ_NAME_GPIO1 "gpio1"
+#define TPS65224_IRQ_NAME_GPIO2 "gpio2"
+#define TPS65224_IRQ_NAME_GPIO3 "gpio3"
+#define TPS65224_IRQ_NAME_GPIO4 "gpio4"
+#define TPS65224_IRQ_NAME_GPIO5 "gpio5"
+#define TPS65224_IRQ_NAME_GPIO6 "gpio6"
+#define TPS65224_IRQ_NAME_VSENSE "vsense"
+#define TPS65224_IRQ_NAME_ENABLE "enable"
+#define TPS65224_IRQ_NAME_PB_SHORT "pb_short"
+#define TPS65224_IRQ_NAME_FSD "fsd"
+#define TPS65224_IRQ_NAME_SOFT_REBOOT "soft_reboot"
+#define TPS65224_IRQ_NAME_BIST_PASS "bist_pass"
+#define TPS65224_IRQ_NAME_EXT_CLK "ext_clk"
+#define TPS65224_IRQ_NAME_REG_UNLOCK "reg_unlock"
+#define TPS65224_IRQ_NAME_TWARN "twarn"
+#define TPS65224_IRQ_NAME_PB_LONG "pb_long"
+#define TPS65224_IRQ_NAME_PB_FALL "pb_fall"
+#define TPS65224_IRQ_NAME_PB_RISE "pb_rise"
+#define TPS65224_IRQ_NAME_ADC_CONV_READY "adc_conv_ready"
+#define TPS65224_IRQ_NAME_TSD_ORD "tsd_ord"
+#define TPS65224_IRQ_NAME_BIST_FAIL "bist_fail"
+#define TPS65224_IRQ_NAME_REG_CRC_ERR "reg_crc_err"
+#define TPS65224_IRQ_NAME_RECOV_CNT "recov_cnt"
+#define TPS65224_IRQ_NAME_TSD_IMM "tsd_imm"
+#define TPS65224_IRQ_NAME_VCCA_OVP "vcca_ovp"
+#define TPS65224_IRQ_NAME_PFSM_ERR "pfsm_err"
+#define TPS65224_IRQ_NAME_BG_XMON "bg_xmon"
+#define TPS65224_IRQ_NAME_IMM_SHUTDOWN "imm_shutdown"
+#define TPS65224_IRQ_NAME_ORD_SHUTDOWN "ord_shutdown"
+#define TPS65224_IRQ_NAME_MCU_PWR_ERR "mcu_pwr_err"
+#define TPS65224_IRQ_NAME_SOC_PWR_ERR "soc_pwr_err"
+#define TPS65224_IRQ_NAME_COMM_ERR "comm_err"
+#define TPS65224_IRQ_NAME_I2C2_ERR "i2c2_err"
+#define TPS65224_IRQ_NAME_POWERUP "powerup"
+
+/**
+ * struct tps6594 - device private data structure
+ *
+ * @dev: MFD parent device
+ * @chip_id: chip ID
+ * @reg: I2C slave address or SPI chip select number
+ * @use_crc: if true, use CRC for I2C and SPI interface protocols
+ * @regmap: regmap for accessing the device registers
+ * @irq: irq generated by the device
+ * @irq_data: regmap irq data used for the irq chip
+ */
+struct tps6594 {
+ struct device *dev;
+ unsigned long chip_id;
+ unsigned short reg;
+ bool use_crc;
+ struct regmap *regmap;
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+extern const struct regmap_access_table tps6594_volatile_table;
+extern const struct regmap_access_table tps65224_volatile_table;
+
+int tps6594_device_init(struct tps6594 *tps, bool enable_crc);
+
+#endif /* __LINUX_MFD_TPS6594_H */
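
The GENMASK()/BIT() field macros above are meant to be combined with the
kernel's bitfield helpers rather than open-coded shifts. As a minimal
illustrative sketch (the helper name is hypothetical; it assumes the
TPS6594_REG_WD_QUESTION_ANSW_CNT register address defined earlier in this
header and a populated struct tps6594), reading the watchdog question field
could look like:

	#include <linux/bitfield.h>
	#include <linux/mfd/tps6594.h>
	#include <linux/regmap.h>

	/* Hypothetical helper: read the current watchdog Q&A question number. */
	static int tps6594_read_wd_question(struct tps6594 *tps, u8 *question)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(tps->regmap, TPS6594_REG_WD_QUESTION_ANSW_CNT, &val);
		if (ret)
			return ret;

		/*
		 * TPS6594_MASK_WD_QUESTION is GENMASK(3, 0); FIELD_GET()
		 * extracts bits 3..0 without a manual shift.
		 */
		*question = FIELD_GET(TPS6594_MASK_WD_QUESTION, val);

		return 0;
	}

Along the same lines, TPS6594_CRC8_POLYNOMIAL (0x07) is suitable for the
generic crc8_populate_msb()/crc8() helpers from <linux/crc8.h> when use_crc
is set.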
diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h
new file mode 100644
index 000000000000..7807fa329db0
--- /dev/null
+++ b/include/linux/mfd/tps68470.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Intel Corporation */
+/* Functions to access the TPS68470 power management chip. */
+
+#ifndef __LINUX_MFD_TPS68470_H
+#define __LINUX_MFD_TPS68470_H
+
+/* Register addresses */
+#define TPS68470_REG_POSTDIV2 0x06
+#define TPS68470_REG_BOOSTDIV 0x07
+#define TPS68470_REG_BUCKDIV 0x08
+#define TPS68470_REG_PLLSWR 0x09
+#define TPS68470_REG_XTALDIV 0x0A
+#define TPS68470_REG_PLLDIV 0x0B
+#define TPS68470_REG_POSTDIV 0x0C
+#define TPS68470_REG_PLLCTL 0x0D
+#define TPS68470_REG_PLLCTL2 0x0E
+#define TPS68470_REG_CLKCFG1 0x0F
+#define TPS68470_REG_CLKCFG2 0x10
+#define TPS68470_REG_GPCTL0A 0x14
+#define TPS68470_REG_GPCTL0B 0x15
+#define TPS68470_REG_GPCTL1A 0x16
+#define TPS68470_REG_GPCTL1B 0x17
+#define TPS68470_REG_GPCTL2A 0x18
+#define TPS68470_REG_GPCTL2B 0x19
+#define TPS68470_REG_GPCTL3A 0x1A
+#define TPS68470_REG_GPCTL3B 0x1B
+#define TPS68470_REG_GPCTL4A 0x1C
+#define TPS68470_REG_GPCTL4B 0x1D
+#define TPS68470_REG_GPCTL5A 0x1E
+#define TPS68470_REG_GPCTL5B 0x1F
+#define TPS68470_REG_GPCTL6A 0x20
+#define TPS68470_REG_GPCTL6B 0x21
+#define TPS68470_REG_SGPO 0x22
+#define TPS68470_REG_GPDI 0x26
+#define TPS68470_REG_GPDO 0x27
+#define TPS68470_REG_VCMVAL 0x3C
+#define TPS68470_REG_VAUX1VAL 0x3D
+#define TPS68470_REG_VAUX2VAL 0x3E
+#define TPS68470_REG_VIOVAL 0x3F
+#define TPS68470_REG_VSIOVAL 0x40
+#define TPS68470_REG_VAVAL 0x41
+#define TPS68470_REG_VDVAL 0x42
+#define TPS68470_REG_S_I2C_CTL 0x43
+#define TPS68470_REG_VCMCTL 0x44
+#define TPS68470_REG_VAUX1CTL 0x45
+#define TPS68470_REG_VAUX2CTL 0x46
+#define TPS68470_REG_VACTL 0x47
+#define TPS68470_REG_VDCTL 0x48
+#define TPS68470_REG_RESET 0x50
+#define TPS68470_REG_REVID 0xFF
+
+#define TPS68470_REG_MAX TPS68470_REG_REVID
+
+/* Register field definitions */
+
+#define TPS68470_REG_RESET_MASK GENMASK(7, 0)
+#define TPS68470_VAVAL_AVOLT_MASK GENMASK(6, 0)
+
+#define TPS68470_VDVAL_DVOLT_MASK GENMASK(5, 0)
+#define TPS68470_VCMVAL_VCVOLT_MASK GENMASK(6, 0)
+#define TPS68470_VIOVAL_IOVOLT_MASK GENMASK(6, 0)
+#define TPS68470_VSIOVAL_IOVOLT_MASK GENMASK(6, 0)
+#define TPS68470_VAUX1VAL_AUX1VOLT_MASK GENMASK(6, 0)
+#define TPS68470_VAUX2VAL_AUX2VOLT_MASK GENMASK(6, 0)
+
+#define TPS68470_VACTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_VDCTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_VCMCTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_S_I2C_CTL_EN_MASK GENMASK(1, 0)
+#define TPS68470_VAUX1CTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_VAUX2CTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_PLL_EN_MASK GENMASK(0, 0)
+
+#define TPS68470_CLKCFG1_MODE_A_MASK GENMASK(1, 0)
+#define TPS68470_CLKCFG1_MODE_B_MASK GENMASK(3, 2)
+
+#define TPS68470_CLKCFG2_DRV_STR_2MA 0x05
+#define TPS68470_PLL_OUTPUT_ENABLE 0x02
+#define TPS68470_CLK_SRC_XTAL BIT(0)
+#define TPS68470_PLLSWR_DEFAULT GENMASK(1, 0)
+#define TPS68470_OSC_EXT_CAP_DEFAULT 0x05
+
+#define TPS68470_OUTPUT_A_SHIFT 0x00
+#define TPS68470_OUTPUT_B_SHIFT 0x02
+#define TPS68470_CLK_SRC_SHIFT GENMASK(2, 0)
+#define TPS68470_OSC_EXT_CAP_SHIFT BIT(2)
+
+#define TPS68470_GPIO_CTL_REG_A(x) (TPS68470_REG_GPCTL0A + (x) * 2)
+#define TPS68470_GPIO_CTL_REG_B(x) (TPS68470_REG_GPCTL0B + (x) * 2)
+#define TPS68470_GPIO_MODE_MASK GENMASK(1, 0)
+#define TPS68470_GPIO_MODE_IN 0
+#define TPS68470_GPIO_MODE_IN_PULLUP 1
+#define TPS68470_GPIO_MODE_OUT_CMOS 2
+#define TPS68470_GPIO_MODE_OUT_ODRAIN 3
+
+#endif /* __LINUX_MFD_TPS68470_H */
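
TPS68470_GPIO_CTL_REG_A(x) encodes the 2-byte stride visible in the address
map above (GPCTL0A at 0x14, GPCTL1A at 0x16, and so on). A hedged sketch of
how a driver might select a GPIO mode with these macros (the function name
is invented for illustration, and a regmap for the chip is assumed):

	#include <linux/mfd/tps68470.h>
	#include <linux/regmap.h>

	/* Illustrative only: put GPIO 'idx' (0..6) into CMOS output mode. */
	static int tps68470_gpio_out_cmos(struct regmap *regmap, unsigned int idx)
	{
		/* GPCTLxA registers sit two bytes apart, hence the (x) * 2 stride. */
		return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(idx),
					  TPS68470_GPIO_MODE_MASK,
					  TPS68470_GPIO_MODE_OUT_CMOS);
	}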
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
deleted file mode 100644
index 2c75c9c9318f..000000000000
--- a/include/linux/mfd/tps80031.h
+++ /dev/null
@@ -1,637 +0,0 @@
-/*
- * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver.
- *
- * Copyright (c) 2012, NVIDIA Corporation.
- *
- * Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
- */
-
-#ifndef __LINUX_MFD_TPS80031_H
-#define __LINUX_MFD_TPS80031_H
-
-#include <linux/device.h>
-#include <linux/regmap.h>
-
-/* Pull-ups/Pull-downs */
-#define TPS80031_CFG_INPUT_PUPD1 0xF0
-#define TPS80031_CFG_INPUT_PUPD2 0xF1
-#define TPS80031_CFG_INPUT_PUPD3 0xF2
-#define TPS80031_CFG_INPUT_PUPD4 0xF3
-#define TPS80031_CFG_LDO_PD1 0xF4
-#define TPS80031_CFG_LDO_PD2 0xF5
-#define TPS80031_CFG_SMPS_PD 0xF6
-
-/* Real Time Clock */
-#define TPS80031_SECONDS_REG 0x00
-#define TPS80031_MINUTES_REG 0x01
-#define TPS80031_HOURS_REG 0x02
-#define TPS80031_DAYS_REG 0x03
-#define TPS80031_MONTHS_REG 0x04
-#define TPS80031_YEARS_REG 0x05
-#define TPS80031_WEEKS_REG 0x06
-#define TPS80031_ALARM_SECONDS_REG 0x08
-#define TPS80031_ALARM_MINUTES_REG 0x09
-#define TPS80031_ALARM_HOURS_REG 0x0A
-#define TPS80031_ALARM_DAYS_REG 0x0B
-#define TPS80031_ALARM_MONTHS_REG 0x0C
-#define TPS80031_ALARM_YEARS_REG 0x0D
-#define TPS80031_RTC_CTRL_REG 0x10
-#define TPS80031_RTC_STATUS_REG 0x11
-#define TPS80031_RTC_INTERRUPTS_REG 0x12
-#define TPS80031_RTC_COMP_LSB_REG 0x13
-#define TPS80031_RTC_COMP_MSB_REG 0x14
-#define TPS80031_RTC_RESET_STATUS_REG 0x16
-
-/*PMC Master Module */
-#define TPS80031_PHOENIX_START_CONDITION 0x1F
-#define TPS80031_PHOENIX_MSK_TRANSITION 0x20
-#define TPS80031_STS_HW_CONDITIONS 0x21
-#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22
-#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23
-#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24
-#define TPS80031_PHOENIX_DEV_ON 0x25
-#define TPS80031_STS_PWR_GRP_STATE 0x27
-#define TPS80031_PH_CFG_VSYSLOW 0x28
-#define TPS80031_PH_STS_BOOT 0x29
-#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A
-#define TPS80031_PHOENIX_SEQ_CFG 0x2B
-#define TPS80031_PRIMARY_WATCHDOG_CFG 0X2C
-#define TPS80031_KEY_PRESS_DUR_CFG 0X2D
-#define TPS80031_SMPS_LDO_SHORT_STS 0x2E
-
-/* PMC Slave Module - Broadcast */
-#define TPS80031_BROADCAST_ADDR_ALL 0x31
-#define TPS80031_BROADCAST_ADDR_REF 0x32
-#define TPS80031_BROADCAST_ADDR_PROV 0x33
-#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34
-
-/* PMC Slave Module SMPS Regulators */
-#define TPS80031_SMPS4_CFG_TRANS 0x41
-#define TPS80031_SMPS4_CFG_STATE 0x42
-#define TPS80031_SMPS4_CFG_VOLTAGE 0x44
-#define TPS80031_VIO_CFG_TRANS 0x47
-#define TPS80031_VIO_CFG_STATE 0x48
-#define TPS80031_VIO_CFG_FORCE 0x49
-#define TPS80031_VIO_CFG_VOLTAGE 0x4A
-#define TPS80031_VIO_CFG_STEP 0x48
-#define TPS80031_SMPS1_CFG_TRANS 0x53
-#define TPS80031_SMPS1_CFG_STATE 0x54
-#define TPS80031_SMPS1_CFG_FORCE 0x55
-#define TPS80031_SMPS1_CFG_VOLTAGE 0x56
-#define TPS80031_SMPS1_CFG_STEP 0x57
-#define TPS80031_SMPS2_CFG_TRANS 0x59
-#define TPS80031_SMPS2_CFG_STATE 0x5A
-#define TPS80031_SMPS2_CFG_FORCE 0x5B
-#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C
-#define TPS80031_SMPS2_CFG_STEP 0x5D
-#define TPS80031_SMPS3_CFG_TRANS 0x65
-#define TPS80031_SMPS3_CFG_STATE 0x66
-#define TPS80031_SMPS3_CFG_VOLTAGE 0x68
-
-/* PMC Slave Module LDO Regulators */
-#define TPS80031_VANA_CFG_TRANS 0x81
-#define TPS80031_VANA_CFG_STATE 0x82
-#define TPS80031_VANA_CFG_VOLTAGE 0x83
-#define TPS80031_LDO2_CFG_TRANS 0x85
-#define TPS80031_LDO2_CFG_STATE 0x86
-#define TPS80031_LDO2_CFG_VOLTAGE 0x87
-#define TPS80031_LDO4_CFG_TRANS 0x89
-#define TPS80031_LDO4_CFG_STATE 0x8A
-#define TPS80031_LDO4_CFG_VOLTAGE 0x8B
-#define TPS80031_LDO3_CFG_TRANS 0x8D
-#define TPS80031_LDO3_CFG_STATE 0x8E
-#define TPS80031_LDO3_CFG_VOLTAGE 0x8F
-#define TPS80031_LDO6_CFG_TRANS 0x91
-#define TPS80031_LDO6_CFG_STATE 0x92
-#define TPS80031_LDO6_CFG_VOLTAGE 0x93
-#define TPS80031_LDOLN_CFG_TRANS 0x95
-#define TPS80031_LDOLN_CFG_STATE 0x96
-#define TPS80031_LDOLN_CFG_VOLTAGE 0x97
-#define TPS80031_LDO5_CFG_TRANS 0x99
-#define TPS80031_LDO5_CFG_STATE 0x9A
-#define TPS80031_LDO5_CFG_VOLTAGE 0x9B
-#define TPS80031_LDO1_CFG_TRANS 0x9D
-#define TPS80031_LDO1_CFG_STATE 0x9E
-#define TPS80031_LDO1_CFG_VOLTAGE 0x9F
-#define TPS80031_LDOUSB_CFG_TRANS 0xA1
-#define TPS80031_LDOUSB_CFG_STATE 0xA2
-#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3
-#define TPS80031_LDO7_CFG_TRANS 0xA5
-#define TPS80031_LDO7_CFG_STATE 0xA6
-#define TPS80031_LDO7_CFG_VOLTAGE 0xA7
-
-/* PMC Slave Module External Control */
-#define TPS80031_REGEN1_CFG_TRANS 0xAE
-#define TPS80031_REGEN1_CFG_STATE 0xAF
-#define TPS80031_REGEN2_CFG_TRANS 0xB1
-#define TPS80031_REGEN2_CFG_STATE 0xB2
-#define TPS80031_SYSEN_CFG_TRANS 0xB4
-#define TPS80031_SYSEN_CFG_STATE 0xB5
-
-/* PMC Slave Module Internal Control */
-#define TPS80031_NRESPWRON_CFG_TRANS 0xB7
-#define TPS80031_NRESPWRON_CFG_STATE 0xB8
-#define TPS80031_CLK32KAO_CFG_TRANS 0xBA
-#define TPS80031_CLK32KAO_CFG_STATE 0xBB
-#define TPS80031_CLK32KG_CFG_TRANS 0xBD
-#define TPS80031_CLK32KG_CFG_STATE 0xBE
-#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0
-#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1
-#define TPS80031_VRTC_CFG_TRANS 0xC3
-#define TPS80031_VRTC_CFG_STATE 0xC4
-#define TPS80031_BIAS_CFG_TRANS 0xC6
-#define TPS80031_BIAS_CFG_STATE 0xC7
-#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9
-#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA
-#define TPS80031_RC6MHZ_CFG_TRANS 0xCC
-#define TPS80031_RC6MHZ_CFG_STATE 0xCD
-#define TPS80031_TMP_CFG_TRANS 0xCF
-#define TPS80031_TMP_CFG_STATE 0xD0
-
-/* PMC Slave Module resources assignment */
-#define TPS80031_PREQ1_RES_ASS_A 0xD7
-#define TPS80031_PREQ1_RES_ASS_B 0xD8
-#define TPS80031_PREQ1_RES_ASS_C 0xD9
-#define TPS80031_PREQ2_RES_ASS_A 0xDA
-#define TPS80031_PREQ2_RES_ASS_B 0xDB
-#define TPS80031_PREQ2_RES_ASS_C 0xDC
-#define TPS80031_PREQ3_RES_ASS_A 0xDD
-#define TPS80031_PREQ3_RES_ASS_B 0xDE
-#define TPS80031_PREQ3_RES_ASS_C 0xDF
-
-/* PMC Slave Module Miscellaneous */
-#define TPS80031_SMPS_OFFSET 0xE0
-#define TPS80031_SMPS_MULT 0xE3
-#define TPS80031_MISC1 0xE4
-#define TPS80031_MISC2 0xE5
-#define TPS80031_BBSPOR_CFG 0xE6
-#define TPS80031_TMP_CFG 0xE7
-
-/* Battery Charging Controller and Indicator LED */
-#define TPS80031_CONTROLLER_CTRL2 0xDA
-#define TPS80031_CONTROLLER_VSEL_COMP 0xDB
-#define TPS80031_CHARGERUSB_VSYSREG 0xDC
-#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD
-#define TPS80031_LINEAR_CHRG_STS 0xDE
-#define TPS80031_CONTROLLER_INT_MASK 0xE0
-#define TPS80031_CONTROLLER_CTRL1 0xE1
-#define TPS80031_CONTROLLER_WDG 0xE2
-#define TPS80031_CONTROLLER_STAT1 0xE3
-#define TPS80031_CHARGERUSB_INT_STATUS 0xE4
-#define TPS80031_CHARGERUSB_INT_MASK 0xE5
-#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6
-#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7
-#define TPS80031_CHARGERUSB_CTRL1 0xE8
-#define TPS80031_CHARGERUSB_CTRL2 0xE9
-#define TPS80031_CHARGERUSB_CTRL3 0xEA
-#define TPS80031_CHARGERUSB_STAT1 0xEB
-#define TPS80031_CHARGERUSB_VOREG 0xEC
-#define TPS80031_CHARGERUSB_VICHRG 0xED
-#define TPS80031_CHARGERUSB_CINLIMIT 0xEE
-#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF
-#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0
-#define TPS80031_LED_PWM_CTRL1 0xF4
-#define TPS80031_LED_PWM_CTRL2 0xF5
-
-/* USB On-The-Go */
-#define TPS80031_BACKUP_REG 0xFA
-#define TPS80031_USB_VENDOR_ID_LSB 0x00
-#define TPS80031_USB_VENDOR_ID_MSB 0x01
-#define TPS80031_USB_PRODUCT_ID_LSB 0x02
-#define TPS80031_USB_PRODUCT_ID_MSB 0x03
-#define TPS80031_USB_VBUS_CTRL_SET 0x04
-#define TPS80031_USB_VBUS_CTRL_CLR 0x05
-#define TPS80031_USB_ID_CTRL_SET 0x06
-#define TPS80031_USB_ID_CTRL_CLR 0x07
-#define TPS80031_USB_VBUS_INT_SRC 0x08
-#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09
-#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A
-#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B
-#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C
-#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D
-#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E
-#define TPS80031_USB_ID_INT_SRC 0x0F
-#define TPS80031_USB_ID_INT_LATCH_SET 0x10
-#define TPS80031_USB_ID_INT_LATCH_CLR 0x11
-#define TPS80031_USB_ID_INT_EN_LO_SET 0x12
-#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13
-#define TPS80031_USB_ID_INT_EN_HI_SET 0x14
-#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15
-#define TPS80031_USB_OTG_ADP_CTRL 0x16
-#define TPS80031_USB_OTG_ADP_HIGH 0x17
-#define TPS80031_USB_OTG_ADP_LOW 0x18
-#define TPS80031_USB_OTG_ADP_RISE 0x19
-#define TPS80031_USB_OTG_REVISION 0x1A
-
-/* Gas Gauge */
-#define TPS80031_FG_REG_00 0xC0
-#define TPS80031_FG_REG_01 0xC1
-#define TPS80031_FG_REG_02 0xC2
-#define TPS80031_FG_REG_03 0xC3
-#define TPS80031_FG_REG_04 0xC4
-#define TPS80031_FG_REG_05 0xC5
-#define TPS80031_FG_REG_06 0xC6
-#define TPS80031_FG_REG_07 0xC7
-#define TPS80031_FG_REG_08 0xC8
-#define TPS80031_FG_REG_09 0xC9
-#define TPS80031_FG_REG_10 0xCA
-#define TPS80031_FG_REG_11 0xCB
-
-/* General Purpose ADC */
-#define TPS80031_GPADC_CTRL 0x2E
-#define TPS80031_GPADC_CTRL2 0x2F
-#define TPS80031_RTSELECT_LSB 0x32
-#define TPS80031_RTSELECT_ISB 0x33
-#define TPS80031_RTSELECT_MSB 0x34
-#define TPS80031_GPSELECT_ISB 0x35
-#define TPS80031_CTRL_P1 0x36
-#define TPS80031_RTCH0_LSB 0x37
-#define TPS80031_RTCH0_MSB 0x38
-#define TPS80031_RTCH1_LSB 0x39
-#define TPS80031_RTCH1_MSB 0x3A
-#define TPS80031_GPCH0_LSB 0x3B
-#define TPS80031_GPCH0_MSB 0x3C
-
-/* SIM, MMC and Battery Detection */
-#define TPS80031_SIMDEBOUNCING 0xEB
-#define TPS80031_SIMCTRL 0xEC
-#define TPS80031_MMCDEBOUNCING 0xED
-#define TPS80031_MMCCTRL 0xEE
-#define TPS80031_BATDEBOUNCING 0xEF
-
-/* Vibrator Driver and PWMs */
-#define TPS80031_VIBCTRL 0x9B
-#define TPS80031_VIBMODE 0x9C
-#define TPS80031_PWM1ON 0xBA
-#define TPS80031_PWM1OFF 0xBB
-#define TPS80031_PWM2ON 0xBD
-#define TPS80031_PWM2OFF 0xBE
-
-/* Control Interface */
-#define TPS80031_INT_STS_A 0xD0
-#define TPS80031_INT_STS_B 0xD1
-#define TPS80031_INT_STS_C 0xD2
-#define TPS80031_INT_MSK_LINE_A 0xD3
-#define TPS80031_INT_MSK_LINE_B 0xD4
-#define TPS80031_INT_MSK_LINE_C 0xD5
-#define TPS80031_INT_MSK_STS_A 0xD6
-#define TPS80031_INT_MSK_STS_B 0xD7
-#define TPS80031_INT_MSK_STS_C 0xD8
-#define TPS80031_TOGGLE1 0x90
-#define TPS80031_TOGGLE2 0x91
-#define TPS80031_TOGGLE3 0x92
-#define TPS80031_PWDNSTATUS1 0x93
-#define TPS80031_PWDNSTATUS2 0x94
-#define TPS80031_VALIDITY0 0x17
-#define TPS80031_VALIDITY1 0x18
-#define TPS80031_VALIDITY2 0x19
-#define TPS80031_VALIDITY3 0x1A
-#define TPS80031_VALIDITY4 0x1B
-#define TPS80031_VALIDITY5 0x1C
-#define TPS80031_VALIDITY6 0x1D
-#define TPS80031_VALIDITY7 0x1E
-
-/* Version number related register */
-#define TPS80031_JTAGVERNUM 0x87
-#define TPS80031_EPROM_REV 0xDF
-
-/* GPADC Trimming Bits. */
-#define TPS80031_GPADC_TRIM0 0xCC
-#define TPS80031_GPADC_TRIM1 0xCD
-#define TPS80031_GPADC_TRIM2 0xCE
-#define TPS80031_GPADC_TRIM3 0xCF
-#define TPS80031_GPADC_TRIM4 0xD0
-#define TPS80031_GPADC_TRIM5 0xD1
-#define TPS80031_GPADC_TRIM6 0xD2
-#define TPS80031_GPADC_TRIM7 0xD3
-#define TPS80031_GPADC_TRIM8 0xD4
-#define TPS80031_GPADC_TRIM9 0xD5
-#define TPS80031_GPADC_TRIM10 0xD6
-#define TPS80031_GPADC_TRIM11 0xD7
-#define TPS80031_GPADC_TRIM12 0xD8
-#define TPS80031_GPADC_TRIM13 0xD9
-#define TPS80031_GPADC_TRIM14 0xDA
-#define TPS80031_GPADC_TRIM15 0xDB
-#define TPS80031_GPADC_TRIM16 0xDC
-#define TPS80031_GPADC_TRIM17 0xDD
-#define TPS80031_GPADC_TRIM18 0xDE
-
-/* TPS80031_CONTROLLER_STAT1 bit fields */
-#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0
-#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1
-#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2
-#define TPS80031_CONTROLLER_STAT1_VAC_DET 3
-#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4
-#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6
-/* TPS80031_CONTROLLER_INT_MASK bit filed */
-#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0
-#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2
-#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4
-#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5
-
-#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F
-
-/* TPS80031_PHOENIX_DEV_ON bit field */
-#define TPS80031_DEVOFF 0x1
-
-#define TPS80031_EXT_CONTROL_CFG_TRANS 0
-#define TPS80031_EXT_CONTROL_CFG_STATE 1
-
-/* State register field */
-#define TPS80031_STATE_OFF 0x00
-#define TPS80031_STATE_ON 0x01
-#define TPS80031_STATE_MASK 0x03
-
-/* Trans register field */
-#define TPS80031_TRANS_ACTIVE_OFF 0x00
-#define TPS80031_TRANS_ACTIVE_ON 0x01
-#define TPS80031_TRANS_ACTIVE_MASK 0x03
-#define TPS80031_TRANS_SLEEP_OFF 0x00
-#define TPS80031_TRANS_SLEEP_ON 0x04
-#define TPS80031_TRANS_SLEEP_MASK 0x0C
-#define TPS80031_TRANS_OFF_OFF 0x00
-#define TPS80031_TRANS_OFF_ACTIVE 0x10
-#define TPS80031_TRANS_OFF_MASK 0x30
-
-#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \
- TPS80031_PWR_REQ_INPUT_PREQ2 | \
- TPS80031_PWR_REQ_INPUT_PREQ3)
-
-/* TPS80031_BBSPOR_CFG bit field */
-#define TPS80031_BBSPOR_CHG_EN 0x8
-#define TPS80031_MAX_REGISTER 0xFF
-
-struct i2c_client;
-
-/* Supported chips */
-enum chips {
- TPS80031 = 0x00000001,
- TPS80032 = 0x00000002,
-};
-
-enum {
- TPS80031_INT_PWRON,
- TPS80031_INT_RPWRON,
- TPS80031_INT_SYS_VLOW,
- TPS80031_INT_RTC_ALARM,
- TPS80031_INT_RTC_PERIOD,
- TPS80031_INT_HOT_DIE,
- TPS80031_INT_VXX_SHORT,
- TPS80031_INT_SPDURATION,
- TPS80031_INT_WATCHDOG,
- TPS80031_INT_BAT,
- TPS80031_INT_SIM,
- TPS80031_INT_MMC,
- TPS80031_INT_RES,
- TPS80031_INT_GPADC_RT,
- TPS80031_INT_GPADC_SW2_EOC,
- TPS80031_INT_CC_AUTOCAL,
- TPS80031_INT_ID_WKUP,
- TPS80031_INT_VBUSS_WKUP,
- TPS80031_INT_ID,
- TPS80031_INT_VBUS,
- TPS80031_INT_CHRG_CTRL,
- TPS80031_INT_EXT_CHRG,
- TPS80031_INT_INT_CHRG,
- TPS80031_INT_RES2,
- TPS80031_INT_BAT_TEMP_OVRANGE,
- TPS80031_INT_BAT_REMOVED,
- TPS80031_INT_VBUS_DET,
- TPS80031_INT_VAC_DET,
- TPS80031_INT_FAULT_WDG,
- TPS80031_INT_LINCH_GATED,
-
- /* Last interrupt id to get the end number */
- TPS80031_INT_NR,
-};
-
-/* TPS80031 Slave IDs */
-#define TPS80031_NUM_SLAVES 4
-#define TPS80031_SLAVE_ID0 0
-#define TPS80031_SLAVE_ID1 1
-#define TPS80031_SLAVE_ID2 2
-#define TPS80031_SLAVE_ID3 3
-
-/* TPS80031 I2C addresses */
-#define TPS80031_I2C_ID0_ADDR 0x12
-#define TPS80031_I2C_ID1_ADDR 0x48
-#define TPS80031_I2C_ID2_ADDR 0x49
-#define TPS80031_I2C_ID3_ADDR 0x4A
-
-enum {
- TPS80031_REGULATOR_VIO,
- TPS80031_REGULATOR_SMPS1,
- TPS80031_REGULATOR_SMPS2,
- TPS80031_REGULATOR_SMPS3,
- TPS80031_REGULATOR_SMPS4,
- TPS80031_REGULATOR_VANA,
- TPS80031_REGULATOR_LDO1,
- TPS80031_REGULATOR_LDO2,
- TPS80031_REGULATOR_LDO3,
- TPS80031_REGULATOR_LDO4,
- TPS80031_REGULATOR_LDO5,
- TPS80031_REGULATOR_LDO6,
- TPS80031_REGULATOR_LDO7,
- TPS80031_REGULATOR_LDOLN,
- TPS80031_REGULATOR_LDOUSB,
- TPS80031_REGULATOR_VBUS,
- TPS80031_REGULATOR_REGEN1,
- TPS80031_REGULATOR_REGEN2,
- TPS80031_REGULATOR_SYSEN,
- TPS80031_REGULATOR_MAX,
-};
-
-/* Different configurations for the rails */
-enum {
- /* USBLDO input selection */
- TPS80031_USBLDO_INPUT_VSYS = 0x00000001,
- TPS80031_USBLDO_INPUT_PMID = 0x00000002,
-
- /* LDO3 output mode */
- TPS80031_LDO3_OUTPUT_VIB = 0x00000004,
-
- /* VBUS configuration */
- TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004,
- TPS80031_VBUS_SW_ONLY = 0x00000008,
- TPS80031_VBUS_SW_N_ID = 0x00000010,
-};
-
-/* External controls requests */
-enum tps80031_ext_control {
- TPS80031_PWR_REQ_INPUT_NONE = 0x00000000,
- TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001,
- TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002,
- TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004,
- TPS80031_PWR_OFF_ON_SLEEP = 0x00000008,
- TPS80031_PWR_ON_ON_SLEEP = 0x00000010,
-};
-
-enum tps80031_pupd_pins {
- TPS80031_PREQ1 = 0,
- TPS80031_PREQ2A,
- TPS80031_PREQ2B,
- TPS80031_PREQ2C,
- TPS80031_PREQ3,
- TPS80031_NRES_WARM,
- TPS80031_PWM_FORCE,
- TPS80031_CHRG_EXT_CHRG_STATZ,
- TPS80031_SIM,
- TPS80031_MMC,
- TPS80031_GPADC_START,
- TPS80031_DVSI2C_SCL,
- TPS80031_DVSI2C_SDA,
- TPS80031_CTLI2C_SCL,
- TPS80031_CTLI2C_SDA,
-};
-
-enum tps80031_pupd_settings {
- TPS80031_PUPD_NORMAL,
- TPS80031_PUPD_PULLDOWN,
- TPS80031_PUPD_PULLUP,
-};
-
-struct tps80031 {
- struct device *dev;
- unsigned long chip_info;
- int es_version;
- struct i2c_client *clients[TPS80031_NUM_SLAVES];
- struct regmap *regmap[TPS80031_NUM_SLAVES];
- struct regmap_irq_chip_data *irq_data;
-};
-
-struct tps80031_pupd_init_data {
- int input_pin;
- int setting;
-};
-
-/*
- * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
- *
- * @reg_init_data: The regulator init data.
- * @ext_ctrl_flag: External control flag for sleep/power request control.
- * @config_flags: Configuration flag to configure the rails.
- * It should be ORed of config enums.
- */
-
-struct tps80031_regulator_platform_data {
- struct regulator_init_data *reg_init_data;
- unsigned int ext_ctrl_flag;
- unsigned int config_flags;
-};
-
-struct tps80031_platform_data {
- int irq_base;
- bool use_power_off;
- struct tps80031_pupd_init_data *pupd_init_data;
- int pupd_init_data_size;
- struct tps80031_regulator_platform_data
- *regulator_pdata[TPS80031_REGULATOR_MAX];
-};
-
-static inline int tps80031_write(struct device *dev, int sid,
- int reg, uint8_t val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_write(tps80031->regmap[sid], reg, val);
-}
-
-static inline int tps80031_writes(struct device *dev, int sid, int reg,
- int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_write(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_read(struct device *dev, int sid,
- int reg, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
- unsigned int ival;
- int ret;
-
- ret = regmap_read(tps80031->regmap[sid], reg, &ival);
- if (ret < 0) {
- dev_err(dev, "failed reading from reg 0x%02x\n", reg);
- return ret;
- }
-
- *val = ival;
- return ret;
-}
-
-static inline int tps80031_reads(struct device *dev, int sid,
- int reg, int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_read(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_set_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg,
- bit_mask, bit_mask);
-}
-
-static inline int tps80031_clr_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0);
-}
-
-static inline int tps80031_update(struct device *dev, int sid,
- int reg, uint8_t val, uint8_t mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, mask, val);
-}
-
-static inline unsigned long tps80031_get_chip_info(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->chip_info;
-}
-
-static inline int tps80031_get_pmu_version(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->es_version;
-}
-
-static inline int tps80031_irq_get_virq(struct device *dev, int irq)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_irq_get_virq(tps80031->irq_data, irq);
-}
-
-extern int tps80031_ext_power_req_config(struct device *dev,
- unsigned long ext_ctrl_flag, int preq_bit,
- int state_reg_add, int trans_reg_add);
-#endif /*__LINUX_MFD_TPS80031_H */
diff --git a/include/linux/i2c/twl.h b/include/linux/mfd/twl.h
index 9ad7828d9d34..b31e07fa4d51 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/mfd/twl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* twl4030.h - header for TWL4030 PM and audio CODEC device
*
@@ -5,21 +6,6 @@
*
* Based on tlv320aic23.c:
* Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __TWL_H_
@@ -83,6 +69,8 @@ enum twl6030_module_ids {
TWL6030_MODULE_GPADC,
TWL6030_MODULE_GASGAUGE,
+ /* A few extra registers before the registers shared with the 6030 */
+ TWL6032_MODULE_CHARGE,
TWL6030_MODULE_LAST,
};
@@ -195,14 +183,18 @@ static inline int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg) {
}
static inline int twl_i2c_write_u16(u8 mod_no, u16 val, u8 reg) {
- val = cpu_to_le16(val);
- return twl_i2c_write(mod_no, (u8*) &val, reg, 2);
+ __le16 value;
+
+ value = cpu_to_le16(val);
+ return twl_i2c_write(mod_no, (u8 *) &value, reg, 2);
}
static inline int twl_i2c_read_u16(u8 mod_no, u16 *val, u8 reg) {
int ret;
- ret = twl_i2c_read(mod_no, (u8*) val, reg, 2);
- *val = le16_to_cpu(*val);
+ __le16 value;
+
+ ret = twl_i2c_read(mod_no, (u8 *) &value, reg, 2);
+ *val = le16_to_cpu(value);
return ret;
}
@@ -213,27 +205,6 @@ int twl_get_hfclk_rate(void);
int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
-/* Card detect Configuration for MMC1 Controller on OMAP4 */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect_config(void);
-#else
-static inline int twl6030_mmc_card_detect_config(void)
-{
- pr_debug("twl6030_mmc_card_detect_config not supported\n");
- return 0;
-}
-#endif
-
-/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect(struct device *dev, int slot);
-#else
-static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
-{
- pr_debug("Call back twl6030_mmc_card_detect not supported\n");
- return -EIO;
-}
-#endif
/*----------------------------------------------------------------------*/
/*
@@ -469,6 +440,7 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
#define TWL4030_PM_MASTER_GLOBAL_TST 0xb6
+#define TWL6030_PHOENIX_DEV_ON 0x06
/*----------------------------------------------------------------------*/
/* Power bus message definitions */
@@ -601,11 +573,6 @@ struct twl4030_gpio_platform_data {
*/
u32 pullups;
u32 pulldowns;
-
- int (*setup)(struct device *dev,
- unsigned gpio, unsigned ngpio);
- int (*teardown)(struct device *dev,
- unsigned gpio, unsigned ngpio);
};
struct twl4030_madc_platform_data {
@@ -704,61 +671,6 @@ struct twl4030_audio_data {
unsigned int irq_base;
};
-struct twl4030_platform_data {
- struct twl4030_clock_init_data *clock;
- struct twl4030_bci_platform_data *bci;
- struct twl4030_gpio_platform_data *gpio;
- struct twl4030_madc_platform_data *madc;
- struct twl4030_keypad_data *keypad;
- struct twl4030_usb_data *usb;
- struct twl4030_power_data *power;
- struct twl4030_audio_data *audio;
-
- /* Common LDO regulators for TWL4030/TWL6030 */
- struct regulator_init_data *vdac;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
- struct regulator_init_data *vdd1;
- struct regulator_init_data *vdd2;
- struct regulator_init_data *vdd3;
- /* TWL4030 LDO regulators */
- struct regulator_init_data *vpll1;
- struct regulator_init_data *vpll2;
- struct regulator_init_data *vmmc1;
- struct regulator_init_data *vmmc2;
- struct regulator_init_data *vsim;
- struct regulator_init_data *vaux4;
- struct regulator_init_data *vio;
- struct regulator_init_data *vintana1;
- struct regulator_init_data *vintana2;
- struct regulator_init_data *vintdig;
- /* TWL6030 LDO regulators */
- struct regulator_init_data *vmmc;
- struct regulator_init_data *vpp;
- struct regulator_init_data *vusim;
- struct regulator_init_data *vana;
- struct regulator_init_data *vcxio;
- struct regulator_init_data *vusb;
- struct regulator_init_data *clk32kg;
- struct regulator_init_data *v1v8;
- struct regulator_init_data *v2v1;
- /* TWL6032 LDO regulators */
- struct regulator_init_data *ldo1;
- struct regulator_init_data *ldo2;
- struct regulator_init_data *ldo3;
- struct regulator_init_data *ldo4;
- struct regulator_init_data *ldo5;
- struct regulator_init_data *ldo6;
- struct regulator_init_data *ldo7;
- struct regulator_init_data *ldoln;
- struct regulator_init_data *ldousb;
- /* TWL6032 DCDC regulators */
- struct regulator_init_data *smps3;
- struct regulator_init_data *smps4;
- struct regulator_init_data *vio6025;
-};
-
struct twl_regulator_driver_data {
int (*set_voltage)(void *data, int target_uV);
int (*get_voltage)(void *data);
@@ -791,8 +703,6 @@ int twl4030_sih_setup(struct device *dev, int module, int irq_base);
#define TWL4030_VAUX3_DEV_GRP 0x1F
#define TWL4030_VAUX3_DEDICATED 0x22
-static inline int twl4030charger_usb_en(int enable) { return 0; }
-
/*----------------------------------------------------------------------*/
/* Linux-specific regulator identifiers ... for now, we only support
diff --git a/include/linux/mfd/twl4030-audio.h b/include/linux/mfd/twl4030-audio.h
index 3d22b72df076..1c28605dfda8 100644
--- a/include/linux/mfd/twl4030-audio.h
+++ b/include/linux/mfd/twl4030-audio.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD driver for twl4030 audio submodule
*
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*
* Copyright: (C) 2009 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef __TWL4030_CODEC_H__
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index a2e88761c09f..286a724e379a 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD driver for twl6040
*
@@ -5,21 +6,6 @@
* Misael Lopez Cruz <misael.lopez@ti.com>
*
* Copyright: (C) 2011 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef __TWL6040_CODEC_H__
@@ -188,35 +174,7 @@
#define TWL6040_GPO_MAX 3
-/* TODO: All platform data struct can be removed */
-struct twl6040_codec_data {
- u16 hs_left_step;
- u16 hs_right_step;
- u16 hf_left_step;
- u16 hf_right_step;
-};
-
-struct twl6040_vibra_data {
- unsigned int vibldrv_res; /* left driver resistance */
- unsigned int vibrdrv_res; /* right driver resistance */
- unsigned int viblmotor_res; /* left motor resistance */
- unsigned int vibrmotor_res; /* right motor resistance */
- int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */
- int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */
-};
-
-struct twl6040_gpo_data {
- int gpio_base;
-};
-
-struct twl6040_platform_data {
- int audpwron_gpio; /* audio power-on gpio */
-
- struct twl6040_codec_data *codec;
- struct twl6040_vibra_data *vibra;
- struct twl6040_gpo_data *gpo;
-};
-
+struct gpio_desc;
struct regmap;
struct regmap_irq_chips_data;
@@ -232,7 +190,7 @@ struct twl6040 {
struct mfd_cell cells[TWL6040_CELLS];
struct completion ready;
- int audpwron;
+ struct gpio_desc *audpwron;
int power_count;
int rev;
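
With the platform data gone, audpwron becomes a struct gpio_desc * that the
driver is expected to look up by function at probe time. A hedged sketch of
that acquisition (the "audpwron" con_id and the helper name are assumptions
for illustration, not confirmed by this diff):

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/mfd/twl6040.h>

	/* Illustrative probe-time lookup; names are assumptions. */
	static int twl6040_request_audpwron(struct twl6040 *twl6040,
					    struct device *dev)
	{
		twl6040->audpwron = devm_gpiod_get_optional(dev, "audpwron",
							    GPIOD_OUT_LOW);
		if (IS_ERR(twl6040->audpwron))
			return PTR_ERR(twl6040->audpwron);

		/* Power sequencing then goes through the descriptor API. */
		gpiod_set_value_cansleep(twl6040->audpwron, 1);

		return 0;
	}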
diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h
index 88f90cbf8e6a..ede237384723 100644
--- a/include/linux/mfd/ucb1x00.h
+++ b/include/linux/mfd/ucb1x00.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/mfd/ucb1x00.h
*
* Copyright (C) 2001 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#ifndef UCB1200_H
#define UCB1200_H
@@ -13,6 +10,7 @@
#include <linux/device.h>
#include <linux/mfd/mcp.h>
#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mutex.h>
#define UCB_IO_DATA 0x00
diff --git a/include/linux/mfd/upboard-fpga.h b/include/linux/mfd/upboard-fpga.h
new file mode 100644
index 000000000000..12231e40f5da
--- /dev/null
+++ b/include/linux/mfd/upboard-fpga.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UP Board CPLD/FPGA driver
+ *
+ * Copyright (c) AAEON. All rights reserved.
+ * Copyright (C) 2024 Bootlin
+ *
+ * Author: Gary Wang <garywang@aaeon.com.tw>
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ *
+ */
+
+#ifndef __LINUX_MFD_UPBOARD_FPGA_H
+#define __LINUX_MFD_UPBOARD_FPGA_H
+
+#define UPBOARD_REGISTER_SIZE 16
+
+enum upboard_fpgareg {
+ UPBOARD_REG_PLATFORM_ID = 0x10,
+ UPBOARD_REG_FIRMWARE_ID = 0x11,
+ UPBOARD_REG_FUNC_EN0 = 0x20,
+ UPBOARD_REG_FUNC_EN1 = 0x21,
+ UPBOARD_REG_GPIO_EN0 = 0x30,
+ UPBOARD_REG_GPIO_EN1 = 0x31,
+ UPBOARD_REG_GPIO_EN2 = 0x32,
+ UPBOARD_REG_GPIO_DIR0 = 0x40,
+ UPBOARD_REG_GPIO_DIR1 = 0x41,
+ UPBOARD_REG_GPIO_DIR2 = 0x42,
+ UPBOARD_REG_MAX,
+};
+
+enum upboard_fpga_type {
+ UPBOARD_UP_FPGA,
+ UPBOARD_UP2_FPGA,
+};
+
+struct upboard_fpga_data {
+ enum upboard_fpga_type type;
+ const struct regmap_config *regmap_config;
+};
+
+struct upboard_fpga {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *clear_gpio;
+ struct gpio_desc *strobe_gpio;
+ struct gpio_desc *datain_gpio;
+ struct gpio_desc *dataout_gpio;
+ unsigned int firmware_version;
+ const struct upboard_fpga_data *fpga_data;
+};
+
+#endif /* __LINUX_MFD_UPBOARD_FPGA_H */
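
All CPLD/FPGA state in struct upboard_fpga is reached through the regmap,
so reading the firmware revision reduces to a single register read. A
minimal sketch (hypothetical helper name; a populated struct upboard_fpga
is assumed):

	#include <linux/mfd/upboard-fpga.h>
	#include <linux/regmap.h>

	/* Illustrative helper: cache the 16-bit firmware ID register. */
	static int upboard_fpga_read_firmware_id(struct upboard_fpga *fpga)
	{
		unsigned int val;
		int ret;

		/* Registers are UPBOARD_REGISTER_SIZE (16) bits wide. */
		ret = regmap_read(fpga->regmap, UPBOARD_REG_FIRMWARE_ID, &val);
		if (ret)
			return ret;

		fpga->firmware_version = val;

		return 0;
	}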
diff --git a/include/linux/mfd/viperboard.h b/include/linux/mfd/viperboard.h
index 193452848c04..0557667fe544 100644
--- a/include/linux/mfd/viperboard.h
+++ b/include/linux/mfd/viperboard.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/viperboard.h
*
@@ -6,12 +7,6 @@
* (C) 2012 by Lemonage GmbH
* Author: Lars Poeschel <poeschel@lemonage.de>
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_VIPERBOARD_H__
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
new file mode 100644
index 000000000000..76a943c83c63
--- /dev/null
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -0,0 +1,588 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _WCD934X_REGISTERS_H
+#define _WCD934X_REGISTERS_H
+
+#define WCD934X_CODEC_RPM_CLK_GATE 0x0002
+#define WCD934X_CODEC_RPM_CLK_GATE_MASK GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG 0x0003
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_9P6MHZ BIT(0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_12P288MHZ BIT(1)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_RST_CTL 0x0009
+#define WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL 0x0011
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0 0x0021
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2 0x0023
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_CTL 0x0025
+#define WCD934X_EFUSE_SENSE_STATE_MASK GENMASK(4, 1)
+#define WCD934X_EFUSE_SENSE_STATE_DEF 0x10
+#define WCD934X_EFUSE_SENSE_EN_MASK BIT(0)
+#define WCD934X_EFUSE_SENSE_ENABLE BIT(0)
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 0x002a
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 0x002b
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039
+#define WCD934X_DATA_HUB_SB_TX10_INP_CFG 0x006b
+#define WCD934X_DATA_HUB_SB_TX11_INP_CFG 0x006c
+#define WCD934X_DATA_HUB_SB_TX13_INP_CFG 0x006e
+#define WCD934X_CPE_FLL_CONFIG_CTL_2 0x0111
+#define WCD934X_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD 0x0213
+#define WCD934X_CPE_SS_SVA_CFG 0x0214
+#define WCD934X_CPE_SS_DMIC0_CTL 0x0218
+#define WCD934X_CPE_SS_DMIC1_CTL 0x0219
+#define WCD934X_DMIC_RATE_MASK GENMASK(3, 1)
+#define WCD934X_CPE_SS_DMIC2_CTL 0x021a
+#define WCD934X_CPE_SS_DMIC_CFG 0x021b
+#define WCD934X_CPE_SS_CPAR_CFG 0x021c
+#define WCD934X_INTR_PIN1_MASK0 0x0409
+#define WCD934X_INTR_PIN1_STATUS0 0x0411
+#define WCD934X_INTR_PIN1_CLEAR0 0x0419
+#define WCD934X_INTR_PIN2_CLEAR3 0x0434
+#define WCD934X_INTR_LEVEL0 0x0461
+/* INTR_REG 0 */
+#define WCD934X_IRQ_SLIMBUS 0
+#define WCD934X_IRQ_MISC 1
+#define WCD934X_IRQ_HPH_PA_OCPL_FAULT 2
+#define WCD934X_IRQ_HPH_PA_OCPR_FAULT 3
+#define WCD934X_IRQ_EAR_PA_OCP_FAULT 4
+#define WCD934X_IRQ_HPH_PA_CNPL_COMPLETE 5
+#define WCD934X_IRQ_HPH_PA_CNPR_COMPLETE 6
+#define WCD934X_IRQ_EAR_PA_CNP_COMPLETE 7
+/* INTR_REG 1 */
+#define WCD934X_IRQ_MBHC_SW_DET 8
+#define WCD934X_IRQ_MBHC_ELECT_INS_REM_DET 9
+#define WCD934X_IRQ_MBHC_BUTTON_PRESS_DET 10
+#define WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET 11
+#define WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET 12
+#define WCD934X_IRQ_RESERVED_0 13
+#define WCD934X_IRQ_RESERVED_1 14
+#define WCD934X_IRQ_RESERVED_2 15
+/* INTR_REG 2 */
+#define WCD934X_IRQ_LINE_PA1_CNP_COMPLETE 16
+#define WCD934X_IRQ_LINE_PA2_CNP_COMPLETE 17
+#define WCD934X_IRQ_SLNQ_ANALOG_ERROR 18
+#define WCD934X_IRQ_RESERVED_3 19
+#define WCD934X_IRQ_SOUNDWIRE 20
+#define WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE 21
+#define WCD934X_IRQ_RCO_ERROR 22
+#define WCD934X_IRQ_CPE_ERROR 23
+/* INTR_REG 3 */
+#define WCD934X_IRQ_MAD_AUDIO 24
+#define WCD934X_IRQ_MAD_BEACON 25
+#define WCD934X_IRQ_MAD_ULTRASOUND 26
+#define WCD934X_IRQ_VBAT_ATTACK 27
+#define WCD934X_IRQ_VBAT_RESTORE 28
+#define WCD934X_IRQ_CPE1_INTR 29
+#define WCD934X_IRQ_RESERVED_4 30
+#define WCD934X_IRQ_SLNQ_DIGITAL 31
+#define WCD934X_NUM_IRQS 32
+#define WCD934X_ANA_BIAS 0x0601
+#define WCD934X_ANA_BIAS_EN_MASK BIT(7)
+#define WCD934X_ANA_BIAS_EN BIT(7)
+#define WCD934X_ANA_PRECHRG_EN_MASK BIT(6)
+#define WCD934X_ANA_PRECHRG_EN BIT(6)
+#define WCD934X_ANA_PRECHRG_MODE_MASK BIT(5)
+#define WCD934X_ANA_PRECHRG_MODE_AUTO BIT(5)
+#define WCD934X_ANA_RCO 0x0603
+#define WCD934X_ANA_RCO_BG_EN_MASK BIT(7)
+#define WCD934X_ANA_RCO_BG_ENABLE BIT(7)
+#define WCD934X_ANA_BUCK_CTL 0x0606
+#define WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK GENMASK(1, 0)
+#define WCD934X_ANA_BUCK_PRE_EN2_MASK BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN2_ENABLE BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN1_MASK BIT(1)
+#define WCD934X_ANA_BUCK_PRE_EN1_ENABLE BIT(1)
+#define WCD934X_ANA_BUCK_HI_ACCU_EN_MASK BIT(2)
+#define WCD934X_ANA_BUCK_HI_ACCU_ENABLE BIT(2)
+#define WCD934X_ANA_RX_SUPPLIES 0x0608
+#define WCD934X_ANA_HPH 0x0609
+#define WCD934X_ANA_EAR 0x060a
+#define WCD934X_ANA_LO_1_2 0x060b
+#define WCD934X_ANA_AMIC1 0x060e
+#define WCD934X_ANA_AMIC2 0x060f
+#define WCD934X_ANA_AMIC3 0x0610
+#define WCD934X_ANA_AMIC4 0x0611
+#define WCD934X_ANA_MBHC_MECH 0x0614
+#define WCD934X_MBHC_L_DET_EN_MASK BIT(7)
+#define WCD934X_MBHC_L_DET_EN BIT(7)
+#define WCD934X_MBHC_GND_DET_EN_MASK BIT(6)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_MASK BIT(5)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_INS 1
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_MASK BIT(4)
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_GND_PLUG_TYPE_MASK BIT(3)
+#define WCD934X_MBHC_GND_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_HSL_PULLUP_COMP_EN BIT(2)
+#define WCD934X_MBHC_HSG_PULLUP_COMP_EN BIT(1)
+#define WCD934X_MBHC_HPHL_100K_TO_GND_EN BIT(0)
+#define WCD934X_ANA_MBHC_ELECT 0x0615
+#define WCD934X_ANA_MBHC_BIAS_EN_MASK BIT(0)
+#define WCD934X_ANA_MBHC_BIAS_EN BIT(0)
+#define WCD934X_ANA_MBHC_ZDET 0x0616
+#define WCD934X_ANA_MBHC_RESULT_1 0x0617
+#define WCD934X_ANA_MBHC_RESULT_2 0x0618
+#define WCD934X_ANA_MBHC_RESULT_3 0x0619
+#define WCD934X_ANA_MBHC_BTN0 0x061a
+#define WCD934X_VTH_MASK GENMASK(7, 2)
+#define WCD934X_ANA_MBHC_BTN1 0x061b
+#define WCD934X_ANA_MBHC_BTN2 0x061c
+#define WCD934X_ANA_MBHC_BTN3 0x061d
+#define WCD934X_ANA_MBHC_BTN4 0x061e
+#define WCD934X_ANA_MBHC_BTN5 0x061f
+#define WCD934X_ANA_MBHC_BTN6 0x0620
+#define WCD934X_ANA_MBHC_BTN7 0x0621
+#define WCD934X_MBHC_BTN_VTH_MASK GENMASK(7, 2)
+#define WCD934X_ANA_MICB1 0x0622
+#define WCD934X_MICB_VAL_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6)
+#define WCD934X_MICB_DISABLE 0
+#define WCD934X_MICB_ENABLE 1
+#define WCD934X_MICB_PULL_UP 2
+#define WCD934X_MICB_PULL_DOWN 3
+#define WCD934X_ANA_MICB_PULL_UP 0x80
+#define WCD934X_ANA_MICB_ENABLE 0x40
+#define WCD934X_ANA_MICB_DISABLE 0x0
+#define WCD934X_ANA_MICB2 0x0623
+#define WCD934X_ANA_MICB2_ENABLE BIT(6)
+#define WCD934X_ANA_MICB2_ENABLE_MASK GENMASK(7, 6)
+#define WCD934X_ANA_MICB2_VOUT_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB2_RAMP 0x0624
+#define WCD934X_RAMP_EN_MASK BIT(7)
+#define WCD934X_RAMP_SHIFT_CTRL_MASK GENMASK(4, 2)
+#define WCD934X_ANA_MICB3 0x0625
+#define WCD934X_ANA_MICB4 0x0626
+#define WCD934X_BIAS_VBG_FINE_ADJ 0x0629
+#define WCD934X_MBHC_CTL_CLK 0x0656
+#define WCD934X_MBHC_CTL_BCS 0x065a
+#define WCD934X_MBHC_STATUS_SPARE_1 0x065b
+#define WCD934X_MICB1_TEST_CTL_1 0x066b
+#define WCD934X_MICB1_TEST_CTL_2 0x066c
+#define WCD934X_MICB2_TEST_CTL_1 0x066e
+#define WCD934X_MICB3_TEST_CTL_1 0x0671
+#define WCD934X_MICB4_TEST_CTL_1 0x0674
+#define WCD934X_CLASSH_MODE_1 0x0697
+#define WCD934X_CLASSH_MODE_2 0x0698
+#define WCD934X_CLASSH_MODE_3 0x0699
+#define WCD934X_CLASSH_CTRL_VCL_1 0x069a
+#define WCD934X_CLASSH_CTRL_VCL_2 0x069b
+#define WCD934X_CLASSH_CTRL_CCL_1 0x069c
+#define WCD934X_CLASSH_CTRL_CCL_2 0x069d
+#define WCD934X_CLASSH_CTRL_CCL_3 0x069e
+#define WCD934X_CLASSH_CTRL_CCL_4 0x069f
+#define WCD934X_CLASSH_CTRL_CCL_5 0x06a0
+#define WCD934X_CLASSH_BUCK_TMUX_A_D 0x06a1
+#define WCD934X_CLASSH_BUCK_SW_DRV_CNTL 0x06a2
+#define WCD934X_RX_OCP_CTL 0x06b6
+#define WCD934X_RX_OCP_COUNT 0x06b7
+#define WCD934X_HPH_CNP_EN 0x06cb
+#define WCD934X_HPH_CNP_WG_CTL 0x06cc
+#define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7)
+#define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7)
+#define WCD934X_HPH_CNP_WG_TIME 0x06cd
+#define WCD934X_HPH_OCP_CTL 0x06ce
+#define WCD934X_HPH_PA_CTL2 0x06d2
+#define WCD934X_HPHPA_GND_R_MASK BIT(6)
+#define WCD934X_HPHPA_GND_L_MASK BIT(4)
+#define WCD934X_HPH_L_EN 0x06d3
+#define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5)
+#define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0
+#define WCD934X_HPH_GAIN_SRC_SEL_REGISTER BIT(5)
+#define WCD934X_HPH_L_TEST 0x06d4
+#define WCD934X_HPH_R_EN 0x06d6
+#define WCD934X_HPH_R_TEST 0x06d7
+#define WCD934X_HPH_OCP_DET_MASK BIT(0)
+#define WCD934X_HPH_OCP_DET_ENABLE BIT(0)
+#define WCD934X_HPH_OCP_DET_DISABLE 0
+#define WCD934X_HPH_R_ATEST 0x06d8
+#define WCD934X_HPHPA_GND_OVR_MASK BIT(1)
+#define WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea
+#define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb
+#define WCD934X_CLK_SYS_MCLK_PRG 0x0711
+#define WCD934X_EXT_CLK_BUF_EN_MASK BIT(7)
+#define WCD934X_EXT_CLK_BUF_EN BIT(7)
+#define WCD934X_EXT_CLK_DIV_RATIO_MASK GENMASK(5, 4)
+#define WCD934X_EXT_CLK_DIV_BY_2 0x10
+#define WCD934X_MCLK_SRC_MASK BIT(1)
+#define WCD934X_MCLK_SRC_EXT_CLK 0
+#define WCD934X_MCLK_SRC_MCLK BIT(1)
+#define WCD934X_MCLK_EN_MASK BIT(0)
+#define WCD934X_MCLK_EN BIT(0)
+#define WCD934X_CLK_SYS_MCLK2_PRG1 0x0712
+#define WCD934X_CLK_SYS_MCLK2_PRG2 0x0713
+#define WCD934X_SIDO_NEW_VOUT_A_STARTUP 0x071b
+#define WCD934X_SIDO_NEW_VOUT_D_STARTUP 0x071c
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ1 0x071d
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e
+#define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0)
+#define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0)
+#define WCD934X_MBHC_NEW_CTL_1 0x0720
+#define WCD934X_MBHC_CTL_RCO_EN_MASK BIT(7)
+#define WCD934X_MBHC_CTL_RCO_EN BIT(7)
+#define WCD934X_MBHC_NEW_CTL_2 0x0721
+#define WCD934X_M_RTH_CTL_MASK GENMASK(3, 2)
+#define WCD934X_MBHC_NEW_PLUG_DETECT_CTL 0x0722
+#define WCD934X_HSDET_PULLUP_C_MASK GENMASK(7, 6)
+#define WCD934X_MBHC_NEW_ZDET_ANA_CTL 0x0723
+#define WCD934X_ZDET_RANGE_CTL_MASK GENMASK(3, 0)
+#define WCD934X_ZDET_MAXV_CTL_MASK GENMASK(6, 4)
+#define WCD934X_MBHC_NEW_ZDET_RAMP_CTL 0x0724
+#define WCD934X_MBHC_NEW_FSM_STATUS 0x0725
+#define WCD934X_MBHC_NEW_ADC_RESULT 0x0726
+#define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
+#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R 0x0736
+#define WCD934X_HPH_NEW_INT_HPH_TIMER1 0x073a
+#define WCD934X_HPH_AUTOCHOP_TIMER_EN_MASK BIT(1)
+#define WCD934X_HPH_AUTOCHOP_TIMER_ENABLE BIT(1)
+#define WCD934X_CDC_TX0_TX_PATH_CTL 0x0a31
+#define WCD934X_CDC_TX_PATH_CTL_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_CDC_TX_PATH_CTL(dec) (0xa31 + (dec) * 0x10)
+#define WCD934X_CDC_TX0_TX_PATH_CFG0 0x0a32
+#define WCD934X_CDC_TX0_TX_PATH_CFG1 0x0a33
+#define WCD934X_CDC_TX0_TX_VOL_CTL 0x0a34
+#define WCD934X_CDC_TX0_TX_PATH_192_CTL 0x0a35
+#define WCD934X_CDC_TX0_TX_PATH_192_CFG 0x0a36
+#define WCD934X_CDC_TX0_TX_PATH_SEC2 0x0a39
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ_MASK BIT(1)
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ BIT(1)
+#define WCD934X_CDC_TX1_TX_PATH_CTL 0x0a41
+#define WCD934X_CDC_TX1_TX_PATH_CFG0 0x0a42
+#define WCD934X_CDC_TX1_TX_PATH_CFG1 0x0a43
+#define WCD934X_CDC_TX1_TX_VOL_CTL 0x0a44
+#define WCD934X_CDC_TX2_TX_PATH_CTL 0x0a51
+#define WCD934X_CDC_TX2_TX_PATH_CFG0 0x0a52
+#define WCD934X_CDC_TX2_TX_PATH_CFG1 0x0a53
+#define WCD934X_CDC_TX2_TX_VOL_CTL 0x0a54
+#define WCD934X_CDC_TX3_TX_PATH_CTL 0x0a61
+#define WCD934X_CDC_TX3_TX_PATH_CFG0 0x0a62
+#define WCD934X_CDC_TX3_TX_PATH_CFG1 0x0a63
+#define WCD934X_CDC_TX3_TX_VOL_CTL 0x0a64
+#define WCD934X_CDC_TX3_TX_PATH_192_CTL 0x0a65
+#define WCD934X_CDC_TX3_TX_PATH_192_CFG 0x0a66
+#define WCD934X_CDC_TX4_TX_PATH_CTL 0x0a71
+#define WCD934X_CDC_TX4_TX_PATH_CFG0 0x0a72
+#define WCD934X_CDC_TX4_TX_PATH_CFG1 0x0a73
+#define WCD934X_CDC_TX4_TX_VOL_CTL 0x0a74
+#define WCD934X_CDC_TX4_TX_PATH_192_CTL 0x0a75
+#define WCD934X_CDC_TX4_TX_PATH_192_CFG 0x0a76
+#define WCD934X_CDC_TX5_TX_PATH_CTL 0x0a81
+#define WCD934X_CDC_TX5_TX_PATH_CFG0 0x0a82
+#define WCD934X_CDC_TX5_TX_PATH_CFG1 0x0a83
+#define WCD934X_CDC_TX5_TX_VOL_CTL 0x0a84
+#define WCD934X_CDC_TX5_TX_PATH_192_CTL 0x0a85
+#define WCD934X_CDC_TX5_TX_PATH_192_CFG 0x0a86
+#define WCD934X_CDC_TX6_TX_PATH_CTL 0x0a91
+#define WCD934X_CDC_TX6_TX_PATH_CFG0 0x0a92
+#define WCD934X_CDC_TX6_TX_PATH_CFG1 0x0a93
+#define WCD934X_CDC_TX6_TX_VOL_CTL 0x0a94
+#define WCD934X_CDC_TX6_TX_PATH_192_CTL 0x0a95
+#define WCD934X_CDC_TX6_TX_PATH_192_CFG 0x0a96
+#define WCD934X_CDC_TX7_TX_PATH_CTL 0x0aa1
+#define WCD934X_CDC_TX7_TX_PATH_CFG0 0x0aa2
+#define WCD934X_CDC_TX7_TX_PATH_CFG1 0x0aa3
+#define WCD934X_CDC_TX7_TX_VOL_CTL 0x0aa4
+#define WCD934X_CDC_TX7_TX_PATH_192_CTL 0x0aa5
+#define WCD934X_CDC_TX7_TX_PATH_192_CFG 0x0aa6
+#define WCD934X_CDC_TX8_TX_PATH_CTL 0x0ab1
+#define WCD934X_CDC_TX8_TX_PATH_CFG0 0x0ab2
+#define WCD934X_CDC_TX8_TX_PATH_CFG1 0x0ab3
+#define WCD934X_CDC_TX8_TX_VOL_CTL 0x0ab4
+#define WCD934X_CDC_TX8_TX_PATH_192_CTL 0x0ab5
+#define WCD934X_CDC_TX8_TX_PATH_192_CFG 0x0ab6
+#define WCD934X_CDC_TX9_SPKR_PROT_PATH_CFG0 0x0ac3
+#define WCD934X_CDC_TX10_SPKR_PROT_PATH_CFG0 0x0ac7
+#define WCD934X_CDC_TX11_SPKR_PROT_PATH_CFG0 0x0acb
+#define WCD934X_CDC_TX12_SPKR_PROT_PATH_CFG0 0x0acf
+#define WCD934X_CDC_COMPANDER1_CTL0 0x0b01
+#define WCD934X_COMP_CLK_EN_MASK BIT(0)
+#define WCD934X_COMP_CLK_ENABLE BIT(0)
+#define WCD934X_COMP_SOFT_RST_MASK BIT(1)
+#define WCD934X_COMP_SOFT_RST_ENABLE BIT(1)
+#define WCD934X_COMP_HALT_MASK BIT(2)
+#define WCD934X_COMP_HALT BIT(2)
+#define WCD934X_COMP_SOFT_RST_DISABLE 0
+#define WCD934X_CDC_COMPANDER1_CTL7 0x0b08
+#define WCD934X_HPH_LOW_PWR_MODE_EN_MASK BIT(5)
+#define WCD934X_CDC_COMPANDER2_CTL7 0x0b10
+#define WCD934X_CDC_COMPANDER7_CTL3 0x0b34
+#define WCD934X_CDC_COMPANDER7_CTL7 0x0b38
+#define WCD934X_CDC_COMPANDER8_CTL3 0x0b3c
+#define WCD934X_CDC_COMPANDER8_CTL7 0x0b40
+#define WCD934X_CDC_RX0_RX_PATH_CTL 0x0b41
+#define WCD934X_CDC_RX_PGA_MUTE_EN_MASK BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_ENABLE BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_DISABLE 0
+#define WCD934X_RX_CLK_EN_MASK BIT(5)
+#define WCD934X_RX_CLK_ENABLE BIT(5)
+#define WCD934X_RX_RESET_MASK BIT(6)
+#define WCD934X_RX_RESET_ENABLE BIT(6)
+#define WCD934X_RX_RESET_DISABLE 0
+#define WCD934X_RX_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_RX_PCM_RATE_F_48K 0x04
+#define WCD934X_CDC_RX_PATH_CTL(rx) (0xb41 + (rx) * 0x14)
+#define WCD934X_CDC_MIX_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_CDC_RX0_RX_PATH_CFG0 0x0b42
+#define WCD934X_RX_DLY_ZN_EN_MASK BIT(3)
+#define WCD934X_RX_DLY_ZN_ENABLE BIT(3)
+#define WCD934X_RX_DLY_ZN_DISABLE 0
+#define WCD934X_CDC_RX0_RX_PATH_CFG1 0x0b43
+#define WCD934X_CDC_RX0_RX_PATH_CFG2 0x0b44
+#define WCD934X_CDC_RX0_RX_VOL_CTL 0x0b45
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CTL 0x0b46
+#define WCD934X_CDC_RX_MIX_CLK_EN_MASK BIT(5)
+#define WCD934X_CDC_RX_MIX_CLK_ENABLE BIT(5)
+#define WCD934X_CDC_RX_PATH_MIX_CTL(rx) (0xb46 + (rx) * 0x14)
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CFG 0x0b47
+#define WCD934X_CDC_RX0_RX_VOL_MIX_CTL 0x0b48
+#define WCD934X_CDC_RX0_RX_PATH_SEC0 0x0b49
+#define WCD934X_CDC_RX0_RX_PATH_DSMDEM_CTL 0x0b53
+#define WCD934X_CDC_RX1_RX_PATH_CTL 0x0b55
+#define WCD934X_RX_PATH_PGA_MUTE_EN_MASK BIT(4)
+#define WCD934X_RX_PATH_PGA_MUTE_ENABLE BIT(4)
+#define WCD934X_CDC_RX_PATH_PGA_MUTE_DISABLE 0
+#define WCD934X_CDC_RX_PATH_CLK_EN_MASK BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_ENABLE BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_DISABLE 0
+#define WCD934X_CDC_RX1_RX_PATH_CFG0 0x0b56
+#define WCD934X_HPH_CMP_EN_MASK BIT(1)
+#define WCD934X_HPH_CMP_ENABLE BIT(1)
+#define WCD934X_HPH_CMP_DISABLE 0
+#define WCD934X_CDC_RX1_RX_PATH_CFG2 0x0b58
+#define WCD934X_CDC_RX1_RX_VOL_CTL 0x0b59
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CTL 0x0b5a
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CFG 0x0b5b
+#define WCD934X_CDC_RX1_RX_VOL_MIX_CTL 0x0b5c
+#define WCD934X_CDC_RX1_RX_PATH_SEC0 0x0b5d
+#define WCD934X_CDC_RX1_RX_PATH_SEC3 0x0b60
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_MASK GENMASK(5, 2)
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P3125 0x14
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P0000 0
+#define WCD934X_CDC_RX1_RX_PATH_DSMDEM_CTL 0x0b67
+#define WCD934X_CDC_RX2_RX_PATH_CTL 0x0b69
+#define WCD934X_CDC_RX2_RX_PATH_CFG0 0x0b6a
+#define WCD934X_CDC_RX_PATH_CFG_HD2_EN_MASK BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_ENABLE BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_DISABLE 0
+#define WCD934X_CDC_RX2_RX_PATH_CFG2 0x0b6c
+#define WCD934X_CDC_RX2_RX_VOL_CTL 0x0b6d
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CTL 0x0b6e
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CFG 0x0b6f
+#define WCD934X_CDC_RX2_RX_VOL_MIX_CTL 0x0b70
+#define WCD934X_CDC_RX2_RX_PATH_SEC0 0x0b71
+#define WCD934X_CDC_RX2_RX_PATH_SEC3 0x0b74
+#define WCD934X_CDC_RX2_RX_PATH_DSMDEM_CTL 0x0b7b
+#define WCD934X_CDC_RX3_RX_PATH_CTL 0x0b7d
+#define WCD934X_CDC_RX3_RX_PATH_CFG0 0x0b7e
+#define WCD934X_CDC_RX3_RX_PATH_CFG2 0x0b80
+#define WCD934X_CDC_RX3_RX_VOL_CTL 0x0b81
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CTL 0x0b82
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CFG 0x0b83
+#define WCD934X_CDC_RX3_RX_VOL_MIX_CTL 0x0b84
+#define WCD934X_CDC_RX3_RX_PATH_SEC0 0x0b85
+#define WCD934X_CDC_RX3_RX_PATH_DSMDEM_CTL 0x0b8f
+#define WCD934X_CDC_RX4_RX_PATH_CTL 0x0b91
+#define WCD934X_CDC_RX4_RX_PATH_CFG0 0x0b92
+#define WCD934X_CDC_RX4_RX_PATH_CFG2 0x0b94
+#define WCD934X_CDC_RX4_RX_VOL_CTL 0x0b95
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CTL 0x0b96
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CFG 0x0b97
+#define WCD934X_CDC_RX4_RX_VOL_MIX_CTL 0x0b98
+#define WCD934X_CDC_RX4_RX_PATH_SEC0 0x0b99
+#define WCD934X_CDC_RX4_RX_PATH_DSMDEM_CTL 0x0ba3
+#define WCD934X_CDC_RX7_RX_PATH_CTL 0x0bcd
+#define WCD934X_CDC_RX7_RX_PATH_CFG0 0x0bce
+#define WCD934X_CDC_RX7_RX_PATH_CFG1 0x0bcf
+#define WCD934X_CDC_RX7_RX_PATH_CFG2 0x0bd0
+#define WCD934X_CDC_RX7_RX_VOL_CTL 0x0bd1
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CTL 0x0bd2
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CFG 0x0bd3
+#define WCD934X_CDC_RX7_RX_VOL_MIX_CTL 0x0bd4
+#define WCD934X_CDC_RX7_RX_PATH_SEC1 0x0bd6
+#define WCD934X_CDC_RX7_RX_PATH_MIX_SEC0 0x0bdd
+#define WCD934X_CDC_RX7_RX_PATH_DSMDEM_CTL 0x0bdf
+#define WCD934X_CDC_RX8_RX_PATH_CTL 0x0be1
+#define WCD934X_CDC_RX8_RX_PATH_CFG0 0x0be2
+#define WCD934X_CDC_RX8_RX_PATH_CFG1 0x0be3
+#define WCD934X_RX_SMART_BOOST_EN_MASK BIT(0)
+#define WCD934X_RX_SMART_BOOST_ENABLE BIT(0)
+#define WCD934X_RX_SMART_BOOST_DISABLE 0
+#define WCD934X_CDC_RX8_RX_PATH_CFG2 0x0be4
+#define WCD934X_CDC_RX8_RX_VOL_CTL 0x0be5
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CTL 0x0be6
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CFG 0x0be7
+#define WCD934X_CDC_RX8_RX_VOL_MIX_CTL 0x0be8
+#define WCD934X_CDC_RX8_RX_PATH_SEC1 0x0bea
+#define WCD934X_CDC_RX8_RX_PATH_MIX_SEC0 0x0bf1
+#define WCD934X_CDC_RX8_RX_PATH_DSMDEM_CTL 0x0bf3
+#define WCD934X_CDC_CLSH_DECAY_CTRL 0x0c03
+#define WCD934X_CDC_CLSH_K2_MSB 0x0c0a
+#define WCD934X_CDC_CLSH_K2_LSB 0x0c0b
+#define WCD934X_CDC_CLSH_TEST0 0x0c0f
+#define WCD934X_CDC_BOOST0_BOOST_PATH_CTL 0x0c19
+#define WCD934X_BOOST_PATH_CLK_EN_MASK BIT(4)
+#define WCD934X_BOOST_PATH_CLK_ENABLE BIT(4)
+#define WCD934X_BOOST_PATH_CLK_DISABLE 0
+#define WCD934X_CDC_BOOST0_BOOST_CTL 0x0c1a
+#define WCD934X_CDC_BOOST0_BOOST_CFG1 0x0c1b
+#define WCD934X_CDC_BOOST0_BOOST_CFG2 0x0c1c
+#define WCD934X_CDC_BOOST1_BOOST_PATH_CTL 0x0c21
+#define WCD934X_CDC_BOOST1_BOOST_CTL 0x0c22
+#define WCD934X_CDC_BOOST1_BOOST_CFG1 0x0c23
+#define WCD934X_CDC_BOOST1_BOOST_CFG2 0x0c24
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_0 0x0c91
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_1 0x0c92
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_2 0x0c93
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_3 0x0c94
+#define WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS 0x0c96
+#define WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL 0x0cb5
+#define WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL 0x0cb9
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0 0x0d01
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG0(i) (0xd01 + (i) * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_SEL_MASK GENMASK(3, 0)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1 0x0d02
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG1(i) (0xd02 + (i) * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0 0x0d03
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1 0x0d04
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0 0x0d05
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1 0x0d06
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0 0x0d07
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1 0x0d08
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0 0x0d09
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1 0x0d0a
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0 0x0d0f
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1 0x0d10
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0 0x0d11
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1 0x0d12
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0 0x0d13
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1 0x0d14
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2 0x0d15
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3 0x0d16
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG4 0x0d17
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0 0x0d18
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1 0x0d19
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0 0x0d1d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1 0x0d1e
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0 0x0d1f
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1 0x0d20
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0 0x0d21
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1 0x0d22
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0 0x0d23
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1 0x0d25
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 0x0d26
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0 0x0d27
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0 0x0d28
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0 0x0d29
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0 0x0d2a
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0 0x0d2b
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0 0x0d2c
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0 0x0d2d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0 0x0d2e
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0 0x0d31
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1 0x0d32
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2 0x0d33
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3 0x0d34
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0 0x0d35
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1 0x0d36
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2 0x0d37
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3 0x0d38
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0 0x0d3a
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1 0x0d3b
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2 0x0d3c
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3 0x0d3d
+#define WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL 0x0d41
+#define WCD934X_CDC_MCLK_EN_MASK BIT(0)
+#define WCD934X_CDC_MCLK_EN_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL 0x0d42
+#define WCD934X_CDC_FS_MCLK_CNT_EN_MASK BIT(0)
+#define WCD934X_CDC_FS_MCLK_CNT_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL 0x0d43
+#define WCD934X_CDC_SWR_CLK_EN_MASK BIT(0)
+#define WCD934X_CDC_SWR_CLK_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL 0x0d44
+#define WCD934X_CDC_CLK_RST_CTRL_ASRC_SHARE_CONTROL 0x0d45
+#define WCD934X_CDC_CLK_RST_CTRL_GFM_CONTROL 0x0d46
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_PATH_CTL 0x0d55
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL 0x0d56
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL 0x0d57
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL 0x0d58
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL 0x0d59
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL 0x0d5a
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL 0x0d5b
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL 0x0d5c
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL 0x0d5d
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_CTL 0x0d5e
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL 0x0d5f
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL 0x0d60
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL 0x0d61
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_PATH_CTL 0x0d65
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL 0x0d66
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL 0x0d67
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL 0x0d68
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL 0x0d69
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL 0x0d6a
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL 0x0d6b
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL 0x0d6c
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL 0x0d6d
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_CTL 0x0d6e
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL 0x0d6f
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL 0x0d70
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL 0x0d71
+#define WCD934X_CDC_TOP_TOP_CFG1 0x0d82
+#define WCD934X_CDC_TOP_TOP_CFG7 0x0d88
+#define WCD934X_CDC_TOP_HPHL_COMP_LUT 0x0d8b
+#define WCD934X_CDC_TOP_HPHR_COMP_LUT 0x0d90
+#define WCD934X_HPH_LUT_BYPASS_MASK BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_ENABLE BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_DISABLE 0
+#define WCD934X_CODEC_CPR_WR_DATA_0 0x5001
+#define WCD934X_CODEC_CPR_WR_ADDR_0 0x5005
+#define WCD934X_CODEC_CPR_SVS_CX_VDD 0x5022
+#define WCD934X_CODEC_CPR_SVS2_CX_VDD 0x5023
+#define WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD 0x5027
+#define WCD934X_TLMM_DMIC1_CLK_PINCFG 0x8015
+#define WCD934X_TLMM_DMIC1_DATA_PINCFG 0x8016
+#define WCD934X_TLMM_DMIC2_CLK_PINCFG 0x8017
+#define WCD934X_TLMM_DMIC2_DATA_PINCFG 0x8018
+#define WCD934X_TLMM_DMIC3_CLK_PINCFG 0x8019
+#define WCD934X_TLMM_DMIC3_DATA_PINCFG 0x801a
+#define WCD934X_TEST_DEBUG_PAD_DRVCTL_0 0x803b
+#define WCD934X_TEST_DEBUG_NPL_DLY_TEST_1 0x803e
+
+#define WCD934X_MAX_REGISTER 0xffff
+#define WCD934X_SEL_REGISTER 0x800
+#define WCD934X_SEL_MASK 0xff
+#define WCD934X_SEL_SHIFT 0x0
+#define WCD934X_WINDOW_START 0x800
+#define WCD934X_WINDOW_LENGTH 0x100
+
+/* SLIMBUS Slave Registers */
+#define WCD934X_SLIM_PGD_PORT_INT_EN0 0x30
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_0 0x34
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_1 0x35
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_0 0x36
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_1 0x37
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_0 0x38
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_1 0x39
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_0 0x3A
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_1 0x3B
+#define WCD934X_SLIM_PGD_PORT_INT_RX_SOURCE0 0x60
+#define WCD934X_SLIM_PGD_PORT_INT_TX_SOURCE0 0x70
+#define WCD934X_SLIM_PGD_RX_PORT_CFG(p) (0x30 + p)
+#define WCD934X_SLIM_PGD_PORT_CFG(p) (0x40 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_CFG(p) (0x50 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_SRC(p) (0x60 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS(p) (0x80 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_0(p) (0x100 + 4 * (p))
+/* ports range from 10-16 */
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_1(p) (0x101 + 4 * (p))
+#define WCD934X_SLIM_PGD_RX_PORT_MULTI_CHNL_0(p) (0x140 + 4 * (p))
+
+#define SLIM_MANF_ID_QCOM 0x217
+#define SLIM_PROD_CODE_WCD9340 0x250
+#define SLIM_DEV_IDX_WCD9340 0x1
+#define SLIM_DEV_INSTANCE_ID_WCD9340 0
+
+#endif
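The WCD934X_SEL_REGISTER/WCD934X_WINDOW_* constants near the end of this header describe an indirect register map: the full 0x0000..0xffff space is reached through a 0x100-byte window starting at 0x800, with the active page written through the selector register. As a minimal sketch, assuming regmap's standard indirect-range support is used (the array name and the range_min value are illustrative, not taken from this patch), the constants line up with struct regmap_range_cfg like so:

#include <linux/regmap.h>

/* Sketch only: wire the paging constants above into a regmap range. */
static const struct regmap_range_cfg wcd934x_example_ranges[] = {
	{
		.name = "WCD934X",
		.range_min = 0x0,
		.range_max = WCD934X_MAX_REGISTER,
		.selector_reg = WCD934X_SEL_REGISTER,
		.selector_mask = WCD934X_SEL_MASK,
		.selector_shift = WCD934X_SEL_SHIFT,
		.window_start = WCD934X_WINDOW_START,
		.window_len = WCD934X_WINDOW_LENGTH,
	},
};

A regmap_config would then reference this table through its .ranges and .num_ranges members, after which reads and writes across the whole map page automatically.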
diff --git a/include/linux/mfd/wcd934x/wcd934x.h b/include/linux/mfd/wcd934x/wcd934x.h
new file mode 100644
index 000000000000..f3c65a035150
--- /dev/null
+++ b/include/linux/mfd/wcd934x/wcd934x.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __WCD934X_H__
+#define __WCD934X_H__
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slimbus.h>
+
+#define WCD934X_MAX_SUPPLY 5
+
+/**
+ * struct wcd934x_ddata - wcd934x driver data
+ *
+ * @supplies: wcd934x regulator supplies
+ * @irq_data: wcd934x irq_chip data
+ * @regmap: wcd934x regmap pointer
+ * @extclk: External clock
+ * @dev: device instance of wcd934x slim device
+ * @irq: irq for wcd934x.
+ */
+struct wcd934x_ddata {
+ struct regulator_bulk_data supplies[WCD934X_MAX_SUPPLY];
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+ struct clk *extclk;
+ struct device *dev;
+ int irq;
+};
+
+#endif /* __WCD934X_H__ */
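For orientation, here is a hedged sketch of how a probe routine might fill struct wcd934x_ddata; the helper name, supply names, and clock id below are placeholders invented for illustration, not taken from the driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/wcd934x/wcd934x.h>

static int wcd934x_example_get_resources(struct wcd934x_ddata *ddata)
{
	/* Placeholder supply names, chosen only for this sketch. */
	static const char * const names[WCD934X_MAX_SUPPLY] = {
		"vdd-a", "vdd-b", "vdd-c", "vdd-d", "vdd-io",
	};
	int i, ret;

	for (i = 0; i < WCD934X_MAX_SUPPLY; i++)
		ddata->supplies[i].supply = names[i];

	ret = devm_regulator_bulk_get(ddata->dev, WCD934X_MAX_SUPPLY,
				      ddata->supplies);
	if (ret)
		return ret;

	ddata->extclk = devm_clk_get(ddata->dev, "extclk");
	return PTR_ERR_OR_ZERO(ddata->extclk);
}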
diff --git a/include/linux/mfd/wl1273-core.h b/include/linux/mfd/wl1273-core.h
deleted file mode 100644
index db2f3f454a1b..000000000000
--- a/include/linux/mfd/wl1273-core.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * include/linux/mfd/wl1273-core.h
- *
- * Some definitions for the wl1273 radio receiver/transmitter chip.
- *
- * Copyright (C) 2010 Nokia Corporation
- * Author: Matti J. Aaltonen <matti.j.aaltonen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef WL1273_CORE_H
-#define WL1273_CORE_H
-
-#include <linux/i2c.h>
-#include <linux/mfd/core.h>
-
-#define WL1273_FM_DRIVER_NAME "wl1273-fm"
-#define RX71_FM_I2C_ADDR 0x22
-
-#define WL1273_STEREO_GET 0
-#define WL1273_RSSI_LVL_GET 1
-#define WL1273_IF_COUNT_GET 2
-#define WL1273_FLAG_GET 3
-#define WL1273_RDS_SYNC_GET 4
-#define WL1273_RDS_DATA_GET 5
-#define WL1273_FREQ_SET 10
-#define WL1273_AF_FREQ_SET 11
-#define WL1273_MOST_MODE_SET 12
-#define WL1273_MOST_BLEND_SET 13
-#define WL1273_DEMPH_MODE_SET 14
-#define WL1273_SEARCH_LVL_SET 15
-#define WL1273_BAND_SET 16
-#define WL1273_MUTE_STATUS_SET 17
-#define WL1273_RDS_PAUSE_LVL_SET 18
-#define WL1273_RDS_PAUSE_DUR_SET 19
-#define WL1273_RDS_MEM_SET 20
-#define WL1273_RDS_BLK_B_SET 21
-#define WL1273_RDS_MSK_B_SET 22
-#define WL1273_RDS_PI_MASK_SET 23
-#define WL1273_RDS_PI_SET 24
-#define WL1273_RDS_SYSTEM_SET 25
-#define WL1273_INT_MASK_SET 26
-#define WL1273_SEARCH_DIR_SET 27
-#define WL1273_VOLUME_SET 28
-#define WL1273_AUDIO_ENABLE 29
-#define WL1273_PCM_MODE_SET 30
-#define WL1273_I2S_MODE_CONFIG_SET 31
-#define WL1273_POWER_SET 32
-#define WL1273_INTX_CONFIG_SET 33
-#define WL1273_PULL_EN_SET 34
-#define WL1273_HILO_SET 35
-#define WL1273_SWITCH2FREF 36
-#define WL1273_FREQ_DRIFT_REPORT 37
-
-#define WL1273_PCE_GET 40
-#define WL1273_FIRM_VER_GET 41
-#define WL1273_ASIC_VER_GET 42
-#define WL1273_ASIC_ID_GET 43
-#define WL1273_MAN_ID_GET 44
-#define WL1273_TUNER_MODE_SET 45
-#define WL1273_STOP_SEARCH 46
-#define WL1273_RDS_CNTRL_SET 47
-
-#define WL1273_WRITE_HARDWARE_REG 100
-#define WL1273_CODE_DOWNLOAD 101
-#define WL1273_RESET 102
-
-#define WL1273_FM_POWER_MODE 254
-#define WL1273_FM_INTERRUPT 255
-
-/* Transmitter API */
-
-#define WL1273_CHANL_SET 55
-#define WL1273_SCAN_SPACING_SET 56
-#define WL1273_REF_SET 57
-#define WL1273_POWER_ENB_SET 90
-#define WL1273_POWER_ATT_SET 58
-#define WL1273_POWER_LEV_SET 59
-#define WL1273_AUDIO_DEV_SET 60
-#define WL1273_PILOT_DEV_SET 61
-#define WL1273_RDS_DEV_SET 62
-#define WL1273_PUPD_SET 91
-#define WL1273_AUDIO_IO_SET 63
-#define WL1273_PREMPH_SET 64
-#define WL1273_MONO_SET 66
-#define WL1273_MUTE 92
-#define WL1273_MPX_LMT_ENABLE 67
-#define WL1273_PI_SET 93
-#define WL1273_ECC_SET 69
-#define WL1273_PTY 70
-#define WL1273_AF 71
-#define WL1273_DISPLAY_MODE 74
-#define WL1273_RDS_REP_SET 77
-#define WL1273_RDS_CONFIG_DATA_SET 98
-#define WL1273_RDS_DATA_SET 99
-#define WL1273_RDS_DATA_ENB 94
-#define WL1273_TA_SET 78
-#define WL1273_TP_SET 79
-#define WL1273_DI_SET 80
-#define WL1273_MS_SET 81
-#define WL1273_PS_SCROLL_SPEED 82
-#define WL1273_TX_AUDIO_LEVEL_TEST 96
-#define WL1273_TX_AUDIO_LEVEL_TEST_THRESHOLD 73
-#define WL1273_TX_AUDIO_INPUT_LEVEL_RANGE_SET 54
-#define WL1273_RX_ANTENNA_SELECT 87
-#define WL1273_I2C_DEV_ADDR_SET 86
-#define WL1273_REF_ERR_CALIB_PARAM_SET 88
-#define WL1273_REF_ERR_CALIB_PERIODICITY_SET 89
-#define WL1273_SOC_INT_TRIGGER 52
-#define WL1273_SOC_AUDIO_PATH_SET 83
-#define WL1273_SOC_PCMI_OVERRIDE 84
-#define WL1273_SOC_I2S_OVERRIDE 85
-#define WL1273_RSSI_BLOCK_SCAN_FREQ_SET 95
-#define WL1273_RSSI_BLOCK_SCAN_START 97
-#define WL1273_RSSI_BLOCK_SCAN_DATA_GET 5
-#define WL1273_READ_FMANT_TUNE_VALUE 104
-
-#define WL1273_RDS_OFF 0
-#define WL1273_RDS_ON 1
-#define WL1273_RDS_RESET 2
-
-#define WL1273_AUDIO_DIGITAL 0
-#define WL1273_AUDIO_ANALOG 1
-
-#define WL1273_MODE_RX BIT(0)
-#define WL1273_MODE_TX BIT(1)
-#define WL1273_MODE_OFF BIT(2)
-#define WL1273_MODE_SUSPENDED BIT(3)
-
-#define WL1273_RADIO_CHILD BIT(0)
-#define WL1273_CODEC_CHILD BIT(1)
-
-#define WL1273_RX_MONO 1
-#define WL1273_RX_STEREO 0
-#define WL1273_TX_MONO 0
-#define WL1273_TX_STEREO 1
-
-#define WL1273_MAX_VOLUME 0xffff
-#define WL1273_DEFAULT_VOLUME 0x78b8
-
-/* I2S protocol, left channel first, data width 16 bits */
-#define WL1273_PCM_DEF_MODE 0x00
-
-/* Rx */
-#define WL1273_AUDIO_ENABLE_I2S BIT(0)
-#define WL1273_AUDIO_ENABLE_ANALOG BIT(1)
-
-/* Tx */
-#define WL1273_AUDIO_IO_SET_ANALOG 0
-#define WL1273_AUDIO_IO_SET_I2S 1
-
-#define WL1273_PUPD_SET_OFF 0x00
-#define WL1273_PUPD_SET_ON 0x01
-#define WL1273_PUPD_SET_RETENTION 0x10
-
-/* I2S mode */
-#define WL1273_IS2_WIDTH_32 0x0
-#define WL1273_IS2_WIDTH_40 0x1
-#define WL1273_IS2_WIDTH_22_23 0x2
-#define WL1273_IS2_WIDTH_23_22 0x3
-#define WL1273_IS2_WIDTH_48 0x4
-#define WL1273_IS2_WIDTH_50 0x5
-#define WL1273_IS2_WIDTH_60 0x6
-#define WL1273_IS2_WIDTH_64 0x7
-#define WL1273_IS2_WIDTH_80 0x8
-#define WL1273_IS2_WIDTH_96 0x9
-#define WL1273_IS2_WIDTH_128 0xa
-#define WL1273_IS2_WIDTH 0xf
-
-#define WL1273_IS2_FORMAT_STD (0x0 << 4)
-#define WL1273_IS2_FORMAT_LEFT (0x1 << 4)
-#define WL1273_IS2_FORMAT_RIGHT (0x2 << 4)
-#define WL1273_IS2_FORMAT_USER (0x3 << 4)
-
-#define WL1273_IS2_MASTER (0x0 << 6)
-#define WL1273_IS2_SLAVEW (0x1 << 6)
-
-#define WL1273_IS2_TRI_AFTER_SENDING (0x0 << 7)
-#define WL1273_IS2_TRI_ALWAYS_ACTIVE (0x1 << 7)
-
-#define WL1273_IS2_SDOWS_RR (0x0 << 8)
-#define WL1273_IS2_SDOWS_RF (0x1 << 8)
-#define WL1273_IS2_SDOWS_FR (0x2 << 8)
-#define WL1273_IS2_SDOWS_FF (0x3 << 8)
-
-#define WL1273_IS2_TRI_OPT (0x0 << 10)
-#define WL1273_IS2_TRI_ALWAYS (0x1 << 10)
-
-#define WL1273_IS2_RATE_48K (0x0 << 12)
-#define WL1273_IS2_RATE_44_1K (0x1 << 12)
-#define WL1273_IS2_RATE_32K (0x2 << 12)
-#define WL1273_IS2_RATE_22_05K (0x4 << 12)
-#define WL1273_IS2_RATE_16K (0x5 << 12)
-#define WL1273_IS2_RATE_12K (0x8 << 12)
-#define WL1273_IS2_RATE_11_025 (0x9 << 12)
-#define WL1273_IS2_RATE_8K (0xa << 12)
-#define WL1273_IS2_RATE (0xf << 12)
-
-#define WL1273_I2S_DEF_MODE (WL1273_IS2_WIDTH_32 | \
- WL1273_IS2_FORMAT_STD | \
- WL1273_IS2_MASTER | \
- WL1273_IS2_TRI_AFTER_SENDING | \
- WL1273_IS2_SDOWS_RR | \
- WL1273_IS2_TRI_OPT | \
- WL1273_IS2_RATE_48K)
-
-#define SCHAR_MIN (-128)
-#define SCHAR_MAX 127
-
-#define WL1273_FR_EVENT BIT(0)
-#define WL1273_BL_EVENT BIT(1)
-#define WL1273_RDS_EVENT BIT(2)
-#define WL1273_BBLK_EVENT BIT(3)
-#define WL1273_LSYNC_EVENT BIT(4)
-#define WL1273_LEV_EVENT BIT(5)
-#define WL1273_IFFR_EVENT BIT(6)
-#define WL1273_PI_EVENT BIT(7)
-#define WL1273_PD_EVENT BIT(8)
-#define WL1273_STIC_EVENT BIT(9)
-#define WL1273_MAL_EVENT BIT(10)
-#define WL1273_POW_ENB_EVENT BIT(11)
-#define WL1273_SCAN_OVER_EVENT BIT(12)
-#define WL1273_ERROR_EVENT BIT(13)
-
-#define TUNER_MODE_STOP_SEARCH 0
-#define TUNER_MODE_PRESET 1
-#define TUNER_MODE_AUTO_SEEK 2
-#define TUNER_MODE_AF 3
-#define TUNER_MODE_AUTO_SEEK_PI 4
-#define TUNER_MODE_AUTO_SEEK_BULK 5
-
-#define RDS_BLOCK_SIZE 3
-
-struct wl1273_fm_platform_data {
- int (*request_resources) (struct i2c_client *client);
- void (*free_resources) (void);
- void (*enable) (void);
- void (*disable) (void);
-
- u8 forbidden_modes;
- unsigned int children;
-};
-
-#define WL1273_FM_CORE_CELLS 2
-
-#define WL1273_BAND_OTHER 0
-#define WL1273_BAND_JAPAN 1
-
-#define WL1273_BAND_JAPAN_LOW 76000
-#define WL1273_BAND_JAPAN_HIGH 90000
-#define WL1273_BAND_OTHER_LOW 87500
-#define WL1273_BAND_OTHER_HIGH 108000
-
-#define WL1273_BAND_TX_LOW 76000
-#define WL1273_BAND_TX_HIGH 108000
-
-struct wl1273_core {
- struct mfd_cell cells[WL1273_FM_CORE_CELLS];
- struct wl1273_fm_platform_data *pdata;
-
- unsigned int mode;
- unsigned int i2s_mode;
- unsigned int volume;
- unsigned int audio_mode;
- unsigned int channel_number;
- struct mutex lock; /* for serializing fm radio operations */
-
- struct i2c_client *client;
-
- int (*read)(struct wl1273_core *core, u8, u16 *);
- int (*write)(struct wl1273_core *core, u8, u16);
- int (*write_data)(struct wl1273_core *core, u8 *, u16);
- int (*set_audio)(struct wl1273_core *core, unsigned int);
- int (*set_volume)(struct wl1273_core *core, unsigned int);
-};
-
-#endif /* ifndef WL1273_CORE_H */
diff --git a/include/linux/mfd/wm831x/auxadc.h b/include/linux/mfd/wm831x/auxadc.h
index 867aa23f9370..02ddb4fe1608 100644
--- a/include/linux/mfd/wm831x/auxadc.h
+++ b/include/linux/mfd/wm831x/auxadc.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/auxadc.h -- Auxiliary ADC interface for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_AUXADC_H__
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index b49fa67612f1..511bcad876f0 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/core.h -- Core interface for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_CORE_H__
@@ -418,7 +413,6 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int count, u16 *buf);
int wm831x_device_init(struct wm831x *wm831x, int irq);
-void wm831x_device_exit(struct wm831x *wm831x);
int wm831x_device_suspend(struct wm831x *wm831x);
void wm831x_device_shutdown(struct wm831x *wm831x);
int wm831x_irq_init(struct wm831x *wm831x, int irq);
diff --git a/include/linux/mfd/wm831x/gpio.h b/include/linux/mfd/wm831x/gpio.h
index 9b163c58865f..70587a4ec634 100644
--- a/include/linux/mfd/wm831x/gpio.h
+++ b/include/linux/mfd/wm831x/gpio.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/gpio.h -- GPIO for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_GPIO_H__
diff --git a/include/linux/mfd/wm831x/irq.h b/include/linux/mfd/wm831x/irq.h
index 3a8c97656fda..ab2d1524e729 100644
--- a/include/linux/mfd/wm831x/irq.h
+++ b/include/linux/mfd/wm831x/irq.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/irq.h -- Interrupt controller for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_IRQ_H__
diff --git a/include/linux/mfd/wm831x/otp.h b/include/linux/mfd/wm831x/otp.h
index ce1f81a39bfc..bc244456ad56 100644
--- a/include/linux/mfd/wm831x/otp.h
+++ b/include/linux/mfd/wm831x/otp.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/otp.h -- OTP interface for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_OTP_H__
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index dcc9631b3052..75aa94dadf1c 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/pdata.h -- Platform data for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_PDATA_H__
@@ -52,7 +47,6 @@ struct wm831x_battery_pdata {
* I2C or SPI buses.
*/
struct wm831x_buckv_pdata {
- int dvs_gpio; /** CPU GPIO to use for DVS switching */
int dvs_control_src; /** Hardware DVS source to use (1 or 2) */
int dvs_init_state; /** DVS state to expect on startup */
int dvs_state_gpio; /** CPU GPIO to use for monitoring status */
@@ -95,7 +89,6 @@ enum wm831x_watchdog_action {
struct wm831x_watchdog_pdata {
enum wm831x_watchdog_action primary, secondary;
- int update_gpio;
unsigned int software:1;
};
diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h
index b18cbb027bc3..77187fcaf226 100644
--- a/include/linux/mfd/wm831x/pmu.h
+++ b/include/linux/mfd/wm831x/pmu.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/pmu.h -- PMU for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_PMU_H__
diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h
index 955d30fc6a27..233b3017954a 100644
--- a/include/linux/mfd/wm831x/regulator.h
+++ b/include/linux/mfd/wm831x/regulator.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/mfd/wm831x/regulator.h -- Regulator definitons for wm831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_REGULATOR_H__
@@ -1213,6 +1208,6 @@
#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */
#define WM831X_ISINK_MAX_ISEL 55
-extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
+extern const unsigned int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
#endif
diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h
index 6bc090d0e3ac..0d263577d21d 100644
--- a/include/linux/mfd/wm831x/status.h
+++ b/include/linux/mfd/wm831x/status.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_STATUS_H__
diff --git a/include/linux/mfd/wm831x/watchdog.h b/include/linux/mfd/wm831x/watchdog.h
index 97a99b52956f..c997c792946c 100644
--- a/include/linux/mfd/wm831x/watchdog.h
+++ b/include/linux/mfd/wm831x/watchdog.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/watchdog.h -- Watchdog for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_WATCHDOG_H__
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h
index bd581c6fa085..ec01ec84d495 100644
--- a/include/linux/mfd/wm8350/audio.h
+++ b/include/linux/mfd/wm8350/audio.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* audio.h -- Audio Driver for Wolfson WM8350 PMIC
*
* Copyright 2007, 2008 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_AUDIO_H_
@@ -617,11 +612,8 @@ struct wm8350_audio_platform_data {
u32 codec_current_charge:2; /* codec current @ vmid charge */
};
-struct snd_soc_codec;
-
struct wm8350_codec {
struct platform_device *pdev;
- struct snd_soc_codec *codec;
struct wm8350_audio_platform_data *platform_data;
};
diff --git a/include/linux/mfd/wm8350/comparator.h b/include/linux/mfd/wm8350/comparator.h
index 54bc5d0fd502..250d89239528 100644
--- a/include/linux/mfd/wm8350/comparator.h
+++ b/include/linux/mfd/wm8350/comparator.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* comparator.h -- Comparator Aux ADC for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_WM8350_COMPARATOR_H_
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 509481d9cf19..5f70d3b5d1b1 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -1,23 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* core.h -- Core Driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_CORE_H_
#define __LINUX_MFD_WM8350_CORE_H_
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/types.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -26,6 +22,9 @@
#include <linux/mfd/wm8350/supply.h>
#include <linux/mfd/wm8350/wdt.h>
+struct device;
+struct platform_device;
+
/*
* Register values.
*/
@@ -643,7 +642,6 @@ struct wm8350_platform_data {
*/
int wm8350_device_init(struct wm8350 *wm8350, int irq,
struct wm8350_platform_data *pdata);
-void wm8350_device_exit(struct wm8350 *wm8350);
/*
* WM8350 device IO
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
index d657bcd6d955..e831b30dde3e 100644
--- a/include/linux/mfd/wm8350/gpio.h
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* gpio.h -- GPIO Driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_GPIO_H_
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h
index 7a09e7f1f984..04b09a2ddb28 100644
--- a/include/linux/mfd/wm8350/pmic.h
+++ b/include/linux/mfd/wm8350/pmic.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* pmic.h -- Power Management Driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_PMIC_H
diff --git a/include/linux/mfd/wm8350/rtc.h b/include/linux/mfd/wm8350/rtc.h
index ebd72ffc62d1..b2f58359b2eb 100644
--- a/include/linux/mfd/wm8350/rtc.h
+++ b/include/linux/mfd/wm8350/rtc.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* rtc.h -- RTC driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_WM8350_RTC_H
diff --git a/include/linux/mfd/wm8350/supply.h b/include/linux/mfd/wm8350/supply.h
index 8dc93673e34a..d7a91e26177c 100644
--- a/include/linux/mfd/wm8350/supply.h
+++ b/include/linux/mfd/wm8350/supply.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* supply.h -- Power Supply Driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_SUPPLY_H_
diff --git a/include/linux/mfd/wm8350/wdt.h b/include/linux/mfd/wm8350/wdt.h
index f6135b5e5ef4..97454aa8c436 100644
--- a/include/linux/mfd/wm8350/wdt.h
+++ b/include/linux/mfd/wm8350/wdt.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* wdt.h -- Watchdog Driver for Wolfson WM8350 PMIC
*
* Copyright 2007, 2008 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_WM8350_WDT_H_
diff --git a/include/linux/mfd/wm8400-audio.h b/include/linux/mfd/wm8400-audio.h
index e06ed3eb1d0a..d47bdcc7a765 100644
--- a/include/linux/mfd/wm8400-audio.h
+++ b/include/linux/mfd/wm8400-audio.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* wm8400 private definitions for audio
*
* Copyright 2008 Wolfson Microelectronics plc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_MFD_WM8400_AUDIO_H
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 4ee908f5b834..bc8c2ca6dc70 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* wm8400 private definitions.
*
* Copyright 2008 Wolfson Microelectronics plc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_MFD_WM8400_PRIV_H
@@ -923,12 +910,4 @@ struct wm8400 {
#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */
#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */
-int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
-
-static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
- u16 mask, u16 val)
-{
- return regmap_update_bits(wm8400->regmap, reg, mask, val);
-}
-
#endif
diff --git a/include/linux/mfd/wm8400.h b/include/linux/mfd/wm8400.h
index b46b566ac1ac..a812d89e7cb3 100644
--- a/include/linux/mfd/wm8400.h
+++ b/include/linux/mfd/wm8400.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* wm8400 client interface
*
* Copyright 2008 Wolfson Microelectronics plc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_MFD_WM8400_H
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index eefafa62d304..e8b093522ffd 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm8994/core.h -- Core interface for WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM8994_CORE_H__
diff --git a/include/linux/mfd/wm8994/gpio.h b/include/linux/mfd/wm8994/gpio.h
index 0c79b5ff4b5a..723fa331776e 100644
--- a/include/linux/mfd/wm8994/gpio.h
+++ b/include/linux/mfd/wm8994/gpio.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm8994/gpio.h - GPIO configuration for WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM8994_GPIO_H__
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 90c60524a496..6e2962ef5b81 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm8994/pdata.h -- Platform data for WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM8994_PDATA_H__
@@ -20,9 +15,6 @@
#define WM8994_NUM_AIF 3
struct wm8994_ldo_pdata {
- /** GPIOs to enable regulator, 0 or less if not available */
- int enable;
-
const struct regulator_init_data *init_data;
};
@@ -41,7 +33,7 @@ struct wm8994_ldo_pdata {
* DRC configurations are specified with a label and a set of register
* values to write (the enable bits will be ignored). At runtime an
* enumerated control will be presented for each DRC block allowing
- * the user to choose the configration to use.
+ * the user to choose the configuration to use.
*
* Configurations may be generated by hand or by using the DRC control
* panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/
@@ -222,6 +214,12 @@ struct wm8994_pdata {
*/
bool spkmode_pu;
+ /*
+ * CS/ADDR must be pulled internally by the device on this
+ * system.
+ */
+ bool csnaddr_pd;
+
/**
* Maximum number of channels clocks will be generated for,
 	 * useful for systems where an I2S bus with multiple data
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index db8cef3d5321..8782a207faf7 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm8994/registers.h -- Register definitions for WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM8994_REGISTERS_H__
diff --git a/include/linux/mfd/wm97xx.h b/include/linux/mfd/wm97xx.h
new file mode 100644
index 000000000000..446a5546ceca
--- /dev/null
+++ b/include/linux/mfd/wm97xx.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * wm97xx client interface
+ *
+ * Copyright (C) 2017 Robert Jarzmik
+ */
+
+#ifndef __LINUX_MFD_WM97XX_H
+#define __LINUX_MFD_WM97XX_H
+
+struct regmap;
+struct wm97xx_batt_pdata;
+struct snd_ac97;
+
+struct wm97xx_platform_data {
+ struct snd_ac97 *ac97;
+ struct regmap *regmap;
+ struct wm97xx_batt_pdata *batt_pdata;
+};
+
+#endif
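This pdata is presumably handed to the wm97xx sub-devices by the parent codec driver. A minimal sketch, assuming the usual MFD cell hand-off (the cell name and variable names here are placeholders):

#include <linux/mfd/core.h>
#include <linux/mfd/wm97xx.h>

static struct wm97xx_platform_data example_pdata;

static const struct mfd_cell example_cells[] = {
	{
		.name = "wm97xx-ts",	/* placeholder cell name */
		.platform_data = &example_pdata,
		.pdata_size = sizeof(example_pdata),
	},
};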
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
new file mode 100644
index 000000000000..dd372b0123a6
--- /dev/null
+++ b/include/linux/mhi.h
@@ -0,0 +1,812 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+#ifndef _MHI_H_
+#define _MHI_H_
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#define MHI_MAX_OEM_PK_HASH_SEGMENTS 16
+
+struct mhi_chan;
+struct mhi_event;
+struct mhi_ctxt;
+struct mhi_cmd;
+struct mhi_buf_info;
+
+/**
+ * enum mhi_callback - MHI callback
+ * @MHI_CB_IDLE: MHI entered idle state
+ * @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_LPM_ENTER: MHI host entered low power mode
+ * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
+ * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
+ * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
+ * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
+ * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
+ */
+enum mhi_callback {
+ MHI_CB_IDLE,
+ MHI_CB_PENDING_DATA,
+ MHI_CB_LPM_ENTER,
+ MHI_CB_LPM_EXIT,
+ MHI_CB_EE_RDDM,
+ MHI_CB_EE_MISSION_MODE,
+ MHI_CB_SYS_ERROR,
+ MHI_CB_FATAL_ERROR,
+ MHI_CB_BW_REQ,
+};
+
+/**
+ * enum mhi_flags - Transfer flags
+ * @MHI_EOB: End of buffer for bulk transfer
+ * @MHI_EOT: End of transfer
+ * @MHI_CHAIN: Linked transfer
+ */
+enum mhi_flags {
+ MHI_EOB = BIT(0),
+ MHI_EOT = BIT(1),
+ MHI_CHAIN = BIT(2),
+};
+
+/**
+ * enum mhi_device_type - Device types
+ * @MHI_DEVICE_XFER: Handles data transfer
+ * @MHI_DEVICE_CONTROLLER: Control device
+ */
+enum mhi_device_type {
+ MHI_DEVICE_XFER,
+ MHI_DEVICE_CONTROLLER,
+};
+
+/**
+ * enum mhi_ch_type - Channel types
+ * @MHI_CH_TYPE_INVALID: Invalid channel type
+ * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device
+ * @MHI_CH_TYPE_INBOUND: Inbound channel from the device
+ * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine
+ * multiple packets and send them as a single
+ * large packet to reduce CPU consumption
+ */
+enum mhi_ch_type {
+ MHI_CH_TYPE_INVALID = 0,
+ MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
+ MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
+ MHI_CH_TYPE_INBOUND_COALESCED = 3,
+};
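Note that the outbound and inbound values deliberately alias enum dma_data_direction, so for the non-coalesced types a channel's type can stand in for its DMA direction. A hedged helper sketch (not part of this header) making that mapping explicit:

#include <linux/dma-direction.h>

static inline enum dma_data_direction
example_mhi_ch_dir(enum mhi_ch_type type)
{
	switch (type) {
	case MHI_CH_TYPE_OUTBOUND:
		return DMA_TO_DEVICE;	/* numerically equal to the type */
	case MHI_CH_TYPE_INBOUND:
	case MHI_CH_TYPE_INBOUND_COALESCED:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}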
+
+/**
+ * struct image_info - Firmware and RDDM table
+ * @mhi_buf: Buffer for firmware and RDDM table
+ * @entries: # of entries in table
+ */
+struct image_info {
+ struct mhi_buf *mhi_buf;
+ /* private: from internal.h */
+ struct bhi_vec_entry *bhi_vec;
+ /* public: */
+ u32 entries;
+};
+
+/**
+ * struct mhi_link_info - BW requirement
+ * @target_link_speed: Link speed as defined by TLS bits in LinkControl reg
+ * @target_link_width: Link width as defined by NLW bits in LinkStatus reg
+ */
+struct mhi_link_info {
+ unsigned int target_link_speed;
+ unsigned int target_link_width;
+};
+
+/**
+ * enum mhi_ee_type - Execution environment types
+ * @MHI_EE_PBL: Primary Bootloader
+ * @MHI_EE_SBL: Secondary Bootloader
+ * @MHI_EE_AMSS: Modem, aka the primary runtime EE
+ * @MHI_EE_RDDM: RAM dump download mode
+ * @MHI_EE_WFW: WLAN firmware mode
+ * @MHI_EE_PTHRU: Passthrough
+ * @MHI_EE_EDL: Embedded downloader
+ * @MHI_EE_FP: Flash Programmer Environment
+ */
+enum mhi_ee_type {
+ MHI_EE_PBL,
+ MHI_EE_SBL,
+ MHI_EE_AMSS,
+ MHI_EE_RDDM,
+ MHI_EE_WFW,
+ MHI_EE_PTHRU,
+ MHI_EE_EDL,
+ MHI_EE_FP,
+ MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
+ MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
+ MHI_EE_NOT_SUPPORTED,
+ MHI_EE_MAX,
+};
+
+/**
+ * enum mhi_state - MHI states
+ * @MHI_STATE_RESET: Reset state
+ * @MHI_STATE_READY: Ready state
+ * @MHI_STATE_M0: M0 state
+ * @MHI_STATE_M1: M1 state
+ * @MHI_STATE_M2: M2 state
+ * @MHI_STATE_M3: M3 state
+ * @MHI_STATE_M3_FAST: M3 Fast state
+ * @MHI_STATE_BHI: BHI state
+ * @MHI_STATE_SYS_ERR: System Error state
+ */
+enum mhi_state {
+ MHI_STATE_RESET = 0x0,
+ MHI_STATE_READY = 0x1,
+ MHI_STATE_M0 = 0x2,
+ MHI_STATE_M1 = 0x3,
+ MHI_STATE_M2 = 0x4,
+ MHI_STATE_M3 = 0x5,
+ MHI_STATE_M3_FAST = 0x6,
+ MHI_STATE_BHI = 0x7,
+ MHI_STATE_SYS_ERR = 0xFF,
+ MHI_STATE_MAX,
+};
+
+/**
+ * enum mhi_ch_ee_mask - Execution environment mask for channel
+ * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE
+ * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE
+ * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE
+ * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE
+ * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE
+ * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE
+ * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE
+ */
+enum mhi_ch_ee_mask {
+ MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
+ MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
+ MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
+ MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
+ MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
+ MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
+ MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
+};
+
+/**
+ * enum mhi_er_data_type - Event ring data types
+ * @MHI_ER_DATA: Only client data over this ring
+ * @MHI_ER_CTRL: MHI control data and client data
+ */
+enum mhi_er_data_type {
+ MHI_ER_DATA,
+ MHI_ER_CTRL,
+};
+
+/**
+ * enum mhi_db_brst_mode - Doorbell mode
+ * @MHI_DB_BRST_DISABLE: Burst mode disable
+ * @MHI_DB_BRST_ENABLE: Burst mode enable
+ */
+enum mhi_db_brst_mode {
+ MHI_DB_BRST_DISABLE = 0x2,
+ MHI_DB_BRST_ENABLE = 0x3,
+};
+
+/**
+ * struct mhi_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @local_elements: The local ring length of the channel
+ * @event_ring: The event ring index that services this channel
+ * @dir: Direction that data may flow on this channel
+ * @type: Channel type
+ * @ee_mask: Execution Environment mask for this channel
+ * @pollcfg: Polling configuration for burst mode. 0 is default; the value is
+ * in milliseconds for UL channels, multiples of 8 ring elements for DL
+ * channels
+ * @doorbell: Doorbell mode
+ * @lpm_notify: The channel master requires low power mode notifications
+ * @offload_channel: The client manages the channel completely
+ * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
+ * @auto_queue: Framework will automatically queue buffers for DL traffic
+ * @wake_capable: Channel capable of waking up the system
+ */
+struct mhi_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ u32 local_elements;
+ u32 event_ring;
+ enum dma_data_direction dir;
+ enum mhi_ch_type type;
+ u32 ee_mask;
+ u32 pollcfg;
+ enum mhi_db_brst_mode doorbell;
+ bool lpm_notify;
+ bool offload_channel;
+ bool doorbell_mode_switch;
+ bool auto_queue;
+ bool wake_capable;
+};
+
+/**
+ * struct mhi_event_config - Event ring configuration structure for controller
+ * @num_elements: The number of elements that can be queued to this ring
+ * @irq_moderation_ms: Delay irq for additional events to be aggregated
+ * @irq: IRQ associated with this ring
+ * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
+ * @priority: Priority of this ring. Use 1 for now
+ * @mode: Doorbell mode
+ * @data_type: Type of data this ring will process
+ * @hardware_event: This ring is associated with hardware channels
+ * @client_managed: This ring is client managed
+ * @offload_channel: This ring is associated with an offloaded channel
+ */
+struct mhi_event_config {
+ u32 num_elements;
+ u32 irq_moderation_ms;
+ u32 irq;
+ u32 channel;
+ u32 priority;
+ enum mhi_db_brst_mode mode;
+ enum mhi_er_data_type data_type;
+ bool hardware_event;
+ bool client_managed;
+ bool offload_channel;
+};
+
+/**
+ * struct mhi_controller_config - Root MHI controller configuration
+ * @max_channels: Maximum number of channels supported
+ * @timeout_ms: Timeout value for operations. 0 means use default
+ * @ready_timeout_ms: Timeout value for waiting device to be ready (optional)
+ * @buf_len: Size of automatically allocated buffers. 0 means use default
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ * @num_events: Number of event rings defined in @event_cfg
+ * @event_cfg: Array of defined event rings
+ * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access
+ * @m2_no_db: Host is not allowed to ring DB in M2 state
+ */
+struct mhi_controller_config {
+ u32 max_channels;
+ u32 timeout_ms;
+ u32 ready_timeout_ms;
+ u32 buf_len;
+ u32 num_channels;
+ const struct mhi_channel_config *ch_cfg;
+ u32 num_events;
+ struct mhi_event_config *event_cfg;
+ bool use_bounce_buf;
+ bool m2_no_db;
+};
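
Taken together, these three structures let a controller driver declare its topology up front. A minimal sketch, assuming a hypothetical "LOOPBACK" UL/DL channel pair served by one control event ring (names, channel numbers and ring sizes are invented for illustration):

static const struct mhi_channel_config example_channels[] = {
	{
		.name = "LOOPBACK",
		.num = 0,
		.num_elements = 64,
		.event_ring = 0,
		.dir = DMA_TO_DEVICE,
		.ee_mask = MHI_CH_EE_AMSS,
		.doorbell = MHI_DB_BRST_DISABLE,
	},
	{
		.name = "LOOPBACK",
		.num = 1,
		.num_elements = 64,
		.event_ring = 0,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = MHI_CH_EE_AMSS,
		.doorbell = MHI_DB_BRST_DISABLE,
	},
};

static struct mhi_event_config example_events[] = {
	{
		.num_elements = 128,
		.irq = 1,
		.channel = U32_MAX,		/* not dedicated to one channel */
		.priority = 1,			/* "use 1 for now", per above */
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,	/* control + client data */
	},
};

static const struct mhi_controller_config example_config = {
	.max_channels = 128,
	.timeout_ms = 5000,
	.num_channels = ARRAY_SIZE(example_channels),
	.ch_cfg = example_channels,
	.num_events = ARRAY_SIZE(example_events),
	.event_cfg = example_events,
};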
+
+/**
+ * struct mhi_controller - Master MHI controller structure
+ * @name: Device name of the MHI controller
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * controller (required)
+ * @mhi_dev: MHI device instance for the controller
+ * @debugfs_dentry: MHI controller debugfs directory
+ * @regs: Base address of MHI MMIO register space (required)
+ * @bhi: Points to base of MHI BHI register space
+ * @bhie: Points to base of MHI BHIe register space
+ * @wake_db: MHI WAKE doorbell register address
+ * @iova_start: IOMMU starting address for data (required)
+ * @iova_stop: IOMMU stop address for data (required)
+ * @fw_image: Firmware image name for normal booting (optional)
+ * @fw_data: Firmware image data content for normal booting, used only
+ * if fw_image is NULL and fbc_download is true (optional)
+ * @fw_sz: Firmware image data size for normal booting, used only if fw_image
+ * is NULL and fbc_download is true (optional)
+ * @edl_image: Firmware image name for emergency download mode (optional)
+ * @rddm_size: RAM dump size that host should allocate for debugging purpose
+ * @sbl_size: SBL image size downloaded through BHIe (optional)
+ * @seg_len: BHIe vector size (optional)
+ * @reg_len: Length of the MHI MMIO region (required)
+ * @fbc_image: Points to firmware image buffer
+ * @rddm_image: Points to RAM dump buffer
+ * @mhi_chan: Points to the channel configuration table
+ * @lpm_chans: List of channels that require LPM notifications
+ * @irq: base irq # to request (required)
+ * @max_chan: Maximum number of channels the controller supports
+ * @total_ev_rings: Total # of event rings allocated
+ * @hw_ev_rings: Number of hardware event rings
+ * @sw_ev_rings: Number of software event rings
+ * @nr_irqs: Number of IRQ allocated by bus master (required)
+ * @serial_number: MHI controller serial number obtained from BHI
+ * @mhi_event: MHI event ring configurations table
+ * @mhi_cmd: MHI command ring configurations table
+ * @mhi_ctxt: MHI device context, shared memory between host and device
+ * @pm_mutex: Mutex for suspend/resume operation
+ * @pm_lock: Lock for protecting MHI power management state
+ * @timeout_ms: Timeout in ms for state transitions
+ * @ready_timeout_ms: Timeout in ms for waiting device to be ready (optional)
+ * @pm_state: MHI power management state
+ * @db_access: DB access states
+ * @ee: MHI device execution environment
+ * @dev_state: MHI device state
+ * @dev_wake: Device wakeup count
+ * @pending_pkts: Pending packets for the controller
+ * @M0, M2, M3: Counters to track number of device MHI state changes
+ * @transition_list: List of MHI state transitions
+ * @transition_lock: Lock for protecting MHI state transition list
+ * @wlock: Lock for protecting device wakeup
+ * @mhi_link_info: Device bandwidth info
+ * @st_worker: State transition worker
+ * @hiprio_wq: High priority workqueue for MHI work such as state transitions
+ * @state_event: State change event
+ * @status_cb: CB function to notify power states of the device (required)
+ * @wake_get: CB function to assert device wake (optional)
+ * @wake_put: CB function to de-assert device wake (optional)
+ * @wake_toggle: CB function to assert and de-assert device wake (optional)
+ * @runtime_get: CB function to controller runtime resume (required)
+ * @runtime_put: CB function to decrement pm usage (required)
+ * @map_single: CB function to create TRE buffer
+ * @unmap_single: CB function to destroy TRE buffer
+ * @read_reg: Read a MHI register via the physical link (required)
+ * @write_reg: Write a MHI register via the physical link (required)
+ * @reset: Controller specific reset function (optional)
+ * @edl_trigger: CB function to trigger EDL mode (optional)
+ * @buffer_len: Bounce buffer length
+ * @index: Index of the MHI controller instance
+ * @bounce_buf: Use of bounce buffer
+ * @fbc_download: MHI host needs to do complete image transfer (optional)
+ * @wake_set: Device wakeup set flag
+ * @irq_flags: irq flags passed to request_irq (optional)
+ * @mru: the default MRU for the MHI device
+ *
+ * Fields marked as (required) need to be populated by the controller driver
+ * before calling mhi_register_controller(). For the fields marked as (optional)
+ * they can be populated depending on the usecase.
+ */
+struct mhi_controller {
+ const char *name;
+ struct device *cntrl_dev;
+ struct mhi_device *mhi_dev;
+ struct dentry *debugfs_dentry;
+ void __iomem *regs;
+ void __iomem *bhi;
+ void __iomem *bhie;
+ void __iomem *wake_db;
+
+ dma_addr_t iova_start;
+ dma_addr_t iova_stop;
+ const char *fw_image;
+ const u8 *fw_data;
+ size_t fw_sz;
+ const char *edl_image;
+ size_t rddm_size;
+ size_t sbl_size;
+ size_t seg_len;
+ size_t reg_len;
+ struct image_info *fbc_image;
+ struct image_info *rddm_image;
+ struct mhi_chan *mhi_chan;
+ struct list_head lpm_chans;
+ int *irq;
+ u32 max_chan;
+ u32 total_ev_rings;
+ u32 hw_ev_rings;
+ u32 sw_ev_rings;
+ u32 nr_irqs;
+ u32 serial_number;
+
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_ctxt *mhi_ctxt;
+
+ struct mutex pm_mutex;
+ rwlock_t pm_lock;
+ u32 timeout_ms;
+ u32 ready_timeout_ms;
+ u32 pm_state;
+ u32 db_access;
+ enum mhi_ee_type ee;
+ enum mhi_state dev_state;
+ atomic_t dev_wake;
+ atomic_t pending_pkts;
+ u32 M0, M2, M3;
+ struct list_head transition_list;
+ spinlock_t transition_lock;
+ spinlock_t wlock;
+ struct mhi_link_info mhi_link_info;
+ struct work_struct st_worker;
+ struct workqueue_struct *hiprio_wq;
+ wait_queue_head_t state_event;
+
+ void (*status_cb)(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb);
+ void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
+ void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
+ void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
+ int (*runtime_get)(struct mhi_controller *mhi_cntrl);
+ void (*runtime_put)(struct mhi_controller *mhi_cntrl);
+ int (*map_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+ void (*unmap_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+ int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+ u32 *out);
+ void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+ u32 val);
+ void (*reset)(struct mhi_controller *mhi_cntrl);
+ int (*edl_trigger)(struct mhi_controller *mhi_cntrl);
+
+ size_t buffer_len;
+ int index;
+ bool bounce_buf;
+ bool fbc_download;
+ bool wake_set;
+ unsigned long irq_flags;
+ u32 mru;
+};
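
For a controller whose register space is plain MMIO, the required read_reg/write_reg ops can be thin wrappers. A hedged sketch, assuming the link cannot fail mid-access (a PCI controller driver may not be able to assume that):

static int example_read_reg(struct mhi_controller *mhi_cntrl,
			    void __iomem *addr, u32 *out)
{
	*out = readl(addr);	/* direct MMIO; no link-error handling modeled */
	return 0;
}

static void example_write_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *addr, u32 val)
{
	writel(val, addr);
}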
+
+/**
+ * struct mhi_device - Structure representing an MHI device which binds
+ * to channels or is associated with controllers
+ * @id: Pointer to MHI device ID struct
+ * @name: Name of the associated MHI device
+ * @mhi_cntrl: Controller the device belongs to
+ * @ul_chan: UL channel for the device
+ * @dl_chan: DL channel for the device
+ * @dev: Driver model device node for the MHI device
+ * @dev_type: MHI device type
+ * @ul_chan_id: MHI channel id for UL transfer
+ * @dl_chan_id: MHI channel id for DL transfer
+ * @dev_wake: Device wakeup counter
+ */
+struct mhi_device {
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *ul_chan;
+ struct mhi_chan *dl_chan;
+ struct device dev;
+ enum mhi_device_type dev_type;
+ int ul_chan_id;
+ int dl_chan_id;
+ u32 dev_wake;
+};
+
+/**
+ * struct mhi_result - Completed buffer information
+ * @buf_addr: Address of data buffer
+ * @bytes_xferd: # of bytes transferred
+ * @dir: Channel direction
+ * @transaction_status: Status of last transaction
+ */
+struct mhi_result {
+ void *buf_addr;
+ size_t bytes_xferd;
+ enum dma_data_direction dir;
+ int transaction_status;
+};
+
+/**
+ * struct mhi_buf - MHI Buffer description
+ * @buf: Virtual address of the buffer
+ * @name: Buffer label. For offload channel, configurations name must be:
+ * ECA - Event context array data
+ * CCA - Channel context array data
+ * @dma_addr: IOMMU address of the buffer
+ * @len: # of bytes
+ */
+struct mhi_buf {
+ void *buf;
+ const char *name;
+ dma_addr_t dma_addr;
+ size_t len;
+};
+
+/**
+ * struct mhi_driver - Structure representing an MHI client driver
+ * @id_table: Pointer to MHI device ID table
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL data transfer
+ * @dl_xfer_cb: CB function for DL data transfer
+ * @status_cb: CB functions for asynchronous status
+ * @driver: Device driver model driver
+ */
+struct mhi_driver {
+ const struct mhi_device_id *id_table;
+ int (*probe)(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_device *mhi_dev);
+ void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
+ struct mhi_result *result);
+ void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb);
+ struct device_driver driver;
+};
+
+#define to_mhi_driver(drv) container_of_const(drv, struct mhi_driver, driver)
+#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
+
+/**
+ * mhi_alloc_controller - Allocate the MHI Controller structure
+ * Allocate the mhi_controller structure using zero initialized memory
+ */
+struct mhi_controller *mhi_alloc_controller(void);
+
+/**
+ * mhi_free_controller - Free the MHI Controller structure
+ * Free the mhi_controller structure which was previously allocated
+ */
+void mhi_free_controller(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_register_controller - Register MHI controller
+ * @mhi_cntrl: MHI controller to register
+ * @config: Configuration to use for the controller
+ */
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config);
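
A plausible registration sequence in a bus glue driver's probe path, reusing example_config and the register accessors sketched above; example_status_cb and the runtime callbacks are assumed stubs provided by the caller:

static int example_register(struct device *dev, void __iomem *mmio_base,
			    size_t mmio_len, int *irq_table)
{
	struct mhi_controller *mhi_cntrl;
	int ret;

	mhi_cntrl = mhi_alloc_controller();
	if (!mhi_cntrl)
		return -ENOMEM;

	/* Populate the fields marked (required) before registering */
	mhi_cntrl->cntrl_dev = dev;
	mhi_cntrl->regs = mmio_base;
	mhi_cntrl->reg_len = mmio_len;
	mhi_cntrl->iova_start = 0;
	mhi_cntrl->iova_stop = DMA_BIT_MASK(32);
	mhi_cntrl->irq = irq_table;
	mhi_cntrl->nr_irqs = 1;
	mhi_cntrl->status_cb = example_status_cb;
	mhi_cntrl->runtime_get = example_runtime_get;
	mhi_cntrl->runtime_put = example_runtime_put;
	mhi_cntrl->read_reg = example_read_reg;
	mhi_cntrl->write_reg = example_write_reg;

	ret = mhi_register_controller(mhi_cntrl, &example_config);
	if (ret)
		mhi_free_controller(mhi_cntrl);
	return ret;
}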
+
+/**
+ * mhi_unregister_controller - Unregister MHI controller
+ * @mhi_cntrl: MHI controller to unregister
+ */
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
+
+/*
+ * module_mhi_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_driver_register() and
+ * mhi_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_driver_register, \
+ mhi_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_driver_register(mhi_drv) \
+ __mhi_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_driver_register - Register driver with MHI framework
+ * @mhi_drv: Driver associated with the device
+ * @owner: The module owner
+ */
+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_driver_unregister - Unregister a driver for mhi_devices
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_driver_unregister(struct mhi_driver *mhi_drv);
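
Putting the client-side pieces together, a skeletal driver might look like the following. Everything prefixed example_ is hypothetical, the channel-name matching in the ID table is assumed, and a real client would also wire up ul_xfer_cb/status_cb as its channels require:

static int example_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	/* start both channels; buffers can be queued afterwards */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void example_remove(struct mhi_device *mhi_dev)
{
	mhi_unprepare_from_transfer(mhi_dev);
}

static void example_dl_xfer_cb(struct mhi_device *mhi_dev,
			       struct mhi_result *result)
{
	/* result->buf_addr holds result->bytes_xferd received bytes */
}

static const struct mhi_device_id example_id_table[] = {
	{ .chan = "LOOPBACK" },
	{}
};

static struct mhi_driver example_driver = {
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
	.dl_xfer_cb = example_dl_xfer_cb,
	.driver = {
		.name = "example_mhi_client",
	},
};
module_mhi_driver(example_driver);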
+
+/**
+ * mhi_set_mhi_state - Set MHI device state
+ * @mhi_cntrl: MHI controller
+ * @state: State to set
+ */
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_state state);
+
+/**
+ * mhi_notify - Notify the MHI client driver about client device status
+ * @mhi_dev: MHI device instance
+ * @cb_reason: MHI callback reason
+ */
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
+
+/**
+ * mhi_get_free_desc_count - Get transfer ring length
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ *
+ * Get # of TDs available for queueing buffers
+ */
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir);
+
+/**
+ * mhi_prepare_for_power_up - Do pre-initialization before power up.
+ * This is optional, call this before power up if
+ * the controller does not want bus framework to
+ * automatically free any allocated memory during
+ * shutdown process.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_async_power_up - Start MHI power up sequence
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_sync_power_up - Start MHI power up sequence and wait till the device
+ * enters valid EE state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_power_down - Power down the MHI device and also destroy the
+ * 'struct device' for the channels associated with it.
+ * See also mhi_power_down_keep_dev() which is a variant
+ * of this API that keeps the 'struct device' for channels
+ * (useful during suspend/hibernation).
+ * @mhi_cntrl: MHI controller
+ * @graceful: Link is still accessible, so do a graceful shutdown process
+ */
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
+ * mhi_power_down_keep_dev - Power down the MHI device but keep the 'struct
+ * device' for the channels associated with it.
+ * This is a variant of 'mhi_power_down()' and
+ * useful in scenarios such as suspend/hibernation
+ * where destroying of the 'struct device' is not
+ * needed.
+ * @mhi_cntrl: MHI controller
+ * @graceful: Link is still accessible, so do a graceful shutdown process
+ */
+void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
+ * mhi_unprepare_after_power_down - Free any allocated memory after power down
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_suspend - Move MHI into a suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume - Resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec, devices
+ * have to be in M3 state during resume. But some devices seem to be in an MHI
+ * state other than M3, yet they continue working fine if allowed. This API is
+ * intended to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_download_rddm_image - Download ramdump image from device for
+ * debugging purpose.
+ * @mhi_cntrl: MHI controller
+ * @in_panic: Download rddm image during kernel panic
+ */
+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic);
+
+/**
+ * mhi_force_rddm_mode - Force device into rddm mode
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_exec_env - Get BHI execution environment of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_mhi_state - Get MHI state of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_soc_reset - Trigger a device reset. This can be used as a last resort
+ * to reset and recover a device.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_device_get_sync - Disable device low power mode. Synchronously
+ * take the controller out of suspended state
+ * @mhi_dev: Device associated with the channel
+ */
+int mhi_device_get_sync(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_device_put - Re-enable device low power mode
+ * @mhi_dev: Device associated with the channel
+ */
+void mhi_device_put(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
+ * @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
+ */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue
+ * buffers for DL traffic
+ * @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
+ * The MHI core will automatically allocate and queue buffers for the DL traffic.
+ */
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
+ * Issue the RESET channel command and let the
+ * device clean up the context so no incoming
+ * transfers are seen on the host. Free memory
+ * associated with the context on the host. If the
+ * device is unresponsive, only perform a host-side
+ * clean-up. Channels can be reset only if both
+ * host and device execution environments match
+ * and channels are in an ENABLED, STOPPED or
+ * SUSPENDED state.
+ * @mhi_dev: Device associated with the channels
+ */
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_queue_buf - Send or receive raw buffers from client device over MHI
+ * channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @buf: Buffer for holding the data
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ void *buf, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_skb - Send or receive SKBs from client device over MHI channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @skb: Buffer for holding SKBs
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_is_full - Determine whether queueing new elements is possible
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ */
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);
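
On the transmit side these calls typically pair up: check ring occupancy, then queue with a completion flag. A hedged sketch of a client xmit helper, assuming MHI_EOT from enum mhi_flags (defined earlier in this header) marks the end of a transfer unit:

static int example_xmit(struct mhi_device *mhi_dev, struct sk_buff *skb)
{
	if (mhi_queue_is_full(mhi_dev, DMA_TO_DEVICE))
		return -EBUSY;	/* retry after ul_xfer_cb signals completion */

	return mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
}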
+
+/**
+ * mhi_get_channel_doorbell_offset - Get the channel doorbell offset
+ * @mhi_cntrl: MHI controller
+ * @chdb_offset: Read channel doorbell offset
+ *
+ * Return: 0 if the read succeeds, a negative error code otherwise
+ */
+int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset);
+
+#endif /* _MHI_H_ */
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
new file mode 100644
index 000000000000..7b40fc8cbe77
--- /dev/null
+++ b/include/linux/mhi_ep.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+#ifndef _MHI_EP_H_
+#define _MHI_EP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/mhi.h>
+
+#define MHI_EP_DEFAULT_MTU 0x8000
+
+/**
+ * struct mhi_ep_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @dir: Direction that data may flow on this channel
+ */
+struct mhi_ep_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ enum dma_data_direction dir;
+};
+
+/**
+ * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration
+ * @mhi_version: MHI spec version supported by the controller
+ * @max_channels: Maximum number of channels supported
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ */
+struct mhi_ep_cntrl_config {
+ u32 mhi_version;
+ u32 max_channels;
+ u32 num_channels;
+ const struct mhi_ep_channel_config *ch_cfg;
+};
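
An endpoint controller advertises its topology much like the host side, only without event ring placement. A sketch with invented channel names and numbers; the mhi_version encoding is assumed, not defined in this header:

static const struct mhi_ep_channel_config example_ep_channels[] = {
	{
		.name = "LOOPBACK",
		.num = 0,
		.num_elements = 64,
		.dir = DMA_TO_DEVICE,	/* UL: host to endpoint, per below */
	},
	{
		.name = "LOOPBACK",
		.num = 1,
		.num_elements = 64,
		.dir = DMA_FROM_DEVICE,
	},
};

static const struct mhi_ep_cntrl_config example_ep_config = {
	.mhi_version = 0x1000600,	/* assumed encoding for MHI v1.6 */
	.max_channels = 128,
	.num_channels = ARRAY_SIZE(example_ep_channels),
	.ch_cfg = example_ep_channels,
};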
+
+/**
+ * struct mhi_ep_db_info - MHI Endpoint doorbell info
+ * @mask: Mask of the doorbell interrupt
+ * @status: Status of the doorbell interrupt
+ */
+struct mhi_ep_db_info {
+ u32 mask;
+ u32 status;
+};
+
+/**
+ * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @mhi_dev: MHI device associated with this buffer
+ * @dev_addr: Address of the buffer in endpoint
+ * @host_addr: Address of the buffer in the host
+ * @size: Size of the buffer
+ * @code: Transfer completion code
+ * @cb: Callback to be executed by controller drivers after transfer completion (async)
+ * @cb_buf: Opaque buffer to be passed to the callback
+ */
+struct mhi_ep_buf_info {
+ struct mhi_ep_device *mhi_dev;
+ void *dev_addr;
+ u64 host_addr;
+ size_t size;
+ int code;
+
+ void (*cb)(struct mhi_ep_buf_info *buf_info);
+ void *cb_buf;
+};
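
With the asynchronous read/write ops the transfer completes later, through @cb. A minimal completion-handler sketch, assuming the submitter stashed a struct completion in @cb_buf before queueing:

static void example_read_done(struct mhi_ep_buf_info *buf_info)
{
	/* buf_info->code carries the transfer completion code */
	complete((struct completion *)buf_info->cb_buf);
}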
+
+/**
+ * struct mhi_ep_cntrl - MHI Endpoint controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * Endpoint controller
+ * @mhi_dev: MHI Endpoint device instance for the controller
+ * @mmio: MMIO region containing the MHI registers
+ * @mhi_chan: Points to the channel configuration table
+ * @mhi_event: Points to the event ring configurations table
+ * @mhi_cmd: Points to the command ring configurations table
+ * @sm: MHI Endpoint state machine
+ * @ch_ctx_cache: Cache of host channel context data structure
+ * @ev_ctx_cache: Cache of host event context data structure
+ * @cmd_ctx_cache: Cache of host command context data structure
+ * @ch_ctx_host_pa: Physical address of host channel context data structure
+ * @ev_ctx_host_pa: Physical address of host event context data structure
+ * @cmd_ctx_host_pa: Physical address of host command context data structure
+ * @ch_ctx_cache_phys: Physical address of the host channel context cache
+ * @ev_ctx_cache_phys: Physical address of the host event context cache
+ * @cmd_ctx_cache_phys: Physical address of the host command context cache
+ * @chdb: Array of channel doorbell interrupt info
+ * @event_lock: Lock for protecting event rings
+ * @state_lock: Lock for protecting state transitions
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
+ * @st_transition_list: List of state transitions
+ * @ch_db_list: List of queued channel doorbells
+ * @wq: Dedicated workqueue for handling rings and state changes
+ * @state_work: State transition worker
+ * @reset_work: Worker for MHI Endpoint reset
+ * @cmd_ring_work: Worker for processing command rings
+ * @ch_ring_work: Worker for processing channel rings
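+ * @ring_item_cache: kmem cache backing ring item allocations
+ * @ev_ring_el_cache: kmem cache backing event ring element allocations
+ * @tre_buf_cache: kmem cache backing TRE buffer allocations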
+ * @raise_irq: CB function for raising IRQ to the host
+ * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
+ * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
+ * @read_sync: CB function for reading from host memory synchronously
+ * @write_sync: CB function for writing to host memory synchronously
+ * @read_async: CB function for reading from host memory asynchronously
+ * @write_async: CB function for writing to host memory asynchronously
+ * @mhi_state: MHI Endpoint state
+ * @max_chan: Maximum channels supported by the endpoint controller
+ * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
+ * @event_rings: Number of event rings supported by the endpoint controller
+ * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
+ * @chdb_offset: Channel doorbell offset set by the host
+ * @erdb_offset: Event ring doorbell offset set by the host
+ * @index: MHI Endpoint controller index
+ * @irq: IRQ used by the endpoint controller
+ * @enabled: Whether the endpoint controller is enabled
+ */
+struct mhi_ep_cntrl {
+ struct device *cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ void __iomem *mmio;
+
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_event *mhi_event;
+ struct mhi_ep_cmd *mhi_cmd;
+ struct mhi_ep_sm *sm;
+
+ struct mhi_chan_ctxt *ch_ctx_cache;
+ struct mhi_event_ctxt *ev_ctx_cache;
+ struct mhi_cmd_ctxt *cmd_ctx_cache;
+ u64 ch_ctx_host_pa;
+ u64 ev_ctx_host_pa;
+ u64 cmd_ctx_host_pa;
+ phys_addr_t ch_ctx_cache_phys;
+ phys_addr_t ev_ctx_cache_phys;
+ phys_addr_t cmd_ctx_cache_phys;
+
+ struct mhi_ep_db_info chdb[4];
+ struct mutex event_lock;
+ struct mutex state_lock;
+ spinlock_t list_lock;
+
+ struct list_head st_transition_list;
+ struct list_head ch_db_list;
+
+ struct workqueue_struct *wq;
+ struct work_struct state_work;
+ struct work_struct reset_work;
+ struct work_struct cmd_ring_work;
+ struct work_struct ch_ring_work;
+ struct kmem_cache *ring_item_cache;
+ struct kmem_cache *ev_ring_el_cache;
+ struct kmem_cache *tre_buf_cache;
+
+ void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
+ int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
+ void __iomem **virt, size_t size);
+ void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
+ void __iomem *virt, size_t size);
+ int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+
+ enum mhi_state mhi_state;
+
+ u32 max_chan;
+ u32 mru;
+ u32 event_rings;
+ u32 hw_event_rings;
+ u32 chdb_offset;
+ u32 erdb_offset;
+ u32 index;
+ int irq;
+ bool enabled;
+};
+
+/**
+ * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
+ * to channels or is associated with controllers
+ * @dev: Driver model device node for the MHI Endpoint device
+ * @mhi_cntrl: Controller the device belongs to
+ * @id: Pointer to MHI Endpoint device ID struct
+ * @name: Name of the associated MHI Endpoint device
+ * @ul_chan: UL (from host to endpoint) channel for the device
+ * @dl_chan: DL (from endpoint to host) channel for the device
+ * @dev_type: MHI device type
+ */
+struct mhi_ep_device {
+ struct device dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_ep_chan *ul_chan;
+ struct mhi_ep_chan *dl_chan;
+ enum mhi_device_type dev_type;
+};
+
+/**
+ * struct mhi_ep_driver - Structure representing an MHI Endpoint client driver
+ * @id_table: Pointer to MHI Endpoint device ID table
+ * @driver: Device driver model driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer
+ * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer
+ */
+struct mhi_ep_driver {
+ const struct mhi_device_id *id_table;
+ struct device_driver driver;
+ int (*probe)(struct mhi_ep_device *mhi_ep,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_ep_device *mhi_ep);
+ void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of_const(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_ep_driver_register, \
+ mhi_ep_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+ __mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus
+ * @mhi_drv: Driver to be associated with the device
+ * @owner: The module owner
+ *
+ * Return: 0 if driver registration succeeds, a negative error code otherwise.
+ */
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
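
The endpoint client mirrors the host-side driver skeleton; everything prefixed example_ is hypothetical, and the channel-name matching in the ID table is assumed:

static int example_ep_probe(struct mhi_ep_device *mhi_ep,
			    const struct mhi_device_id *id)
{
	return 0;
}

static void example_ep_remove(struct mhi_ep_device *mhi_ep)
{
}

static void example_ep_ul_cb(struct mhi_ep_device *mhi_dev,
			     struct mhi_result *result)
{
	/* data arrived from the host */
}

static void example_ep_dl_cb(struct mhi_ep_device *mhi_dev,
			     struct mhi_result *result)
{
	/* a buffer queued towards the host completed */
}

static const struct mhi_device_id example_ep_id_table[] = {
	{ .chan = "LOOPBACK" },
	{}
};

static struct mhi_ep_driver example_ep_driver = {
	.id_table = example_ep_id_table,
	.probe = example_ep_probe,
	.remove = example_ep_remove,
	.ul_xfer_cb = example_ep_ul_cb,
	.dl_xfer_cb = example_ep_dl_cb,
	.driver = {
		.name = "example_mhi_ep_client",
	},
};
module_mhi_ep_driver(example_ep_driver);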
+
+/**
+ * mhi_ep_register_controller - Register MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to register
+ * @config: Configuration to use for the controller
+ *
+ * Return: 0 if controller registration succeeds, a negative error code otherwise.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config);
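
Registration follows the same populate-then-register shape as the host controller. A compressed sketch; the callback names are hypothetical, and power-up is in practice often deferred until the host brings the link up:

static int example_ep_register(struct mhi_ep_cntrl *mhi_cntrl,
			       struct device *dev, void __iomem *mmio, int irq)
{
	int ret;

	mhi_cntrl->cntrl_dev = dev;	/* e.g. a PCIe endpoint function device */
	mhi_cntrl->mmio = mmio;
	mhi_cntrl->irq = irq;
	mhi_cntrl->raise_irq = example_raise_irq;	/* plus alloc_map/unmap_free */
	mhi_cntrl->read_sync = example_read_sync;	/* and the other transfer ops */
	mhi_cntrl->write_sync = example_write_sync;

	ret = mhi_ep_register_controller(mhi_cntrl, &example_ep_config);
	if (!ret)
		ret = mhi_ep_power_up(mhi_cntrl);
	return ret;
}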
+
+/**
+ * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to unregister
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_up - Power up the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ *
+ * Return: 0 if power up succeeds, a negative error code otherwise.
+ */
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_down - Power down the MHI endpoint stack
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ *
+ * Return: true if the queue is empty, false otherwise.
+ */
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+
+/**
+ * mhi_ep_queue_skb - Send an SKB to the host over MHI Endpoint
+ * @mhi_dev: Device associated with the DL channel
+ * @skb: SKB to be queued
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
+
+#endif
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
deleted file mode 100644
index 504d54c71bdb..000000000000
--- a/include/linux/mic_bus.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Bus driver.
- *
- * This implementation is very similar to the the virtio bus driver
- * implementation @ include/linux/virtio.h.
- */
-#ifndef _MIC_BUS_H_
-#define _MIC_BUS_H_
-/*
- * Everything a mbus driver needs to work with any particular mbus
- * implementation.
- */
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-
-struct mbus_device_id {
- __u32 device;
- __u32 vendor;
-};
-
-#define MBUS_DEV_DMA_HOST 2
-#define MBUS_DEV_DMA_MIC 3
-#define MBUS_DEV_ANY_ID 0xffffffff
-
-/**
- * mbus_device - representation of a device using mbus
- * @mmio_va: virtual address of mmio space
- * @hw_ops: the hardware ops supported by this device.
- * @id: the device type identification (used to match it with a driver).
- * @dev: underlying device.
- * be used to communicate with.
- * @index: unique position on the mbus bus
- */
-struct mbus_device {
- void __iomem *mmio_va;
- struct mbus_hw_ops *hw_ops;
- struct mbus_device_id id;
- struct device dev;
- int index;
-};
-
-/**
- * mbus_driver - operations for a mbus I/O driver
- * @driver: underlying device driver (populate name and owner).
- * @id_table: the ids serviced by this driver.
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct mbus_driver {
- struct device_driver driver;
- const struct mbus_device_id *id_table;
- int (*probe)(struct mbus_device *dev);
- void (*scan)(struct mbus_device *dev);
- void (*remove)(struct mbus_device *dev);
-};
-
-/**
- * struct mic_irq - opaque pointer used as cookie
- */
-struct mic_irq;
-
-/**
- * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus.
- */
-struct mbus_hw_ops {
- struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev,
- irq_handler_t handler,
- irq_handler_t thread_fn,
- const char *name, void *data,
- int intr_src);
- void (*free_irq)(struct mbus_device *mbdev,
- struct mic_irq *cookie, void *data);
- void (*ack_interrupt)(struct mbus_device *mbdev, int num);
-};
-
-struct mbus_device *
-mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
- struct mbus_hw_ops *hw_ops, int index,
- void __iomem *mmio_va);
-void mbus_unregister_device(struct mbus_device *mbdev);
-
-int mbus_register_driver(struct mbus_driver *drv);
-void mbus_unregister_driver(struct mbus_driver *drv);
-
-static inline struct mbus_device *dev_to_mbus(struct device *_dev)
-{
- return container_of(_dev, struct mbus_device, dev);
-}
-
-static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv)
-{
- return container_of(drv, struct mbus_driver, driver);
-}
-
-#endif /* _MIC_BUS_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 472fa4d4ea62..ca691641788b 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -1,18 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/micrel_phy.h
*
* Micrel PHY IDs
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef _MICREL_PHY_H
#define _MICREL_PHY_H
+#define MICREL_OUI 0x0885
+
#define MICREL_PHY_ID_MASK 0x00fffff0
#define PHY_ID_KSZ8873MLL 0x000e7237
@@ -31,21 +28,46 @@
#define PHY_ID_KSZ8081 0x00221560
#define PHY_ID_KSZ8061 0x00221570
#define PHY_ID_KSZ9031 0x00221620
+#define PHY_ID_KSZ9131 0x00221640
+#define PHY_ID_LAN8814 0x00221660
+#define PHY_ID_LAN8804 0x00221670
+#define PHY_ID_LAN8841 0x00221650
+#define PHY_ID_LAN8842 0x002216C0
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
-#define PHY_ID_KSZ8795 0x00221550
+#define PHY_ID_KSZ87XX 0x00221550
#define PHY_ID_KSZ9477 0x00221631
/* struct phy_device dev_flags definitions */
-#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_FXEN 0x00000002
+#define MICREL_PHY_50MHZ_CLK BIT(0)
+#define MICREL_PHY_FXEN BIT(1)
+#define MICREL_KSZ8_P1_ERRATA BIT(2)
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
#define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104
#define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105
+/* Device specific MII_BMCR (Reg 0) bits */
+/* 1 = HP Auto MDI/MDI-X mode, 0 = Microchip Auto MDI/MDI-X mode */
+#define KSZ886X_BMCR_HP_MDIX BIT(5)
+/* 1 = Force MDI (transmit on RXP/RXM pins), 0 = Normal operation
+ * (transmit on TXP/TXM pins)
+ */
+#define KSZ886X_BMCR_FORCE_MDI BIT(4)
+/* 1 = Disable auto MDI-X */
+#define KSZ886X_BMCR_DISABLE_AUTO_MDIX BIT(3)
+#define KSZ886X_BMCR_DISABLE_FAR_END_FAULT BIT(2)
+#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1)
+#define KSZ886X_BMCR_DISABLE_LED BIT(0)
+
+/* PHY Special Control/Status Register (Reg 31) */
+#define KSZ886X_CTRL_MDIX_STAT BIT(4)
+#define KSZ886X_CTRL_FORCE_LINK BIT(3)
+#define KSZ886X_CTRL_PWRSAVE BIT(2)
+#define KSZ886X_CTRL_REMOTE_LOOPBACK BIT(1)
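
Since these are ordinary BMCR bits on KSZ886x parts, forcing a port to MDI, for instance, is a read-modify-write of MII_BMCR. A hedged sketch using the generic phylib accessors:

static int example_force_mdi(struct phy_device *phydev)
{
	int val = phy_read(phydev, MII_BMCR);

	if (val < 0)
		return val;

	val |= KSZ886X_BMCR_DISABLE_AUTO_MDIX | KSZ886X_BMCR_FORCE_MDI;
	return phy_write(phydev, MII_BMCR, val);
}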
+
#endif /* _MICREL_PHY_H */
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
index eb492d47f717..517288da19fd 100644
--- a/include/linux/microchipphy.h
+++ b/include/linux/microchipphy.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Microchip Technology
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _MICROCHIPPHY_H
@@ -70,4 +58,15 @@
#define LAN88XX_MMD3_CHIP_ID (32877)
#define LAN88XX_MMD3_CHIP_REV (32878)
+/* Registers specific to the LAN7800/LAN7850 embedded phy */
+#define LAN78XX_PHY_LED_MODE_SELECT (0x1D)
+
+/* DSP registers */
+#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A)
+#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000)
+#define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5)
+#define LAN88XX_EXT_PAGE_TR_CR 16
+#define LAN88XX_EXT_PAGE_TR_LOW_DATA 17
+#define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18
+
#endif /* _MICROCHIPPHY_H */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 3e0d405dc842..26ca00c325d9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H
@@ -6,136 +7,192 @@
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
-typedef struct page *new_page_t(struct page *page, unsigned long private,
- int **reason);
-typedef void free_page_t(struct page *page, unsigned long private);
-
-/*
- * Return values from addresss_space_operations.migratepage():
- * - negative errno on page migration failure;
- * - zero on page migration success;
+typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
+typedef void free_folio_t(struct folio *folio, unsigned long private);
+
+struct migration_target_control;
+
+/**
+ * struct movable_operations - Driver page migration
+ * @isolate_page:
+ * The VM calls this function to prepare the page to be moved. The page
+ * is locked and the driver should not unlock it. The driver should
+ * return ``true`` if the page is movable and ``false`` if it is not
+ * currently movable. After this function returns, the VM uses the
+ * page->lru field, so the driver must preserve any information which
+ * is usually stored here.
+ *
+ * @migrate_page:
+ * After isolation, the VM calls this function with the isolated
+ * @src page. The driver should copy the contents of the
+ * @src page to the @dst page and set up the fields of @dst page.
+ * Both pages are locked.
+ * If page migration is successful, the driver should return 0.
+ * If the driver cannot migrate the page at the moment, it can return
+ * -EAGAIN. The VM interprets this as a temporary migration failure and
+ * will retry it later. Any other error value is a permanent migration
+ * failure and migration will not be retried.
+ * The driver shouldn't touch the @src->lru field while in the
+ * migrate_page() function. It may write to @dst->lru.
+ *
+ * @putback_page:
+ * If migration fails on the isolated page, the VM informs the driver
+ * that the page is no longer a candidate for migration by calling
+ * this function. The driver should put the isolated page back into
+ * its own data structure.
*/
-#define MIGRATEPAGE_SUCCESS 0
-
-enum migrate_reason {
- MR_COMPACTION,
- MR_MEMORY_FAILURE,
- MR_MEMORY_HOTPLUG,
- MR_SYSCALL, /* also applies to cpusets */
- MR_MEMPOLICY_MBIND,
- MR_NUMA_MISPLACED,
- MR_CMA,
- MR_TYPES
+struct movable_operations {
+ bool (*isolate_page)(struct page *, isolate_mode_t);
+ int (*migrate_page)(struct page *dst, struct page *src,
+ enum migrate_mode);
+ void (*putback_page)(struct page *);
};
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
-extern char *migrate_reason_names[MR_TYPES];
-
-static inline struct page *new_page_nodemask(struct page *page,
- int preferred_nid, nodemask_t *nodemask)
-{
- gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
-
- if (PageHuge(page))
- return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
- preferred_nid, nodemask);
-
- if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
- gfp_mask |= __GFP_HIGHMEM;
-
- return __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
-}
+/* Defined in mm/debug.c: */
+extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
-extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode);
-extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
- unsigned long private, enum migrate_mode mode, int reason);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
-extern void putback_movable_page(struct page *page);
-
-extern int migrate_prep(void);
-extern int migrate_prep_local(void);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page);
-extern int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode,
- int extra_count);
+void putback_movable_pages(struct list_head *l);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
+int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
+ unsigned long private, enum migrate_mode mode, int reason,
+ unsigned int *ret_succeeded);
+struct folio *alloc_migration_target(struct folio *src, unsigned long private);
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
+
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct folio *dst, struct folio *src);
+void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
+ __releases(ptl);
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
+int folio_migrate_mapping(struct address_space *mapping,
+ struct folio *newfolio, struct folio *folio, int extra_count);
+int set_movable_ops(const struct movable_operations *ops, enum pagetype type);
+
#else
static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t new,
- free_page_t free, unsigned long private, enum migrate_mode mode,
- int reason)
+static inline int migrate_pages(struct list_head *l, new_folio_t new,
+ free_folio_t free, unsigned long private,
+ enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{ return -ENOSYS; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
- { return -EBUSY; }
-
-static inline int migrate_prep(void) { return -ENOSYS; }
-static inline int migrate_prep_local(void) { return -ENOSYS; }
-
-static inline void migrate_page_copy(struct page *newpage,
- struct page *page) {}
+static inline struct folio *alloc_migration_target(struct folio *src,
+ unsigned long private)
+ { return NULL; }
+static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
+ { return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct folio *dst, struct folio *src)
{
return -ENOSYS;
}
-
-#endif /* CONFIG_MIGRATION */
-
-#ifdef CONFIG_COMPACTION
-extern int PageMovable(struct page *page);
-extern void __SetPageMovable(struct page *page, struct address_space *mapping);
-extern void __ClearPageMovable(struct page *page);
-#else
-static inline int PageMovable(struct page *page) { return 0; };
-static inline void __SetPageMovable(struct page *page,
- struct address_space *mapping)
-{
-}
-static inline void __ClearPageMovable(struct page *page)
+static inline int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
{
+ return -ENOSYS;
}
-#endif
+
+#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
-extern bool pmd_trans_migrating(pmd_t pmd);
-extern int migrate_misplaced_page(struct page *page,
- struct vm_area_struct *vma, int node);
+int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node);
+int migrate_misplaced_folio(struct folio *folio, int node);
#else
-static inline bool pmd_trans_migrating(pmd_t pmd)
+static inline int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node)
{
- return false;
+ return -EAGAIN; /* can't migrate now */
}
-static inline int migrate_misplaced_page(struct page *page,
- struct vm_area_struct *vma, int node)
+static inline int migrate_misplaced_folio(struct folio *folio, int node)
{
return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node);
-#else
-static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node)
+#ifdef CONFIG_MIGRATION
+
+/*
+ * Watch out for PAE architectures, where an unsigned long might not have
+ * enough bits to store the full physical address plus flags. So far we have
+ * enough room for all our flags.
+ */
+#define MIGRATE_PFN_VALID (1UL << 0)
+#define MIGRATE_PFN_MIGRATE (1UL << 1)
+#define MIGRATE_PFN_WRITE (1UL << 3)
+#define MIGRATE_PFN_COMPOUND (1UL << 4)
+#define MIGRATE_PFN_SHIFT 6
+
+static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
+{
+ if (!(mpfn & MIGRATE_PFN_VALID))
+ return NULL;
+ return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
+}
+
+static inline unsigned long migrate_pfn(unsigned long pfn)
{
- return -EAGAIN;
+ return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
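
A quick worked round trip of the encoding helpers (the WARN_ON is purely illustrative):

static void example_mpfn_roundtrip(struct page *page)
{
	/* encode: pfn shifted up by MIGRATE_PFN_SHIFT with VALID set */
	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;

	/* decode: returns NULL unless MIGRATE_PFN_VALID is present */
	WARN_ON(migrate_pfn_to_page(mpfn) != page);
}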
-#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
+
+enum migrate_vma_direction {
+ MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
+ MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
+ MIGRATE_VMA_SELECT_COMPOUND = 1 << 3,
+};
+
+struct migrate_vma {
+ struct vm_area_struct *vma;
+ /*
+ * Both src and dst array must be big enough for
+	 * Both the src and dst arrays must be big enough for
+	 * (end - start) >> PAGE_SHIFT entries.
+	 *
+	 * The caller must not modify the src array after
+	 * migrate_vma_setup(), nor change the dst array after
+	 * migrate_vma_pages() returns.
+ unsigned long *dst;
+ unsigned long *src;
+ unsigned long cpages;
+ unsigned long npages;
+ unsigned long start;
+ unsigned long end;
+
+ /*
+ * Set to the owner value also stored in page_pgmap(page)->owner
+ * for migrating out of device private memory. The flags also need to
+ * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
+ * The caller should always set this field when using mmu notifier
+ * callbacks to avoid device MMU invalidations for device private
+ * pages that are not being migrated.
+ */
+ void *pgmap_owner;
+ unsigned long flags;
+
+ /*
+ * Set to vmf->page if this is being called to migrate a page as part of
+ * a migrate_to_ram() callback.
+ */
+ struct page *fault_page;
+};
+
+int migrate_vma_setup(struct migrate_vma *args);
+void migrate_vma_pages(struct migrate_vma *migrate);
+void migrate_vma_finalize(struct migrate_vma *migrate);
+int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+ unsigned long npages);
+int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages);
+void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
+ unsigned long npages);
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages);
+
+#endif /* CONFIG_MIGRATION */
#endif /* _LINUX_MIGRATE_H */
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index ebf3d89a3919..265c4328b36a 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MIGRATE_MODE_H_INCLUDED
#define MIGRATE_MODE_H_INCLUDED
/*
@@ -13,4 +14,18 @@ enum migrate_mode {
MIGRATE_SYNC,
};
+enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+ MR_MEMORY_HOTPLUG,
+ MR_SYSCALL, /* also applies to cpusets */
+ MR_MEMPOLICY_MBIND,
+ MR_NUMA_MISPLACED,
+ MR_CONTIG_RANGE,
+ MR_LONGTERM_PIN,
+ MR_DEMOTION,
+ MR_DAMON,
+ MR_TYPES
+};
+
#endif /* MIGRATE_MODE_H_INCLUDED */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index e870bfa6abfe..b8f26d4513c3 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/mii.h: definitions for MII-compatible transceivers
* Originally drivers/net/sunhme.h.
@@ -9,6 +10,7 @@
#include <linux/if.h>
+#include <linux/linkmode.h>
#include <uapi/linux/mii.h>
struct ethtool_cmd;
@@ -30,7 +32,7 @@ struct mii_if_info {
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
-extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern void mii_ethtool_get_link_ksettings(
struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
@@ -131,6 +133,34 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
}
/**
+ * linkmode_adv_to_mii_adv_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 linkmode_adv_to_mii_adv_t(const unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising))
+ result |= ADVERTISE_10HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising))
+ result |= ADVERTISE_10FULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising))
+ result |= ADVERTISE_100HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising))
+ result |= ADVERTISE_100FULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= ADVERTISE_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= ADVERTISE_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
* mii_adv_to_ethtool_adv_t
* @adv: value of the MII_ADVERTISE register
*
@@ -178,6 +208,29 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
}
/**
+ * linkmode_adv_to_mii_ctrl1000_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32
+linkmode_adv_to_mii_ctrl1000_t(const unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising))
+ result |= ADVERTISE_1000HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising))
+ result |= ADVERTISE_1000FULL;
+
+ return result;
+}
+
+/**
* mii_ctrl1000_to_ethtool_adv_t
* @adv: value of the MII_CTRL1000 register
*
@@ -236,6 +289,25 @@ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
}
/**
+ * mii_stat1000_mod_linkmode_lpa_t
+ * @advertising: target the linkmode advertisement settings
+ * @adv: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000 bits, when in
+ * 1000Base-T mode, to linkmode advertisement settings. Other bits in
+ * advertising are not changes.
+ */
+static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising,
+ u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising, lpa & LPA_1000HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising, lpa & LPA_1000FULL);
+}
+
+/**
* ethtool_adv_to_mii_adv_x
* @ethadv: the ethtool advertisement settings
*
@@ -284,21 +356,155 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
}
/**
- * mii_lpa_to_ethtool_lpa_x
+ * mii_adv_mod_linkmode_adv_t
+ * @advertising: pointer to destination link mode.
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits to
+ * linkmode advertisement settings. Leaves other bits unchanged.
+ */
+static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising,
+ u32 adv)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising, adv & ADVERTISE_10HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising, adv & ADVERTISE_10FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising, adv & ADVERTISE_100HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising, adv & ADVERTISE_100FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ adv & ADVERTISE_PAUSE_CAP);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising, adv & ADVERTISE_PAUSE_ASYM);
+}
+
+/**
+ * mii_adv_to_linkmode_adv_t
+ * @advertising: pointer to destination link mode.
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to linkmode advertisement settings. Clears the old value
+ * of advertising.
+ */
+static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
+ u32 adv)
+{
+ linkmode_zero(advertising);
+
+ mii_adv_mod_linkmode_adv_t(advertising, adv);
+}
+
+/**
+ * mii_lpa_to_linkmode_lpa_t
- * @adv: value of the MII_LPA register
+ * @lp_advertising: pointer to destination link mode
+ * @lpa: value of the MII_LPA register
*
- * A small helper function that translates MII_LPA
- * bits, when in 1000Base-X mode, to ethtool
- * LP advertisement settings.
+ * A small helper function that translates MII_LPA bits, when in
+ * 1000Base-T mode, to linkmode LP advertisement settings. Clears the
+ * old value of advertising.
*/
-static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
+static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
{
- u32 result = 0;
+ mii_adv_to_linkmode_adv_t(lp_advertising, lpa);
if (lpa & LPA_LPACK)
- result |= ADVERTISED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising);
+}
+
+/**
+ * mii_lpa_mod_linkmode_lpa_t
+ * @lp_advertising: pointer to destination link mode
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits, when in
+ * 1000Base-T mode, to linkmode LP advertisement settings. Leaves
+ * other bits unchanged.
+ */
+static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ mii_adv_mod_linkmode_adv_t(lp_advertising, lpa);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising, lpa & LPA_LPACK);
+}
+
+static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
+ u32 ctrl1000)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising,
+ ctrl1000 & ADVERTISE_1000HALF);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising,
+ ctrl1000 & ADVERTISE_1000FULL);
+}
+
+/**
+ * linkmode_adv_to_lcl_adv_t
+ * @advertising: pointer to linkmode advertising
+ *
+ * A small helper function that translates linkmode advertising to the
+ * local (lcl) pause capability bits, ADVERTISE_PAUSE_CAP and
+ * ADVERTISE_PAUSE_ASYM.
+ */
+static inline u32 linkmode_adv_to_lcl_adv_t(const unsigned long *advertising)
+{
+ u32 lcl_adv = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising))
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising))
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
- return result | mii_adv_to_ethtool_adv_x(lpa);
+ return lcl_adv;
+}
+
+/**
+ * mii_lpa_mod_linkmode_x - decode the link partner's config_reg to linkmodes
+ * @linkmodes: link modes array
+ * @lpa: config_reg word from link partner
+ * @fd_bit: link mode for 1000XFULL bit
+ */
+static inline void mii_lpa_mod_linkmode_x(unsigned long *linkmodes, u16 lpa,
+ int fd_bit)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, linkmodes,
+ lpa & LPA_LPACK);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes,
+ lpa & LPA_1000XPAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes,
+ lpa & LPA_1000XPAUSE_ASYM);
+ linkmode_mod_bit(fd_bit, linkmodes,
+ lpa & LPA_1000XFULL);
+}
+
+/**
+ * linkmode_adv_to_mii_adv_x - encode a linkmode to config_reg
+ * @linkmodes: linkmode advertisement settings to encode
+ * @fd_bit: link mode for the 1000XFULL bit
+ */
+static inline u16 linkmode_adv_to_mii_adv_x(const unsigned long *linkmodes,
+ int fd_bit)
+{
+ u16 adv = 0;
+
+ if (linkmode_test_bit(fd_bit, linkmodes))
+ adv |= ADVERTISE_1000XFULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes))
+ adv |= ADVERTISE_1000XPAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes))
+ adv |= ADVERTISE_1000XPSE_ASYM;
+
+ return adv;
}
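+/*
+ * Illustrative round trip for the 1000Base-X helpers above (a sketch;
+ * ETHTOOL_LINK_MODE_1000baseX_Full_BIT is one possible @fd_bit, and the
+ * register accesses are left abstract):
+ *
+ *     u16 adv = linkmode_adv_to_mii_adv_x(advertising,
+ *                                         ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ *     (write adv to the advertisement register, read the partner word into lpa)
+ *     mii_lpa_mod_linkmode_x(lp_advertising, lpa,
+ *                            ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ */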
/**
@@ -340,4 +546,39 @@ static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
return cap;
}
+/**
+ * mii_bmcr_encode_fixed - encode fixed speed/duplex settings to a BMCR value
+ * @speed: a SPEED_* value
+ * @duplex: a DUPLEX_* value
+ *
+ * Encode the speed and duplex into a BMCR value. 2500, 1000, 100 and 10 Mbps
+ * are supported: 2500 Mbps is encoded as 1000 Mbps, and any other speed is
+ * encoded as 10 Mbps. Unknown duplex values are encoded as half-duplex.
+ */
+static inline u16 mii_bmcr_encode_fixed(int speed, int duplex)
+{
+ u16 bmcr;
+
+ switch (speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ bmcr = BMCR_SPEED1000;
+ break;
+
+ case SPEED_100:
+ bmcr = BMCR_SPEED100;
+ break;
+
+ case SPEED_10:
+ default:
+ bmcr = BMCR_SPEED10;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ return bmcr;
+}
+
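+/*
+ * For example, mii_bmcr_encode_fixed(SPEED_1000, DUPLEX_FULL) returns
+ * BMCR_SPEED1000 | BMCR_FULLDPLX. A fixed-link driver might then write
+ * it out (sketch; 'bus' and 'addr' are assumed from the MDIO context):
+ *
+ *     u16 bmcr = mii_bmcr_encode_fixed(SPEED_1000, DUPLEX_FULL);
+ *
+ *     mdiobus_write(bus, addr, MII_BMCR, bmcr);
+ */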
#endif /* __LINUX_MII_H__ */
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
new file mode 100644
index 000000000000..3102c425c8e0
--- /dev/null
+++ b/include/linux/mii_timestamper.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for generic time stamping devices on MII buses.
+ * Copyright (C) 2018 Richard Cochran <richardcochran@gmail.com>
+ */
+#ifndef _LINUX_MII_TIMESTAMPER_H
+#define _LINUX_MII_TIMESTAMPER_H
+
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/net_tstamp.h>
+
+struct phy_device;
+
+/**
+ * struct mii_timestamper - Callback interface to MII time stamping devices.
+ *
+ * @rxtstamp: Requests a Rx timestamp for 'skb'. If the skb is accepted,
+ * the MII time stamping device promises to deliver it using
+ * netif_rx() as soon as a timestamp becomes available. One of
+ * the PTP_CLASS_ values is passed in 'type'. The function
+ * must return true if the skb is accepted for delivery.
+ *
+ * @txtstamp: Requests a Tx timestamp for 'skb'. The MII time stamping
+ * device promises to deliver it using skb_complete_tx_timestamp()
+ * as soon as a timestamp becomes available. One of the PTP_CLASS_
+ * values is passed in 'type'.
+ *
+ * @hwtstamp_set: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ *
+ * @hwtstamp_get: Handles SIOCGHWTSTAMP ioctl for hardware time stamping.
+ *
+ * @link_state: Allows the device to respond to changes in the link
+ * state. The caller invokes this function while holding
+ * the phy_device mutex.
+ *
+ * @ts_info: Handles ethtool queries for hardware time stamping.
+ * @device: Remembers the device to which the instance belongs.
+ *
+ * Drivers for PHY time stamping devices should embed their
+ * mii_timestamper within a private structure, obtaining a reference
+ * to it using container_of().
+ *
+ * Drivers for non-PHY time stamping devices should return a pointer
+ * to a mii_timestamper from the probe_channel() callback of their
+ * mii_timestamping_ctrl interface.
+ */
+struct mii_timestamper {
+ bool (*rxtstamp)(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type);
+
+ void (*txtstamp)(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type);
+
+ int (*hwtstamp_set)(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
+
+ int (*hwtstamp_get)(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *kernel_config);
+
+ void (*link_state)(struct mii_timestamper *mii_ts,
+ struct phy_device *phydev);
+
+ int (*ts_info)(struct mii_timestamper *mii_ts,
+ struct kernel_ethtool_ts_info *ts_info);
+
+ struct device *device;
+};
+
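+/*
+ * Embedding sketch (hypothetical driver structure, shown only to
+ * illustrate the container_of() pattern described above):
+ *
+ *     struct my_ts_device {
+ *             struct mii_timestamper mii_ts;
+ *             (private fields ...)
+ *     };
+ *
+ *     static bool my_rxtstamp(struct mii_timestamper *mii_ts,
+ *                             struct sk_buff *skb, int type)
+ *     {
+ *             struct my_ts_device *dev =
+ *                     container_of(mii_ts, struct my_ts_device, mii_ts);
+ *             (queue skb until its timestamp arrives ...)
+ *             return true;
+ *     }
+ */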
+/**
+ * struct mii_timestamping_ctrl - MII time stamping controller interface.
+ *
+ * @probe_channel: Callback into the controller driver announcing the
+ * presence of the 'port' channel. The 'device' field
+ * was passed to register_mii_tstamp_controller().
+ * The driver must return either a pointer to a valid
+ * MII timestamper instance or an ERR_PTR() value.
+ *
+ * @release_channel: Releases an instance obtained via .probe_channel.
+ */
+struct mii_timestamping_ctrl {
+ struct mii_timestamper *(*probe_channel)(struct device *device,
+ unsigned int port);
+ void (*release_channel)(struct device *device,
+ struct mii_timestamper *mii_ts);
+};
+
+#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
+
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl);
+
+void unregister_mii_tstamp_controller(struct device *device);
+
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port);
+
+void unregister_mii_timestamper(struct mii_timestamper *mii_ts);
+
+#else
+
+static inline
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void unregister_mii_tstamp_controller(struct device *device)
+{
+}
+
+static inline
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port)
+{
+ return NULL;
+}
+
+static inline void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
+{
+}
+
+#endif
+
+#endif
diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h
new file mode 100644
index 000000000000..79ddc0adbf2b
--- /dev/null
+++ b/include/linux/min_heap.h
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MIN_HEAP_H
+#define _LINUX_MIN_HEAP_H
+
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/*
+ * The Min Heap API provides utilities for managing min-heaps, a binary tree
+ * structure where each node's value is less than or equal to its children's
+ * values, ensuring the smallest element is at the root.
+ *
+ * Users should avoid directly calling functions prefixed with __min_heap_*().
+ * Instead, use the provided macro wrappers.
+ *
+ * For further details and examples, refer to Documentation/core-api/min_heap.rst.
+ */
+
+/**
+ * Data structure to hold a min-heap.
+ * @nr: Number of elements currently in the heap.
+ * @size: Maximum number of elements that can be held in current storage.
+ * @data: Pointer to the start of array holding the heap elements.
+ * @preallocated: Start of the static preallocated array holding the heap elements.
+ */
+#define MIN_HEAP_PREALLOCATED(_type, _name, _nr) \
+struct _name { \
+ size_t nr; \
+ size_t size; \
+ _type *data; \
+ _type preallocated[_nr]; \
+}
+
+#define DEFINE_MIN_HEAP(_type, _name) MIN_HEAP_PREALLOCATED(_type, _name, 0)
+
+typedef DEFINE_MIN_HEAP(char, min_heap_char) min_heap_char;
+
+#define __minheap_cast(_heap) (typeof((_heap)->data[0]) *)
+#define __minheap_obj_size(_heap) sizeof((_heap)->data[0])
+
+/**
+ * struct min_heap_callbacks - Data/functions to customise the min_heap.
+ * @less: Partial order function for this heap.
+ * @swp: Swap elements function.
+ */
+struct min_heap_callbacks {
+ bool (*less)(const void *lhs, const void *rhs, void *args);
+ void (*swp)(void *lhs, void *rhs, void *args);
+};
+
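+/*
+ * Usage sketch (illustrative names; see
+ * Documentation/core-api/min_heap.rst for the authoritative examples):
+ *
+ *     DEFINE_MIN_HEAP(int, int_min_heap);
+ *
+ *     static bool int_less(const void *lhs, const void *rhs, void *args)
+ *     {
+ *             return *(const int *)lhs < *(const int *)rhs;
+ *     }
+ *
+ *     static const struct min_heap_callbacks cbs = {
+ *             .less = int_less,
+ *             .swp = NULL,
+ *     };
+ *
+ * A NULL .swp makes the wrappers pick a built-in swap via
+ * select_swap_func().
+ */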
+/**
+ * is_aligned - is this pointer & size okay for word-wide copying?
+ * @base: pointer to data
+ * @size: size of each element
+ * @align: required alignment (typically 4 or 8)
+ *
+ * Returns true if elements can be copied using word loads and stores.
+ * The size must be a multiple of the alignment, and the base address must be
+ * aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
+ *
+ * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
+ * to "if ((a | b) & mask)", so we do that by hand.
+ */
+__attribute_const__ __always_inline
+static bool is_aligned(const void *base, size_t size, unsigned char align)
+{
+ unsigned char lsbits = (unsigned char)size;
+
+ (void)base;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ lsbits |= (unsigned char)(uintptr_t)base;
+#endif
+ return (lsbits & (align - 1)) == 0;
+}
+
+/**
+ * swap_words_32 - swap two elements in 32-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 4)
+ *
+ * Exchange the two objects in memory. This exploits base+index addressing,
+ * which basically all CPUs have, to minimize loop overhead computations.
+ *
+ * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
+ * bottom of the loop, even though the zero flag is still valid from the
+ * subtract (since the intervening mov instructions don't alter the flags).
+ * Gcc 8.1.0 doesn't have that problem.
+ */
+static __always_inline
+void swap_words_32(void *a, void *b, size_t n)
+{
+ do {
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+ } while (n);
+}
+
+/**
+ * swap_words_64 - swap two elements in 64-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 8)
+ *
+ * Exchange the two objects in memory. This exploits base+index
+ * addressing, which basically all CPUs have, to minimize loop overhead
+ * computations.
+ *
+ * We'd like to use 64-bit loads if possible. If they're not, emulating
+ * one requires base+index+4 addressing which x86 has but most other
+ * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
+ * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
+ * x32 ABI). Are there any cases the kernel needs to worry about?
+ */
+static __always_inline
+void swap_words_64(void *a, void *b, size_t n)
+{
+ do {
+#ifdef CONFIG_64BIT
+ u64 t = *(u64 *)(a + (n -= 8));
+ *(u64 *)(a + n) = *(u64 *)(b + n);
+ *(u64 *)(b + n) = t;
+#else
+ /* Use two 32-bit transfers to avoid base+index+4 addressing */
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+
+ t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+#endif
+ } while (n);
+}
+
+/**
+ * swap_bytes - swap two elements a byte at a time
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size
+ *
+ * This is the fallback if alignment doesn't allow using larger chunks.
+ */
+static __always_inline
+void swap_bytes(void *a, void *b, size_t n)
+{
+ do {
+ char t = ((char *)a)[--n];
+ ((char *)a)[n] = ((char *)b)[n];
+ ((char *)b)[n] = t;
+ } while (n);
+}
+
+/*
+ * The values are arbitrary as long as they can't be confused with
+ * a pointer, but small integers make for the smallest compare
+ * instructions.
+ */
+#define SWAP_WORDS_64 ((void (*)(void *, void *, void *))0)
+#define SWAP_WORDS_32 ((void (*)(void *, void *, void *))1)
+#define SWAP_BYTES ((void (*)(void *, void *, void *))2)
+
+/*
+ * Selects the appropriate swap function based on the element size.
+ */
+static __always_inline
+void *select_swap_func(const void *base, size_t size)
+{
+ if (is_aligned(base, size, 8))
+ return SWAP_WORDS_64;
+ else if (is_aligned(base, size, 4))
+ return SWAP_WORDS_32;
+ else
+ return SWAP_BYTES;
+}
+
+static __always_inline
+void do_swap(void *a, void *b, size_t size, void (*swap_func)(void *lhs, void *rhs, void *args),
+ void *priv)
+{
+ if (swap_func == SWAP_WORDS_64)
+ swap_words_64(a, b, size);
+ else if (swap_func == SWAP_WORDS_32)
+ swap_words_32(a, b, size);
+ else if (swap_func == SWAP_BYTES)
+ swap_bytes(a, b, size);
+ else
+ swap_func(a, b, priv);
+}
+
+/**
+ * parent - given the offset of the child, find the offset of the parent.
+ * @i: the offset of the heap element whose parent is sought. Non-zero.
+ * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
+ * @size: size of each element
+ *
+ * In terms of array indexes, the parent of element j = @i/@size is simply
+ * (j-1)/2. But when working in byte offsets, we can't use implicit
+ * truncation of integer divides.
+ *
+ * Fortunately, we only need one bit of the quotient, not the full divide.
+ * @size has a least significant bit. That bit will be clear if @i is
+ * an even multiple of @size, and set if it's an odd multiple.
+ *
+ * Logically, we're doing "if (i & lsbit) i -= size;", but since the
+ * branch is unpredictable, it's done with a bit of clever branch-free
+ * code instead.
+ */
+__attribute_const__ __always_inline
+static size_t parent(size_t i, unsigned int lsbit, size_t size)
+{
+ i -= size;
+ i -= size & -(i & lsbit);
+ return i / 2;
+}
+
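+/*
+ * Worked example: with @size = 12 (so @lsbit = 4), the child at byte
+ * offset 48 (index 4) gives 48 - 12 = 36; bit 2 of 36 is set, so a
+ * further 12 is subtracted, and 24 / 2 = 12, the offset of index
+ * (4 - 1) / 2 = 1.
+ */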
+/* Initialize a min-heap. */
+static __always_inline
+void __min_heap_init_inline(min_heap_char *heap, void *data, size_t size)
+{
+ heap->nr = 0;
+ heap->size = size;
+ if (data)
+ heap->data = data;
+ else
+ heap->data = heap->preallocated;
+}
+
+#define min_heap_init_inline(_heap, _data, _size) \
+ __min_heap_init_inline(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
+
+/* Get the minimum element from the heap. */
+static __always_inline
+void *__min_heap_peek_inline(struct min_heap_char *heap)
+{
+ return heap->nr ? heap->data : NULL;
+}
+
+#define min_heap_peek_inline(_heap) \
+ (__minheap_cast(_heap) \
+ __min_heap_peek_inline(container_of(&(_heap)->nr, min_heap_char, nr)))
+
+/* Check if the heap is full. */
+static __always_inline
+bool __min_heap_full_inline(min_heap_char *heap)
+{
+ return heap->nr == heap->size;
+}
+
+#define min_heap_full_inline(_heap) \
+ __min_heap_full_inline(container_of(&(_heap)->nr, min_heap_char, nr))
+
+/* Sift the element at pos down the heap. */
+static __always_inline
+void __min_heap_sift_down_inline(min_heap_char *heap, size_t pos, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ const unsigned long lsbit = elem_size & -elem_size;
+ void *data = heap->data;
+ void (*swp)(void *lhs, void *rhs, void *args) = func->swp;
+ /* pre-scale counters for performance */
+ size_t a = pos * elem_size;
+ size_t b, c, d;
+ size_t n = heap->nr * elem_size;
+
+ if (!swp)
+ swp = select_swap_func(data, elem_size);
+
+ /* Find the sift-down path all the way to the leaves. */
+ for (b = a; c = 2 * b + elem_size, (d = c + elem_size) < n;)
+ b = func->less(data + c, data + d, args) ? c : d;
+
+ /* Special case for the last leaf with no sibling. */
+ if (d == n)
+ b = c;
+
+ /* Backtrack to the correct location. */
+ while (b != a && func->less(data + a, data + b, args))
+ b = parent(b, lsbit, elem_size);
+
+ /* Shift the element into its correct place. */
+ c = b;
+ while (b != a) {
+ b = parent(b, lsbit, elem_size);
+ do_swap(data + b, data + c, elem_size, swp, args);
+ }
+}
+
+#define min_heap_sift_down_inline(_heap, _pos, _func, _args) \
+ __min_heap_sift_down_inline(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
+ __minheap_obj_size(_heap), _func, _args)
+
+/* Sift up ith element from the heap, O(log2(nr)). */
+static __always_inline
+void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ const unsigned long lsbit = elem_size & -elem_size;
+ void *data = heap->data;
+ void (*swp)(void *lhs, void *rhs, void *args) = func->swp;
+ /* pre-scale counters for performance */
+ size_t a = idx * elem_size, b;
+
+ if (!swp)
+ swp = select_swap_func(data, elem_size);
+
+ while (a) {
+ b = parent(a, lsbit, elem_size);
+ if (func->less(data + b, data + a, args))
+ break;
+ do_swap(data + a, data + b, elem_size, swp, args);
+ a = b;
+ }
+}
+
+#define min_heap_sift_up_inline(_heap, _idx, _func, _args) \
+ __min_heap_sift_up_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+
+/* Floyd's approach to heapification that is O(nr). */
+static __always_inline
+void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ ssize_t i;
+
+ for (i = heap->nr / 2 - 1; i >= 0; i--)
+ __min_heap_sift_down_inline(heap, i, elem_size, func, args);
+}
+
+#define min_heapify_all_inline(_heap, _func, _args) \
+ __min_heapify_all_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+
+/* Remove minimum element from the heap, O(log2(nr)). */
+static __always_inline
+bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ void *data = heap->data;
+
+ if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
+ return false;
+
+ /* Place last element at the root (position 0) and then sift down. */
+ heap->nr--;
+ memcpy(data, data + (heap->nr * elem_size), elem_size);
+ __min_heap_sift_down_inline(heap, 0, elem_size, func, args);
+
+ return true;
+}
+
+#define min_heap_pop_inline(_heap, _func, _args) \
+ __min_heap_pop_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+
+/*
+ * Remove the minimum element and then push the given element. The
+ * implementation performs 1 sift (O(log2(nr))) and is therefore more
+ * efficient than a pop followed by a push that does 2.
+ */
+static __always_inline
+void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ memcpy(heap->data, element, elem_size);
+ __min_heap_sift_down_inline(heap, 0, elem_size, func, args);
+}
+
+#define min_heap_pop_push_inline(_heap, _element, _func, _args) \
+ __min_heap_pop_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+
+/* Push an element on to the heap, O(log2(nr)). */
+static __always_inline
+bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ void *data = heap->data;
+ size_t pos;
+
+ if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
+ return false;
+
+ /* Place at the end of data. */
+ pos = heap->nr;
+ memcpy(data + (pos * elem_size), element, elem_size);
+ heap->nr++;
+
+ /* Sift child at pos up. */
+ __min_heap_sift_up_inline(heap, elem_size, pos, func, args);
+
+ return true;
+}
+
+#define min_heap_push_inline(_heap, _element, _func, _args) \
+ __min_heap_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+
+/* Remove ith element from the heap, O(log2(nr)). */
+static __always_inline
+bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ void *data = heap->data;
+ void (*swp)(void *lhs, void *rhs, void *args) = func->swp;
+
+ if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
+ return false;
+
+ if (!swp)
+ swp = select_swap_func(data, elem_size);
+
+ /* Replace the deleted element with the last one, then sift to restore heap order. */
+ heap->nr--;
+ if (idx == heap->nr)
+ return true;
+ do_swap(data + (idx * elem_size), data + (heap->nr * elem_size), elem_size, swp, args);
+ __min_heap_sift_up_inline(heap, elem_size, idx, func, args);
+ __min_heap_sift_down_inline(heap, idx, elem_size, func, args);
+
+ return true;
+}
+
+#define min_heap_del_inline(_heap, _idx, _func, _args) \
+ __min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+
+void __min_heap_init(min_heap_char *heap, void *data, size_t size);
+void *__min_heap_peek(struct min_heap_char *heap);
+bool __min_heap_full(min_heap_char *heap);
+void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args);
+void __min_heapify_all(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+bool __min_heap_pop(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+void __min_heap_pop_push(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args);
+
+#define min_heap_init(_heap, _data, _size) \
+ __min_heap_init(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
+#define min_heap_peek(_heap) \
+ (__minheap_cast(_heap) __min_heap_peek(container_of(&(_heap)->nr, min_heap_char, nr)))
+#define min_heap_full(_heap) \
+ __min_heap_full(container_of(&(_heap)->nr, min_heap_char, nr))
+#define min_heap_sift_down(_heap, _pos, _func, _args) \
+ __min_heap_sift_down(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_sift_up(_heap, _idx, _func, _args) \
+ __min_heap_sift_up(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+#define min_heapify_all(_heap, _func, _args) \
+ __min_heapify_all(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_pop(_heap, _func, _args) \
+ __min_heap_pop(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_pop_push(_heap, _element, _func, _args) \
+ __min_heap_pop_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_push(_heap, _element, _func, _args) \
+ __min_heap_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_del(_heap, _idx, _func, _args) \
+ __min_heap_del(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+
+#endif /* _LINUX_MIN_HEAP_H */
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
new file mode 100644
index 000000000000..a0158db54a04
--- /dev/null
+++ b/include/linux/minmax.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MINMAX_H
+#define _LINUX_MINMAX_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+/*
+ * min()/max()/clamp() macros must accomplish several things:
+ *
+ * - Avoid multiple evaluations of the arguments (so side-effects like
+ * "x++" happen only once) when non-constant.
+ * - Perform signed v unsigned type-checking (to generate compile
+ * errors instead of nasty runtime surprises).
+ * - Unsigned char/short are always promoted to signed int and can be
+ * compared against signed or unsigned arguments.
+ * - Unsigned arguments can be compared against non-negative signed constants.
+ * - Comparison of a signed argument against an unsigned constant fails
+ * even if the constant is below __INT_MAX__ and could be cast to int.
+ */
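+/*
+ * Illustration of the intended behaviour (a sketch; the exact
+ * diagnostics depend on the compiler):
+ *
+ *     int s; unsigned int u;
+ *
+ *     min(u, 4);      ok: unsigned vs non-negative signed constant
+ *     min(s, u);      build error: signed vs unsigned variables
+ *     min(s, 4u);     build error: signed variable vs unsigned constant
+ */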
+#define __typecheck(x, y) \
+ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+
+/*
+ * __sign_use for integer expressions:
+ * bit #0 set if ok for unsigned comparisons
+ * bit #1 set if ok for signed comparisons
+ *
+ * In particular, statically non-negative signed integer expressions
+ * are ok for both.
+ *
+ * NOTE! Unsigned types smaller than 'int' are implicitly converted to 'int'
+ * in expressions, and are accepted for signed conversions for now.
+ * This is debatable.
+ *
+ * Note that 'x' is the original expression, and 'ux' is the unique variable
+ * that contains the value.
+ *
+ * We use 'ux' for pure type checking, and 'x' for when we need to look at the
+ * value without evaluating it for side effects (only ever evaluate it with
+ * sizeof() or __builtin_constant_p() etc).
+ *
+ * Pointers end up being checked by the normal C type rules at the actual
+ * comparison, and these expressions only need to be careful to not cause
+ * warnings for pointer use.
+ */
+#define __sign_use(ux) (is_signed_type(typeof(ux)) ? \
+ (2 + __is_nonneg(ux)) : (1 + 2 * (sizeof(ux) < 4)))
+
+/*
+ * Check whether a signed value is always non-negative.
+ *
+ * A cast is needed to avoid any warnings from values that aren't signed
+ * integer types (in which case the result doesn't matter).
+ *
+ * On 64-bit any integer or pointer type can safely be cast to 'long long'.
+ * But on 32-bit we need to avoid warnings about casting pointers to integers
+ * of different sizes without truncating 64-bit values so 'long' or 'long long'
+ * must be used depending on the size of the value.
+ *
+ * This does not work for 128-bit signed integers since the cast would truncate
+ * them, but we do not use s128 types in the kernel (we do use 'u128',
+ * but they are handled by the !is_signed_type() case).
+ */
+#if __SIZEOF_POINTER__ == __SIZEOF_LONG_LONG__
+#define __is_nonneg(ux) statically_true((long long)(ux) >= 0)
+#else
+#define __is_nonneg(ux) statically_true( \
+ (typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)))(ux) >= 0)
+#endif
+
+#define __types_ok(ux, uy) \
+ (__sign_use(ux) & __sign_use(uy))
+
+#define __types_ok3(ux, uy, uz) \
+ (__sign_use(ux) & __sign_use(uy) & __sign_use(uz))
+
+#define __cmp_op_min <
+#define __cmp_op_max >
+
+#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))
+
+#define __cmp_once_unique(op, type, x, y, ux, uy) \
+ ({ type ux = (x); type uy = (y); __cmp(op, ux, uy); })
+
+#define __cmp_once(op, type, x, y) \
+ __cmp_once_unique(op, type, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
+
+#define __careful_cmp_once(op, x, y, ux, uy) ({ \
+ auto ux = (x); auto uy = (y); \
+ BUILD_BUG_ON_MSG(!__types_ok(ux, uy), \
+ #op"("#x", "#y") signedness error"); \
+ __cmp(op, ux, uy); })
+
+#define __careful_cmp(op, x, y) \
+ __careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
+
+/**
+ * min - return minimum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
+#define min(x, y) __careful_cmp(min, x, y)
+
+/**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
+#define max(x, y) __careful_cmp(max, x, y)
+
+/**
+ * umin - return minimum of two non-negative values
+ * Signed types are zero extended to match a larger unsigned type.
+ * @x: first value
+ * @y: second value
+ */
+#define umin(x, y) \
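+/*
+ * Illustrative usage (a sketch, not taken from an in-tree driver;
+ * 'phydev' and phy_write() are assumed from the phylib context):
+ *
+ *     u32 adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
+ *
+ *     phy_write(phydev, MII_ADVERTISE, adv);
+ */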
+ __careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+
+/**
+ * umax - return maximum of two non-negative values
+ * @x: first value
+ * @y: second value
+ */
+#define umax(x, y) \
+ __careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+
+#define __careful_op3(op, x, y, z, ux, uy, uz) ({ \
+ auto ux = (x); auto uy = (y); auto uz = (z); \
+ BUILD_BUG_ON_MSG(!__types_ok3(ux, uy, uz), \
+ #op"3("#x", "#y", "#z") signedness error"); \
+ __cmp(op, ux, __cmp(op, uy, uz)); })
+
+/**
+ * min3 - return minimum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
+#define min3(x, y, z) \
+ __careful_op3(min, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
+
+/**
+ * max3 - return maximum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
+#define max3(x, y, z) \
+ __careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define min_t(type, x, y) __cmp_once(min, type, x, y)
+
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define max_t(type, x, y) __cmp_once(max, type, x, y)
+
+/**
+ * min_not_zero - return the minimum that is _not_ zero, unless both are zero
+ * @x: value1
+ * @y: value2
+ */
+#define min_not_zero(x, y) ({ \
+ typeof(x) __x = (x); \
+ typeof(y) __y = (y); \
+ __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+
+#define __clamp(val, lo, hi) \
+ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+
+#define __clamp_once(type, val, lo, hi, uval, ulo, uhi) ({ \
+ type uval = (val); \
+ type ulo = (lo); \
+ type uhi = (hi); \
+ BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \
+ "clamp() low limit " #lo " greater than high limit " #hi); \
+ BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \
+ "clamp("#val", "#lo", "#hi") signedness error"); \
+ __clamp(uval, ulo, uhi); })
+
+#define __careful_clamp(type, val, lo, hi) \
+ __clamp_once(type, val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
+
+/**
+ * clamp - return a value clamped to a given range with typechecking
+ * @val: current value
+ * @lo: lowest allowable value
+ * @hi: highest allowable value
+ *
+ * This macro checks @val/@lo/@hi to make sure they have compatible
+ * signedness.
+ */
+#define clamp(val, lo, hi) __careful_clamp(auto, val, lo, hi)
+
+/**
+ * clamp_t - return a value clamped to a given range using a given type
+ * @type: the type of variable to use
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of type
+ * @type to make all the comparisons.
+ */
+#define clamp_t(type, val, lo, hi) __careful_clamp(type, val, lo, hi)
+
+/**
+ * clamp_val - return a value clamped to a given range using val's type
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of whatever
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
+ * integer type.
+ */
+#define clamp_val(val, lo, hi) __careful_clamp(typeof(val), val, lo, hi)
+
+/*
+ * Do not check the array parameter using __must_be_array().
+ * In the following legit use-case where the "array" passed is a simple pointer,
+ * __must_be_array() will return a failure.
+ * --- 8< ---
+ * int *buff;
+ * ...
+ * min = min_array(buff, nb_items);
+ * --- 8< ---
+ *
+ * The first typeof(&(array)[0]) is needed in order to support arrays of both
+ * 'int *buff' and 'int buff[N]' types.
+ *
+ * The array can be an array of const items.
+ * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order
+ * to discard the const qualifier for the __element variable.
+ */
+#define __minmax_array(op, array, len) ({ \
+ typeof(&(array)[0]) __array = (array); \
+ typeof(len) __len = (len); \
+ __unqual_scalar_typeof(__array[0]) __element = __array[--__len];\
+ while (__len--) \
+ __element = op(__element, __array[__len]); \
+ __element; })
+
+/**
+ * min_array - return minimum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define min_array(array, len) __minmax_array(min, array, len)
+
+/**
+ * max_array - return maximum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define max_array(array, len) __minmax_array(max, array, len)
+
+static inline bool in_range64(u64 val, u64 start, u64 len)
+{
+ return (val - start) < len;
+}
+
+static inline bool in_range32(u32 val, u32 start, u32 len)
+{
+ return (val - start) < len;
+}
+
+/**
+ * in_range - Determine if a value lies within a range.
+ * @val: Value to test.
+ * @start: First value in range.
+ * @len: Number of values in range.
+ *
+ * This is more efficient than "if (start <= val && val < (start + len))".
+ * It also gives a different answer if @start + @len overflows the size of
+ * the type by a sufficient amount to encompass @val. Decide for yourself
+ * which behaviour you want, or prove that start + len never overflow.
+ * Do not blindly replace one form with the other.
+ */
+#define in_range(val, start, len) \
+ ((sizeof(start) | sizeof(len) | sizeof(val)) <= sizeof(u32) ? \
+ in_range32(val, start, len) : in_range64(val, start, len))
+
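+/*
+ * Example of the overflow difference (u32 arithmetic, illustrative):
+ * in_range32(5, 0xfffffff0, 0x20) is true, since 5 - 0xfffffff0 wraps
+ * to 0x15 < 0x20, whereas "start <= val && val < start + len" is false
+ * because start + len wraps to 0x10.
+ */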
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
+ */
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+/*
+ * Use these carefully: no type checking, and uses the arguments
+ * multiple times. Use for obvious constants only.
+ */
+#define MIN(a, b) __cmp(min, a, b)
+#define MAX(a, b) __cmp(max, a, b)
+#define MIN_T(type, a, b) __cmp(min, (type)(a), (type)(b))
+#define MAX_T(type, a, b) __cmp(max, (type)(a), (type)(b))
+
+#endif /* _LINUX_MINMAX_H */
diff --git a/include/linux/misc/keba.h b/include/linux/misc/keba.h
new file mode 100644
index 000000000000..a81d6fa70851
--- /dev/null
+++ b/include/linux/misc/keba.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024, KEBA Industrial Automation GmbH */
+
+#ifndef _LINUX_MISC_KEBA_H
+#define _LINUX_MISC_KEBA_H
+
+#include <linux/auxiliary_bus.h>
+
+struct i2c_board_info;
+struct spi_board_info;
+
+/**
+ * struct keba_i2c_auxdev - KEBA I2C auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of I2C controller IO memory
+ * @info_size: number of I2C devices to be probed
+ * @info: I2C devices to be probed
+ */
+struct keba_i2c_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ int info_size;
+ struct i2c_board_info *info;
+};
+
+/**
+ * struct keba_spi_auxdev - KEBA SPI auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of SPI controller IO memory
+ * @info_size: number of SPI devices to be probed
+ * @info: SPI devices to be probed
+ */
+struct keba_spi_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ int info_size;
+ struct spi_board_info *info;
+};
+
+/**
+ * struct keba_fan_auxdev - KEBA fan auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of fan controller IO memory
+ */
+struct keba_fan_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+};
+
+/**
+ * struct keba_batt_auxdev - KEBA battery auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of battery controller IO memory
+ */
+struct keba_batt_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+};
+
+/**
+ * struct keba_uart_auxdev - KEBA UART auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of UART controller IO memory
+ * @irq: number of UART controller interrupt
+ */
+struct keba_uart_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ unsigned int irq;
+};
+
+#endif /* _LINUX_MISC_KEBA_H */
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
new file mode 100644
index 000000000000..0cb36a3ffc47
--- /dev/null
+++ b/include/linux/misc_cgroup.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Miscellaneous cgroup controller.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Vipin Sharma <vipinsh@google.com>
+ */
+#ifndef _MISC_CGROUP_H_
+#define _MISC_CGROUP_H_
+
+/**
+ * enum misc_res_type - Types of misc cgroup entries supported by the host.
+ */
+enum misc_res_type {
+#ifdef CONFIG_KVM_AMD_SEV
+ /** @MISC_CG_RES_SEV: AMD SEV ASIDs resource */
+ MISC_CG_RES_SEV,
+ /** @MISC_CG_RES_SEV_ES: AMD SEV-ES ASIDs resource */
+ MISC_CG_RES_SEV_ES,
+#endif
+#ifdef CONFIG_INTEL_TDX_HOST
+ /** @MISC_CG_RES_TDX: Intel TDX HKIDs resource */
+ MISC_CG_RES_TDX,
+#endif
+ /** @MISC_CG_RES_TYPES: count of enum misc_res_type constants */
+ MISC_CG_RES_TYPES
+};
+
+struct misc_cg;
+
+#ifdef CONFIG_CGROUP_MISC
+
+#include <linux/cgroup.h>
+
+/**
+ * struct misc_res: Per cgroup per misc type resource
+ * @max: Maximum limit on the resource.
+ * @watermark: Historical maximum usage of the resource.
+ * @usage: Current usage of the resource.
+ * @events: Number of times the resource limit was exceeded.
+ * @events_local: Number of times the resource limit was exceeded at
+ *                this cgroup level.
+ */
+struct misc_res {
+ u64 max;
+ atomic64_t watermark;
+ atomic64_t usage;
+ atomic64_t events;
+ atomic64_t events_local;
+};
+
+/**
+ * struct misc_cg - Miscellaneous controller's cgroup structure.
+ * @css: cgroup subsys state object.
+ * @events_file: Handle for the misc resources events file.
+ * @events_local_file: Handle for the misc resources local events file.
+ * @res: Array of misc resources usage in the cgroup.
+ */
+struct misc_cg {
+ struct cgroup_subsys_state css;
+
+ /* misc.events */
+ struct cgroup_file events_file;
+ /* misc.events.local */
+ struct cgroup_file events_local_file;
+
+ struct misc_res res[MISC_CG_RES_TYPES];
+};
+
+int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
+
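+/*
+ * Typical charge/uncharge pattern (a sketch; MISC_CG_RES_SEV stands in
+ * for whichever resource type the caller manages):
+ *
+ *     struct misc_cg *cg = get_current_misc_cg();
+ *     int err = misc_cg_try_charge(MISC_CG_RES_SEV, cg, 1);
+ *
+ *     if (err) {
+ *             put_misc_cg(cg);
+ *             return err;
+ *     }
+ *     (use the resource; on release:)
+ *     misc_cg_uncharge(MISC_CG_RES_SEV, cg, 1);
+ *     put_misc_cg(cg);
+ */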
+/**
+ * css_misc() - Get misc cgroup from the css.
+ * @css: cgroup subsys state object.
+ *
+ * Context: Any context.
+ * Return:
+ * * %NULL - If @css is null.
+ * * struct misc_cg* - misc cgroup pointer of the passed css.
+ */
+static inline struct misc_cg *css_misc(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct misc_cg, css) : NULL;
+}
+
+/*
+ * get_current_misc_cg() - Find and get the misc cgroup of the current task.
+ *
+ * Returned cgroup has its ref count increased by 1. Caller must call
+ * put_misc_cg() to return the reference.
+ *
+ * Return: Misc cgroup to which the current task belongs.
+ */
+static inline struct misc_cg *get_current_misc_cg(void)
+{
+ return css_misc(task_get_css(current, misc_cgrp_id));
+}
+
+/*
+ * put_misc_cg() - Put the misc cgroup and reduce its ref count.
+ * @cg: cgroup to put.
+ */
+static inline void put_misc_cg(struct misc_cg *cg)
+{
+ if (cg)
+ css_put(&cg->css);
+}
+
+#else /* !CONFIG_CGROUP_MISC */
+
+static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
+{
+ return 0;
+}
+
+static inline int misc_cg_try_charge(enum misc_res_type type,
+ struct misc_cg *cg,
+ u64 amount)
+{
+ return 0;
+}
+
+static inline void misc_cg_uncharge(enum misc_res_type type,
+ struct misc_cg *cg,
+ u64 amount)
+{
+}
+
+static inline struct misc_cg *get_current_misc_cg(void)
+{
+ return NULL;
+}
+
+static inline void put_misc_cg(struct misc_cg *cg)
+{
+}
+
+#endif /* CONFIG_CGROUP_MISC */
+#endif /* _MISC_CGROUP_H_ */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 58751eae5f77..7d0aa718499c 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MISCDEVICE_H
#define _LINUX_MISCDEVICE_H
#include <linux/major.h>
@@ -6,9 +7,9 @@
#include <linux/device.h>
/*
- * These allocations are managed by device@lanana.org. If you use an
- * entry that is not in assigned your entry may well be moved and
- * reassigned, or set dynamic if a fixed value is not justified.
+ * These allocations are managed by device@lanana.org. If you need
+ * an entry that is not assigned here, it can be moved and
+ * reassigned or dynamically set if a fixed value is not justified.
*/
#define PSMOUSE_MINOR 1
@@ -24,18 +25,31 @@
#define TEMP_MINOR 131 /* Temperature Sensor */
#define APM_MINOR_DEV 134
#define RTC_MINOR 135
-#define EFI_RTC_MINOR 136 /* EFI Time services */
+/*#define EFI_RTC_MINOR 136 was EFI Time services */
#define VHCI_MINOR 137
#define SUN_OPENPROM_MINOR 139
#define DMAPI_MINOR 140 /* unused */
#define NVRAM_MINOR 144
+#define SBUS_FLASH_MINOR 152
#define SGI_MMTIMER 153
+#define PMU_MINOR 154
#define STORE_QUEUE_MINOR 155 /* unused */
+#define LCD_MINOR 156
+#define AC_MINOR 157
+#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */
+#define NWFLASH_MINOR 160 /* MAJOR is 10 - miscdevice */
+#define ENVCTRL_MINOR 162
#define I2O_MINOR 166
+#define UCTRL_MINOR 174
+#define AGPGART_MINOR 175
+#define TOSH_MINOR_DEV 181
#define HWRNG_MINOR 183
-#define MICROCODE_MINOR 184
+/*#define MICROCODE_MINOR 184 unused */
+#define KEYPAD_MINOR 185
#define IRNET_MINOR 187
+#define D7S_MINOR 193
#define VFIO_MINOR 196
+#define PXA3XX_GCU_MINOR 197
#define TUN_MINOR 200
#define CUSE_MINOR 203
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
@@ -46,6 +60,7 @@
#define MISC_MCELOG_MINOR 227
#define HPET_MINOR 228
#define FUSE_MINOR 229
+#define SNAPSHOT_MINOR 231
#define KVM_MINOR 232
#define BTRFS_MINOR 234
#define AUTOFS_MINOR 235
@@ -55,12 +70,19 @@
#define UHID_MINOR 239
#define USERIO_MINOR 240
#define VHOST_VSOCK_MINOR 241
-#define MISC_DYNAMIC_MINOR 255
+#define EISA_EEPROM_MINOR 241
+#define RFKILL_MINOR 242
-struct device;
-struct attribute_group;
+/*
+ * Misc char device minor code space division relative to the macro below:
+ *
+ * < 255  : fixed minor code
+ * == 255 : request for a dynamic minor code
+ * > 255  : dynamically assigned minor code, 1048320 minor codes in total
+ */
+#define MISC_DYNAMIC_MINOR 255
-struct miscdevice {
+struct miscdevice {
int minor;
const char *name;
const struct file_operations *fops;
@@ -77,14 +99,14 @@ extern void misc_deregister(struct miscdevice *misc);
/*
* Helper macro for drivers that don't do anything special in the initcall.
- * This helps in eleminating of boilerplate code.
+ * This helps to eliminate boilerplate code.
*/
#define builtin_misc_device(__misc_device) \
builtin_driver(__misc_device, misc_register)
/*
* Helper macro for drivers that don't do anything special in module init / exit
- * call. This helps in eleminating of boilerplate code.
+ * call. This helps to eliminate boilerplate code.
*/
#define module_misc_device(__misc_device) \
module_driver(__misc_device, misc_register, misc_deregister)
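+
+/*
+ * Minimal registration sketch (illustrative; 'my_fops' is a
+ * file_operations assumed to be defined by the driver):
+ *
+ *     static struct miscdevice my_misc = {
+ *             .minor = MISC_DYNAMIC_MINOR,
+ *             .name = "my_misc",
+ *             .fops = &my_fops,
+ *     };
+ *     module_misc_device(my_misc);
+ */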
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 09cebe528488..653d2a0aa44c 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -130,12 +130,20 @@ enum {
MLX4_CQE_STATUS_IPOK = 1 << 12,
};
+/* L4_CSUM is logically part of status, but has to be checked against badfcs_enc */
+enum {
+ MLX4_CQE_STATUS_L4_CSUM = 1 << 2,
+};
+
enum {
MLX4_CQE_LLC = 1,
MLX4_CQE_SNAP = 1 << 1,
MLX4_CQE_BAD_FCS = 1 << 4,
};
+#define MLX4_MAX_CQ_PERIOD (BIT(16) - 1)
+#define MLX4_MAX_CQ_COUNT (BIT(16) - 1)
+
static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
void __iomem *uar_page,
spinlock_t *doorbell_lock)
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index c8a63e148a98..f016263e1fcf 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -33,6 +33,7 @@
#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H
+#include <linux/auxiliary_bus.h>
#include <linux/if_ether.h>
#include <linux/pci.h>
#include <linux/completion.h>
@@ -40,14 +41,13 @@
#include <linux/cpu_rmap.h>
#include <linux/crash_dump.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/timecounter.h>
#define DEFAULT_UAR_PAGE_SHIFT 12
-#define MAX_MSIX_P_PORT 17
-#define MAX_MSIX 64
+#define MAX_MSIX 128
#define MIN_MSIX_P_PORT 5
#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
(dev_cap).num_ports * MIN_MSIX_P_PORT)
@@ -224,6 +224,9 @@ enum {
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
+ MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38,
+ MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39,
+ MLX4_DEV_CAP_FLAG2_SW_CQ_INIT = 1ULL << 40,
};
enum {
@@ -256,10 +259,6 @@ enum {
};
enum {
- MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
-};
-
-enum {
MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
@@ -524,6 +523,14 @@ struct mlx4_phys_caps {
u32 base_tunnel_sqpn;
};
+struct mlx4_spec_qps {
+ u32 qp0_qkey;
+ u32 qp0_proxy;
+ u32 qp0_tunnel;
+ u32 qp1_proxy;
+ u32 qp1_tunnel;
+};
+
struct mlx4_caps {
u64 fw_ver;
u32 function;
@@ -553,11 +560,7 @@ struct mlx4_caps {
int max_qp_init_rdma;
int max_qp_dest_rdma;
int max_tc_eth;
- u32 *qp0_qkey;
- u32 *qp0_proxy;
- u32 *qp1_proxy;
- u32 *qp0_tunnel;
- u32 *qp1_tunnel;
+ struct mlx4_spec_qps *spec_qps;
int num_srqs;
int max_srq_wqes;
int max_srq_sge;
@@ -570,7 +573,6 @@ struct mlx4_caps {
int reserved_eqs;
int num_comp_vectors;
int num_mpts;
- int max_fmr_maps;
int num_mtts;
int fmr_reserved_mtts;
int reserved_mtts;
@@ -628,6 +630,8 @@ struct mlx4_caps {
u32 vf_caps;
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
+ bool map_clock_to_user;
};
struct mlx4_buf_list {
@@ -703,17 +707,6 @@ struct mlx4_mw {
int enabled;
};
-struct mlx4_fmr {
- struct mlx4_mr mr;
- struct mlx4_mpt_entry *mpt;
- __be64 *mtts;
- dma_addr_t dma_handle;
- int max_pages;
- int max_maps;
- int maps;
- u8 page_shift;
-};
-
struct mlx4_uar {
unsigned long pfn;
int index;
@@ -746,7 +739,7 @@ struct mlx4_cq {
int cqn;
unsigned vector;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
struct {
struct list_head list;
@@ -763,7 +756,7 @@ struct mlx4_qp {
int qpn;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
u8 usage;
};
@@ -776,7 +769,7 @@ struct mlx4_srq {
int max_gs;
int wqe_shift;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
};
@@ -849,6 +842,12 @@ struct mlx4_vf_dev {
u8 n_ports;
};
+struct mlx4_fw_crdump {
+ bool snapshot_enable;
+ struct devlink_region *region_crspace;
+ struct devlink_region *region_fw_health;
+};
+
enum mlx4_pci_status {
MLX4_PCI_STATUS_DISABLED,
MLX4_PCI_STATUS_ENABLED,
@@ -869,6 +868,7 @@ struct mlx4_dev_persistent {
u8 interface_state;
struct mutex pci_status_mutex; /* sync pci state */
enum mlx4_pci_status pci_status;
+ struct mlx4_fw_crdump crdump;
};
struct mlx4_dev {
@@ -890,6 +890,12 @@ struct mlx4_dev {
u8 uar_page_shift;
};
+struct mlx4_adev {
+ struct auxiliary_device adev;
+ struct mlx4_dev *mdev;
+ int idx;
+};
+
struct mlx4_clock_params {
u64 offset;
u8 bar;
@@ -1088,6 +1094,19 @@ static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
(offset & (PAGE_SIZE - 1));
}
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+ return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
+{
+ return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
+}
+
+int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
+ u8 v2p_p2);
+
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
@@ -1116,7 +1135,7 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_buf *buf);
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
@@ -1126,7 +1145,8 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- unsigned vector, int collapsed, int timestamp_en);
+ unsigned int vector, int collapsed, int timestamp_en,
+ void *buf_addr, bool user_cq);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags, u8 usage);
@@ -1381,6 +1401,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac);
int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
@@ -1394,19 +1415,10 @@ int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
bool *vlan_offload_disabled);
void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
struct _rule_hw *eth_header);
-int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
-int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
- int npages, u64 iova, u32 *lkey, u32 *rkey);
-int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
- int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
-int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
-void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
- u32 *lkey, u32 *rkey);
-int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
int mlx4_test_async(struct mlx4_dev *dev);
@@ -1443,7 +1455,7 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
@@ -1509,6 +1521,8 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enable);
+
+struct mlx4_mpt_entry;
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
struct mlx4_mpt_entry ***mpt_entry);
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index a858bcb6220b..69825223081f 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -34,8 +34,12 @@
#define MLX4_DRIVER_H
#include <net/devlink.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/notifier.h>
#include <linux/mlx4/device.h>
+#define MLX4_ADEV_NAME "mlx4_core"
+
struct mlx4_dev;
#define MLX4_MAC_MASK 0xffffffffffffULL
@@ -54,64 +58,20 @@ enum {
MLX4_INTFF_BONDING = 1 << 0
};
-struct mlx4_interface {
- void * (*add) (struct mlx4_dev *dev);
- void (*remove)(struct mlx4_dev *dev, void *context);
- void (*event) (struct mlx4_dev *dev, void *context,
- enum mlx4_dev_event event, unsigned long param);
- void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
- void (*activate)(struct mlx4_dev *dev, void *context);
- struct list_head list;
+struct mlx4_adrv {
+ struct auxiliary_driver adrv;
enum mlx4_protocol protocol;
int flags;
};
-int mlx4_register_interface(struct mlx4_interface *intf);
-void mlx4_unregister_interface(struct mlx4_interface *intf);
-
-int mlx4_bond(struct mlx4_dev *dev);
-int mlx4_unbond(struct mlx4_dev *dev);
-static inline int mlx4_is_bonded(struct mlx4_dev *dev)
-{
- return !!(dev->flags & MLX4_FLAG_BONDED);
-}
-
-static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
-{
- return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
-}
-
-struct mlx4_port_map {
- u8 port1;
- u8 port2;
-};
-
-int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv);
+void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv);
-void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+int mlx4_register_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb);
+int mlx4_unregister_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb);
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
-static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
-{
- int i;
-
- for (i = ETH_ALEN; i > 0; i--) {
- addr[i - 1] = mac & 0xFF;
- mac >>= 8;
- }
-}
-
#endif /* MLX4_DRIVER_H */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 8e2828d48d7f..b9a7b1319f5d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -362,7 +362,7 @@ struct mlx4_wqe_datagram_seg {
struct mlx4_wqe_lso_seg {
__be32 mss_hdr_size;
- __be32 header[0];
+ __be32 header[];
};
enum mlx4_wqe_bind_seg_flags2 {
@@ -446,6 +446,7 @@ enum {
struct mlx4_wqe_inline_seg {
__be32 byte_count;
+ __u8 data[];
};
enum mlx4_update_qp_attr {
@@ -503,4 +504,5 @@ static inline u16 folded_qp(u32 q)
u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);
+void mlx4_put_qp(struct mlx4_qp *qp);
#endif /* MLX4_QP_H */
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
deleted file mode 100644
index 68cd08f02c2f..000000000000
--- a/include/linux/mlx5/cmd.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_CMD_H
-#define MLX5_CMD_H
-
-#include <linux/types.h>
-
-struct manage_pages_layout {
- u64 ptr;
- u32 reserved;
- u16 num_entries;
- u16 func_id;
-};
-
-
-struct mlx5_cmd_alloc_uar_imm_out {
- u32 rsvd[3];
- u32 uarn;
-};
-
-#endif /* MLX5_CMD_H */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 95898847c7d4..9d47cdc727ad 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -33,21 +33,19 @@
#ifndef MLX5_CORE_CQ_H
#define MLX5_CORE_CQ_H
-#include <rdma/ib_verbs.h>
#include <linux/mlx5/driver.h>
-
+#include <linux/refcount.h>
struct mlx5_core_cq {
u32 cqn;
int cqe_sz;
__be32 *set_ci_db;
__be32 *arm_db;
- struct mlx5_uars_page *uar;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
unsigned vector;
unsigned int irqn;
- void (*comp) (struct mlx5_core_cq *);
+ void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
void (*event) (struct mlx5_core_cq *, enum mlx5_event);
u32 cons_index;
unsigned arm_sn;
@@ -55,11 +53,13 @@ struct mlx5_core_cq {
int pid;
struct {
struct list_head list;
- void (*comp)(struct mlx5_core_cq *);
+ void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
void *priv;
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
+ struct mlx5_eq_comp *eq;
+ u16 uid;
};
@@ -94,9 +94,10 @@ enum {
};
enum {
- MLX5_CQ_MODIFY_PERIOD = 1 << 0,
- MLX5_CQ_MODIFY_COUNT = 1 << 1,
- MLX5_CQ_MODIFY_OVERRUN = 1 << 2,
+ MLX5_CQ_MODIFY_PERIOD = BIT(0),
+ MLX5_CQ_MODIFY_COUNT = BIT(1),
+ MLX5_CQ_MODIFY_OVERRUN = BIT(2),
+ MLX5_CQ_MODIFY_PERIOD_MODE = BIT(4),
};
enum {
@@ -123,13 +124,18 @@ struct mlx5_cq_modify_params {
};
enum {
- CQE_SIZE_64 = 0,
- CQE_SIZE_128 = 1,
+ CQE_STRIDE_64 = 0,
+ CQE_STRIDE_128 = 1,
+ CQE_STRIDE_128_PAD = 2,
};
-static inline int cqe_sz_to_mlx_sz(u8 size)
+#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
+#define MLX5_MAX_CQ_COUNT (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1)
+
+static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
{
- return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+ return padding_128_en ? CQE_STRIDE_128_PAD :
+ size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
}
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
@@ -163,21 +169,39 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
doorbell[1] = cpu_to_be32(cq->cqn);
- mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
+ mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL);
+}
+
+static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
+{
+ refcount_inc(&cq->refcount);
}
-int mlx5_init_cq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
+static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
+{
+ if (refcount_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
+
+void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *in, int inlen);
+ u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *out, int outlen);
+ u32 *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
+static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
+ struct mlx5_err_cqe *err_cqe)
+{
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+}
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
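The atomic_t -> refcount_t conversion above buys saturation and underflow
warnings, and the new mlx5_cq_hold()/mlx5_cq_put() helpers make the
completion-based teardown explicit. A sketch of the intended lifecycle,
assuming the create path set the initial reference:

#include <linux/mlx5/cq.h>

static void example_poll(struct mlx5_core_cq *cq)
{
        mlx5_cq_hold(cq);               /* pin the CQ while handling an EQE */
        /* ... process completions ... */
        mlx5_cq_put(cq);                /* may fire complete(&cq->free) */
}

static void example_teardown(struct mlx5_core_cq *cq)
{
        mlx5_cq_put(cq);                /* drop the creation reference */
        wait_for_completion(&cq->free); /* wait out concurrent holders */
}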
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index c13d71deaeca..d7f46a8fbfa1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/bitfield.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -48,12 +49,16 @@
/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
-#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
+#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
+#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
@@ -63,7 +68,7 @@
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
-#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+#define MLX5_ADDR_OF(typ, p, fld) ((void *)((u8 *)(p) + MLX5_BYTE_OFF(typ, fld)))
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
@@ -75,6 +80,11 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
+ MLX5_SET(typ, p, fld[idx], v); \
+} while (0)
+
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
@@ -116,6 +126,19 @@ __mlx5_mask(typ, fld))
___t; \
})
+#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
+__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
+__mlx5_mask16(typ, fld))
+
+#define MLX5_SET16(typ, p, fld, v) do { \
+ u16 _v = v; \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
+ *((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
+ cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
+ (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
+ << __mlx5_16_bit_off(typ, fld))); \
+} while (0)
+
/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
__mlx5_64_off(typ, fld)))
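/*
 * Editor's note: the new 16-bit accessors mirror the long-standing 32-bit
 * MLX5_SET()/MLX5_GET() pattern at __be16 granularity, for layouts whose
 * fields do not align to dwords. For reference, the 32-bit pattern is used
 * like this (cqc/create_cq_in are real layouts; nent and uar_index are
 * assumed inputs):
 *
 *      u32 in[MLX5_ST_SZ_DW(create_cq_in)] = {};
 *      void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 *
 *      MLX5_SET(cqc, cqc, log_cq_size, ilog2(nent));
 *      MLX5_SET(cqc, cqc, uar_page, uar_index);
 *
 * MLX5_SET16()/MLX5_GET16() behave the same but read-modify-write a single
 * big-endian 16-bit word.
 */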
@@ -188,6 +211,14 @@ enum {
enum {
MLX5_PFAULT_SUBTYPE_WQE = 0,
MLX5_PFAULT_SUBTYPE_RDMA = 1,
+ MLX5_PFAULT_SUBTYPE_MEMORY = 2,
+};
+
+enum wqe_page_fault_type {
+ MLX5_WQE_PF_TYPE_RMP = 0,
+ MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
+ MLX5_WQE_PF_TYPE_RESP = 2,
+ MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
};
enum {
@@ -227,6 +258,8 @@ enum {
MLX5_NON_FP_BFREGS_PER_UAR,
MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
+ MLX5_MIN_DYN_BFREGS = 512,
+ MLX5_MAX_DYN_BFREGS = 1024,
};
enum {
@@ -245,7 +278,10 @@ enum {
MLX5_MKEY_MASK_RW = 1ull << 20,
MLX5_MKEY_MASK_A = 1ull << 21,
MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
- MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
+ MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_PAGE_SIZE_5 = 1ull << 42,
+ MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
};
enum {
@@ -257,9 +293,10 @@ enum {
MLX5_UMR_INLINE = (1 << 7),
};
-#define MLX5_UMR_MTT_ALIGNMENT 0x40
-#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
-#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+#define MLX5_UMR_FLEX_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
+#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
+#define MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_ksm))
#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
@@ -267,11 +304,18 @@ enum {
MLX5_EVENT_QUEUE_TYPE_QP = 0,
MLX5_EVENT_QUEUE_TYPE_RQ = 1,
MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+ MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
+/* mlx5 components can subscribe to any one of these events via the
+ * mlx5_eq_notifier_register() API.
+ */
enum mlx5_event {
+ /* Special value to subscribe to any event */
+ MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,
+ /* HW events enum start: comp events are not subscribable */
MLX5_EVENT_TYPE_COMP = 0x0,
-
+ /* HW Async events enum start: subscribable events */
MLX5_EVENT_TYPE_PATH_MIG = 0x01,
MLX5_EVENT_TYPE_COMM_EST = 0x02,
MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
@@ -284,13 +328,17 @@ enum mlx5_event {
MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+ MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
+ MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
+ MLX5_EVENT_TYPE_XRQ_ERROR = 0x18,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
+ MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -302,11 +350,42 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+ MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
+ MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,
+
+ MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+ MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
+
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
+ MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
+
+ MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
+
+ MLX5_EVENT_TYPE_MAX = 0x100,
+};
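/*
 * Editor's sketch of the subscription flow mentioned above. struct mlx5_nb,
 * MLX5_NB_INIT() and mlx5_eq_notifier_register() are introduced elsewhere in
 * this series, so treat the exact spellings as assumptions:
 *
 *      static struct mlx5_nb port_nb;
 *
 *      static int port_event(struct notifier_block *nb,
 *                            unsigned long type, void *data)
 *      {
 *              struct mlx5_eqe *eqe = data;    // EQE of the delivered event
 *
 *              return NOTIFY_OK;
 *      }
 *
 *      MLX5_NB_INIT(&port_nb, port_event, PORT_CHANGE);
 *      mlx5_eq_notifier_register(dev, &port_nb);
 *
 * MLX5_EVENT_TYPE_NOTIFY_ANY subscribes to every async event; completion
 * events are not subscribable, as noted above.
 */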
+
+enum mlx5_driver_event {
+ MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+ MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
+ MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
+ MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
+ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
+ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
+};
+
+enum {
+ MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
+ MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
+ MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE = 0x2,
};
enum {
MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
+ MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
+ MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
+ MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};
enum {
@@ -320,21 +399,6 @@ enum {
};
enum {
- MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
- MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
- MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
- MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
- MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
- MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
- MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
- MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
- MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
- MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
- MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
- MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
-};
-
-enum {
MLX5_ROCE_VERSION_1 = 0,
MLX5_ROCE_VERSION_2 = 2,
};
@@ -369,6 +433,7 @@ enum {
MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
MLX5_OPCODE_BIND_MW = 0x18,
MLX5_OPCODE_CONFIG_CMD = 0x1f,
+ MLX5_OPCODE_ENHANCED_MPSW = 0x29,
MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
MLX5_RECV_OPCODE_SEND = 0x01,
@@ -381,11 +446,34 @@ enum {
MLX5_OPCODE_SET_PSV = 0x20,
MLX5_OPCODE_GET_PSV = 0x21,
MLX5_OPCODE_CHECK_PSV = 0x22,
+ MLX5_OPCODE_DUMP = 0x23,
MLX5_OPCODE_RGET_PSV = 0x26,
MLX5_OPCODE_RCHECK_PSV = 0x27,
MLX5_OPCODE_UMR = 0x25,
+ MLX5_OPCODE_FLOW_TBL_ACCESS = 0x2c,
+
+ MLX5_OPCODE_ACCESS_ASO = 0x2d,
+};
+
+enum {
+ MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
+ MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
+};
+
+enum {
+ MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
+ MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
+};
+
+struct mlx5_wqe_tls_static_params_seg {
+ u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
+};
+
+struct mlx5_wqe_tls_progress_params_seg {
+ __be32 tis_tir_num;
+ u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
};
enum {
@@ -408,10 +496,6 @@ enum {
};
enum {
- MLX5_CAP_OFF_CMDIF_CSUM = 46,
-};
-
-enum {
/*
* Max wqe size for rdma read is 512 bytes, so this
* limits our max_sge_rd as the wqe needs to fit:
@@ -454,20 +538,35 @@ struct mlx5_cmd_layout {
u8 status_own;
};
+enum mlx5_rfr_severity_bit_offsets {
+ MLX5_CRR_BIT_OFFSET = 0x6,
+ MLX5_RFR_BIT_OFFSET = 0x7,
+};
+
struct health_buffer {
- __be32 assert_var[5];
- __be32 rsvd0[3];
+ __be32 assert_var[6];
+ __be32 rsvd0[2];
__be32 assert_exit_ptr;
__be32 assert_callra;
- __be32 rsvd1[2];
+ __be32 rsvd1[1];
+ __be32 time;
__be32 fw_ver;
__be32 hw_id;
- __be32 rsvd2;
+ u8 rfr_severity;
+ u8 rsvd2[3];
u8 irisc_index;
u8 synd;
__be16 ext_synd;
};
+enum mlx5_initializing_bit_offsets {
+ MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
+};
+
+enum mlx5_cmd_addr_l_sz_offset {
+ MLX5_NIC_IFC_OFFSET = 8,
+};
+
struct mlx5_init_seg {
__be32 fw_rev;
__be32 cmdif_rev_fw_sub;
@@ -478,12 +577,17 @@ struct mlx5_init_seg {
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[880];
+ __be32 rsvd2[878];
+ __be32 cmd_exec_to;
+ __be32 cmd_q_init_to;
__be32 internal_timer_h;
__be32 internal_timer_l;
__be32 rsvd3[2];
__be32 health_counter;
- __be32 rsvd4[1019];
+ __be32 rsvd4[11];
+ __be32 real_time_h;
+ __be32 real_time_l;
+ __be32 rsvd5[1006];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
@@ -507,6 +611,12 @@ struct mlx5_eqe_cq_err {
u8 syndrome;
};
+struct mlx5_eqe_xrq_err {
+ __be32 reserved1[5];
+ __be32 type_xrqn;
+ __be32 reserved2;
+};
+
struct mlx5_eqe_port_state {
u8 reserved0[8];
u8 port;
@@ -534,16 +644,17 @@ struct mlx5_eqe_cmd {
};
struct mlx5_eqe_page_req {
- u8 rsvd0[2];
+ __be16 ec_function;
__be16 func_id;
__be32 num_pages;
__be32 rsvd1[5];
};
+#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096
struct mlx5_eqe_page_fault {
- __be32 bytes_committed;
union {
struct {
+ __be32 bytes_committed;
u16 reserved1;
__be16 wqe_index;
u16 reserved2;
@@ -553,6 +664,7 @@ struct mlx5_eqe_page_fault {
__be32 pftype_wq;
} __packed wqe;
struct {
+ __be32 bytes_committed;
__be32 r_key;
u16 reserved1;
__be16 packet_length;
@@ -560,6 +672,23 @@ struct mlx5_eqe_page_fault {
__be64 rdma_va;
__be32 pftype_token;
} __packed rdma;
+ struct {
+ u8 flags;
+ u8 reserved1;
+ __be16 post_demand_fault_pages;
+ __be16 pre_demand_fault_pages;
+ __be16 token47_32;
+ __be32 token31_0;
+ /*
+ * FW changed from specifying the fault size as a byte
+ * count to a 4K-page granularity. The size, now given
+ * in pages, occupies bits 31:12 to keep backward
+ * compatibility.
+ */
+ __be32 demand_fault_pages;
+ __be32 mkey;
+ __be64 va;
+ } __packed memory;
} __packed;
} __packed;
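/*
 * Editor's note: decoding the page-granular fault size described in the
 * comment above (pages live in bits 31:12 of demand_fault_pages):
 *
 *      u32 pages = be32_to_cpu(pf->memory.demand_fault_pages) >> 12;
 *      u64 bytes = (u64)pages * MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
 */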
@@ -594,6 +723,41 @@ struct mlx5_eqe_pps {
u8 rsvd2[12];
} __packed;
+struct mlx5_eqe_dct {
+ __be32 reserved[6];
+ __be32 dctn;
+};
+
+struct mlx5_eqe_temp_warning {
+ __be64 sensor_warning_msb;
+ __be64 sensor_warning_lsb;
+} __packed;
+
+struct mlx5_eqe_obj_change {
+ u8 rsvd0[2];
+ __be16 obj_type;
+ __be32 obj_id;
+} __packed;
+
+#define SYNC_RST_STATE_MASK 0xf
+
+enum sync_rst_state_type {
+ MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
+ MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
+ MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
+ MLX5_SYNC_RST_STATE_RESET_UNLOAD = 0x3,
+};
+
+struct mlx5_eqe_sync_fw_update {
+ u8 reserved_at_0[3];
+ u8 sync_rst_state;
+};
+
+struct mlx5_eqe_vhca_state {
+ __be16 ec_function;
+ __be16 function_id;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -609,6 +773,12 @@ union ev_data {
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
struct mlx5_eqe_pps pps;
+ struct mlx5_eqe_dct dct;
+ struct mlx5_eqe_temp_warning temp_warning;
+ struct mlx5_eqe_xrq_err xrq_err;
+ struct mlx5_eqe_sync_fw_update sync_fw_update;
+ struct mlx5_eqe_vhca_state vhca_state;
+ struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {
@@ -651,13 +821,26 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 outer_l3_tunneled;
+ u8 tls_outer_l3_tunneled;
u8 rsvd0;
__be16 wqe_id;
- u8 lro_tcppsh_abort_dupack;
- u8 lro_min_ttl;
- __be16 lro_tcp_win;
- __be32 lro_ack_seq_num;
+ union {
+ struct {
+ u8 tcppsh_abort_dupack;
+ u8 min_ttl;
+ __be16 tcp_win;
+ __be32 ack_seq_num;
+ } lro;
+ struct {
+ u8 reserved0:1;
+ u8 match:1;
+ u8 flush:1;
+ u8 reserved3:5;
+ u8 header_size;
+ __be16 header_entry_index;
+ __be32 data_offset;
+ } shampo;
+ };
__be32 rss_hash_result;
u8 rss_hash_type;
u8 ml_path;
@@ -669,14 +852,22 @@ struct mlx5_cqe64 {
u8 l4_l3_hdr_type;
__be16 vlan_info;
__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
- __be32 imm_inval_pkey;
+ union {
+ __be32 immediate;
+ __be32 inval_rkey;
+ __be32 pkey;
+ __be32 ft_metadata;
+ };
u8 rsvd40[4];
__be32 byte_cnt;
__be32 timestamp_h;
__be32 timestamp_l;
__be32 sop_drop_qpn;
__be16 wqe_counter;
- u8 signature;
+ union {
+ u8 signature;
+ u8 validity_iteration_count;
+ };
u8 op_own;
};
@@ -685,7 +876,7 @@ struct mlx5_mini_cqe8 {
__be32 rx_hash_result;
struct {
__be16 checksum;
- __be16 rsvd;
+ __be16 stridx;
};
struct {
__be16 wqe_counter;
@@ -705,18 +896,35 @@ enum {
enum {
MLX5_CQE_FORMAT_CSUM = 0x1,
+ MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
+};
+
+enum {
+ MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
+ MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
};
#define MLX5_MINI_CQE_ARRAY_SIZE 8
-static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
+static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
return (cqe->op_own >> 2) & 0x3;
}
-static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
- return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+ return cqe->op_own >> 4;
+}
+
+static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
+{
+ /* num_of_mini_cqes is zero based */
+ return get_cqe_opcode(cqe) + 1;
+}
+
+static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
@@ -724,19 +932,19 @@ static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}
-static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+ return cqe->tls_outer_l3_tunneled & 0x1;
}
-static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
- return cqe->outer_l3_tunneled & 0x1;
+ return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}
-static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+static inline bool cqe_has_vlan(const struct mlx5_cqe64 *cqe)
{
- return !!(cqe->l4_l3_hdr_type & 0x1);
+ return cqe->l4_l3_hdr_type & 0x1;
}
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -749,6 +957,17 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
+static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
+{
+ return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
+}
+
+#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
+#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9
+#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16
+#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE 6
+#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX 13
+
struct mpwrq_cqe_bc {
__be16 filler_consumed_strides;
__be16 byte_cnt;
@@ -794,14 +1013,23 @@ enum {
};
enum {
- CQE_RSS_HTYPE_IP = 0x3 << 2,
+ CQE_RSS_HTYPE_IP = GENMASK(3, 2),
/* cqe->rss_hash_type[3:2] - IP destination selected for hash
* (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
*/
- CQE_RSS_HTYPE_L4 = 0x3 << 6,
+ CQE_RSS_IP_NONE = 0x0,
+ CQE_RSS_IPV4 = 0x1,
+ CQE_RSS_IPV6 = 0x2,
+ CQE_RSS_RESERVED = 0x3,
+
+ CQE_RSS_HTYPE_L4 = GENMASK(7, 6),
/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
* (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
*/
+ CQE_RSS_L4_NONE = 0x0,
+ CQE_RSS_L4_TCP = 0x1,
+ CQE_RSS_L4_UDP = 0x2,
+ CQE_RSS_L4_IPSEC = 0x3,
};
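/*
 * Editor's note: with the masks now GENMASK()-based (and <linux/bitfield.h>
 * included earlier in this patch), decoding pairs naturally with FIELD_GET(),
 * e.g. one plausible helper:
 *
 *      static bool cqe_rss_is_tcp(const struct mlx5_cqe64 *cqe)
 *      {
 *              return FIELD_GET(CQE_RSS_HTYPE_L4, cqe->rss_hash_type) ==
 *                     CQE_RSS_L4_TCP;
 *      }
 */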
enum {
@@ -816,6 +1044,13 @@ enum {
CQE_L4_OK = 1 << 2,
};
+enum {
+ CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0,
+ CQE_TLS_OFFLOAD_DECRYPTED = 0x1,
+ CQE_TLS_OFFLOAD_RESYNC = 0x2,
+ CQE_TLS_OFFLOAD_ERROR = 0x3,
+};
+
struct mlx5_sig_err_cqe {
u8 rsvd0[16];
__be32 expected_trans_sig;
@@ -858,13 +1093,12 @@ enum {
MLX5_MKEY_REMOTE_INVAL = 1 << 24,
MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
MLX5_MKEY_BSF_EN = 1 << 30,
- MLX5_MKEY_LEN64 = 1 << 31,
};
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
- * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
+ * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation
*/
u8 status;
u8 pcie_control;
@@ -895,9 +1129,14 @@ enum {
};
enum {
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
- MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
- MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+ MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLX5_VPORT_ADMIN_STATE_UP = 0x1,
+ MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
+};
+
+enum {
+ MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1,
+ MLX5_VPORT_CVLAN_INSERT_ALWAYS = 0x3,
};
enum {
@@ -922,7 +1161,10 @@ enum {
MLX5_MATCH_OUTER_HEADERS = 1 << 0,
MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
MLX5_MATCH_INNER_HEADERS = 1 << 2,
-
+ MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
+ MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
+ MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
+ MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
};
enum {
@@ -958,6 +1200,21 @@ enum mlx5_wol_mode {
MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};
+enum mlx5_mpls_supported_fields {
+ MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
+ MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1,
+ MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
+ MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3
+};
+
+enum mlx5_flex_parser_protos {
+ MLX5_FLEX_PROTO_GENEVE = 1 << 3,
+ MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
+ MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
+ MLX5_FLEX_PROTO_ICMP = 1 << 8,
+ MLX5_FLEX_PROTO_ICMPV6 = 1 << 9,
+};
+
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
@@ -966,6 +1223,9 @@ enum mlx5_cap_mode {
HCA_CAP_OPMOD_GET_CUR = 1,
};
+/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
+ * capability memory.
+ */
enum mlx5_cap_type {
MLX5_CAP_GENERAL = 0,
MLX5_CAP_ETHERNET_OFFLOADS,
@@ -977,10 +1237,23 @@ enum mlx5_cap_type {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
- MLX5_CAP_RESERVED,
- MLX5_CAP_VECTOR_CALC,
- MLX5_CAP_QOS,
- MLX5_CAP_FPGA,
+ MLX5_CAP_QOS = 0xc,
+ MLX5_CAP_DEBUG,
+ MLX5_CAP_RESERVED_14,
+ MLX5_CAP_DEV_MEM,
+ MLX5_CAP_RESERVED_16,
+ MLX5_CAP_TLS,
+ MLX5_CAP_VDPA_EMULATION = 0x13,
+ MLX5_CAP_DEV_EVENT = 0x14,
+ MLX5_CAP_IPSEC,
+ MLX5_CAP_CRYPTO = 0x1a,
+ MLX5_CAP_SHAMPO = 0x1d,
+ MLX5_CAP_PSP = 0x1e,
+ MLX5_CAP_MACSEC = 0x1f,
+ MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
+ MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
+ MLX5_CAP_ADV_RDMA = 0x28,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -995,125 +1268,228 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
+ MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
+ MLX5_MCAM_REGS_0x9180_0x91FF = 0x3,
+ MLX5_MCAM_REGS_NUM = 0x4,
};
enum mlx5_mcam_feature_groups {
MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};
+enum mlx5_qcam_reg_groups {
+ MLX5_QCAM_REGS_FIRST_128 = 0x0,
+};
+
+enum mlx5_qcam_feature_groups {
+ MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0,
+};
+
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
+
+#define MLX5_CAP_GEN_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)
-#define MLX5_CAP_ETH(mdev, cap) \
- MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+#define MLX5_CAP_GEN_2(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
+
+#define MLX5_CAP_GEN_2_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
-#define MLX5_CAP_ETH_MAX(mdev, cap) \
+#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
-#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
+#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
-
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
-#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) \
+ MLX5_CAP_ADV_RDMA(mdev, rdma_transport_rx_flow_table_properties.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) \
+ MLX5_CAP_ADV_RDMA(mdev, rdma_transport_tx_flow_table_properties.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
+#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
-
#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
-#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
-
#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
-#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
+
+#define MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, ft_field_support_2_nic_receive.cap)
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
-#define MLX5_CAP_ESW_MAX(mdev, cap) \
- MLX5_GET(e_switch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
+#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_eswitch_cap, \
+ (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
+
+#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
+
+#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
+
+#define MLX5_CAP_ADV_RDMA(mdev, cap) \
+ MLX5_GET(adv_rdma_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_RDMA]->cur, cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+
+#define MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, ft_field_support_2_port_selection.cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
+
+#define MLX5_CAP_ODP_SCHEME(mdev, cap) \
+ (MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ mem_page_fault) ? \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ memory_page_fault_scheme_cap.cap) : \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ transport_page_fault_scheme_cap.cap))
-#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
- MLX5_GET(vector_calc_cap, \
- mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
+#define MLX5_CAP_ODP_MAX(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
+
+#define MLX5_CAP_DEBUG(mdev, cap)\
+ MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
+#define MLX5_CAP_PCAM_REG(mdev, reg) \
+ MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
+
#define MLX5_CAP_MCAM_REG(mdev, reg) \
- MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
+ mng_access_reg_cap_mask.access_regs.reg)
+
+#define MLX5_CAP_MCAM_REG2(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
+ mng_access_reg_cap_mask.access_regs2.reg)
+
+#define MLX5_CAP_MCAM_REG3(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9180_0x91FF], \
+ mng_access_reg_cap_mask.access_regs3.reg)
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
+#define MLX5_CAP_QCAM_REG(mdev, fld) \
+ MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)
+
+#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
+ MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)
+
#define MLX5_CAP_FPGA(mdev, cap) \
- MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+ MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
#define MLX5_CAP64_FPGA(mdev, cap) \
- MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+ MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
+
+#define MLX5_CAP_DEV_MEM(mdev, cap)\
+ MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
+
+#define MLX5_CAP64_DEV_MEM(mdev, cap)\
+ MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
+
+#define MLX5_CAP_TLS(mdev, cap) \
+ MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)
+
+#define MLX5_CAP_DEV_EVENT(mdev, cap)\
+ MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)
+
+#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
+ MLX5_GET(virtio_emulation_cap, \
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
+
+#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
+ MLX5_GET64(virtio_emulation_cap, \
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
+
+#define MLX5_CAP_IPSEC(mdev, cap)\
+ MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+
+#define MLX5_CAP_CRYPTO(mdev, cap)\
+ MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
+
+#define MLX5_CAP_MACSEC(mdev, cap)\
+ MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
+
+#define MLX5_CAP_SHAMPO(mdev, cap) \
+ MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_SHAMPO]->cur, cap)
+
+#define MLX5_CAP_PSP(mdev, cap)\
+ MLX5_GET(psp_cap, (mdev)->caps.hca[MLX5_CAP_PSP]->cur, cap)
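/*
 * Editor's note: all of these macros now resolve against the per-type
 * storage (mdev->caps.hca[cap_type]->cur or ->max), so call sites keep
 * their shape, e.g.:
 *
 *      if (MLX5_CAP_GEN(mdev, cqe_compression) && MLX5_CAP_ETH(mdev, swp))
 *              ...enable the feature...
 *
 * (cqe_compression and swp are real capability fields, used here purely
 * for illustration.)
 */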
enum {
MLX5_CMD_STAT_OK = 0x0,
@@ -1123,6 +1499,7 @@ enum {
MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_NOT_READY = 0x7,
MLX5_CMD_STAT_LIM_ERR = 0x8,
MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
MLX5_CMD_STAT_IX_ERR = 0xa,
@@ -1143,8 +1520,12 @@ enum {
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
+ MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP = 0x1a,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
+ MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP = 0x21,
+ MLX5_RS_FEC_HISTOGRAM_GROUP = 0x23,
};
enum {
@@ -1158,8 +1539,10 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
-#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
-#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 6
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 4
+#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
+#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index 0787de28f2fc..5c267707e1df 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -36,46 +36,25 @@
#define MLX5_BF_OFFSET 0x800
#define MLX5_CQ_DOORBELL 0x20
-#if BITS_PER_LONG == 64
/* Assume that we can just write a 64-bit doorbell atomically. s390
* actually doesn't have writeq() but S/390 systems don't even have
* PCI so we won't worry about it.
+ *
+ * Note that the write is not atomic on 32-bit systems! Unlike the 64-bit
+ * case, it requires proper locking. mlx5_write64 does no locking itself,
+ * so on 32-bit systems callers must protect the call with their own lock.
+ *
+ * TODO: use write{q,l}_relaxed()
*/
-#define MLX5_DECLARE_DOORBELL_LOCK(name)
-#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0)
-#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
- spinlock_t *doorbell_lock)
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest)
{
+#if BITS_PER_LONG == 64
__raw_writeq(*(u64 *)val, dest);
-}
-
#else
-
-/* Just fall back to a spinlock to protect the doorbell if
- * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
- * MMIO writes.
- */
-
-#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
-#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
-#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
- spinlock_t *doorbell_lock)
-{
- unsigned long flags;
-
- if (doorbell_lock)
- spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);
__raw_writel((__force u32) val[1], dest + 4);
- if (doorbell_lock)
- spin_unlock_irqrestore(doorbell_lock, flags);
-}
-
#endif
+}
#endif /* MLX5_DOORBELL_H */
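Condensed from the mlx5_cq_arm() body in cq.h above, a sketch of ringing a
doorbell through the simplified helper; per the comment in this file, on
32-bit kernels the caller must supply its own serialization:

static void example_ring_cq_db(struct mlx5_core_cq *cq,
                               void __iomem *uar_page, u32 cmd, u32 ci)
{
        __be32 db[2];

        db[0] = cpu_to_be32(((cq->arm_sn & 3) << 28) | cmd | ci);
        db[1] = cpu_to_be32(cq->cqn);

        wmb();  /* the doorbell must not pass earlier descriptor writes */
        mlx5_write64(db, uar_page + MLX5_CQ_DOORBELL);
}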
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b3fc9d586a9f..1c54aa6f74fb 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,30 +36,36 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
+#include <linux/pci-tph.h>
+#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
-#include <linux/idr.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/mutex.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/srq.h>
+#include <linux/mlx5/eq.h>
+#include <linux/timecounter.h>
+#include <net/devlink.h>
+
+#define MLX5_ADEV_NAME "mlx5_core"
+
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
enum {
MLX5_BOARD_ID_LEN = 64,
- MLX5_MAX_NAME_LEN = 16,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -78,65 +84,95 @@ enum mlx5_sqp_t {
};
enum {
- MLX5_MAX_PORTS = 2,
-};
-
-enum {
- MLX5_EQ_VEC_PAGES = 0,
- MLX5_EQ_VEC_CMD = 1,
- MLX5_EQ_VEC_ASYNC = 2,
- MLX5_EQ_VEC_PFAULT = 3,
- MLX5_EQ_VEC_COMP_BASE,
-};
-
-enum {
- MLX5_MAX_IRQ_NAME = 32
+ MLX5_MAX_PORTS = 8,
};
enum {
- MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
- MLX5_ATOMIC_MODE_CX = 2 << 16,
- MLX5_ATOMIC_MODE_8B = 3 << 16,
- MLX5_ATOMIC_MODE_16B = 4 << 16,
- MLX5_ATOMIC_MODE_32B = 5 << 16,
- MLX5_ATOMIC_MODE_64B = 6 << 16,
- MLX5_ATOMIC_MODE_128B = 7 << 16,
- MLX5_ATOMIC_MODE_256B = 8 << 16,
+ MLX5_ATOMIC_MODE_OFFSET = 16,
+ MLX5_ATOMIC_MODE_IB_COMP = 1,
+ MLX5_ATOMIC_MODE_CX = 2,
+ MLX5_ATOMIC_MODE_8B = 3,
+ MLX5_ATOMIC_MODE_16B = 4,
+ MLX5_ATOMIC_MODE_32B = 5,
+ MLX5_ATOMIC_MODE_64B = 6,
+ MLX5_ATOMIC_MODE_128B = 7,
+ MLX5_ATOMIC_MODE_256B = 8,
};
enum {
+ MLX5_REG_SBPR = 0xb001,
+ MLX5_REG_SBCM = 0xb002,
+ MLX5_REG_QPTS = 0x4002,
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
+ MLX5_REG_QPDPM = 0x4013,
+ MLX5_REG_QCAM = 0x4019,
MLX5_REG_DCBX_PARAM = 0x4020,
MLX5_REG_DCBX_APP = 0x4021,
MLX5_REG_FPGA_CAP = 0x4022,
MLX5_REG_FPGA_CTRL = 0x4023,
MLX5_REG_FPGA_ACCESS_REG = 0x4024,
+ MLX5_REG_CORE_DUMP = 0x402e,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
MLX5_REG_PAOS = 0x5006,
MLX5_REG_PFCC = 0x5007,
MLX5_REG_PPCNT = 0x5008,
+ MLX5_REG_PPTB = 0x500b,
+ MLX5_REG_PBMC = 0x500c,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
+ MLX5_REG_PDDR = 0x5031,
MLX5_REG_PMLP = 0x5002,
+ MLX5_REG_PPLM = 0x5023,
+ MLX5_REG_PPHCR = 0x503E,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
+ MLX5_REG_MTCAP = 0x9009,
+ MLX5_REG_MTMP = 0x900A,
MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MNVDA = 0x9024,
+ MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MRTC = 0x902d,
+ MLX5_REG_MTRC_CAP = 0x9040,
+ MLX5_REG_MTRC_CONF = 0x9041,
+ MLX5_REG_MTRC_STDB = 0x9042,
+ MLX5_REG_MTRC_CTRL = 0x9043,
+ MLX5_REG_MPEIN = 0x9050,
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MTUTC = 0x9055,
+ MLX5_REG_MPEGC = 0x9056,
+ MLX5_REG_MPIR = 0x9059,
+ MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
+ MLX5_REG_MSECQ = 0x9155,
+ MLX5_REG_MSEES = 0x9156,
+ MLX5_REG_MIRC = 0x9162,
+ MLX5_REG_MTPTM = 0x9180,
+ MLX5_REG_MTCTR = 0x9181,
+ MLX5_REG_MRTCQ = 0x9182,
+ MLX5_REG_SBCAM = 0xB01F,
+ MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_NIC_CAP = 0xC00D,
+ MLX5_REG_DTOR = 0xC00E,
+ MLX5_REG_VHCA_ICM_CTRL = 0xC010,
+};
+
+enum mlx5_qpts_trust_state {
+ MLX5_QPTS_TRUST_PCP = 1,
+ MLX5_QPTS_TRUST_DSCP = 2,
};
enum mlx5_dcbx_oper_mode {
@@ -147,6 +183,8 @@ enum mlx5_dcbx_oper_mode {
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+ MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
+ MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};
enum mlx5_page_fault_resume_flags {
@@ -169,8 +207,13 @@ enum port_state_policy {
MLX5_POLICY_INVALID = 0xffffffff
};
+enum mlx5_coredev_type {
+ MLX5_COREDEV_PF,
+ MLX5_COREDEV_VF,
+ MLX5_COREDEV_SF,
+};
+
struct mlx5_field_desc {
- struct dentry *dent;
int i;
};
@@ -179,20 +222,13 @@ struct mlx5_rsc_debug {
void *object;
enum dbg_rsc_type type;
struct dentry *root;
- struct mlx5_field_desc fields[0];
+ struct mlx5_field_desc fields[];
};
enum mlx5_dev_event {
- MLX5_DEV_EVENT_SYS_ERROR,
- MLX5_DEV_EVENT_PORT_UP,
- MLX5_DEV_EVENT_PORT_DOWN,
- MLX5_DEV_EVENT_PORT_INITIALIZED,
- MLX5_DEV_EVENT_LID_CHANGE,
- MLX5_DEV_EVENT_PKEY_CHANGE,
- MLX5_DEV_EVENT_GUID_CHANGE,
- MLX5_DEV_EVENT_CLIENT_REREG,
- MLX5_DEV_EVENT_PPS,
- MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
+ MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
+ MLX5_DEV_EVENT_PORT_AFFINITY = 129,
+ MLX5_DEV_EVENT_MULTIPORT_ESW = 130,
};
enum mlx5_port_status {
@@ -200,26 +236,10 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-enum mlx5_eq_type {
- MLX5_EQ_TYPE_COMP,
- MLX5_EQ_TYPE_ASYNC,
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- MLX5_EQ_TYPE_PF,
-#endif
-};
-
-struct mlx5_bfreg_info {
- u32 *sys_pages;
- int num_low_latency_bfregs;
- unsigned int *count;
-
- /*
- * protect bfreg allocation data structs
- */
- struct mutex lock;
- u32 ver;
- bool lib_uar_4k;
- u32 num_sys_pages;
+enum mlx5_cmdif_state {
+ MLX5_CMDIF_STATE_UNINITIALIZED,
+ MLX5_CMDIF_STATE_UP,
+ MLX5_CMDIF_STATE_DOWN,
};
struct mlx5_cmd_first {
@@ -236,11 +256,6 @@ struct mlx5_cmd_msg {
struct mlx5_cmd_debug {
struct dentry *dbg_root;
- struct dentry *dbg_in;
- struct dentry *dbg_out;
- struct dentry *dbg_outlen;
- struct dentry *dbg_status;
- struct dentry *dbg_run;
void *in_msg;
void *out_msg;
u8 status;
@@ -264,25 +279,43 @@ enum {
struct mlx5_cmd_stats {
u64 sum;
u64 n;
+ /* number of times command failed */
+ u64 failed;
+ /* number of times command failed on bad status returned by FW */
+ u64 failed_mbox_status;
+ /* last command failed returned errno */
+ u32 last_failed_errno;
+ /* last bad status returned by FW */
+ u8 last_failed_mbox_status;
+ /* last command failed syndrome returned by FW */
+ u32 last_failed_syndrome;
struct dentry *root;
- struct dentry *avg;
- struct dentry *count;
/* protect command average calculations */
spinlock_t lock;
};
struct mlx5_cmd {
+ struct mlx5_nb nb;
+
+ /* members which need to be queried or reinitialized on each reload */
+ struct {
+ u16 cmdif_rev;
+ u8 log_sz;
+ u8 log_stride;
+ int max_reg_cmds;
+ unsigned long bitmask;
+ struct semaphore sem;
+ struct semaphore pages_sem;
+ struct semaphore throttle_sem;
+ struct semaphore unprivileged_sem;
+ struct xarray privileged_uids;
+ } vars;
+ enum mlx5_cmdif_state state;
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
void *cmd_buf;
dma_addr_t dma;
- u16 cmdif_rev;
- u8 log_sz;
- u8 log_stride;
- int max_reg_cmds;
- int events;
- u32 __iomem *vector;
/* protect command queue allocations
*/
@@ -292,25 +325,16 @@ struct mlx5_cmd {
*/
spinlock_t token_lock;
u8 token;
- unsigned long bitmask;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct workqueue_struct *wq;
- struct semaphore sem;
- struct semaphore pages_sem;
int mode;
+ u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
struct dma_pool *pool;
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
- struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
-};
-
-struct mlx5_port_caps {
- int gid_table_len;
- int pkey_table_len;
- u8 ext_port_cap;
- bool has_smi;
+ struct xarray stats;
};
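/*
 * Editor's note: with the fixed stats[MLX5_CMD_OP_MAX] array gone, per-opcode
 * statistics are looked up in the xarray keyed by opcode (the allocation path
 * is assumed to populate it elsewhere):
 *
 *      struct mlx5_cmd_stats *stats = xa_load(&cmd->stats, opcode);
 *
 *      if (stats) {
 *              spin_lock_irq(&stats->lock);
 *              stats->n++;
 *              spin_unlock_irq(&stats->lock);
 *      }
 */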
struct mlx5_cmd_mailbox {
@@ -324,13 +348,6 @@ struct mlx5_buf_list {
dma_addr_t map;
};
-struct mlx5_buf {
- struct mlx5_buf_list direct;
- int npages;
- int size;
- u8 page_shift;
-};
-
struct mlx5_frag_buf {
struct mlx5_buf_list *frags;
int npages;
@@ -338,42 +355,14 @@ struct mlx5_frag_buf {
u8 page_shift;
};
-struct mlx5_eq_tasklet {
- struct list_head list;
- struct list_head process_list;
- struct tasklet_struct task;
- /* lock on completion tasklet list */
- spinlock_t lock;
-};
-
-struct mlx5_eq_pagefault {
- struct work_struct work;
- /* Pagefaults lock */
- spinlock_t lock;
- struct workqueue_struct *wq;
- mempool_t *pool;
-};
-
-struct mlx5_eq {
- struct mlx5_core_dev *dev;
- __be32 __iomem *doorbell;
- u32 cons_index;
- struct mlx5_buf buf;
- int size;
- unsigned int irqn;
- u8 eqn;
- int nent;
- u64 mask;
- struct list_head list;
- int index;
- struct mlx5_rsc_debug *dbg;
- enum mlx5_eq_type type;
- union {
- struct mlx5_eq_tasklet tasklet_ctx;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq_pagefault pf_ctx;
-#endif
- };
+struct mlx5_frag_buf_ctrl {
+ struct mlx5_buf_list *frags;
+ u32 sz_m1;
+ u16 frag_sz_m1;
+ u16 strides_offset;
+ u8 log_sz;
+ u8 log_stride;
+ u8 log_frag_strides;
};
struct mlx5_core_psv {
@@ -397,19 +386,6 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
-enum {
- MLX5_MKEY_MR = 1,
- MLX5_MKEY_MW,
-};
-
-struct mlx5_core_mkey {
- u64 iova;
- u64 size;
- u32 key;
- u32 pd;
- u32 type;
-};
-
#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
@@ -423,37 +399,9 @@ enum mlx5_res_type {
struct mlx5_core_rsc_common {
enum mlx5_res_type res;
- atomic_t refcount;
- struct completion free;
-};
-
-struct mlx5_core_srq {
- struct mlx5_core_rsc_common common; /* must be first */
- u32 srqn;
- int max;
- int max_gs;
- int max_avail_gather;
- int wqe_shift;
- void (*event) (struct mlx5_core_srq *, enum mlx5_event);
-
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
-};
-
-struct mlx5_eq_table {
- void __iomem *update_ci;
- void __iomem *update_arm_ci;
- struct list_head comp_eqs_list;
- struct mlx5_eq pages_eq;
- struct mlx5_eq async_eq;
- struct mlx5_eq cmd_eq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq pfault_eq;
-#endif
- int num_comp_vectors;
- /* protect EQs list
- */
- spinlock_t lock;
+ bool invalid;
};
struct mlx5_uars_page {
@@ -486,7 +434,6 @@ struct mlx5_sq_bfreg {
struct mlx5_uars_page *up;
bool wc;
u32 index;
- unsigned int offset;
};
struct mlx5_core_health {
@@ -495,80 +442,69 @@ struct mlx5_core_health {
struct timer_list timer;
u32 prev;
int miss_counter;
- bool sick;
- /* wq spinlock to synchronize draining */
- spinlock_t wq_lock;
+ u8 synd;
+ u32 fatal_error;
+ u32 crdump_size;
struct workqueue_struct *wq;
unsigned long flags;
- struct work_struct work;
- struct delayed_work recover_work;
-};
-
-struct mlx5_cq_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
-
-struct mlx5_qp_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
-
-struct mlx5_srq_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
+ struct work_struct fatal_report_work;
+ struct work_struct report_work;
+ struct devlink_health_reporter *fw_reporter;
+ struct devlink_health_reporter *fw_fatal_reporter;
+ struct devlink_health_reporter *vnic_reporter;
+ struct delayed_work update_fw_log_ts_work;
};
-struct mlx5_mkey_table {
- /* protect radix tree
- */
- rwlock_t lock;
- struct radix_tree_root tree;
+enum {
+ MLX5_PF_NOTIFY_DISABLE_VF,
+ MLX5_PF_NOTIFY_ENABLE_VF,
};
struct mlx5_vf_context {
int enabled;
u64 port_guid;
u64 node_guid;
+ /* Valid bits are used to validate administrative guid only.
+ * Enabled after ndo_set_vf_guid
+ */
+ u8 port_guid_valid:1;
+ u8 node_guid_valid:1;
enum port_state_policy policy;
+ struct blocking_notifier_head notifier;
};
struct mlx5_core_sriov {
struct mlx5_vf_context *vfs_ctx;
int num_vfs;
- int enabled_vfs;
-};
-
-struct mlx5_irq_info {
- char name[MLX5_MAX_IRQ_NAME];
-};
-
-struct mlx5_fc_stats {
- struct rb_root counters;
- struct list_head addlist;
- /* protect addlist add/splice operations */
- spinlock_t addlist_lock;
-
- struct workqueue_struct *wq;
- struct delayed_work work;
- unsigned long next_query;
- unsigned long sampling_interval; /* jiffies */
+ u16 max_vfs;
+ u16 max_ec_vfs;
};
+struct mlx5_events;
+struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
-struct mlx5_pagefault;
+struct mlx5_devcom_dev;
+struct mlx5_fw_reset;
+struct mlx5_eq_table;
+struct mlx5_irq_table;
+struct mlx5_sf_dev_table;
+struct mlx5_sf_hw_table;
+struct mlx5_sf_table;
+struct mlx5_crypto_dek_priv;
+
+struct mlx5_rate_limit {
+ u32 rate;
+ u32 max_burst_sz;
+ u16 typical_pkt_sz;
+};
struct mlx5_rl_entry {
- u32 rate;
- u16 index;
- u16 refcount;
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
+ u64 refcount;
+ u16 index;
+ u16 uid;
+ u8 dedicated : 1;
};
struct mlx5_rl_table {
@@ -578,112 +514,131 @@ struct mlx5_rl_table {
u32 max_rate;
u32 min_rate;
struct mlx5_rl_entry *rl_entry;
+ u64 refcount;
};
-enum port_module_event_status_type {
- MLX5_MODULE_STATUS_PLUGGED = 0x1,
- MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
- MLX5_MODULE_STATUS_ERROR = 0x3,
- MLX5_MODULE_STATUS_NUM = 0x3,
+struct mlx5_core_roce {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *allow_rule;
};
-enum port_module_event_error_type {
- MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
- MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
- MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
- MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
- MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
- MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
- MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN,
- MLX5_MODULE_EVENT_ERROR_NUM,
+enum {
+ MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
+ MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+ /* Set during device detach to block any further devices
+ * creation/deletion on drivers rescan. Unset during device attach.
+ */
+ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
+ MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3,
+};
+
+struct mlx5_adev {
+ struct auxiliary_device adev;
+ struct mlx5_core_dev *mdev;
+ int idx;
};
-struct mlx5_port_module_event_stats {
- u64 status_counters[MLX5_MODULE_STATUS_NUM];
- u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+struct mlx5_debugfs_entries {
+ struct dentry *dbg_root;
+ struct dentry *qp_debugfs;
+ struct dentry *eq_debugfs;
+ struct dentry *cq_debugfs;
+ struct dentry *cmdif_debugfs;
+ struct dentry *pages_debugfs;
+ struct dentry *lag_debugfs;
};
+enum mlx5_func_type {
+ MLX5_PF,
+ MLX5_VF,
+ MLX5_SF,
+ MLX5_HOST_PF,
+ MLX5_EC_VF,
+ MLX5_FUNC_TYPE_NUM,
+};
+
+struct mlx5_ft_pool;
struct mlx5_priv {
- char name[MLX5_MAX_NAME_LEN];
- struct mlx5_eq_table eq_table;
- struct mlx5_irq_info *irq_info;
+ /* IRQ table valid only for real pci devices PF or VF */
+ struct mlx5_irq_table *irq_table;
+ struct mlx5_eq_table *eq_table;
/* pages stuff */
+ struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
- struct rb_root page_root;
- int fw_pages;
+ struct xarray page_root_xa;
atomic_t reg_pages;
struct list_head free_list;
- int vfs_pages;
+ u32 fw_pages;
+ u32 page_counters[MLX5_FUNC_TYPE_NUM];
+ u32 fw_pages_alloc_failed;
+ u32 give_pages_dropped;
+ u32 reclaim_pages_discard;
struct mlx5_core_health health;
+ struct list_head traps;
- struct mlx5_srq_table srq_table;
-
- /* start: qp staff */
- struct mlx5_qp_table qp_table;
- struct dentry *qp_debugfs;
- struct dentry *eq_debugfs;
- struct dentry *cq_debugfs;
- struct dentry *cmdif_debugfs;
- /* end: qp staff */
-
- /* start: cq staff */
- struct mlx5_cq_table cq_table;
- /* end: cq staff */
-
- /* start: mkey staff */
- struct mlx5_mkey_table mkey_table;
- /* end: mkey staff */
+ struct mlx5_debugfs_entries dbg;
/* start: alloc stuff */
- /* protect buffer alocation according to numa node */
+ /* protect buffer allocation according to numa node */
struct mutex alloc_mutex;
int numa_node;
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc stuff */
- struct dentry *dbg_root;
- /* protect mkey key part */
- spinlock_t mkey_lock;
- u8 mkey_key;
-
- struct list_head dev_list;
- struct list_head ctx_list;
- spinlock_t ctx_lock;
+ struct mlx5_adev **adev;
+ int adev_idx;
+ int sw_vhca_id;
+ struct mlx5_events *events;
+ struct mlx5_vhca_events *vhca_events;
struct mlx5_flow_steering *steering;
+ struct mlx5_mpfs *mpfs;
+ struct blocking_notifier_head esw_n_head;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
- unsigned long pci_dev_data;
- struct mlx5_fc_stats fc_stats;
+ u32 flags;
+ struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_comp_dev *hca_devcom_comp;
+ struct mlx5_fw_reset *fw_reset;
+ struct mlx5_core_roce roce;
+ struct mlx5_fc_stats *fc_stats;
struct mlx5_rl_table rl_table;
+ struct mlx5_ft_pool *ft_pool;
- struct mlx5_port_module_event_stats pme_stats;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void *pfault_ctx;
- struct srcu_struct pfault_srcu;
-#endif
struct mlx5_bfreg_data bfregs;
- struct mlx5_uars_page *uar;
+ struct mlx5_sq_bfreg bfreg;
+#ifdef CONFIG_MLX5_SF
+ struct mlx5_nb vhca_state_nb;
+ struct blocking_notifier_head vhca_state_n_head;
+ struct notifier_block sf_dev_nb;
+ struct mlx5_sf_dev_table *sf_dev_table;
+ struct mlx5_core_dev *parent_mdev;
+#endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+ struct notifier_block sf_hw_table_vhca_nb;
+ struct mlx5_sf_hw_table *sf_hw_table;
+ struct notifier_block sf_table_esw_nb;
+ struct notifier_block sf_table_vhca_nb;
+ struct notifier_block sf_table_mdev_nb;
+ struct mlx5_sf_table *sf_table;
+#endif
+ struct blocking_notifier_head lag_nh;
};
enum mlx5_device_state {
- MLX5_DEVICE_STATE_UP,
+ MLX5_DEVICE_STATE_UP = 1,
MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
enum mlx5_interface_state {
MLX5_INTERFACE_STATE_UP = BIT(0),
+ MLX5_BREAK_FW_WAIT = BIT(1),
};
enum mlx5_pci_status {
@@ -697,54 +652,35 @@ enum mlx5_pagefault_type_flags {
MLX5_PFAULT_RDMA = 1 << 2,
};
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
- u32 bytes_committed;
- u32 token;
- u8 event_subtype;
- u8 type;
- union {
- /* Initiator or send message responder pagefault details. */
- struct {
- /* Received packet size, only valid for responders. */
- u32 packet_size;
- /*
- * Number of resource holding WQE, depends on type.
- */
- u32 wq_num;
- /*
- * WQE index. Refers to either the send queue or
- * receive queue, according to event_subtype.
- */
- u16 wqe_index;
- } wqe;
- /* RDMA responder pagefault details */
- struct {
- u32 r_key;
- /*
- * Received packet size, minimal size page fault
- * resolution required for forward progress.
- */
- u32 packet_size;
- u32 rdma_op_len;
- u64 rdma_va;
- } rdma;
- };
-
- struct mlx5_eq *eq;
- struct work_struct work;
-};
-
struct mlx5_td {
+	/* protects tirs_list changes while the tirs are refreshed */
+ struct mutex list_lock;
struct list_head tirs_list;
u32 tdn;
};
struct mlx5e_resources {
- u32 pdn;
- struct mlx5_td td;
- struct mlx5_core_mkey mkey;
- struct mlx5_sq_bfreg bfreg;
+ struct mlx5e_hw_objs {
+ u32 pdn;
+ struct mlx5_td td;
+ u32 mkey;
+ struct mlx5_sq_bfreg *bfregs;
+ unsigned int num_bfregs;
+#define MLX5_MAX_NUM_TC 8
+ u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
+ bool tisn_valid;
+ } hw_objs;
+ struct net_device *uplink_netdev;
+ netdevice_tracker tracker;
+ struct mutex uplink_netdev_lock;
+ struct mlx5_crypto_dek_priv *dek_priv;
+};
+
+enum mlx5_sw_icm_type {
+ MLX5_SW_ICM_TYPE_STEERING,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN,
+ MLX5_SW_ICM_TYPE_SW_ENCAP,
};
#define MLX5_MAX_RESERVED_GIDS 8
@@ -755,7 +691,53 @@ struct mlx5_rsvd_gids {
struct ida ida;
};
+struct mlx5_clock;
+struct mlx5_clock_dev_state;
+struct mlx5_dm;
+struct mlx5_fw_tracer;
+struct mlx5_vxlan;
+struct mlx5_geneve;
+struct mlx5_hv_vhca;
+struct mlx5_st;
+
+#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+
+enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+};
+
+enum {
+ MKEY_CACHE_LAST_STD_ENTRY = 20,
+ MLX5_IMR_KSM_CACHE_ENTRY,
+ MAX_MKEY_CACHE_ENTRIES
+};
+
+struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ u8 num_cmd_caches;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MKEY_CACHE_ENTRIES];
+};
+
+struct mlx5_hca_cap {
+ u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
+};
+
+enum mlx5_wc_state {
+ MLX5_WC_STATE_UNINITIALIZED,
+ MLX5_WC_STATE_UNSUPPORTED,
+ MLX5_WC_STATE_SUPPORTED,
+};
+
struct mlx5_core_dev {
+ struct device *device;
+ enum mlx5_coredev_type coredev_type;
struct pci_dev *pdev;
/* sync pci state */
struct mutex pci_status_mutex;
@@ -763,37 +745,59 @@ struct mlx5_core_dev {
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
- struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
struct {
- u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
- u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
+ u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
+ u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
+ u8 embedded_cpu;
} caps;
+ struct mlx5_timeouts *timeouts;
+ u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
+ phys_addr_t bar_addr;
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
+ struct lock_class_key lock_key;
unsigned long intf_state;
- void (*event) (struct mlx5_core_dev *dev,
- enum mlx5_dev_event event,
- unsigned long param);
struct mlx5_priv priv;
- struct mlx5_profile *profile;
- atomic_t num_qps;
+ struct mlx5_profile profile;
u32 issi;
struct mlx5e_resources mlx5e_res;
+ struct mlx5_dm *dm;
+ struct mlx5_st *st;
+ struct mlx5_vxlan *vxlan;
+ struct mlx5_geneve *geneve;
struct {
struct mlx5_rsvd_gids reserved_gids;
- atomic_t roce_en;
+ u32 roce_en;
} roce;
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
+ struct mlx5_clock *clock;
+ struct mlx5_clock_dev_state *clock_state;
+ struct mlx5_ib_clock_info *clock_info;
+ struct mlx5_fw_tracer *tracer;
+ struct mlx5_rsc_dump *rsc_dump;
+ u32 vsc_addr;
+ struct mlx5_hv_vhca *hv_vhca;
+ struct mlx5_hwmon *hwmon;
+ u64 num_block_tc;
+ u64 num_block_ipsec;
+#ifdef CONFIG_MLX5_MACSEC
+ struct mlx5_macsec_fs *macsec_fs;
+ /* MACsec notifier chain to sync MACsec core and IB database */
+ struct blocking_notifier_head macsec_nh;
#endif
+ u64 num_ipsec_offloads;
+ struct mlx5_sd *sd;
+ enum mlx5_wc_state wc_state;
+ /* sync write combining state */
+ struct mutex wc_state_lock;
};
struct mlx5_db {
@@ -806,6 +810,8 @@ struct mlx5_db {
int index;
};
+#define MLX5_DEFAULT_NUM_DOORBELLS 8
+
enum {
MLX5_COMP_EQ_SIZE = 1024,
};
@@ -819,6 +825,7 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
enum {
MLX5_CMD_ENT_STATE_PENDING_COMP,
+ MLX5_CMD_ENT_STATE_TIMEDOUT,
};
struct mlx5_cmd_work_ent {
@@ -831,6 +838,8 @@ struct mlx5_cmd_work_ent {
struct delayed_work cb_timeout_work;
void *context;
int idx;
+ struct completion handling;
+ struct completion slotted;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
@@ -843,11 +852,8 @@ struct mlx5_cmd_work_ent {
u64 ts2;
u16 op;
bool polling;
-};
-
-struct mlx5_pas {
- u64 pa;
- u8 log_sz;
+ /* Track the max comp handlers */
+ refcount_t refcnt;
};
enum phy_port_state {
@@ -868,8 +874,8 @@ struct mlx5_hca_vport_context {
u64 node_guid;
u32 cap_mask1;
u32 cap_mask1_perm;
- u32 cap_mask2;
- u32 cap_mask2_perm;
+ u16 cap_mask2;
+ u16 cap_mask2_perm;
u16 lid;
u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
u8 lmc;
@@ -879,24 +885,13 @@ struct mlx5_hca_vport_context {
u16 qkey_violation_counter;
u16 pkey_violation_counter;
bool grh_required;
+ u8 num_plane;
};
-static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
-{
- return buf->direct.buf + offset;
-}
-
-extern struct workqueue_struct *mlx5_core_wq;
-
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
-static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
-{
- return pci_get_drvdata(pdev);
-}
-
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
@@ -914,162 +909,203 @@ static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}
-static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
+static inline u32 mlx5_base_mkey(const u32 key)
{
- return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+ return key & 0xffffff00u;
}
-static inline u32 mlx5_base_mkey(const u32 key)
+static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
- return key & 0xffffff00u;
+ return ((u32)1 << log_sz) << log_stride;
}
-int mlx5_cmd_init(struct mlx5_core_dev *dev);
-void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
+ u16 strides_offset,
+ struct mlx5_frag_buf_ctrl *fbc)
+{
+ fbc->frags = frags;
+ fbc->log_stride = log_stride;
+ fbc->log_sz = log_sz;
+ fbc->sz_m1 = (1 << fbc->log_sz) - 1;
+ fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
+ fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+ fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
+ struct mlx5_frag_buf_ctrl *fbc)
+{
+ mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
+}
+
+static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
+ u32 ix)
+{
+ unsigned int frag;
+
+ ix += fbc->strides_offset;
+ frag = ix >> fbc->log_frag_strides;
+
+ return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+}
+
+static inline u32
+mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
+{
+ u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
+
+ return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
+}
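A worked example may help here (editor's sketch, not part of this patch; the stride and size values are illustrative and assume PAGE_SHIFT == 12):

	struct mlx5_frag_buf_ctrl fbc;
	void *wqe;

	/* 64-byte strides (log_stride = 6), 1024 strides (log_sz = 10):
	 * each 4K fragment holds 64 strides, so stride index 100 resolves
	 * to fragment 100 >> 6 = 1, at byte offset (100 & 63) << 6 = 0x900.
	 */
	mlx5_init_fbc(buf->frags, 6, 10, &fbc);
	wqe = mlx5_frag_buf_get_wqe(&fbc, 100);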
+
+enum {
+ CMD_ALLOWED_OPCODE_ALL,
+};
+
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
+
+struct mlx5_async_ctx {
+ struct mlx5_core_dev *dev;
+ atomic_t num_inflight;
+ struct completion inflight_done;
+};
+
+struct mlx5_async_work;
+typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
+
+struct mlx5_async_work {
+ struct mlx5_async_ctx *ctx;
+ mlx5_async_cbk_t user_callback;
+ u16 opcode; /* cmd opcode */
+ u16 op_mod; /* cmd op_mod */
+ u8 throttle_locked:1;
+ u8 unpriv_locked:1;
+ void *out; /* pointer to the cmd output buffer */
+};
+
+void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
+ struct mlx5_async_ctx *ctx);
+void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
+int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+ void *out, int out_size, mlx5_async_cbk_t callback,
+ struct mlx5_async_work *work);
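A rough sketch of the intended async flow (editor's illustration, not part of this patch; `my_cmd`, `my_done` and the command buffers are hypothetical):

	struct my_cmd {
		struct mlx5_async_work work;
		u32 out[MLX5_ST_SZ_DW(alloc_pd_out)];
	};

	static void my_done(int status, struct mlx5_async_work *work)
	{
		struct my_cmd *cmd = container_of(work, struct my_cmd, work);
		/* completion path: check status, then parse cmd->out */
	}

	struct mlx5_async_ctx ctx;
	struct my_cmd *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	mlx5_cmd_init_async_ctx(dev, &ctx);
	mlx5_cmd_exec_cb(&ctx, in, sizeof(in), cmd->out, sizeof(cmd->out),
			 my_done, &cmd->work);
	/* ... */
	mlx5_cmd_cleanup_async_ctx(&ctx);	/* waits for in-flight callbacks */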
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
- void *out, int out_size, mlx5_cmd_cbk_t callback,
- void *context);
+
+#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \
+ ({ \
+ mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \
+ MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \
+ })
+
+#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \
+ ({ \
+ u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \
+ mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \
+ })
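For illustration, allocating a PD with these helpers might look like this (editor's sketch modeled on in-tree callers; error handling trimmed):

	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	err = mlx5_cmd_exec_inout(dev, alloc_pd, in, out);
	if (!err)
		pdn = MLX5_GET(alloc_pd_out, out, pd);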
+
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid);
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid);
+
+void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
+
+void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
+void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_buf *buf, int node);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
-struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- gfp_t flags, int npages);
-void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in);
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out);
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- u16 lwm, int is_srq);
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen,
- u32 *out, int outlen,
- mlx5_cmd_cbk_t callback, void *context);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
- u32 *mkey);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
- u16 opmod, u8 port);
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s32 npages);
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
-int mlx5_eq_init(struct mlx5_core_dev *dev);
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name,
- enum mlx5_eq_type type);
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_start_eqs(struct mlx5_core_dev *dev);
-int mlx5_stop_eqs(struct mlx5_core_dev *dev);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn);
+int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev);
+void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
+ void *data_out, int size_out, u16 reg_id, int arg,
+ int write, bool verbose);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
int node);
+
+static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
+
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
- struct mlx5_odp_caps *odp_caps);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
- u8 port_num, void *out, size_t sz);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
- u32 wq_num, u8 type, int error);
-#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
-void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl);
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
+bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
+ struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
+unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev);
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id);
-
-static inline int fw_initializing(struct mlx5_core_dev *dev)
-{
- return ioread32be(&dev->iseg->initializing) >> 31;
-}
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
@@ -1086,85 +1122,145 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
return mkey & 0xff;
}
-enum {
- MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
-};
+/* Async-atomic event notifier used by mlx5 core to forward FW
+ * events received from the event queue to mlx5 consumers.
+ * Optimises event queue dispatching.
+ */
+int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
-enum {
- MR_CACHE_LAST_STD_ENTRY = 20,
- MLX5_IMR_MTT_CACHE_ENTRY,
- MLX5_IMR_KSM_CACHE_ENTRY,
- MAX_MR_CACHE_ENTRIES
-};
+/* Async-atomic event notifier used for forwarding
+ * events from the event queue to the mlx5 events dispatcher,
+ * eswitch, clock and others.
+ */
+int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
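A hypothetical consumer of the atomic chain (sketch only; the handler runs in EQ interrupt context and must not sleep):

	static int my_event_handler(struct notifier_block *nb,
				    unsigned long event, void *data)
	{
		/* event carries an enum mlx5_event code, data the raw EQE */
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_event_handler,
	};

	mlx5_notifier_register(dev, &my_nb);
	/* ... */
	mlx5_notifier_unregister(dev, &my_nb);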
-enum {
- MLX5_INTERFACE_PROTOCOL_IB = 0,
- MLX5_INTERFACE_PROTOCOL_ETH = 1,
-};
-
-struct mlx5_interface {
- void * (*add)(struct mlx5_core_dev *dev);
- void (*remove)(struct mlx5_core_dev *dev, void *context);
- int (*attach)(struct mlx5_core_dev *dev, void *context);
- void (*detach)(struct mlx5_core_dev *dev, void *context);
- void (*event)(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param);
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void * (*get_dev)(void *context);
- int protocol;
- struct list_head list;
-};
+/* Blocking event notifier used to forward SW events, used for slow path */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data);
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
-int mlx5_register_interface(struct mlx5_interface *intf);
-void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ struct net_device *slave);
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+ u64 *values,
+ int num_counters,
+ size_t *offsets);
+struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i);
+
+#define mlx5_lag_for_each_peer_mdev(dev, peer, i) \
+ for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i); \
+ peer; \
+ peer = mlx5_lag_get_next_peer_mdev(dev, &i))
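Usage sketch for the iterator (illustrative; in-tree callers hold the devcom peer lock around the walk):

	struct mlx5_core_dev *peer;
	int i;

	mlx5_lag_for_each_peer_mdev(dev, peer, i) {
		/* act on each peer device bonded with dev */
	}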
+
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
-
-#ifndef CONFIG_MLX5_CORE_IPOIB
-static inline
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *))
+int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u32 log_alignment, u16 uid,
+ phys_addr_t *addr, u32 *obj_id);
+int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
+
+#ifdef CONFIG_PCIE_TPH
+int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index);
+int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index);
+#else
+static inline int mlx5_st_alloc_index(struct mlx5_core_dev *dev,
+ enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
+#endif
-static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
-#else
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *));
-void mlx5_rdma_netdev_free(struct net_device *netdev);
-#endif /* CONFIG_MLX5_CORE_IPOIB */
+struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
+void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
-struct mlx5_profile {
- u64 mask;
- u8 log_max_qp;
- struct {
- int size;
- int limit;
- } mr_cache[MAX_MR_CACHE_ENTRIES];
-};
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+ struct ib_device *device,
+ struct rdma_netdev_alloc_params *params);
enum {
MLX5_PCI_DEV_IS_VF = 1 << 0,
};
-static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
+{
+ return dev->coredev_type == MLX5_COREDEV_PF;
+}
+
+static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{
- return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+ return dev->coredev_type == MLX5_COREDEV_VF;
+}
+
+static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1,
+ const struct mlx5_core_dev *dev2)
+{
+ return dev1->coredev_type == dev2->coredev_type;
+}
+
+static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
+{
+ return dev->caps.embedded_cpu;
+}
+
+static inline bool
+mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
+{
+ return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
+}
+
+static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
+}
+
+static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.sriov.max_vfs;
+}
+
+static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+{
+ /* LACP owner conditions:
+ * 1) Function is physical.
+ * 2) LAG is supported by FW.
+ * 3) LAG is managed by driver (currently the only option).
+ */
+ return MLX5_CAP_GEN(dev, vport_group_manager) &&
+ (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+ MLX5_CAP_GEN(dev, lag_master);
+}
+
+static inline u16 mlx5_core_max_ec_vfs(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.sriov.max_ec_vfs;
}
static inline int mlx5_get_gid_table_len(u16 param)
@@ -1182,14 +1278,115 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
return !!(dev->priv.rl_table.max_size);
}
+static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
+ MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
+}
+
+static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
+}
+
+static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_mp_slave(dev) ||
+ mlx5_core_is_mp_master(dev);
+}
+
+static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_mp_enabled(dev))
+ return 1;
+
+ return MLX5_CAP_GEN(dev, native_port_num);
+}
+
+static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
+{
+ int idx = MLX5_CAP_GEN(dev, native_port_num);
+
+ if (idx >= 1 && idx <= MLX5_MAX_PORTS)
+ return idx - 1;
+ else
+ return PCI_FUNC(dev->pdev->devfn);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
-static inline const struct cpumask *
-mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
+
+static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
{
- return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ return MLX5_CAP_GEN(dev, roce);
+
+ /* If RoCE cap is read-only in FW, get RoCE state from devlink
+ * in order to support RoCE enable/disable feature
+ */
+ return mlx5_is_roce_on(dev);
}
+#ifdef CONFIG_MLX5_MACSEC
+static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
+{
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+ return false;
+
+ return true;
+}
+
+#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+
+static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
+{
+ if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) &
+ NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) ||
+ !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs)
+ return false;
+
+ return true;
+}
+#endif
+
+enum {
+ MLX5_OCTWORD = 16,
+};
+
+bool mlx5_wc_support_get(struct mlx5_core_dev *mdev);
+
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
+#define MLX5_SW_IMAGE_GUID_MAX_BYTES 9
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
new file mode 100644
index 000000000000..3705a382276b
--- /dev/null
+++ b/include/linux/mlx5/eq.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef MLX5_CORE_EQ_H
+#define MLX5_CORE_EQ_H
+
+#define MLX5_NUM_CMD_EQE (32)
+#define MLX5_NUM_ASYNC_EQE (0x1000)
+#define MLX5_NUM_SPARE_EQE (0x80)
+
+struct mlx5_eq;
+struct mlx5_irq;
+struct mlx5_core_dev;
+
+struct mlx5_eq_param {
+ int nent;
+ u64 mask[4];
+ struct mlx5_irq *irq;
+};
+
+struct mlx5_eq *
+mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param);
+int
+mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ struct notifier_block *nb);
+void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ struct notifier_block *nb);
+
+struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
+void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
+
+/* The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create EQs with MLX5_NUM_SPARE_EQE extra entries,
+ * so we must update our consumer index at
+ * least that often.
+ *
+ * mlx5_eq_update_cc() must be called for every EQE in the EQ IRQ handler.
+ */
+static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
+{
+ if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
+ mlx5_eq_update_ci(eq, cc, 0);
+ cc = 0;
+ }
+ return cc;
+}
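An illustrative EQ drain loop built on these helpers (editor's sketch, loosely modeled on the in-tree completion EQ handler):

	static void my_eq_poll(struct mlx5_eq *eq)
	{
		struct mlx5_eqe *eqe;
		u32 cc = 0;

		while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
			/* dispatch on eqe->type here */
			cc = mlx5_eq_update_cc(eq, ++cc);
		}
		mlx5_eq_update_ci(eq, cc, true);	/* fold in cc and re-arm */
	}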
+
+struct mlx5_nb {
+ struct notifier_block nb;
+ u8 event_type;
+};
+
+#define mlx5_nb_cof(ptr, type, member) \
+ (container_of(container_of(ptr, struct mlx5_nb, nb), type, member))
+
+#define MLX5_NB_INIT(name, handler, event) do { \
+ (name)->nb.notifier_call = handler; \
+ (name)->event_type = MLX5_EVENT_TYPE_##event; \
+} while (0)
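Putting the two pieces together (hypothetical handler; PORT_CHANGE stands in for any MLX5_EVENT_TYPE_* suffix):

	struct mlx5_nb port_nb;

	MLX5_NB_INIT(&port_nb, my_port_event_handler, PORT_CHANGE);
	mlx5_eq_notifier_register(dev, &port_nb);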
+
+#endif /* MLX5_CORE_EQ_H */
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
new file mode 100644
index 000000000000..67256e776566
--- /dev/null
+++ b/include/linux/mlx5/eswitch.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _MLX5_ESWITCH_
+#define _MLX5_ESWITCH_
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include <net/devlink.h>
+
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
+enum {
+ MLX5_ESWITCH_LEGACY,
+ MLX5_ESWITCH_OFFLOADS
+};
+
+enum {
+ REP_ETH,
+ REP_IB,
+ NUM_REP_TYPES,
+};
+
+enum {
+ REP_UNREGISTERED,
+ REP_REGISTERED,
+ REP_LOADED,
+};
+
+enum mlx5_switchdev_event {
+ MLX5_SWITCHDEV_EVENT_PAIR,
+ MLX5_SWITCHDEV_EVENT_UNPAIR,
+};
+
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_ops {
+ int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch_rep *rep);
+ void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+ int (*event)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ enum mlx5_switchdev_event event,
+ void *data);
+};
+
+struct mlx5_eswitch_rep_data {
+ void *priv;
+ atomic_t state;
+};
+
+struct mlx5_eswitch_rep {
+ struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES];
+ u16 vport;
+ u16 vlan;
+ /* Only IB rep is using vport_index */
+ u16 vport_index;
+ u32 vlan_refcount;
+ struct mlx5_eswitch *esw;
+};
+
+void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
+ const struct mlx5_eswitch_rep_ops *ops,
+ u8 rep_type);
+void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
+void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
+ u16 vport_num,
+ u8 rep_type);
+struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
+ u16 vport_num);
+void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ struct mlx5_eswitch *from_esw,
+ struct mlx5_eswitch_rep *rep, u32 sqn);
+
+#ifdef CONFIG_MLX5_ESWITCH
+enum devlink_eswitch_encap_mode
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
+
+bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
+bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
+
+/* Reg C0 usage:
+ * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT BITS(12) | ESW_REG_C0_OBJ(16) >
+ *
+ * The highest 4 bits of reg C0 hold the PF_NUM (range 0-15), followed by 12
+ * bits of unique non-zero vport id (range 1-4095). The rest (lowest 16 bits)
+ * is left for user data objects managed by a common mapping context.
+ * PFNUM + VPORT comprise the SOURCE_PORT matching.
+ */
+#define ESW_VPORT_BITS 12
+#define ESW_PFNUM_BITS 4
+#define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS)
+#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_REG_C0_USER_DATA_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_REG_C0_USER_DATA_METADATA_MASK GENMASK(ESW_REG_C0_USER_DATA_METADATA_BITS - 1, 0)
+
+static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS);
+}
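Worked example of the layout above (editor's sketch; the PF and vport values are illustrative):

	/* PF_NUM 1, vport 5: packed into the top 16 bits, leaving the low
	 * 16 bits for mapped user data objects.
	 */
	u32 metadata = ((1 << ESW_VPORT_BITS) | 5) << ESW_SOURCE_PORT_METADATA_OFFSET;
	u32 mask = mlx5_eswitch_get_vport_metadata_mask();	/* GENMASK(31, 16) */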
+
+u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
+ u16 vport_num);
+u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ u16 vport_num);
+
+/* Reg C1 usage:
+ * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
+ *
+ * The highest bit is reserved as a marker bit for other offloads, the next
+ * 12 bits of reg C1 are the encapsulation tunnel id, the next 11 bits are
+ * the encapsulation tunnel options, and the lowest 8 bits are used for the
+ * zone id.
+ *
+ * Zone id is used to restore CT flow when packet misses on chain.
+ *
+ * Tunnel id and options are used together to restore the tunnel info metadata
+ * on miss and to support inner header rewrite by means of implicit chain 0
+ * flows.
+ */
+#define ESW_RESERVED_BITS 1
+#define ESW_ZONE_ID_BITS 8
+#define ESW_TUN_OPTS_BITS 11
+#define ESW_TUN_ID_BITS 12
+#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
+#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
+#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
+#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
+#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
+#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
+/* 0x7FF is a reserved mapping */
+#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+ ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+ GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+ ESW_TUN_OPTS_OFFSET + 1)
+
+/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */
+#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+#define ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK \
+ GENMASK(31 - ESW_RESERVED_BITS, ESW_ZONE_ID_BITS)
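Decoding sketch for a reg C1 value per the layout above (illustrative only):

	u32 zone_id  = reg_c1 & ESW_ZONE_ID_MASK;
	u32 tun_opts = (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET;
	u32 tun_id   = (reg_c1 & ESW_TUN_MASK) >>
		       (ESW_TUN_OPTS_OFFSET + ESW_TUN_OPTS_BITS);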
+
+u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
+
+#else /* CONFIG_MLX5_ESWITCH */
+
+static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
+{
+ return MLX5_ESWITCH_LEGACY;
+}
+
+static inline enum devlink_eswitch_encap_mode
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
+{
+ return DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+}
+
+static inline bool
+mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
+{
+ return false;
+};
+
+static inline bool
+mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
+{
+ return false;
+};
+
+static inline u32
+mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return 0;
+};
+
+static inline u32
+mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return 0;
+}
+
+static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_MLX5_ESWITCH */
+
+static inline bool is_mdev_legacy_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_LEGACY;
+}
+
+static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
+}
+
+/* The returned number is valid only when the dev is eswitch manager. */
+static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_ecpf_esw_manager(dev) ?
+ MLX5_VPORT_ECPF : MLX5_VPORT_PF;
+}
+
+#endif
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b25e7baa273e..9cadb1d5e6df 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -38,12 +38,40 @@
#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
+#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 16
+#define MLX5_FS_MAX_POOL_SIZE BIT(30)
+
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_NONE,
+ MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ MLX5_FLOW_DESTINATION_TYPE_TIR,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK,
+ MLX5_FLOW_DESTINATION_TYPE_PORT,
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+ MLX5_FLOW_DESTINATION_TYPE_RANGE,
+ MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
+};
+
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS = 1 << 19,
};
enum {
- MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
+ MLX5_FLOW_TABLE_TERMINATION = BIT(2),
+ MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
+ MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
+ MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
+ MLX5_FLOW_TABLE_OTHER_ESWITCH = BIT(6),
};
#define LEFTOVERS_RULE_NUM 2
@@ -58,57 +86,155 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR,
+ MLX5_FLOW_NAMESPACE_FDB_BYPASS,
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
+ MLX5_FLOW_NAMESPACE_EGRESS,
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_RX,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
+ MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX,
+ MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX,
+};
+
+enum {
+ FDB_DROP_ROOT,
+ FDB_BYPASS_PATH,
+ FDB_CRYPTO_INGRESS,
+ FDB_TC_OFFLOAD,
+ FDB_FT_OFFLOAD,
+ FDB_TC_MISS,
+ FDB_BR_OFFLOAD,
+ FDB_SLOW_PATH,
+ FDB_CRYPTO_EGRESS,
+ FDB_PER_VPORT,
};
+enum fs_flow_table_type {
+ FS_FT_NIC_RX = 0x0,
+ FS_FT_NIC_TX = 0x1,
+ FS_FT_ESW_EGRESS_ACL = 0x2,
+ FS_FT_ESW_INGRESS_ACL = 0x3,
+ FS_FT_FDB = 0X4,
+ FS_FT_SNIFFER_RX = 0X5,
+ FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_RDMA_RX = 0X7,
+ FS_FT_RDMA_TX = 0X8,
+ FS_FT_PORT_SEL = 0X9,
+ FS_FT_FDB_RX = 0xa,
+ FS_FT_FDB_TX = 0xb,
+ FS_FT_RDMA_TRANSPORT_RX = 0xd,
+ FS_FT_RDMA_TRANSPORT_TX = 0xe,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
+};
+
+struct mlx5_pkt_reformat;
+struct mlx5_modify_hdr;
+struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
struct mlx5_flow_handle;
+enum {
+ FLOW_CONTEXT_HAS_TAG = BIT(0),
+ FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
+};
+
+struct mlx5_flow_context {
+ u32 flags;
+ u32 flow_tag;
+ u32 flow_source;
+};
+
struct mlx5_flow_spec {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
+ struct mlx5_flow_context flow_context;
+};
+
+enum {
+ MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0),
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
+};
+
+enum mlx5_flow_dest_range_field {
+ MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
};
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
u32 tir_num;
+ u32 ft_num;
struct mlx5_flow_table *ft;
- u32 vport_num;
- struct mlx5_fc *counter;
+ struct mlx5_fc *counter;
+ struct {
+ u16 num;
+ u16 vhca_id;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u8 flags;
+ } vport;
+ struct {
+ struct mlx5_flow_table *hit_ft;
+ struct mlx5_flow_table *miss_ft;
+ enum mlx5_flow_dest_range_field field;
+ u32 min;
+ u32 max;
+ } range;
+ u32 sampler_id;
};
};
+struct mod_hdr_tbl {
+ struct mutex lock; /* protects hlist */
+ DECLARE_HASHTABLE(hlist, 8);
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
-
-struct mlx5_flow_table *
-mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- int max_num_groups,
- u32 level,
- u32 flags);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport_idx);
struct mlx5_flow_table_attr {
int prio;
int max_fte;
u32 level;
u32 flags;
+ u16 uid;
+ u16 vport;
+ u16 esw_owner_vhca_id;
+ struct mlx5_flow_table *next_ft;
+
+ struct {
+ int max_num_groups;
+ int num_reserved_entries;
+ } autogroup;
};
struct mlx5_flow_table *
@@ -116,10 +242,12 @@ mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr);
+
+struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- u32 level, u16 vport);
+ struct mlx5_flow_table_attr *ft_attr, u16 vport);
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
struct mlx5_flow_namespace *ns,
int prio, u32 level);
@@ -135,38 +263,119 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_exe_aso {
+ u32 object_id;
+ int base_id;
+ u8 type;
+ u8 return_reg_id;
+ union {
+ u32 ctrl_data;
+ struct {
+ u8 meter_idx;
+ u8 init_color;
+ } flow_meter;
+ };
+};
+
+struct mlx5_fs_vlan {
+ u16 ethtype;
+ u16 vid;
+ u8 prio;
+};
+
+#define MLX5_FS_VLAN_DEPTH 2
+
+enum {
+ FLOW_ACT_NO_APPEND = BIT(0),
+ FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
+};
+
struct mlx5_flow_act {
u32 action;
- u32 flow_tag;
- u32 encap_id;
- u32 modify_id;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_flow_act_crypto_params {
+ u8 type;
+ u32 obj_id;
+ } crypto;
+ u32 flags;
+ struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
+ struct ib_counters *counters;
+ struct mlx5_flow_group *fg;
+ struct mlx5_exe_aso exe_aso;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
- struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
- MLX5_FS_DEFAULT_FLOW_TAG, 0, 0}
+ struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
+ .flags = 0, }
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
+ const struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
- int dest_num);
+ int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *new_dest,
struct mlx5_flow_destination *old_dest);
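A minimal flow-rule sketch against this API (editor's illustration; `ft` and `tirn` are assumed to exist, and real callers populate the match criteria/value with MLX5_SET before adding the rule):

	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
		.tir_num = tirn,
	};
	struct mlx5_flow_spec *spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	struct mlx5_flow_handle *rule;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule))
		err = PTR_ERR(rule);
	kvfree(spec);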
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+struct mlx5_fc *mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size);
+void mlx5_fc_local_destroy(struct mlx5_fc *counter);
+void mlx5_fc_local_get(struct mlx5_fc *counter);
+void mlx5_fc_local_put(struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
+void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
+int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
+ u64 *packets, u64 *bytes);
+u32 mlx5_fc_id(struct mlx5_fc *counter);
+
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 ns_type, u8 num_actions,
+ void *modify_actions);
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_modify_hdr *modify_hdr);
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask);
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
+
+struct mlx5_pkt_reformat_params {
+ int type;
+ u8 param_0;
+ u8 param_1;
+ size_t size;
+ void *data;
+};
+
+struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type ns_type);
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat *reformat);
+
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
+
+struct mlx5_flow_root_namespace *
+mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type);
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+ struct mlx5_core_dev *new_dev,
+ enum fs_flow_table_type table_type);
#endif
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
new file mode 100644
index 000000000000..bc5125bc0561
--- /dev/null
+++ b/include/linux/mlx5/fs_helpers.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_HELPERS_
+#define _MLX5_FS_HELPERS_
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5_FS_IPV4_VERSION 4
+#define MLX5_FS_IPV6_VERSION 6
+
+static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
+ const u32 *match_c,
+ const u32 *match_v, int version)
+{
+ int match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+
+ if (!match_ipv) {
+ u16 ethertype;
+
+ switch (version) {
+ case MLX5_FS_IPV4_VERSION:
+ ethertype = ETH_P_IP;
+ break;
+ case MLX5_FS_IPV6_VERSION:
+ ethertype = ETH_P_IPV6;
+ break;
+ default:
+ return false;
+ }
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
+ ethertype) == 0xffff &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v,
+ ethertype) == ethertype;
+ }
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
+ ip_version) == 0xf &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v,
+ ip_version) == version;
+}
+
+static inline bool
+mlx5_fs_is_outer_ipv4_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
+ MLX5_FS_IPV4_VERSION);
+}
+
+static inline bool
+mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
+ MLX5_FS_IPV6_VERSION);
+}
+
+#endif
diff --git a/include/linux/mlx5/macsec.h b/include/linux/mlx5/macsec.h
new file mode 100644
index 000000000000..f7ff4c2a95d0
--- /dev/null
+++ b/include/linux/mlx5/macsec.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef MLX5_MACSEC_H
+#define MLX5_MACSEC_H
+
+#ifdef CONFIG_MLX5_MACSEC
+struct mlx5_macsec_event_data {
+ struct mlx5_macsec_fs *macsec_fs;
+ void *macdev;
+ u32 fs_id;
+ bool is_tx;
+};
+
+int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs);
+
+void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list);
+
+void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs, bool is_tx);
+
+void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list, bool is_tx);
+
+#endif
+#endif /* MLX5_MACSEC_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index b7338a21c780..e9dcd4bf355d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -60,18 +60,60 @@ enum {
MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb,
MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20,
+ MLX5_EVENT_TYPE_CODING_FPGA_QP_ERROR = 0x21
};
enum {
- MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
- MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
- MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
- MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS = 0x1,
+ MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
+ MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
+ MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC = 0x15,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2 = 0x20,
+ MLX5_SET_HCA_CAP_OP_MOD_PORT_SELECTION = 0x25,
};
enum {
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
- MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
+ MLX5_SHARED_RESOURCE_UID = 0xffff,
+};
+
+enum {
+ MLX5_OBJ_TYPE_SW_ICM = 0x0008,
+ MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
+ MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
+ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
+ MLX5_OBJ_TYPE_STC = 0x0040,
+ MLX5_OBJ_TYPE_RTC = 0x0041,
+ MLX5_OBJ_TYPE_STE = 0x0042,
+ MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN = 0x0043,
+ MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
+ MLX5_OBJ_TYPE_MKEY = 0xff01,
+ MLX5_OBJ_TYPE_QP = 0xff02,
+ MLX5_OBJ_TYPE_PSV = 0xff03,
+ MLX5_OBJ_TYPE_RMP = 0xff04,
+ MLX5_OBJ_TYPE_XRC_SRQ = 0xff05,
+ MLX5_OBJ_TYPE_RQ = 0xff06,
+ MLX5_OBJ_TYPE_SQ = 0xff07,
+ MLX5_OBJ_TYPE_TIR = 0xff08,
+ MLX5_OBJ_TYPE_TIS = 0xff09,
+ MLX5_OBJ_TYPE_DCT = 0xff0a,
+ MLX5_OBJ_TYPE_XRQ = 0xff0b,
+ MLX5_OBJ_TYPE_RQT = 0xff0e,
+ MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f,
+ MLX5_OBJ_TYPE_CQ = 0xff10,
+ MLX5_OBJ_TYPE_FT_ALIAS = 0xff15,
+};
+
+enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
+ MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
+ (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
};
enum {
@@ -87,11 +129,22 @@ enum {
MLX5_CMD_OP_QUERY_ISSI = 0x10a,
MLX5_CMD_OP_SET_ISSI = 0x10b,
MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d,
+ MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111,
+ MLX5_CMD_OP_ALLOC_SF = 0x113,
+ MLX5_CMD_OP_DEALLOC_SF = 0x114,
+ MLX5_CMD_OP_SUSPEND_VHCA = 0x115,
+ MLX5_CMD_OP_RESUME_VHCA = 0x116,
+ MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE = 0x117,
+ MLX5_CMD_OP_SAVE_VHCA_STATE = 0x118,
+ MLX5_CMD_OP_LOAD_VHCA_STATE = 0x119,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
+ MLX5_CMD_OP_ALLOC_MEMIC = 0x205,
+ MLX5_CMD_OP_DEALLOC_MEMIC = 0x206,
+ MLX5_CMD_OP_MODIFY_MEMIC = 0x207,
MLX5_CMD_OP_CREATE_EQ = 0x301,
MLX5_CMD_OP_DESTROY_EQ = 0x302,
MLX5_CMD_OP_QUERY_EQ = 0x303,
@@ -131,6 +184,15 @@ enum {
MLX5_CMD_OP_DESTROY_XRQ = 0x718,
MLX5_CMD_OP_QUERY_XRQ = 0x719,
MLX5_CMD_OP_ARM_XRQ = 0x71a,
+ MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
+ MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
+ MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
+ MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729,
+ MLX5_CMD_OP_MODIFY_XRQ = 0x72a,
+ MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA = 0x732,
+ MLX5_CMD_OPCODE_CREATE_ESW_VPORT = 0x733,
+ MLX5_CMD_OPCODE_DESTROY_ESW_VPORT = 0x734,
+ MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
@@ -143,11 +205,14 @@ enum {
MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
+ MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f,
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
- MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
+ MLX5_CMD_OP_SET_MONITOR_COUNTER = 0x774,
+ MLX5_CMD_OP_ARM_MONITOR_COUNTER = 0x775,
+ MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,
@@ -229,18 +294,52 @@ enum {
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
- MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
- MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
+ MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT = 0x93f,
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
+ MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
MLX5_CMD_OP_FPGA_CREATE_QP = 0x960,
MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961,
MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963,
MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
+ MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03,
+ MLX5_CMD_OP_CREATE_UCTX = 0xa04,
+ MLX5_CMD_OP_DESTROY_UCTX = 0xa06,
+ MLX5_CMD_OP_CREATE_UMEM = 0xa08,
+ MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
+ MLX5_CMD_OP_SYNC_STEERING = 0xb00,
+ MLX5_CMD_OP_PSP_GEN_SPI = 0xb10,
+ MLX5_CMD_OP_PSP_ROTATE_KEY = 0xb11,
+ MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
+ MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
+ MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
+ MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
+ MLX5_CMD_OP_GENERATE_WQE = 0xb17,
+ MLX5_CMD_OPCODE_QUERY_VUID = 0xb22,
MLX5_CMD_OP_MAX
};
+/* Valid range for general commands that don't work over an object */
+enum {
+ MLX5_CMD_OP_GENERAL_START = 0xb00,
+ MLX5_CMD_OP_GENERAL_END = 0xd00,
+};
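/* Illustrative sketch (not part of this header): a range check matching the
 * comment above, e.g. for deciding whether an opcode is a general command
 * rather than an object-specific one.
 */
static inline bool mlx5_cmd_opcode_is_general(u16 opcode)
{
	return opcode >= MLX5_CMD_OP_GENERAL_START &&
	       opcode < MLX5_CMD_OP_GENERAL_END;
}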
+
+enum {
+ MLX5_FT_NIC_RX_2_NIC_RX_RDMA = BIT(0),
+ MLX5_FT_NIC_TX_RDMA_2_NIC_TX = BIT(1),
+};
+
+enum {
+ MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT = 0x1,
+};
+
struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
@@ -268,7 +367,11 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_gre_protocol[0x1];
u8 outer_gre_key[0x1];
u8 outer_vxlan_vni[0x1];
- u8 reserved_at_1a[0x5];
+ u8 outer_geneve_vni[0x1];
+ u8 outer_geneve_oam[0x1];
+ u8 outer_geneve_protocol_type[0x1];
+ u8 outer_geneve_opt_len[0x1];
+ u8 source_vhca_port[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@@ -295,10 +398,48 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_dport[0x1];
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
- u8 reserved_at_40[0x1a];
+
+ u8 geneve_tlv_option_0_data[0x1];
+ u8 geneve_tlv_option_0_exist[0x1];
+ u8 reserved_at_42[0x3];
+ u8 outer_first_mpls_over_udp[0x4];
+ u8 outer_first_mpls_over_gre[0x4];
+ u8 inner_first_mpls[0x4];
+ u8 outer_first_mpls[0x4];
+ u8 reserved_at_55[0x2];
+ u8 outer_esp_spi[0x1];
+ u8 reserved_at_58[0x2];
u8 bth_dst_qp[0x1];
- u8 reserved_at_5b[0x25];
+ u8 reserved_at_5b[0x5];
+
+ u8 reserved_at_60[0x18];
+ u8 metadata_reg_c_7[0x1];
+ u8 metadata_reg_c_6[0x1];
+ u8 metadata_reg_c_5[0x1];
+ u8 metadata_reg_c_4[0x1];
+ u8 metadata_reg_c_3[0x1];
+ u8 metadata_reg_c_2[0x1];
+ u8 metadata_reg_c_1[0x1];
+ u8 metadata_reg_c_0[0x1];
+};
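/* Illustrative note (not part of this header): throughout this file,
 * "u8 name[0xN]" in an mlx5_ifc_*_bits layout declares an N-bit big-endian
 * field, not a byte array. Fields are read and written with the
 * MLX5_GET()/MLX5_SET() macros from linux/mlx5/device.h, which derive the
 * bit offset from the member's position in the layout. A minimal sketch,
 * assuming "caps" points at a queried capability buffer:
 */
static bool ft_supports_metadata_reg_c_0(void *caps)
{
	return MLX5_GET(flow_table_fields_supported, caps, metadata_reg_c_0);
}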
+
+/* Table 2170 - Flow Table Fields Supported 2 Format */
+struct mlx5_ifc_flow_table_fields_supported_2_bits {
+ u8 inner_l4_type_ext[0x1];
+ u8 outer_l4_type_ext[0x1];
+ u8 inner_l4_type[0x1];
+ u8 outer_l4_type[0x1];
+ u8 reserved_at_4[0xa];
+ u8 bth_opcode[0x1];
+ u8 reserved_at_f[0x1];
+ u8 tunnel_header_0_1[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0xf];
+ u8 ipsec_next_header[0x1];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -309,22 +450,68 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 encap[0x1];
+ u8 reformat[0x1];
u8 decap[0x1];
- u8 reserved_at_9[0x17];
+ u8 reset_root_to_default[0x1];
+ u8 pop_vlan[0x1];
+ u8 push_vlan[0x1];
+ u8 reserved_at_c[0x1];
+ u8 pop_vlan_2[0x1];
+ u8 push_vlan_2[0x1];
+ u8 reformat_and_vlan_action[0x1];
+ u8 reserved_at_10[0x1];
+ u8 sw_owner[0x1];
+ u8 reformat_l3_tunnel_to_l2[0x1];
+ u8 reformat_l2_to_l3_tunnel[0x1];
+ u8 reformat_and_modify_action[0x1];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_16[0x1];
+ u8 table_miss_action_domain[0x1];
+ u8 termination_table[0x1];
+ u8 reformat_and_fwd_to_table[0x1];
+ u8 reserved_at_1a[0x2];
+ u8 ipsec_encrypt[0x1];
+ u8 ipsec_decrypt[0x1];
+ u8 sw_owner_v2[0x1];
+ u8 reserved_at_1f[0x1];

- u8 reserved_at_20[0x2];
+ u8 termination_table_raw_traffic[0x1];
+ u8 reserved_at_21[0x1];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
- u8 reserved_at_40[0x20];
+ u8 reformat_add_esp_trasport[0x1];
+ u8 reformat_l2_to_l3_esp_tunnel[0x1];
+ u8 reformat_add_esp_transport_over_udp[0x1];
+ u8 reformat_del_esp_trasport[0x1];
+ u8 reformat_l3_esp_tunnel_to_l2[0x1];
+ u8 reformat_del_esp_transport_over_udp[0x1];
+ u8 execute_aso[0x1];
+ u8 reserved_at_47[0x19];

- u8 reserved_at_60[0x18];
+ u8 reformat_l2_to_l3_psp_tunnel[0x1];
+ u8 reformat_l3_psp_tunnel_to_l2[0x1];
+ u8 reformat_insert[0x1];
+ u8 reformat_remove[0x1];
+ u8 macsec_encrypt[0x1];
+ u8 macsec_decrypt[0x1];
+ u8 psp_encrypt[0x1];
+ u8 psp_decrypt[0x1];
+ u8 reformat_add_macsec[0x1];
+ u8 reformat_remove_macsec[0x1];
+ u8 reparse[0x1];
+ u8 reserved_at_6b[0x1];
+ u8 cross_vhca_object[0x1];
+ u8 reformat_l2_to_l3_audp_tunnel[0x1];
+ u8 reformat_l3_audp_tunnel_to_l2[0x1];
+ u8 ignore_flow_level_rtc_valid[0x1];
+ u8 reserved_at_70[0x8];
u8 log_max_ft_num[0x8];
- u8 reserved_at_80[0x18];
+ u8 reserved_at_80[0x10];
+ u8 log_max_flow_counter[0x8];
u8 log_max_destination[0x8];
u8 reserved_at_a0[0x18];
@@ -357,12 +544,33 @@ struct mlx5_ifc_ipv6_layout_bits {
u8 ipv6[16][0x8];
};
+struct mlx5_ifc_ipv6_simple_layout_bits {
+ u8 ipv6_127_96[0x20];
+ u8 ipv6_95_64[0x20];
+ u8 ipv6_63_32[0x20];
+ u8 ipv6_31_0[0x20];
+};
+
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_simple_layout_bits ipv6_simple_layout;
struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
u8 reserved_at_0[0x80];
};
+enum {
+ MLX5_PACKET_L4_TYPE_NONE,
+ MLX5_PACKET_L4_TYPE_TCP,
+ MLX5_PACKET_L4_TYPE_UDP,
+};
+
+enum {
+ MLX5_PACKET_L4_TYPE_EXT_NONE,
+ MLX5_PACKET_L4_TYPE_EXT_TCP,
+ MLX5_PACKET_L4_TYPE_EXT_UDP,
+ MLX5_PACKET_L4_TYPE_EXT_ICMP,
+};
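/* Illustrative sketch (not part of this header): the two enums above are
 * matched through the l4_type/l4_type_ext fields added to
 * mlx5_ifc_fte_match_set_lyr_2_4_bits below, with the usual mask/value split
 * of flow-entry matching, e.g.:
 *
 *	MLX5_SET(fte_match_set_lyr_2_4, match_criteria, l4_type, 0x3);
 *	MLX5_SET(fte_match_set_lyr_2_4, match_value, l4_type,
 *		 MLX5_PACKET_L4_TYPE_TCP);
 */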
+
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -388,7 +596,11 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_sport[0x10];
u8 tcp_dport[0x10];
- u8 reserved_at_c0[0x18];
+ u8 l4_type[0x2];
+ u8 l4_type_ext[0x4];
+ u8 reserved_at_c6[0xa];
+ u8 ipv4_ihl[0x4];
+ u8 reserved_at_d4[0x4];
u8 ttl_hoplimit[0x8];
u8 udp_sport[0x10];
@@ -399,11 +611,25 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
};
+struct mlx5_ifc_nvgre_key_bits {
+ u8 hi[0x18];
+ u8 lo[0x8];
+};
+
+union mlx5_ifc_gre_key_bits {
+ struct mlx5_ifc_nvgre_key_bits nvgre;
+ u8 key[0x20];
+};
+
struct mlx5_ifc_fte_match_set_misc_bits {
- u8 reserved_at_0[0x8];
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 source_vhca_port[0x4];
u8 source_sqn[0x18];
- u8 reserved_at_20[0x10];
+ u8 source_eswitch_owner_vhca_id[0x10];
u8 source_port[0x10];
u8 outer_second_prio[0x3];
@@ -420,13 +646,15 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
- u8 gre_key_h[0x18];
- u8 gre_key_l[0x8];
+ union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
- u8 reserved_at_b8[0x8];
+ u8 bth_opcode[0x8];
- u8 reserved_at_c0[0x20];
+ u8 geneve_vni[0x18];
+ u8 reserved_at_d8[0x6];
+ u8 geneve_tlv_option_0_exist[0x1];
+ u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc];
u8 outer_ipv6_flow_label[0x14];
@@ -434,9 +662,139 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
- u8 reserved_at_120[0x28];
+ u8 reserved_at_120[0xa];
+ u8 geneve_opt_len[0x6];
+ u8 geneve_protocol_type[0x10];
+
+ u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
- u8 reserved_at_160[0xa0];
+ u8 inner_esp_spi[0x20];
+ u8 outer_esp_spi[0x20];
+ u8 reserved_at_1a0[0x60];
+};
+
+struct mlx5_ifc_fte_match_mpls_bits {
+ u8 mpls_label[0x14];
+ u8 mpls_exp[0x3];
+ u8 mpls_s_bos[0x1];
+ u8 mpls_ttl[0x8];
+};
+
+struct mlx5_ifc_fte_match_set_misc2_bits {
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
+
+ u8 metadata_reg_c_7[0x20];
+
+ u8 metadata_reg_c_6[0x20];
+
+ u8 metadata_reg_c_5[0x20];
+
+ u8 metadata_reg_c_4[0x20];
+
+ u8 metadata_reg_c_3[0x20];
+
+ u8 metadata_reg_c_2[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_a[0x20];
+
+ u8 psp_syndrome[0x8];
+ u8 macsec_syndrome[0x8];
+ u8 ipsec_syndrome[0x8];
+ u8 ipsec_next_header[0x8];
+
+ u8 reserved_at_1c0[0x40];
+};
+
+struct mlx5_ifc_fte_match_set_misc3_bits {
+ u8 inner_tcp_seq_num[0x20];
+
+ u8 outer_tcp_seq_num[0x20];
+
+ u8 inner_tcp_ack_num[0x20];
+
+ u8 outer_tcp_ack_num[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 outer_vxlan_gpe_vni[0x18];
+
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_b0[0x10];
+
+ u8 icmp_header_data[0x20];
+
+ u8 icmpv6_header_data[0x20];
+
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 icmpv6_type[0x8];
+ u8 icmpv6_code[0x8];
+
+ u8 geneve_tlv_option_0_data[0x20];
+
+ u8 gtpu_teid[0x20];
+
+ u8 gtpu_msg_type[0x8];
+ u8 gtpu_msg_flags[0x8];
+ u8 reserved_at_170[0x10];
+
+ u8 gtpu_dw_2[0x20];
+
+ u8 gtpu_first_ext_dw_0[0x20];
+
+ u8 gtpu_dw_0[0x20];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc4_bits {
+ u8 prog_sample_field_value_0[0x20];
+
+ u8 prog_sample_field_id_0[0x20];
+
+ u8 prog_sample_field_value_1[0x20];
+
+ u8 prog_sample_field_id_1[0x20];
+
+ u8 prog_sample_field_value_2[0x20];
+
+ u8 prog_sample_field_id_2[0x20];
+
+ u8 prog_sample_field_value_3[0x20];
+
+ u8 prog_sample_field_id_3[0x20];
+
+ u8 reserved_at_100[0x100];
+};
+
+struct mlx5_ifc_fte_match_set_misc5_bits {
+ u8 macsec_tag_0[0x20];
+
+ u8 macsec_tag_1[0x20];
+
+ u8 macsec_tag_2[0x20];
+
+ u8 macsec_tag_3[0x20];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 tunnel_header_1[0x20];
+
+ u8 tunnel_header_2[0x20];
+
+ u8 tunnel_header_3[0x20];
+
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -471,7 +829,7 @@ struct mlx5_ifc_ads_bits {
u8 reserved_at_2[0xe];
u8 pkey_index[0x10];
- u8 reserved_at_20[0x8];
+ u8 plane_index[0x8];
u8 grh[0x1];
u8 mlid[0x7];
u8 rlid[0x10];
@@ -501,7 +859,7 @@ struct mlx5_ifc_ads_bits {
u8 dei_cfi[0x1];
u8 eth_prio[0x3];
u8 sl[0x4];
- u8 port[0x8];
+ u8 vhca_port_num[0x8];
u8 rmac_47_32[0x10];
u8 rmac_31_0[0x20];
@@ -511,25 +869,99 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1];
u8 nic_rx_multi_path_tirs_fts[0x1];
u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
- u8 reserved_at_3[0x1fd];
+ u8 reserved_at_3[0x4];
+ u8 sw_owner_reformat_supported[0x1];
+ u8 reserved_at_8[0x18];
+
+ u8 encap_general_header[0x1];
+ u8 reserved_at_21[0xa];
+ u8 log_max_packet_reformat_context[0x5];
+ u8 reserved_at_30[0x6];
+ u8 max_encap_header_size[0xa];
+ u8 reserved_at_40[0x1c0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
- u8 reserved_at_400[0x200];
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
- u8 reserved_at_a00[0x200];
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x7200];
+ u8 reserved_at_e00[0x600];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive;
+
+ u8 reserved_at_1480[0x80];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
+
+ u8 reserved_at_1580[0x280];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma;
+
+ u8 reserved_at_1880[0x780];
+
+ u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
+
+ u8 sw_steering_nic_tx_action_drop_icm_address[0x40];
+
+ u8 sw_steering_nic_tx_action_allow_icm_address[0x40];
+
+ u8 reserved_at_20c0[0x5f40];
+};
+
+struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0x1];
+ u8 port_select_flow_table_bypass[0x1];
+ u8 reserved_at_13[0xd];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_port_selection;
+
+ u8 reserved_at_480[0x7b80];
+};
+
+enum {
+ MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
+ MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
+ MLX5_FDB_TO_VPORT_REG_C_2 = 0x04,
+ MLX5_FDB_TO_VPORT_REG_C_3 = 0x08,
+ MLX5_FDB_TO_VPORT_REG_C_4 = 0x10,
+ MLX5_FDB_TO_VPORT_REG_C_5 = 0x20,
+ MLX5_FDB_TO_VPORT_REG_C_6 = 0x40,
+ MLX5_FDB_TO_VPORT_REG_C_7 = 0x80,
};
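/* Illustrative note (not part of this header): these are bit flags for the
 * 8-bit fdb_to_vport_reg_c_id field of
 * mlx5_ifc_flow_table_eswitch_cap_bits just below; e.g.
 * (MLX5_FDB_TO_VPORT_REG_C_0 | MLX5_FDB_TO_VPORT_REG_C_1) enables
 * FDB-to-vport copying of metadata registers c_0 and c_1.
 */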
struct mlx5_ifc_flow_table_eswitch_cap_bits {
- u8 reserved_at_0[0x200];
+ u8 fdb_to_vport_reg_c_id[0x8];
+ u8 reserved_at_8[0x5];
+ u8 fdb_uplink_hairpin[0x1];
+ u8 fdb_multi_path_any_table_limit_regc[0x1];
+ u8 reserved_at_f[0x1];
+ u8 fdb_dynamic_tunnel[0x1];
+ u8 reserved_at_11[0x1];
+ u8 fdb_multi_path_any_table[0x1];
+ u8 reserved_at_13[0x2];
+ u8 fdb_modify_header_fwd_to_table[0x1];
+ u8 fdb_ipv4_ttl_modify[0x1];
+ u8 flow_source[0x1];
+ u8 reserved_at_18[0x2];
+ u8 multi_fdb_encap[0x1];
+ u8 egress_acl_forward_to_vport[0x1];
+ u8 fdb_multi_path_to_table[0x1];
+ u8 reserved_at_1d[0x3];
+
+ u8 reserved_at_20[0x1e0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
@@ -537,7 +969,95 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
- u8 reserved_at_800[0x7800];
+ u8 reserved_at_800[0xC00];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_esw_fdb;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_bitmask_support_2_esw_fdb;
+
+ u8 reserved_at_1500[0x300];
+
+ u8 sw_steering_fdb_action_drop_icm_address_rx[0x40];
+
+ u8 sw_steering_fdb_action_drop_icm_address_tx[0x40];
+
+ u8 sw_steering_uplink_icm_address_rx[0x40];
+
+ u8 sw_steering_uplink_icm_address_tx[0x40];
+
+ u8 reserved_at_1900[0x6700];
+};
+
+struct mlx5_ifc_wqe_based_flow_table_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 log_max_num_ste[0x5];
+ u8 reserved_at_8[0x3];
+ u8 log_max_num_stc[0x5];
+ u8 reserved_at_10[0x3];
+ u8 log_max_num_rtc[0x5];
+ u8 reserved_at_18[0x3];
+ u8 log_max_num_header_modify_pattern[0x5];
+
+ u8 rtc_hash_split_table[0x1];
+ u8 rtc_linear_lookup_table[0x1];
+ u8 reserved_at_22[0x1];
+ u8 stc_alloc_log_granularity[0x5];
+ u8 reserved_at_28[0x3];
+ u8 stc_alloc_log_max[0x5];
+ u8 reserved_at_30[0x3];
+ u8 ste_alloc_log_granularity[0x5];
+ u8 reserved_at_38[0x3];
+ u8 ste_alloc_log_max[0x5];
+
+ u8 reserved_at_40[0xb];
+ u8 rtc_reparse_mode[0x5];
+ u8 reserved_at_50[0x3];
+ u8 rtc_index_mode[0x5];
+ u8 reserved_at_58[0x3];
+ u8 rtc_log_depth_max[0x5];
+
+ u8 reserved_at_60[0x10];
+ u8 ste_format[0x10];
+
+ u8 stc_action_type[0x80];
+
+ u8 header_insert_type[0x10];
+ u8 header_remove_type[0x10];
+
+ u8 trivial_match_definer[0x20];
+
+ u8 reserved_at_140[0x1b];
+ u8 rtc_max_num_hash_definer_gen_wqe[0x5];
+
+ u8 reserved_at_160[0x18];
+ u8 access_index_mode[0x8];
+
+ u8 reserved_at_180[0x10];
+ u8 ste_format_gen_wqe[0x10];
+
+ u8 linear_match_definer_reg_c3[0x20];
+
+ u8 fdb_jump_to_tir_stc[0x1];
+ u8 reserved_at_1c1[0x1f];
+};
+
+struct mlx5_ifc_esw_cap_bits {
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
+
+ u8 esw_manager_vport_number_valid[0x1];
+ u8 reserved_at_61[0xf];
+ u8 esw_manager_vport_number[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
+enum {
+ MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
+ MLX5_COUNTER_FLOW_ESWITCH = 0x1,
};
struct mlx5_ifc_e_switch_cap_bits {
@@ -546,18 +1066,34 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x19];
+ u8 reserved_at_5[0x1];
+ u8 vport_cvlan_insert_always[0x1];
+ u8 esw_shared_ingress_acl[0x1];
+ u8 esw_uplink_ingress_acl[0x1];
+ u8 root_ft_on_other_esw[0x1];
+ u8 reserved_at_a[0xf];
+ u8 esw_functions_changed[0x1];
+ u8 reserved_at_1a[0x1];
+ u8 ecpf_vport_exists[0x1];
+ u8 counter_eswitch_affinity[0x1];
+ u8 merged_eswitch[0x1];
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
u8 vxlan_encap_decap[0x1];
u8 nvgre_encap_decap[0x1];
- u8 reserved_at_22[0x9];
- u8 log_max_encap_headers[0x5];
+ u8 reserved_at_22[0x1];
+ u8 log_max_fdb_encap_uplink[0x5];
+ u8 reserved_at_21[0x3];
+ u8 log_max_packet_reformat_context[0x5];
u8 reserved_2b[0x6];
u8 max_encap_header_size[0xa];
- u8 reserved_40[0x7c0];
+ u8 reserved_at_40[0xb];
+ u8 log_max_esw_sf[0x5];
+ u8 esw_sf_base_id[0x10];
+
+ u8 reserved_at_60[0x7a0];
};
@@ -566,15 +1102,29 @@ struct mlx5_ifc_qos_cap_bits {
u8 esw_scheduling[0x1];
u8 esw_bw_share[0x1];
u8 esw_rate_limit[0x1];
- u8 reserved_at_4[0x1c];
-
- u8 reserved_at_20[0x20];
+ u8 reserved_at_4[0x1];
+ u8 packet_pacing_burst_bound[0x1];
+ u8 packet_pacing_typical_size[0x1];
+ u8 reserved_at_7[0x1];
+ u8 nic_sq_scheduling[0x1];
+ u8 nic_bw_share[0x1];
+ u8 nic_rate_limit[0x1];
+ u8 packet_pacing_uid[0x1];
+ u8 log_esw_max_sched_depth[0x4];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x9];
+ u8 esw_cross_esw_sched[0x1];
+ u8 reserved_at_2a[0x1];
+ u8 log_max_qos_nic_queue_group[0x5];
+ u8 reserved_at_30[0x10];
u8 packet_pacing_max_rate[0x20];
u8 packet_pacing_min_rate[0x20];
- u8 reserved_at_80[0x10];
+ u8 reserved_at_80[0xb];
+ u8 log_esw_max_rate_limit[0x5];
u8 packet_pacing_rate_table_size[0x10];
u8 esw_element_type[0x10];
@@ -585,7 +1135,32 @@ struct mlx5_ifc_qos_cap_bits {
u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x700];
+ u8 nic_element_type[0x10];
+ u8 nic_tsar_type[0x10];
+
+ u8 reserved_at_120[0x3];
+ u8 log_meter_aso_granularity[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_meter_aso_max_alloc[0x5];
+ u8 reserved_at_130[0x3];
+ u8 log_max_num_meter_aso[0x5];
+ u8 reserved_at_138[0x8];
+
+ u8 reserved_at_140[0x6c0];
+};
+
+struct mlx5_ifc_debug_cap_bits {
+ u8 core_dump_general[0x1];
+ u8 core_dump_qp[0x1];
+ u8 reserved_at_2[0x7];
+ u8 resource_dump[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 reserved_at_20[0x2];
+ u8 stall_detect[0x1];
+ u8 reserved_at_23[0x1d];
+
+ u8 reserved_at_40[0x7c0];
};
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -606,14 +1181,31 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 scatter_fcs[0x1];
u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
- u8 reserved_at_1c[0x2];
- u8 tunnel_statless_gre[0x1];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
+ u8 tunnel_stateless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
u8 swp[0x1];
u8 swp_csum[0x1];
u8 swp_lso[0x1];
- u8 reserved_at_23[0x1d];
+ u8 cqe_checksum_full[0x1];
+ u8 tunnel_stateless_geneve_tx[0x1];
+ u8 tunnel_stateless_mpls_over_udp[0x1];
+ u8 tunnel_stateless_mpls_over_gre[0x1];
+ u8 tunnel_stateless_vxlan_gpe[0x1];
+ u8 tunnel_stateless_ipv4_over_vxlan[0x1];
+ u8 tunnel_stateless_ip_over_ip[0x1];
+ u8 insert_trailer[0x1];
+ u8 reserved_at_2b[0x1];
+ u8 tunnel_stateless_ip_over_ip_rx[0x1];
+ u8 tunnel_stateless_ip_over_ip_tx[0x1];
+ u8 reserved_at_2e[0x2];
+ u8 max_vxlan_udp_ports[0x8];
+ u8 swp_csum_l4_partial[0x1];
+ u8 reserved_at_39[0x5];
+ u8 max_geneve_opt_len[0x1];
+ u8 tunnel_stateless_geneve_rx[0x1];
u8 reserved_at_40[0x10];
u8 lro_min_mss_size[0x10];
@@ -625,9 +1217,22 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 reserved_at_200[0x600];
};
+enum {
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1];
- u8 reserved_at_1[0x1f];
+ u8 reserved_at_1[0x3];
+ u8 sw_r_roce_src_udp_port[0x1];
+ u8 fl_rc_qp_when_roce_disabled[0x1];
+ u8 fl_rc_qp_when_roce_enabled[0x1];
+ u8 roce_cc_general[0x1];
+ u8 qp_ooo_transmit_default[0x1];
+ u8 reserved_at_9[0x15];
+ u8 qp_ts_format[0x2];
u8 reserved_at_20[0x60];
@@ -648,6 +1253,140 @@ struct mlx5_ifc_roce_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_sync_steering_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xc0];
+};
+
+struct mlx5_ifc_sync_steering_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_sync_crypto_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 crypto_type[0x10];
+
+ u8 reserved_at_80[0x80];
+};
+
+struct mlx5_ifc_sync_crypto_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_device_mem_cap_bits {
+ u8 memic[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0xb];
+ u8 log_min_memic_alloc_size[0x5];
+ u8 reserved_at_30[0x8];
+ u8 log_max_memic_addr_alignment[0x8];
+
+ u8 memic_bar_start_addr[0x40];
+
+ u8 memic_bar_size[0x20];
+
+ u8 max_memic_size[0x20];
+
+ u8 steering_sw_icm_start_address[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 log_header_modify_sw_icm_size[0x8];
+ u8 reserved_at_110[0x2];
+ u8 log_sw_icm_alloc_granularity[0x6];
+ u8 log_steering_sw_icm_size[0x8];
+
+ u8 log_indirect_encap_sw_icm_size[0x8];
+ u8 reserved_at_128[0x10];
+ u8 log_header_modify_pattern_sw_icm_size[0x8];
+
+ u8 header_modify_sw_icm_start_address[0x40];
+
+ u8 reserved_at_180[0x40];
+
+ u8 header_modify_pattern_sw_icm_start_address[0x40];
+
+ u8 memic_operations[0x20];
+
+ u8 reserved_at_220[0x20];
+
+ u8 indirect_encap_sw_icm_start_address[0x40];
+
+ u8 reserved_at_280[0x580];
+};
+
+struct mlx5_ifc_device_event_cap_bits {
+ u8 user_affiliated_events[4][0x40];
+
+ u8 user_unaffiliated_events[4][0x40];
+};
+
+struct mlx5_ifc_virtio_emulation_cap_bits {
+ u8 desc_tunnel_offload_type[0x1];
+ u8 eth_frame_offload_type[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 device_features_bits_mask[0xd];
+ u8 event_mode[0x8];
+ u8 virtio_queue_type[0x8];
+
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_30[0x3];
+ u8 log_doorbell_stride[0x5];
+ u8 reserved_at_38[0x3];
+ u8 log_doorbell_bar_size[0x5];
+
+ u8 doorbell_bar_offset[0x40];
+
+ u8 max_emulated_devices[0x8];
+ u8 max_num_virtio_queues[0x18];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 reserved_at_c0[0x13];
+ u8 desc_group_mkey_supported[0x1];
+ u8 freeze_to_rdy_supported[0x1];
+ u8 reserved_at_d5[0xb];
+
+ u8 reserved_at_e0[0x20];
+
+ u8 umem_1_buffer_param_a[0x20];
+
+ u8 umem_1_buffer_param_b[0x20];
+
+ u8 umem_2_buffer_param_a[0x20];
+
+ u8 umem_2_buffer_param_b[0x20];
+
+ u8 umem_3_buffer_param_a[0x20];
+
+ u8 umem_3_buffer_param_b[0x20];
+
+ u8 reserved_at_1c0[0x640];
+};
+
enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
@@ -695,11 +1434,13 @@ struct mlx5_ifc_atomic_caps_bits {
u8 reserved_at_e0[0x720];
};
-struct mlx5_ifc_odp_cap_bits {
+struct mlx5_ifc_odp_scheme_cap_bits {
u8 reserved_at_0[0x40];
u8 sig[0x1];
- u8 reserved_at_41[0x1f];
+ u8 reserved_at_41[0x4];
+ u8 page_prefetch[0x1];
+ u8 reserved_at_46[0x1a];
u8 reserved_at_60[0x20];
@@ -709,40 +1450,91 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
- u8 reserved_at_e0[0x720];
+ struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
+
+ u8 reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ struct mlx5_ifc_odp_scheme_cap_bits transport_page_fault_scheme_cap;
+
+ struct mlx5_ifc_odp_scheme_cap_bits memory_page_fault_scheme_cap;
+
+ u8 reserved_at_400[0x200];
+
+ u8 mem_page_fault[0x1];
+ u8 reserved_at_601[0x1f];
+
+ u8 reserved_at_620[0x1e0];
};
-struct mlx5_ifc_calc_op {
- u8 reserved_at_0[0x10];
- u8 reserved_at_10[0x9];
- u8 op_swap_endianness[0x1];
- u8 op_min[0x1];
- u8 op_xor[0x1];
- u8 op_or[0x1];
- u8 op_and[0x1];
- u8 op_max[0x1];
- u8 op_add[0x1];
+struct mlx5_ifc_tls_cap_bits {
+ u8 tls_1_2_aes_gcm_128[0x1];
+ u8 tls_1_3_aes_gcm_128[0x1];
+ u8 tls_1_2_aes_gcm_256[0x1];
+ u8 tls_1_3_aes_gcm_256[0x1];
+ u8 reserved_at_4[0x1c];
+
+ u8 reserved_at_20[0x7e0];
};
-struct mlx5_ifc_vector_calc_cap_bits {
- u8 calc_matrix[0x1];
- u8 reserved_at_1[0x1f];
- u8 reserved_at_20[0x8];
- u8 max_vec_count[0x8];
- u8 reserved_at_30[0xd];
- u8 max_chunk_size[0x3];
- struct mlx5_ifc_calc_op calc0;
- struct mlx5_ifc_calc_op calc1;
- struct mlx5_ifc_calc_op calc2;
- struct mlx5_ifc_calc_op calc3;
- u8 reserved_at_e0[0x720];
+struct mlx5_ifc_ipsec_cap_bits {
+ u8 ipsec_full_offload[0x1];
+ u8 ipsec_crypto_offload[0x1];
+ u8 ipsec_esn[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_ipsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+ u8 min_log_ipsec_full_replay_window[0x8];
+ u8 max_log_ipsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x7d0];
+};
+
+struct mlx5_ifc_macsec_cap_bits {
+ u8 macsec_epn[0x1];
+ u8 reserved_at_1[0x2];
+ u8 macsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_macsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 min_log_macsec_full_replay_window[0x8];
+ u8 max_log_macsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_psp_cap_bits {
+ u8 reserved_at_0[0x1];
+ u8 psp_crypto_offload[0x1];
+ u8 reserved_at_2[0x1];
+ u8 psp_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_num_of_psp_spi[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x7e0];
};
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3,
};
enum {
@@ -789,27 +1581,120 @@ enum {
MLX5_CAP_UMR_FENCE_NONE = 0x2,
};
+enum {
+ MLX5_FLEX_IPV4_OVER_VXLAN_ENABLED = 1 << 0,
+ MLX5_FLEX_IPV6_OVER_VXLAN_ENABLED = 1 << 1,
+ MLX5_FLEX_IPV6_OVER_IP_ENABLED = 1 << 2,
+ MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
+ MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
+ MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
+ MLX5_FLEX_P_BIT_VXLAN_GPE_ENABLED = 1 << 6,
+ MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
+ MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
+ MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
+ MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED = 1 << 10,
+ MLX5_FLEX_PARSER_GTPU_ENABLED = 1 << 11,
+ MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED = 1 << 16,
+ MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED = 1 << 17,
+ MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED = 1 << 18,
+ MLX5_FLEX_PARSER_GTPU_TEID_ENABLED = 1 << 19,
+};
+
+enum {
+ MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
+ MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
+ MLX5_UCTX_CAP_RDMA_CTRL = 1UL << 3,
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA = 1UL << 4,
+};
+
+#define MLX5_FC_BULK_SIZE_FACTOR 128
+
+enum mlx5_fc_bulk_alloc_bitmask {
+ MLX5_FC_BULK_128 = (1 << 0),
+ MLX5_FC_BULK_256 = (1 << 1),
+ MLX5_FC_BULK_512 = (1 << 2),
+ MLX5_FC_BULK_1024 = (1 << 3),
+ MLX5_FC_BULK_2048 = (1 << 4),
+ MLX5_FC_BULK_4096 = (1 << 5),
+ MLX5_FC_BULK_8192 = (1 << 6),
+ MLX5_FC_BULK_16384 = (1 << 7),
+};
+
+#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
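/* Illustrative example (not part of this header): each bulk-alloc bit
 * selects a power-of-two multiple of MLX5_FC_BULK_SIZE_FACTOR counters, e.g.
 * MLX5_FC_BULK_NUM_FCS(MLX5_FC_BULK_512) == 128 * (1 << 2) == 512 counters
 * per bulk allocation.
 */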
+
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
+enum {
+ MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
+ MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+ MLX5_STEERING_FORMAT_CONNECTX_7 = 2,
+ MLX5_STEERING_FORMAT_CONNECTX_8 = 3,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x80];
+ u8 reserved_at_0[0x6];
+ u8 page_request_disable[0x1];
+ u8 abs_native_port_num[0x1];
+ u8 reserved_at_8[0x8];
+ u8 shared_object_to_user_object_allowed[0x1];
+ u8 reserved_at_13[0xe];
+ u8 vhca_resource_manager[0x1];
+
+ u8 hca_cap_2[0x1];
+ u8 create_lag_when_not_master_up[0x1];
+ u8 dtor[0x1];
+ u8 event_on_vhca_state_teardown_request[0x1];
+ u8 event_on_vhca_state_in_use[0x1];
+ u8 event_on_vhca_state_active[0x1];
+ u8 event_on_vhca_state_allocated[0x1];
+ u8 event_on_vhca_state_invalid[0x1];
+ u8 reserved_at_28[0x8];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_40[0x40];
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
- u8 reserved_at_90[0xb];
+ u8 event_cap[0x1];
+ u8 reserved_at_91[0x2];
+ u8 isolate_vl_tc_new[0x1];
+ u8 reserved_at_94[0x4];
+ u8 prio_tag_required[0x1];
+ u8 reserved_at_99[0x2];
u8 log_max_qp[0x5];
- u8 reserved_at_a0[0xb];
+ u8 reserved_at_a0[0x3];
+ u8 ece_support[0x1];
+ u8 reserved_at_a4[0x5];
+ u8 reg_c_preserve[0x1];
+ u8 reserved_at_aa[0x1];
u8 log_max_srq[0x5];
- u8 reserved_at_b0[0x10];
-
- u8 reserved_at_c0[0x8];
+ u8 reserved_at_b0[0x1];
+ u8 uplink_follow[0x1];
+ u8 ts_cqe_to_dest_cqn[0x1];
+ u8 reserved_at_b3[0x6];
+ u8 go_back_n[0x1];
+ u8 reserved_at_ba[0x6];
+
+ u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
- u8 reserved_at_d0[0xb];
+ u8 relaxed_ordering_write_umr[0x1];
+ u8 relaxed_ordering_read_umr[0x1];
+ u8 reserved_at_d2[0x7];
+ u8 virtio_net_device_emualtion_manager[0x1];
+ u8 virtio_blk_device_emualtion_manager[0x1];
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
- u8 reserved_at_e8[0x2];
+ u8 relaxed_ordering_write[0x1];
+ u8 relaxed_ordering_read_pci_enabled[0x1];
u8 log_max_mkey[0x6];
- u8 reserved_at_f0[0xc];
+ u8 reserved_at_f0[0x6];
+ u8 terminate_scatter_list_mkey[0x1];
+ u8 repeated_mkey[0x1];
+ u8 dump_fill_mkey[0x1];
+ u8 reserved_at_f9[0x2];
+ u8 fast_teardown[0x1];
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
@@ -822,12 +1707,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 null_mkey[0x1];
u8 log_max_klm_list_size[0x6];
- u8 reserved_at_120[0xa];
+ u8 reserved_at_120[0x2];
+ u8 qpc_extension[0x1];
+ u8 reserved_at_123[0x7];
u8 log_max_ra_req_dc[0x6];
- u8 reserved_at_130[0xa];
+ u8 reserved_at_130[0x2];
+ u8 eth_wqe_too_small[0x1];
+ u8 reserved_at_133[0x6];
+ u8 vnic_env_cq_overrun[0x1];
u8 log_max_ra_res_dc[0x6];
- u8 reserved_at_140[0xa];
+ u8 reserved_at_140[0x5];
+ u8 release_all_pages[0x1];
+ u8 must_not_use[0x1];
+ u8 reserved_at_147[0x2];
+ u8 roce_accl[0x1];
u8 log_max_ra_req_qp[0x6];
u8 reserved_at_150[0xa];
u8 log_max_ra_res_qp[0x6];
@@ -837,13 +1731,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cc_modify_allowed[0x1];
u8 start_pad[0x1];
u8 cache_line_128byte[0x1];
- u8 reserved_at_165[0xb];
+ u8 reserved_at_165[0x4];
+ u8 rts2rts_qp_counters_set_id[0x1];
+ u8 reserved_at_16a[0x2];
+ u8 vnic_env_int_rq_oob[0x1];
+ u8 sbcam_reg[0x1];
+ u8 reserved_at_16e[0x1];
+ u8 qcam_reg[0x1];
u8 gid_table_size[0x10];
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
u8 retransmission_q_counters[0x1];
- u8 reserved_at_183[0x1];
+ u8 debug[0x1];
u8 modify_rq_counter_set_id[0x1];
u8 rq_delay_drop[0x1];
u8 max_qp_cnt[0xa];
@@ -853,11 +1753,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vhca_group_manager[0x1];
u8 ib_virt[0x1];
u8 eth_virt[0x1];
- u8 reserved_at_1a4[0x1];
+ u8 vnic_env_queue_counters[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
- u8 eswitch_flow_table[0x1];
- u8 early_vf_enable[0x1];
+ u8 eswitch_manager[0x1];
+ u8 device_memory[0x1];
u8 mcam_reg[0x1];
u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
@@ -876,7 +1776,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_msg[0x5];
u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
- u8 reserved_at_1d0[0x1];
+ u8 temp_warn_event[0x1];
u8 dcbx[0x1];
u8 general_notification_event[0x1];
u8 reserved_at_1d3[0x2];
@@ -893,7 +1793,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 wol_p[0x1];
u8 stat_rate_support[0x10];
- u8 reserved_at_1f0[0xc];
+ u8 reserved_at_1f0[0x1];
+ u8 pci_sync_for_fw_update_event[0x1];
+ u8 reserved_at_1f2[0x6];
+ u8 init2_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1fa[0x2];
+ u8 wqe_based_flow_table_update_cap[0x1];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -901,9 +1806,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_202[0x1];
u8 ipoib_enhanced_offloads[0x1];
u8 ipoib_basic_offloads[0x1];
- u8 reserved_at_205[0x5];
+ u8 reserved_at_205[0x1];
+ u8 repeated_block_disabled[0x1];
+ u8 umr_modify_entity_size_disabled[0x1];
+ u8 umr_modify_atomic_disabled[0x1];
+ u8 umr_indirect_mkey_disabled[0x1];
u8 umr_fence[0x2];
- u8 reserved_at_20c[0x3];
+ u8 dc_req_scat_data_cqe[0x1];
+ u8 reserved_at_20d[0x2];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
@@ -924,7 +1834,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 reserved_at_223[0x3];
+ u8 cq_period_mode_modify[0x1];
+ u8 reserved_at_224[0x2];
u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
@@ -937,7 +1848,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vector_calc[0x1];
u8 umr_ptr_rlky[0x1];
u8 imaicl[0x1];
- u8 reserved_at_232[0x4];
+ u8 qp_packet_based[0x1];
+ u8 reserved_at_233[0x3];
u8 qkv[0x1];
u8 pkv[0x1];
u8 set_deth_sqpn[0x1];
@@ -948,46 +1860,87 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 rc[0x1];
u8 uar_4k[0x1];
- u8 reserved_at_241[0x9];
+ u8 reserved_at_241[0x7];
+ u8 fl_rc_qp_when_roce_disabled[0x1];
+ u8 regexp_params[0x1];
u8 uar_sz[0x6];
- u8 reserved_at_250[0x8];
+ u8 port_selection_cap[0x1];
+ u8 nic_cap_reg[0x1];
+ u8 umem_uid_0[0x1];
+ u8 reserved_at_253[0x5];
u8 log_pg_sz[0x8];
u8 bf[0x1];
u8 driver_version[0x1];
u8 pad_tx_eth_packet[0x1];
- u8 reserved_at_263[0x8];
+ u8 reserved_at_263[0x3];
+ u8 mkey_by_name[0x1];
+ u8 reserved_at_267[0x4];
+
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0xb];
+ u8 disciplined_fr_counter[0x1];
+ u8 reserved_at_271[0x2];
+ u8 qp_error_syndrome[0x1];
+ u8 reserved_at_274[0x2];
+ u8 lag_dct[0x2];
+ u8 lag_tx_port_affinity[0x1];
+ u8 lag_native_fdb_selection[0x1];
+ u8 reserved_at_27a[0x1];
u8 lag_master[0x1];
u8 num_lag_ports[0x4];
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_2a0[0x10];
+ u8 reserved_at_2a0[0x7];
+ u8 mkey_pcie_tph[0x1];
+ u8 reserved_at_2a8[0x1];
+ u8 tis_tir_td_order[0x1];
+
+ u8 psp[0x1];
+ u8 shampo[0x1];
+ u8 reserved_at_2ac[0x4];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_at_2c0[0x10];
+ u8 max_flow_counter_31_16[0x10];
u8 max_wqe_sz_sq_dc[0x10];
u8 reserved_at_2e0[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_at_300[0x18];
+ u8 reserved_at_300[0x10];
+ u8 flow_counter_bulk_alloc[0x8];
u8 log_max_mcg[0x8];
u8 reserved_at_320[0x3];
u8 log_max_transport_domain[0x5];
- u8 reserved_at_328[0x3];
+ u8 reserved_at_328[0x2];
+ u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
- u8 reserved_at_330[0xb];
+ u8 dp_ordering_ooo_all_ud[0x1];
+ u8 dp_ordering_ooo_all_uc[0x1];
+ u8 dp_ordering_ooo_all_xrc[0x1];
+ u8 dp_ordering_ooo_all_dc[0x1];
+ u8 dp_ordering_ooo_all_rc[0x1];
+ u8 pcie_reset_using_hotreset_method[0x1];
+ u8 pci_sync_for_fw_update_with_driver_unload[0x1];
+ u8 vnic_env_cnt_steering_fail[0x1];
+ u8 vport_counter_local_loopback[0x1];
+ u8 q_counter_aggregation[0x1];
+ u8 q_counter_other_vport[0x1];
u8 log_max_xrcd[0x5];
- u8 reserved_at_340[0x8];
+ u8 nic_receive_steering_discard[0x1];
+ u8 receive_discard_vport_down[0x1];
+ u8 transmit_discard_vport_down[0x1];
+ u8 eq_overrun_count[0x1];
+ u8 reserved_at_344[0x1];
+ u8 invalid_command_count[0x1];
+ u8 quota_exceeded_count[0x1];
+ u8 reserved_at_347[0x1];
u8 log_max_flow_counter_bulk[0x8];
- u8 max_flow_counter[0x10];
+ u8 max_flow_counter_15_0[0x10];
u8 reserved_at_360[0x3];
@@ -1006,10 +1959,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_rqt[0x5];
u8 reserved_at_390[0x3];
u8 log_max_rqt_size[0x5];
- u8 reserved_at_398[0x3];
+ u8 reserved_at_398[0x1];
+ u8 vnic_env_cnt_bar_uar_access[0x1];
+ u8 vnic_env_cnt_odp_page_fault[0x1];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_at_3a0[0x3];
+ u8 ext_stride_num_range[0x1];
+ u8 roce_rw_supported[0x1];
+ u8 log_max_current_uc_list_wr_supported[0x1];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@ -1018,21 +1975,49 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_3b8[0x3];
u8 log_min_stride_sz_sq[0x5];
- u8 reserved_at_3c0[0x1b];
+ u8 hairpin[0x1];
+ u8 reserved_at_3c1[0x2];
+ u8 log_max_hairpin_queues[0x5];
+ u8 reserved_at_3c8[0x3];
+ u8 log_max_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3d0[0x3];
+ u8 log_max_hairpin_num_packets[0x5];
+ u8 reserved_at_3d8[0x3];
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
- u8 disable_local_lb[0x1];
- u8 reserved_at_3e2[0x9];
+ u8 disable_local_lb_uc[0x1];
+ u8 disable_local_lb_mc[0x1];
+ u8 log_min_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3e8[0x1];
+ u8 silent_mode[0x1];
+ u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
u8 reserved_at_3f8[0x3];
u8 log_max_current_uc_list[0x5];
- u8 reserved_at_400[0x80];
-
- u8 reserved_at_480[0x3];
+ u8 general_obj_types[0x40];
+
+ u8 sq_ts_format[0x2];
+ u8 rq_ts_format[0x2];
+ u8 steering_format_version[0x4];
+ u8 create_qp_start_hint[0x18];
+
+ u8 reserved_at_460[0x1];
+ u8 ats[0x1];
+ u8 cross_vhca_rqt[0x1];
+ u8 log_max_uctx[0x5];
+ u8 reserved_at_468[0x1];
+ u8 crypto[0x1];
+ u8 ipsec_offload[0x1];
+ u8 log_max_umem[0x5];
+ u8 max_num_eqs[0x10];
+
+ u8 reserved_at_480[0x1];
+ u8 tls_tx[0x1];
+ u8 tls_rx[0x1];
u8 log_max_l2_table[0x5];
u8 reserved_at_488[0x8];
u8 log_uar_page_sz[0x10];
@@ -1043,15 +2028,41 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_500[0x20];
u8 num_of_uars_per_page[0x20];
- u8 reserved_at_540[0x40];
- u8 reserved_at_580[0x3f];
+ u8 flex_parser_protocols[0x20];
+
+ u8 max_geneve_tlv_options[0x8];
+ u8 reserved_at_568[0x3];
+ u8 max_geneve_tlv_option_data_len[0x5];
+ u8 reserved_at_570[0x1];
+ u8 adv_rdma[0x1];
+ u8 reserved_at_572[0x7];
+ u8 adv_virtualization[0x1];
+ u8 reserved_at_57a[0x6];
+
+ u8 reserved_at_580[0xb];
+ u8 log_max_dci_stream_channels[0x5];
+ u8 reserved_at_590[0x3];
+ u8 log_max_dci_errored_streams[0x5];
+ u8 reserved_at_598[0x8];
+
+ u8 reserved_at_5a0[0x10];
+ u8 enhanced_cqe_compression[0x1];
+ u8 reserved_at_5b1[0x1];
+ u8 crossing_vhca_mkey[0x1];
+ u8 log_max_dek[0x5];
+ u8 reserved_at_5b8[0x4];
+ u8 mini_cqe_resp_stride_index[0x1];
+ u8 cqe_128_always[0x1];
+ u8 cqe_compression_128[0x1];
u8 cqe_compression[0x1];
u8 cqe_compression_timeout[0x10];
u8 cqe_compression_max_num[0x10];
- u8 reserved_at_5e0[0x10];
+ u8 reserved_at_5e0[0x8];
+ u8 flex_parser_id_gtpu_dw_0[0x4];
+ u8 reserved_at_5ec[0x4];
u8 tag_matching[0x1];
u8 rndv_offload_rc[0x1];
u8 rndv_offload_dc[0x1];
@@ -1059,36 +2070,232 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5f8[0x3];
u8 log_max_xrq[0x5];
+ u8 affiliate_nic_vport_criteria[0x8];
+ u8 native_port_num[0x8];
+ u8 num_vhca_ports[0x8];
+ u8 flex_parser_id_gtpu_teid[0x4];
+ u8 reserved_at_61c[0x2];
+ u8 sw_owner_id[0x1];
+ u8 reserved_at_61f[0x1];
+
+ u8 max_num_of_monitor_counters[0x10];
+ u8 num_ppcnt_monitor_counters[0x10];
+
+ u8 max_num_sf[0x10];
+ u8 num_q_monitor_counters[0x10];
+
+ u8 reserved_at_660[0x20];
+
+ u8 sf[0x1];
+ u8 sf_set_partition[0x1];
+ u8 reserved_at_682[0x1];
+ u8 log_max_sf[0x5];
+ u8 apu[0x1];
+ u8 reserved_at_689[0x4];
+ u8 migration[0x1];
+ u8 reserved_at_68e[0x2];
+ u8 log_min_sf_size[0x8];
+ u8 max_num_sf_partitions[0x8];
+
+ u8 uctx_cap[0x20];
+
+ u8 reserved_at_6c0[0x4];
+ u8 flex_parser_id_geneve_tlv_option_0[0x4];
+ u8 flex_parser_id_icmp_dw1[0x4];
+ u8 flex_parser_id_icmp_dw0[0x4];
+ u8 flex_parser_id_icmpv6_dw1[0x4];
+ u8 flex_parser_id_icmpv6_dw0[0x4];
+ u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
+ u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
+
+ u8 max_num_match_definer[0x10];
+ u8 sf_base_id[0x10];
+
+ u8 flex_parser_id_gtpu_dw_2[0x4];
+ u8 flex_parser_id_gtpu_first_ext_dw_0[0x4];
+ u8 num_total_dynamic_vf_msix[0x18];
+ u8 reserved_at_720[0x14];
+ u8 dynamic_msix_table_size[0xc];
+ u8 reserved_at_740[0xc];
+ u8 min_dynamic_vf_msix_table_size[0x4];
+ u8 reserved_at_750[0x2];
+ u8 data_direct[0x1];
+ u8 reserved_at_753[0x1];
+ u8 max_dynamic_vf_msix_table_size[0xc];
+
+ u8 reserved_at_760[0x3];
+ u8 log_max_num_header_modify_argument[0x5];
+ u8 log_header_modify_argument_granularity_offset[0x4];
+ u8 log_header_modify_argument_granularity[0x4];
+ u8 reserved_at_770[0x3];
+ u8 log_header_modify_argument_max_alloc[0x5];
+ u8 reserved_at_778[0x8];
+
+ u8 vhca_tunnel_commands[0x40];
+ u8 match_definer_format_supported[0x40];
+};
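/* Illustrative sketch (not part of this header): cmd_hca_cap fields are
 * normally read through the MLX5_CAP_GEN() helper from linux/mlx5/device.h,
 * which resolves the offset from this layout, e.g.:
 *
 *	u8 log_max_qp = MLX5_CAP_GEN(mdev, log_max_qp);
 *	bool esw_mgr = MLX5_CAP_GEN(mdev, eswitch_manager);
 */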
+
+enum {
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000,
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE = (1ULL << 20),
+};
+
+enum {
+ MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE = 0x200,
+};
+
+struct mlx5_ifc_cmd_hca_cap_2_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 migratable[0x1];
+ u8 reserved_at_81[0x7];
+ u8 dp_ordering_force[0x1];
+ u8 reserved_at_89[0x9];
+ u8 query_vuid[0x1];
+ u8 reserved_at_93[0x5];
+ u8 umr_log_entity_size_5[0x1];
+ u8 reserved_at_99[0x7];
+
+ u8 max_reformat_insert_size[0x8];
+ u8 max_reformat_insert_offset[0x8];
+ u8 max_reformat_remove_size[0x8];
+ u8 max_reformat_remove_offset[0x8];
+
+ u8 reserved_at_c0[0x8];
+ u8 migration_multi_load[0x1];
+ u8 migration_tracking_state[0x1];
+ u8 multiplane_qp_ud[0x1];
+ u8 reserved_at_cb[0x5];
+ u8 migration_in_chunks[0x1];
+ u8 reserved_at_d1[0x1];
+ u8 sf_eq_usage[0x1];
+ u8 reserved_at_d3[0x5];
+ u8 multiplane[0x1];
+ u8 reserved_at_d9[0x7];
+
+ u8 cross_vhca_object_to_object_supported[0x20];
+
+ u8 allowed_object_for_other_vhca_access[0x40];
+
+ u8 reserved_at_140[0x60];
+
+ u8 flow_table_type_2_type[0x8];
+ u8 reserved_at_1a8[0x2];
+ u8 format_select_dw_8_6_ext[0x1];
+ u8 log_min_mkey_entity_size[0x5];
+ u8 reserved_at_1b0[0x10];
+
+ u8 general_obj_types_127_64[0x40];
+ u8 reserved_at_200[0x20];
+
+ u8 reserved_at_220[0x1];
+ u8 sw_vhca_id_valid[0x1];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_230[0x10];
+
+ u8 reserved_at_240[0xb];
+ u8 ts_cqe_metadata_size2wqe_counter[0x5];
+ u8 reserved_at_250[0x10];
+
+ u8 reserved_at_260[0x20];
+
+ u8 format_select_dw_gtpu_dw_0[0x8];
+ u8 format_select_dw_gtpu_dw_1[0x8];
+ u8 format_select_dw_gtpu_dw_2[0x8];
+ u8 format_select_dw_gtpu_first_ext_dw_0[0x8];
+
+ u8 generate_wqe_type[0x20];
+
+ u8 reserved_at_2c0[0xc0];
+
+ u8 reserved_at_380[0xb];
+ u8 min_mkey_log_entity_size_fixed_buffer[0x5];
+ u8 ec_vf_vport_base[0x10];
+
+ u8 reserved_at_3a0[0x2];
+ u8 max_mkey_log_entity_size_fixed_buffer[0x6];
+ u8 reserved_at_3a8[0x2];
+ u8 max_mkey_log_entity_size_mtt[0x6];
+ u8 max_rqt_vhca_id[0x10];
+
+ u8 reserved_at_3c0[0x20];
+
+ u8 reserved_at_3e0[0x10];
+ u8 pcc_ifa2[0x1];
+ u8 reserved_at_3f1[0xf];
+
+ u8 reserved_at_400[0x1];
+ u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1];
+ u8 reserved_at_402[0xe];
+ u8 return_reg_id[0x10];
+
+ u8 reserved_at_420[0x1c];
+ u8 flow_table_hash_type[0x4];
+
+ u8 reserved_at_440[0x8];
+ u8 max_num_eqs_24b[0x18];
+
+ u8 reserved_at_460[0x144];
+ u8 load_balance_id[0x4];
+ u8 reserved_at_5a8[0x18];
+
+ u8 query_adjacent_functions_id[0x1];
+ u8 ingress_egress_esw_vport_connect[0x1];
+ u8 function_id_type_vhca_id[0x1];
+ u8 reserved_at_5c3[0x1];
+ u8 lag_per_mp_group[0x1];
+ u8 reserved_at_5c5[0xb];
+ u8 delegate_vhca_management_profiles[0x10];
+
+ u8 delegated_vhca_max[0x10];
+ u8 delegate_vhca_max[0x10];
+
u8 reserved_at_600[0x200];
};
-enum mlx5_flow_destination_type {
- MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
- MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+enum mlx5_ifc_flow_destination_type {
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
+};

- MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
+enum mlx5_flow_table_miss_action {
+ MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD,
+ MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
};
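/* Illustrative sketch (not part of this header): filling one destination
 * entry using the layout below, where "dest" is assumed to point into the
 * destination list of a set-flow-table-entry command payload.
 */
static void set_vport_dest(void *dest, u16 vport)
{
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format_struct, dest, destination_id, vport);
}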
struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
- u8 reserved_at_20[0x20];
+ u8 destination_eswitch_owner_vhca_id_valid[0x1];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0x6];
+ u8 destination_table_type[0x8];
+ u8 destination_eswitch_owner_vhca_id[0x10];
};
struct mlx5_ifc_flow_counter_list_bits {
- u8 clear[0x1];
- u8 num_of_counters[0xf];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_20[0x20];
};
-union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
- struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_struct_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
+ struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
- u8 reserved_at_0[0x40];
};
struct mlx5_ifc_fte_match_param_bits {
@@ -1098,7 +2305,15 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
- u8 reserved_at_600[0xa00];
+ struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
+
+ struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
+
+ struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
+
+ struct mlx5_ifc_fte_match_set_misc5_bits misc_parameters_5;
+
+ u8 reserved_at_e00[0x200];
};
enum {
@@ -1157,15 +2372,41 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
- u8 reserved_at_120[0x15];
- u8 log_wqe_num_of_strides[0x3];
+ u8 dbr_umem_valid[0x1];
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_122[0x1];
+ u8 log_hairpin_num_packets[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_hairpin_data_sz[0x5];
+
+ u8 reserved_at_130[0x4];
+ u8 log_wqe_num_of_strides[0x4];
u8 two_byte_shift_en[0x1];
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x4c0];
+ u8 dbr_umem_id[0x20];
+ u8 wq_umem_id[0x20];

- struct mlx5_ifc_cmd_pas_bits pas[0];
+ u8 wq_umem_offset[0x40];
+
+ u8 headers_mkey[0x20];
+
+ u8 shampo_enable[0x1];
+ u8 reserved_at_1e1[0x1];
+ u8 shampo_mode[0x2];
+ u8 reserved_at_1e4[0x1];
+ u8 log_reservation_size[0x3];
+ u8 reserved_at_1e8[0x5];
+ u8 log_max_num_of_packets_per_reservation[0x3];
+ u8 reserved_at_1f0[0x6];
+ u8 log_headers_entry_size[0x2];
+ u8 reserved_at_1f8[0x4];
+ u8 log_headers_buffer_entry_num[0x4];
+
+ u8 reserved_at_200[0x400];
+
+ struct mlx5_ifc_cmd_pas_bits pas[];
};
struct mlx5_ifc_rq_num_bits {
@@ -1173,6 +2414,13 @@ struct mlx5_ifc_rq_num_bits {
u8 rq_num[0x18];
};
+struct mlx5_ifc_rq_vhca_bits {
+ u8 reserved_at_0[0x8];
+ u8 rq_num[0x18];
+ u8 reserved_at_20[0x10];
+ u8 rq_vhca_id[0x10];
+};
+
struct mlx5_ifc_mac_address_layout_bits {
u8 reserved_at_0[0x10];
u8 mac_addr_47_32[0x10];
@@ -1247,6 +2495,17 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
u8 reserved_at_360[0x4a0];
};
+struct mlx5_ifc_cong_control_r_roce_general_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 reserved_at_80[0x10];
+ u8 rtt_resp_dscp_valid[0x1];
+ u8 reserved_at_91[0x9];
+ u8 rtt_resp_dscp[0x6];
+
+ u8 reserved_at_a0[0x760];
+};
+
struct mlx5_ifc_cong_control_802_1qau_rp_bits {
u8 reserved_at_0[0x80];
@@ -1283,6 +2542,132 @@ struct mlx5_ifc_resize_field_select_bits {
u8 resize_field_select[0x20];
};
+struct mlx5_ifc_resource_dump_bits {
+ u8 more_dump[0x1];
+ u8 inline_dump[0x1];
+ u8 reserved_at_2[0xa];
+ u8 seq_num[0x4];
+ u8 segment_type[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 vhca_id[0x10];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 num_of_obj1[0x10];
+ u8 num_of_obj2[0x10];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 device_opaque[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+
+ u8 address[0x40];
+
+ u8 inline_data[52][0x20];
+};
+
+struct mlx5_ifc_resource_dump_menu_record_bits {
+ u8 reserved_at_0[0x4];
+ u8 num_of_obj2_supports_active[0x1];
+ u8 num_of_obj2_supports_all[0x1];
+ u8 must_have_num_of_obj2[0x1];
+ u8 support_num_of_obj2[0x1];
+ u8 num_of_obj1_supports_active[0x1];
+ u8 num_of_obj1_supports_all[0x1];
+ u8 must_have_num_of_obj1[0x1];
+ u8 support_num_of_obj1[0x1];
+ u8 must_have_index2[0x1];
+ u8 support_index2[0x1];
+ u8 must_have_index1[0x1];
+ u8 support_index1[0x1];
+ u8 segment_type[0x10];
+
+ u8 segment_name[4][0x20];
+
+ u8 index1_name[4][0x20];
+
+ u8 index2_name[4][0x20];
+};
+
+struct mlx5_ifc_resource_dump_segment_header_bits {
+ u8 length_dw[0x10];
+ u8 segment_type[0x10];
+};
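/* Illustrative sketch (not part of this header): assuming length_dw counts
 * the whole segment in dwords, header included, a dump buffer could be
 * walked segment by segment roughly like this:
 *
 *	u16 len_dw = MLX5_GET(resource_dump_segment_header, hdr, length_dw);
 *	hdr = (void *)hdr + len_dw * 4;
 */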
+
+struct mlx5_ifc_resource_dump_command_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 segment_called[0x10];
+ u8 vhca_id[0x10];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 num_of_obj1[0x10];
+ u8 num_of_obj2[0x10];
+};
+
+struct mlx5_ifc_resource_dump_error_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x10];
+ u8 syndrome_id[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ u8 error[8][0x20];
+};
+
+struct mlx5_ifc_resource_dump_info_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x18];
+ u8 dump_version[0x8];
+
+ u8 hw_version[0x20];
+
+ u8 fw_version[0x20];
+};
+
+struct mlx5_ifc_resource_dump_menu_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x10];
+ u8 num_of_records[0x10];
+
+ struct mlx5_ifc_resource_dump_menu_record_bits record[];
+};
+
+struct mlx5_ifc_resource_dump_resource_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x20];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 payload[][0x20];
+};
+
+struct mlx5_ifc_resource_dump_terminate_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+};
+
+struct mlx5_ifc_menu_resource_dump_response_bits {
+ struct mlx5_ifc_resource_dump_info_segment_bits info;
+ struct mlx5_ifc_resource_dump_command_segment_bits cmd;
+ struct mlx5_ifc_resource_dump_menu_segment_bits menu;
+ struct mlx5_ifc_resource_dump_terminate_segment_bits terminate;
+};
+
enum {
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
@@ -1319,6 +2704,12 @@ struct mlx5_ifc_field_select_802_1qau_rp_bits {
u8 field_select_8021qaurp[0x20];
};
+struct mlx5_ifc_phys_layer_recovery_cntrs_bits {
+ u8 total_successful_recovery_events[0x20];
+
+ u8 reserved_at_20[0x7a0];
+};
+
struct mlx5_ifc_phys_layer_cntrs_bits {
u8 time_since_last_clear_high[0x20];
@@ -1491,12 +2882,68 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 port_xmit_wait[0x20];
};
-struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits {
+ u8 reserved_at_0[0x300];
+
+ u8 port_xmit_data_high[0x20];
+
+ u8 port_xmit_data_low[0x20];
+
+ u8 port_rcv_data_high[0x20];
+
+ u8 port_rcv_data_low[0x20];
+
+ u8 port_xmit_pkts_high[0x20];
+
+ u8 port_xmit_pkts_low[0x20];
+
+ u8 port_rcv_pkts_high[0x20];
+
+ u8 port_rcv_pkts_low[0x20];
+
+ u8 reserved_at_400[0x80];
+
+ u8 port_unicast_xmit_pkts_high[0x20];
+
+ u8 port_unicast_xmit_pkts_low[0x20];
+
+ u8 port_multicast_xmit_pkts_high[0x20];
+
+ u8 port_multicast_xmit_pkts_low[0x20];
+
+ u8 port_unicast_rcv_pkts_high[0x20];
+
+ u8 port_unicast_rcv_pkts_low[0x20];
+
+ u8 port_multicast_rcv_pkts_high[0x20];
+
+ u8 port_multicast_rcv_pkts_low[0x20];
+
+ u8 reserved_at_580[0x240];
+};
+
+struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits {
u8 transmit_queue_high[0x20];
u8 transmit_queue_low[0x20];
- u8 reserved_at_40[0x780];
+ u8 no_buffer_discard_uc_high[0x20];
+
+ u8 no_buffer_discard_uc_low[0x20];
+
+ u8 reserved_at_80[0x740];
+};
+
+struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits {
+ u8 wred_discard_high[0x20];
+
+ u8 wred_discard_low[0x20];
+
+ u8 ecn_marked_tc_high[0x20];
+
+ u8 ecn_marked_tc_low[0x20];
+
+ u8 reserved_at_80[0x740];
};
struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
@@ -1540,7 +2987,19 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
u8 rx_pause_transition_low[0x20];
- u8 reserved_at_3c0[0x400];
+ u8 rx_discards_high[0x20];
+
+ u8 rx_discards_low[0x20];
+
+ u8 device_stall_minor_watermark_cnt_high[0x20];
+
+ u8 device_stall_minor_watermark_cnt_low[0x20];
+
+ u8 device_stall_critical_watermark_cnt_high[0x20];
+
+ u8 device_stall_critical_watermark_cnt_low[0x20];
+
+ u8 reserved_at_480[0x340];
};
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
@@ -1548,7 +3007,21 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
u8 port_transmit_wait_low[0x20];
- u8 reserved_at_40[0x780];
+ u8 reserved_at_40[0x100];
+
+ u8 rx_buffer_almost_full_high[0x20];
+
+ u8 rx_buffer_almost_full_low[0x20];
+
+ u8 rx_buffer_full_high[0x20];
+
+ u8 rx_buffer_full_low[0x20];
+
+ u8 rx_icrc_encapsulated_high[0x20];
+
+ u8 rx_icrc_encapsulated_low[0x20];
+
+ u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -1864,7 +3337,19 @@ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
u8 crc_error_tlp[0x20];
- u8 reserved_at_140[0x680];
+ u8 tx_overflow_buffer_pkt_high[0x20];
+
+ u8 tx_overflow_buffer_pkt_low[0x20];
+
+ u8 outbound_stalled_reads[0x20];
+
+ u8 outbound_stalled_writes[0x20];
+
+ u8 outbound_stalled_reads_events[0x20];
+
+ u8 outbound_stalled_writes_events[0x20];
+
+ u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_cmd_inter_comp_event_bits {
@@ -1914,6 +3399,62 @@ struct mlx5_ifc_dropped_packet_logged_bits {
u8 reserved_at_0[0xe0];
};
+struct mlx5_ifc_nic_cap_reg_bits {
+ u8 reserved_at_0[0x1a];
+ u8 vhca_icm_ctrl[0x1];
+ u8 reserved_at_1b[0x5];
+
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_default_timeout_bits {
+ u8 to_multiplier[0x3];
+ u8 reserved_at_3[0x9];
+ u8 to_value[0x14];
+};
+
+struct mlx5_ifc_dtor_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+ u8 reserved_at_40[0x60];
+
+ struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+ struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+ struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+ struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+ struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+ struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+ struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reset_unload_to;
+
+ u8 reserved_at_1c0[0x20];
+};
+
+struct mlx5_ifc_vhca_icm_ctrl_reg_bits {
+ u8 vhca_id_valid[0x1];
+ u8 reserved_at_1[0xf];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_20[0xa0];
+
+ u8 cur_alloc_icm[0x20];
+
+ u8 reserved_at_e0[0x120];
+};
+
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@ -2063,13 +3604,21 @@ enum {
MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
};
+enum {
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
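/* Illustrative sketch (not part of this header): the values above are
 * written into the QPC ts_format field added further down, e.g. while
 * building a create_qp payload:
 *
 *	MLX5_SET(qpc, qpc, ts_format, MLX5_TIMESTAMP_FORMAT_DEFAULT);
 */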
+
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
u8 lag_tx_port_affinity[0x4];
u8 st[0x8];
- u8 reserved_at_10[0x3];
+ u8 reserved_at_10[0x2];
+ u8 isolate_vl_tc[0x1];
u8 pm_state[0x2];
- u8 reserved_at_15[0x3];
+ u8 reserved_at_15[0x1];
+ u8 req_e2e_credit_mode[0x2];
u8 offload_type[0x4];
u8 end_padding_mode[0x2];
u8 reserved_at_1e[0x2];
@@ -2080,7 +3629,8 @@ struct mlx5_ifc_qpc_bits {
u8 latency_sensitive[0x1];
u8 reserved_at_24[0x1];
u8 drain_sigerr[0x1];
- u8 reserved_at_26[0x2];
+ u8 reserved_at_26[0x1];
+ u8 dp_ordering_force[0x1];
u8 pd[0x18];
u8 mtu[0x3];
@@ -2090,7 +3640,10 @@ struct mlx5_ifc_qpc_bits {
u8 log_rq_stride[0x3];
u8 no_sq[0x1];
u8 log_sq_size[0x4];
- u8 reserved_at_55[0x6];
+ u8 reserved_at_55[0x1];
+ u8 retry_mode[0x2];
+ u8 ts_format[0x2];
+ u8 reserved_at_5a[0x1];
u8 rlky[0x1];
u8 ulp_stateless_offload_mode[0x4];
@@ -2125,10 +3678,12 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3c0[0x8];
u8 next_send_psn[0x18];
- u8 reserved_at_3e0[0x8];
+ u8 reserved_at_3e0[0x3];
+ u8 log_num_dci_stream_channels[0x5];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x8];
+ u8 reserved_at_400[0x3];
+ u8 log_num_dci_errored_streams[0x5];
u8 deth_sqpn[0x18];
u8 reserved_at_420[0x20];
@@ -2148,7 +3703,8 @@ struct mlx5_ifc_qpc_bits {
u8 rae[0x1];
u8 reserved_at_493[0x1];
u8 page_offset[0x6];
- u8 reserved_at_49a[0x3];
+ u8 reserved_at_49a[0x2];
+ u8 dp_ordering_1[0x1];
u8 cd_slave_receive[0x1];
u8 cd_slave_send[0x1];
u8 cd_master[0x1];
@@ -2190,7 +3746,10 @@ struct mlx5_ifc_qpc_bits {
u8 dc_access_key[0x40];
- u8 reserved_at_680[0xc0];
+ u8 reserved_at_680[0x3];
+ u8 dbr_umem_valid[0x1];
+
+ u8 reserved_at_684[0xbc];
};
struct mlx5_ifc_roce_addr_layout_bits {
@@ -2210,18 +3769,69 @@ struct mlx5_ifc_roce_addr_layout_bits {
u8 reserved_at_e0[0x20];
};
+struct mlx5_ifc_crypto_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 synchronize_dek[0x1];
+ u8 int_kek_manual[0x1];
+ u8 int_kek_auto[0x1];
+ u8 reserved_at_6[0x1a];
+
+ u8 reserved_at_20[0x3];
+ u8 log_dek_max_alloc[0x5];
+ u8 reserved_at_28[0x3];
+ u8 log_max_num_deks[0x5];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x3];
+ u8 log_dek_granularity[0x5];
+ u8 reserved_at_68[0x3];
+ u8 log_max_num_int_kek[0x5];
+ u8 sw_wrapped_dek[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
+struct mlx5_ifc_shampo_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 shampo_log_max_reservation_size[0x5];
+ u8 reserved_at_8[0x3];
+ u8 shampo_log_min_reservation_size[0x5];
+ u8 shampo_min_mss_size[0x10];
+
+ u8 shampo_header_split[0x1];
+ u8 shampo_header_split_data_merge[0x1];
+ u8 reserved_at_22[0x1];
+ u8 shampo_log_max_headers_entry_size[0x5];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_40[0x7c0];
+};
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
struct mlx5_ifc_odp_cap_bits odp_cap;
struct mlx5_ifc_atomic_caps_bits atomic_caps;
struct mlx5_ifc_roce_cap_bits roce_cap;
struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+ struct mlx5_ifc_wqe_based_flow_table_cap_bits wqe_based_flow_table_cap;
+ struct mlx5_ifc_esw_cap_bits esw_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
- struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
+ struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
+ struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
+ struct mlx5_ifc_tls_cap_bits tls_cap;
+ struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
+ struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_macsec_cap_bits macsec_cap;
+ struct mlx5_ifc_crypto_cap_bits crypto_cap;
+ struct mlx5_ifc_ipsec_cap_bits ipsec_cap;
+ struct mlx5_ifc_psp_cap_bits psp_cap;
u8 reserved_at_0[0x8000];
};
@@ -2230,13 +3840,71 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
- MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT = 0x1000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT = 0x2000,
+ MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000,
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT = 0x0,
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK = 0x1,
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2,
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC = 0x1,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP = 0x2,
+};
+
+struct mlx5_ifc_vlan_bits {
+ u8 ethtype[0x10];
+ u8 prio[0x3];
+ u8 cfi[0x1];
+ u8 vid[0xc];
+};
+
+enum {
+ MLX5_FLOW_METER_COLOR_RED = 0x0,
+ MLX5_FLOW_METER_COLOR_YELLOW = 0x1,
+ MLX5_FLOW_METER_COLOR_GREEN = 0x2,
+ MLX5_FLOW_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum {
+ MLX5_EXE_ASO_FLOW_METER = 0x2,
+};
+
+struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits {
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_8[0x14];
+ u8 action[0x1];
+ u8 init_color[0x2];
+ u8 meter_id[0x1];
+};
+
+union mlx5_ifc_exe_aso_ctrl {
+ struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter;
+};
+
+struct mlx5_ifc_execute_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x7];
+ u8 aso_object_id[0x18];
+
+ union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl;
};
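A minimal sketch of wiring a flow-meter ASO into a flow context via the structures above, assuming "fc" points at the flow_context of a SET_FTE command and "meter_obj_id" names a previously created ASO flow-meter object:

        void *ctrl = MLX5_ADDR_OF(flow_context, fc, execute_aso[0].exe_aso_ctrl);

        MLX5_SET(flow_context, fc, execute_aso[0].valid, 1);
        MLX5_SET(flow_context, fc, execute_aso[0].aso_object_id, meter_obj_id);
        MLX5_SET(exe_aso_ctrl_flow_meter, ctrl, aso_type, MLX5_EXE_ASO_FLOW_METER);
        MLX5_SET(exe_aso_ctrl_flow_meter, ctrl, init_color,
                 MLX5_FLOW_METER_COLOR_GREEN);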
struct mlx5_ifc_flow_context_bits {
- u8 reserved_at_0[0x20];
+ struct mlx5_ifc_vlan_bits push_vlan;
u8 group_id[0x20];
@@ -2246,23 +3914,31 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_60[0x10];
u8 action[0x10];
- u8 reserved_at_80[0x8];
+ u8 extended_destination[0x1];
+ u8 uplink_hairpin_en[0x1];
+ u8 flow_source[0x2];
+ u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 modify_header_id[0x20];
- u8 reserved_at_100[0x100];
+ struct mlx5_ifc_vlan_bits push_vlan_2;
+
+ u8 encrypt_decrypt_obj_id[0x20];
+ u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
- u8 reserved_at_1200[0x600];
+ struct mlx5_ifc_execute_aso_bits execute_aso[4];
- union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
+ u8 reserved_at_1300[0x500];
+
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[];
};
enum {
@@ -2284,7 +3960,8 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 xrcd[0x18];
u8 page_offset[0x6];
- u8 reserved_at_46[0x2];
+ u8 reserved_at_46[0x1];
+ u8 dbr_umem_valid[0x1];
u8 cqn[0x18];
u8 reserved_at_60[0x20];
@@ -2312,6 +3989,52 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 reserved_at_180[0x80];
};
+struct mlx5_ifc_vnic_diagnostic_statistics_bits {
+ u8 counter_error_queues[0x20];
+
+ u8 total_error_queues[0x20];
+
+ u8 send_queue_priority_update_flow[0x20];
+
+ u8 reserved_at_60[0x20];
+
+ u8 nic_receive_steering_discard[0x40];
+
+ u8 receive_discard_vport_down[0x40];
+
+ u8 transmit_discard_vport_down[0x40];
+
+ u8 async_eq_overrun[0x20];
+
+ u8 comp_eq_overrun[0x20];
+
+ u8 reserved_at_180[0x20];
+
+ u8 invalid_command[0x20];
+
+ u8 quota_exceeded_command[0x20];
+
+ u8 internal_rq_out_of_buffer[0x20];
+
+ u8 cq_overrun[0x20];
+
+ u8 eth_wqe_too_small[0x20];
+
+ u8 reserved_at_220[0xc0];
+
+ u8 generated_pkt_steering_fail[0x40];
+
+ u8 handled_pkt_steering_fail[0x40];
+
+ u8 bar_uar_access[0x20];
+
+ u8 odp_local_triggered_page_fault[0x20];
+
+ u8 odp_remote_triggered_page_fault[0x20];
+
+ u8 reserved_at_3c0[0xc20];
+};
+
struct mlx5_ifc_traffic_counter_bits {
u8 packets[0x40];
@@ -2320,7 +4043,8 @@ struct mlx5_ifc_traffic_counter_bits {
struct mlx5_ifc_tisc_bits {
u8 strict_lag_tx_port_affinity[0x1];
- u8 reserved_at_1[0x3];
+ u8 tls_en[0x1];
+ u8 reserved_at_2[0x2];
u8 lag_tx_port_affinity[0x04];
u8 reserved_at_8[0x4];
@@ -2334,7 +4058,11 @@ struct mlx5_ifc_tisc_bits {
u8 reserved_at_140[0x8];
u8 underlay_qpn[0x18];
- u8 reserved_at_160[0x3a0];
+
+ u8 reserved_at_160[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_180[0x380];
};
enum {
@@ -2343,8 +4071,8 @@ enum {
};
enum {
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
};
enum {
@@ -2354,21 +4082,22 @@ enum {
};
enum {
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2,
};
struct mlx5_ifc_tirc_bits {
u8 reserved_at_0[0x20];
u8 disp_type[0x4];
- u8 reserved_at_24[0x1c];
+ u8 tls_en[0x1];
+ u8 reserved_at_25[0x1b];
u8 reserved_at_40[0x40];
u8 reserved_at_80[0x4];
u8 lro_timeout_period_usecs[0x10];
- u8 lro_enable_mask[0x4];
+ u8 packet_merge_mask[0x4];
u8 lro_max_ip_payload_size[0x8];
u8 reserved_at_a0[0x40];
@@ -2455,7 +4184,11 @@ struct mlx5_ifc_sqc_bits {
u8 state[0x4];
u8 reg_umr[0x1];
u8 allow_swp[0x1];
- u8 reserved_at_e[0x12];
+ u8 hairpin[0x1];
+ u8 non_wire[0x1];
+ u8 reserved_at_10[0xa];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2463,11 +4196,21 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_40[0x8];
u8 cqn[0x18];
- u8 reserved_at_60[0x90];
+ u8 reserved_at_60[0x8];
+ u8 hairpin_peer_rq[0x18];
+
+ u8 reserved_at_80[0x10];
+ u8 hairpin_peer_vhca[0x10];
+ u8 reserved_at_a0[0x20];
+
+ u8 reserved_at_c0[0x8];
+ u8 ts_cqe_to_dest_cqn[0x18];
+
+ u8 reserved_at_e0[0x10];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
- u8 reserved_at_110[0x10];
+ u8 qos_queue_group_id[0x10];
u8 reserved_at_120[0x40];
@@ -2482,13 +4225,66 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT = 0x5,
+};
+
+enum {
+ ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0,
+ ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
+ ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
+ ELEMENT_TYPE_CAP_MASK_RATE_LIMIT = 1 << 5,
+};
+
+enum {
+ TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+ TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+ TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+ TSAR_ELEMENT_TSAR_TYPE_TC_ARB = 0x3,
+};
+
+enum {
+ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
+ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
+ TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
+ TSAR_TYPE_CAP_MASK_TC_ARB = 1 << 3,
+};
+
+struct mlx5_ifc_tsar_element_bits {
+ u8 traffic_class[0x4];
+ u8 reserved_at_4[0x4];
+ u8 tsar_type[0x8];
+ u8 reserved_at_10[0x10];
+};
+
+struct mlx5_ifc_vport_element_bits {
+ u8 reserved_at_0[0x4];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 eswitch_owner_vhca_id[0xb];
+ u8 vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_tc_element_bits {
+ u8 traffic_class[0x4];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 eswitch_owner_vhca_id[0xb];
+ u8 vport_number[0x10];
+};
+
+union mlx5_ifc_element_attributes_bits {
+ struct mlx5_ifc_tsar_element_bits tsar;
+ struct mlx5_ifc_vport_element_bits vport;
+ struct mlx5_ifc_vport_tc_element_bits vport_tc;
+ u8 reserved_at_0[0x20];
};
struct mlx5_ifc_scheduling_context_bits {
u8 element_type[0x8];
u8 reserved_at_8[0x18];
- u8 element_attributes[0x20];
+ union mlx5_ifc_element_attributes_bits element_attributes;
u8 parent_element_id[0x20];
@@ -2498,21 +4294,29 @@ struct mlx5_ifc_scheduling_context_bits {
u8 max_average_bw[0x20];
- u8 reserved_at_e0[0x120];
+ u8 max_bw_obj_id[0x20];
+
+ u8 reserved_at_100[0x100];
};
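With element_attributes now a typed union, a vport scheduling element can be built without manual offset math. A minimal sketch, assuming "sched_ctx" points at a scheduling_context inside a CREATE_SCHEDULING_ELEMENT command and "vport_num"/"max_rate" come from the caller:

        void *attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);

        MLX5_SET(scheduling_context, sched_ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
        MLX5_SET(vport_element, attr, vport_number, vport_num);
        MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);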
struct mlx5_ifc_rqtc_bits {
- u8 reserved_at_0[0xa0];
+ u8 reserved_at_0[0xa0];
- u8 reserved_at_a0[0x10];
- u8 rqt_max_size[0x10];
+ u8 reserved_at_a0[0x5];
+ u8 list_q_type[0x3];
+ u8 reserved_at_a8[0x8];
+ u8 rqt_max_size[0x10];
- u8 reserved_at_c0[0x10];
- u8 rqt_actual_size[0x10];
+ u8 rq_vhca_id_format[0x1];
+ u8 reserved_at_c1[0xf];
+ u8 rqt_actual_size[0x10];
- u8 reserved_at_e0[0x6a0];
+ u8 reserved_at_e0[0x6a0];
- struct mlx5_ifc_rq_num_bits rq_num[0];
+ union {
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_num_bits, rq_num);
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_vhca_bits, rq_vhca);
+ };
};
enum {
@@ -2526,6 +4330,18 @@ enum {
MLX5_RQC_STATE_ERR = 0x3,
};
+enum {
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_BYTE = 0x0,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE = 0x1,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_PAGE = 0x2,
+};
+
+enum {
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_NO_MATCH = 0x0,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED = 0x1,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_FIVE_TUPLE = 0x2,
+};
+
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
u8 delay_drop_en[0x1];
@@ -2535,7 +4351,10 @@ struct mlx5_ifc_rqc_bits {
u8 state[0x4];
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_at_e[0x12];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2549,7 +4368,19 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_80[0x8];
u8 rmpn[0x18];
- u8 reserved_at_a0[0xe0];
+ u8 reserved_at_a0[0x8];
+ u8 hairpin_peer_sq[0x18];
+
+ u8 reserved_at_c0[0x10];
+ u8 hairpin_peer_vhca[0x10];
+
+ u8 reserved_at_e0[0x46];
+ u8 shampo_no_match_alignment_granularity[0x2];
+ u8 reserved_at_128[0x6];
+ u8 shampo_match_criteria_type[0x2];
+ u8 reservation_timeout[0x10];
+
+ u8 reserved_at_140[0x40];
struct mlx5_ifc_wq_bits wq;
};
@@ -2572,6 +4403,11 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_wq_bits wq;
};
+enum {
+ VHCA_ID_TYPE_HW = 0,
+ VHCA_ID_TYPE_SW = 1,
+};
+
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3];
@@ -2588,8 +4424,18 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xf0];
+ u8 vhca_id_type[0x1];
+ u8 reserved_at_41[0xb];
+ u8 affiliation_criteria[0x4];
+ u8 affiliated_vhca_id[0x10];
+
+ u8 reserved_at_60[0xa0];
+ u8 reserved_at_100[0x1];
+ u8 sd_group[0x3];
+ u8 reserved_at_104[0x1c];
+
+ u8 reserved_at_120[0x10];
u8 mtu[0x10];
u8 system_image_guid[0x40];
@@ -2612,7 +4458,7 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_7e0[0x20];
- u8 current_uc_mac_address[0][0x40];
+ u8 current_uc_mac_address[][0x40];
};
enum {
@@ -2620,12 +4466,23 @@ enum {
MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
+ MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
+ MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
+ MLX5_MKC_ACCESS_MODE_CROSSING = 0x6,
+};
+
+enum {
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX = 0,
};
struct mlx5_ifc_mkc_bits {
u8 reserved_at_0[0x1];
u8 free[0x1];
- u8 reserved_at_2[0xd];
+ u8 reserved_at_2[0x1];
+ u8 access_mode_4_2[0x3];
+ u8 reserved_at_6[0x7];
+ u8 relaxed_ordering_write[0x1];
+ u8 reserved_at_e[0x1];
u8 small_fence_on_rdma_read_response[0x1];
u8 umr_en[0x1];
u8 a[0x1];
@@ -2633,8 +4490,10 @@ struct mlx5_ifc_mkc_bits {
u8 rr[0x1];
u8 lw[0x1];
u8 lr[0x1];
- u8 access_mode[0x2];
- u8 reserved_at_18[0x8];
+ u8 access_mode_1_0[0x2];
+ u8 reserved_at_18[0x2];
+ u8 ma_translation_mode[0x2];
+ u8 reserved_at_1c[0x4];
u8 qpn[0x18];
u8 mkey_7_0[0x8];
@@ -2656,14 +4515,22 @@ struct mlx5_ifc_mkc_bits {
u8 bsf_octword_size[0x20];
- u8 reserved_at_120[0x80];
+ u8 reserved_at_120[0x60];
+
+ u8 crossing_target_vhca_id[0x10];
+ u8 reserved_at_190[0x10];
u8 translations_octword_size[0x20];
- u8 reserved_at_1c0[0x1b];
- u8 log_page_size[0x5];
+ u8 reserved_at_1c0[0x19];
+ u8 relaxed_ordering_read[0x1];
+ u8 log_page_size[0x6];
- u8 reserved_at_1e0[0x20];
+ u8 reserved_at_1e0[0x5];
+ u8 pcie_tph_en[0x1];
+ u8 pcie_tph_ph[0x2];
+ u8 pcie_tph_steering_tag_index[0x8];
+ u8 reserved_at_1f0[0x10];
};
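The mkey access mode is now split across access_mode_1_0 and access_mode_4_2 so the enum can grow past four values. A minimal sketch of programming a 5-bit mode such as MLX5_MKC_ACCESS_MODE_SW_ICM, assuming "mkc" points at the mkc section of a CREATE_MKEY input:

        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_SW_ICM & 0x3);
        MLX5_SET(mkc, mkc, access_mode_4_2,
                 (MLX5_MKC_ACCESS_MODE_SW_ICM >> 2) & 0x7);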
struct mlx5_ifc_pkey_bits {
@@ -2684,7 +4551,8 @@ struct mlx5_ifc_hca_vport_context_bits {
u8 has_smi[0x1];
u8 has_raw[0x1];
u8 grh_required[0x1];
- u8 reserved_at_104[0xc];
+ u8 reserved_at_104[0x4];
+ u8 num_port_plane[0x8];
u8 port_physical_state[0x4];
u8 vport_state_policy[0x4];
u8 port_state[0x4];
@@ -2725,12 +4593,14 @@ struct mlx5_ifc_hca_vport_context_bits {
};
struct mlx5_ifc_esw_vport_context_bits {
- u8 reserved_at_0[0x3];
+ u8 fdb_to_vport_reg_c[0x1];
+ u8 reserved_at_1[0x2];
u8 vport_svlan_strip[0x1];
u8 vport_cvlan_strip[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert[0x2];
- u8 reserved_at_8[0x18];
+ u8 fdb_to_vport_reg_c_id[0x8];
+ u8 reserved_at_10[0x10];
u8 reserved_at_20[0x20];
@@ -2741,7 +4611,11 @@ struct mlx5_ifc_esw_vport_context_bits {
u8 cvlan_pcp[0x3];
u8 cvlan_id[0xc];
- u8 reserved_at_60[0x7a0];
+ u8 reserved_at_60[0x720];
+
+ u8 sw_steering_vport_icm_address_rx[0x40];
+
+ u8 sw_steering_vport_icm_address_tx[0x40];
};
enum {
@@ -2775,8 +4649,8 @@ struct mlx5_ifc_eqc_bits {
u8 reserved_at_80[0x20];
- u8 reserved_at_a0[0x18];
- u8 intr[0x8];
+ u8 reserved_at_a0[0x14];
+ u8 intr[0xc];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -2818,7 +4692,8 @@ struct mlx5_ifc_dctc_bits {
u8 state[0x4];
u8 reserved_at_8[0x18];
- u8 reserved_at_20[0x8];
+ u8 reserved_at_20[0x7];
+ u8 dp_ordering_force[0x1];
u8 user_index[0x18];
u8 reserved_at_40[0x8];
@@ -2833,7 +4708,9 @@ struct mlx5_ifc_dctc_bits {
u8 latency_sensitive[0x1];
u8 rlky[0x1];
u8 free_ar[0x1];
- u8 reserved_at_73[0xd];
+ u8 reserved_at_73[0x1];
+ u8 dp_ordering_1[0x1];
+ u8 reserved_at_75[0xb];
u8 reserved_at_80[0x8];
u8 cs_res[0x8];
@@ -2871,7 +4748,8 @@ struct mlx5_ifc_dctc_bits {
u8 ecn[0x2];
u8 dscp[0x6];
- u8 reserved_at_1c0[0x40];
+ u8 reserved_at_1c0[0x20];
+ u8 ece[0x20];
};
enum {
@@ -2891,15 +4769,17 @@ enum {
MLX5_CQC_ST_FIRED = 0xa,
};
-enum {
+enum mlx5_cq_period_mode {
MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
- MLX5_CQ_PERIOD_NUM_MODES
+ MLX5_CQ_PERIOD_NUM_MODES,
};
struct mlx5_ifc_cqc_bits {
u8 status[0x4];
- u8 reserved_at_4[0x4];
+ u8 reserved_at_4[0x2];
+ u8 dbr_umem_valid[0x1];
+ u8 apu_cq[0x1];
u8 cqe_sz[0x3];
u8 cc[0x1];
u8 reserved_at_c[0x1];
@@ -2909,7 +4789,8 @@ struct mlx5_ifc_cqc_bits {
u8 cqe_comp_en[0x1];
u8 mini_cqe_res_format[0x2];
u8 st[0x4];
- u8 reserved_at_18[0x8];
+ u8 reserved_at_18[0x6];
+ u8 cqe_compression_layout[0x2];
u8 reserved_at_20[0x20];
@@ -2925,8 +4806,7 @@ struct mlx5_ifc_cqc_bits {
u8 cq_period[0xc];
u8 cq_max_count[0x10];
- u8 reserved_at_a0[0x18];
- u8 c_eqn[0x8];
+ u8 c_eqn_or_apu_element[0x20];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
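c_eqn_or_apu_element now spans the full dword: it carries the EQ number for an ordinary CQ, or the APU element when apu_cq is set. A minimal sketch for the common EQ-bound case, assuming "cqc" points at the cq_context of a CREATE_CQ input and "eqn" was returned by EQ allocation:

        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);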
@@ -2955,6 +4835,7 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits {
struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ struct mlx5_ifc_cong_control_r_roce_general_bits cong_control_r_roce_general;
u8 reserved_at_0[0x800];
};
@@ -3033,6 +4914,11 @@ union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
u8 reserved_at_0[0x20];
};
+struct mlx5_ifc_rs_histogram_cntrs_bits {
+ u8 hist[16][0x40];
+ u8 reserved_at_400[0x2c0];
+};
+
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
@@ -3040,10 +4926,14 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
- struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout;
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
+ struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits ib_ext_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
+ struct mlx5_ifc_phys_layer_recovery_cntrs_bits phys_layer_recovery_cntrs;
+ struct mlx5_ifc_rs_histogram_cntrs_bits rs_histogram_cntrs;
u8 reserved_at_0[0x7c0];
};
@@ -3075,13 +4965,19 @@ struct mlx5_ifc_health_buffer_bits {
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40];
+ u8 reserved_at_140[0x20];
+
+ u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20];
+ u8 rfr[0x1];
+ u8 reserved_at_1c1[0x3];
+ u8 valid[0x1];
+ u8 severity[0x3];
+ u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8];
u8 synd[0x8];
@@ -3097,29 +4993,6 @@ struct mlx5_ifc_register_loopback_control_bits {
u8 reserved_at_20[0x60];
};
-struct mlx5_ifc_vport_tc_element_bits {
- u8 traffic_class[0x4];
- u8 reserved_at_4[0xc];
- u8 vport_number[0x10];
-};
-
-struct mlx5_ifc_vport_element_bits {
- u8 reserved_at_0[0x10];
- u8 vport_number[0x10];
-};
-
-enum {
- TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
- TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
- TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
-};
-
-struct mlx5_ifc_tsar_element_bits {
- u8 reserved_at_0[0x8];
- u8 tsar_type[0x8];
- u8 reserved_at_10[0x10];
-};
-
enum {
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0,
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1,
@@ -3133,12 +5006,13 @@ struct mlx5_ifc_teardown_hca_out_bits {
u8 reserved_at_40[0x3f];
- u8 force_state[0x1];
+ u8 state[0x1];
};
enum {
MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2,
};
struct mlx5_ifc_teardown_hca_in_bits {
@@ -3165,7 +5039,7 @@ struct mlx5_ifc_sqerr2rts_qp_out_bits {
struct mlx5_ifc_sqerr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3195,7 +5069,7 @@ struct mlx5_ifc_sqd2rts_qp_out_bits {
struct mlx5_ifc_sqd2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3231,7 +5105,8 @@ struct mlx5_ifc_set_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
@@ -3289,7 +5164,10 @@ struct mlx5_ifc_set_l2_table_entry_in_bits {
u8 reserved_at_c0[0x20];
- u8 reserved_at_e0[0x13];
+ u8 reserved_at_e0[0x10];
+ u8 silent_mode_valid[0x1];
+ u8 silent_mode[0x1];
+ u8 reserved_at_f2[0x1];
u8 vlan_valid[0x1];
u8 vlan[0xc];
@@ -3336,7 +5214,14 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_function[0x1];
+ u8 ec_vf_function[0x1];
+ u8 reserved_at_42[0x1];
+ u8 function_id_type[0x1];
+ u8 reserved_at_44[0xc];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
union mlx5_ifc_hca_cap_union_bits capability;
};
@@ -3345,7 +5230,8 @@ enum {
MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
- MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_IPSEC_OBJ_ID = 0x4
};
struct mlx5_ifc_set_fte_out_bits {
@@ -3365,18 +5251,21 @@ struct mlx5_ifc_set_fte_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_at_c0[0x18];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_c1[0x17];
u8 modify_enable_mask[0x8];
u8 reserved_at_e0[0x20];
@@ -3388,18 +5277,29 @@ struct mlx5_ifc_set_fte_in_bits {
struct mlx5_ifc_flow_context_bits flow_context;
};
+struct mlx5_ifc_dest_format_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 destination_eswitch_owner_vhca_id_valid[0x1];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0xe];
+ u8 destination_eswitch_owner_vhca_id[0x10];
+};
+
struct mlx5_ifc_rts2rts_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rts2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3411,7 +5311,7 @@ struct mlx5_ifc_rts2rts_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -3424,12 +5324,13 @@ struct mlx5_ifc_rtr2rts_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rtr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3441,7 +5342,7 @@ struct mlx5_ifc_rtr2rts_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -3454,12 +5355,13 @@ struct mlx5_ifc_rst2init_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rst2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3471,7 +5373,7 @@ struct mlx5_ifc_rst2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -3514,7 +5416,7 @@ struct mlx5_ifc_query_xrc_srq_out_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_xrc_srq_in_bits {
@@ -3548,9 +5450,117 @@ struct mlx5_ifc_query_vport_state_out_bits {
u8 state[0x4];
};
+struct mlx5_ifc_array1024_auto_bits {
+ u8 array1024_auto[32][0x20];
+};
+
+struct mlx5_ifc_query_vuid_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x40];
+
+ u8 query_vfs_vuid[0x1];
+ u8 data_direct[0x1];
+ u8 reserved_at_62[0xe];
+ u8 vhca_id[0x10];
+};
+
+struct mlx5_ifc_query_vuid_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1a0];
+
+ u8 reserved_at_1e0[0x10];
+ u8 num_of_entries[0x10];
+
+ struct mlx5_ifc_array1024_auto_bits vuid[];
+};
+
enum {
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2,
+};
+
+struct mlx5_ifc_arm_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_arm_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT = 0x0,
+ MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1,
+};
+
+enum mlx5_monitor_counter_ppcnt {
+ MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0,
+ MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3,
+ MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4,
+ MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5,
+};
+
+enum {
+ MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4,
+};
+
+struct mlx5_ifc_monitor_counter_output_bits {
+ u8 reserved_at_0[0x4];
+ u8 type[0x4];
+ u8 reserved_at_8[0x8];
+ u8 counter[0x10];
+
+ u8 counter_group_id[0x20];
+};
+
+#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6)
+#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 (1)
+#define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\
+ MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1)
+
+struct mlx5_ifc_set_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 num_of_counters[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER];
+};
+
+struct mlx5_ifc_set_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
};
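A minimal sketch of arming a single PPCNT monitor counter through the command pair above, assuming "dev" is the mlx5_core_dev and the SET_MONITOR_COUNTER opcode constant:

        u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
        int err;

        MLX5_SET(set_monitor_counter_in, in, opcode,
                 MLX5_CMD_OP_SET_MONITOR_COUNTER);
        MLX5_SET(set_monitor_counter_in, in, num_of_counters, 1);
        MLX5_SET(set_monitor_counter_in, in, monitor_counter[0].type,
                 MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT);
        MLX5_SET(set_monitor_counter_in, in, monitor_counter[0].counter,
                 MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS);
        err = mlx5_cmd_exec_in(dev, set_monitor_counter, in);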
struct mlx5_ifc_query_vport_state_in_bits {
@@ -3567,6 +5577,35 @@ struct mlx5_ifc_query_vport_state_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_vnic_env_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env;
+};
+
+enum {
+ MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0,
+};
+
+struct mlx5_ifc_query_vnic_env_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
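A minimal sketch of reading one of the new vNIC diagnostics through this command, assuming "dev" is the mlx5_core_dev and the QUERY_VNIC_ENV opcode constant; a real caller would heap-allocate the output buffer:

        u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
        u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
        u64 rx_steer_discard = 0;

        MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
        if (!mlx5_cmd_exec_inout(dev, query_vnic_env, in, out))
                rx_steer_discard = MLX5_GET64(query_vnic_env_out, out,
                                              vport_env.nic_receive_steering_discard);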
struct mlx5_ifc_query_vport_counter_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3599,7 +5638,9 @@ struct mlx5_ifc_query_vport_counter_out_bits {
struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
- u8 reserved_at_680[0xa00];
+ struct mlx5_ifc_traffic_counter_bits local_loopback;
+
+ u8 reserved_at_700[0x980];
};
enum {
@@ -3686,7 +5727,7 @@ struct mlx5_ifc_query_srq_out_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_srq_in_bits {
@@ -3738,7 +5779,11 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 null_mkey[0x20];
- u8 reserved_at_a0[0x60];
+ u8 terminate_scatter_list_mkey[0x20];
+
+ u8 repeated_mkey[0x20];
+
+ u8 reserved_at_a0[0x20];
};
struct mlx5_ifc_query_special_contexts_in_bits {
@@ -3767,6 +5812,7 @@ struct mlx5_ifc_query_scheduling_element_out_bits {
enum {
SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+ SCHEDULING_HIERARCHY_NIC = 0x3,
};
struct mlx5_ifc_query_scheduling_element_in_bits {
@@ -3851,7 +5897,8 @@ struct mlx5_ifc_query_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
};
@@ -3880,6 +5927,37 @@ struct mlx5_ifc_query_rmp_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_cqe_error_syndrome_bits {
+ u8 hw_error_syndrome[0x8];
+ u8 hw_syndrome_type[0x4];
+ u8 reserved_at_c[0x4];
+ u8 vendor_error_syndrome[0x8];
+ u8 syndrome[0x8];
+};
+
+struct mlx5_ifc_qp_context_extension_bits {
+ u8 reserved_at_0[0x60];
+
+ struct mlx5_ifc_cqe_error_syndrome_bits error_syndrome;
+
+ u8 reserved_at_80[0x580];
+};
+
+struct mlx5_ifc_qpc_extension_and_pas_list_in_bits {
+ struct mlx5_ifc_qp_context_extension_bits qpc_data_extension;
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_qp_pas_list_in_bits {
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits {
+ struct mlx5_ifc_qp_pas_list_in_bits qp_pas_list;
+ struct mlx5_ifc_qpc_extension_and_pas_list_in_bits qpc_ext_and_pas_list;
+};
+
struct mlx5_ifc_query_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3890,13 +5968,13 @@ struct mlx5_ifc_query_qp_out_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
u8 reserved_at_800[0x80];
- u8 pas[0][0x40];
+ union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits qp_pas_or_qpc_ext_and_pas;
};
struct mlx5_ifc_query_qp_in_bits {
@@ -3906,7 +5984,8 @@ struct mlx5_ifc_query_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 qpn[0x18];
u8 reserved_at_60[0x20];
@@ -3962,7 +6041,11 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 local_ack_timeout_err[0x20];
- u8 reserved_at_320[0xa0];
+ u8 reserved_at_320[0x60];
+
+ u8 req_rnr_retries_exceeded[0x20];
+
+ u8 reserved_at_3a0[0x20];
u8 resp_local_length_error[0x20];
@@ -4002,7 +6085,19 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 req_cqe_flush_error[0x20];
- u8 reserved_at_620[0x1e0];
+ u8 reserved_at_620[0x20];
+
+ u8 roce_adp_retrans[0x20];
+
+ u8 roce_adp_retrans_to[0x20];
+
+ u8 roce_slow_restart[0x20];
+
+ u8 roce_slow_restart_cnps[0x20];
+
+ u8 roce_slow_restart_trans[0x20];
+
+ u8 reserved_at_6e0[0x120];
};
struct mlx5_ifc_query_q_counter_in_bits {
@@ -4012,10 +6107,15 @@ struct mlx5_ifc_query_q_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x80];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x60];
u8 clear[0x1];
- u8 reserved_at_c1[0x1f];
+ u8 aggregate[0x1];
+ u8 reserved_at_c2[0x1e];
u8 reserved_at_e0[0x18];
u8 counter_set_id[0x8];
@@ -4027,7 +6127,8 @@ struct mlx5_ifc_query_pages_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 num_pages[0x20];
@@ -4046,7 +6147,8 @@ struct mlx5_ifc_query_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -4215,7 +6317,7 @@ struct mlx5_ifc_query_hca_vport_pkey_out_bits {
u8 reserved_at_40[0x40];
- struct mlx5_ifc_pkey_bits pkey[0];
+ struct mlx5_ifc_pkey_bits pkey[];
};
struct mlx5_ifc_query_hca_vport_pkey_in_bits {
@@ -4251,7 +6353,7 @@ struct mlx5_ifc_query_hca_vport_gid_out_bits {
u8 gids_num[0x10];
u8 reserved_at_70[0x10];
- struct mlx5_ifc_array128_auto_bits gid[0];
+ struct mlx5_ifc_array128_auto_bits gid[];
};
struct mlx5_ifc_query_hca_vport_gid_in_bits {
@@ -4314,23 +6416,116 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
+ u8 other_function[0x1];
+ u8 ec_vf_function[0x1];
+ u8 reserved_at_42[0x1];
+ u8 function_id_type[0x1];
+ u8 reserved_at_44[0xc];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
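other_function plus function_id let a management function query the capabilities of another function. A minimal sketch, assuming "dev", a target "func_id", and the usual op_mod encoding (capability type shifted left one bit, low bit selecting current vs. maximum caps):

        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
        void *out = kzalloc(out_sz, GFP_KERNEL);
        int err;

        if (!out)
                return -ENOMEM;
        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod,
                 MLX5_CAP_GENERAL << 1 | HCA_CAP_OPMOD_GET_CUR);
        MLX5_SET(query_hca_cap_in, in, other_function, 1);
        MLX5_SET(query_hca_cap_in, in, function_id, func_id);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);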
+struct mlx5_ifc_other_hca_cap_bits {
+ u8 roce[0x1];
+ u8 reserved_at_1[0x27f];
+};
+
+struct mlx5_ifc_query_other_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_other_hca_cap_bits other_capability;
};
-struct mlx5_ifc_query_flow_table_out_bits {
+struct mlx5_ifc_query_other_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_modify_other_hca_cap_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x80];
+ u8 reserved_at_40[0x40];
+};
- u8 reserved_at_c0[0x8];
+struct mlx5_ifc_modify_other_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+ u8 field_select[0x20];
+
+ struct mlx5_ifc_other_hca_cap_bits other_capability;
+};
+
+struct mlx5_ifc_sw_owner_icm_root_params_bits {
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
+};
+
+struct mlx5_ifc_rtc_params_bits {
+ u8 rtc_id_0[0x20];
+
+ u8 rtc_id_1[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_flow_table_context_bits {
+ u8 reformat_en[0x1];
+ u8 decap_en[0x1];
+ u8 sw_owner[0x1];
+ u8 termination_table[0x1];
+ u8 table_miss_action[0x4];
u8 level[0x8];
- u8 reserved_at_d0[0x8];
+ u8 rtc_valid[0x1];
+ u8 reserved_at_11[0x7];
u8 log_size[0x8];
- u8 reserved_at_e0[0x120];
+ u8 reserved_at_20[0x8];
+ u8 table_miss_id[0x18];
+
+ u8 reserved_at_40[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_60[0x60];
+
+ union {
+ struct mlx5_ifc_sw_owner_icm_root_params_bits sws;
+ struct mlx5_ifc_rtc_params_bits hws;
+ };
+};
+
+struct mlx5_ifc_query_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x80];
+
+ struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_query_flow_table_in_bits {
@@ -4384,10 +6579,375 @@ struct mlx5_ifc_query_fte_in_bits {
u8 reserved_at_120[0xe0];
};
+struct mlx5_ifc_match_definer_format_0_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_dmac_15_0[0x10];
+ u8 outer_ethertype[0x10];
+
+ u8 reserved_at_180[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 outer_l4_type_ext[0x4];
+ u8 reserved_at_1a4[0x2];
+ u8 outer_ipsec_layer[0x2];
+ u8 outer_l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 outer_l2_ok[0x1];
+ u8 outer_l3_ok[0x1];
+ u8 outer_l4_ok[0x1];
+ u8 outer_second_vlan_type[0x2];
+ u8 outer_second_vlan_prio[0x3];
+ u8 outer_second_vlan_cfi[0x1];
+ u8 outer_second_vlan_vid[0xc];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 inner_ipv4_checksum_ok[0x1];
+ u8 inner_l4_checksum_ok[0x1];
+ u8 outer_ipv4_checksum_ok[0x1];
+ u8 outer_l4_checksum_ok[0x1];
+ u8 inner_l3_ok[0x1];
+ u8 inner_l4_ok[0x1];
+ u8 outer_l3_ok_duplicate[0x1];
+ u8 outer_l4_ok_duplicate[0x1];
+ u8 outer_tcp_cwr[0x1];
+ u8 outer_tcp_ece[0x1];
+ u8 outer_tcp_urg[0x1];
+ u8 outer_tcp_ack[0x1];
+ u8 outer_tcp_psh[0x1];
+ u8 outer_tcp_rst[0x1];
+ u8 outer_tcp_syn[0x1];
+ u8 outer_tcp_fin[0x1];
+};
+
+struct mlx5_ifc_match_definer_format_22_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 outer_ip_src_addr[0x20];
+
+ u8 outer_ip_dest_addr[0x20];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_23_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 inner_ip_src_addr[0x20];
+
+ u8 inner_ip_dest_addr[0x20];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 inner_ip_frag[0x1];
+ u8 inner_qp_type[0x2];
+ u8 inner_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 inner_l3_type[0x2];
+ u8 inner_l4_type[0x2];
+ u8 inner_first_vlan_type[0x2];
+ u8 inner_first_vlan_prio[0x3];
+ u8 inner_first_vlan_cfi[0x1];
+ u8 inner_first_vlan_vid[0xc];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_29_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_30_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_31_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_32_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+enum {
+ MLX5_IFC_DEFINER_FORMAT_ID_SELECT = 61,
+};
+
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED 0x0
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN 0x48
+#define MLX5_IFC_DEFINER_DW_SELECTORS_NUM 9
+#define MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM 8
+
+struct mlx5_ifc_match_definer_match_mask_bits {
+ u8 reserved_at_1c0[5][0x20];
+ u8 match_dw_8[0x20];
+ u8 match_dw_7[0x20];
+ u8 match_dw_6[0x20];
+ u8 match_dw_5[0x20];
+ u8 match_dw_4[0x20];
+ u8 match_dw_3[0x20];
+ u8 match_dw_2[0x20];
+ u8 match_dw_1[0x20];
+ u8 match_dw_0[0x20];
+
+ u8 match_byte_7[0x8];
+ u8 match_byte_6[0x8];
+ u8 match_byte_5[0x8];
+ u8 match_byte_4[0x8];
+
+ u8 match_byte_3[0x8];
+ u8 match_byte_2[0x8];
+ u8 match_byte_1[0x8];
+ u8 match_byte_0[0x8];
+};
+
+struct mlx5_ifc_match_definer_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x10];
+ u8 format_id[0x10];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+
+ u8 reserved_at_180[0x40];
+
+ union {
+ struct {
+ u8 match_mask[16][0x20];
+ };
+ struct mlx5_ifc_match_definer_match_mask_bits match_mask_format;
+ };
+};
+
+struct mlx5_ifc_general_obj_create_param_bits {
+ u8 alias_object[0x1];
+ u8 reserved_at_1[0x2];
+ u8 log_obj_range[0x5];
+ u8 reserved_at_8[0x18];
+};
+
+struct mlx5_ifc_general_obj_query_param_bits {
+ u8 alias_object[0x1];
+ u8 obj_offset[0x1f];
+};
+
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 vhca_tunnel_id[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ union {
+ struct mlx5_ifc_general_obj_create_param_bits create;
+ struct mlx5_ifc_general_obj_query_param_bits query;
+ } op_param;
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_allow_other_vhca_access_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x50];
+ u8 object_type_to_be_accessed[0x10];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_c0[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+};
+
+struct mlx5_ifc_allow_other_vhca_access_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_header_arg_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 reserved_at_80[0x8];
+ u8 access_pd[0x18];
+};
+
+struct mlx5_ifc_create_modify_header_arg_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_modify_header_arg_bits arg;
+};
+
+struct mlx5_ifc_create_match_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_match_definer_bits obj_context;
+};
+
+struct mlx5_ifc_create_match_definer_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
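Match definers are created through the general-object machinery above. A minimal sketch, assuming "dev", a SELECT-format "format_id", and the general-object opcode/type constants; programming the dword/byte selectors and match mask is elided:

        u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
        u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
        void *hdr = MLX5_ADDR_OF(create_match_definer_in, in,
                                 general_obj_in_cmd_hdr);
        void *obj = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
        u32 definer_id = 0;

        MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
                 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
                 MLX5_OBJ_TYPE_MATCH_DEFINER);
        MLX5_SET(match_definer, obj, format_id, format_id);
        if (!mlx5_cmd_exec_inout(dev, create_match_definer, in, out))
                definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);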
+struct mlx5_ifc_alias_context_bits {
+ u8 vhca_id_to_be_accessed[0x10];
+ u8 reserved_at_10[0xd];
+ u8 status[0x3];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_40[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+ u8 metadata[0x80];
+};
+
+struct mlx5_ifc_create_alias_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_alias_context_bits alias_ctx;
+};
+
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_5 = 0x6,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -4442,7 +7002,7 @@ struct mlx5_ifc_query_flow_counter_out_bits {
u8 reserved_at_40[0x40];
- struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
+ struct mlx5_ifc_traffic_counter_bits flow_statistics[];
};
struct mlx5_ifc_query_flow_counter_in_bits {
@@ -4458,8 +7018,7 @@ struct mlx5_ifc_query_flow_counter_in_bits {
u8 reserved_at_c1[0xf];
u8 num_of_counters[0x10];
- u8 reserved_at_e0[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
};
struct mlx5_ifc_query_esw_vport_context_out_bits {
@@ -4487,6 +7046,28 @@ struct mlx5_ifc_query_esw_vport_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_destroy_esw_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_destroy_esw_vport_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vport_num[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_modify_esw_vport_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4497,7 +7078,8 @@ struct mlx5_ifc_modify_esw_vport_context_out_bits {
};
struct mlx5_ifc_esw_vport_context_fields_select_bits {
- u8 reserved_at_0[0x1c];
+ u8 reserved_at_0[0x1b];
+ u8 fdb_to_vport_reg_c_id[0x1];
u8 vport_cvlan_insert[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_strip[0x1];
@@ -4536,7 +7118,7 @@ struct mlx5_ifc_query_eq_out_bits {
u8 reserved_at_300[0x580];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_eq_in_bits {
@@ -4552,19 +7134,21 @@ struct mlx5_ifc_query_eq_in_bits {
u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_encap_header_in_bits {
- u8 reserved_at_0[0x5];
- u8 header_type[0x3];
- u8 reserved_at_8[0xe];
- u8 encap_header_size[0xa];
+struct mlx5_ifc_packet_reformat_context_in_bits {
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_10[0x6];
+ u8 reformat_data_size[0xa];
- u8 reserved_at_20[0x10];
- u8 encap_header[2][0x8];
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_28[0x8];
+ u8 reformat_data[2][0x8];
- u8 more_encap_header[0][0x8];
+ u8 more_reformat_data[][0x8];
};
-struct mlx5_ifc_query_encap_header_out_bits {
+struct mlx5_ifc_query_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4572,33 +7156,62 @@ struct mlx5_ifc_query_encap_header_out_bits {
u8 reserved_at_40[0xa0];
- struct mlx5_ifc_encap_header_in_bits encap_header[0];
+ struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[];
};
-struct mlx5_ifc_query_encap_header_in_bits {
+struct mlx5_ifc_query_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_at_60[0xa0];
};
-struct mlx5_ifc_alloc_encap_header_out_bits {
+struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_alloc_encap_header_in_bits {
+enum {
+ MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START = 0x2,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9,
+};
+
+enum mlx5_reformat_ctx_type {
+ MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
+ MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
+ MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
+ MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xa,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xc,
+ MLX5_REFORMAT_TYPE_ADD_PSP_TUNNEL = 0xd,
+ MLX5_REFORMAT_TYPE_DEL_PSP_TUNNEL = 0xe,
+ MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
+ MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
+ MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
+ MLX5_REFORMAT_TYPE_DEL_MACSEC = 0x12,
+};
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4607,10 +7220,10 @@ struct mlx5_ifc_alloc_encap_header_in_bits {
u8 reserved_at_40[0xa0];
- struct mlx5_ifc_encap_header_in_bits encap_header;
+ struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context;
};
-struct mlx5_ifc_dealloc_encap_header_out_bits {
+struct mlx5_ifc_dealloc_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4619,14 +7232,14 @@ struct mlx5_ifc_dealloc_encap_header_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_dealloc_encap_header_in_bits {
+struct mlx5_ifc_dealloc_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_20[0x10];
u8 op_mod[0x10];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_60[0x20];
};
@@ -4650,15 +7263,32 @@ struct mlx5_ifc_add_action_in_bits {
u8 data[0x20];
};
-union mlx5_ifc_set_action_in_add_action_in_auto_bits {
- struct mlx5_ifc_set_action_in_bits set_action_in;
- struct mlx5_ifc_add_action_in_bits add_action_in;
+struct mlx5_ifc_copy_action_in_bits {
+ u8 action_type[0x4];
+ u8 src_field[0xc];
+ u8 reserved_at_10[0x3];
+ u8 src_offset[0x5];
+ u8 reserved_at_18[0x3];
+ u8 length[0x5];
+
+ u8 reserved_at_20[0x4];
+ u8 dst_field[0xc];
+ u8 reserved_at_30[0x3];
+ u8 dst_offset[0x5];
+ u8 reserved_at_38[0x8];
+};
+
+union mlx5_ifc_set_add_copy_action_in_auto_bits {
+ struct mlx5_ifc_set_action_in_bits set_action_in;
+ struct mlx5_ifc_add_action_in_bits add_action_in;
+ struct mlx5_ifc_copy_action_in_bits copy_action_in;
u8 reserved_at_0[0x40];
};
enum {
MLX5_ACTION_TYPE_SET = 0x1,
MLX5_ACTION_TYPE_ADD = 0x2,
+ MLX5_ACTION_TYPE_COPY = 0x3,
};
enum {
@@ -4684,7 +7314,24 @@ enum {
MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14,
MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15,
MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16,
+ MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17,
MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_6 = 0x57,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
+ MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70,
+ MLX5_ACTION_IN_FIELD_PSP_SYNDROME = 0x71,
};
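MLX5_ACTION_TYPE_COPY pairs with the copy_action_in layout above to move bits between fields in a modify-header context. A minimal sketch that copies 16 bits of metadata register C0 into register B, assuming "modact" points at one action slot of an ALLOC_MODIFY_HEADER_CONTEXT input; the encoding of a full-dword length is not shown:

        MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
        MLX5_SET(copy_action_in, modact, src_field,
                 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
        MLX5_SET(copy_action_in, modact, src_offset, 0);
        MLX5_SET(copy_action_in, modact, length, 16);
        MLX5_SET(copy_action_in, modact, dst_field,
                 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
        MLX5_SET(copy_action_in, modact, dst_offset, 0);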
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -4711,7 +7358,7 @@ struct mlx5_ifc_alloc_modify_header_context_in_bits {
u8 reserved_at_68[0x10];
u8 num_of_actions[0x8];
- union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0];
+ union mlx5_ifc_set_add_copy_action_in_auto_bits actions[];
};
struct mlx5_ifc_dealloc_modify_header_context_out_bits {
@@ -4735,6 +7382,18 @@ struct mlx5_ifc_dealloc_modify_header_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4773,7 +7432,7 @@ struct mlx5_ifc_query_cq_out_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_cq_in_bits {
@@ -4913,6 +7572,85 @@ struct mlx5_ifc_query_adapter_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_function_vhca_rid_info_reg_bits {
+ u8 host_number[0x8];
+ u8 host_pci_device_function[0x8];
+ u8 host_pci_bus[0x8];
+ u8 reserved_at_18[0x3];
+ u8 pci_bus_assigned[0x1];
+ u8 function_type[0x4];
+
+ u8 parent_pci_device_function[0x8];
+ u8 parent_pci_bus[0x8];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_delegated_function_vhca_rid_info_bits {
+ struct mlx5_ifc_function_vhca_rid_info_reg_bits function_vhca_rid_info;
+
+ u8 reserved_at_80[0x18];
+ u8 manage_profile[0x8];
+
+ u8 reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_query_delegated_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 functions_count[0x10];
+
+ u8 reserved_at_80[0x80];
+
+ struct mlx5_ifc_delegated_function_vhca_rid_info_bits
+ delegated_function_vhca_rid_info[];
+};
+
+struct mlx5_ifc_query_delegated_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_esw_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 vport_num[0x10];
+};
+
+struct mlx5_ifc_create_esw_vport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 managed_vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_qp_2rst_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4924,7 +7662,7 @@ struct mlx5_ifc_qp_2rst_out_bits {
struct mlx5_ifc_qp_2rst_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -4946,7 +7684,7 @@ struct mlx5_ifc_qp_2err_out_bits {
struct mlx5_ifc_qp_2err_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -4957,6 +7695,30 @@ struct mlx5_ifc_qp_2err_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_trans_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0x4];
+ u8 page_fault_type[0x3];
+ u8 wq_number[0x18];
+
+ u8 reserved_at_20[0x8];
+ u8 fault_token[0x18];
+};
+
+struct mlx5_ifc_mem_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0xf];
+ u8 fault_token_47_32[0x10];
+
+ u8 fault_token_31_0[0x20];
+};
+
+union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits {
+ struct mlx5_ifc_trans_page_fault_info_bits trans_page_fault_info;
+ struct mlx5_ifc_mem_page_fault_info_bits mem_page_fault_info;
+ u8 reserved_at_0[0x40];
+};
+
struct mlx5_ifc_page_fault_resume_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4973,13 +7735,8 @@ struct mlx5_ifc_page_fault_resume_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 error[0x1];
- u8 reserved_at_41[0x4];
- u8 page_fault_type[0x3];
- u8 wq_number[0x18];
-
- u8 reserved_at_60[0x8];
- u8 token[0x18];
+ union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits
+ page_fault_info;
};
struct mlx5_ifc_nop_out_bits {
@@ -5021,7 +7778,12 @@ struct mlx5_ifc_modify_vport_state_in_bits {
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_at_60[0x18];
+ u8 reserved_at_60[0x10];
+ u8 ingress_connect[0x1];
+ u8 egress_connect[0x1];
+ u8 ingress_connect_valid[0x1];
+ u8 egress_connect_valid[0x1];
+ u8 reserved_at_74[0x4];
u8 admin_state[0x4];
u8 reserved_at_7c[0x4];
};
@@ -5046,7 +7808,7 @@ struct mlx5_ifc_modify_tis_bitmask_bits {
struct mlx5_ifc_modify_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5071,7 +7833,7 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved_at_3c[0x1];
u8 hash[0x1];
u8 reserved_at_3e[0x1];
- u8 lro[0x1];
+ u8 packet_merge[0x1];
};
struct mlx5_ifc_modify_tir_out_bits {
@@ -5085,7 +7847,7 @@ struct mlx5_ifc_modify_tir_out_bits {
struct mlx5_ifc_modify_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5113,7 +7875,7 @@ struct mlx5_ifc_modify_sq_out_bits {
struct mlx5_ifc_modify_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5186,7 +7948,7 @@ struct mlx5_ifc_rqt_bitmask_bits {
struct mlx5_ifc_modify_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5220,7 +7982,7 @@ enum {
struct mlx5_ifc_modify_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5256,7 +8018,7 @@ struct mlx5_ifc_rmp_bitmask_bits {
struct mlx5_ifc_modify_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5284,7 +8046,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x14];
+ u8 reserved_at_0[0x12];
+ u8 affiliation[0x1];
+ u8 reserved_at_13[0x1];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
@@ -5359,7 +8123,7 @@ enum {
struct mlx5_ifc_modify_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5371,9 +8135,14 @@ struct mlx5_ifc_modify_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
- u8 pas[0][0x40];
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x1f];
+
+ u8 reserved_at_300[0x580];
+
+ u8 pas[][0x40];
};
struct mlx5_ifc_modify_cong_status_out_bits {
@@ -5437,7 +8206,7 @@ struct mlx5_ifc_manage_pages_out_bits {
u8 reserved_at_60[0x20];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
enum {
@@ -5453,12 +8222,13 @@ struct mlx5_ifc_manage_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 input_num_entries[0x20];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_mad_ifc_out_bits {
@@ -5480,7 +8250,7 @@ struct mlx5_ifc_mad_ifc_in_bits {
u8 op_mod[0x10];
u8 remote_lid[0x10];
- u8 reserved_at_50[0x8];
+ u8 plane_index[0x8];
u8 port[0x8];
u8 reserved_at_60[0x20];
@@ -5504,7 +8274,13 @@ struct mlx5_ifc_init_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x2];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_70[0x10];
+
+ u8 sw_owner_id[4][0x20];
};
struct mlx5_ifc_init2rtr_qp_out_bits {
@@ -5513,12 +8289,13 @@ struct mlx5_ifc_init2rtr_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_init2rtr_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5530,7 +8307,7 @@ struct mlx5_ifc_init2rtr_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -5543,12 +8320,13 @@ struct mlx5_ifc_init2init_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_init2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5560,7 +8338,7 @@ struct mlx5_ifc_init2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -5630,7 +8408,8 @@ struct mlx5_ifc_enable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -5647,7 +8426,7 @@ struct mlx5_ifc_drain_dct_out_bits {
struct mlx5_ifc_drain_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5674,7 +8453,8 @@ struct mlx5_ifc_disable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -5691,7 +8471,7 @@ struct mlx5_ifc_detach_from_mcg_out_bits {
struct mlx5_ifc_detach_from_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5715,7 +8495,7 @@ struct mlx5_ifc_destroy_xrq_out_bits {
struct mlx5_ifc_destroy_xrq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5737,7 +8517,7 @@ struct mlx5_ifc_destroy_xrc_srq_out_bits {
struct mlx5_ifc_destroy_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5759,7 +8539,7 @@ struct mlx5_ifc_destroy_tis_out_bits {
struct mlx5_ifc_destroy_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5781,7 +8561,7 @@ struct mlx5_ifc_destroy_tir_out_bits {
struct mlx5_ifc_destroy_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5803,7 +8583,7 @@ struct mlx5_ifc_destroy_srq_out_bits {
struct mlx5_ifc_destroy_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5825,7 +8605,7 @@ struct mlx5_ifc_destroy_sq_out_bits {
struct mlx5_ifc_destroy_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5871,7 +8651,7 @@ struct mlx5_ifc_destroy_rqt_out_bits {
struct mlx5_ifc_destroy_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5893,7 +8673,7 @@ struct mlx5_ifc_destroy_rq_out_bits {
struct mlx5_ifc_destroy_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5937,7 +8717,7 @@ struct mlx5_ifc_destroy_rmp_out_bits {
struct mlx5_ifc_destroy_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5959,7 +8739,7 @@ struct mlx5_ifc_destroy_qp_out_bits {
struct mlx5_ifc_destroy_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6003,7 +8783,7 @@ struct mlx5_ifc_destroy_mkey_out_bits {
struct mlx5_ifc_destroy_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6031,13 +8811,15 @@ struct mlx5_ifc_destroy_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -6062,13 +8844,15 @@ struct mlx5_ifc_destroy_flow_group_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -6111,7 +8895,7 @@ struct mlx5_ifc_destroy_dct_out_bits {
struct mlx5_ifc_destroy_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6133,7 +8917,7 @@ struct mlx5_ifc_destroy_cq_out_bits {
struct mlx5_ifc_destroy_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6207,13 +8991,15 @@ struct mlx5_ifc_delete_fte_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -6236,7 +9022,7 @@ struct mlx5_ifc_dealloc_xrcd_out_bits {
struct mlx5_ifc_dealloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6258,7 +9044,7 @@ struct mlx5_ifc_dealloc_uar_out_bits {
struct mlx5_ifc_dealloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6280,7 +9066,7 @@ struct mlx5_ifc_dealloc_transport_domain_out_bits {
struct mlx5_ifc_dealloc_transport_domain_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6324,7 +9110,7 @@ struct mlx5_ifc_dealloc_pd_out_bits {
struct mlx5_ifc_dealloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6351,8 +9137,7 @@ struct mlx5_ifc_dealloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_60[0x20];
};
@@ -6371,7 +9156,7 @@ struct mlx5_ifc_create_xrq_out_bits {
struct mlx5_ifc_create_xrq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6395,7 +9180,7 @@ struct mlx5_ifc_create_xrc_srq_out_bits {
struct mlx5_ifc_create_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6404,9 +9189,14 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
- u8 pas[0][0x40];
+ u8 xrc_srq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x1f];
+
+ u8 reserved_at_300[0x580];
+
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_tis_out_bits {
@@ -6423,7 +9213,7 @@ struct mlx5_ifc_create_tis_out_bits {
struct mlx5_ifc_create_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6435,19 +9225,19 @@ struct mlx5_ifc_create_tis_in_bits {
struct mlx5_ifc_create_tir_out_bits {
u8 status[0x8];
- u8 reserved_at_8[0x18];
+ u8 icm_address_63_40[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
+ u8 icm_address_39_32[0x8];
u8 tirn[0x18];
- u8 reserved_at_60[0x20];
+ u8 icm_address_31_0[0x20];
};
struct mlx5_ifc_create_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6471,7 +9261,7 @@ struct mlx5_ifc_create_srq_out_bits {
struct mlx5_ifc_create_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6482,7 +9272,7 @@ struct mlx5_ifc_create_srq_in_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_sq_out_bits {
@@ -6499,7 +9289,7 @@ struct mlx5_ifc_create_sq_out_bits {
struct mlx5_ifc_create_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6553,7 +9343,7 @@ struct mlx5_ifc_create_rqt_out_bits {
struct mlx5_ifc_create_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6577,7 +9367,7 @@ struct mlx5_ifc_create_rq_out_bits {
struct mlx5_ifc_create_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6601,7 +9391,7 @@ struct mlx5_ifc_create_rmp_out_bits {
struct mlx5_ifc_create_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6620,27 +9410,35 @@ struct mlx5_ifc_create_qp_out_bits {
u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_at_60[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_create_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
+ u8 input_qpn[0x18];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_at_800[0x80];
+ u8 wq_umem_offset[0x40];
- u8 pas[0][0x40];
+ u8 wq_umem_id[0x20];
+
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_861[0x1f];
+
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_psv_out_bits {
@@ -6692,7 +9490,7 @@ struct mlx5_ifc_create_mkey_out_bits {
struct mlx5_ifc_create_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6700,7 +9498,9 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 reserved_at_40[0x20];
u8 pg_access[0x1];
- u8 reserved_at_61[0x1f];
+ u8 mkey_umem_valid[0x1];
+ u8 data_direct[0x1];
+ u8 reserved_at_63[0x1d];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
@@ -6710,54 +9510,48 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 reserved_at_320[0x560];
- u8 klm_pas_mtt[0][0x20];
+ u8 klm_pas_mtt[][0x20];
+};
+
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0,
+ MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+ MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2,
+ MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3,
+ MLX5_FLOW_TABLE_TYPE_FDB = 0x4,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0x5,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0x6,
};
struct mlx5_ifc_create_flow_table_out_bits {
u8 status[0x8];
- u8 reserved_at_8[0x18];
+ u8 icm_address_63_40[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
+ u8 icm_address_39_32[0x8];
u8 table_id[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_flow_table_context_bits {
- u8 encap_en[0x1];
- u8 decap_en[0x1];
- u8 reserved_at_2[0x2];
- u8 table_miss_action[0x4];
- u8 level[0x8];
- u8 reserved_at_10[0x8];
- u8 log_size[0x8];
-
- u8 reserved_at_20[0x8];
- u8 table_miss_id[0x18];
-
- u8 reserved_at_40[0x8];
- u8 lag_master_next_table_id[0x18];
-
- u8 reserved_at_60[0xe0];
+ u8 icm_address_31_0[0x20];
};
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x20];
@@ -6777,9 +9571,15 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
+};
+
+enum {
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_create_flow_group_in_bits {
@@ -6790,18 +9590,23 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_at_c0[0x20];
+ u8 source_eswitch_owner_vhca_id_valid[0x1];
+
+ u8 reserved_at_c1[0x1f];
u8 start_flow_index[0x20];
@@ -6809,7 +9614,10 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
+
+ u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
@@ -6833,7 +9641,7 @@ struct mlx5_ifc_create_eq_out_bits {
struct mlx5_ifc_create_eq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6844,11 +9652,11 @@ struct mlx5_ifc_create_eq_in_bits {
u8 reserved_at_280[0x40];
- u8 event_bitmask[0x40];
+ u8 event_bitmask[4][0x40];
- u8 reserved_at_300[0x580];
+ u8 reserved_at_3c0[0x4c0];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_dct_out_bits {
@@ -6860,12 +9668,12 @@ struct mlx5_ifc_create_dct_out_bits {
u8 reserved_at_40[0x8];
u8 dctn[0x18];
- u8 reserved_at_60[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_create_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6891,7 +9699,7 @@ struct mlx5_ifc_create_cq_out_bits {
struct mlx5_ifc_create_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6900,9 +9708,12 @@ struct mlx5_ifc_create_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
- u8 pas[0][0x40];
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x59f];
+
+ u8 pas[][0x40];
};
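/* Editor's note: the [0][0x40] -> [][0x40] conversions in this diff
 * replace zero-length arrays with C99 flexible array members; the size
 * math is unchanged. A hedged sketch of how such a variable-size input
 * is typically allocated (npages is a hypothetical page count; each
 * pas[] entry is 0x40 bits, i.e. one u64):
 *
 *	int inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 *		    sizeof(u64) * npages;
 *	u32 *in = kvzalloc(inlen, GFP_KERNEL);
 *
 *	if (!in)
 *		return -ENOMEM;
 *	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
 *	...
 *	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 *	kvfree(in);
 */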
struct mlx5_ifc_config_int_moderation_out_bits {
@@ -6948,7 +9759,7 @@ struct mlx5_ifc_attach_to_mcg_out_bits {
struct mlx5_ifc_attach_to_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6999,7 +9810,7 @@ enum {
struct mlx5_ifc_arm_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7027,7 +9838,7 @@ enum {
struct mlx5_ifc_arm_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7075,7 +9886,7 @@ struct mlx5_ifc_alloc_xrcd_out_bits {
struct mlx5_ifc_alloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7097,7 +9908,7 @@ struct mlx5_ifc_alloc_uar_out_bits {
struct mlx5_ifc_alloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7119,7 +9930,7 @@ struct mlx5_ifc_alloc_transport_domain_out_bits {
struct mlx5_ifc_alloc_transport_domain_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7141,7 +9952,7 @@ struct mlx5_ifc_alloc_q_counter_out_bits {
struct mlx5_ifc_alloc_q_counter_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7163,7 +9974,7 @@ struct mlx5_ifc_alloc_pd_out_bits {
struct mlx5_ifc_alloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7177,8 +9988,7 @@ struct mlx5_ifc_alloc_flow_counter_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_60[0x20];
};
@@ -7190,7 +10000,9 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x33];
+ u8 flow_counter_bulk_log_size[0x5];
+ u8 flow_counter_bulk[0x8];
};
struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
@@ -7215,7 +10027,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
u8 vxlan_udp_port[0x10];
};
-struct mlx5_ifc_set_rate_limit_out_bits {
+struct mlx5_ifc_set_pp_rate_limit_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7224,9 +10036,20 @@ struct mlx5_ifc_set_rate_limit_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_set_rate_limit_in_bits {
+struct mlx5_ifc_set_pp_rate_limit_context_bits {
+ u8 rate_limit[0x20];
+
+ u8 burst_upper_bound[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 typical_packet_size[0x10];
+
+ u8 reserved_at_60[0x120];
+};
+
+struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7236,7 +10059,7 @@ struct mlx5_ifc_set_rate_limit_in_bits {
u8 reserved_at_60[0x20];
- u8 rate_limit[0x20];
+ struct mlx5_ifc_set_pp_rate_limit_context_bits ctx;
};
struct mlx5_ifc_access_register_out_bits {
@@ -7247,7 +10070,7 @@ struct mlx5_ifc_access_register_out_bits {
u8 reserved_at_40[0x40];
- u8 register_data[0][0x20];
+ u8 register_data[][0x20];
};
enum {
@@ -7267,7 +10090,7 @@ struct mlx5_ifc_access_register_in_bits {
u8 argument[0x20];
- u8 register_data[0][0x20];
+ u8 register_data[][0x20];
};
struct mlx5_ifc_sltp_reg_bits {
@@ -7362,38 +10185,48 @@ struct mlx5_ifc_pude_reg_bits {
u8 reserved_at_20[0x60];
};
+enum {
+ MLX5_PTYS_CONNECTOR_TYPE_PORT_DA = 0x7,
+};
+
struct mlx5_ifc_ptys_reg_bits {
u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
u8 an_disable_cap[0x1];
u8 reserved_at_3[0x5];
u8 local_port[0x8];
- u8 reserved_at_10[0xd];
+ u8 reserved_at_10[0x8];
+ u8 plane_ind[0x4];
+ u8 reserved_at_1c[0x1];
u8 proto_mask[0x3];
u8 an_status[0x4];
- u8 reserved_at_24[0x3c];
+ u8 reserved_at_24[0xc];
+ u8 data_rate_oper[0x10];
+
+ u8 ext_eth_proto_capability[0x20];
u8 eth_proto_capability[0x20];
u8 ib_link_width_capability[0x10];
u8 ib_proto_capability[0x10];
- u8 reserved_at_a0[0x20];
+ u8 ext_eth_proto_admin[0x20];
u8 eth_proto_admin[0x20];
u8 ib_link_width_admin[0x10];
u8 ib_proto_admin[0x10];
- u8 reserved_at_100[0x20];
+ u8 ext_eth_proto_oper[0x20];
u8 eth_proto_oper[0x20];
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
- u8 reserved_at_160[0x1c];
+ u8 reserved_at_160[0x8];
+ u8 lane_rate_oper[0x14];
u8 connector_type[0x4];
u8 eth_proto_lp_advertise[0x20];
@@ -7525,20 +10358,76 @@ struct mlx5_ifc_pplr_reg_bits {
struct mlx5_ifc_pplm_reg_bits {
u8 reserved_at_0[0x8];
- u8 local_port[0x8];
- u8 reserved_at_10[0x10];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x20];
+ u8 reserved_at_20[0x20];
- u8 port_profile_mode[0x8];
- u8 static_port_profile[0x8];
- u8 active_port_profile[0x8];
- u8 reserved_at_58[0x8];
+ u8 port_profile_mode[0x8];
+ u8 static_port_profile[0x8];
+ u8 active_port_profile[0x8];
+ u8 reserved_at_58[0x8];
- u8 retransmission_active[0x8];
- u8 fec_mode_active[0x18];
+ u8 retransmission_active[0x8];
+ u8 fec_mode_active[0x18];
- u8 reserved_at_80[0x20];
+ u8 rs_fec_correction_bypass_cap[0x4];
+ u8 reserved_at_84[0x8];
+ u8 fec_override_cap_56g[0x4];
+ u8 fec_override_cap_100g[0x4];
+ u8 fec_override_cap_50g[0x4];
+ u8 fec_override_cap_25g[0x4];
+ u8 fec_override_cap_10g_40g[0x4];
+
+ u8 rs_fec_correction_bypass_admin[0x4];
+ u8 reserved_at_a4[0x8];
+ u8 fec_override_admin_56g[0x4];
+ u8 fec_override_admin_100g[0x4];
+ u8 fec_override_admin_50g[0x4];
+ u8 fec_override_admin_25g[0x4];
+ u8 fec_override_admin_10g_40g[0x4];
+
+ u8 fec_override_cap_400g_8x[0x10];
+ u8 fec_override_cap_200g_4x[0x10];
+
+ u8 fec_override_cap_100g_2x[0x10];
+ u8 fec_override_cap_50g_1x[0x10];
+
+ u8 fec_override_admin_400g_8x[0x10];
+ u8 fec_override_admin_200g_4x[0x10];
+
+ u8 fec_override_admin_100g_2x[0x10];
+ u8 fec_override_admin_50g_1x[0x10];
+
+ u8 fec_override_cap_800g_8x[0x10];
+ u8 fec_override_cap_400g_4x[0x10];
+
+ u8 fec_override_cap_200g_2x[0x10];
+ u8 fec_override_cap_100g_1x[0x10];
+
+ u8 reserved_at_180[0xa0];
+
+ u8 fec_override_admin_800g_8x[0x10];
+ u8 fec_override_admin_400g_4x[0x10];
+
+ u8 fec_override_admin_200g_2x[0x10];
+ u8 fec_override_admin_100g_1x[0x10];
+
+ u8 reserved_at_260[0x60];
+
+ u8 fec_override_cap_1600g_8x[0x10];
+ u8 fec_override_cap_800g_4x[0x10];
+
+ u8 fec_override_cap_400g_2x[0x10];
+ u8 fec_override_cap_200g_1x[0x10];
+
+ u8 fec_override_admin_1600g_8x[0x10];
+ u8 fec_override_admin_800g_4x[0x10];
+
+ u8 fec_override_admin_400g_2x[0x10];
+ u8 fec_override_admin_200g_1x[0x10];
+
+ u8 reserved_at_340[0x80];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -7549,12 +10438,60 @@ struct mlx5_ifc_ppcnt_reg_bits {
u8 grp[0x6];
u8 clr[0x1];
- u8 reserved_at_21[0x1c];
- u8 prio_tc[0x3];
+ u8 reserved_at_21[0x13];
+ u8 plane_ind[0x4];
+ u8 reserved_at_38[0x3];
+ u8 prio_tc[0x5];
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
+struct mlx5_ifc_mpein_reg_bits {
+ u8 reserved_at_0[0x2];
+ u8 depth[0x6];
+ u8 pcie_index[0x8];
+ u8 node[0x8];
+ u8 reserved_at_18[0x8];
+
+ u8 capability_mask[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 link_width_enabled[0x8];
+ u8 link_speed_enabled[0x10];
+
+ u8 lane0_physical_position[0x8];
+ u8 link_width_active[0x8];
+ u8 link_speed_active[0x10];
+
+ u8 num_of_pfs[0x10];
+ u8 num_of_vfs[0x10];
+
+ u8 bdf0[0x10];
+ u8 reserved_at_b0[0x10];
+
+ u8 max_read_request_size[0x4];
+ u8 max_payload_size[0x4];
+ u8 reserved_at_c8[0x5];
+ u8 pwr_status[0x3];
+ u8 port_type[0x4];
+ u8 reserved_at_d4[0xb];
+ u8 lane_reversal[0x1];
+
+ u8 reserved_at_e0[0x14];
+ u8 pci_power[0xc];
+
+ u8 reserved_at_100[0x20];
+
+ u8 device_status[0x10];
+ u8 port_state[0x8];
+ u8 reserved_at_138[0x8];
+
+ u8 reserved_at_140[0x10];
+ u8 receiver_detect_result[0x10];
+
+ u8 reserved_at_160[0x20];
+};
+
struct mlx5_ifc_mpcnt_reg_bits {
u8 reserved_at_0[0x8];
u8 pcie_index[0x8];
@@ -7733,10 +10670,23 @@ struct mlx5_ifc_pifr_reg_bits {
u8 port_filter_update_en[8][0x20];
};
+enum {
+ MLX5_BUF_OWNERSHIP_UNKNOWN = 0x0,
+ MLX5_BUF_OWNERSHIP_FW_OWNED = 0x1,
+ MLX5_BUF_OWNERSHIP_SW_OWNED = 0x2,
+};
+
struct mlx5_ifc_pfcc_reg_bits {
- u8 reserved_at_0[0x8];
+ u8 reserved_at_0[0x4];
+ u8 buf_ownership[0x2];
+ u8 reserved_at_6[0x2];
u8 local_port[0x8];
- u8 reserved_at_10[0x10];
+ u8 reserved_at_10[0xa];
+ u8 cable_length_mask[0x1];
+ u8 ppan_mask_n[0x1];
+ u8 minor_stall_mask[0x1];
+ u8 critical_stall_mask[0x1];
+ u8 reserved_at_1e[0x2];
u8 ppan[0x4];
u8 reserved_at_24[0x4];
@@ -7746,17 +10696,25 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 pptx[0x1];
u8 aptx[0x1];
- u8 reserved_at_42[0x6];
+ u8 pptx_mask_n[0x1];
+ u8 reserved_at_43[0x5];
u8 pfctx[0x8];
u8 reserved_at_50[0x10];
u8 pprx[0x1];
u8 aprx[0x1];
- u8 reserved_at_62[0x6];
+ u8 pprx_mask_n[0x1];
+ u8 reserved_at_63[0x5];
u8 pfcrx[0x8];
u8 reserved_at_70[0x10];
- u8 reserved_at_80[0x80];
+ u8 device_stall_minor_watermark[0x10];
+ u8 device_stall_critical_watermark[0x10];
+
+ u8 reserved_at_a0[0x18];
+ u8 cable_length[0x8];
+
+ u8 reserved_at_c0[0x40];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -7796,15 +10754,110 @@ struct mlx5_ifc_peir_reg_bits {
u8 error_type[0x8];
};
-struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x7c];
+struct mlx5_ifc_mpegc_reg_bits {
+ u8 reserved_at_0[0x30];
+ u8 field_select[0x10];
+ u8 tx_overflow_sense[0x1];
+ u8 mark_cqe[0x1];
+ u8 mark_cnp[0x1];
+ u8 reserved_at_43[0x1b];
+ u8 tx_lossy_overflow_oper[0x2];
+
+ u8 reserved_at_60[0x100];
+};
+
+struct mlx5_ifc_mpir_reg_bits {
+ u8 sdm[0x1];
+ u8 reserved_at_1[0x1b];
+ u8 host_buses[0x4];
+
+ u8 reserved_at_20[0x20];
+
+ u8 local_port[0x8];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+enum {
+ MLX5_MTUTC_FREQ_ADJ_UNITS_PPB = 0x0,
+ MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM = 0x1,
+};
+
+enum {
+ MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
+ MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3,
+};
+
+struct mlx5_ifc_mtutc_reg_bits {
+ u8 reserved_at_0[0x5];
+ u8 freq_adj_units[0x3];
+ u8 reserved_at_8[0x3];
+ u8 log_max_freq_adjustment[0x5];
+
+ u8 reserved_at_10[0xc];
+ u8 operation[0x4];
+
+ u8 freq_adjustment[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 utc_sec[0x20];
+
+ u8 reserved_at_a0[0x2];
+ u8 utc_nsec[0x1e];
+
+ u8 time_adjustment[0x20];
+};
+
+struct mlx5_ifc_pcam_enhanced_features_bits {
+ u8 reserved_at_0[0x10];
+ u8 ppcnt_recovery_counters[0x1];
+ u8 reserved_at_11[0x7];
+ u8 cable_length[0x1];
+ u8 reserved_at_19[0x4];
+ u8 fec_200G_per_lane_in_pplm[0x1];
+ u8 reserved_at_1e[0x2a];
+ u8 fec_100G_per_lane_in_pplm[0x1];
+ u8 reserved_at_49[0xa];
+ u8 buffer_ownership[0x1];
+ u8 reserved_at_54[0x14];
+ u8 fec_50G_per_lane_in_pplm[0x1];
+ u8 reserved_at_69[0x4];
+ u8 rx_icrc_encapsulated_counter[0x1];
+ u8 reserved_at_6e[0x4];
+ u8 ptys_extended_ethernet[0x1];
+ u8 reserved_at_73[0x3];
+ u8 pfcc_mask[0x1];
+ u8 reserved_at_77[0x3];
+ u8 per_lane_error_counters[0x1];
+ u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
u8 ppcnt_discard_group[0x1];
u8 ppcnt_statistical_group[0x1];
};
+struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
+ u8 port_access_reg_cap_mask_127_to_96[0x20];
+ u8 port_access_reg_cap_mask_95_to_64[0x20];
+
+ u8 port_access_reg_cap_mask_63[0x1];
+ u8 pphcr[0x1];
+ u8 port_access_reg_cap_mask_61_to_36[0x1a];
+ u8 pplm[0x1];
+ u8 port_access_reg_cap_mask_34_to_32[0x3];
+
+ u8 port_access_reg_cap_mask_31_to_13[0x13];
+ u8 pbmc[0x1];
+ u8 pptb[0x1];
+ u8 port_access_reg_cap_mask_10_to_09[0x2];
+ u8 ppcnt[0x1];
+ u8 port_access_reg_cap_mask_07_to_00[0x8];
+};
+
struct mlx5_ifc_pcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -7814,6 +10867,7 @@ struct mlx5_ifc_pcam_reg_bits {
u8 reserved_at_20[0x20];
union {
+ struct mlx5_ifc_pcam_regs_5000_to_507f_bits regs_5000_to_507f;
u8 reserved_at_0[0x80];
} port_access_reg_cap_mask;
@@ -7828,8 +10882,25 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x7d];
-
+ u8 reserved_at_0[0x50];
+ u8 mtutc_freq_adj_units[0x1];
+ u8 mtutc_time_adjustment_extended_range[0x1];
+ u8 reserved_at_52[0xb];
+ u8 mcia_32dwords[0x1];
+ u8 out_pulse_duration_ns[0x1];
+ u8 npps_period[0x1];
+ u8 reserved_at_60[0xa];
+ u8 reset_state[0x1];
+ u8 ptpcyc2realtime_modify[0x1];
+ u8 reserved_at_6c[0x2];
+ u8 pci_status_and_power[0x1];
+ u8 reserved_at_6f[0x5];
+ u8 mark_tx_action_cnp[0x1];
+ u8 mark_tx_action_cqe[0x1];
+ u8 dynamic_tx_overflow[0x1];
+ u8 reserved_at_77[0x4];
+ u8 pcie_outbound_stalled[0x1];
+ u8 tx_overflow_buffer_pkt[0x1];
u8 mtpps_enh_out_per_adj[0x1];
u8 mtpps_fs[0x1];
u8 pcie_performance_group[0x1];
@@ -7840,13 +10911,64 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mcda[0x1];
u8 mcc[0x1];
u8 mcqi[0x1];
- u8 reserved_at_1f[0x1];
+ u8 mcqs[0x1];
+
+ u8 regs_95_to_90[0x6];
+ u8 mpir[0x1];
+ u8 regs_88_to_87[0x2];
+ u8 mpegc[0x1];
+ u8 mtutc[0x1];
+ u8 regs_84_to_68[0x11];
+ u8 tracer_registers[0x4];
+
+ u8 regs_63_to_46[0x12];
+ u8 mrtc[0x1];
+ u8 regs_44_to_41[0x4];
+ u8 mfrl[0x1];
+ u8 regs_39_to_32[0x8];
+
+ u8 regs_31_to_11[0x15];
+ u8 mtmp[0x1];
+ u8 regs_9_to_0[0xa];
+};
+
+struct mlx5_ifc_mcam_access_reg_bits1 {
+ u8 regs_127_to_96[0x20];
u8 regs_95_to_64[0x20];
+
u8 regs_63_to_32[0x20];
+
u8 regs_31_to_0[0x20];
};
+struct mlx5_ifc_mcam_access_reg_bits2 {
+ u8 regs_127_to_99[0x1d];
+ u8 mirc[0x1];
+ u8 regs_97_to_96[0x2];
+
+ u8 regs_95_to_87[0x9];
+ u8 synce_registers[0x2];
+ u8 regs_84_to_64[0x15];
+
+ u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_0[0x20];
+};
+
+struct mlx5_ifc_mcam_access_reg_bits3 {
+ u8 regs_127_to_96[0x20];
+
+ u8 regs_95_to_64[0x20];
+
+ u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_3[0x1d];
+ u8 mrtcq[0x1];
+ u8 mtctr[0x1];
+ u8 mtptm[0x1];
+};
+
struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -7857,6 +10979,9 @@ struct mlx5_ifc_mcam_reg_bits {
union {
struct mlx5_ifc_mcam_access_reg_bits access_regs;
+ struct mlx5_ifc_mcam_access_reg_bits1 access_regs1;
+ struct mlx5_ifc_mcam_access_reg_bits2 access_regs2;
+ struct mlx5_ifc_mcam_access_reg_bits3 access_regs3;
u8 reserved_at_0[0x80];
} mng_access_reg_cap_mask;
@@ -7870,6 +10995,55 @@ struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_1c0[0x80];
};
+struct mlx5_ifc_qcam_access_reg_cap_mask {
+ u8 qcam_access_reg_cap_mask_127_to_20[0x6c];
+ u8 qpdpm[0x1];
+ u8 qcam_access_reg_cap_mask_18_to_4[0xf];
+ u8 qdpm[0x1];
+ u8 qpts[0x1];
+ u8 qcap[0x1];
+ u8 qcam_access_reg_cap_mask_0[0x1];
+};
+
+struct mlx5_ifc_qcam_qos_feature_cap_mask {
+ u8 qcam_qos_feature_cap_mask_127_to_1[0x7f];
+ u8 qpts_trust_both[0x1];
+};
+
+struct mlx5_ifc_qcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+ u8 reserved_at_20[0x20];
+
+ union {
+ struct mlx5_ifc_qcam_access_reg_cap_mask reg_cap;
+ u8 reserved_at_0[0x80];
+ } qos_access_reg_cap_mask;
+
+ u8 reserved_at_c0[0x80];
+
+ union {
+ struct mlx5_ifc_qcam_qos_feature_cap_mask feature_cap;
+ u8 reserved_at_0[0x80];
+ } qos_feature_cap_mask;
+
+ u8 reserved_at_1c0[0x80];
+};
+
+struct mlx5_ifc_core_dump_reg_bits {
+ u8 reserved_at_0[0x18];
+ u8 core_dump_type[0x8];
+
+ u8 reserved_at_20[0x30];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_80[0x180];
+};
+
struct mlx5_ifc_pcap_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
@@ -7910,18 +11084,32 @@ struct mlx5_ifc_pamp_reg_bits {
struct mlx5_ifc_pcmr_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_at_10[0x2e];
+ u8 reserved_at_10[0x10];
+
+ u8 entropy_force_cap[0x1];
+ u8 entropy_calc_cap[0x1];
+ u8 entropy_gre_calc_cap[0x1];
+ u8 reserved_at_23[0xf];
+ u8 rx_ts_over_crc_cap[0x1];
+ u8 reserved_at_33[0xb];
u8 fcs_cap[0x1];
- u8 reserved_at_3f[0x1f];
+ u8 reserved_at_3f[0x1];
+
+ u8 entropy_force[0x1];
+ u8 entropy_calc[0x1];
+ u8 entropy_gre_calc[0x1];
+ u8 reserved_at_43[0xf];
+ u8 rx_ts_over_crc[0x1];
+ u8 reserved_at_53[0xb];
u8 fcs_chk[0x1];
u8 reserved_at_5f[0x1];
};
struct mlx5_ifc_lane_2_module_mapping_bits {
- u8 reserved_at_0[0x6];
- u8 rx_lane[0x2];
- u8 reserved_at_8[0x6];
- u8 tx_lane[0x2];
+ u8 reserved_at_0[0x4];
+ u8 rx_lane[0x4];
+ u8 reserved_at_8[0x4];
+ u8 tx_lane[0x4];
u8 reserved_at_10[0x8];
u8 module[0x8];
};
@@ -7930,8 +11118,8 @@ struct mlx5_ifc_bufferx_reg_bits {
u8 reserved_at_0[0x6];
u8 lossy[0x1];
u8 epsb[0x1];
- u8 reserved_at_8[0xc];
- u8 size[0xc];
+ u8 reserved_at_8[0x8];
+ u8 size[0x10];
u8 xoff_threshold[0x10];
u8 xon_threshold[0x10];
@@ -8069,7 +11257,7 @@ struct mlx5_ifc_cmd_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 command[0][0x20];
+ u8 command[][0x20];
};
struct mlx5_ifc_cmd_if_box_bits {
@@ -8151,6 +11339,7 @@ enum {
MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET = 0x7,
};
enum {
@@ -8171,6 +11360,8 @@ enum {
MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR = 0x13,
};
struct mlx5_ifc_initial_seg_bits {
@@ -8197,7 +11388,8 @@ struct mlx5_ifc_initial_seg_bits {
u8 initializing[0x1];
u8 reserved_at_fe1[0x4];
u8 nic_interface_supported[0x3];
- u8 reserved_at_fe8[0x18];
+ u8 embedded_cpu[0x1];
+ u8 reserved_at_fe9[0x17];
struct mlx5_ifc_health_buffer_bits health_buffer;
@@ -8222,7 +11414,12 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_18[0x4];
u8 cap_max_num_of_pps_out_pins[0x4];
- u8 reserved_at_20[0x24];
+ u8 reserved_at_20[0x13];
+ u8 cap_log_min_npps_period[0x5];
+ u8 reserved_at_38[0x3];
+ u8 cap_log_min_out_pulse_duration_ns[0x5];
+
+ u8 reserved_at_40[0x4];
u8 cap_pin_3_mode[0x4];
u8 reserved_at_48[0x4];
u8 cap_pin_2_mode[0x4];
@@ -8241,7 +11438,9 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 cap_pin_4_mode[0x4];
u8 field_select[0x20];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x20];
+
+ u8 npps_period[0x40];
u8 enable[0x1];
u8 reserved_at_101[0xb];
@@ -8250,7 +11449,8 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 pin_mode[0x4];
u8 pin[0x8];
- u8 reserved_at_120[0x20];
+ u8 reserved_at_120[0x2];
+ u8 out_pulse_duration_ns[0x1e];
u8 time_stamp[0x40];
@@ -8270,6 +11470,24 @@ struct mlx5_ifc_mtppse_reg_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_mcqs_reg_bits {
+ u8 last_index_flag[0x1];
+ u8 reserved_at_1[0x7];
+ u8 fw_device[0x8];
+ u8 component_index[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 identifier[0x10];
+
+ u8 reserved_at_40[0x17];
+ u8 component_status[0x5];
+ u8 component_update_state[0x4];
+
+ u8 last_update_state_changer_type[0x4];
+ u8 last_update_state_changer_host_id[0x4];
+ u8 reserved_at_68[0x18];
+};
+
struct mlx5_ifc_mcqi_cap_bits {
u8 supported_info_bitmask[0x20];
@@ -8290,6 +11508,43 @@ struct mlx5_ifc_mcqi_cap_bits {
u8 reserved_at_86[0x1a];
};
+struct mlx5_ifc_mcqi_version_bits {
+ u8 reserved_at_0[0x2];
+ u8 build_time_valid[0x1];
+ u8 user_defined_time_valid[0x1];
+ u8 reserved_at_4[0x14];
+ u8 version_string_length[0x8];
+
+ u8 version[0x20];
+
+ u8 build_time[0x40];
+
+ u8 user_defined_time[0x40];
+
+ u8 build_tool_version[0x20];
+
+ u8 reserved_at_e0[0x20];
+
+ u8 version_string[92][0x8];
+};
+
+struct mlx5_ifc_mcqi_activation_method_bits {
+ u8 pending_server_ac_power_cycle[0x1];
+ u8 pending_server_dc_power_cycle[0x1];
+ u8 pending_server_reboot[0x1];
+ u8 pending_fw_reset[0x1];
+ u8 auto_activate[0x1];
+ u8 all_hosts_sync[0x1];
+ u8 device_hw_reset[0x1];
+ u8 reserved_at_7[0x19];
+};
+
+union mlx5_ifc_mcqi_reg_data_bits {
+ struct mlx5_ifc_mcqi_cap_bits mcqi_caps;
+ struct mlx5_ifc_mcqi_version_bits mcqi_version;
+ struct mlx5_ifc_mcqi_activation_method_bits mcqi_activation_method;
+};
+
struct mlx5_ifc_mcqi_reg_bits {
u8 read_pending_component[0x1];
u8 reserved_at_1[0xf];
@@ -8307,7 +11562,7 @@ struct mlx5_ifc_mcqi_reg_bits {
u8 reserved_at_a0[0x10];
u8 data_size[0x10];
- u8 data[0][0x20];
+ union mlx5_ifc_mcqi_reg_data_bits data[];
};
struct mlx5_ifc_mcc_reg_bits {
@@ -8346,7 +11601,195 @@ struct mlx5_ifc_mcda_reg_bits {
u8 reserved_at_60[0x20];
- u8 data[0][0x20];
+ u8 data[][0x20];
+};
+
+enum {
+ MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0,
+ MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1,
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
+ MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
+ MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
+ MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT = 3,
+ MLX5_MFRL_REG_RESET_STATE_NACK = 4,
+ MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT = 5,
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0),
+ MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1),
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_LEVEL0 = BIT(0),
+ MLX5_MFRL_REG_RESET_LEVEL3 = BIT(3),
+ MLX5_MFRL_REG_RESET_LEVEL6 = BIT(6),
+};
+
+struct mlx5_ifc_mfrl_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x2];
+ u8 pci_sync_for_fw_update_start[0x1];
+ u8 pci_sync_for_fw_update_resp[0x2];
+ u8 rst_type_sel[0x3];
+ u8 pci_reset_req_method[0x3];
+ u8 reserved_at_2b[0x1];
+ u8 reset_state[0x4];
+ u8 reset_type[0x8];
+ u8 reset_level[0x8];
+};
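/* Editor's sketch (annotation, not part of this diff): MFRL is a
 * register rather than a mailbox command, so it is read and written
 * through the access-register interface. Assuming the driver helper
 * mlx5_core_access_reg() and the MLX5_REG_MFRL register id, requesting
 * a level-3 firmware reset could look roughly like:
 *
 *	u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
 *	u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
 *
 *	MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
 *	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 *				   MLX5_REG_MFRL, 0, 1);
 */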
+
+struct mlx5_ifc_mirc_reg_bits {
+ u8 reserved_at_0[0x18];
+ u8 status_code[0x8];
+
+ u8 reserved_at_20[0x20];
+};
+
+struct mlx5_ifc_pddr_monitor_opcode_bits {
+ u8 reserved_at_0[0x10];
+ u8 monitor_opcode[0x10];
+};
+
+union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits {
+ struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
+ u8 reserved_at_0[0x20];
+};
+
+enum {
+ /* Monitor opcodes */
+ MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR = 0x0,
+};
+
+struct mlx5_ifc_pddr_troubleshooting_page_bits {
+ u8 reserved_at_0[0x10];
+ u8 group_opcode[0x10];
+
+ union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits status_opcode;
+
+ u8 reserved_at_40[0x20];
+
+ u8 status_message[59][0x20];
+};
+
+union mlx5_ifc_pddr_reg_page_data_auto_bits {
+ struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
+ u8 reserved_at_0[0x7c0];
+};
+
+enum {
+ MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE = 0x1,
+};
+
+struct mlx5_ifc_pddr_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_at_12[0xe];
+
+ u8 reserved_at_20[0x18];
+ u8 page_select[0x8];
+
+ union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
+};
+
+struct mlx5_ifc_mrtc_reg_bits {
+ u8 time_synced[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0x20];
+
+ u8 time_h[0x20];
+
+ u8 time_l[0x20];
+};
+
+struct mlx5_ifc_mtcap_reg_bits {
+ u8 reserved_at_0[0x19];
+ u8 sensor_count[0x7];
+
+ u8 reserved_at_20[0x20];
+
+ u8 sensor_map[0x40];
+};
+
+struct mlx5_ifc_mtmp_reg_bits {
+ u8 reserved_at_0[0x14];
+ u8 sensor_index[0xc];
+
+ u8 reserved_at_20[0x10];
+ u8 temperature[0x10];
+
+ u8 mte[0x1];
+ u8 mtr[0x1];
+ u8 reserved_at_42[0xe];
+ u8 max_temperature[0x10];
+
+ u8 tee[0x2];
+ u8 reserved_at_62[0xe];
+ u8 temp_threshold_hi[0x10];
+
+ u8 reserved_at_80[0x10];
+ u8 temp_threshold_lo[0x10];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sensor_name_hi[0x20];
+ u8 sensor_name_lo[0x20];
+};
+
+struct mlx5_ifc_mtptm_reg_bits {
+ u8 reserved_at_0[0x10];
+ u8 psta[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x60];
+};
+
+enum {
+ MLX5_MTCTR_REQUEST_NOP = 0x0,
+ MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK = 0x1,
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER = 0x2,
+ MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK = 0x3,
+};
+
+struct mlx5_ifc_mtctr_reg_bits {
+ u8 first_clock_timestamp_request[0x8];
+ u8 second_clock_timestamp_request[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 first_clock_valid[0x1];
+ u8 second_clock_valid[0x1];
+ u8 reserved_at_22[0x1e];
+
+ u8 first_clock_timestamp[0x40];
+ u8 second_clock_timestamp[0x40];
+};
+
+struct mlx5_ifc_bin_range_layout_bits {
+ u8 reserved_at_0[0xa];
+ u8 high_val[0x6];
+ u8 reserved_at_10[0xa];
+ u8 low_val[0x6];
+};
+
+struct mlx5_ifc_pphcr_reg_bits {
+ u8 active_hist_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x8];
+ u8 num_of_bins[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_bin_range_layout_bits bin_range[16];
};
union mlx5_ifc_ports_control_registers_document_bits {
@@ -8357,11 +11800,15 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
- struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout;
struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
struct mlx5_ifc_pamp_reg_bits pamp_reg;
struct mlx5_ifc_paos_reg_bits paos_reg;
struct mlx5_ifc_pcap_reg_bits pcap_reg;
+ struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
+ struct mlx5_ifc_pddr_reg_bits pddr_reg;
+ struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
struct mlx5_ifc_peir_reg_bits peir_reg;
struct mlx5_ifc_pelc_reg_bits pelc_reg;
struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
@@ -8381,6 +11828,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_mpein_reg_bits mpein_reg;
struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
@@ -8402,6 +11850,15 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcqi_reg_bits mcqi_reg;
struct mlx5_ifc_mcc_reg_bits mcc_reg;
struct mlx5_ifc_mcda_reg_bits mcda_reg;
+ struct mlx5_ifc_mirc_reg_bits mirc_reg;
+ struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
+ struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
+ struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
+ struct mlx5_ifc_mtcap_reg_bits mtcap_reg;
+ struct mlx5_ifc_mtmp_reg_bits mtmp_reg;
+ struct mlx5_ifc_mtptm_reg_bits mtptm_reg;
+ struct mlx5_ifc_mtctr_reg_bits mtctr_reg;
+ struct mlx5_ifc_pphcr_reg_bits pphcr_reg;
u8 reserved_at_0[0x60e0];
};
@@ -8432,20 +11889,27 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
- u8 reserved_at_60[0x20];
+ u8 reserved_at_60[0x10];
+ u8 eswitch_owner_vhca_id[0x10];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x7];
+ u8 table_of_other_vport[0x1];
+ u8 table_vport_number[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_at_c0[0x8];
u8 underlay_qpn[0x18];
- u8 reserved_at_e0[0x120];
+ u8 table_eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_e1[0xf];
+ u8 table_eswitch_owner_vhca_id[0x10];
+ u8 reserved_at_100[0x100];
};
enum {
@@ -8470,14 +11934,16 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x10];
u8 modify_field_select[0x10];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8520,6 +11986,150 @@ struct mlx5_ifc_qetc_reg_bits {
struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
};
+struct mlx5_ifc_qpdpm_dscp_reg_bits {
+ u8 e[0x1];
+ u8 reserved_at_1[0xb];
+ u8 prio[0x4];
+};
+
+struct mlx5_ifc_qpdpm_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+ struct mlx5_ifc_qpdpm_dscp_reg_bits dscp[64];
+};
+
+struct mlx5_ifc_qpts_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x2d];
+ u8 trust_state[0x3];
+};
+
+struct mlx5_ifc_pptb_reg_bits {
+ u8 reserved_at_0[0x2];
+ u8 mm[0x2];
+ u8 reserved_at_4[0x4];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x6];
+ u8 cm[0x1];
+ u8 um[0x1];
+ u8 pm[0x8];
+
+ u8 prio_x_buff[0x20];
+
+ u8 pm_msb[0x8];
+ u8 reserved_at_48[0x10];
+ u8 ctrl_buff[0x4];
+ u8 untagged_buff[0x4];
+};
+
+struct mlx5_ifc_sbcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ u8 sb_access_reg_cap_mask[4][0x20];
+
+ u8 reserved_at_c0[0x80];
+
+ u8 sb_feature_cap_mask[4][0x20];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 cap_total_buffer_size[0x20];
+
+ u8 cap_cell_size[0x10];
+ u8 cap_max_pg_buffers[0x8];
+ u8 cap_num_pool_supported[0x8];
+
+ u8 reserved_at_240[0x8];
+ u8 cap_sbsr_stat_size[0x8];
+ u8 cap_max_tclass_data[0x8];
+ u8 cap_max_cpu_ingress_tclass_sb[0x8];
+};
+
+struct mlx5_ifc_pbmc_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 xoff_timer_value[0x10];
+ u8 xoff_refresh[0x10];
+
+ u8 reserved_at_40[0x9];
+ u8 fullness_threshold[0x7];
+ u8 port_buffer_size[0x10];
+
+ struct mlx5_ifc_bufferx_reg_bits buffer[10];
+
+ u8 reserved_at_2e0[0x80];
+};
+
+struct mlx5_ifc_sbpr_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x4];
+ u8 dir[0x2];
+ u8 reserved_at_8[0x14];
+ u8 pool[0x4];
+
+ u8 infi_size[0x1];
+ u8 reserved_at_21[0x7];
+ u8 size[0x18];
+
+ u8 reserved_at_40[0x1c];
+ u8 mode[0x4];
+
+ u8 reserved_at_60[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_81[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 ext_buff_occupancy[0x18];
+};
+
+struct mlx5_ifc_sbcm_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x6];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 pg_buff[0x6];
+ u8 reserved_at_18[0x6];
+ u8 dir[0x2];
+
+ u8 reserved_at_20[0x1f];
+ u8 exc[0x1];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_a1[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_c0[0x8];
+ u8 min_buff[0x18];
+
+ u8 infi_max[0x1];
+ u8 reserved_at_e1[0x7];
+ u8 max_buff[0x18];
+
+ u8 reserved_at_100[0x20];
+
+ u8 reserved_at_120[0x1c];
+ u8 pool[0x4];
+};
+
struct mlx5_ifc_qtct_reg_bits {
u8 reserved_at_0[0x8];
u8 port_number[0x8];
@@ -8564,7 +12174,7 @@ struct mlx5_ifc_dcbx_param_bits {
u8 dcbx_cee_cap[0x1];
u8 dcbx_ieee_cap[0x1];
u8 dcbx_standby_cap[0x1];
- u8 reserved_at_0[0x5];
+ u8 reserved_at_3[0x5];
u8 port_number[0x8];
u8 reserved_at_10[0xa];
u8 max_application_table_size[6];
@@ -8591,11 +12201,22 @@ struct mlx5_ifc_dcbx_param_bits {
u8 reserved_at_a0[0x160];
};
+enum {
+ MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT = 1,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW = 2,
+};
+
struct mlx5_ifc_lagc_bits {
- u8 reserved_at_0[0x1d];
+ u8 fdb_selection_mode[0x1];
+ u8 reserved_at_1[0x14];
+ u8 port_select_mode[0x3];
+ u8 reserved_at_18[0x5];
u8 lag_state[0x3];
- u8 reserved_at_20[0x14];
+ u8 reserved_at_20[0xc];
+ u8 active_port[0x4];
+ u8 reserved_at_30[0x4];
u8 tx_remap_affinity_2[0x4];
u8 reserved_at_38[0x4];
u8 tx_remap_affinity_1[0x4];
@@ -8648,8 +12269,6 @@ struct mlx5_ifc_query_lag_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
-
struct mlx5_ifc_lagc_bits ctx;
};
@@ -8720,4 +12339,1307 @@ struct mlx5_ifc_destroy_vport_lag_in_bits {
u8 reserved_at_40[0x40];
};
+enum {
+ MLX5_MODIFY_MEMIC_OP_MOD_ALLOC,
+ MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC,
+};
+
+struct mlx5_ifc_modify_memic_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x18];
+ u8 memic_operation_type[0x8];
+
+ u8 memic_start_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_modify_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 memic_operation_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_alloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x18];
+ u8 log_memic_addr_alignment[0x8];
+
+ u8 range_start_addr[0x40];
+
+ u8 range_size[0x20];
+
+ u8 memic_size[0x20];
+};
+
+struct mlx5_ifc_alloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 memic_start_addr[0x40];
+};
+
+struct mlx5_ifc_dealloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ u8 memic_start_addr[0x40];
+
+ u8 memic_size[0x20];
+
+ u8 reserved_at_e0[0x20];
+};
+
+struct mlx5_ifc_dealloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_umem_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 ats[0x1];
+ u8 reserved_at_81[0x1a];
+ u8 log_page_size[0x5];
+
+ u8 page_offset[0x20];
+
+ u8 num_of_mtt[0x40];
+
+ struct mlx5_ifc_mtt_bits mtt[];
+};
+
+struct mlx5_ifc_uctx_bits {
+ u8 cap[0x20];
+
+ u8 reserved_at_20[0x160];
+};
+
+struct mlx5_ifc_sw_icm_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x18];
+ u8 log_sw_icm_size[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ u8 sw_icm_start_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_geneve_tlv_option_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x18];
+ u8 geneve_option_fte_index[0x8];
+
+ u8 option_class[0x10];
+ u8 option_type[0x8];
+ u8 reserved_at_78[0x3];
+ u8 option_data_length[0x5];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_create_umem_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_umem_bits umem;
+};
+
+struct mlx5_ifc_create_umem_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 umem_id[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x8];
+ u8 umem_id[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_uctx_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_uctx_bits uctx;
+};
+
+struct mlx5_ifc_create_uctx_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_uctx_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_uctx_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_sw_icm_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_sw_icm_bits sw_icm;
+};
+
+struct mlx5_ifc_create_geneve_tlv_option_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_geneve_tlv_option_bits geneve_tlv_opt;
+};
+
+struct mlx5_ifc_mtrc_string_db_param_bits {
+ u8 string_db_base_address[0x20];
+
+ u8 reserved_at_20[0x8];
+ u8 string_db_size[0x18];
+};
+
+struct mlx5_ifc_mtrc_cap_bits {
+ u8 trace_owner[0x1];
+ u8 trace_to_memory[0x1];
+ u8 reserved_at_2[0x4];
+ u8 trc_ver[0x2];
+ u8 reserved_at_8[0x14];
+ u8 num_string_db[0x4];
+
+ u8 first_string_trace[0x8];
+ u8 num_string_trace[0x8];
+ u8 reserved_at_30[0x28];
+
+ u8 log_max_trace_buffer_size[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8];
+
+ u8 reserved_at_280[0x180];
+};
+
+struct mlx5_ifc_mtrc_conf_bits {
+ u8 reserved_at_0[0x1c];
+ u8 trace_mode[0x4];
+ u8 reserved_at_20[0x18];
+ u8 log_trace_buffer_size[0x8];
+ u8 trace_mkey[0x20];
+ u8 reserved_at_60[0x3a0];
+};
+
+struct mlx5_ifc_mtrc_stdb_bits {
+ u8 string_db_index[0x4];
+ u8 reserved_at_4[0x4];
+ u8 read_size[0x18];
+ u8 start_offset[0x20];
+ u8 string_db_data[];
+};
+
+struct mlx5_ifc_mtrc_ctrl_bits {
+ u8 trace_status[0x2];
+ u8 reserved_at_2[0x2];
+ u8 arm_event[0x1];
+ u8 reserved_at_5[0xb];
+ u8 modify_field_select[0x10];
+ u8 reserved_at_20[0x2b];
+ u8 current_timestamp52_32[0x15];
+ u8 current_timestamp31_0[0x20];
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_host_params_context_bits {
+ u8 host_number[0x8];
+ u8 reserved_at_8[0x5];
+ u8 host_pf_not_exist[0x1];
+ u8 reserved_at_e[0x1];
+ u8 host_pf_disabled[0x1];
+ u8 host_num_of_vfs[0x10];
+
+ u8 host_total_vfs[0x10];
+ u8 host_pci_bus[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 host_pci_device[0x10];
+
+ u8 reserved_at_60[0x10];
+ u8 host_pci_function[0x10];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_query_esw_functions_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_esw_functions_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_host_params_context_bits host_params_context;
+
+ u8 reserved_at_280[0x180];
+ u8 host_sf_enable[][0x40];
+};
+
+struct mlx5_ifc_sf_partition_bits {
+ u8 reserved_at_0[0x10];
+ u8 log_num_sf[0x8];
+ u8 log_sf_bar_size[0x8];
+};
+
+struct mlx5_ifc_query_sf_partitions_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x18];
+ u8 num_sf_partitions[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_sf_partition_bits sf_partition[];
+};
+
+struct mlx5_ifc_query_sf_partitions_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_sf_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_sf_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_sf_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_alloc_sf_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_affiliated_event_header_bits {
+ u8 reserved_at_0[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+};
+
+enum {
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
+ MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
+ MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
+ MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
+ MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL = 0x53,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT = 0x58,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
+};
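/* Editor's sketch (annotation, not part of this diff): general objects
 * are created through the common general_obj_in_cmd_hdr layout used by
 * the create_sw_icm_in/create_geneve_tlv_option_in wrappers above, with
 * obj_type taken from the enum just defined, e.g.:
 *
 *	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
 *		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
 *	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
 *		 MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
 */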
+
+enum {
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_IPSEC),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_SAMPLER),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO),
+};
+
+enum {
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL - 0x40),
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT - 0x40),
+};
+
+enum {
+ MLX5_IPSEC_OBJECT_ICV_LEN_16B,
+};
+
+enum {
+ MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
+ MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
+ MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
+ MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
+};
+
+enum {
+ MLX5_IPSEC_ASO_MODE = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
+ MLX5_IPSEC_ASO_INC_SN = 0x2,
+};
+
+enum {
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+struct mlx5_ifc_ipsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_201[0x1];
+ u8 mode[0x2];
+ u8 window_sz[0x2];
+ u8 soft_lft_arm[0x1];
+ u8 hard_lft_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 esn_event_arm[0x1];
+ u8 reserved_at_20a[0x16];
+
+ u8 remove_flow_pkt_cnt[0x20];
+
+ u8 remove_flow_soft_lft[0x20];
+
+ u8 reserved_at_260[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[0x100];
+};
+
+struct mlx5_ifc_ipsec_obj_bits {
+ u8 modify_field_select[0x40];
+ u8 full_offload[0x1];
+ u8 reserved_at_41[0x1];
+ u8 esn_en[0x1];
+ u8 esn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 icv_length[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 esn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 salt[0x20];
+
+ u8 implicit_iv[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 ipsec_aso_access_pd[0x18];
+ u8 reserved_at_120[0xe0];
+
+ struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
+};
+
+struct mlx5_ifc_create_ipsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+enum {
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP = BIT(0),
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB = BIT(1),
+};
+
+struct mlx5_ifc_query_ipsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+struct mlx5_ifc_modify_ipsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+enum {
+ MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1,
+};
+
+enum {
+ MLX5_MACSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_MACSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_MACSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_MACSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+#define MLX5_MACSEC_ASO_INC_SN 0x2
+#define MLX5_MACSEC_ASO_REG_C_4_5 0x2
+
+struct mlx5_ifc_macsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x1];
+ u8 mode[0x2];
+ u8 window_size[0x2];
+ u8 soft_lifetime_arm[0x1];
+ u8 hard_lifetime_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 epn_event_arm[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 remove_flow_packet_count[0x20];
+
+ u8 remove_flow_soft_lifetime[0x20];
+
+ u8 reserved_at_60[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[8][0x20];
+};
+
+struct mlx5_ifc_macsec_offload_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 confidentiality_en[0x1];
+ u8 reserved_at_41[0x1];
+ u8 epn_en[0x1];
+ u8 epn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 confidentiality_offset[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 epn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sci[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 macsec_aso_access_pd[0x18];
+
+ u8 reserved_at_120[0x60];
+
+ u8 salt[3][0x20];
+
+ u8 reserved_at_1e0[0x20];
+
+ struct mlx5_ifc_macsec_aso_bits macsec_aso;
+};
+
+struct mlx5_ifc_create_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_modify_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+enum {
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP = BIT(0),
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB = BIT(1),
+};
+
+struct mlx5_ifc_query_macsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_wrapped_dek_bits {
+ u8 gcm_iv[0x60];
+
+ u8 reserved_at_60[0x20];
+
+ u8 const0[0x1];
+ u8 key_size[0x1];
+ u8 reserved_at_82[0x2];
+ u8 key2_invalid[0x1];
+ u8 reserved_at_85[0x3];
+ u8 pd[0x18];
+
+ u8 key_purpose[0x5];
+ u8 reserved_at_a5[0x13];
+ u8 kek_id[0x8];
+
+ u8 reserved_at_c0[0x40];
+
+ u8 key1[0x8][0x20];
+
+ u8 key2[0x8][0x20];
+
+ u8 reserved_at_300[0x40];
+
+ u8 const1[0x1];
+ u8 reserved_at_341[0x1f];
+
+ u8 reserved_at_360[0x20];
+
+ u8 auth_tag[0x80];
+};
+
+struct mlx5_ifc_encryption_key_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 state[0x8];
+ u8 sw_wrapped[0x1];
+ u8 reserved_at_49[0xb];
+ u8 key_size[0x4];
+ u8 reserved_at_58[0x4];
+ u8 key_purpose[0x4];
+
+ u8 reserved_at_60[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_80[0x100];
+
+ u8 opaque[0x40];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 key[8][0x80];
+
+ u8 sw_wrapped_dek[8][0x80];
+
+ u8 reserved_at_a00[0x600];
+};
+
+struct mlx5_ifc_create_encryption_key_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
+};
+
+struct mlx5_ifc_modify_encryption_key_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
+};
+
+enum {
+ MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH = 0x0,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2 = 0x1,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2_IPG = 0x2,
+ MLX5_FLOW_METER_MODE_NUM_PACKETS = 0x3,
+};
+
+struct mlx5_ifc_flow_meter_parameters_bits {
+ u8 valid[0x1];
+ u8 bucket_overflow[0x1];
+ u8 start_color[0x2];
+ u8 both_buckets_on_green[0x1];
+ u8 reserved_at_5[0x1];
+ u8 meter_mode[0x2];
+ u8 reserved_at_8[0x18];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x3];
+ u8 cbs_exponent[0x5];
+ u8 cbs_mantissa[0x8];
+ u8 reserved_at_50[0x3];
+ u8 cir_exponent[0x5];
+ u8 cir_mantissa[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ u8 reserved_at_80[0x3];
+ u8 ebs_exponent[0x5];
+ u8 ebs_mantissa[0x8];
+ u8 reserved_at_90[0x3];
+ u8 eir_exponent[0x5];
+ u8 eir_mantissa[0x8];
+
+ u8 reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_flow_meter_aso_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 meter_aso_access_pd[0x18];
+
+ u8 reserved_at_a0[0x160];
+
+ struct mlx5_ifc_flow_meter_parameters_bits flow_meter_parameters[2];
+};
+
+struct mlx5_ifc_create_flow_meter_aso_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj;
+};
+
+struct mlx5_ifc_int_kek_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 state[0x8];
+ u8 auto_gen[0x1];
+ u8 reserved_at_49[0xb];
+ u8 key_size[0x4];
+ u8 reserved_at_58[0x8];
+
+ u8 reserved_at_60[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_80[0x180];
+ u8 key[8][0x80];
+
+ u8 reserved_at_600[0x200];
+};
+
+struct mlx5_ifc_create_int_kek_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
+struct mlx5_ifc_create_int_kek_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
+struct mlx5_ifc_sampler_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 table_type[0x8];
+ u8 level[0x8];
+ u8 reserved_at_50[0xf];
+ u8 ignore_flow_level[0x1];
+
+ u8 sample_ratio[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 sample_table_id[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 default_table_id[0x18];
+
+ u8 sw_steering_icm_address_rx[0x40];
+ u8 sw_steering_icm_address_tx[0x40];
+
+ u8 reserved_at_140[0xa0];
+};
+
+struct mlx5_ifc_create_sampler_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
+struct mlx5_ifc_query_sampler_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
+enum {
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
+};
+
+enum {
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_PSP = 0x6,
+};
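
Creating a DEK ties these enums to the structures above: a general_obj_in_cmd_hdr selects MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY and the encryption_key_obj payload carries the key material. A hedged sketch using the standard MLX5_SET()/MLX5_ADDR_OF() helpers (error handling is trimmed, and real callers position the key within key[] according to its size):

static int create_tls_dek(struct mlx5_core_dev *mdev, u32 pdn,
			  const void *key, size_t key_len, u32 *obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	void *obj = MLX5_ADDR_OF(create_encryption_key_in, in,
				 encryption_key_object);
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
	MLX5_SET(encryption_key_obj, obj, key_size,
		 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128);
	MLX5_SET(encryption_key_obj, obj, key_purpose,
		 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS);
	MLX5_SET(encryption_key_obj, obj, pd, pdn);
	/* Simplified: key placement inside key[] depends on key size. */
	memcpy(MLX5_ADDR_OF(encryption_key_obj, obj, key), key, key_len);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return err;
}
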
+
+struct mlx5_ifc_tls_static_params_bits {
+ u8 const_2[0x2];
+ u8 tls_version[0x4];
+ u8 const_1[0x2];
+ u8 reserved_at_8[0x14];
+ u8 encryption_standard[0x4];
+
+ u8 reserved_at_20[0x20];
+
+ u8 initial_record_number[0x40];
+
+ u8 resync_tcp_sn[0x20];
+
+ u8 gcm_iv[0x20];
+
+ u8 implicit_iv[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 dek_index[0x18];
+
+ u8 reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_tls_progress_params_bits {
+ u8 next_record_tcp_sn[0x20];
+
+ u8 hw_resync_tcp_sn[0x20];
+
+ u8 record_tracker_state[0x2];
+ u8 auth_state[0x2];
+ u8 reserved_at_44[0x4];
+ u8 hw_offset_record_number[0x18];
+};
+
+enum {
+ MLX5_MTT_PERM_READ = 1 << 0,
+ MLX5_MTT_PERM_WRITE = 1 << 1,
+ MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE,
+};
+
+enum {
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR = 0x0,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER = 0x1,
+};
+
+struct mlx5_ifc_suspend_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_suspend_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
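
These two layouts map directly onto a command-exec call. A sketch of issuing the suspend, assuming the matching MLX5_CMD_OP_SUSPEND_VHCA opcode constant and the generic mlx5_cmd_exec() path:

static int suspend_vhca(struct mlx5_core_dev *mdev, u16 vhca_id, u16 op_mod)
{
	u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};
	u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};

	MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
	MLX5_SET(suspend_vhca_in, in, vhca_id, vhca_id);
	MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
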
+
+enum {
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER = 0x0,
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR = 0x1,
+};
+
+struct mlx5_ifc_resume_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_resume_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 incremental[0x1];
+ u8 chunk[0x1];
+ u8 reserved_at_42[0xe];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 required_umem_size[0x20];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 remaining_total_size[0x40];
+
+ u8 reserved_at_100[0x100];
+};
+
+struct mlx5_ifc_save_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 incremental[0x1];
+ u8 set_track[0x1];
+ u8 reserved_at_42[0xe];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_save_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 actual_image_size[0x20];
+
+ u8 next_required_umem_size[0x20];
+};
+
+struct mlx5_ifc_load_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_load_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_adv_rdma_cap_bits {
+ u8 rdma_transport_manager[0x1];
+ u8 rdma_transport_manager_other_eswitch[0x1];
+ u8 reserved_at_2[0x1e];
+
+ u8 rcx_type[0x8];
+ u8 reserved_at_28[0x2];
+ u8 ps_entry_log_max_value[0x6];
+ u8 reserved_at_30[0x6];
+ u8 qp_max_ps_num_entry[0xa];
+
+ u8 mp_max_num_queues[0x8];
+ u8 ps_user_context_max_log_size[0x8];
+ u8 message_based_qp_and_striding_wq[0x8];
+ u8 reserved_at_58[0x8];
+
+ u8 max_receive_send_message_size_stride[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 max_receive_send_message_size_byte[0x20];
+
+ u8 reserved_at_a0[0x160];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_rx_flow_table_properties;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_tx_flow_table_properties;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_bitmask_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_bitmask_support_2;
+
+ u8 reserved_at_800[0x3800];
+};
+
+struct mlx5_ifc_adv_virtualization_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 pg_track_log_max_num[0x5];
+ u8 pg_track_max_num_range[0x8];
+ u8 pg_track_log_min_addr_space[0x8];
+ u8 pg_track_log_max_addr_space[0x8];
+
+ u8 reserved_at_20[0x3];
+ u8 pg_track_log_min_msg_size[0x5];
+ u8 reserved_at_28[0x3];
+ u8 pg_track_log_max_msg_size[0x5];
+ u8 reserved_at_30[0x3];
+ u8 pg_track_log_min_page_size[0x5];
+ u8 reserved_at_38[0x3];
+ u8 pg_track_log_max_page_size[0x5];
+
+ u8 reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_page_track_report_entry_bits {
+ u8 dirty_address_high[0x20];
+
+ u8 dirty_address_low[0x20];
+};
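
Each report entry splits a 64-bit dirty-page address across two 32-bit fields, so the consumer reassembles it. A sketch with the standard MLX5_GET() accessor:

static u64 page_track_entry_addr(const void *entry)
{
	/* Recombine the high and low halves of the dirty page address. */
	return ((u64)MLX5_GET(page_track_report_entry, entry,
			      dirty_address_high) << 32) |
	       MLX5_GET(page_track_report_entry, entry, dirty_address_low);
}
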
+
+enum {
+ MLX5_PAGE_TRACK_STATE_TRACKING,
+ MLX5_PAGE_TRACK_STATE_REPORTING,
+ MLX5_PAGE_TRACK_STATE_ERROR,
+};
+
+struct mlx5_ifc_page_track_range_bits {
+ u8 start_address[0x40];
+
+ u8 length[0x40];
+};
+
+struct mlx5_ifc_page_track_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 state[0x4];
+ u8 track_type[0x4];
+ u8 log_addr_space_size[0x8];
+ u8 reserved_at_90[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_at_98[0x3];
+ u8 log_msg_size[0x5];
+
+ u8 reserved_at_a0[0x8];
+ u8 reporting_qpn[0x18];
+
+ u8 reserved_at_c0[0x18];
+ u8 num_ranges[0x8];
+
+ u8 reserved_at_e0[0x20];
+
+ u8 range_start_address[0x40];
+
+ u8 length[0x40];
+
+ struct mlx5_ifc_page_track_range_bits track_range[0];
+};
+
+struct mlx5_ifc_create_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_modify_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_query_page_track_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_msecq_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x12];
+ u8 network_option[0x2];
+ u8 local_ssm_code[0x4];
+ u8 local_enhanced_ssm_code[0x8];
+
+ u8 local_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
+enum {
+ MLX5_MSEES_FIELD_SELECT_ENABLE = BIT(0),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS = BIT(1),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE = BIT(2),
+};
+
+enum mlx5_msees_admin_status {
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_ADMIN_STATUS_TRACK = 0x1,
+};
+
+enum mlx5_msees_oper_status {
+ MLX5_MSEES_OPER_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_OPER_STATUS_SELF_TRACK = 0x1,
+ MLX5_MSEES_OPER_STATUS_OTHER_TRACK = 0x2,
+ MLX5_MSEES_OPER_STATUS_HOLDOVER = 0x3,
+ MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER = 0x4,
+ MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5,
+};
+
+enum mlx5_msees_failure_reason {
+ MLX5_MSEES_FAILURE_REASON_UNDEFINED_ERROR = 0x0,
+ MLX5_MSEES_FAILURE_REASON_PORT_DOWN = 0x1,
+ MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF = 0x2,
+ MLX5_MSEES_FAILURE_REASON_NET_SYNCHRONIZER_DEVICE_ERROR = 0x3,
+ MLX5_MSEES_FAILURE_REASON_LACK_OF_RESOURCES = 0x4,
+};
+
+struct mlx5_ifc_msees_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 lp_msb[0x2];
+ u8 reserved_at_14[0xc];
+
+ u8 field_select[0x20];
+
+ u8 admin_status[0x4];
+ u8 oper_status[0x4];
+ u8 ho_acq[0x1];
+ u8 reserved_at_49[0xc];
+ u8 admin_freq_measure[0x1];
+ u8 oper_freq_measure[0x1];
+ u8 failure_reason[0x9];
+
+ u8 frequency_diff[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_mrtcq_reg_bits {
+ u8 reserved_at_0[0x40];
+
+ u8 rt_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_pcie_cong_event_obj_bits {
+ u8 modify_select_field[0x40];
+
+ u8 inbound_event_en[0x1];
+ u8 outbound_event_en[0x1];
+ u8 reserved_at_42[0x1e];
+
+ u8 reserved_at_60[0x1];
+ u8 inbound_cong_state[0x3];
+ u8 reserved_at_64[0x1];
+ u8 outbound_cong_state[0x3];
+ u8 reserved_at_68[0x18];
+
+ u8 inbound_cong_low_threshold[0x10];
+ u8 inbound_cong_high_threshold[0x10];
+
+ u8 outbound_cong_low_threshold[0x10];
+ u8 outbound_cong_high_threshold[0x10];
+
+ u8 reserved_at_e0[0x340];
+};
+
+struct mlx5_ifc_pcie_cong_event_cmd_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
+};
+
+struct mlx5_ifc_pcie_cong_event_cmd_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+ struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
+};
+
+enum mlx5e_pcie_cong_event_mod_field {
+ MLX5_PCIE_CONG_EVENT_MOD_EVENT_EN = BIT(0),
+ MLX5_PCIE_CONG_EVENT_MOD_THRESH = BIT(2),
+};
+
+struct mlx5_ifc_psp_rotate_key_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_psp_rotate_key_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum mlx5_psp_gen_spi_in_key_size {
+ MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128 = 0x0,
+ MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256 = 0x1,
+};
+
+struct mlx5_ifc_key_spi_bits {
+ u8 spi[0x20];
+
+ u8 reserved_at_20[0x60];
+
+ u8 key[8][0x20];
+};
+
+struct mlx5_ifc_psp_gen_spi_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 key_size[0x2];
+ u8 reserved_at_62[0xe];
+ u8 num_of_spi[0x10];
+};
+
+struct mlx5_ifc_psp_gen_spi_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 num_of_spi[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_key_spi_bits key_spi[];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 255a88d08078..0596472923ad 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -32,14 +32,6 @@
#ifndef MLX5_IFC_FPGA_H
#define MLX5_IFC_FPGA_H
-enum {
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9,
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
-};
-
struct mlx5_ifc_fpga_shell_caps_bits {
u8 max_num_qps[0x10];
u8 reserved_at_10[0x8];
@@ -370,63 +362,20 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_ipsec_extended_cap_bits {
- u8 encapsulation[0x20];
-
- u8 reserved_0[0x15];
- u8 ipv4_fragment[0x1];
- u8 ipv6[0x1];
- u8 esn[0x1];
- u8 lso[0x1];
- u8 transport_and_tunnel_mode[0x1];
- u8 tunnel_mode[0x1];
- u8 transport_mode[0x1];
- u8 ah_esp[0x1];
- u8 esp[0x1];
- u8 ah[0x1];
- u8 ipv4_options[0x1];
-
- u8 auth_alg[0x20];
-
- u8 enc_alg[0x20];
-
- u8 sa_cap[0x20];
-
- u8 reserved_1[0x10];
- u8 number_of_ipsec_counters[0x10];
-
- u8 ipsec_counters_addr_low[0x20];
- u8 ipsec_counters_addr_high[0x20];
+enum {
+ MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1,
+ MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2,
};
-struct mlx5_ifc_ipsec_counters_bits {
- u8 dec_in_packets[0x40];
-
- u8 dec_out_packets[0x40];
-
- u8 dec_bypass_packets[0x40];
-
- u8 enc_in_packets[0x40];
-
- u8 enc_out_packets[0x40];
-
- u8 enc_bypass_packets[0x40];
-
- u8 drop_dec_packets[0x40];
-
- u8 failed_auth_dec_packets[0x40];
-
- u8 drop_enc_packets[0x40];
-
- u8 success_add_sa[0x40];
-
- u8 fail_add_sa[0x40];
+struct mlx5_ifc_fpga_qp_error_event_bits {
+ u8 reserved_at_0[0x40];
- u8 success_delete_sa[0x40];
+ u8 reserved_at_40[0x18];
+ u8 syndrome[0x8];
- u8 fail_delete_sa[0x40];
+ u8 reserved_at_60[0x60];
- u8 dropped_cmd[0x40];
+ u8 reserved_at_c0[0x8];
+ u8 fpga_qpn[0x18];
};
-
#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
new file mode 100644
index 000000000000..58dfa2ee7c83
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_IFC_VDPA_H_
+#define __MLX5_IFC_VDPA_H_
+
+enum {
+ MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0,
+ MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1,
+ MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
+};
+
+struct mlx5_ifc_virtio_q_bits {
+ u8 virtio_q_type[0x8];
+ u8 reserved_at_8[0x5];
+ u8 event_mode[0x3];
+ u8 queue_index[0x10];
+
+ u8 full_emulation[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 reserved_at_22[0x2];
+ u8 offload_type[0x4];
+ u8 event_qpn_or_msix[0x18];
+
+ u8 doorbell_stride_index[0x10];
+ u8 queue_size[0x10];
+
+ u8 device_emulation_id[0x20];
+
+ u8 desc_addr[0x40];
+
+ u8 used_addr[0x40];
+
+ u8 available_addr[0x40];
+
+ u8 virtio_q_mkey[0x20];
+
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_170[0x8];
+ u8 error_type[0x8];
+
+ u8 umem_1_id[0x20];
+
+ u8 umem_1_size[0x20];
+
+ u8 umem_1_offset[0x40];
+
+ u8 umem_2_id[0x20];
+
+ u8 umem_2_size[0x20];
+
+ u8 umem_2_offset[0x40];
+
+ u8 umem_3_id[0x20];
+
+ u8 umem_3_size[0x20];
+
+ u8 umem_3_offset[0x40];
+
+ u8 counter_set_id[0x20];
+
+ u8 reserved_at_320[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_340[0x20];
+
+ u8 desc_group_mkey[0x20];
+
+ u8 reserved_at_380[0x80];
+};
+
+struct mlx5_ifc_virtio_net_q_object_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x20];
+
+ u8 vhca_id[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 queue_feature_bit_mask_12_3[0xa];
+ u8 dirty_bitmap_dump_enable[0x1];
+ u8 vhost_log_page[0x5];
+ u8 reserved_at_90[0xc];
+ u8 state[0x4];
+
+ u8 reserved_at_a0[0x5];
+ u8 queue_feature_bit_mask_2_0[0x3];
+ u8 tisn_or_qpn[0x18];
+
+ u8 dirty_bitmap_mkey[0x20];
+
+ u8 dirty_bitmap_size[0x20];
+
+ u8 dirty_bitmap_addr[0x40];
+
+ u8 hw_available_index[0x10];
+ u8 hw_used_index[0x10];
+
+ u8 reserved_at_160[0xa0];
+
+ struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+enum {
+ MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8,
+ MLX5_VIRTQ_MODIFY_MASK_QUEUE_VIRTIO_VERSION = (u64)1 << 10,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY = (u64)1 << 11,
+ MLX5_VIRTQ_MODIFY_MASK_QUEUE_FEATURES = (u64)1 << 12,
+ MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14,
+};
+
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
+};
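
State changes go through the generic modify-object command, with modify_field_select picking which fields apply. A sketch of suspending a queue (the MLX5_CMD_OP_MODIFY_GENERAL_OBJECT header setup follows the usual general-object conventions and is omitted, along with object-id plumbing):

	u32 in[MLX5_ST_SZ_DW(modify_virtio_net_q_in)] = {};
	void *obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in,
					 obj_context);

	MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
		   MLX5_VIRTQ_MODIFY_MASK_STATE);
	MLX5_SET(virtio_net_q_object, obj_context, state,
		 MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
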
+
+/* This indicates that the object was not created or has already
+ * been destroyed. It is safe to assume that the object will never
+ * have this many states.
+ */
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_NONE = 0xffffffff
+};
+
+enum {
+ MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
+ MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
+};
+
+struct mlx5_ifc_modify_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_modify_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_virtio_q_counters_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 received_desc[0x40];
+ u8 completed_desc[0x40];
+ u8 error_cqes[0x20];
+ u8 bad_desc_errors[0x20];
+ u8 exceed_max_chain[0x20];
+ u8 invalid_buffer[0x20];
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_create_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_create_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits counters;
+};
+
+#endif /* __MLX5_IFC_VDPA_H_ */
diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h
new file mode 100644
index 000000000000..bf700c8d5516
--- /dev/null
+++ b/include/linux/mlx5/mpfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2021 Mellanox Technologies Ltd.
+ */
+
+#ifndef _MLX5_MPFS_
+#define _MLX5_MPFS_
+
+struct mlx5_core_dev;
+
+#ifdef CONFIG_MLX5_MPFS
+int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+#else /* #ifndef CONFIG_MLX5_MPFS */
+static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+#endif
+
+#endif
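
The stubbed variants make the calls no-ops when MPFS support is compiled out, so callers need no ifdefs. A hedged sketch of the intended call pattern for an auxiliary driver claiming a unicast MAC in the multi-physical-function switch table (the surrounding variables are illustrative):

	err = mlx5_mpfs_add_mac(mdev, mac);
	if (err)
		return err;
	/* ... MAC is now steered to this function; on teardown: */
	mlx5_mpfs_del_mac(mdev, mac);
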
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index c57d4b7de3a8..1df9d9a57bbc 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -45,6 +45,7 @@ enum mlx5_module_id {
MLX5_MODULE_ID_QSFP = 0xC,
MLX5_MODULE_ID_QSFP_PLUS = 0xD,
MLX5_MODULE_ID_QSFP28 = 0x11,
+ MLX5_MODULE_ID_DSFP = 0x1B,
};
enum mlx5_an_status {
@@ -55,11 +56,10 @@ enum mlx5_an_status {
MLX5_AN_LINK_DOWN = 4,
};
-#define MLX5_EEPROM_MAX_BYTES 32
-#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_I2C_ADDR_LOW 0x50
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
+#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128
enum mlx5e_link_mode {
MLX5E_1000BASE_CX_SGMII = 0,
@@ -92,6 +92,30 @@ enum mlx5e_link_mode {
MLX5E_LINK_MODES_NUMBER,
};
+enum mlx5e_ext_link_mode {
+ MLX5E_SGMII_100M = 0,
+ MLX5E_1000BASE_X_SGMII = 1,
+ MLX5E_5GBASE_R = 3,
+ MLX5E_10GBASE_XFI_XAUI_1 = 4,
+ MLX5E_40GBASE_XLAUI_4_XLPPI_4 = 5,
+ MLX5E_25GAUI_1_25GBASE_CR_KR = 6,
+ MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 = 7,
+ MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8,
+ MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9,
+ MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10,
+ MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
+ MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
+ MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
+ MLX5E_200GAUI_1_200GBASE_CR1_KR1 = 14,
+ MLX5E_400GAUI_8_400GBASE_CR8 = 15,
+ MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
+ MLX5E_400GAUI_2_400GBASE_CR2_KR2 = 17,
+ MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
+ MLX5E_800GAUI_4_800GBASE_CR4_KR4 = 20,
+ MLX5E_1600TAUI_8_1600TBASE_CR8_KR8 = 23,
+ MLX5E_EXT_LINK_MODES_NUMBER,
+};
+
enum mlx5e_connector_type {
MLX5E_PORT_UNKNOWN = 0,
MLX5E_PORT_NONE = 1,
@@ -105,37 +129,27 @@ enum mlx5e_connector_type {
MLX5E_CONNECTOR_TYPE_NUMBER,
};
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+enum mlx5_ptys_width {
+ MLX5_PTYS_WIDTH_1X = 1 << 0,
+ MLX5_PTYS_WIDTH_2X = 1 << 1,
+ MLX5_PTYS_WIDTH_4X = 1 << 2,
+ MLX5_PTYS_WIDTH_8X = 1 << 3,
+ MLX5_PTYS_WIDTH_12X = 1 << 4,
+};
-#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
-#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
+#define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
+#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
+ (ext ? MLX5_GET(reg, out, ext_##field) : \
+ MLX5_GET(reg, out, field))
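
MLX5_GET_ETH_PROTO() hides the split between the legacy and extended PTYS protocol fields. A usage sketch, assuming the existing MLX5_CAP_PCAM_FEATURE() probe for extended-ethernet support and the updated mlx5_query_port_ptys() signature below (return-value handling trimmed):

	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
	u32 eth_proto_oper;

	mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1, 0);
	eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
					    eth_proto_oper);
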
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
- int ptys_size, int proto_mask, u8 local_port);
-int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
- u32 *proto_cap, int proto_mask);
-int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
- u32 *proto_admin, int proto_mask);
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
- u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, u8 local_port);
-int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
- u32 *proto_oper, u8 local_port);
-int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
- u32 proto_admin, int proto_mask);
-void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
-int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status);
-int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status *status);
-int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
-void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
- u8 *an_status,
- u8 *an_disable_cap, u8 *an_disable_admin);
-
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+ int ptys_size, int proto_mask,
+ u8 local_port, u8 plane_index);
+
+int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
+ u16 *proto_oper, u8 local_port, u8 plane_index);
+
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port);
@@ -143,38 +157,4 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
-int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
-int mlx5_query_port_pause(struct mlx5_core_dev *dev,
- u32 *rx_pause, u32 *tx_pause);
-
-int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
-int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
- u8 *pfc_en_rx);
-
-int mlx5_max_tc(struct mlx5_core_dev *mdev);
-
-int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
-int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
- u8 prio, u8 *tc);
-int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
-int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
-int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
- u8 tc, u8 *bw_pct);
-int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
- u8 *max_bw_unit);
-int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
- u8 *max_bw_unit);
-int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
-int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
-
-int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
-void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
- bool *enabled);
-int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data);
-
-int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
-int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
#endif /* __MLX5_PORT_H__ */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 66d19b611fe4..d67aedc6ea68 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -36,8 +36,9 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
-#define MLX5_INVALID_LKEY 0x100
-#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
+#define MLX5_TERMINATE_SCATTER_LIST_LKEY cpu_to_be32(0x100)
+/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */
+#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8)
#define MLX5_DIF_SIZE 8
#define MLX5_STRIDE_BLOCK_OP 0x400
#define MLX5_CPY_GRD_MASK 0xc0
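
The renamed lkey constant now bakes in the byte swap (note the cpu_to_be32() in its definition), so it can be assigned directly when terminating a receive scatter list that is shorter than the maximum. A sketch, assuming the existing struct mlx5_wqe_data_seg layout:

static void mlx5_terminate_scatter_list(struct mlx5_wqe_data_seg *scat, int i)
{
	/* A zero-length segment with the terminate lkey ends the list. */
	scat[i].byte_count = 0;
	scat[i].lkey = MLX5_TERMINATE_SCATTER_LIST_LKEY;
	scat[i].addr = 0;
}
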
@@ -65,11 +66,13 @@ enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
+ MLX5_QP_OPTPAR_LAG_TX_AFF = 1 << 15,
MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
MLX5_QP_OPTPAR_SRQN = 1 << 18,
MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
MLX5_QP_OPTPAR_DC_HS = 1 << 20,
MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
+ MLX5_QP_OPTPAR_COUNTER_SET_ID = 1 << 25,
};
enum mlx5_qp_state {
@@ -146,6 +149,7 @@ enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
+ MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
};
enum {
@@ -159,6 +163,8 @@ enum {
MLX5_SEND_WQE_MAX_WQEBBS = 16,
};
+#define MLX5_SEND_WQE_MAX_SIZE (MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB)
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -199,10 +205,20 @@ struct mlx5_wqe_fmr_seg {
struct mlx5_wqe_ctrl_seg {
__be32 opmod_idx_opcode;
__be32 qpn_ds;
+
+ struct_group(trailer,
+
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
- __be32 imm;
+ union {
+ __be32 general_id;
+ __be32 imm;
+ __be32 umr_mkey;
+ __be32 tis_tir_num;
+ };
+
+ ); /* end of trailer group */
};
#define MLX5_WQE_CTRL_DS_MASK 0x3f
@@ -221,7 +237,11 @@ enum {
};
enum {
- MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
+ MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26,
+ MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27,
+ MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26,
+ MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28,
+ MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30,
};
enum {
@@ -231,6 +251,17 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
};
+/* Metadata bits 0-7 are used by timestamping */
+/* Base shift for metadata bits used by IPsec and MACsec */
+#define MLX5_ETH_WQE_FT_META_SHIFT 8
+
+enum {
+ MLX5_ETH_WQE_FT_META_IPSEC = BIT(0) << MLX5_ETH_WQE_FT_META_SHIFT,
+ MLX5_ETH_WQE_FT_META_MACSEC = BIT(1) << MLX5_ETH_WQE_FT_META_SHIFT,
+ MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK =
+ GENMASK(5, 2) << MLX5_ETH_WQE_FT_META_SHIFT,
+};
+
struct mlx5_wqe_eth_seg {
u8 swp_outer_l4_offset;
u8 swp_outer_l3_offset;
@@ -239,16 +270,16 @@ struct mlx5_wqe_eth_seg {
u8 cs_flags;
u8 swp_flags;
__be16 mss;
- __be32 rsvd2;
+ __be32 flow_table_metadata;
union {
struct {
__be16 sz;
- u8 start[2];
+ union {
+ u8 start[2];
+ DECLARE_FLEX_ARRAY(u8, data);
+ };
} inline_hdr;
- struct {
- __be16 type;
- __be16 vlan_tci;
- } insert;
+ __be32 trailer;
};
};
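
With bits 0-7 reserved for timestamping, the IPsec and MACsec offload flags start at MLX5_ETH_WQE_FT_META_SHIFT and land in the flow_table_metadata word the eth segment gains above. A sketch of tagging a transmit WQE for IPsec under those assumptions:

static void mlx5_eseg_set_ipsec_meta(struct mlx5_wqe_eth_seg *eseg)
{
	/* flow_table_metadata is big-endian on the wire. */
	eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
}
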
@@ -307,6 +338,7 @@ struct mlx5_av {
struct mlx5_ib_ah {
struct ib_ah ibah;
struct mlx5_av av;
+ u8 xmit_port;
};
static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
@@ -394,6 +426,7 @@ struct mlx5_wqe_signature_seg {
struct mlx5_wqe_inline_seg {
__be32 byte_count;
+ __be32 data[];
};
enum mlx5_sig_type {
@@ -449,6 +482,12 @@ struct mlx5_klm {
__be64 va;
};
+struct mlx5_ksm {
+ __be32 reserved;
+ __be32 key;
+ __be64 va;
+};
+
struct mlx5_stride_block_entry {
__be16 stride;
__be16 bcount;
@@ -464,123 +503,32 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
+struct mlx5_wqe_flow_update_ctrl_seg {
+ __be32 flow_idx_update;
+ __be32 dest_handle;
+ u8 reserved0[40];
+};
+
+struct mlx5_wqe_header_modify_argument_update_seg {
+ u8 argument_list[64];
+};
+
struct mlx5_core_qp {
struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
int qpn;
struct mlx5_rsc_debug *dbg;
int pid;
+ u16 uid;
};
-struct mlx5_qp_path {
- u8 fl_free_ar;
- u8 rsvd3;
- __be16 pkey_index;
- u8 rsvd0;
- u8 grh_mlid;
- __be16 rlid;
- u8 ackto_lt;
- u8 mgid_index;
- u8 static_rate;
- u8 hop_limit;
- __be32 tclass_flowlabel;
- union {
- u8 rgid[16];
- u8 rip[16];
- };
- u8 f_dscp_ecn_prio;
- u8 ecn_dscp;
- __be16 udp_sport;
- u8 dci_cfi_prio_sl;
- u8 port;
- u8 rmac[6];
+struct mlx5_core_dct {
+ struct mlx5_core_qp mqp;
+ struct completion drained;
};
-/* FIXME: use mlx5_ifc.h qpc */
-struct mlx5_qp_context {
- __be32 flags;
- __be32 flags_pd;
- u8 mtu_msgmax;
- u8 rq_size_stride;
- __be16 sq_crq_size;
- __be32 qp_counter_set_usr_page;
- __be32 wire_qpn;
- __be32 log_pg_sz_remote_qpn;
- struct mlx5_qp_path pri_path;
- struct mlx5_qp_path alt_path;
- __be32 params1;
- u8 reserved2[4];
- __be32 next_send_psn;
- __be32 cqn_send;
- __be32 deth_sqpn;
- u8 reserved3[4];
- __be32 last_acked_psn;
- __be32 ssn;
- __be32 params2;
- __be32 rnr_nextrecvpsn;
- __be32 xrcd;
- __be32 cqn_recv;
- __be64 db_rec_addr;
- __be32 qkey;
- __be32 rq_type_srqn;
- __be32 rmsn;
- __be16 hw_sq_wqe_counter;
- __be16 sw_sq_wqe_counter;
- __be16 hw_rcyclic_byte_counter;
- __be16 hw_rq_counter;
- __be16 sw_rcyclic_byte_counter;
- __be16 sw_rq_counter;
- u8 rsvd0[5];
- u8 cgs;
- u8 cs_req;
- u8 cs_res;
- __be64 dc_access_key;
- u8 rsvd1[24];
-};
-
-static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
-{
- return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
-}
-
-static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
-{
- return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
-}
-
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- u32 *in,
- int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
- u32 opt_param_mask, void *qpc,
- struct mlx5_core_qp *qp);
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp);
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- u32 *out, int outlen);
-
-int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
- u32 timeout_usec);
-
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
-void mlx5_init_qp_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
- struct mlx5_core_qp *rq);
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *rq);
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
- struct mlx5_core_qp *sq);
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *sq);
-int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
-int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
-int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
- int reset, void *out, int out_size);
static inline const char *mlx5_qp_type_str(int type)
{
@@ -627,4 +575,14 @@ static inline const char *mlx5_qp_state_str(int state)
}
}
+static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+{
+ u8 supported_ts_cap = mlx5_get_roce_state(dev) ?
+ MLX5_CAP_ROCE(dev, qp_ts_format) :
+ MLX5_CAP_GEN(dev, sq_ts_format);
+
+ return supported_ts_cap ? MLX5_TIMESTAMP_FORMAT_DEFAULT :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+}
+
#endif /* MLX5_QP_H */
diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h
new file mode 100644
index 000000000000..d11c0b228620
--- /dev/null
+++ b/include/linux/mlx5/rsc_dump.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies inc. */
+
+#include <linux/mlx5/driver.h>
+
+#ifndef __MLX5_RSC_DUMP
+#define __MLX5_RSC_DUMP
+
+enum mlx5_sgmt_type {
+ MLX5_SGMT_TYPE_HW_CQPC,
+ MLX5_SGMT_TYPE_HW_SQPC,
+ MLX5_SGMT_TYPE_HW_RQPC,
+ MLX5_SGMT_TYPE_FULL_SRQC,
+ MLX5_SGMT_TYPE_FULL_CQC,
+ MLX5_SGMT_TYPE_FULL_EQC,
+ MLX5_SGMT_TYPE_FULL_QPC,
+ MLX5_SGMT_TYPE_SND_BUFF,
+ MLX5_SGMT_TYPE_RCV_BUFF,
+ MLX5_SGMT_TYPE_SRQ_BUFF,
+ MLX5_SGMT_TYPE_CQ_BUFF,
+ MLX5_SGMT_TYPE_EQ_BUFF,
+ MLX5_SGMT_TYPE_SX_SLICE,
+ MLX5_SGMT_TYPE_SX_SLICE_ALL,
+ MLX5_SGMT_TYPE_RDB,
+ MLX5_SGMT_TYPE_RX_SLICE_ALL,
+ MLX5_SGMT_TYPE_PRM_QUERY_QP,
+ MLX5_SGMT_TYPE_PRM_QUERY_CQ,
+ MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
+ MLX5_SGMT_TYPE_MENU,
+ MLX5_SGMT_TYPE_TERMINATE,
+
+ MLX5_SGMT_TYPE_NUM, /* Keep last */
+};
+
+struct mlx5_rsc_key {
+ enum mlx5_sgmt_type rsc;
+ int index1;
+ int index2;
+ int num_of_obj1;
+ int num_of_obj2;
+ int size;
+};
+
+struct mlx5_rsc_dump_cmd;
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key);
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd);
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size);
+#endif /* __MLX5_RSC_DUMP */
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
deleted file mode 100644
index 24ff23e27c8a..000000000000
--- a/include/linux/mlx5/srq.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_SRQ_H
-#define MLX5_SRQ_H
-
-#include <linux/mlx5/driver.h>
-
-enum {
- MLX5_SRQ_FLAG_ERR = (1 << 0),
- MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
- MLX5_SRQ_FLAG_RNDV = (1 << 2),
-};
-
-struct mlx5_srq_attr {
- u32 type;
- u32 flags;
- u32 log_size;
- u32 wqe_shift;
- u32 log_page_size;
- u32 wqe_cnt;
- u32 srqn;
- u32 xrcd;
- u32 page_offset;
- u32 cqn;
- u32 pd;
- u32 lwm;
- u32 user_index;
- u64 db_record;
- __be64 *pas;
- u32 tm_log_list_size;
- u32 tm_next_tag;
- u32 tm_hw_phase_cnt;
- u32 tm_sw_phase_cnt;
-};
-
-struct mlx5_core_dev;
-
-void mlx5_init_srq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
-
-#endif /* MLX5_SRQ_H */
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 88441f5ece25..60ffeb6b67ae 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -39,40 +39,51 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqn);
-int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *sqn);
-int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
-int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tirn);
-int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
- int inlen);
+int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
-int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tisn);
-int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
- int inlen);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn);
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
-int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
-int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
-int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
-int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen);
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+struct mlx5_hairpin_params {
+ u8 log_data_size;
+ u8 log_num_packets;
+ u16 q_counter;
+ int num_channels;
+};
+
+struct mlx5_hairpin {
+ struct mlx5_core_dev *func_mdev;
+ struct mlx5_core_dev *peer_mdev;
+
+ int num_channels;
+
+ u32 *rqn;
+ u32 *sqn;
+
+ bool peer_gone;
+};
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params);
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
#endif /* __TRANSOBJ_H__ */
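
The hairpin API pairs receive queues on one device with send queues on a peer so traffic can be forwarded inside the NIC. A hedged construction sketch (parameter values are illustrative; the create call is assumed to return an ERR_PTR on failure):

	struct mlx5_hairpin_params params = {
		.log_data_size   = 16,
		.log_num_packets = 10,
		.q_counter       = 0,
		.num_channels    = 1,
	};
	struct mlx5_hairpin *hp;

	hp = mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
	if (IS_ERR(hp))
		return PTR_ERR(hp);
	/* ... on teardown: */
	mlx5_core_hairpin_destroy(hp);
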
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index aaa0bb9e7655..f876bfc0669c 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -36,33 +36,47 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
+#define MLX5_VPORT_MANAGER(mdev) \
+ (MLX5_CAP_GEN(mdev, vport_group_manager) && \
+ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
+ mlx5_core_is_pf(mdev))
+
enum {
MLX5_CAP_INLINE_MODE_L2,
MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
};
+/* The vport number for each function must remain unchanged */
+enum {
+ MLX5_VPORT_PF = 0x0,
+ MLX5_VPORT_FIRST_VF = 0x1,
+ MLX5_VPORT_ECPF = 0xfffe,
+ MLX5_VPORT_UPLINK = 0xffff
+};
+
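
A usage sketch for the new MLX5_VPORT_MANAGER() predicate: only the Ethernet PF that manages vport groups should bring up eswitch-style state (the call site below is hypothetical):

	if (MLX5_VPORT_MANAGER(mdev))
		err = init_eswitch_resources(mdev);	/* hypothetical helper */
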
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport, u8 state);
+ u16 vport, u8 other_vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
- u16 vport, u8 *addr);
+ u16 vport, bool other, u8 *addr);
+int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline);
void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
- u16 vport, u8 *addr);
+ u16 vport, const u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u16 vport, bool other_vport, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
- u32 vport, u64 node_guid);
+ u16 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
@@ -80,7 +94,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
u64 *node_guid);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
- u32 vport,
+ u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size);
@@ -89,7 +103,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int list_size);
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
- u32 vport,
+ u16 vport,
int *promisc_uc,
int *promisc_mc,
int *promisc_all);
@@ -97,23 +111,31 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int promisc_uc,
int promisc_mc,
int promisc_all);
-int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
- u32 vport,
- u16 vlans[],
- int *size);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int list_size);
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
+ u8 other_vport, u64 *rx_discard_vport_down,
+ u64 *tx_discard_vport_down);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
- int vf, u8 port_num, void *out,
- size_t out_sz);
+ int vf, u8 port_num, void *out);
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
int vf,
struct mlx5_hca_vport_context *req);
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev);
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
+
+u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
+ u16 opmod);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
+
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h
deleted file mode 100644
index 4efc3f56e6df..000000000000
--- a/include/linux/mm-arch-hooks.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Generic mm no-op hooks.
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _LINUX_MM_ARCH_HOOKS_H
-#define _LINUX_MM_ARCH_HOOKS_H
-
-#include <asm/mm-arch-hooks.h>
-
-#ifndef arch_remap
-static inline void arch_remap(struct mm_struct *mm,
- unsigned long old_start, unsigned long old_end,
- unsigned long new_start, unsigned long new_end)
-{
-}
-#define arch_remap arch_remap
-#endif
-
-#endif /* _LINUX_MM_ARCH_HOOKS_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index eb5e4bc946cc..7a1819c20643 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h